OpenCores Subversion repository: openrisc
URL: https://opencores.org/ocsvn/openrisc/openrisc/trunk
File: openrisc/tags/gnu-src/gcc-4.5.1/gcc-4.5.1-or32-1.0rc1/gcc/sched-deps.c (rev 435)


/* Instruction scheduling pass.  This file computes dependencies between
2
   instructions.
3
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5
   Free Software Foundation, Inc.
6
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
7
   and currently maintained by, Jim Wilson (wilson@cygnus.com)
8
 
9
This file is part of GCC.
10
 
11
GCC is free software; you can redistribute it and/or modify it under
12
the terms of the GNU General Public License as published by the Free
13
Software Foundation; either version 3, or (at your option) any later
14
version.
15
 
16
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17
WARRANTY; without even the implied warranty of MERCHANTABILITY or
18
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19
for more details.
20
 
21
You should have received a copy of the GNU General Public License
22
along with GCC; see the file COPYING3.  If not see
23
<http://www.gnu.org/licenses/>.  */
24
 
25
#include "config.h"
26
#include "system.h"
27
#include "coretypes.h"
28
#include "tm.h"
29
#include "toplev.h"
30
#include "rtl.h"
31
#include "tm_p.h"
32
#include "hard-reg-set.h"
33
#include "regs.h"
34
#include "function.h"
35
#include "flags.h"
36
#include "insn-config.h"
37
#include "insn-attr.h"
38
#include "except.h"
39
#include "toplev.h"
40
#include "recog.h"
41
#include "sched-int.h"
42
#include "params.h"
43
#include "cselib.h"
44
#include "ira.h"
45
#include "target.h"
46
 
47
#ifdef INSN_SCHEDULING
48
 
49
#ifdef ENABLE_CHECKING
50
#define CHECK (true)
51
#else
52
#define CHECK (false)
53
#endif
54
 
55
/* Holds current parameters for the dependency analyzer.  */
56
struct sched_deps_info_def *sched_deps_info;
57
 
58
/* The data is specific to the Haifa scheduler.  */
59
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
60
 
61
/* Return the major type present in the DS.  */
62
enum reg_note
63
ds_to_dk (ds_t ds)
64
{
65
  if (ds & DEP_TRUE)
66
    return REG_DEP_TRUE;
67
 
68
  if (ds & DEP_OUTPUT)
69
    return REG_DEP_OUTPUT;
70
 
71
  gcc_assert (ds & DEP_ANTI);
72
 
73
  return REG_DEP_ANTI;
74
}
75
 
76
/* Return equivalent dep_status.  */
77
ds_t
78
dk_to_ds (enum reg_note dk)
79
{
80
  switch (dk)
81
    {
82
    case REG_DEP_TRUE:
83
      return DEP_TRUE;
84
 
85
    case REG_DEP_OUTPUT:
86
      return DEP_OUTPUT;
87
 
88
    default:
89
      gcc_assert (dk == REG_DEP_ANTI);
90
      return DEP_ANTI;
91
    }
92
}
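 
/* Editor's illustrative sketch (not part of the original file): ds_to_dk
   and dk_to_ds map between the rich dep_status word and the major
   dependence kind.  A hypothetical helper that strips a status down to
   its major kind could look like this.  */
#if 0
static ds_t
example_major_kind_only (ds_t ds)
{
  /* Keep only the major dependence kind carried by DS.  */
  return dk_to_ds (ds_to_dk (ds));
}
#endif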
93
 
94
/* Functions to operate with dependence information container - dep_t.  */
95
 
96
/* Init DEP with the arguments.  */
97
void
98
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
99
{
100
  DEP_PRO (dep) = pro;
101
  DEP_CON (dep) = con;
102
  DEP_TYPE (dep) = type;
103
  DEP_STATUS (dep) = ds;
104
}
105
 
106
/* Init DEP with the arguments.
107
   While most of the scheduler (including targets) only need the major type
108
   of the dependency, it is convenient to hide full dep_status from them.  */
109
void
110
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
111
{
112
  ds_t ds;
113
 
114
  if ((current_sched_info->flags & USE_DEPS_LIST))
115
    ds = dk_to_ds (kind);
116
  else
117
    ds = -1;
118
 
119
  init_dep_1 (dep, pro, con, kind, ds);
120
}
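 
/* Editor's illustrative sketch (not part of the original file): deps are
   usually built on the stack and then handed to sd_add_dep or one of its
   wrappers, as sd_copy_back_deps and haifa_note_dep do further down.
   PRO and CON stand for arbitrary producer and consumer insns.  */
#if 0
static void
example_add_anti_dep (rtx pro, rtx con)
{
  dep_def _dep, *dep = &_dep;

  /* Fill in producer, consumer and the major dependence kind.  */
  init_dep (dep, pro, con, REG_DEP_ANTI);

  /* Attach it to CON's backward and PRO's forward lists.  */
  sd_add_dep (dep, false);
}
#endif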
121
 
122
/* Make a copy of FROM in TO.  */
123
static void
124
copy_dep (dep_t to, dep_t from)
125
{
126
  memcpy (to, from, sizeof (*to));
127
}
128
 
129
static void dump_ds (FILE *, ds_t);
130
 
131
/* Define flags for dump_dep ().  */
132
 
133
/* Dump producer of the dependence.  */
134
#define DUMP_DEP_PRO (2)
135
 
136
/* Dump consumer of the dependence.  */
137
#define DUMP_DEP_CON (4)
138
 
139
/* Dump type of the dependence.  */
140
#define DUMP_DEP_TYPE (8)
141
 
142
/* Dump status of the dependence.  */
143
#define DUMP_DEP_STATUS (16)
144
 
145
/* Dump all information about the dependence.  */
146
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE       \
147
                      |DUMP_DEP_STATUS)
148
 
149
/* Dump DEP to DUMP.
150
   FLAGS is a bit mask specifying what information about DEP needs
151
   to be printed.
152
   If FLAGS has the very first bit set, then dump all information about DEP
153
   and propagate this bit into the callee dump functions.  */
154
static void
155
dump_dep (FILE *dump, dep_t dep, int flags)
156
{
157
  if (flags & 1)
158
    flags |= DUMP_DEP_ALL;
159
 
160
  fprintf (dump, "<");
161
 
162
  if (flags & DUMP_DEP_PRO)
163
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
164
 
165
  if (flags & DUMP_DEP_CON)
166
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
167
 
168
  if (flags & DUMP_DEP_TYPE)
169
    {
170
      char t;
171
      enum reg_note type = DEP_TYPE (dep);
172
 
173
      switch (type)
174
        {
175
        case REG_DEP_TRUE:
176
          t = 't';
177
          break;
178
 
179
        case REG_DEP_OUTPUT:
180
          t = 'o';
181
          break;
182
 
183
        case REG_DEP_ANTI:
184
          t = 'a';
185
          break;
186
 
187
        default:
188
          gcc_unreachable ();
189
          break;
190
        }
191
 
192
      fprintf (dump, "%c; ", t);
193
    }
194
 
195
  if (flags & DUMP_DEP_STATUS)
196
    {
197
      if (current_sched_info->flags & USE_DEPS_LIST)
198
        dump_ds (dump, DEP_STATUS (dep));
199
    }
200
 
201
  fprintf (dump, ">");
202
}
203
 
204
/* Default flags for dump_dep ().  */
205
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
206
 
207
/* Dump all fields of DEP to STDERR.  */
208
void
209
sd_debug_dep (dep_t dep)
210
{
211
  dump_dep (stderr, dep, 1);
212
  fprintf (stderr, "\n");
213
}
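 
/* Editor's illustrative sketch (not part of the original file): dump_dep
   can also be driven with an explicit flag mask, for instance to print
   only the producer and the type of each backward dependence of an insn.  */
#if 0
static void
example_dump_back_deps (FILE *f, rtx insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      dump_dep (f, dep, DUMP_DEP_PRO | DUMP_DEP_TYPE);
      fprintf (f, " ");
    }
  fprintf (f, "\n");
}
#endif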
214
 
215
/* Determine whether DEP is a dependency link of a non-debug insn on a
216
   debug insn.  */
217
 
218
static inline bool
219
depl_on_debug_p (dep_link_t dep)
220
{
221
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
222
          && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
223
}
224
 
225
/* Functions to operate with a single link from the dependencies lists -
226
   dep_link_t.  */
227
 
228
/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
229
   PREV_NEXTP.  */
230
static void
231
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
232
{
233
  dep_link_t next = *prev_nextp;
234
 
235
  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
236
              && DEP_LINK_NEXT (l) == NULL);
237
 
238
  /* Init node being inserted.  */
239
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
240
  DEP_LINK_NEXT (l) = next;
241
 
242
  /* Fix next node.  */
243
  if (next != NULL)
244
    {
245
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
246
 
247
      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
248
    }
249
 
250
  /* Fix prev node.  */
251
  *prev_nextp = l;
252
}
253
 
254
/* Add dep_link LINK to deps_list L.  */
255
static void
256
add_to_deps_list (dep_link_t link, deps_list_t l)
257
{
258
  attach_dep_link (link, &DEPS_LIST_FIRST (l));
259
 
260
  /* Don't count debug deps.  */
261
  if (!depl_on_debug_p (link))
262
    ++DEPS_LIST_N_LINKS (l);
263
}
264
 
265
/* Detach dep_link L from the list.  */
266
static void
267
detach_dep_link (dep_link_t l)
268
{
269
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
270
  dep_link_t next = DEP_LINK_NEXT (l);
271
 
272
  *prev_nextp = next;
273
 
274
  if (next != NULL)
275
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;
276
 
277
  DEP_LINK_PREV_NEXTP (l) = NULL;
278
  DEP_LINK_NEXT (l) = NULL;
279
}
280
 
281
/* Remove link LINK from list LIST.  */
282
static void
283
remove_from_deps_list (dep_link_t link, deps_list_t list)
284
{
285
  detach_dep_link (link);
286
 
287
  /* Don't count debug deps.  */
288
  if (!depl_on_debug_p (link))
289
    --DEPS_LIST_N_LINKS (list);
290
}
291
 
292
/* Move link LINK from list FROM to list TO.  */
293
static void
294
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
295
{
296
  remove_from_deps_list (link, from);
297
  add_to_deps_list (link, to);
298
}
299
 
300
/* Return true if LINK is not attached to any list.  */
301
static bool
302
dep_link_is_detached_p (dep_link_t link)
303
{
304
  return DEP_LINK_PREV_NEXTP (link) == NULL;
305
}
306
 
307
/* Pool to hold all dependency nodes (dep_node_t).  */
308
static alloc_pool dn_pool;
309
 
310
/* Number of dep_nodes out there.  */
311
static int dn_pool_diff = 0;
312
 
313
/* Create a dep_node.  */
314
static dep_node_t
315
create_dep_node (void)
316
{
317
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
318
  dep_link_t back = DEP_NODE_BACK (n);
319
  dep_link_t forw = DEP_NODE_FORW (n);
320
 
321
  DEP_LINK_NODE (back) = n;
322
  DEP_LINK_NEXT (back) = NULL;
323
  DEP_LINK_PREV_NEXTP (back) = NULL;
324
 
325
  DEP_LINK_NODE (forw) = n;
326
  DEP_LINK_NEXT (forw) = NULL;
327
  DEP_LINK_PREV_NEXTP (forw) = NULL;
328
 
329
  ++dn_pool_diff;
330
 
331
  return n;
332
}
333
 
334
/* Delete dep_node N.  N must not be connected to any deps_list.  */
335
static void
336
delete_dep_node (dep_node_t n)
337
{
338
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
339
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));
340
 
341
  --dn_pool_diff;
342
 
343
  pool_free (dn_pool, n);
344
}
345
 
346
/* Pool to hold dependencies lists (deps_list_t).  */
347
static alloc_pool dl_pool;
348
 
349
/* Number of deps_lists out there.  */
350
static int dl_pool_diff = 0;
351
 
352
/* Functions to operate with dependences lists - deps_list_t.  */
353
 
354
/* Return true if list L is empty.  */
355
static bool
356
deps_list_empty_p (deps_list_t l)
357
{
358
  return DEPS_LIST_N_LINKS (l) == 0;
359
}
360
 
361
/* Create a new deps_list.  */
362
static deps_list_t
363
create_deps_list (void)
364
{
365
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
366
 
367
  DEPS_LIST_FIRST (l) = NULL;
368
  DEPS_LIST_N_LINKS (l) = 0;
369
 
370
  ++dl_pool_diff;
371
  return l;
372
}
373
 
374
/* Free deps_list L.  */
375
static void
376
free_deps_list (deps_list_t l)
377
{
378
  gcc_assert (deps_list_empty_p (l));
379
 
380
  --dl_pool_diff;
381
 
382
  pool_free (dl_pool, l);
383
}
384
 
385
/* Return true if there are no dep_nodes or deps_lists out there.
386
   After the region is scheduled all the dependency nodes and lists
387
   should [generally] be returned to pool.  */
388
bool
389
deps_pools_are_empty_p (void)
390
{
391
  return dn_pool_diff == 0 && dl_pool_diff == 0;
392
}
393
 
394
/* Remove all elements from L.  */
395
static void
396
clear_deps_list (deps_list_t l)
397
{
398
  do
399
    {
400
      dep_link_t link = DEPS_LIST_FIRST (l);
401
 
402
      if (link == NULL)
403
        break;
404
 
405
      remove_from_deps_list (link, l);
406
    }
407
  while (1);
408
}
409
 
410
static regset reg_pending_sets;
411
static regset reg_pending_clobbers;
412
static regset reg_pending_uses;
413
static enum reg_pending_barrier_mode reg_pending_barrier;
414
 
415
/* Hard registers implicitly clobbered or used (or may be implicitly
416
   clobbered or used) by the currently analyzed insn.  For example,
417
   an insn constraint may specify a single register class.  Even if
   there is currently no hard register in the insn, a particular hard
   register will appear in the insn after the reload pass because the
420
   constraint requires it.  */
421
static HARD_REG_SET implicit_reg_pending_clobbers;
422
static HARD_REG_SET implicit_reg_pending_uses;
423
 
424
/* To speed up the test for duplicate dependency links we keep a
425
   record of dependencies created by add_dependence when the average
426
   number of instructions in a basic block is very large.
427
 
428
   Studies have shown that there are typically around 5 instructions between
429
   branches for typical C code.  So we can make a guess that the average
430
   basic block is approximately 5 instructions long; we will choose 100X
431
   the average size as a very large basic block.
432
 
433
   Each insn has associated bitmaps for its dependencies.  Each bitmap
434
   has enough entries to represent a dependency on any other insn in
435
   the insn chain.  If the bitmaps for the true-dependency cache are
   allocated, then the other two caches are allocated as well.  */
437
static bitmap_head *true_dependency_cache = NULL;
438
static bitmap_head *output_dependency_cache = NULL;
439
static bitmap_head *anti_dependency_cache = NULL;
440
static bitmap_head *spec_dependency_cache = NULL;
441
static int cache_size;
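 
/* Editor's illustrative sketch (not part of the original file): the caches
   are indexed by the luid of the consumer and queried with the luid of the
   producer, as ask_dependency_caches and sd_find_dep_between do below.  */
#if 0
static bool
example_cached_true_dep_p (rtx pro, rtx con)
{
  if (true_dependency_cache == NULL)
    return false;

  return bitmap_bit_p (&true_dependency_cache[INSN_LUID (con)],
                       INSN_LUID (pro));
}
#endif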
442
 
443
static int deps_may_trap_p (const_rtx);
444
static void add_dependence_list (rtx, rtx, int, enum reg_note);
445
static void add_dependence_list_and_free (struct deps_desc *, rtx,
446
                                          rtx *, int, enum reg_note);
447
static void delete_all_dependences (rtx);
448
static void fixup_sched_groups (rtx);
449
 
450
static void flush_pending_lists (struct deps_desc *, rtx, int, int);
451
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
452
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
453
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
454
 
455
static bool sched_has_condition_p (const_rtx);
456
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
457
 
458
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
459
                                                          rtx, rtx);
460
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
461
 
462
#ifdef ENABLE_CHECKING
463
static void check_dep (dep_t, bool);
464
#endif
465
 
466
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
467
 
468
static int
469
deps_may_trap_p (const_rtx mem)
470
{
471
  const_rtx addr = XEXP (mem, 0);
472
 
473
  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
474
    {
475
      const_rtx t = get_reg_known_value (REGNO (addr));
476
      if (t)
477
        addr = t;
478
    }
479
  return rtx_addr_can_trap_p (addr);
480
}
481
 
482
 
483
/* Find the condition under which INSN is executed.  If REV is not NULL,
484
   it is set to TRUE when the returned comparison should be reversed
485
   to get the actual condition.  */
486
static rtx
487
sched_get_condition_with_rev (const_rtx insn, bool *rev)
488
{
489
  rtx pat = PATTERN (insn);
490
  rtx src;
491
 
492
  if (pat == 0)
493
    return 0;
494
 
495
  if (rev)
496
    *rev = false;
497
 
498
  if (GET_CODE (pat) == COND_EXEC)
499
    return COND_EXEC_TEST (pat);
500
 
501
  if (!any_condjump_p (insn) || !onlyjump_p (insn))
502
    return 0;
503
 
504
  src = SET_SRC (pc_set (insn));
505
 
506
  if (XEXP (src, 2) == pc_rtx)
507
    return XEXP (src, 0);
508
  else if (XEXP (src, 1) == pc_rtx)
509
    {
510
      rtx cond = XEXP (src, 0);
511
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
512
 
513
      if (revcode == UNKNOWN)
514
        return 0;
515
 
516
      if (rev)
517
        *rev = true;
518
      return cond;
519
    }
520
 
521
  return 0;
522
}
523
 
524
/* True when we can find a condition under which INSN is executed.  */
525
static bool
526
sched_has_condition_p (const_rtx insn)
527
{
528
  return !! sched_get_condition_with_rev (insn, NULL);
529
}
530
 
531
 
532
 
533
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
534
static int
535
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
536
{
537
  if (COMPARISON_P (cond1)
538
      && COMPARISON_P (cond2)
539
      && GET_CODE (cond1) ==
540
          (rev1==rev2
541
          ? reversed_comparison_code (cond2, NULL)
542
          : GET_CODE (cond2))
543
      && XEXP (cond1, 0) == XEXP (cond2, 0)
544
      && XEXP (cond1, 1) == XEXP (cond2, 1))
545
    return 1;
546
  return 0;
547
}
548
 
549
/* Return true if insn1 and insn2 can never depend on one another because
550
   the conditions under which they are executed are mutually exclusive.  */
551
bool
552
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
553
{
554
  rtx cond1, cond2;
555
  bool rev1 = false, rev2 = false;
556
 
557
  /* df doesn't handle conditional lifetimes entirely correctly;
558
     calls mess up the conditional lifetimes.  */
559
  if (!CALL_P (insn1) && !CALL_P (insn2))
560
    {
561
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
562
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
563
      if (cond1 && cond2
564
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
565
          /* Make sure first instruction doesn't affect condition of second
566
             instruction if switched.  */
567
          && !modified_in_p (cond1, insn2)
568
          /* Make sure second instruction doesn't affect condition of first
569
             instruction if switched.  */
570
          && !modified_in_p (cond2, insn1))
571
        return true;
572
    }
573
  return false;
574
}
575
 
576
 
577
/* Return true if INSN can potentially be speculated with type DS.  */
578
bool
579
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
580
{
581
  if (HAS_INTERNAL_DEP (insn))
582
    return false;
583
 
584
  if (!NONJUMP_INSN_P (insn))
585
    return false;
586
 
587
  if (SCHED_GROUP_P (insn))
588
    return false;
589
 
590
  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
591
    return false;
592
 
593
  if (side_effects_p (PATTERN (insn)))
594
    return false;
595
 
596
  if (ds & BE_IN_SPEC)
597
    /* The following instructions, which depend on a speculatively scheduled
598
       instruction, cannot themselves be speculatively scheduled.  */
599
    {
600
      if (may_trap_p (PATTERN (insn)))
601
        /* If instruction might trap, it cannot be speculatively scheduled.
602
           For control speculation it's obvious why and for data speculation
603
           it's because the insn might get wrong input if speculation
604
           wasn't successful.  */
605
        return false;
606
 
607
      if ((ds & BE_IN_DATA)
608
          && sched_has_condition_p (insn))
609
        /* If this is a predicated instruction, then it cannot be
610
           speculatively scheduled.  See PR35659.  */
611
        return false;
612
    }
613
 
614
  return true;
615
}
616
 
617
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
618
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
619
   and remove the type of the list returned [through LIST_PTR] from TYPES_PTR.
620
   This function is used to switch sd_iterator to the next list.
621
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
622
void
623
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
624
              deps_list_t *list_ptr, bool *resolved_p_ptr)
625
{
626
  sd_list_types_def types = *types_ptr;
627
 
628
  if (types & SD_LIST_HARD_BACK)
629
    {
630
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
631
      *resolved_p_ptr = false;
632
      *types_ptr = types & ~SD_LIST_HARD_BACK;
633
    }
634
  else if (types & SD_LIST_SPEC_BACK)
635
    {
636
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
637
      *resolved_p_ptr = false;
638
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
639
    }
640
  else if (types & SD_LIST_FORW)
641
    {
642
      *list_ptr = INSN_FORW_DEPS (insn);
643
      *resolved_p_ptr = false;
644
      *types_ptr = types & ~SD_LIST_FORW;
645
    }
646
  else if (types & SD_LIST_RES_BACK)
647
    {
648
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
649
      *resolved_p_ptr = true;
650
      *types_ptr = types & ~SD_LIST_RES_BACK;
651
    }
652
  else if (types & SD_LIST_RES_FORW)
653
    {
654
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
655
      *resolved_p_ptr = true;
656
      *types_ptr = types & ~SD_LIST_RES_FORW;
657
    }
658
  else
659
    {
660
      *list_ptr = NULL;
661
      *resolved_p_ptr = false;
662
      *types_ptr = SD_LIST_NONE;
663
    }
664
}
665
 
666
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
667
int
668
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
669
{
670
  int size = 0;
671
 
672
  while (list_types != SD_LIST_NONE)
673
    {
674
      deps_list_t list;
675
      bool resolved_p;
676
 
677
      sd_next_list (insn, &list_types, &list, &resolved_p);
678
      if (list)
679
        size += DEPS_LIST_N_LINKS (list);
680
    }
681
 
682
  return size;
683
}
684
 
685
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
686
 
687
bool
688
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
689
{
690
  while (list_types != SD_LIST_NONE)
691
    {
692
      deps_list_t list;
693
      bool resolved_p;
694
 
695
      sd_next_list (insn, &list_types, &list, &resolved_p);
696
      if (!deps_list_empty_p (list))
697
        return false;
698
    }
699
 
700
  return true;
701
}
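 
/* Editor's illustrative sketch (not part of the original file): the list
   types are bits that can be OR-ed together, so one call can ask about
   several lists at once.  */
#if 0
static bool
example_no_unresolved_back_deps_p (const_rtx insn)
{
  /* True when neither the hard nor the speculative backward list
     has any remaining dependencies.  */
  return sd_lists_empty_p (insn, SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK);
}
#endif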
702
 
703
/* Initialize data for INSN.  */
704
void
705
sd_init_insn (rtx insn)
706
{
707
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
708
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
709
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
710
  INSN_FORW_DEPS (insn) = create_deps_list ();
711
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
712
 
713
  if (DEBUG_INSN_P (insn))
714
    DEBUG_INSN_SCHED_P (insn) = TRUE;
715
 
716
  /* ??? It would be nice to allocate dependency caches here.  */
717
}
718
 
719
/* Free data for INSN.  */
720
void
721
sd_finish_insn (rtx insn)
722
{
723
  /* ??? It would be nice to deallocate dependency caches here.  */
724
 
725
  if (DEBUG_INSN_P (insn))
726
    {
727
      gcc_assert (DEBUG_INSN_SCHED_P (insn));
728
      DEBUG_INSN_SCHED_P (insn) = FALSE;
729
    }
730
 
731
  free_deps_list (INSN_HARD_BACK_DEPS (insn));
732
  INSN_HARD_BACK_DEPS (insn) = NULL;
733
 
734
  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
735
  INSN_SPEC_BACK_DEPS (insn) = NULL;
736
 
737
  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
738
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;
739
 
740
  free_deps_list (INSN_FORW_DEPS (insn));
741
  INSN_FORW_DEPS (insn) = NULL;
742
 
743
  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
744
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
745
}
746
 
747
/* Find a dependency between producer PRO and consumer CON.
748
   Search through resolved dependency lists if RESOLVED_P is true.
749
   If no such dependency is found return NULL,
750
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
751
   with an iterator pointing to it.  */
752
static dep_t
753
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
754
                              sd_iterator_def *sd_it_ptr)
755
{
756
  sd_list_types_def pro_list_type;
757
  sd_list_types_def con_list_type;
758
  sd_iterator_def sd_it;
759
  dep_t dep;
760
  bool found_p = false;
761
 
762
  if (resolved_p)
763
    {
764
      pro_list_type = SD_LIST_RES_FORW;
765
      con_list_type = SD_LIST_RES_BACK;
766
    }
767
  else
768
    {
769
      pro_list_type = SD_LIST_FORW;
770
      con_list_type = SD_LIST_BACK;
771
    }
772
 
773
  /* Walk through either back list of INSN or forw list of ELEM
774
     depending on which one is shorter.  */
775
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
776
    {
777
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
778
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
779
        if (DEP_PRO (dep) == pro)
780
          {
781
            found_p = true;
782
            break;
783
          }
784
    }
785
  else
786
    {
787
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
788
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
789
        if (DEP_CON (dep) == con)
790
          {
791
            found_p = true;
792
            break;
793
          }
794
    }
795
 
796
  if (found_p)
797
    {
798
      if (sd_it_ptr != NULL)
799
        *sd_it_ptr = sd_it;
800
 
801
      return dep;
802
    }
803
 
804
  return NULL;
805
}
806
 
807
/* Find a dependency between producer PRO and consumer CON.
808
   Use the dependency cache, if available, to check whether the
   dependency is present at all.
   Search through the resolved dependency lists if RESOLVED_P is true.
   Return the dependency, or NULL if none was found.  */
811
dep_t
812
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
813
{
814
  if (true_dependency_cache != NULL)
815
    /* Avoiding the list walk below can cut compile times dramatically
816
       for some code.  */
817
    {
818
      int elem_luid = INSN_LUID (pro);
819
      int insn_luid = INSN_LUID (con);
820
 
821
      gcc_assert (output_dependency_cache != NULL
822
                  && anti_dependency_cache != NULL);
823
 
824
      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
825
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
826
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
827
        return NULL;
828
    }
829
 
830
  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
831
}
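 
/* Editor's illustrative sketch (not part of the original file): a typical
   query checks whether any dependence links PRO to CON and, if so, looks
   at its kind.  */
#if 0
static bool
example_true_dep_between_p (rtx pro, rtx con)
{
  dep_t dep = sd_find_dep_between (pro, con, false);

  return dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE;
}
#endif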
832
 
833
/* Add or update a dependence described by DEP.
834
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
835
   data speculation.
836
 
837
   The function returns a value indicating if an old entry has been changed
838
   or a new entry has been added to insn's backward deps.
839
 
840
   This function merely checks whether the producer and the consumer are the same insn
841
   and doesn't create a dep in this case.  Actual manipulation of
842
   dependence data structures is performed in add_or_update_dep_1.  */
843
static enum DEPS_ADJUST_RESULT
844
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
845
{
846
  rtx elem = DEP_PRO (dep);
847
  rtx insn = DEP_CON (dep);
848
 
849
  gcc_assert (INSN_P (insn) && INSN_P (elem));
850
 
851
  /* Don't depend an insn on itself.  */
852
  if (insn == elem)
853
    {
854
      if (sched_deps_info->generate_spec_deps)
855
        /* INSN has an internal dependence, which we can't overcome.  */
856
        HAS_INTERNAL_DEP (insn) = 1;
857
 
858
      return DEP_NODEP;
859
    }
860
 
861
  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
862
}
863
 
864
/* Ask dependency caches what needs to be done for dependence DEP.
865
   Return DEP_CREATED if new dependence should be created and there is no
866
   need to try to find one searching the dependencies lists.
867
   Return DEP_PRESENT if there already is a dependence described by DEP and
868
   hence nothing is to be done.
869
   Return DEP_CHANGED if there already is a dependence, but it should be
870
   updated to incorporate additional information from DEP.  */
871
static enum DEPS_ADJUST_RESULT
872
ask_dependency_caches (dep_t dep)
873
{
874
  int elem_luid = INSN_LUID (DEP_PRO (dep));
875
  int insn_luid = INSN_LUID (DEP_CON (dep));
876
 
877
  gcc_assert (true_dependency_cache != NULL
878
              && output_dependency_cache != NULL
879
              && anti_dependency_cache != NULL);
880
 
881
  if (!(current_sched_info->flags & USE_DEPS_LIST))
882
    {
883
      enum reg_note present_dep_type;
884
 
885
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
886
        present_dep_type = REG_DEP_TRUE;
887
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
888
        present_dep_type = REG_DEP_OUTPUT;
889
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
890
        present_dep_type = REG_DEP_ANTI;
891
      else
892
        /* There is no existing dep so it should be created.  */
893
        return DEP_CREATED;
894
 
895
      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
896
        /* DEP does not add anything to the existing dependence.  */
897
        return DEP_PRESENT;
898
    }
899
  else
900
    {
901
      ds_t present_dep_types = 0;
902
 
903
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
904
        present_dep_types |= DEP_TRUE;
905
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
906
        present_dep_types |= DEP_OUTPUT;
907
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
908
        present_dep_types |= DEP_ANTI;
909
 
910
      if (present_dep_types == 0)
911
        /* There is no existing dep so it should be created.  */
912
        return DEP_CREATED;
913
 
914
      if (!(current_sched_info->flags & DO_SPECULATION)
915
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
916
        {
917
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
918
              == present_dep_types)
919
            /* DEP does not add anything to the existing dependence.  */
920
            return DEP_PRESENT;
921
        }
922
      else
923
        {
924
          /* Only true dependencies can be data speculative and
925
             only anti dependencies can be control speculative.  */
926
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
927
                      == present_dep_types);
928
 
929
          /* if (DEP is SPECULATIVE) then
930
             ..we should update DEP_STATUS
931
             else
932
             ..we should reset existing dep to non-speculative.  */
933
        }
934
    }
935
 
936
  return DEP_CHANGED;
937
}
938
 
939
/* Set dependency caches according to DEP.  */
940
static void
941
set_dependency_caches (dep_t dep)
942
{
943
  int elem_luid = INSN_LUID (DEP_PRO (dep));
944
  int insn_luid = INSN_LUID (DEP_CON (dep));
945
 
946
  if (!(current_sched_info->flags & USE_DEPS_LIST))
947
    {
948
      switch (DEP_TYPE (dep))
949
        {
950
        case REG_DEP_TRUE:
951
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
952
          break;
953
 
954
        case REG_DEP_OUTPUT:
955
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
956
          break;
957
 
958
        case REG_DEP_ANTI:
959
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
960
          break;
961
 
962
        default:
963
          gcc_unreachable ();
964
        }
965
    }
966
  else
967
    {
968
      ds_t ds = DEP_STATUS (dep);
969
 
970
      if (ds & DEP_TRUE)
971
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
972
      if (ds & DEP_OUTPUT)
973
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
974
      if (ds & DEP_ANTI)
975
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
976
 
977
      if (ds & SPECULATIVE)
978
        {
979
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
980
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
981
        }
982
    }
983
}
984
 
985
/* The type of dependence DEP has changed from OLD_TYPE.  Update the dependency
986
   caches accordingly.  */
987
static void
988
update_dependency_caches (dep_t dep, enum reg_note old_type)
989
{
990
  int elem_luid = INSN_LUID (DEP_PRO (dep));
991
  int insn_luid = INSN_LUID (DEP_CON (dep));
992
 
993
  /* Clear corresponding cache entry because type of the link
994
     may have changed.  Keep them if we use_deps_list.  */
995
  if (!(current_sched_info->flags & USE_DEPS_LIST))
996
    {
997
      switch (old_type)
998
        {
999
        case REG_DEP_OUTPUT:
1000
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1001
          break;
1002
 
1003
        case REG_DEP_ANTI:
1004
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1005
          break;
1006
 
1007
        default:
1008
          gcc_unreachable ();
1009
        }
1010
    }
1011
 
1012
  set_dependency_caches (dep);
1013
}
1014
 
1015
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1016
static void
1017
change_spec_dep_to_hard (sd_iterator_def sd_it)
1018
{
1019
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1020
  dep_link_t link = DEP_NODE_BACK (node);
1021
  dep_t dep = DEP_NODE_DEP (node);
1022
  rtx elem = DEP_PRO (dep);
1023
  rtx insn = DEP_CON (dep);
1024
 
1025
  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1026
 
1027
  DEP_STATUS (dep) &= ~SPECULATIVE;
1028
 
1029
  if (true_dependency_cache != NULL)
1030
    /* Clear the cache entry.  */
1031
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1032
                      INSN_LUID (elem));
1033
}
1034
 
1035
/* Update DEP to incorporate information from NEW_DEP.
1036
   SD_IT points to DEP in case it should be moved to another list.
1037
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1038
   data-speculative dependence should be updated.  */
1039
static enum DEPS_ADJUST_RESULT
1040
update_dep (dep_t dep, dep_t new_dep,
1041
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1042
            rtx mem1 ATTRIBUTE_UNUSED,
1043
            rtx mem2 ATTRIBUTE_UNUSED)
1044
{
1045
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1046
  enum reg_note old_type = DEP_TYPE (dep);
1047
 
1048
  /* If this is a more restrictive type of dependence than the
1049
     existing one, then change the existing dependence to this
1050
     type.  */
1051
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
1052
    {
1053
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
1054
      res = DEP_CHANGED;
1055
    }
1056
 
1057
  if (current_sched_info->flags & USE_DEPS_LIST)
1058
    /* Update DEP_STATUS.  */
1059
    {
1060
      ds_t dep_status = DEP_STATUS (dep);
1061
      ds_t ds = DEP_STATUS (new_dep);
1062
      ds_t new_status = ds | dep_status;
1063
 
1064
      if (new_status & SPECULATIVE)
1065
        /* Either existing dep or a dep we're adding or both are
1066
           speculative.  */
1067
        {
1068
          if (!(ds & SPECULATIVE)
1069
              || !(dep_status & SPECULATIVE))
1070
            /* The new dep can't be speculative.  */
1071
            {
1072
              new_status &= ~SPECULATIVE;
1073
 
1074
              if (dep_status & SPECULATIVE)
1075
                /* The old dep was speculative, but now it
1076
                   isn't.  */
1077
                change_spec_dep_to_hard (sd_it);
1078
            }
1079
          else
1080
            {
1081
              /* Both are speculative.  Merge probabilities.  */
1082
              if (mem1 != NULL)
1083
                {
1084
                  dw_t dw;
1085
 
1086
                  dw = estimate_dep_weak (mem1, mem2);
1087
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
1088
                }
1089
 
1090
              new_status = ds_merge (dep_status, ds);
1091
            }
1092
        }
1093
 
1094
      ds = new_status;
1095
 
1096
      if (dep_status != ds)
1097
        {
1098
          DEP_STATUS (dep) = ds;
1099
          res = DEP_CHANGED;
1100
        }
1101
    }
1102
 
1103
  if (true_dependency_cache != NULL
1104
      && res == DEP_CHANGED)
1105
    update_dependency_caches (dep, old_type);
1106
 
1107
  return res;
1108
}
1109
 
1110
/* Add or update a dependence described by DEP.
1111
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
1112
   data speculation.
1113
 
1114
   The function returns a value indicating if an old entry has been changed
1115
   or a new entry has been added to insn's backward deps or nothing has
1116
   been updated at all.  */
1117
static enum DEPS_ADJUST_RESULT
1118
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1119
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1120
{
1121
  bool maybe_present_p = true;
1122
  bool present_p = false;
1123
 
1124
  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1125
              && DEP_PRO (new_dep) != DEP_CON (new_dep));
1126
 
1127
#ifdef ENABLE_CHECKING
1128
  check_dep (new_dep, mem1 != NULL);
1129
#endif
1130
 
1131
  if (true_dependency_cache != NULL)
1132
    {
1133
      switch (ask_dependency_caches (new_dep))
1134
        {
1135
        case DEP_PRESENT:
1136
          return DEP_PRESENT;
1137
 
1138
        case DEP_CHANGED:
1139
          maybe_present_p = true;
1140
          present_p = true;
1141
          break;
1142
 
1143
        case DEP_CREATED:
1144
          maybe_present_p = false;
1145
          present_p = false;
1146
          break;
1147
 
1148
        default:
1149
          gcc_unreachable ();
1150
          break;
1151
        }
1152
    }
1153
 
1154
  /* Check that we don't already have this dependence.  */
1155
  if (maybe_present_p)
1156
    {
1157
      dep_t present_dep;
1158
      sd_iterator_def sd_it;
1159
 
1160
      gcc_assert (true_dependency_cache == NULL || present_p);
1161
 
1162
      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1163
                                                  DEP_CON (new_dep),
1164
                                                  resolved_p, &sd_it);
1165
 
1166
      if (present_dep != NULL)
1167
        /* We found an existing dependency between ELEM and INSN.  */
1168
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1169
      else
1170
        /* We didn't find a dep, so it shouldn't be present in the cache.  */
1171
        gcc_assert (!present_p);
1172
    }
1173
 
1174
  /* Might want to check one level of transitivity to save conses.
1175
     This check should be done in maybe_add_or_update_dep_1.
1176
     Since we made it to add_or_update_dep_1, we must create
1177
     (or update) a link.  */
1178
 
1179
  if (mem1 != NULL_RTX)
1180
    {
1181
      gcc_assert (sched_deps_info->generate_spec_deps);
1182
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1183
                                           estimate_dep_weak (mem1, mem2));
1184
    }
1185
 
1186
  sd_add_dep (new_dep, resolved_p);
1187
 
1188
  return DEP_CREATED;
1189
}
1190
 
1191
/* Initialize BACK_LIST_PTR with consumer's backward list and
1192
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1193
   initialize with lists that hold resolved deps.  */
1194
static void
1195
get_back_and_forw_lists (dep_t dep, bool resolved_p,
1196
                         deps_list_t *back_list_ptr,
1197
                         deps_list_t *forw_list_ptr)
1198
{
1199
  rtx con = DEP_CON (dep);
1200
 
1201
  if (!resolved_p)
1202
    {
1203
      if ((current_sched_info->flags & DO_SPECULATION)
1204
          && (DEP_STATUS (dep) & SPECULATIVE))
1205
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1206
      else
1207
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1208
 
1209
      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1210
    }
1211
  else
1212
    {
1213
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1214
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1215
    }
1216
}
1217
 
1218
/* Add dependence described by DEP.
1219
   If RESOLVED_P is true treat the dependence as a resolved one.  */
1220
void
1221
sd_add_dep (dep_t dep, bool resolved_p)
1222
{
1223
  dep_node_t n = create_dep_node ();
1224
  deps_list_t con_back_deps;
1225
  deps_list_t pro_forw_deps;
1226
  rtx elem = DEP_PRO (dep);
1227
  rtx insn = DEP_CON (dep);
1228
 
1229
  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1230
 
1231
  if ((current_sched_info->flags & DO_SPECULATION)
1232
      && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1233
    DEP_STATUS (dep) &= ~SPECULATIVE;
1234
 
1235
  copy_dep (DEP_NODE_DEP (n), dep);
1236
 
1237
  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1238
 
1239
  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1240
 
1241
#ifdef ENABLE_CHECKING
1242
  check_dep (dep, false);
1243
#endif
1244
 
1245
  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1246
 
1247
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1248
     in the bitmap caches of dependency information.  */
1249
  if (true_dependency_cache != NULL)
1250
    set_dependency_caches (dep);
1251
}
1252
 
1253
/* Add or update backward dependence between INSN and ELEM
1254
   with given type DEP_TYPE and dep_status DS.
1255
   This function is a convenience wrapper.  */
1256
enum DEPS_ADJUST_RESULT
1257
sd_add_or_update_dep (dep_t dep, bool resolved_p)
1258
{
1259
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1260
}
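 
/* Editor's illustrative sketch (not part of the original file): unlike
   sd_add_dep, sd_add_or_update_dep copes with the case where the two insns
   are already linked; the return value tells whether the dep was created,
   changed, or was already present.  */
#if 0
static void
example_add_or_update_output_dep (rtx pro, rtx con)
{
  dep_def _dep, *dep = &_dep;
  enum DEPS_ADJUST_RESULT res;

  init_dep (dep, pro, con, REG_DEP_OUTPUT);

  /* RES is one of DEP_CREATED, DEP_CHANGED or DEP_PRESENT.  */
  res = sd_add_or_update_dep (dep, false);
  (void) res;
}
#endif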
1261
 
1262
/* Resolve the dependence pointed to by SD_IT.
1263
   SD_IT will advance to the next element.  */
1264
void
1265
sd_resolve_dep (sd_iterator_def sd_it)
1266
{
1267
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1268
  dep_t dep = DEP_NODE_DEP (node);
1269
  rtx pro = DEP_PRO (dep);
1270
  rtx con = DEP_CON (dep);
1271
 
1272
  if ((current_sched_info->flags & DO_SPECULATION)
1273
      && (DEP_STATUS (dep) & SPECULATIVE))
1274
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1275
                   INSN_RESOLVED_BACK_DEPS (con));
1276
  else
1277
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1278
                   INSN_RESOLVED_BACK_DEPS (con));
1279
 
1280
  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1281
                 INSN_RESOLVED_FORW_DEPS (pro));
1282
}
1283
 
1284
/* Make TO depend on all the FROM's producers.
1285
   If RESOLVED_P is true add dependencies to the resolved lists.  */
1286
void
1287
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1288
{
1289
  sd_list_types_def list_type;
1290
  sd_iterator_def sd_it;
1291
  dep_t dep;
1292
 
1293
  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1294
 
1295
  FOR_EACH_DEP (from, list_type, sd_it, dep)
1296
    {
1297
      dep_def _new_dep, *new_dep = &_new_dep;
1298
 
1299
      copy_dep (new_dep, dep);
1300
      DEP_CON (new_dep) = to;
1301
      sd_add_dep (new_dep, resolved_p);
1302
    }
1303
}
1304
 
1305
/* Remove a dependency referred to by SD_IT.
1306
   SD_IT will point to the next dependence after removal.  */
1307
void
1308
sd_delete_dep (sd_iterator_def sd_it)
1309
{
1310
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1311
  dep_t dep = DEP_NODE_DEP (n);
1312
  rtx pro = DEP_PRO (dep);
1313
  rtx con = DEP_CON (dep);
1314
  deps_list_t con_back_deps;
1315
  deps_list_t pro_forw_deps;
1316
 
1317
  if (true_dependency_cache != NULL)
1318
    {
1319
      int elem_luid = INSN_LUID (pro);
1320
      int insn_luid = INSN_LUID (con);
1321
 
1322
      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1323
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1324
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1325
 
1326
      if (current_sched_info->flags & DO_SPECULATION)
1327
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1328
    }
1329
 
1330
  get_back_and_forw_lists (dep, sd_it.resolved_p,
1331
                           &con_back_deps, &pro_forw_deps);
1332
 
1333
  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1334
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1335
 
1336
  delete_dep_node (n);
1337
}
1338
 
1339
/* Dump size of the lists.  */
1340
#define DUMP_LISTS_SIZE (2)
1341
 
1342
/* Dump dependencies of the lists.  */
1343
#define DUMP_LISTS_DEPS (4)
1344
 
1345
/* Dump all information about the lists.  */
1346
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1347
 
1348
/* Dump deps_lists of INSN specified by TYPES to DUMP.
1349
   FLAGS is a bit mask specifying what information about the lists needs
1350
   to be printed.
1351
   If FLAGS has the very first bit set, then dump all information about
1352
   the lists and propagate this bit into the callee dump functions.  */
1353
static void
1354
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1355
{
1356
  sd_iterator_def sd_it;
1357
  dep_t dep;
1358
  int all;
1359
 
1360
  all = (flags & 1);
1361
 
1362
  if (all)
1363
    flags |= DUMP_LISTS_ALL;
1364
 
1365
  fprintf (dump, "[");
1366
 
1367
  if (flags & DUMP_LISTS_SIZE)
1368
    fprintf (dump, "%d; ", sd_lists_size (insn, types));
1369
 
1370
  if (flags & DUMP_LISTS_DEPS)
1371
    {
1372
      FOR_EACH_DEP (insn, types, sd_it, dep)
1373
        {
1374
          dump_dep (dump, dep, dump_dep_flags | all);
1375
          fprintf (dump, " ");
1376
        }
1377
    }
1378
}
1379
 
1380
/* Dump all information about deps_lists of INSN specified by TYPES
1381
   to STDERR.  */
1382
void
1383
sd_debug_lists (rtx insn, sd_list_types_def types)
1384
{
1385
  dump_lists (stderr, insn, types, 1);
1386
  fprintf (stderr, "\n");
1387
}
1388
 
1389
/* A convenience wrapper to operate on an entire list.  */
1390
 
1391
static void
1392
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1393
{
1394
  for (; list; list = XEXP (list, 1))
1395
    {
1396
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1397
        add_dependence (insn, XEXP (list, 0), dep_type);
1398
    }
1399
}
1400
 
1401
/* Similar, but free *LISTP at the same time, when the context
1402
   is not readonly.  */
1403
 
1404
static void
1405
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1406
                              int uncond, enum reg_note dep_type)
1407
{
1408
  rtx list, next;
1409
 
1410
  if (deps->readonly)
1411
    {
1412
      add_dependence_list (insn, *listp, uncond, dep_type);
1413
      return;
1414
    }
1415
 
1416
  for (list = *listp, *listp = NULL; list ; list = next)
1417
    {
1418
      next = XEXP (list, 1);
1419
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1420
        add_dependence (insn, XEXP (list, 0), dep_type);
1421
      free_INSN_LIST_node (list);
1422
    }
1423
}
1424
 
1425
/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */
1427
 
1428
static int
1429
remove_from_dependence_list (rtx insn, rtx* listp)
1430
{
1431
  int removed = 0;
1432
 
1433
  while (*listp)
1434
    {
1435
      if (XEXP (*listp, 0) == insn)
1436
        {
1437
          remove_free_INSN_LIST_node (listp);
1438
          removed++;
1439
          continue;
1440
        }
1441
 
1442
      listp = &XEXP (*listp, 1);
1443
    }
1444
 
1445
  return removed;
1446
}
1447
 
1448
/* Same as above, but process two lists at once.  */
1449
static int
1450
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1451
{
1452
  int removed = 0;
1453
 
1454
  while (*listp)
1455
    {
1456
      if (XEXP (*listp, 0) == insn)
1457
        {
1458
          remove_free_INSN_LIST_node (listp);
1459
          remove_free_EXPR_LIST_node (exprp);
1460
          removed++;
1461
          continue;
1462
        }
1463
 
1464
      listp = &XEXP (*listp, 1);
1465
      exprp = &XEXP (*exprp, 1);
1466
    }
1467
 
1468
  return removed;
1469
}
1470
 
1471
/* Clear all dependencies for an insn.  */
1472
static void
1473
delete_all_dependences (rtx insn)
1474
{
1475
  sd_iterator_def sd_it;
1476
  dep_t dep;
1477
 
1478
  /* The loop below could be optimized to clear the caches and back_deps
     in one call, but that would cause duplication of code from
1480
     delete_dep ().  */
1481
 
1482
  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1483
       sd_iterator_cond (&sd_it, &dep);)
1484
    sd_delete_dep (sd_it);
1485
}
1486
 
1487
/* All insns in a scheduling group except the first should only have
1488
   dependencies on the previous insn in the group.  So we find the
1489
   first instruction in the scheduling group by walking the dependence
1490
   chains backwards. Then we add the dependencies for the group to
1491
   the previous nonnote insn.  */
1492
 
1493
static void
1494
fixup_sched_groups (rtx insn)
1495
{
1496
  sd_iterator_def sd_it;
1497
  dep_t dep;
1498
  rtx prev_nonnote;
1499
 
1500
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1501
    {
1502
      rtx i = insn;
1503
      rtx pro = DEP_PRO (dep);
1504
 
1505
      do
1506
        {
1507
          i = prev_nonnote_insn (i);
1508
 
1509
          if (pro == i)
1510
            goto next_link;
1511
        } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1512
 
1513
      if (! sched_insns_conditions_mutex_p (i, pro))
1514
        add_dependence (i, pro, DEP_TYPE (dep));
1515
    next_link:;
1516
    }
1517
 
1518
  delete_all_dependences (insn);
1519
 
1520
  prev_nonnote = prev_nonnote_insn (insn);
1521
  while (DEBUG_INSN_P (prev_nonnote))
1522
    prev_nonnote = prev_nonnote_insn (prev_nonnote);
1523
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1524
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1525
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1526
}
1527
 
1528
/* Process an insn's memory dependencies.  There are four kinds of
1529
   dependencies:
1530
 
1531
   (0) read dependence: read follows read
1532
   (1) true dependence: read follows write
1533
   (2) output dependence: write follows write
1534
   (3) anti dependence: write follows read
1535
 
1536
   We are careful to build only dependencies which actually exist, and
1537
   use transitivity to avoid building too many links.  */
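 
/* Editor's illustrative example (not part of the original file): for the
   sequence
 
     (1) mem[a] = r1;    write
     (2) r2 = mem[a];    read  - true dependence on (1)
     (3) mem[a] = r3;    write - output dependence on (1), anti on (2)
 
   links are needed only for (1)->(2), (1)->(3) and (2)->(3); a pair of
   reads from the same location imposes no ordering constraint.  */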
1538
 
1539
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1540
   The MEM is a memory reference contained within INSN, which we are saving
1541
   so that we can do memory aliasing on it.  */
1542
 
1543
static void
1544
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1545
                         rtx insn, rtx mem)
1546
{
1547
  rtx *insn_list;
1548
  rtx *mem_list;
1549
  rtx link;
1550
 
1551
  gcc_assert (!deps->readonly);
1552
  if (read_p)
1553
    {
1554
      insn_list = &deps->pending_read_insns;
1555
      mem_list = &deps->pending_read_mems;
1556
      if (!DEBUG_INSN_P (insn))
1557
        deps->pending_read_list_length++;
1558
    }
1559
  else
1560
    {
1561
      insn_list = &deps->pending_write_insns;
1562
      mem_list = &deps->pending_write_mems;
1563
      deps->pending_write_list_length++;
1564
    }
1565
 
1566
  link = alloc_INSN_LIST (insn, *insn_list);
1567
  *insn_list = link;
1568
 
1569
  if (sched_deps_info->use_cselib)
1570
    {
1571
      mem = shallow_copy_rtx (mem);
1572
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
1573
    }
1574
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1575
  *mem_list = link;
1576
}
1577
 
1578
/* Make a dependency between every memory reference on the pending lists
1579
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1580
   dependencies for a read operation, similarly with FOR_WRITE.  */
1581
 
1582
static void
1583
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1584
                     int for_write)
1585
{
1586
  if (for_write)
1587
    {
1588
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1589
                                    1, REG_DEP_ANTI);
1590
      if (!deps->readonly)
1591
        {
1592
          free_EXPR_LIST_list (&deps->pending_read_mems);
1593
          deps->pending_read_list_length = 0;
1594
        }
1595
    }
1596
 
1597
  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1598
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1599
 
1600
  add_dependence_list_and_free (deps, insn,
1601
                                &deps->last_pending_memory_flush, 1,
1602
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1603
  if (!deps->readonly)
1604
    {
1605
      free_EXPR_LIST_list (&deps->pending_write_mems);
1606
      deps->pending_write_list_length = 0;
1607
 
1608
      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1609
      deps->pending_flush_length = 1;
1610
    }
1611
}
1612
 
1613
/* The instruction whose dependencies we are analyzing.  */
1614
static rtx cur_insn = NULL_RTX;
1615
 
1616
/* Implement hooks for haifa scheduler.  */
1617
 
1618
static void
1619
haifa_start_insn (rtx insn)
1620
{
1621
  gcc_assert (insn && !cur_insn);
1622
 
1623
  cur_insn = insn;
1624
}
1625
 
1626
static void
1627
haifa_finish_insn (void)
1628
{
1629
  cur_insn = NULL;
1630
}
1631
 
1632
void
1633
haifa_note_reg_set (int regno)
1634
{
1635
  SET_REGNO_REG_SET (reg_pending_sets, regno);
1636
}
1637
 
1638
void
1639
haifa_note_reg_clobber (int regno)
1640
{
1641
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1642
}
1643
 
1644
void
1645
haifa_note_reg_use (int regno)
1646
{
1647
  SET_REGNO_REG_SET (reg_pending_uses, regno);
1648
}
1649
 
1650
static void
1651
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1652
{
1653
  if (!(ds & SPECULATIVE))
1654
    {
1655
      mem = NULL_RTX;
1656
      pending_mem = NULL_RTX;
1657
    }
1658
  else
1659
    gcc_assert (ds & BEGIN_DATA);
1660
 
1661
  {
1662
    dep_def _dep, *dep = &_dep;
1663
 
1664
    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1665
                current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1666
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1667
  }
1668
 
1669
}
1670
 
1671
static void
1672
haifa_note_dep (rtx elem, ds_t ds)
1673
{
1674
  dep_def _dep;
1675
  dep_t dep = &_dep;
1676
 
1677
  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1678
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1679
}
1680
 
1681
static void
1682
note_reg_use (int r)
1683
{
1684
  if (sched_deps_info->note_reg_use)
1685
    sched_deps_info->note_reg_use (r);
1686
}
1687
 
1688
static void
1689
note_reg_set (int r)
1690
{
1691
  if (sched_deps_info->note_reg_set)
1692
    sched_deps_info->note_reg_set (r);
1693
}
1694
 
1695
static void
1696
note_reg_clobber (int r)
1697
{
1698
  if (sched_deps_info->note_reg_clobber)
1699
    sched_deps_info->note_reg_clobber (r);
1700
}
1701
 
1702
static void
1703
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1704
{
1705
  if (sched_deps_info->note_mem_dep)
1706
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
1707
}
1708
 
1709
static void
1710
note_dep (rtx e, ds_t ds)
1711
{
1712
  if (sched_deps_info->note_dep)
1713
    sched_deps_info->note_dep (e, ds);
1714
}
1715
 
1716
/* Return the reg_note corresponding to DS.  */
1717
enum reg_note
1718
ds_to_dt (ds_t ds)
1719
{
1720
  if (ds & DEP_TRUE)
1721
    return REG_DEP_TRUE;
1722
  else if (ds & DEP_OUTPUT)
1723
    return REG_DEP_OUTPUT;
1724
  else
1725
    {
1726
      gcc_assert (ds & DEP_ANTI);
1727
      return REG_DEP_ANTI;
1728
    }
1729
}
1730
 
1731
 
1732
 
1733
/* Functions for computation of info needed for register pressure
1734
   sensitive insn scheduling.  */
1735
 
1736
 
1737
/* Allocate and return reg_use_data structure for REGNO and INSN.  */
1738
static struct reg_use_data *
1739
create_insn_reg_use (int regno, rtx insn)
1740
{
1741
  struct reg_use_data *use;
1742
 
1743
  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1744
  use->regno = regno;
1745
  use->insn = insn;
1746
  use->next_insn_use = INSN_REG_USE_LIST (insn);
1747
  INSN_REG_USE_LIST (insn) = use;
1748
  return use;
1749
}
1750
 
1751
/* Allocate and return reg_set_data structure for REGNO and INSN.  */
1752
static struct reg_set_data *
1753
create_insn_reg_set (int regno, rtx insn)
1754
{
1755
  struct reg_set_data *set;
1756
 
1757
  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1758
  set->regno = regno;
1759
  set->insn = insn;
1760
  set->next_insn_set = INSN_REG_SET_LIST (insn);
1761
  INSN_REG_SET_LIST (insn) = set;
1762
  return set;
1763
}
1764
 
1765
/* Set up insn register uses for INSN and dependency context DEPS.  */
1766
static void
1767
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1768
{
1769
  unsigned i;
1770
  reg_set_iterator rsi;
1771
  rtx list;
1772
  struct reg_use_data *use, *use2, *next;
1773
  struct deps_reg *reg_last;
1774
 
1775
  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1776
    {
1777
      if (i < FIRST_PSEUDO_REGISTER
1778
          && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1779
        continue;
1780
 
1781
      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1782
          && ! REGNO_REG_SET_P (reg_pending_sets, i)
1783
          && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1784
        /* Ignore a use that is not dying.  */
1785
        continue;
1786
 
1787
      use = create_insn_reg_use (i, insn);
1788
      use->next_regno_use = use;
1789
      reg_last = &deps->reg_last[i];
1790
 
1791
      /* Create the cycle list of uses.  */
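      /* USE already points to itself; each USE2 created for an earlier
         insn that also uses register I is spliced in right after USE,
         so all uses of I end up on a single circular list.  */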
1792
      for (list = reg_last->uses; list; list = XEXP (list, 1))
1793
        {
1794
          use2 = create_insn_reg_use (i, XEXP (list, 0));
1795
          next = use->next_regno_use;
1796
          use->next_regno_use = use2;
1797
          use2->next_regno_use = next;
1798
        }
1799
    }
1800
}
1801
 
1802
/* Register pressure info for the currently processed insn.  */
1803
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
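/* It is indexed by register class and is cleared and refilled for every
   insn by setup_insn_reg_pressure_info below, which only looks at the
   classes in ira_reg_class_cover.  */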
1804
 
1805
/* Return TRUE if INSN has the use structure for REGNO.  */
1806
static bool
1807
insn_use_p (rtx insn, int regno)
1808
{
1809
  struct reg_use_data *use;
1810
 
1811
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1812
    if (use->regno == regno)
1813
      return true;
1814
  return false;
1815
}
1816
 
1817
/* Update the register pressure info after birth of pseudo register REGNO
1818
   in INSN.  Arguments CLOBBER_P and UNUSED_P say respectively whether
1819
   the register is clobbered or unused after the insn.  */
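/* E.g. a pseudo whose cover class is GENERAL_REGS and whose mode needs
   two hard registers adds 2 to the appropriate counter below.  */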
1820
static void
1821
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1822
{
1823
  int incr, new_incr;
1824
  enum reg_class cl;
1825
 
1826
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1827
  cl = sched_regno_cover_class[regno];
1828
  if (cl != NO_REGS)
1829
    {
1830
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1831
      if (clobber_p)
1832
        {
1833
          new_incr = reg_pressure_info[cl].clobber_increase + incr;
1834
          reg_pressure_info[cl].clobber_increase = new_incr;
1835
        }
1836
      else if (unused_p)
1837
        {
1838
          new_incr = reg_pressure_info[cl].unused_set_increase + incr;
1839
          reg_pressure_info[cl].unused_set_increase = new_incr;
1840
        }
1841
      else
1842
        {
1843
          new_incr = reg_pressure_info[cl].set_increase + incr;
1844
          reg_pressure_info[cl].set_increase = new_incr;
1845
          if (! insn_use_p (insn, regno))
1846
            reg_pressure_info[cl].change += incr;
1847
          create_insn_reg_set (regno, insn);
1848
        }
1849
      gcc_assert (new_incr < (1 << INCREASE_BITS));
1850
    }
1851
}
1852
 
1853
/* Like mark_insn_pseudo_birth except that NREGS says how many
1854
   hard registers are involved in the birth.  */
1855
static void
1856
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
1857
                            bool clobber_p, bool unused_p)
1858
{
1859
  enum reg_class cl;
1860
  int new_incr, last = regno + nregs;
1861
 
1862
  while (regno < last)
1863
    {
1864
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1865
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1866
        {
1867
          cl = sched_regno_cover_class[regno];
1868
          if (cl != NO_REGS)
1869
            {
1870
              if (clobber_p)
1871
                {
1872
                  new_incr = reg_pressure_info[cl].clobber_increase + 1;
1873
                  reg_pressure_info[cl].clobber_increase = new_incr;
1874
                }
1875
              else if (unused_p)
1876
                {
1877
                  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
1878
                  reg_pressure_info[cl].unused_set_increase = new_incr;
1879
                }
1880
              else
1881
                {
1882
                  new_incr = reg_pressure_info[cl].set_increase + 1;
1883
                  reg_pressure_info[cl].set_increase = new_incr;
1884
                  if (! insn_use_p (insn, regno))
1885
                    reg_pressure_info[cl].change += 1;
1886
                  create_insn_reg_set (regno, insn);
1887
                }
1888
              gcc_assert (new_incr < (1 << INCREASE_BITS));
1889
            }
1890
        }
1891
      regno++;
1892
    }
1893
}
1894
 
1895
/* Update the register pressure info after birth of pseudo or hard
1896
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
1897
   respectively whether the register is clobbered or unused after the
1898
   insn.  */
1899
static void
1900
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
1901
{
1902
  int regno;
1903
 
1904
  if (GET_CODE (reg) == SUBREG)
1905
    reg = SUBREG_REG (reg);
1906
 
1907
  if (! REG_P (reg))
1908
    return;
1909
 
1910
  regno = REGNO (reg);
1911
  if (regno < FIRST_PSEUDO_REGISTER)
1912
    mark_insn_hard_regno_birth (insn, regno,
1913
                                hard_regno_nregs[regno][GET_MODE (reg)],
1914
                                clobber_p, unused_p);
1915
  else
1916
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
1917
}
1918
 
1919
/* Update the register pressure info after death of pseudo register
1920
   REGNO.  */
1921
static void
1922
mark_pseudo_death (int regno)
1923
{
1924
  int incr;
1925
  enum reg_class cl;
1926
 
1927
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1928
  cl = sched_regno_cover_class[regno];
1929
  if (cl != NO_REGS)
1930
    {
1931
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1932
      reg_pressure_info[cl].change -= incr;
1933
    }
1934
}
1935
 
1936
/* Like mark_pseudo_death except that NREGS says how many hard
1937
   registers are involved in the death.  */
1938
static void
1939
mark_hard_regno_death (int regno, int nregs)
1940
{
1941
  enum reg_class cl;
1942
  int last = regno + nregs;
1943
 
1944
  while (regno < last)
1945
    {
1946
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1947
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1948
        {
1949
          cl = sched_regno_cover_class[regno];
1950
          if (cl != NO_REGS)
1951
            reg_pressure_info[cl].change -= 1;
1952
        }
1953
      regno++;
1954
    }
1955
}
1956
 
1957
/* Update the register pressure info after death of pseudo or hard
1958
   register REG.  */
1959
static void
1960
mark_reg_death (rtx reg)
1961
{
1962
  int regno;
1963
 
1964
  if (GET_CODE (reg) == SUBREG)
1965
    reg = SUBREG_REG (reg);
1966
 
1967
  if (! REG_P (reg))
1968
    return;
1969
 
1970
  regno = REGNO (reg);
1971
  if (regno < FIRST_PSEUDO_REGISTER)
1972
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
1973
  else
1974
    mark_pseudo_death (regno);
1975
}
1976
 
1977
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
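/* It is used as a note_stores callback from setup_insn_reg_pressure_info;
   SETTER is NULL_RTX when the function is called for a REG_INC note.  */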
1978
static void
1979
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
1980
{
1981
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
1982
    return;
1983
  mark_insn_reg_birth
1984
    ((rtx) data, reg, false,
1985
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
1986
}
1987
 
1988
/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
1989
static void
1990
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
1991
{
1992
  if (GET_CODE (setter) == CLOBBER)
1993
    mark_insn_reg_birth ((rtx) data, reg, true, false);
1994
}
1995
 
1996
/* Set up reg pressure info related to INSN.  */
1997
static void
1998
setup_insn_reg_pressure_info (rtx insn)
1999
{
2000
  int i, len;
2001
  enum reg_class cl;
2002
  static struct reg_pressure_data *pressure_info;
2003
  rtx link;
2004
 
2005
  gcc_assert (sched_pressure_p);
2006
 
2007
  if (! INSN_P (insn))
2008
    return;
2009
 
2010
  for (i = 0; i < ira_reg_class_cover_size; i++)
2011
    {
2012
      cl = ira_reg_class_cover[i];
2013
      reg_pressure_info[cl].clobber_increase = 0;
2014
      reg_pressure_info[cl].set_increase = 0;
2015
      reg_pressure_info[cl].unused_set_increase = 0;
2016
      reg_pressure_info[cl].change = 0;
2017
    }
2018
 
2019
  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2020
 
2021
  note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2022
 
2023
#ifdef AUTO_INC_DEC
2024
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2025
    if (REG_NOTE_KIND (link) == REG_INC)
2026
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2027
#endif
2028
 
2029
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2030
    if (REG_NOTE_KIND (link) == REG_DEAD)
2031
      mark_reg_death (XEXP (link, 0));
2032
 
2033
  len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
2034
  pressure_info
2035
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2036
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
2037
                                                  * sizeof (int), 1);
2038
  for (i = 0; i < ira_reg_class_cover_size; i++)
2039
    {
2040
      cl = ira_reg_class_cover[i];
2041
      pressure_info[i].clobber_increase
2042
        = reg_pressure_info[cl].clobber_increase;
2043
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2044
      pressure_info[i].unused_set_increase
2045
        = reg_pressure_info[cl].unused_set_increase;
2046
      pressure_info[i].change = reg_pressure_info[cl].change;
2047
    }
2048
}
2049
 
2050
 
2051
 
2052
 
2053
/* Internal variable for sched_analyze_[12] () functions.
2054
   If it is nonzero, this means that sched_analyze_[12] looks
2055
   at the toplevel SET.  */
2056
static bool can_start_lhs_rhs_p;
2057
 
2058
/* Extend reg info for the deps context DEPS given that
2059
   we have just generated a register numbered REGNO.  */
2060
static void
2061
extend_deps_reg_info (struct deps_desc *deps, int regno)
2062
{
2063
  int max_regno = regno + 1;
2064
 
2065
  gcc_assert (!reload_completed);
2066
 
2067
  /* In a readonly context, it would not hurt to extend info,
2068
     but it should not be needed.  */
2069
  if (reload_completed && deps->readonly)
2070
    {
2071
      deps->max_reg = max_regno;
2072
      return;
2073
    }
2074
 
2075
  if (max_regno > deps->max_reg)
2076
    {
2077
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2078
                                   max_regno);
2079
      memset (&deps->reg_last[deps->max_reg],
2080
              0, (max_regno - deps->max_reg)
2081
              * sizeof (struct deps_reg));
2082
      deps->max_reg = max_regno;
2083
    }
2084
}
2085
 
2086
/* Extend REG_INFO_P if needed.  */
2087
void
2088
maybe_extend_reg_info_p (void)
2089
{
2090
  /* Extend REG_INFO_P, if needed.  */
2091
  if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2092
    {
2093
      size_t new_reg_info_p_size = max_regno + 128;
2094
 
2095
      gcc_assert (!reload_completed && sel_sched_p ());
2096
 
2097
      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2098
                                                    new_reg_info_p_size,
2099
                                                    reg_info_p_size,
2100
                                                    sizeof (*reg_info_p));
2101
      reg_info_p_size = new_reg_info_p_size;
2102
    }
2103
}
2104
 
2105
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2106
   The type of the reference is specified by REF and can be SET,
2107
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
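/* E.g. analyzing (reg:DI 0) as a SET destination on a target where
   DImode needs two hard registers calls note_reg_set for both
   register 0 and register 1 below.  */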
2108
 
2109
static void
2110
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2111
                   enum rtx_code ref, rtx insn)
2112
{
2113
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2114
  if (!reload_completed && sel_sched_p ()
2115
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2116
    extend_deps_reg_info (deps, regno);
2117
 
2118
  maybe_extend_reg_info_p ();
2119
 
2120
  /* A hard reg in a wide mode may really be multiple registers.
2121
     If so, mark all of them just like the first.  */
2122
  if (regno < FIRST_PSEUDO_REGISTER)
2123
    {
2124
      int i = hard_regno_nregs[regno][mode];
2125
      if (ref == SET)
2126
        {
2127
          while (--i >= 0)
2128
            note_reg_set (regno + i);
2129
        }
2130
      else if (ref == USE)
2131
        {
2132
          while (--i >= 0)
2133
            note_reg_use (regno + i);
2134
        }
2135
      else
2136
        {
2137
          while (--i >= 0)
2138
            note_reg_clobber (regno + i);
2139
        }
2140
    }
2141
 
2142
  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2143
     it does not reload.  Ignore these as they have served their
2144
     purpose already.  */
2145
  else if (regno >= deps->max_reg)
2146
    {
2147
      enum rtx_code code = GET_CODE (PATTERN (insn));
2148
      gcc_assert (code == USE || code == CLOBBER);
2149
    }
2150
 
2151
  else
2152
    {
2153
      if (ref == SET)
2154
        note_reg_set (regno);
2155
      else if (ref == USE)
2156
        note_reg_use (regno);
2157
      else
2158
        note_reg_clobber (regno);
2159
 
2160
      /* Pseudos that are REG_EQUIV to something may be replaced
2161
         by that during reloading.  We need only add dependencies for
2162
        the address in the REG_EQUIV note.  */
2163
      if (!reload_completed && get_reg_known_equiv_p (regno))
2164
        {
2165
          rtx t = get_reg_known_value (regno);
2166
          if (MEM_P (t))
2167
            sched_analyze_2 (deps, XEXP (t, 0), insn);
2168
        }
2169
 
2170
      /* Don't let it cross a call after scheduling if it doesn't
2171
         already cross one.  */
2172
      if (REG_N_CALLS_CROSSED (regno) == 0)
2173
        {
2174
          if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2175
            deps->sched_before_next_call
2176
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2177
          else
2178
            add_dependence_list (insn, deps->last_function_call, 1,
2179
                                 REG_DEP_ANTI);
2180
        }
2181
    }
2182
}
2183
 
2184
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2185
   rtx, X, creating all dependencies generated by the write to the
2186
   destination of X, and reads of everything mentioned.  */
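/* E.g. for (set (mem:SI (reg:SI 100)) (reg:SI 101)) this records a
   pending memory write and, via sched_analyze_2, records reads of the
   address register 100 and of the source register 101.  */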
2187
 
2188
static void
2189
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2190
{
2191
  rtx dest = XEXP (x, 0);
2192
  enum rtx_code code = GET_CODE (x);
2193
  bool cslr_p = can_start_lhs_rhs_p;
2194
 
2195
  can_start_lhs_rhs_p = false;
2196
 
2197
  gcc_assert (dest);
2198
  if (dest == 0)
2199
    return;
2200
 
2201
  if (cslr_p && sched_deps_info->start_lhs)
2202
    sched_deps_info->start_lhs (dest);
2203
 
2204
  if (GET_CODE (dest) == PARALLEL)
2205
    {
2206
      int i;
2207
 
2208
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2209
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2210
          sched_analyze_1 (deps,
2211
                           gen_rtx_CLOBBER (VOIDmode,
2212
                                            XEXP (XVECEXP (dest, 0, i), 0)),
2213
                           insn);
2214
 
2215
      if (cslr_p && sched_deps_info->finish_lhs)
2216
        sched_deps_info->finish_lhs ();
2217
 
2218
      if (code == SET)
2219
        {
2220
          can_start_lhs_rhs_p = cslr_p;
2221
 
2222
          sched_analyze_2 (deps, SET_SRC (x), insn);
2223
 
2224
          can_start_lhs_rhs_p = false;
2225
        }
2226
 
2227
      return;
2228
    }
2229
 
2230
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2231
         || GET_CODE (dest) == ZERO_EXTRACT)
2232
    {
2233
      if (GET_CODE (dest) == STRICT_LOW_PART
2234
         || GET_CODE (dest) == ZERO_EXTRACT
2235
         || df_read_modify_subreg_p (dest))
2236
        {
2237
          /* These both read and modify the result.  We must handle
2238
             them as writes to get proper dependencies for following
2239
             instructions.  We must handle them as reads to get proper
2240
             dependencies from this to previous instructions.
2241
             Thus we need to call sched_analyze_2.  */
2242
 
2243
          sched_analyze_2 (deps, XEXP (dest, 0), insn);
2244
        }
2245
      if (GET_CODE (dest) == ZERO_EXTRACT)
2246
        {
2247
          /* The second and third arguments are values read by this insn.  */
2248
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
2249
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
2250
        }
2251
      dest = XEXP (dest, 0);
2252
    }
2253
 
2254
  if (REG_P (dest))
2255
    {
2256
      int regno = REGNO (dest);
2257
      enum machine_mode mode = GET_MODE (dest);
2258
 
2259
      sched_analyze_reg (deps, regno, mode, code, insn);
2260
 
2261
#ifdef STACK_REGS
2262
      /* Treat all writes to a stack register as modifying the TOS.  */
2263
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2264
        {
2265
          int nregs;
2266
 
2267
          /* Avoid analyzing the same register twice.  */
2268
          if (regno != FIRST_STACK_REG)
2269
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2270
 
2271
          nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2272
          while (--nregs >= 0)
2273
            SET_HARD_REG_BIT (implicit_reg_pending_uses,
2274
                              FIRST_STACK_REG + nregs);
2275
        }
2276
#endif
2277
    }
2278
  else if (MEM_P (dest))
2279
    {
2280
      /* Writing memory.  */
2281
      rtx t = dest;
2282
 
2283
      if (sched_deps_info->use_cselib)
2284
        {
2285
          enum machine_mode address_mode
2286
            = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2287
 
2288
          t = shallow_copy_rtx (dest);
2289
          cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2290
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2291
        }
2292
      t = canon_rtx (t);
2293
 
2294
      /* Pending lists can't get larger with a readonly context.  */
2295
      if (!deps->readonly
2296
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
2297
              > MAX_PENDING_LIST_LENGTH))
2298
        {
2299
          /* Flush all pending reads and writes to prevent the pending lists
2300
             from getting any larger.  Insn scheduling runs too slowly when
2301
             these lists get long.  When compiling GCC with itself,
2302
             this flush occurs 8 times for sparc, and 10 times for m88k using
2303
             the default value of 32.  */
2304
          flush_pending_lists (deps, insn, false, true);
2305
        }
2306
      else
2307
        {
2308
          rtx pending, pending_mem;
2309
 
2310
          pending = deps->pending_read_insns;
2311
          pending_mem = deps->pending_read_mems;
2312
          while (pending)
2313
            {
2314
              if (anti_dependence (XEXP (pending_mem, 0), t)
2315
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2316
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2317
                              DEP_ANTI);
2318
 
2319
              pending = XEXP (pending, 1);
2320
              pending_mem = XEXP (pending_mem, 1);
2321
            }
2322
 
2323
          pending = deps->pending_write_insns;
2324
          pending_mem = deps->pending_write_mems;
2325
          while (pending)
2326
            {
2327
              if (output_dependence (XEXP (pending_mem, 0), t)
2328
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2329
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2330
                              DEP_OUTPUT);
2331
 
2332
              pending = XEXP (pending, 1);
2333
              pending_mem = XEXP (pending_mem, 1);
2334
            }
2335
 
2336
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2337
                               REG_DEP_ANTI);
2338
 
2339
          if (!deps->readonly)
2340
            add_insn_mem_dependence (deps, false, insn, dest);
2341
        }
2342
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
2343
    }
2344
 
2345
  if (cslr_p && sched_deps_info->finish_lhs)
2346
    sched_deps_info->finish_lhs ();
2347
 
2348
  /* Analyze reads.  */
2349
  if (GET_CODE (x) == SET)
2350
    {
2351
      can_start_lhs_rhs_p = cslr_p;
2352
 
2353
      sched_analyze_2 (deps, SET_SRC (x), insn);
2354
 
2355
      can_start_lhs_rhs_p = false;
2356
    }
2357
}
2358
 
2359
/* Analyze the uses of memory and registers in rtx X in INSN.  */
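/* Constants are ignored, a REG becomes a register use, a MEM becomes a
   pending memory read, and most other rtxes are walked recursively
   according to their format strings.  */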
2360
static void
2361
sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2362
{
2363
  int i;
2364
  int j;
2365
  enum rtx_code code;
2366
  const char *fmt;
2367
  bool cslr_p = can_start_lhs_rhs_p;
2368
 
2369
  can_start_lhs_rhs_p = false;
2370
 
2371
  gcc_assert (x);
2372
  if (x == 0)
2373
    return;
2374
 
2375
  if (cslr_p && sched_deps_info->start_rhs)
2376
    sched_deps_info->start_rhs (x);
2377
 
2378
  code = GET_CODE (x);
2379
 
2380
  switch (code)
2381
    {
2382
    case CONST_INT:
2383
    case CONST_DOUBLE:
2384
    case CONST_FIXED:
2385
    case CONST_VECTOR:
2386
    case SYMBOL_REF:
2387
    case CONST:
2388
    case LABEL_REF:
2389
      /* Ignore constants.  */
2390
      if (cslr_p && sched_deps_info->finish_rhs)
2391
        sched_deps_info->finish_rhs ();
2392
 
2393
      return;
2394
 
2395
#ifdef HAVE_cc0
2396
    case CC0:
2397
      /* User of CC0 depends on immediately preceding insn.  */
2398
      SCHED_GROUP_P (insn) = 1;
2399
       /* Don't move CC0 setter to another block (it can set up the
2400
        same flag for previous CC0 users which is safe).  */
2401
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2402
 
2403
      if (cslr_p && sched_deps_info->finish_rhs)
2404
        sched_deps_info->finish_rhs ();
2405
 
2406
      return;
2407
#endif
2408
 
2409
    case REG:
2410
      {
2411
        int regno = REGNO (x);
2412
        enum machine_mode mode = GET_MODE (x);
2413
 
2414
        sched_analyze_reg (deps, regno, mode, USE, insn);
2415
 
2416
#ifdef STACK_REGS
2417
      /* Treat all reads of a stack register as modifying the TOS.  */
2418
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2419
        {
2420
          /* Avoid analyzing the same register twice.  */
2421
          if (regno != FIRST_STACK_REG)
2422
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2423
          sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2424
        }
2425
#endif
2426
 
2427
        if (cslr_p && sched_deps_info->finish_rhs)
2428
          sched_deps_info->finish_rhs ();
2429
 
2430
        return;
2431
      }
2432
 
2433
    case MEM:
2434
      {
2435
        /* Reading memory.  */
2436
        rtx u;
2437
        rtx pending, pending_mem;
2438
        rtx t = x;
2439
 
2440
        if (sched_deps_info->use_cselib)
2441
          {
2442
            enum machine_mode address_mode
2443
              = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2444
 
2445
            t = shallow_copy_rtx (t);
2446
            cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2447
            XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2448
          }
2449
 
2450
        if (!DEBUG_INSN_P (insn))
2451
          {
2452
            t = canon_rtx (t);
2453
            pending = deps->pending_read_insns;
2454
            pending_mem = deps->pending_read_mems;
2455
            while (pending)
2456
              {
2457
                if (read_dependence (XEXP (pending_mem, 0), t)
2458
                    && ! sched_insns_conditions_mutex_p (insn,
2459
                                                         XEXP (pending, 0)))
2460
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2461
                                DEP_ANTI);
2462
 
2463
                pending = XEXP (pending, 1);
2464
                pending_mem = XEXP (pending_mem, 1);
2465
              }
2466
 
2467
            pending = deps->pending_write_insns;
2468
            pending_mem = deps->pending_write_mems;
2469
            while (pending)
2470
              {
2471
                if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2472
                                     t, rtx_varies_p)
2473
                    && ! sched_insns_conditions_mutex_p (insn,
2474
                                                         XEXP (pending, 0)))
2475
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2476
                                sched_deps_info->generate_spec_deps
2477
                                ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2478
 
2479
                pending = XEXP (pending, 1);
2480
                pending_mem = XEXP (pending_mem, 1);
2481
              }
2482
 
2483
            for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2484
              {
2485
                if (! JUMP_P (XEXP (u, 0)))
2486
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2487
                else if (deps_may_trap_p (x))
2488
                  {
2489
                    if ((sched_deps_info->generate_spec_deps)
2490
                        && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2491
                      {
2492
                        ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2493
                                                MAX_DEP_WEAK);
2494
 
2495
                        note_dep (XEXP (u, 0), ds);
2496
                      }
2497
                    else
2498
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2499
                  }
2500
              }
2501
          }
2502
 
2503
        /* Always add these dependencies to pending_reads, since
2504
           this insn may be followed by a write.  */
2505
        if (!deps->readonly)
2506
          add_insn_mem_dependence (deps, true, insn, x);
2507
 
2508
        sched_analyze_2 (deps, XEXP (x, 0), insn);
2509
 
2510
        if (cslr_p && sched_deps_info->finish_rhs)
2511
          sched_deps_info->finish_rhs ();
2512
 
2513
        return;
2514
      }
2515
 
2516
    /* Force pending stores to memory in case a trap handler needs them.  */
2517
    case TRAP_IF:
2518
      flush_pending_lists (deps, insn, true, false);
2519
      break;
2520
 
2521
    case PREFETCH:
2522
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
2523
        reg_pending_barrier = TRUE_BARRIER;
2524
      break;
2525
 
2526
    case UNSPEC_VOLATILE:
2527
      flush_pending_lists (deps, insn, true, true);
2528
      /* FALLTHRU */
2529
 
2530
    case ASM_OPERANDS:
2531
    case ASM_INPUT:
2532
      {
2533
        /* Traditional and volatile asm instructions must be considered to use
2534
           and clobber all hard registers, all pseudo-registers and all of
2535
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2536
 
2537
           Consider for instance a volatile asm that changes the fpu rounding
2538
           mode.  An insn should not be moved across this even if it only uses
2539
           pseudo-regs because it might give an incorrectly rounded result.  */
2540
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2541
          reg_pending_barrier = TRUE_BARRIER;
2542
 
2543
        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2544
           We cannot just fall through here, since then we would be confused
2545
           by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2546
           a traditional asm, unlike its normal usage.  */
2547
 
2548
        if (code == ASM_OPERANDS)
2549
          {
2550
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2551
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2552
 
2553
            if (cslr_p && sched_deps_info->finish_rhs)
2554
              sched_deps_info->finish_rhs ();
2555
 
2556
            return;
2557
          }
2558
        break;
2559
      }
2560
 
2561
    case PRE_DEC:
2562
    case POST_DEC:
2563
    case PRE_INC:
2564
    case POST_INC:
2565
      /* These both read and modify the result.  We must handle them as writes
2566
         to get proper dependencies for following instructions.  We must handle
2567
         them as reads to get proper dependencies from this to previous
2568
         instructions.  Thus we need to pass them to both sched_analyze_1
2569
         and sched_analyze_2.  We must call sched_analyze_2 first in order
2570
         to get the proper antecedent for the read.  */
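      /* E.g. (post_inc:SI (reg:SI 3)) is both a read and a write of
         register 3, hence the two calls below.  */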
2571
      sched_analyze_2 (deps, XEXP (x, 0), insn);
2572
      sched_analyze_1 (deps, x, insn);
2573
 
2574
      if (cslr_p && sched_deps_info->finish_rhs)
2575
        sched_deps_info->finish_rhs ();
2576
 
2577
      return;
2578
 
2579
    case POST_MODIFY:
2580
    case PRE_MODIFY:
2581
      /* op0 = op0 + op1 */
2582
      sched_analyze_2 (deps, XEXP (x, 0), insn);
2583
      sched_analyze_2 (deps, XEXP (x, 1), insn);
2584
      sched_analyze_1 (deps, x, insn);
2585
 
2586
      if (cslr_p && sched_deps_info->finish_rhs)
2587
        sched_deps_info->finish_rhs ();
2588
 
2589
      return;
2590
 
2591
    default:
2592
      break;
2593
    }
2594
 
2595
  /* Other cases: walk the insn.  */
2596
  fmt = GET_RTX_FORMAT (code);
2597
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2598
    {
2599
      if (fmt[i] == 'e')
2600
        sched_analyze_2 (deps, XEXP (x, i), insn);
2601
      else if (fmt[i] == 'E')
2602
        for (j = 0; j < XVECLEN (x, i); j++)
2603
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2604
    }
2605
 
2606
  if (cslr_p && sched_deps_info->finish_rhs)
2607
    sched_deps_info->finish_rhs ();
2608
}
2609
 
2610
/* Analyze an INSN with pattern X to find all dependencies.  */
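/* SETs and CLOBBERs in X are handed to sched_analyze_1 and other
   subexpressions to sched_analyze_2; the accumulated reg_pending_*
   sets are then converted into register dependencies below.  */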
2611
static void
2612
sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2613
{
2614
  RTX_CODE code = GET_CODE (x);
2615
  rtx link;
2616
  unsigned i;
2617
  reg_set_iterator rsi;
2618
 
2619
  if (! reload_completed)
2620
    {
2621
      HARD_REG_SET temp;
2622
 
2623
      extract_insn (insn);
2624
      preprocess_constraints ();
2625
      ira_implicitly_set_insn_hard_regs (&temp);
2626
      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2627
      IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2628
    }
2629
 
2630
  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2631
                         && code == SET);
2632
 
2633
  if (may_trap_p (x))
2634
    /* Avoid moving trapping instructions across function calls that might
2635
       not always return.  */
2636
    add_dependence_list (insn, deps->last_function_call_may_noreturn,
2637
                         1, REG_DEP_ANTI);
2638
 
2639
  if (code == COND_EXEC)
2640
    {
2641
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2642
 
2643
      /* ??? Should be recording conditions so we reduce the number of
2644
         false dependencies.  */
2645
      x = COND_EXEC_CODE (x);
2646
      code = GET_CODE (x);
2647
    }
2648
  if (code == SET || code == CLOBBER)
2649
    {
2650
      sched_analyze_1 (deps, x, insn);
2651
 
2652
      /* Bare clobber insns are used for letting life analysis, reg-stack
2653
         and others know that a value is dead.  Depend on the last call
2654
         instruction so that reg-stack won't get confused.  */
2655
      if (code == CLOBBER)
2656
        add_dependence_list (insn, deps->last_function_call, 1,
2657
                             REG_DEP_OUTPUT);
2658
    }
2659
  else if (code == PARALLEL)
2660
    {
2661
      for (i = XVECLEN (x, 0); i--;)
2662
        {
2663
          rtx sub = XVECEXP (x, 0, i);
2664
          code = GET_CODE (sub);
2665
 
2666
          if (code == COND_EXEC)
2667
            {
2668
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2669
              sub = COND_EXEC_CODE (sub);
2670
              code = GET_CODE (sub);
2671
            }
2672
          if (code == SET || code == CLOBBER)
2673
            sched_analyze_1 (deps, sub, insn);
2674
          else
2675
            sched_analyze_2 (deps, sub, insn);
2676
        }
2677
    }
2678
  else
2679
    sched_analyze_2 (deps, x, insn);
2680
 
2681
  /* Mark registers CLOBBERED or used by the called function.  */
2682
  if (CALL_P (insn))
2683
    {
2684
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2685
        {
2686
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2687
            sched_analyze_1 (deps, XEXP (link, 0), insn);
2688
          else
2689
            sched_analyze_2 (deps, XEXP (link, 0), insn);
2690
        }
2691
      if (find_reg_note (insn, REG_SETJMP, NULL))
2692
        reg_pending_barrier = MOVE_BARRIER;
2693
    }
2694
 
2695
  if (JUMP_P (insn))
2696
    {
2697
      rtx next;
2698
      next = next_nonnote_insn (insn);
2699
      while (next && DEBUG_INSN_P (next))
2700
        next = next_nonnote_insn (next);
2701
      if (next && BARRIER_P (next))
2702
        reg_pending_barrier = MOVE_BARRIER;
2703
      else
2704
        {
2705
          rtx pending, pending_mem;
2706
 
2707
          if (sched_deps_info->compute_jump_reg_dependencies)
2708
            {
2709
              regset_head tmp_uses, tmp_sets;
2710
              INIT_REG_SET (&tmp_uses);
2711
              INIT_REG_SET (&tmp_sets);
2712
 
2713
              (*sched_deps_info->compute_jump_reg_dependencies)
2714
                (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2715
              /* Make latency of jump equal to 0 by using anti-dependence.  */
2716
              EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2717
                {
2718
                  struct deps_reg *reg_last = &deps->reg_last[i];
2719
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2720
                  add_dependence_list (insn, reg_last->implicit_sets,
2721
                                       0, REG_DEP_ANTI);
2722
                  add_dependence_list (insn, reg_last->clobbers, 0,
2723
                                       REG_DEP_ANTI);
2724
 
2725
                  if (!deps->readonly)
2726
                    {
2727
                      reg_last->uses_length++;
2728
                      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2729
                    }
2730
                }
2731
              IOR_REG_SET (reg_pending_sets, &tmp_sets);
2732
 
2733
              CLEAR_REG_SET (&tmp_uses);
2734
              CLEAR_REG_SET (&tmp_sets);
2735
            }
2736
 
2737
          /* All memory writes and volatile reads must happen before the
2738
             jump.  Non-volatile reads must happen before the jump iff
2739
             the result is needed by the above register used mask.  */
2740
 
2741
          pending = deps->pending_write_insns;
2742
          pending_mem = deps->pending_write_mems;
2743
          while (pending)
2744
            {
2745
              if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2746
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2747
              pending = XEXP (pending, 1);
2748
              pending_mem = XEXP (pending_mem, 1);
2749
            }
2750
 
2751
          pending = deps->pending_read_insns;
2752
          pending_mem = deps->pending_read_mems;
2753
          while (pending)
2754
            {
2755
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2756
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2757
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2758
              pending = XEXP (pending, 1);
2759
              pending_mem = XEXP (pending_mem, 1);
2760
            }
2761
 
2762
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2763
                               REG_DEP_ANTI);
2764
        }
2765
    }
2766
 
2767
  /* If this instruction can throw an exception, then moving it changes
2768
     where block boundaries fall.  This is mighty confusing elsewhere.
2769
     Therefore, prevent such an instruction from being moved.  Same for
2770
     non-jump instructions that define block boundaries.
2771
     ??? Unclear whether this is still necessary in EBB mode.  If not,
2772
     add_branch_dependences should be adjusted for RGN mode instead.  */
2773
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2774
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2775
    reg_pending_barrier = MOVE_BARRIER;
2776
 
2777
  if (sched_pressure_p)
2778
    {
2779
      setup_insn_reg_uses (deps, insn);
2780
      setup_insn_reg_pressure_info (insn);
2781
    }
2782
 
2783
  /* Add register dependencies for insn.  */
2784
  if (DEBUG_INSN_P (insn))
2785
    {
2786
      rtx prev = deps->last_debug_insn;
2787
      rtx u;
2788
 
2789
      if (!deps->readonly)
2790
        deps->last_debug_insn = insn;
2791
 
2792
      if (prev)
2793
        add_dependence (insn, prev, REG_DEP_ANTI);
2794
 
2795
      add_dependence_list (insn, deps->last_function_call, 1,
2796
                           REG_DEP_ANTI);
2797
 
2798
      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2799
        if (! JUMP_P (XEXP (u, 0))
2800
            || !sel_sched_p ())
2801
          add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2802
 
2803
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2804
        {
2805
          struct deps_reg *reg_last = &deps->reg_last[i];
2806
          add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2807
          add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2808
 
2809
          if (!deps->readonly)
2810
            reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2811
        }
2812
      CLEAR_REG_SET (reg_pending_uses);
2813
 
2814
      /* Quite often, a debug insn will refer to stuff in the
2815
         previous instruction, but the reason we want this
2816
         dependency here is to make sure the scheduler doesn't
2817
         gratuitously move a debug insn ahead.  This could dirty
2818
         DF flags and cause additional analysis that wouldn't have
2819
         occurred in compilation without debug insns, and such
2820
         additional analysis can modify the generated code.  */
2821
      prev = PREV_INSN (insn);
2822
 
2823
      if (prev && NONDEBUG_INSN_P (prev))
2824
        add_dependence (insn, prev, REG_DEP_ANTI);
2825
    }
2826
  else
2827
    {
2828
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2829
        {
2830
          struct deps_reg *reg_last = &deps->reg_last[i];
2831
          add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2832
          add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2833
          add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2834
 
2835
          if (!deps->readonly)
2836
            {
2837
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2838
              reg_last->uses_length++;
2839
            }
2840
        }
2841
 
2842
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2843
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2844
          {
2845
            struct deps_reg *reg_last = &deps->reg_last[i];
2846
            add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2847
            add_dependence_list (insn, reg_last->implicit_sets, 0,
2848
                                 REG_DEP_ANTI);
2849
            add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2850
 
2851
            if (!deps->readonly)
2852
              {
2853
                reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2854
                reg_last->uses_length++;
2855
              }
2856
          }
2857
 
2858
      /* If the current insn is conditional, we can't free any
2859
         of the lists.  */
2860
      if (sched_has_condition_p (insn))
2861
        {
2862
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2863
            {
2864
              struct deps_reg *reg_last = &deps->reg_last[i];
2865
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2866
              add_dependence_list (insn, reg_last->implicit_sets, 0,
2867
                                   REG_DEP_ANTI);
2868
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2869
 
2870
              if (!deps->readonly)
2871
                {
2872
                  reg_last->clobbers
2873
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
2874
                  reg_last->clobbers_length++;
2875
                }
2876
            }
2877
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2878
            {
2879
              struct deps_reg *reg_last = &deps->reg_last[i];
2880
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2881
              add_dependence_list (insn, reg_last->implicit_sets, 0,
2882
                                   REG_DEP_ANTI);
2883
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2884
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2885
 
2886
              if (!deps->readonly)
2887
                {
2888
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2889
                  SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2890
                }
2891
            }
2892
        }
2893
      else
2894
        {
2895
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2896
            {
2897
              struct deps_reg *reg_last = &deps->reg_last[i];
2898
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2899
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2900
                {
2901
                  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2902
                                                REG_DEP_OUTPUT);
2903
                  add_dependence_list_and_free (deps, insn,
2904
                                                &reg_last->implicit_sets, 0,
2905
                                                REG_DEP_ANTI);
2906
                  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2907
                                                REG_DEP_ANTI);
2908
                  add_dependence_list_and_free
2909
                    (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2910
 
2911
                  if (!deps->readonly)
2912
                    {
2913
                      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2914
                      reg_last->clobbers_length = 0;
2915
                      reg_last->uses_length = 0;
2916
                    }
2917
                }
2918
              else
2919
                {
2920
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2921
                  add_dependence_list (insn, reg_last->implicit_sets, 0,
2922
                                       REG_DEP_ANTI);
2923
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2924
                }
2925
 
2926
              if (!deps->readonly)
2927
                {
2928
                  reg_last->clobbers_length++;
2929
                  reg_last->clobbers
2930
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
2931
                }
2932
            }
2933
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2934
            {
2935
              struct deps_reg *reg_last = &deps->reg_last[i];
2936
 
2937
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2938
                                            REG_DEP_OUTPUT);
2939
              add_dependence_list_and_free (deps, insn,
2940
                                            &reg_last->implicit_sets,
2941
                                            0, REG_DEP_ANTI);
2942
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2943
                                            REG_DEP_OUTPUT);
2944
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2945
                                            REG_DEP_ANTI);
2946
 
2947
              if (!deps->readonly)
2948
                {
2949
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2950
                  reg_last->uses_length = 0;
2951
                  reg_last->clobbers_length = 0;
2952
                  CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2953
                }
2954
            }
2955
        }
2956
    }
2957
 
2958
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2959
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2960
      {
2961
        struct deps_reg *reg_last = &deps->reg_last[i];
2962
        add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2963
        add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2964
        add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2965
 
2966
        if (!deps->readonly)
2967
          reg_last->implicit_sets
2968
            = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2969
      }
2970
 
2971
  if (!deps->readonly)
2972
    {
2973
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2974
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2975
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2976
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2977
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2978
            || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2979
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2980
 
2981
      /* Set up the pending barrier found.  */
2982
      deps->last_reg_pending_barrier = reg_pending_barrier;
2983
    }
2984
 
2985
  CLEAR_REG_SET (reg_pending_uses);
2986
  CLEAR_REG_SET (reg_pending_clobbers);
2987
  CLEAR_REG_SET (reg_pending_sets);
2988
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2989
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2990
 
2991
  /* Add dependencies if a scheduling barrier was found.  */
2992
  if (reg_pending_barrier)
2993
    {
2994
      /* In the case of a barrier, most of the added dependencies are not
2995
         real, so we use anti-dependence here.  */
2996
      if (sched_has_condition_p (insn))
2997
        {
2998
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2999
            {
3000
              struct deps_reg *reg_last = &deps->reg_last[i];
3001
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
3002
              add_dependence_list (insn, reg_last->sets, 0,
3003
                                   reg_pending_barrier == TRUE_BARRIER
3004
                                   ? REG_DEP_TRUE : REG_DEP_ANTI);
3005
              add_dependence_list (insn, reg_last->implicit_sets, 0,
3006
                                   REG_DEP_ANTI);
3007
              add_dependence_list (insn, reg_last->clobbers, 0,
3008
                                   reg_pending_barrier == TRUE_BARRIER
3009
                                   ? REG_DEP_TRUE : REG_DEP_ANTI);
3010
            }
3011
        }
3012
      else
3013
        {
3014
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3015
            {
3016
              struct deps_reg *reg_last = &deps->reg_last[i];
3017
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3018
                                            REG_DEP_ANTI);
3019
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3020
                                            reg_pending_barrier == TRUE_BARRIER
3021
                                            ? REG_DEP_TRUE : REG_DEP_ANTI);
3022
              add_dependence_list_and_free (deps, insn,
3023
                                            &reg_last->implicit_sets, 0,
3024
                                            REG_DEP_ANTI);
3025
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3026
                                            reg_pending_barrier == TRUE_BARRIER
3027
                                            ? REG_DEP_TRUE : REG_DEP_ANTI);
3028
 
3029
              if (!deps->readonly)
3030
                {
3031
                  reg_last->uses_length = 0;
3032
                  reg_last->clobbers_length = 0;
3033
                }
3034
            }
3035
        }
3036
 
3037
      if (!deps->readonly)
3038
        for (i = 0; i < (unsigned)deps->max_reg; i++)
3039
          {
3040
            struct deps_reg *reg_last = &deps->reg_last[i];
3041
            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3042
            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3043
          }
3044
 
3045
      /* Flush pending lists on jumps, but not on speculative checks.  */
3046
      if (JUMP_P (insn) && !(sel_sched_p ()
3047
                             && sel_insn_is_speculation_check (insn)))
3048
        flush_pending_lists (deps, insn, true, true);
3049
 
3050
      if (!deps->readonly)
3051
        CLEAR_REG_SET (&deps->reg_conditional_sets);
3052
      reg_pending_barrier = NOT_A_BARRIER;
3053
    }
3054
 
3055
  /* If a post-call group is still open, see if it should remain so.
3056
     This insn must be a simple move of a hard reg to a pseudo or
3057
     vice-versa.
3058
 
3059
     We must avoid moving these insns for correctness on
3060
     SMALL_REGISTER_CLASS machines, and for special registers like
3061
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3062
     hard regs for all targets.  */
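  /* E.g. (set (reg:SI 123) (reg:SI 0)), copying a return value out of a
     hard register just after a call, would keep the group open on a
     target whose return register is register 0.  */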
3063
 
3064
  if (deps->in_post_call_group_p)
3065
    {
3066
      rtx tmp, set = single_set (insn);
3067
      int src_regno, dest_regno;
3068
 
3069
      if (set == NULL)
3070
        {
3071
          if (DEBUG_INSN_P (insn))
3072
            /* We don't want to mark debug insns as part of the same
3073
               sched group.  We know they really aren't, but if we use
3074
               debug insns to tell that a call group is over, we'll
3075
               get different code if debug insns are not there and
3076
               instructions that follow seem like they should be part
3077
               of the call group.
3078
 
3079
               Also, if we did, fixup_sched_groups() would move the
3080
               deps of the debug insn to the call insn, modifying
3081
               non-debug post-dependency counts of the debug insn
3082
               dependencies and otherwise messing with the scheduling
3083
               order.
3084
 
3085
               Instead, let such debug insns be scheduled freely, but
3086
               keep the call group open in case there are insns that
3087
               should be part of it afterwards.  Since we grant debug
3088
               insns higher priority than even sched group insns, it
3089
               will all turn out all right.  */
3090
            goto debug_dont_end_call_group;
3091
          else
3092
            goto end_call_group;
3093
        }
3094
 
3095
      tmp = SET_DEST (set);
3096
      if (GET_CODE (tmp) == SUBREG)
3097
        tmp = SUBREG_REG (tmp);
3098
      if (REG_P (tmp))
3099
        dest_regno = REGNO (tmp);
3100
      else
3101
        goto end_call_group;
3102
 
3103
      tmp = SET_SRC (set);
3104
      if (GET_CODE (tmp) == SUBREG)
3105
        tmp = SUBREG_REG (tmp);
3106
      if ((GET_CODE (tmp) == PLUS
3107
           || GET_CODE (tmp) == MINUS)
3108
          && REG_P (XEXP (tmp, 0))
3109
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3110
          && dest_regno == STACK_POINTER_REGNUM)
3111
        src_regno = STACK_POINTER_REGNUM;
3112
      else if (REG_P (tmp))
3113
        src_regno = REGNO (tmp);
3114
      else
3115
        goto end_call_group;
3116
 
3117
      if (src_regno < FIRST_PSEUDO_REGISTER
3118
          || dest_regno < FIRST_PSEUDO_REGISTER)
3119
        {
3120
          if (!deps->readonly
3121
              && deps->in_post_call_group_p == post_call_initial)
3122
            deps->in_post_call_group_p = post_call;
3123
 
3124
          if (!sel_sched_p () || sched_emulate_haifa_p)
3125
            {
3126
              SCHED_GROUP_P (insn) = 1;
3127
              CANT_MOVE (insn) = 1;
3128
            }
3129
        }
3130
      else
3131
        {
3132
        end_call_group:
3133
          if (!deps->readonly)
3134
            deps->in_post_call_group_p = not_post_call;
3135
        }
3136
    }
3137
 
3138
 debug_dont_end_call_group:
3139
  if ((current_sched_info->flags & DO_SPECULATION)
3140
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3141
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3142
       be speculated.  */
3143
    {
3144
      if (sel_sched_p ())
3145
        sel_mark_hard_insn (insn);
3146
      else
3147
        {
3148
          sd_iterator_def sd_it;
3149
          dep_t dep;
3150
 
3151
          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3152
               sd_iterator_cond (&sd_it, &dep);)
3153
            change_spec_dep_to_hard (sd_it);
3154
        }
3155
    }
3156
}
3157
 
3158
/* Return TRUE if INSN might not always return normally (e.g. call exit,
3159
   longjmp, loop forever, ...).  */
3160
static bool
3161
call_may_noreturn_p (rtx insn)
3162
{
3163
  rtx call;
3164
 
3165
  /* const or pure calls that aren't looping will always return.  */
3166
  if (RTL_CONST_OR_PURE_CALL_P (insn)
3167
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3168
    return false;
3169
 
3170
  call = PATTERN (insn);
3171
  if (GET_CODE (call) == PARALLEL)
3172
    call = XVECEXP (call, 0, 0);
3173
  if (GET_CODE (call) == SET)
3174
    call = SET_SRC (call);
3175
  if (GET_CODE (call) == CALL
3176
      && MEM_P (XEXP (call, 0))
3177
      && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3178
    {
3179
      rtx symbol = XEXP (XEXP (call, 0), 0);
3180
      if (SYMBOL_REF_DECL (symbol)
3181
          && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3182
        {
3183
          if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3184
              == BUILT_IN_NORMAL)
3185
            switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3186
              {
3187
              case BUILT_IN_BCMP:
3188
              case BUILT_IN_BCOPY:
3189
              case BUILT_IN_BZERO:
3190
              case BUILT_IN_INDEX:
3191
              case BUILT_IN_MEMCHR:
3192
              case BUILT_IN_MEMCMP:
3193
              case BUILT_IN_MEMCPY:
3194
              case BUILT_IN_MEMMOVE:
3195
              case BUILT_IN_MEMPCPY:
3196
              case BUILT_IN_MEMSET:
3197
              case BUILT_IN_RINDEX:
3198
              case BUILT_IN_STPCPY:
3199
              case BUILT_IN_STPNCPY:
3200
              case BUILT_IN_STRCAT:
3201
              case BUILT_IN_STRCHR:
3202
              case BUILT_IN_STRCMP:
3203
              case BUILT_IN_STRCPY:
3204
              case BUILT_IN_STRCSPN:
3205
              case BUILT_IN_STRLEN:
3206
              case BUILT_IN_STRNCAT:
3207
              case BUILT_IN_STRNCMP:
3208
              case BUILT_IN_STRNCPY:
3209
              case BUILT_IN_STRPBRK:
3210
              case BUILT_IN_STRRCHR:
3211
              case BUILT_IN_STRSPN:
3212
              case BUILT_IN_STRSTR:
3213
                /* Assume certain string/memory builtins always return.  */
3214
                return false;
3215
              default:
3216
                break;
3217
              }
3218
        }
3219
    }
3220
 
3221
  /* For all other calls assume that they might not always return.  */
3222
  return true;
3223
}
3224
 
3225
/* Analyze INSN with DEPS as a context.  */
3226
void
3227
deps_analyze_insn (struct deps_desc *deps, rtx insn)
3228
{
3229
  if (sched_deps_info->start_insn)
3230
    sched_deps_info->start_insn (insn);
3231
 
3232
  if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
3233
    {
3234
      /* Make each JUMP_INSN (but not a speculative check)
3235
         a scheduling barrier for memory references.  */
3236
      if (!deps->readonly
3237
          && JUMP_P (insn)
3238
          && !(sel_sched_p ()
3239
               && sel_insn_is_speculation_check (insn)))
3240
        {
3241
          /* Keep the list a reasonable size.  */
3242
          if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
3243
            flush_pending_lists (deps, insn, true, true);
3244
          else
3245
            deps->last_pending_memory_flush
3246
              = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
3247
        }
3248
 
3249
      sched_analyze_insn (deps, PATTERN (insn), insn);
3250
    }
3251
  else if (CALL_P (insn))
3252
    {
3253
      int i;
3254
 
3255
      CANT_MOVE (insn) = 1;
3256
 
3257
      if (find_reg_note (insn, REG_SETJMP, NULL))
3258
        {
3259
          /* This is a setjmp call.  Assume that all registers, not just
3260
             hard registers, may be clobbered by this call.  */
3261
          reg_pending_barrier = MOVE_BARRIER;
3262
        }
3263
      else
3264
        {
3265
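          /* Walk all hard registers and record how this call might touch
             them: global register variables are both used and set, other
             call-clobbered registers may be clobbered, and fixed registers
             (including the frame pointer) are treated as implicitly used.  */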
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3266
            /* A call may read and modify global register variables.  */
3267
            if (global_regs[i])
3268
              {
3269
                SET_REGNO_REG_SET (reg_pending_sets, i);
3270
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3271
              }
3272
          /* Other call-clobbered hard regs may be clobbered.
3273
             Since we only have a choice between 'might be clobbered'
3274
             and 'definitely not clobbered', we must include all
3275
             partly call-clobbered registers here.  */
3276
            else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
3277
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
3278
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
3279
          /* We don't know what set of fixed registers might be used
3280
             by the function, but it is certain that the stack pointer
3281
             is among them, so be conservative and treat them all as used.  */
3282
            else if (fixed_regs[i])
3283
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3284
          /* The frame pointer is normally not used by the function
3285
             itself, but by the debugger.  */
3286
          /* ??? MIPS o32 is an exception.  It uses the frame pointer
3287
             in the macro expansion of jal but does not represent this
3288
             fact in the call_insn rtl.  */
3289
            else if (i == FRAME_POINTER_REGNUM
3290
                     || (i == HARD_FRAME_POINTER_REGNUM
3291
                         && (! reload_completed || frame_pointer_needed)))
3292
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
3293
        }
3294
 
3295
      /* For each insn which shouldn't cross a call, add a dependence
3296
         between that insn and this call insn.  */
3297
      add_dependence_list_and_free (deps, insn,
3298
                                    &deps->sched_before_next_call, 1,
3299
                                    REG_DEP_ANTI);
3300
 
3301
      sched_analyze_insn (deps, PATTERN (insn), insn);
3302
 
3303
      /* If this CALL were in a sched group, it would violate the
3304
         convention that sched group insns have dependencies only on the
3305
         previous instruction.
3306
 
3307
         Of course one can say: "Hey!  What about head of the sched group?"
3308
         And I will answer: "Basic principles (one dep per insn) are always
3309
         the same."  */
3310
      gcc_assert (!SCHED_GROUP_P (insn));
3311
 
3312
      /* In the absence of interprocedural alias analysis, we must flush
3313
         all pending reads and writes, and start new dependencies starting
3314
         from here.  But only flush writes for constant calls (which may
3315
         be passed a pointer to something we haven't written yet).  */
3316
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));
3317
 
3318
      if (!deps->readonly)
3319
        {
3320
          /* Remember the last function call for limiting lifetimes.  */
3321
          free_INSN_LIST_list (&deps->last_function_call);
3322
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);
3323
 
3324
          if (call_may_noreturn_p (insn))
3325
            {
3326
              /* Remember the last function call that might not always return
3327
                 normally for limiting moves of trapping insns.  */
3328
              free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
3329
              deps->last_function_call_may_noreturn
3330
                = alloc_INSN_LIST (insn, NULL_RTX);
3331
            }
3332
 
3333
          /* Before reload, begin a post-call group, so as to keep the
3334
             lifetimes of hard registers correct.  */
3335
          if (! reload_completed)
3336
            deps->in_post_call_group_p = post_call;
3337
        }
3338
    }
3339
 
3340
  if (sched_deps_info->use_cselib)
3341
    cselib_process_insn (insn);
3342
 
3343
  /* EH_REGION insn notes cannot appear until well after we complete
3344
     scheduling.  */
3345
  if (NOTE_P (insn))
3346
    gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
3347
                && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);
3348
 
3349
  if (sched_deps_info->finish_insn)
3350
    sched_deps_info->finish_insn ();
3351
 
3352
  /* Fixup the dependencies in the sched group.  */
3353
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
3354
      && SCHED_GROUP_P (insn) && !sel_sched_p ())
3355
    fixup_sched_groups (insn);
3356
}
3357
 
3358
/* Initialize DEPS for the new block beginning with HEAD.  */
3359
void
3360
deps_start_bb (struct deps_desc *deps, rtx head)
3361
{
3362
  gcc_assert (!deps->readonly);
3363
 
3364
  /* Before reload, if the previous block ended in a call, show that
3365
     we are inside a post-call group, so as to keep the lifetimes of
3366
     hard registers correct.  */
3367
  if (! reload_completed && !LABEL_P (head))
3368
    {
3369
      rtx insn = prev_nonnote_insn (head);
3370
 
3371
      while (insn && DEBUG_INSN_P (insn))
3372
        insn = prev_nonnote_insn (insn);
3373
      if (insn && CALL_P (insn))
3374
        deps->in_post_call_group_p = post_call_initial;
3375
    }
3376
}
3377
 
3378
/* Analyze every insn between HEAD and TAIL inclusive, creating backward
3379
   dependencies for each insn.  */
3380
void
3381
sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
3382
{
3383
  rtx insn;
3384
 
3385
  if (sched_deps_info->use_cselib)
3386
    cselib_init (CSELIB_RECORD_MEMORY);
3387
 
3388
  deps_start_bb (deps, head);
3389
 
3390
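  /* Walk every insn from HEAD to TAIL; the loop only exits once TAIL
     itself has been analyzed.  */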
  for (insn = head;; insn = NEXT_INSN (insn))
3391
    {
3392
 
3393
      if (INSN_P (insn))
3394
        {
3395
          /* Initialize the insn's deps_lists.  */
3396
          sd_init_insn (insn);
3397
        }
3398
 
3399
      deps_analyze_insn (deps, insn);
3400
 
3401
      if (insn == tail)
3402
        {
3403
          if (sched_deps_info->use_cselib)
3404
            cselib_finish ();
3405
          return;
3406
        }
3407
    }
3408
  gcc_unreachable ();
3409
}
3410
 
3411
/* Helper for sched_free_deps ().
3412
   Delete INSN's (RESOLVED_P) backward dependencies.  */
3413
static void
3414
delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
3415
{
3416
  sd_iterator_def sd_it;
3417
  dep_t dep;
3418
  sd_list_types_def types;
3419
 
3420
  if (resolved_p)
3421
    types = SD_LIST_RES_BACK;
3422
  else
3423
    types = SD_LIST_BACK;
3424
 
3425
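  /* The iterator is not advanced explicitly: removing the current link
     from its list makes *sd_it.linkp point at the next element.  */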
  for (sd_it = sd_iterator_start (insn, types);
3426
       sd_iterator_cond (&sd_it, &dep);)
3427
    {
3428
      dep_link_t link = *sd_it.linkp;
3429
      dep_node_t node = DEP_LINK_NODE (link);
3430
      deps_list_t back_list;
3431
      deps_list_t forw_list;
3432
 
3433
      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
3434
      remove_from_deps_list (link, back_list);
3435
      delete_dep_node (node);
3436
    }
3437
}
3438
 
3439
/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3440
   deps_lists.  */
3441
void
3442
sched_free_deps (rtx head, rtx tail, bool resolved_p)
3443
{
3444
  rtx insn;
3445
  rtx next_tail = NEXT_INSN (tail);
3446
 
3447
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3448
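    /* Insns with luid 0 were never set up for dependence analysis and
       have no deps_lists to free.  */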
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
3449
      {
3450
        /* Clear the (RESOLVED_P) back deps together with their dep_nodes.  */
3451
        delete_dep_nodes_in_back_deps (insn, resolved_p);
3452
 
3453
        /* Clear forward deps and leave the dep_nodes to the
3454
           corresponding back_deps list.  */
3455
        if (resolved_p)
3456
          clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3457
        else
3458
          clear_deps_list (INSN_FORW_DEPS (insn));
3459
 
3460
        sd_finish_insn (insn);
3461
      }
3462
}
3463
 
3464
/* Initialize variables for region data dependence analysis.
3465
   When LAZY_REG_LAST is true, do not allocate reg_last array
3466
   of struct deps_desc immediately.  */
3467
 
3468
void
3469
init_deps (struct deps_desc *deps, bool lazy_reg_last)
3470
{
3471
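  /* After reload only hard registers remain, so the reg_last array only
     needs FIRST_PSEUDO_REGISTER entries.  */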
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());
3472
 
3473
  deps->max_reg = max_reg;
3474
  if (lazy_reg_last)
3475
    deps->reg_last = NULL;
3476
  else
3477
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
3478
  INIT_REG_SET (&deps->reg_last_in_use);
3479
  INIT_REG_SET (&deps->reg_conditional_sets);
3480
 
3481
  deps->pending_read_insns = 0;
3482
  deps->pending_read_mems = 0;
3483
  deps->pending_write_insns = 0;
3484
  deps->pending_write_mems = 0;
3485
  deps->pending_read_list_length = 0;
3486
  deps->pending_write_list_length = 0;
3487
  deps->pending_flush_length = 0;
3488
  deps->last_pending_memory_flush = 0;
3489
  deps->last_function_call = 0;
3490
  deps->last_function_call_may_noreturn = 0;
3491
  deps->sched_before_next_call = 0;
3492
  deps->in_post_call_group_p = not_post_call;
3493
  deps->last_debug_insn = 0;
3494
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
3495
  deps->readonly = 0;
3496
}
3497
 
3498
/* Initialize only the reg_last field of DEPS, which was not allocated earlier
3499
   because DEPS was initialized lazily.  */
3500
void
3501
init_deps_reg_last (struct deps_desc *deps)
3502
{
3503
  gcc_assert (deps && deps->max_reg > 0);
3504
  gcc_assert (deps->reg_last == NULL);
3505
 
3506
  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3507
}
3508
 
3509
 
3510
/* Free insn lists found in DEPS.  */
3511
 
3512
void
3513
free_deps (struct deps_desc *deps)
3514
{
3515
  unsigned i;
3516
  reg_set_iterator rsi;
3517
 
3518
  /* A max_reg of 0 means this context has already been freed.  */
3519
  if (deps->max_reg == 0)
3520
    {
3521
      gcc_assert (deps->reg_last == NULL);
3522
      return;
3523
    }
3524
  deps->max_reg = 0;
3525
 
3526
  free_INSN_LIST_list (&deps->pending_read_insns);
3527
  free_EXPR_LIST_list (&deps->pending_read_mems);
3528
  free_INSN_LIST_list (&deps->pending_write_insns);
3529
  free_EXPR_LIST_list (&deps->pending_write_mems);
3530
  free_INSN_LIST_list (&deps->last_pending_memory_flush);
3531
 
3532
  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
3533
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
3534
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
3535
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3536
    {
3537
      struct deps_reg *reg_last = &deps->reg_last[i];
3538
      if (reg_last->uses)
3539
        free_INSN_LIST_list (&reg_last->uses);
3540
      if (reg_last->sets)
3541
        free_INSN_LIST_list (&reg_last->sets);
3542
      if (reg_last->implicit_sets)
3543
        free_INSN_LIST_list (&reg_last->implicit_sets);
3544
      if (reg_last->clobbers)
3545
        free_INSN_LIST_list (&reg_last->clobbers);
3546
    }
3547
  CLEAR_REG_SET (&deps->reg_last_in_use);
3548
  CLEAR_REG_SET (&deps->reg_conditional_sets);
3549
 
3550
  /* As we initialize reg_last lazily, it is possible that we didn't allocate
3551
     it at all.  */
3552
  if (deps->reg_last)
3553
    free (deps->reg_last);
3554
  deps->reg_last = NULL;
3555
 
3556
  deps = NULL;
3557
}
3558
 
3559
/* Remove INSN from the dependence context DEPS.  Caution: reg_conditional_sets
3560
   is not handled.  */
3561
void
3562
remove_from_deps (struct deps_desc *deps, rtx insn)
3563
{
3564
  int removed;
3565
  unsigned i;
3566
  reg_set_iterator rsi;
3567
 
3568
  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
3569
                                               &deps->pending_read_mems);
3570
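  /* Debug insns are not counted in the pending list lengths when they
     are added, so only adjust the read list length for real insns.  */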
  if (!DEBUG_INSN_P (insn))
3571
    deps->pending_read_list_length -= removed;
3572
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
3573
                                               &deps->pending_write_mems);
3574
  deps->pending_write_list_length -= removed;
3575
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
3576
  deps->pending_flush_length -= removed;
3577
 
3578
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3579
    {
3580
      struct deps_reg *reg_last = &deps->reg_last[i];
3581
      if (reg_last->uses)
3582
        remove_from_dependence_list (insn, &reg_last->uses);
3583
      if (reg_last->sets)
3584
        remove_from_dependence_list (insn, &reg_last->sets);
3585
      if (reg_last->implicit_sets)
3586
        remove_from_dependence_list (insn, &reg_last->implicit_sets);
3587
      if (reg_last->clobbers)
3588
        remove_from_dependence_list (insn, &reg_last->clobbers);
3589
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
3590
          && !reg_last->clobbers)
3591
        CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
3592
    }
3593
 
3594
  if (CALL_P (insn))
3595
    {
3596
      remove_from_dependence_list (insn, &deps->last_function_call);
3597
      remove_from_dependence_list (insn,
3598
                                   &deps->last_function_call_may_noreturn);
3599
    }
3600
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
3601
}
3602
 
3603
/* Initialize or extend the per-insn dependency data vector h_d_i_d.  */
3604
static void
3605
init_deps_data_vector (void)
3606
{
3607
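  /* Make sure h_d_i_d has at least sched_max_luid + 1 entries; when it
     must grow, over-allocate to 1.5 * sched_max_luid to amortize the cost
     of repeated extensions.  */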
  int reserve = (sched_max_luid + 1
3608
                 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
3609
  if (reserve > 0
3610
      && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
3611
    VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
3612
                           3 * sched_max_luid / 2);
3613
}
3614
 
3615
/* If it is profitable to use them, initialize or extend (depending on
3616
   GLOBAL_P) dependency data.  */
3617
void
3618
sched_deps_init (bool global_p)
3619
{
3620
  /* Average number of insns per basic block.
3621
     '+ 1' is used to make it nonzero.  */
3622
  int insns_in_block = sched_max_luid / n_basic_blocks + 1;
3623
 
3624
  init_deps_data_vector ();
3625
 
3626
  /* We use another caching mechanism for selective scheduling, so
3627
     we don't use this one.  */
3628
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
3629
    {
3630
      /* ?!? We could save some memory by computing a per-region luid mapping
3631
         which could reduce both the number of vectors in the cache and the
3632
         size of each vector.  Instead we just avoid the cache entirely unless
3633
         the average number of instructions in a basic block is very high.  See
3634
         the comment before the declaration of true_dependency_cache for
3635
         what we consider "very high".  */
3636
      cache_size = 0;
3637
      extend_dependency_caches (sched_max_luid, true);
3638
    }
3639
 
3640
  if (global_p)
3641
    {
3642
      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
3643
                                   /* Allocate lists for one block at a time.  */
3644
                                   insns_in_block);
3645
      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
3646
                                   /* Allocate nodes for one block at a time.
3647
                                      We assume that average insn has
3648
                                      5 producers.  */
3649
                                   5 * insns_in_block);
3650
    }
3651
}
3652
 
3653
 
3654
/* Create or extend (depending on CREATE_P) dependency caches to
3655
   size N.  */
3656
void
3657
extend_dependency_caches (int n, bool create_p)
3658
{
3659
  if (create_p || true_dependency_cache)
3660
    {
3661
      int i, luid = cache_size + n;
3662
 
3663
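      /* Grow each per-insn dependency bitmap array to LUID entries,
         preserving existing contents; the new entries are initialized
         in the loop below.  */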
      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
3664
                                          luid);
3665
      output_dependency_cache = XRESIZEVEC (bitmap_head,
3666
                                            output_dependency_cache, luid);
3667
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
3668
                                          luid);
3669
 
3670
      if (current_sched_info->flags & DO_SPECULATION)
3671
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
3672
                                            luid);
3673
 
3674
      for (i = cache_size; i < luid; i++)
3675
        {
3676
          bitmap_initialize (&true_dependency_cache[i], 0);
3677
          bitmap_initialize (&output_dependency_cache[i], 0);
3678
          bitmap_initialize (&anti_dependency_cache[i], 0);
3679
 
3680
          if (current_sched_info->flags & DO_SPECULATION)
3681
            bitmap_initialize (&spec_dependency_cache[i], 0);
3682
        }
3683
      cache_size = luid;
3684
    }
3685
}
3686
 
3687
/* Finalize dependency information for the whole function.  */
3688
void
3689
sched_deps_finish (void)
3690
{
3691
  gcc_assert (deps_pools_are_empty_p ());
3692
  free_alloc_pool_if_empty (&dn_pool);
3693
  free_alloc_pool_if_empty (&dl_pool);
3694
  gcc_assert (dn_pool == NULL && dl_pool == NULL);
3695
 
3696
  VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
3697
3698
 
3699
  if (true_dependency_cache)
3700
    {
3701
      int i;
3702
 
3703
      for (i = 0; i < cache_size; i++)
3704
        {
3705
          bitmap_clear (&true_dependency_cache[i]);
3706
          bitmap_clear (&output_dependency_cache[i]);
3707
          bitmap_clear (&anti_dependency_cache[i]);
3708
 
3709
          if (sched_deps_info->generate_spec_deps)
3710
            bitmap_clear (&spec_dependency_cache[i]);
3711
        }
3712
      free (true_dependency_cache);
3713
      true_dependency_cache = NULL;
3714
      free (output_dependency_cache);
3715
      output_dependency_cache = NULL;
3716
      free (anti_dependency_cache);
3717
      anti_dependency_cache = NULL;
3718
 
3719
      if (sched_deps_info->generate_spec_deps)
3720
        {
3721
          free (spec_dependency_cache);
3722
          spec_dependency_cache = NULL;
3723
        }
3724
 
3725
    }
3726
  cache_size = 0;
}
3727
 
3728
/* Initialize some global variables needed by the dependency analysis
3729
   code.  */
3730
 
3731
void
3732
init_deps_global (void)
3733
{
3734
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3735
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3736
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
3737
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
3738
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
3739
  reg_pending_barrier = NOT_A_BARRIER;
3740
 
3741
  if (!sel_sched_p () || sched_emulate_haifa_p)
3742
    {
3743
      sched_deps_info->start_insn = haifa_start_insn;
3744
      sched_deps_info->finish_insn = haifa_finish_insn;
3745
 
3746
      sched_deps_info->note_reg_set = haifa_note_reg_set;
3747
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
3748
      sched_deps_info->note_reg_use = haifa_note_reg_use;
3749
 
3750
      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
3751
      sched_deps_info->note_dep = haifa_note_dep;
3752
   }
3753
}
3754
 
3755
/* Free everything used by the dependency analysis code.  */
3756
 
3757
void
3758
finish_deps_global (void)
3759
{
3760
  FREE_REG_SET (reg_pending_sets);
3761
  FREE_REG_SET (reg_pending_clobbers);
3762
  FREE_REG_SET (reg_pending_uses);
3763
}
3764
 
3765
/* Estimate the weakness of dependence between MEM1 and MEM2.  */
3766
dw_t
3767
estimate_dep_weak (rtx mem1, rtx mem2)
3768
{
3769
  rtx r1, r2;
3770
 
3771
  if (mem1 == mem2)
3772
    /* MEMs are the same - don't speculate.  */
3773
    return MIN_DEP_WEAK;
3774
 
3775
  r1 = XEXP (mem1, 0);
3776
  r2 = XEXP (mem2, 0);
3777
 
3778
  if (r1 == r2
3779
      || (REG_P (r1) && REG_P (r2)
3780
          && REGNO (r1) == REGNO (r2)))
3781
    /* Again, MEMs are the same.  */
3782
    return MIN_DEP_WEAK;
3783
  else if ((REG_P (r1) && !REG_P (r2))
3784
           || (!REG_P (r1) && REG_P (r2)))
3785
    /* Different addressing modes - a reason to be more speculative
3786
       than usual.  */
3787
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
3788
  else
3789
    /* We can't say anything about the dependence.  */
3790
    return UNCERTAIN_DEP_WEAK;
3791
}
3792
 
3793
/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
3794
   This function can handle same INSN and ELEM (INSN == ELEM).
3795
   It is a convenience wrapper.  */
3796
void
3797
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
3798
{
3799
  ds_t ds;
3800
  bool internal;
3801
 
3802
  if (dep_type == REG_DEP_TRUE)
3803
    ds = DEP_TRUE;
3804
  else if (dep_type == REG_DEP_OUTPUT)
3805
    ds = DEP_OUTPUT;
3806
  else
3807
    {
3808
      gcc_assert (dep_type == REG_DEP_ANTI);
3809
      ds = DEP_ANTI;
3810
    }
3811
 
3812
  /* When add_dependence is called from inside sched-deps.c, we expect
3813
     cur_insn to be non-null.  */
3814
  internal = cur_insn != NULL;
3815
  if (internal)
3816
    gcc_assert (insn == cur_insn);
3817
  else
3818
    cur_insn = insn;
3819
 
3820
  note_dep (elem, ds);
3821
  if (!internal)
3822
    cur_insn = NULL;
3823
}
3824
 
3825
/* Return weakness of speculative type TYPE in the dep_status DS.  */
3826
dw_t
3827
get_dep_weak_1 (ds_t ds, ds_t type)
3828
{
3829
  ds = ds & type;
3830
 
3831
  switch (type)
3832
    {
3833
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
3834
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
3835
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
3836
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
3837
    default: gcc_unreachable ();
3838
    }
3839
 
3840
  return (dw_t) ds;
3841
}
3842
 
3843
dw_t
3844
get_dep_weak (ds_t ds, ds_t type)
3845
{
3846
  dw_t dw = get_dep_weak_1 (ds, type);
3847
 
3848
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3849
  return dw;
3850
}
3851
 
3852
/* Return the dep_status, which has the same parameters as DS, except for
3853
   speculative type TYPE, that will have weakness DW.  */
3854
ds_t
3855
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
3856
{
3857
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
3858
 
3859
  ds &= ~type;
3860
  switch (type)
3861
    {
3862
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
3863
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
3864
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
3865
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
3866
    default: gcc_unreachable ();
3867
    }
3868
  return ds;
3869
}
3870
 
3871
/* Return the join of two dep_statuses DS1 and DS2.
3872
   If MAX_P is true then choose the greater probability,
3873
   otherwise multiply probabilities.
3874
   This function assumes that both DS1 and DS2 contain speculative bits.  */
3875
static ds_t
3876
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
3877
{
3878
  ds_t ds, t;
3879
 
3880
  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));
3881
 
3882
  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);
3883
 
3884
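  /* For each speculation type present in either status, keep its weakness
     if only one status has it; when both do, either take the larger value
     (MAX_P) or multiply the two as independent probabilities.  */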
  t = FIRST_SPEC_TYPE;
3885
  do
3886
    {
3887
      if ((ds1 & t) && !(ds2 & t))
3888
        ds |= ds1 & t;
3889
      else if (!(ds1 & t) && (ds2 & t))
3890
        ds |= ds2 & t;
3891
      else if ((ds1 & t) && (ds2 & t))
3892
        {
3893
          dw_t dw1 = get_dep_weak (ds1, t);
3894
          dw_t dw2 = get_dep_weak (ds2, t);
3895
          ds_t dw;
3896
 
3897
          if (!max_p)
3898
            {
3899
              dw = ((ds_t) dw1) * ((ds_t) dw2);
3900
              dw /= MAX_DEP_WEAK;
3901
              if (dw < MIN_DEP_WEAK)
3902
                dw = MIN_DEP_WEAK;
3903
            }
3904
          else
3905
            {
3906
              if (dw1 >= dw2)
3907
                dw = dw1;
3908
              else
3909
                dw = dw2;
3910
            }
3911
 
3912
          ds = set_dep_weak (ds, t, (dw_t) dw);
3913
        }
3914
 
3915
      if (t == LAST_SPEC_TYPE)
3916
        break;
3917
      t <<= SPEC_TYPE_SHIFT;
3918
    }
3919
  while (1);
3920
 
3921
  return ds;
3922
}
3923
 
3924
/* Return the join of two dep_statuses DS1 and DS2.
3925
   This function assumes that both DS1 and DS2 contain speculative bits.  */
3926
ds_t
3927
ds_merge (ds_t ds1, ds_t ds2)
3928
{
3929
  return ds_merge_1 (ds1, ds2, false);
3930
}
3931
 
3932
/* Return the join of dep_statuses DS and DS2.  If both are speculative and
   MEM1 is nonnull, use MEM1 and MEM2 to re-estimate the data-speculation
   weakness of the result.  */
3933
ds_t
3934
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
3935
{
3936
  ds_t new_status = ds | ds2;
3937
 
3938
  if (new_status & SPECULATIVE)
3939
    {
3940
      if ((ds && !(ds & SPECULATIVE))
3941
          || (ds2 && !(ds2 & SPECULATIVE)))
3942
        /* Then this dep can't be speculative.  */
3943
        new_status &= ~SPECULATIVE;
3944
      else
3945
        {
3946
          /* Both are speculative.  Merge their probabilities.  */
3947
          if (mem1)
3948
            {
3949
              dw_t dw;
3950
 
3951
              dw = estimate_dep_weak (mem1, mem2);
3952
              ds = set_dep_weak (ds, BEGIN_DATA, dw);
3953
            }
3954
 
3955
          if (!ds)
3956
            new_status = ds2;
3957
          else if (!ds2)
3958
            new_status = ds;
3959
          else
3960
            new_status = ds_merge (ds2, ds);
3961
        }
3962
    }
3963
 
3964
  return new_status;
3965
}
3966
 
3967
/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
3968
   probabilities.  */
3969
ds_t
3970
ds_max_merge (ds_t ds1, ds_t ds2)
3971
{
3972
  if (ds1 == 0 && ds2 == 0)
3973
    return 0;
3974
 
3975
  if (ds1 == 0 && ds2 != 0)
3976
    return ds2;
3977
 
3978
  if (ds1 != 0 && ds2 == 0)
3979
    return ds1;
3980
 
3981
  return ds_merge_1 (ds1, ds2, true);
3982
}
3983
 
3984
/* Return the probability of speculation success for the speculation
3985
   status DS.  */
3986
dw_t
3987
ds_weak (ds_t ds)
3988
{
3989
  ds_t res = 1, dt;
3990
  int n = 0;
3991
 
3992
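  /* Multiply the weaknesses of all speculation types present in DS,
     dividing by MAX_DEP_WEAK for every extra factor so the result stays
     within [MIN_DEP_WEAK, MAX_DEP_WEAK].  */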
  dt = FIRST_SPEC_TYPE;
3993
  do
3994
    {
3995
      if (ds & dt)
3996
        {
3997
          res *= (ds_t) get_dep_weak (ds, dt);
3998
          n++;
3999
        }
4000
 
4001
      if (dt == LAST_SPEC_TYPE)
4002
        break;
4003
      dt <<= SPEC_TYPE_SHIFT;
4004
    }
4005
  while (1);
4006
 
4007
  gcc_assert (n);
4008
  while (--n)
4009
    res /= MAX_DEP_WEAK;
4010
 
4011
  if (res < MIN_DEP_WEAK)
4012
    res = MIN_DEP_WEAK;
4013
 
4014
  gcc_assert (res <= MAX_DEP_WEAK);
4015
 
4016
  return (dw_t) res;
4017
}
4018
 
4019
/* Return a dep status that contains all speculation types of DS.  */
4020
ds_t
4021
ds_get_speculation_types (ds_t ds)
4022
{
4023
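  /* Each speculation type occupies a multi-bit weakness field, so if any
     bit of a field is set, saturate the whole field; the result is then
     just a mask of the speculation types present in DS.  */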
  if (ds & BEGIN_DATA)
4024
    ds |= BEGIN_DATA;
4025
  if (ds & BE_IN_DATA)
4026
    ds |= BE_IN_DATA;
4027
  if (ds & BEGIN_CONTROL)
4028
    ds |= BEGIN_CONTROL;
4029
  if (ds & BE_IN_CONTROL)
4030
    ds |= BE_IN_CONTROL;
4031
 
4032
  return ds & SPECULATIVE;
4033
}
4034
 
4035
/* Return a dep status that contains maximal weakness for each speculation
4036
   type present in DS.  */
4037
ds_t
4038
ds_get_max_dep_weak (ds_t ds)
4039
{
4040
  if (ds & BEGIN_DATA)
4041
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
4042
  if (ds & BE_IN_DATA)
4043
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
4044
  if (ds & BEGIN_CONTROL)
4045
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
4046
  if (ds & BE_IN_CONTROL)
4047
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);
4048
 
4049
  return ds;
4050
}
4051
 
4052
/* Dump information about the dependence status S.  */
4053
static void
4054
dump_ds (FILE *f, ds_t s)
4055
{
4056
  fprintf (f, "{");
4057
 
4058
  if (s & BEGIN_DATA)
4059
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
4060
  if (s & BE_IN_DATA)
4061
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
4062
  if (s & BEGIN_CONTROL)
4063
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
4064
  if (s & BE_IN_CONTROL)
4065
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));
4066
 
4067
  if (s & HARD_DEP)
4068
    fprintf (f, "HARD_DEP; ");
4069
 
4070
  if (s & DEP_TRUE)
4071
    fprintf (f, "DEP_TRUE; ");
4072
  if (s & DEP_ANTI)
4073
    fprintf (f, "DEP_ANTI; ");
4074
  if (s & DEP_OUTPUT)
4075
    fprintf (f, "DEP_OUTPUT; ");
4076
 
4077
  fprintf (f, "}");
4078
}
4079
 
4080
void
4081
debug_ds (ds_t s)
4082
{
4083
  dump_ds (stderr, s);
4084
  fprintf (stderr, "\n");
4085
}
4086
 
4087
#ifdef ENABLE_CHECKING
4088
/* Verify that dependence type and status are consistent.
4089
   If RELAXED_P is true, then skip dep_weakness checks.  */
4090
static void
4091
check_dep (dep_t dep, bool relaxed_p)
4092
{
4093
  enum reg_note dt = DEP_TYPE (dep);
4094
  ds_t ds = DEP_STATUS (dep);
4095
 
4096
  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));
4097
 
4098
  if (!(current_sched_info->flags & USE_DEPS_LIST))
4099
    {
4100
      gcc_assert (ds == -1);
4101
      return;
4102
    }
4103
 
4104
  /* Check that dependence type contains the same bits as the status.  */
4105
  if (dt == REG_DEP_TRUE)
4106
    gcc_assert (ds & DEP_TRUE);
4107
  else if (dt == REG_DEP_OUTPUT)
4108
    gcc_assert ((ds & DEP_OUTPUT)
4109
                && !(ds & DEP_TRUE));
4110
  else
4111
    gcc_assert ((dt == REG_DEP_ANTI)
4112
                && (ds & DEP_ANTI)
4113
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));
4114
 
4115
  /* HARD_DEP cannot appear in the dep_status of a link.  */
4116
  gcc_assert (!(ds & HARD_DEP));
4117
 
4118
  /* Check that dependence status is set correctly when speculation is not
4119
     supported.  */
4120
  if (!sched_deps_info->generate_spec_deps)
4121
    gcc_assert (!(ds & SPECULATIVE));
4122
  else if (ds & SPECULATIVE)
4123
    {
4124
      if (!relaxed_p)
4125
        {
4126
          ds_t type = FIRST_SPEC_TYPE;
4127
 
4128
          /* Check that dependence weakness is in proper range.  */
4129
          do
4130
            {
4131
              if (ds & type)
4132
                get_dep_weak (ds, type);
4133
 
4134
              if (type == LAST_SPEC_TYPE)
4135
                break;
4136
              type <<= SPEC_TYPE_SHIFT;
4137
            }
4138
          while (1);
4139
        }
4140
 
4141
      if (ds & BEGIN_SPEC)
4142
        {
4143
          /* Only true dependence can be data speculative.  */
4144
          if (ds & BEGIN_DATA)
4145
            gcc_assert (ds & DEP_TRUE);
4146
 
4147
          /* Control dependencies in the insn scheduler are represented by
4148
             anti-dependencies, therefore only anti dependence can be
4149
             control speculative.  */
4150
          if (ds & BEGIN_CONTROL)
4151
            gcc_assert (ds & DEP_ANTI);
4152
        }
4153
      else
4154
        {
4155
          /* Subsequent speculations should resolve true dependencies.  */
4156
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
4157
        }
4158
 
4159
      /* Check that true and anti dependencies can't have other speculative
4160
         statuses.  */
4161
      if (ds & DEP_TRUE)
4162
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
4163
      /* An output dependence can't be speculative at all.  */
4164
      gcc_assert (!(ds & DEP_OUTPUT));
4165
      if (ds & DEP_ANTI)
4166
        gcc_assert (ds & BEGIN_CONTROL);
4167
    }
4168
}
4169
#endif /* ENABLE_CHECKING */
4170
 
4171
#endif /* INSN_SCHEDULING */
