OpenCores Subversion repository "openrisc": https://opencores.org/ocsvn/openrisc/openrisc/trunk
[openrisc/trunk/gnu-stable/gcc-4.5.1/gcc/sched-deps.c] - blame information for rev 826

Line No. Rev Author Line
1 280 jeremybenn
/* Instruction scheduling pass.  This file computes dependencies between
2
   instructions.
3
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
4
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
5
   Free Software Foundation, Inc.
6
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
7
   and currently maintained by, Jim Wilson (wilson@cygnus.com)
8
 
9
This file is part of GCC.
10
 
11
GCC is free software; you can redistribute it and/or modify it under
12
the terms of the GNU General Public License as published by the Free
13
Software Foundation; either version 3, or (at your option) any later
14
version.
15
 
16
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
17
WARRANTY; without even the implied warranty of MERCHANTABILITY or
18
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
19
for more details.
20
 
21
You should have received a copy of the GNU General Public License
22
along with GCC; see the file COPYING3.  If not see
23
<http://www.gnu.org/licenses/>.  */
24
 
25
#include "config.h"
26
#include "system.h"
27
#include "coretypes.h"
28
#include "tm.h"
29
#include "toplev.h"
30
#include "rtl.h"
31
#include "tm_p.h"
32
#include "hard-reg-set.h"
33
#include "regs.h"
34
#include "function.h"
35
#include "flags.h"
36
#include "insn-config.h"
37
#include "insn-attr.h"
38
#include "except.h"
40
#include "recog.h"
41
#include "sched-int.h"
42
#include "params.h"
43
#include "cselib.h"
44
#include "ira.h"
45
#include "target.h"
46
 
47
#ifdef INSN_SCHEDULING
48
 
49
#ifdef ENABLE_CHECKING
50
#define CHECK (true)
51
#else
52
#define CHECK (false)
53
#endif
54
 
55
/* Holds current parameters for the dependency analyzer.  */
56
struct sched_deps_info_def *sched_deps_info;
57
 
58
/* The data is specific to the Haifa scheduler.  */
59
VEC(haifa_deps_insn_data_def, heap) *h_d_i_d = NULL;
60
 
61
/* Return the major type present in the DS.  */
62
enum reg_note
63
ds_to_dk (ds_t ds)
64
{
65
  if (ds & DEP_TRUE)
66
    return REG_DEP_TRUE;
67
 
68
  if (ds & DEP_OUTPUT)
69
    return REG_DEP_OUTPUT;
70
 
71
  gcc_assert (ds & DEP_ANTI);
72
 
73
  return REG_DEP_ANTI;
74
}
75
 
76
/* Return equivalent dep_status.  */
77
ds_t
78
dk_to_ds (enum reg_note dk)
79
{
80
  switch (dk)
81
    {
82
    case REG_DEP_TRUE:
83
      return DEP_TRUE;
84
 
85
    case REG_DEP_OUTPUT:
86
      return DEP_OUTPUT;
87
 
88
    default:
89
      gcc_assert (dk == REG_DEP_ANTI);
90
      return DEP_ANTI;
91
    }
92
}
93
 
94
/* Functions to operate with dependence information container - dep_t.  */
95
 
96
/* Init DEP with the arguments.  */
97
void
98
init_dep_1 (dep_t dep, rtx pro, rtx con, enum reg_note type, ds_t ds)
99
{
100
  DEP_PRO (dep) = pro;
101
  DEP_CON (dep) = con;
102
  DEP_TYPE (dep) = type;
103
  DEP_STATUS (dep) = ds;
104
}
105
 
106
/* Init DEP with the arguments.
107
   While most of the scheduler (including targets) only needs the major type
108
   of the dependency, it is convenient to hide full dep_status from them.  */
109
void
110
init_dep (dep_t dep, rtx pro, rtx con, enum reg_note kind)
111
{
112
  ds_t ds;
113
 
114
  if ((current_sched_info->flags & USE_DEPS_LIST))
115
    ds = dk_to_ds (kind);
116
  else
117
    ds = -1;
118
 
119
  init_dep_1 (dep, pro, con, kind, ds);
120
}
121
 
122
/* Make a copy of FROM in TO.  */
123
static void
124
copy_dep (dep_t to, dep_t from)
125
{
126
  memcpy (to, from, sizeof (*to));
127
}
128
 
129
static void dump_ds (FILE *, ds_t);
130
 
131
/* Define flags for dump_dep ().  */
132
 
133
/* Dump producer of the dependence.  */
134
#define DUMP_DEP_PRO (2)
135
 
136
/* Dump consumer of the dependence.  */
137
#define DUMP_DEP_CON (4)
138
 
139
/* Dump type of the dependence.  */
140
#define DUMP_DEP_TYPE (8)
141
 
142
/* Dump status of the dependence.  */
143
#define DUMP_DEP_STATUS (16)
144
 
145
/* Dump all information about the dependence.  */
146
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE       \
147
                      |DUMP_DEP_STATUS)
148
 
149
/* Dump DEP to DUMP.
150
   FLAGS is a bit mask specifying what information about DEP needs
151
   to be printed.
152
   If FLAGS has the very first bit set, then dump all information about DEP
153
   and propagate this bit into the callee dump functions.  */
154
static void
155
dump_dep (FILE *dump, dep_t dep, int flags)
156
{
157
  if (flags & 1)
158
    flags |= DUMP_DEP_ALL;
159
 
160
  fprintf (dump, "<");
161
 
162
  if (flags & DUMP_DEP_PRO)
163
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
164
 
165
  if (flags & DUMP_DEP_CON)
166
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
167
 
168
  if (flags & DUMP_DEP_TYPE)
169
    {
170
      char t;
171
      enum reg_note type = DEP_TYPE (dep);
172
 
173
      switch (type)
174
        {
175
        case REG_DEP_TRUE:
176
          t = 't';
177
          break;
178
 
179
        case REG_DEP_OUTPUT:
180
          t = 'o';
181
          break;
182
 
183
        case REG_DEP_ANTI:
184
          t = 'a';
185
          break;
186
 
187
        default:
188
          gcc_unreachable ();
189
          break;
190
        }
191
 
192
      fprintf (dump, "%c; ", t);
193
    }
194
 
195
  if (flags & DUMP_DEP_STATUS)
196
    {
197
      if (current_sched_info->flags & USE_DEPS_LIST)
198
        dump_ds (dump, DEP_STATUS (dep));
199
    }
200
 
201
  fprintf (dump, ">");
202
}
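/* For example, with FLAGS == 1 a true dependence between insns with
   UIDs 12 and 15 (hypothetical values) is printed as "<12; 15; t; >";
   the dep_status field is dumped before the closing '>' only when
   USE_DEPS_LIST is set.  */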
203
 
204
/* Default flags for dump_dep ().  */
205
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
206
 
207
/* Dump all fields of DEP to STDERR.  */
208
void
209
sd_debug_dep (dep_t dep)
210
{
211
  dump_dep (stderr, dep, 1);
212
  fprintf (stderr, "\n");
213
}
214
 
215
/* Determine whether DEP is a dependency link of a non-debug insn on a
216
   debug insn.  */
217
 
218
static inline bool
219
depl_on_debug_p (dep_link_t dep)
220
{
221
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
222
          && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
223
}
224
 
225
/* Functions to operate with a single link from the dependencies lists -
226
   dep_link_t.  */
227
 
228
/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
229
   PREV_NEXTP.  */
230
static void
231
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
232
{
233
  dep_link_t next = *prev_nextp;
234
 
235
  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
236
              && DEP_LINK_NEXT (l) == NULL);
237
 
238
  /* Init node being inserted.  */
239
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
240
  DEP_LINK_NEXT (l) = next;
241
 
242
  /* Fix next node.  */
243
  if (next != NULL)
244
    {
245
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
246
 
247
      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
248
    }
249
 
250
  /* Fix prev node.  */
251
  *prev_nextp = l;
252
}
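/* Note: DEP_LINK_PREV_NEXTP holds the address of the previous node's
   next field (or of the list head), which is what lets detach_dep_link ()
   below unlink a node in O(1) without a back pointer to the previous
   node itself.  */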
253
 
254
/* Add dep_link LINK to deps_list L.  */
255
static void
256
add_to_deps_list (dep_link_t link, deps_list_t l)
257
{
258
  attach_dep_link (link, &DEPS_LIST_FIRST (l));
259
 
260
  /* Don't count debug deps.  */
261
  if (!depl_on_debug_p (link))
262
    ++DEPS_LIST_N_LINKS (l);
263
}
264
 
265
/* Detach dep_link L from the list.  */
266
static void
267
detach_dep_link (dep_link_t l)
268
{
269
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
270
  dep_link_t next = DEP_LINK_NEXT (l);
271
 
272
  *prev_nextp = next;
273
 
274
  if (next != NULL)
275
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;
276
 
277
  DEP_LINK_PREV_NEXTP (l) = NULL;
278
  DEP_LINK_NEXT (l) = NULL;
279
}
280
 
281
/* Remove link LINK from list LIST.  */
282
static void
283
remove_from_deps_list (dep_link_t link, deps_list_t list)
284
{
285
  detach_dep_link (link);
286
 
287
  /* Don't count debug deps.  */
288
  if (!depl_on_debug_p (link))
289
    --DEPS_LIST_N_LINKS (list);
290
}
291
 
292
/* Move link LINK from list FROM to list TO.  */
293
static void
294
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
295
{
296
  remove_from_deps_list (link, from);
297
  add_to_deps_list (link, to);
298
}
299
 
300
/* Return true if LINK is not attached to any list.  */
301
static bool
302
dep_link_is_detached_p (dep_link_t link)
303
{
304
  return DEP_LINK_PREV_NEXTP (link) == NULL;
305
}
306
 
307
/* Pool to hold all dependency nodes (dep_node_t).  */
308
static alloc_pool dn_pool;
309
 
310
/* Number of dep_nodes out there.  */
311
static int dn_pool_diff = 0;
312
 
313
/* Create a dep_node.  */
314
static dep_node_t
315
create_dep_node (void)
316
{
317
  dep_node_t n = (dep_node_t) pool_alloc (dn_pool);
318
  dep_link_t back = DEP_NODE_BACK (n);
319
  dep_link_t forw = DEP_NODE_FORW (n);
320
 
321
  DEP_LINK_NODE (back) = n;
322
  DEP_LINK_NEXT (back) = NULL;
323
  DEP_LINK_PREV_NEXTP (back) = NULL;
324
 
325
  DEP_LINK_NODE (forw) = n;
326
  DEP_LINK_NEXT (forw) = NULL;
327
  DEP_LINK_PREV_NEXTP (forw) = NULL;
328
 
329
  ++dn_pool_diff;
330
 
331
  return n;
332
}
333
 
334
/* Delete dep_node N.  N must not be connected to any deps_list.  */
335
static void
336
delete_dep_node (dep_node_t n)
337
{
338
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
339
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));
340
 
341
  --dn_pool_diff;
342
 
343
  pool_free (dn_pool, n);
344
}
345
 
346
/* Pool to hold dependencies lists (deps_list_t).  */
347
static alloc_pool dl_pool;
348
 
349
/* Number of deps_lists out there.  */
350
static int dl_pool_diff = 0;
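/* dn_pool_diff and dl_pool_diff count how many dep_nodes and deps_lists
   are currently allocated; deps_pools_are_empty_p () below checks that
   both counters have dropped back to zero after a region is scheduled.  */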
351
 
352
/* Functions to operate with dependences lists - deps_list_t.  */
353
 
354
/* Return true if list L is empty.  */
355
static bool
356
deps_list_empty_p (deps_list_t l)
357
{
358
  return DEPS_LIST_N_LINKS (l) == 0;
359
}
360
 
361
/* Create a new deps_list.  */
362
static deps_list_t
363
create_deps_list (void)
364
{
365
  deps_list_t l = (deps_list_t) pool_alloc (dl_pool);
366
 
367
  DEPS_LIST_FIRST (l) = NULL;
368
  DEPS_LIST_N_LINKS (l) = 0;
369
 
370
  ++dl_pool_diff;
371
  return l;
372
}
373
 
374
/* Free deps_list L.  */
375
static void
376
free_deps_list (deps_list_t l)
377
{
378
  gcc_assert (deps_list_empty_p (l));
379
 
380
  --dl_pool_diff;
381
 
382
  pool_free (dl_pool, l);
383
}
384
 
385
/* Return true if there are no dep_nodes and deps_lists out there.
386
   After the region is scheduled all the dependency nodes and lists
387
   should [generally] be returned to the pool.  */
388
bool
389
deps_pools_are_empty_p (void)
390
{
391
  return dn_pool_diff == 0 && dl_pool_diff == 0;
392
}
393
 
394
/* Remove all elements from L.  */
395
static void
396
clear_deps_list (deps_list_t l)
397
{
398
  do
399
    {
400
      dep_link_t link = DEPS_LIST_FIRST (l);
401
 
402
      if (link == NULL)
403
        break;
404
 
405
      remove_from_deps_list (link, l);
406
    }
407
  while (1);
408
}
409
 
410
static regset reg_pending_sets;
411
static regset reg_pending_clobbers;
412
static regset reg_pending_uses;
413
static enum reg_pending_barrier_mode reg_pending_barrier;
414
 
415
/* Hard registers implicitly clobbered or used (or may be implicitly
416
   clobbered or used) by the currently analyzed insn.  For example,
417
   an insn may have a constraint that requires one register class.  Even if there is
418
   currently no hard register in the insn, the particular hard
419
   register will be in the insn after the reload pass because the
420
   constraint requires it.  */
421
static HARD_REG_SET implicit_reg_pending_clobbers;
422
static HARD_REG_SET implicit_reg_pending_uses;
423
 
424
/* To speed up the test for duplicate dependency links we keep a
425
   record of dependencies created by add_dependence when the average
426
   number of instructions in a basic block is very large.
427
 
428
   Studies have shown that there are typically around 5 instructions between
429
   branches for typical C code.  So we can make a guess that the average
430
   basic block is approximately 5 instructions long; we will choose 100X
431
   the average size as a very large basic block.
432
 
433
   Each insn has associated bitmaps for its dependencies.  Each bitmap
434
   has enough entries to represent a dependency on any other insn in
435
   the insn chain.  If the bitmap for the true-dependency cache is
436
   allocated, then the other two caches are allocated as well.  */
437
static bitmap_head *true_dependency_cache = NULL;
438
static bitmap_head *output_dependency_cache = NULL;
439
static bitmap_head *anti_dependency_cache = NULL;
440
static bitmap_head *spec_dependency_cache = NULL;
441
static int cache_size;
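/* The caches are indexed by the consumer's luid and tested against the
   producer's luid: e.g. if the insn with luid 7 has a true dependence on
   the insn with luid 3, bit 3 is set in true_dependency_cache[7]
   (luids 7 and 3 are just illustrative values).  */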
442
 
443
static int deps_may_trap_p (const_rtx);
444
static void add_dependence_list (rtx, rtx, int, enum reg_note);
445
static void add_dependence_list_and_free (struct deps_desc *, rtx,
446
                                          rtx *, int, enum reg_note);
447
static void delete_all_dependences (rtx);
448
static void fixup_sched_groups (rtx);
449
 
450
static void flush_pending_lists (struct deps_desc *, rtx, int, int);
451
static void sched_analyze_1 (struct deps_desc *, rtx, rtx);
452
static void sched_analyze_2 (struct deps_desc *, rtx, rtx);
453
static void sched_analyze_insn (struct deps_desc *, rtx, rtx);
454
 
455
static bool sched_has_condition_p (const_rtx);
456
static int conditions_mutex_p (const_rtx, const_rtx, bool, bool);
457
 
458
static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
459
                                                          rtx, rtx);
460
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
461
 
462
#ifdef ENABLE_CHECKING
463
static void check_dep (dep_t, bool);
464
#endif
465
 
466
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */
467
 
468
static int
469
deps_may_trap_p (const_rtx mem)
470
{
471
  const_rtx addr = XEXP (mem, 0);
472
 
473
  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
474
    {
475
      const_rtx t = get_reg_known_value (REGNO (addr));
476
      if (t)
477
        addr = t;
478
    }
479
  return rtx_addr_can_trap_p (addr);
480
}
481
 
482
 
483
/* Find the condition under which INSN is executed.  If REV is not NULL,
484
   it is set to TRUE when the returned comparison should be reversed
485
   to get the actual condition.  */
486
static rtx
487
sched_get_condition_with_rev (const_rtx insn, bool *rev)
488
{
489
  rtx pat = PATTERN (insn);
490
  rtx src;
491
 
492
  if (pat == 0)
493
    return 0;
494
 
495
  if (rev)
496
    *rev = false;
497
 
498
  if (GET_CODE (pat) == COND_EXEC)
499
    return COND_EXEC_TEST (pat);
500
 
501
  if (!any_condjump_p (insn) || !onlyjump_p (insn))
502
    return 0;
503
 
504
  src = SET_SRC (pc_set (insn));
505
 
506
  if (XEXP (src, 2) == pc_rtx)
507
    return XEXP (src, 0);
508
  else if (XEXP (src, 1) == pc_rtx)
509
    {
510
      rtx cond = XEXP (src, 0);
511
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
512
 
513
      if (revcode == UNKNOWN)
514
        return 0;
515
 
516
      if (rev)
517
        *rev = true;
518
      return cond;
519
    }
520
 
521
  return 0;
522
}
523
 
524
/* True when we can find a condition under which INSN is executed.  */
525
static bool
526
sched_has_condition_p (const_rtx insn)
527
{
528
  return !! sched_get_condition_with_rev (insn, NULL);
529
}
530
 
531
 
532
 
533
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */
534
static int
535
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
536
{
537
  if (COMPARISON_P (cond1)
538
      && COMPARISON_P (cond2)
539
      && GET_CODE (cond1) ==
540
          (rev1==rev2
541
          ? reversed_comparison_code (cond2, NULL)
542
          : GET_CODE (cond2))
543
      && XEXP (cond1, 0) == XEXP (cond2, 0)
544
      && XEXP (cond1, 1) == XEXP (cond2, 1))
545
    return 1;
546
  return 0;
547
}
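/* For instance, (eq (reg 65) (const_int 0)) and (ne (reg 65) (const_int 0))
   pass this test when REV1 == REV2: EQ is the reversed comparison code of
   NE and both pairs of operands match, so the two conditions can never be
   true at the same time.  (The register number is just an example.)  */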
548
 
549
/* Return true if insn1 and insn2 can never depend on one another because
550
   the conditions under which they are executed are mutually exclusive.  */
551
bool
552
sched_insns_conditions_mutex_p (const_rtx insn1, const_rtx insn2)
553
{
554
  rtx cond1, cond2;
555
  bool rev1 = false, rev2 = false;
556
 
557
  /* df doesn't handle conditional lifetimes entirely correctly;
558
     calls mess up the conditional lifetimes.  */
559
  if (!CALL_P (insn1) && !CALL_P (insn2))
560
    {
561
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
562
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
563
      if (cond1 && cond2
564
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
565
          /* Make sure first instruction doesn't affect condition of second
566
             instruction if switched.  */
567
          && !modified_in_p (cond1, insn2)
568
          /* Make sure second instruction doesn't affect condition of first
569
             instruction if switched.  */
570
          && !modified_in_p (cond2, insn1))
571
        return true;
572
    }
573
  return false;
574
}
575
 
576
 
577
/* Return true if INSN can potentially be speculated with type DS.  */
578
bool
579
sched_insn_is_legitimate_for_speculation_p (const_rtx insn, ds_t ds)
580
{
581
  if (HAS_INTERNAL_DEP (insn))
582
    return false;
583
 
584
  if (!NONJUMP_INSN_P (insn))
585
    return false;
586
 
587
  if (SCHED_GROUP_P (insn))
588
    return false;
589
 
590
  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX (insn)))
591
    return false;
592
 
593
  if (side_effects_p (PATTERN (insn)))
594
    return false;
595
 
596
  if (ds & BE_IN_SPEC)
597
    /* The following instructions, which depend on a speculatively scheduled
598
       instruction, cannot themselves be speculatively scheduled.  */
599
    {
600
      if (may_trap_p (PATTERN (insn)))
601
        /* If instruction might trap, it cannot be speculatively scheduled.
602
           For control speculation it's obvious why and for data speculation
603
           it's because the insn might get wrong input if speculation
604
           wasn't successful.  */
605
        return false;
606
 
607
      if ((ds & BE_IN_DATA)
608
          && sched_has_condition_p (insn))
609
        /* If this is a predicated instruction, then it cannot be
610
           speculatively scheduled.  See PR35659.  */
611
        return false;
612
    }
613
 
614
  return true;
615
}
616
 
617
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
618
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
619
   and remove the type of the returned list [through LIST_PTR] from TYPES_PTR.
620
   This function is used to switch sd_iterator to the next list.
621
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
622
void
623
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
624
              deps_list_t *list_ptr, bool *resolved_p_ptr)
625
{
626
  sd_list_types_def types = *types_ptr;
627
 
628
  if (types & SD_LIST_HARD_BACK)
629
    {
630
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
631
      *resolved_p_ptr = false;
632
      *types_ptr = types & ~SD_LIST_HARD_BACK;
633
    }
634
  else if (types & SD_LIST_SPEC_BACK)
635
    {
636
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
637
      *resolved_p_ptr = false;
638
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
639
    }
640
  else if (types & SD_LIST_FORW)
641
    {
642
      *list_ptr = INSN_FORW_DEPS (insn);
643
      *resolved_p_ptr = false;
644
      *types_ptr = types & ~SD_LIST_FORW;
645
    }
646
  else if (types & SD_LIST_RES_BACK)
647
    {
648
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
649
      *resolved_p_ptr = true;
650
      *types_ptr = types & ~SD_LIST_RES_BACK;
651
    }
652
  else if (types & SD_LIST_RES_FORW)
653
    {
654
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
655
      *resolved_p_ptr = true;
656
      *types_ptr = types & ~SD_LIST_RES_FORW;
657
    }
658
  else
659
    {
660
      *list_ptr = NULL;
661
      *resolved_p_ptr = false;
662
      *types_ptr = SD_LIST_NONE;
663
    }
664
}
665
 
666
/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
667
int
668
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
669
{
670
  int size = 0;
671
 
672
  while (list_types != SD_LIST_NONE)
673
    {
674
      deps_list_t list;
675
      bool resolved_p;
676
 
677
      sd_next_list (insn, &list_types, &list, &resolved_p);
678
      if (list)
679
        size += DEPS_LIST_N_LINKS (list);
680
    }
681
 
682
  return size;
683
}
684
 
685
/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */
686
 
687
bool
688
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
689
{
690
  while (list_types != SD_LIST_NONE)
691
    {
692
      deps_list_t list;
693
      bool resolved_p;
694
 
695
      sd_next_list (insn, &list_types, &list, &resolved_p);
696
      if (!deps_list_empty_p (list))
697
        return false;
698
    }
699
 
700
  return true;
701
}
702
 
703
/* Initialize data for INSN.  */
704
void
705
sd_init_insn (rtx insn)
706
{
707
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
708
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
709
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
710
  INSN_FORW_DEPS (insn) = create_deps_list ();
711
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();
712
 
713
  if (DEBUG_INSN_P (insn))
714
    DEBUG_INSN_SCHED_P (insn) = TRUE;
715
 
716
  /* ??? It would be nice to allocate dependency caches here.  */
717
}
718
 
719
/* Free data for INSN.  */
720
void
721
sd_finish_insn (rtx insn)
722
{
723
  /* ??? It would be nice to deallocate dependency caches here.  */
724
 
725
  if (DEBUG_INSN_P (insn))
726
    {
727
      gcc_assert (DEBUG_INSN_SCHED_P (insn));
728
      DEBUG_INSN_SCHED_P (insn) = FALSE;
729
    }
730
 
731
  free_deps_list (INSN_HARD_BACK_DEPS (insn));
732
  INSN_HARD_BACK_DEPS (insn) = NULL;
733
 
734
  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
735
  INSN_SPEC_BACK_DEPS (insn) = NULL;
736
 
737
  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
738
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;
739
 
740
  free_deps_list (INSN_FORW_DEPS (insn));
741
  INSN_FORW_DEPS (insn) = NULL;
742
 
743
  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
744
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
745
}
746
 
747
/* Find a dependency between producer PRO and consumer CON.
748
   Search through resolved dependency lists if RESOLVED_P is true.
749
   If no such dependency is found return NULL,
750
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
751
   with an iterator pointing to it.  */
752
static dep_t
753
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
754
                              sd_iterator_def *sd_it_ptr)
755
{
756
  sd_list_types_def pro_list_type;
757
  sd_list_types_def con_list_type;
758
  sd_iterator_def sd_it;
759
  dep_t dep;
760
  bool found_p = false;
761
 
762
  if (resolved_p)
763
    {
764
      pro_list_type = SD_LIST_RES_FORW;
765
      con_list_type = SD_LIST_RES_BACK;
766
    }
767
  else
768
    {
769
      pro_list_type = SD_LIST_FORW;
770
      con_list_type = SD_LIST_BACK;
771
    }
772
 
773
  /* Walk through either the back list of CON or the forw list of PRO,
774
     depending on which one is shorter.  */
775
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
776
    {
777
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
778
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
779
        if (DEP_PRO (dep) == pro)
780
          {
781
            found_p = true;
782
            break;
783
          }
784
    }
785
  else
786
    {
787
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
788
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
789
        if (DEP_CON (dep) == con)
790
          {
791
            found_p = true;
792
            break;
793
          }
794
    }
795
 
796
  if (found_p)
797
    {
798
      if (sd_it_ptr != NULL)
799
        *sd_it_ptr = sd_it;
800
 
801
      return dep;
802
    }
803
 
804
  return NULL;
805
}
806
 
807
/* Find a dependency between producer PRO and consumer CON.
808
   Use the dependency cache [if available] to check if the dependency is present at all.
809
   Search through resolved dependency lists if RESOLVED_P is true.
810
   Return the dependency, or NULL if none is found.  */
811
dep_t
812
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
813
{
814
  if (true_dependency_cache != NULL)
815
    /* Avoiding the list walk below can cut compile times dramatically
816
       for some code.  */
817
    {
818
      int elem_luid = INSN_LUID (pro);
819
      int insn_luid = INSN_LUID (con);
820
 
821
      gcc_assert (output_dependency_cache != NULL
822
                  && anti_dependency_cache != NULL);
823
 
824
      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
825
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
826
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
827
        return NULL;
828
    }
829
 
830
  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
831
}
832
 
833
/* Add or update a dependence described by DEP.
834
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
835
   data speculation.
836
 
837
   The function returns a value indicating if an old entry has been changed
838
   or a new entry has been added to insn's backward deps.
839
 
840
   This function merely checks if producer and consumer are the same insn
841
   and doesn't create a dep in this case.  Actual manipulation of
842
   dependence data structures is performed in add_or_update_dep_1.  */
843
static enum DEPS_ADJUST_RESULT
844
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
845
{
846
  rtx elem = DEP_PRO (dep);
847
  rtx insn = DEP_CON (dep);
848
 
849
  gcc_assert (INSN_P (insn) && INSN_P (elem));
850
 
851
  /* Don't depend an insn on itself.  */
852
  if (insn == elem)
853
    {
854
      if (sched_deps_info->generate_spec_deps)
855
        /* INSN has an internal dependence, which we can't overcome.  */
856
        HAS_INTERNAL_DEP (insn) = 1;
857
 
858
      return DEP_NODEP;
859
    }
860
 
861
  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
862
}
863
 
864
/* Ask dependency caches what needs to be done for dependence DEP.
865
   Return DEP_CREATED if new dependence should be created and there is no
866
   need to try to find one searching the dependencies lists.
867
   Return DEP_PRESENT if there already is a dependence described by DEP and
868
   hence nothing is to be done.
869
   Return DEP_CHANGED if there already is a dependence, but it should be
870
   updated to incorporate additional information from DEP.  */
871
static enum DEPS_ADJUST_RESULT
872
ask_dependency_caches (dep_t dep)
873
{
874
  int elem_luid = INSN_LUID (DEP_PRO (dep));
875
  int insn_luid = INSN_LUID (DEP_CON (dep));
876
 
877
  gcc_assert (true_dependency_cache != NULL
878
              && output_dependency_cache != NULL
879
              && anti_dependency_cache != NULL);
880
 
881
  if (!(current_sched_info->flags & USE_DEPS_LIST))
882
    {
883
      enum reg_note present_dep_type;
884
 
885
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
886
        present_dep_type = REG_DEP_TRUE;
887
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
888
        present_dep_type = REG_DEP_OUTPUT;
889
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
890
        present_dep_type = REG_DEP_ANTI;
891
      else
892
        /* There is no existing dep so it should be created.  */
893
        return DEP_CREATED;
894
 
895
      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
896
        /* DEP does not add anything to the existing dependence.  */
897
        return DEP_PRESENT;
898
    }
899
  else
900
    {
901
      ds_t present_dep_types = 0;
902
 
903
      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
904
        present_dep_types |= DEP_TRUE;
905
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
906
        present_dep_types |= DEP_OUTPUT;
907
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
908
        present_dep_types |= DEP_ANTI;
909
 
910
      if (present_dep_types == 0)
911
        /* There is no existing dep so it should be created.  */
912
        return DEP_CREATED;
913
 
914
      if (!(current_sched_info->flags & DO_SPECULATION)
915
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
916
        {
917
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
918
              == present_dep_types)
919
            /* DEP does not add anything to the existing dependence.  */
920
            return DEP_PRESENT;
921
        }
922
      else
923
        {
924
          /* Only true dependencies can be data speculative and
925
             only anti dependencies can be control speculative.  */
926
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
927
                      == present_dep_types);
928
 
929
          /* if (DEP is SPECULATIVE) then
930
             ..we should update DEP_STATUS
931
             else
932
             ..we should reset existing dep to non-speculative.  */
933
        }
934
    }
935
 
936
  return DEP_CHANGED;
937
}
938
 
939
/* Set dependency caches according to DEP.  */
940
static void
941
set_dependency_caches (dep_t dep)
942
{
943
  int elem_luid = INSN_LUID (DEP_PRO (dep));
944
  int insn_luid = INSN_LUID (DEP_CON (dep));
945
 
946
  if (!(current_sched_info->flags & USE_DEPS_LIST))
947
    {
948
      switch (DEP_TYPE (dep))
949
        {
950
        case REG_DEP_TRUE:
951
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
952
          break;
953
 
954
        case REG_DEP_OUTPUT:
955
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
956
          break;
957
 
958
        case REG_DEP_ANTI:
959
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
960
          break;
961
 
962
        default:
963
          gcc_unreachable ();
964
        }
965
    }
966
  else
967
    {
968
      ds_t ds = DEP_STATUS (dep);
969
 
970
      if (ds & DEP_TRUE)
971
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
972
      if (ds & DEP_OUTPUT)
973
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
974
      if (ds & DEP_ANTI)
975
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
976
 
977
      if (ds & SPECULATIVE)
978
        {
979
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
980
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
981
        }
982
    }
983
}
984
 
985
/* The type of dependence DEP has changed from OLD_TYPE.  Update dependency
986
   caches accordingly.  */
987
static void
988
update_dependency_caches (dep_t dep, enum reg_note old_type)
989
{
990
  int elem_luid = INSN_LUID (DEP_PRO (dep));
991
  int insn_luid = INSN_LUID (DEP_CON (dep));
992
 
993
  /* Clear corresponding cache entry because type of the link
994
     may have changed.  Keep them if we use_deps_list.  */
995
  if (!(current_sched_info->flags & USE_DEPS_LIST))
996
    {
997
      switch (old_type)
998
        {
999
        case REG_DEP_OUTPUT:
1000
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1001
          break;
1002
 
1003
        case REG_DEP_ANTI:
1004
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1005
          break;
1006
 
1007
        default:
1008
          gcc_unreachable ();
1009
        }
1010
    }
1011
 
1012
  set_dependency_caches (dep);
1013
}
1014
 
1015
/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
1016
static void
1017
change_spec_dep_to_hard (sd_iterator_def sd_it)
1018
{
1019
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1020
  dep_link_t link = DEP_NODE_BACK (node);
1021
  dep_t dep = DEP_NODE_DEP (node);
1022
  rtx elem = DEP_PRO (dep);
1023
  rtx insn = DEP_CON (dep);
1024
 
1025
  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));
1026
 
1027
  DEP_STATUS (dep) &= ~SPECULATIVE;
1028
 
1029
  if (true_dependency_cache != NULL)
1030
    /* Clear the cache entry.  */
1031
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
1032
                      INSN_LUID (elem));
1033
}
1034
 
1035
/* Update DEP to incorporate information from NEW_DEP.
1036
   SD_IT points to DEP in case it should be moved to another list.
1037
   MEM1 and MEM2, if nonnull, correspond to memory locations in case a
1038
   data-speculative dependence should be updated.  */
1039
static enum DEPS_ADJUST_RESULT
1040
update_dep (dep_t dep, dep_t new_dep,
1041
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
1042
            rtx mem1 ATTRIBUTE_UNUSED,
1043
            rtx mem2 ATTRIBUTE_UNUSED)
1044
{
1045
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
1046
  enum reg_note old_type = DEP_TYPE (dep);
1047
 
1048
  /* If this is a more restrictive type of dependence than the
1049
     existing one, then change the existing dependence to this
1050
     type.  */
1051
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
1052
    {
1053
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
1054
      res = DEP_CHANGED;
1055
    }
1056
 
1057
  if (current_sched_info->flags & USE_DEPS_LIST)
1058
    /* Update DEP_STATUS.  */
1059
    {
1060
      ds_t dep_status = DEP_STATUS (dep);
1061
      ds_t ds = DEP_STATUS (new_dep);
1062
      ds_t new_status = ds | dep_status;
1063
 
1064
      if (new_status & SPECULATIVE)
1065
        /* Either existing dep or a dep we're adding or both are
1066
           speculative.  */
1067
        {
1068
          if (!(ds & SPECULATIVE)
1069
              || !(dep_status & SPECULATIVE))
1070
            /* The new dep can't be speculative.  */
1071
            {
1072
              new_status &= ~SPECULATIVE;
1073
 
1074
              if (dep_status & SPECULATIVE)
1075
                /* The old dep was speculative, but now it
1076
                   isn't.  */
1077
                change_spec_dep_to_hard (sd_it);
1078
            }
1079
          else
1080
            {
1081
              /* Both are speculative.  Merge probabilities.  */
1082
              if (mem1 != NULL)
1083
                {
1084
                  dw_t dw;
1085
 
1086
                  dw = estimate_dep_weak (mem1, mem2);
1087
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
1088
                }
1089
 
1090
              new_status = ds_merge (dep_status, ds);
1091
            }
1092
        }
1093
 
1094
      ds = new_status;
1095
 
1096
      if (dep_status != ds)
1097
        {
1098
          DEP_STATUS (dep) = ds;
1099
          res = DEP_CHANGED;
1100
        }
1101
    }
1102
 
1103
  if (true_dependency_cache != NULL
1104
      && res == DEP_CHANGED)
1105
    update_dependency_caches (dep, old_type);
1106
 
1107
  return res;
1108
}
1109
 
1110
/* Add or update a dependence described by DEP.
1111
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
1112
   data speculation.
1113
 
1114
   The function returns a value indicating if an old entry has been changed
1115
   or a new entry has been added to insn's backward deps or nothing has
1116
   been updated at all.  */
1117
static enum DEPS_ADJUST_RESULT
1118
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
1119
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
1120
{
1121
  bool maybe_present_p = true;
1122
  bool present_p = false;
1123
 
1124
  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
1125
              && DEP_PRO (new_dep) != DEP_CON (new_dep));
1126
 
1127
#ifdef ENABLE_CHECKING
1128
  check_dep (new_dep, mem1 != NULL);
1129
#endif
1130
 
1131
  if (true_dependency_cache != NULL)
1132
    {
1133
      switch (ask_dependency_caches (new_dep))
1134
        {
1135
        case DEP_PRESENT:
1136
          return DEP_PRESENT;
1137
 
1138
        case DEP_CHANGED:
1139
          maybe_present_p = true;
1140
          present_p = true;
1141
          break;
1142
 
1143
        case DEP_CREATED:
1144
          maybe_present_p = false;
1145
          present_p = false;
1146
          break;
1147
 
1148
        default:
1149
          gcc_unreachable ();
1150
          break;
1151
        }
1152
    }
1153
 
1154
  /* Check that we don't already have this dependence.  */
1155
  if (maybe_present_p)
1156
    {
1157
      dep_t present_dep;
1158
      sd_iterator_def sd_it;
1159
 
1160
      gcc_assert (true_dependency_cache == NULL || present_p);
1161
 
1162
      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
1163
                                                  DEP_CON (new_dep),
1164
                                                  resolved_p, &sd_it);
1165
 
1166
      if (present_dep != NULL)
1167
        /* We found an existing dependency between ELEM and INSN.  */
1168
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
1169
      else
1170
        /* We didn't find a dep, so it shouldn't be present in the cache.  */
1171
        gcc_assert (!present_p);
1172
    }
1173
 
1174
  /* Might want to check one level of transitivity to save conses.
1175
     This check should be done in maybe_add_or_update_dep_1.
1176
     Since we made it to add_or_update_dep_1, we must create
1177
     (or update) a link.  */
1178
 
1179
  if (mem1 != NULL_RTX)
1180
    {
1181
      gcc_assert (sched_deps_info->generate_spec_deps);
1182
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
1183
                                           estimate_dep_weak (mem1, mem2));
1184
    }
1185
 
1186
  sd_add_dep (new_dep, resolved_p);
1187
 
1188
  return DEP_CREATED;
1189
}
1190
 
1191
/* Initialize BACK_LIST_PTR with consumer's backward list and
1192
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
1193
   initialize with lists that hold resolved deps.  */
1194
static void
1195
get_back_and_forw_lists (dep_t dep, bool resolved_p,
1196
                         deps_list_t *back_list_ptr,
1197
                         deps_list_t *forw_list_ptr)
1198
{
1199
  rtx con = DEP_CON (dep);
1200
 
1201
  if (!resolved_p)
1202
    {
1203
      if ((current_sched_info->flags & DO_SPECULATION)
1204
          && (DEP_STATUS (dep) & SPECULATIVE))
1205
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1206
      else
1207
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1208
 
1209
      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1210
    }
1211
  else
1212
    {
1213
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1214
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1215
    }
1216
}
1217
 
1218
/* Add dependence described by DEP.
1219
   If RESOLVED_P is true treat the dependence as a resolved one.  */
1220
void
1221
sd_add_dep (dep_t dep, bool resolved_p)
1222
{
1223
  dep_node_t n = create_dep_node ();
1224
  deps_list_t con_back_deps;
1225
  deps_list_t pro_forw_deps;
1226
  rtx elem = DEP_PRO (dep);
1227
  rtx insn = DEP_CON (dep);
1228
 
1229
  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);
1230
 
1231
  if ((current_sched_info->flags & DO_SPECULATION)
1232
      && !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
1233
    DEP_STATUS (dep) &= ~SPECULATIVE;
1234
 
1235
  copy_dep (DEP_NODE_DEP (n), dep);
1236
 
1237
  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);
1238
 
1239
  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);
1240
 
1241
#ifdef ENABLE_CHECKING
1242
  check_dep (dep, false);
1243
#endif
1244
 
1245
  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1246
 
1247
  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
1248
     in the bitmap caches of dependency information.  */
1249
  if (true_dependency_cache != NULL)
1250
    set_dependency_caches (dep);
1251
}
1252
 
1253
/* Add or update the backward dependence described by DEP, searching the
1254
   resolved lists if RESOLVED_P is true.
1255
   This function is a convenience wrapper around add_or_update_dep_1.  */
1256
enum DEPS_ADJUST_RESULT
1257
sd_add_or_update_dep (dep_t dep, bool resolved_p)
1258
{
1259
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
1260
}
1261
 
1262
/* Resolve the dependence pointed to by SD_IT.
1263
   SD_IT will advance to the next element.  */
1264
void
1265
sd_resolve_dep (sd_iterator_def sd_it)
1266
{
1267
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
1268
  dep_t dep = DEP_NODE_DEP (node);
1269
  rtx pro = DEP_PRO (dep);
1270
  rtx con = DEP_CON (dep);
1271
 
1272
  if ((current_sched_info->flags & DO_SPECULATION)
1273
      && (DEP_STATUS (dep) & SPECULATIVE))
1274
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
1275
                   INSN_RESOLVED_BACK_DEPS (con));
1276
  else
1277
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
1278
                   INSN_RESOLVED_BACK_DEPS (con));
1279
 
1280
  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
1281
                 INSN_RESOLVED_FORW_DEPS (pro));
1282
}
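/* Note: resolving a dependence just moves its links: the backward link
   goes from the hard/spec back-deps list to INSN_RESOLVED_BACK_DEPS and
   the forward link to INSN_RESOLVED_FORW_DEPS; the dep_node itself is
   reused unchanged.  */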
1283
 
1284
/* Make TO depend on all of FROM's producers.
1285
   If RESOLVED_P is true add dependencies to the resolved lists.  */
1286
void
1287
sd_copy_back_deps (rtx to, rtx from, bool resolved_p)
1288
{
1289
  sd_list_types_def list_type;
1290
  sd_iterator_def sd_it;
1291
  dep_t dep;
1292
 
1293
  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1294
 
1295
  FOR_EACH_DEP (from, list_type, sd_it, dep)
1296
    {
1297
      dep_def _new_dep, *new_dep = &_new_dep;
1298
 
1299
      copy_dep (new_dep, dep);
1300
      DEP_CON (new_dep) = to;
1301
      sd_add_dep (new_dep, resolved_p);
1302
    }
1303
}
1304
 
1305
/* Remove a dependency referred to by SD_IT.
1306
   SD_IT will point to the next dependence after removal.  */
1307
void
1308
sd_delete_dep (sd_iterator_def sd_it)
1309
{
1310
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
1311
  dep_t dep = DEP_NODE_DEP (n);
1312
  rtx pro = DEP_PRO (dep);
1313
  rtx con = DEP_CON (dep);
1314
  deps_list_t con_back_deps;
1315
  deps_list_t pro_forw_deps;
1316
 
1317
  if (true_dependency_cache != NULL)
1318
    {
1319
      int elem_luid = INSN_LUID (pro);
1320
      int insn_luid = INSN_LUID (con);
1321
 
1322
      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
1323
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
1324
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
1325
 
1326
      if (current_sched_info->flags & DO_SPECULATION)
1327
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
1328
    }
1329
 
1330
  get_back_and_forw_lists (dep, sd_it.resolved_p,
1331
                           &con_back_deps, &pro_forw_deps);
1332
 
1333
  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
1334
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);
1335
 
1336
  delete_dep_node (n);
1337
}
1338
 
1339
/* Dump size of the lists.  */
1340
#define DUMP_LISTS_SIZE (2)
1341
 
1342
/* Dump dependencies of the lists.  */
1343
#define DUMP_LISTS_DEPS (4)
1344
 
1345
/* Dump all information about the lists.  */
1346
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1347
 
1348
/* Dump deps_lists of INSN specified by TYPES to DUMP.
1349
   FLAGS is a bit mask specifying what information about the lists needs
1350
   to be printed.
1351
   If FLAGS has the very first bit set, then dump all information about
1352
   the lists and propagate this bit into the callee dump functions.  */
1353
static void
1354
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
1355
{
1356
  sd_iterator_def sd_it;
1357
  dep_t dep;
1358
  int all;
1359
 
1360
  all = (flags & 1);
1361
 
1362
  if (all)
1363
    flags |= DUMP_LISTS_ALL;
1364
 
1365
  fprintf (dump, "[");
1366
 
1367
  if (flags & DUMP_LISTS_SIZE)
1368
    fprintf (dump, "%d; ", sd_lists_size (insn, types));
1369
 
1370
  if (flags & DUMP_LISTS_DEPS)
1371
    {
1372
      FOR_EACH_DEP (insn, types, sd_it, dep)
1373
        {
1374
          dump_dep (dump, dep, dump_dep_flags | all);
1375
          fprintf (dump, " ");
1376
        }
1377
    }
1378
}
1379
 
1380
/* Dump all information about deps_lists of INSN specified by TYPES
1381
   to STDERR.  */
1382
void
1383
sd_debug_lists (rtx insn, sd_list_types_def types)
1384
{
1385
  dump_lists (stderr, insn, types, 1);
1386
  fprintf (stderr, "\n");
1387
}
1388
 
1389
/* A convenience wrapper to operate on an entire list.  */
1390
 
1391
static void
1392
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
1393
{
1394
  for (; list; list = XEXP (list, 1))
1395
    {
1396
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1397
        add_dependence (insn, XEXP (list, 0), dep_type);
1398
    }
1399
}
1400
 
1401
/* Similar, but free *LISTP at the same time, when the context
1402
   is not readonly.  */
1403
 
1404
static void
1405
add_dependence_list_and_free (struct deps_desc *deps, rtx insn, rtx *listp,
1406
                              int uncond, enum reg_note dep_type)
1407
{
1408
  rtx list, next;
1409
 
1410
  if (deps->readonly)
1411
    {
1412
      add_dependence_list (insn, *listp, uncond, dep_type);
1413
      return;
1414
    }
1415
 
1416
  for (list = *listp, *listp = NULL; list ; list = next)
1417
    {
1418
      next = XEXP (list, 1);
1419
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
1420
        add_dependence (insn, XEXP (list, 0), dep_type);
1421
      free_INSN_LIST_node (list);
1422
    }
1423
}
1424
 
1425
/* Remove all occurrences of INSN from LIST.  Return the number of
1426
   occurrences removed.  */
1427
 
1428
static int
1429
remove_from_dependence_list (rtx insn, rtx* listp)
1430
{
1431
  int removed = 0;
1432
 
1433
  while (*listp)
1434
    {
1435
      if (XEXP (*listp, 0) == insn)
1436
        {
1437
          remove_free_INSN_LIST_node (listp);
1438
          removed++;
1439
          continue;
1440
        }
1441
 
1442
      listp = &XEXP (*listp, 1);
1443
    }
1444
 
1445
  return removed;
1446
}
1447
 
1448
/* Same as above, but process two lists at once.  */
1449
static int
1450
remove_from_both_dependence_lists (rtx insn, rtx *listp, rtx *exprp)
1451
{
1452
  int removed = 0;
1453
 
1454
  while (*listp)
1455
    {
1456
      if (XEXP (*listp, 0) == insn)
1457
        {
1458
          remove_free_INSN_LIST_node (listp);
1459
          remove_free_EXPR_LIST_node (exprp);
1460
          removed++;
1461
          continue;
1462
        }
1463
 
1464
      listp = &XEXP (*listp, 1);
1465
      exprp = &XEXP (*exprp, 1);
1466
    }
1467
 
1468
  return removed;
1469
}
1470
 
1471
/* Clear all dependencies for an insn.  */
1472
static void
1473
delete_all_dependences (rtx insn)
1474
{
1475
  sd_iterator_def sd_it;
1476
  dep_t dep;
1477
 
1478
  /* The loop below could be optimized to clear the caches and back_deps
1479
     in one call, but that would duplicate code from
1480
     delete_dep ().  */
1481
 
1482
  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1483
       sd_iterator_cond (&sd_it, &dep);)
1484
    sd_delete_dep (sd_it);
1485
}
1486
 
1487
/* All insns in a scheduling group except the first should only have
1488
   dependencies on the previous insn in the group.  So we find the
1489
   first instruction in the scheduling group by walking the dependence
1490
   chains backwards. Then we add the dependencies for the group to
1491
   the previous nonnote insn.  */
1492
 
1493
static void
1494
fixup_sched_groups (rtx insn)
1495
{
1496
  sd_iterator_def sd_it;
1497
  dep_t dep;
1498
  rtx prev_nonnote;
1499
 
1500
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
1501
    {
1502
      rtx i = insn;
1503
      rtx pro = DEP_PRO (dep);
1504
 
1505
      do
1506
        {
1507
          i = prev_nonnote_insn (i);
1508
 
1509
          if (pro == i)
1510
            goto next_link;
1511
        } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));
1512
 
1513
      if (! sched_insns_conditions_mutex_p (i, pro))
1514
        add_dependence (i, pro, DEP_TYPE (dep));
1515
    next_link:;
1516
    }
1517
 
1518
  delete_all_dependences (insn);
1519
 
1520 378 julius
  prev_nonnote = prev_nonnote_nondebug_insn (insn);
1521 280 jeremybenn
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
1522
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
1523
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
1524
}
1525
 
1526
/* Process an insn's memory dependencies.  There are four kinds of
1527
   dependencies:
1528
 
1529
   (0) read dependence: read follows read
1530
   (1) true dependence: read follows write
1531
   (2) output dependence: write follows write
1532
   (3) anti dependence: write follows read
1533
 
1534
   We are careful to build only dependencies which actually exist, and
1535
   use transitivity to avoid building too many links.  */
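/* For instance, a load from [r1] that follows a store to [r1] carries a
   true dependence (1) on the store, while a store to [r1] that follows
   such a load carries an anti dependence (3) on it.  */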
1536
 
1537
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1538
   The MEM is a memory reference contained within INSN, which we are saving
1539
   so that we can do memory aliasing on it.  */
1540
 
1541
static void
1542
add_insn_mem_dependence (struct deps_desc *deps, bool read_p,
1543
                         rtx insn, rtx mem)
1544
{
1545
  rtx *insn_list;
1546
  rtx *mem_list;
1547
  rtx link;
1548
 
1549
  gcc_assert (!deps->readonly);
1550
  if (read_p)
1551
    {
1552
      insn_list = &deps->pending_read_insns;
1553
      mem_list = &deps->pending_read_mems;
1554
      if (!DEBUG_INSN_P (insn))
1555
        deps->pending_read_list_length++;
1556
    }
1557
  else
1558
    {
1559
      insn_list = &deps->pending_write_insns;
1560
      mem_list = &deps->pending_write_mems;
1561
      deps->pending_write_list_length++;
1562
    }
1563
 
1564
  link = alloc_INSN_LIST (insn, *insn_list);
1565
  *insn_list = link;
1566
 
1567
  if (sched_deps_info->use_cselib)
1568
    {
1569
      mem = shallow_copy_rtx (mem);
1570
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
1571
    }
1572
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1573
  *mem_list = link;
1574
}
1575
 
1576
/* Make a dependency between every memory reference on the pending lists
1577
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
1578
   dependencies for a read operation, similarly with FOR_WRITE.  */
1579
 
1580
static void
1581
flush_pending_lists (struct deps_desc *deps, rtx insn, int for_read,
1582
                     int for_write)
1583
{
1584
  if (for_write)
1585
    {
1586
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
1587
                                    1, REG_DEP_ANTI);
1588
      if (!deps->readonly)
1589
        {
1590
          free_EXPR_LIST_list (&deps->pending_read_mems);
1591
          deps->pending_read_list_length = 0;
1592
        }
1593
    }
1594
 
1595
  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
1596
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1597
 
1598
  add_dependence_list_and_free (deps, insn,
1599
                                &deps->last_pending_memory_flush, 1,
1600
                                for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
1601
  if (!deps->readonly)
1602
    {
1603
      free_EXPR_LIST_list (&deps->pending_write_mems);
1604
      deps->pending_write_list_length = 0;
1605
 
1606
      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1607
      deps->pending_flush_length = 1;
1608
    }
1609
}
1610
 
1611
/* The instruction whose dependencies we are analyzing.  */
1612
static rtx cur_insn = NULL_RTX;
1613
 
1614
/* Implement hooks for haifa scheduler.  */
1615
 
1616
static void
1617
haifa_start_insn (rtx insn)
1618
{
1619
  gcc_assert (insn && !cur_insn);
1620
 
1621
  cur_insn = insn;
1622
}
1623
 
1624
static void
1625
haifa_finish_insn (void)
1626
{
1627
  cur_insn = NULL;
1628
}
1629
 
1630
void
1631
haifa_note_reg_set (int regno)
1632
{
1633
  SET_REGNO_REG_SET (reg_pending_sets, regno);
1634
}
1635
 
1636
void
1637
haifa_note_reg_clobber (int regno)
1638
{
1639
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1640
}
1641
 
1642
void
1643
haifa_note_reg_use (int regno)
1644
{
1645
  SET_REGNO_REG_SET (reg_pending_uses, regno);
1646
}
1647
 
1648
static void
1649
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx pending_insn, ds_t ds)
1650
{
1651
  if (!(ds & SPECULATIVE))
1652
    {
1653
      mem = NULL_RTX;
1654
      pending_mem = NULL_RTX;
1655
    }
1656
  else
1657
    gcc_assert (ds & BEGIN_DATA);
1658
 
1659
  {
1660
    dep_def _dep, *dep = &_dep;
1661
 
1662
    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
1663
                current_sched_info->flags & USE_DEPS_LIST ? ds : -1);
1664
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
1665
  }
1666
 
1667
}
1668
 
1669
static void
1670
haifa_note_dep (rtx elem, ds_t ds)
1671
{
1672
  dep_def _dep;
1673
  dep_t dep = &_dep;
1674
 
1675
  init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1676
  maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1677
}
1678
 
1679
static void
1680
note_reg_use (int r)
1681
{
1682
  if (sched_deps_info->note_reg_use)
1683
    sched_deps_info->note_reg_use (r);
1684
}
1685
 
1686
static void
1687
note_reg_set (int r)
1688
{
1689
  if (sched_deps_info->note_reg_set)
1690
    sched_deps_info->note_reg_set (r);
1691
}
1692
 
1693
static void
1694
note_reg_clobber (int r)
1695
{
1696
  if (sched_deps_info->note_reg_clobber)
1697
    sched_deps_info->note_reg_clobber (r);
1698
}
1699
 
1700
static void
1701
note_mem_dep (rtx m1, rtx m2, rtx e, ds_t ds)
1702
{
1703
  if (sched_deps_info->note_mem_dep)
1704
    sched_deps_info->note_mem_dep (m1, m2, e, ds);
1705
}
1706
 
1707
static void
1708
note_dep (rtx e, ds_t ds)
1709
{
1710
  if (sched_deps_info->note_dep)
1711
    sched_deps_info->note_dep (e, ds);
1712
}
1713
 
1714
/* Return the reg_note corresponding to DS.  */
1715
enum reg_note
1716
ds_to_dt (ds_t ds)
1717
{
1718
  if (ds & DEP_TRUE)
1719
    return REG_DEP_TRUE;
1720
  else if (ds & DEP_OUTPUT)
1721
    return REG_DEP_OUTPUT;
1722
  else
1723
    {
1724
      gcc_assert (ds & DEP_ANTI);
1725
      return REG_DEP_ANTI;
1726
    }
1727
}
1728
 
1729
 
1730
 
1731
/* Functions for computation of info needed for register pressure
1732
   sensitive insn scheduling.  */
1733
 
1734
 
1735
/* Allocate and return reg_use_data structure for REGNO and INSN.  */
1736
static struct reg_use_data *
1737
create_insn_reg_use (int regno, rtx insn)
1738
{
1739
  struct reg_use_data *use;
1740
 
1741
  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1742
  use->regno = regno;
1743
  use->insn = insn;
1744
  use->next_insn_use = INSN_REG_USE_LIST (insn);
1745
  INSN_REG_USE_LIST (insn) = use;
1746
  return use;
1747
}
1748
 
1749
/* Allocate and return reg_set_data structure for REGNO and INSN.  */
1750
static struct reg_set_data *
1751
create_insn_reg_set (int regno, rtx insn)
1752
{
1753
  struct reg_set_data *set;
1754
 
1755
  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1756
  set->regno = regno;
1757
  set->insn = insn;
1758
  set->next_insn_set = INSN_REG_SET_LIST (insn);
1759
  INSN_REG_SET_LIST (insn) = set;
1760
  return set;
1761
}
1762
 
1763
/* Set up insn register uses for INSN and dependency context DEPS.  */
1764
static void
1765
setup_insn_reg_uses (struct deps_desc *deps, rtx insn)
1766
{
1767
  unsigned i;
1768
  reg_set_iterator rsi;
1769
  rtx list;
1770
  struct reg_use_data *use, *use2, *next;
1771
  struct deps_reg *reg_last;
1772
 
1773
  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
1774
    {
1775
      if (i < FIRST_PSEUDO_REGISTER
1776
          && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
1777
        continue;
1778
 
1779
      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
1780
          && ! REGNO_REG_SET_P (reg_pending_sets, i)
1781
          && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
1782
        /* Ignore a use that is not dying.  */
1783
        continue;
1784
 
1785
      use = create_insn_reg_use (i, insn);
1786
      use->next_regno_use = use;
1787
      reg_last = &deps->reg_last[i];
1788
 
1789
      /* Create the cyclic list of uses.  */
1790
      for (list = reg_last->uses; list; list = XEXP (list, 1))
1791
        {
1792
          use2 = create_insn_reg_use (i, XEXP (list, 0));
1793
          next = use->next_regno_use;
1794
          use->next_regno_use = use2;
1795
          use2->next_regno_use = next;
1796
        }
1797
    }
1798
}
1799
 
1800
/* Register pressure info for the currently processed insn.  */
1801
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1802
 
1803
/* Return TRUE if INSN has a use structure for REGNO.  */
1804
static bool
1805
insn_use_p (rtx insn, int regno)
1806
{
1807
  struct reg_use_data *use;
1808
 
1809
  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
1810
    if (use->regno == regno)
1811
      return true;
1812
  return false;
1813
}
1814
 
1815
/* Update the register pressure info after the birth of pseudo register REGNO
1816
   in INSN.  CLOBBER_P and UNUSED_P say, respectively, that the register
1817
   is only clobbered, or is set but unused, after the insn.  */
1818
static void
1819
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
1820
{
1821
  int incr, new_incr;
1822
  enum reg_class cl;
1823
 
1824
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1825
  cl = sched_regno_cover_class[regno];
1826
  if (cl != NO_REGS)
1827
    {
1828
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1829
      if (clobber_p)
1830
        {
1831
          new_incr = reg_pressure_info[cl].clobber_increase + incr;
1832
          reg_pressure_info[cl].clobber_increase = new_incr;
1833
        }
1834
      else if (unused_p)
1835
        {
1836
          new_incr = reg_pressure_info[cl].unused_set_increase + incr;
1837
          reg_pressure_info[cl].unused_set_increase = new_incr;
1838
        }
1839
      else
1840
        {
1841
          new_incr = reg_pressure_info[cl].set_increase + incr;
1842
          reg_pressure_info[cl].set_increase = new_incr;
1843
          if (! insn_use_p (insn, regno))
1844
            reg_pressure_info[cl].change += incr;
1845
          create_insn_reg_set (regno, insn);
1846
        }
1847
      gcc_assert (new_incr < (1 << INCREASE_BITS));
1848
    }
1849
}
1850
 
1851
/* Like mark_insn_pseudo_birth except that NREGS says how many
1852
   hard registers are involved in the birth.  */
1853
static void
1854
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
1855
                            bool clobber_p, bool unused_p)
1856
{
1857
  enum reg_class cl;
1858
  int new_incr, last = regno + nregs;
1859
 
1860
  while (regno < last)
1861
    {
1862
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1863
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1864
        {
1865
          cl = sched_regno_cover_class[regno];
1866
          if (cl != NO_REGS)
1867
            {
1868
              if (clobber_p)
1869
                {
1870
                  new_incr = reg_pressure_info[cl].clobber_increase + 1;
1871
                  reg_pressure_info[cl].clobber_increase = new_incr;
1872
                }
1873
              else if (unused_p)
1874
                {
1875
                  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
1876
                  reg_pressure_info[cl].unused_set_increase = new_incr;
1877
                }
1878
              else
1879
                {
1880
                  new_incr = reg_pressure_info[cl].set_increase + 1;
1881
                  reg_pressure_info[cl].set_increase = new_incr;
1882
                  if (! insn_use_p (insn, regno))
1883
                    reg_pressure_info[cl].change += 1;
1884
                  create_insn_reg_set (regno, insn);
1885
                }
1886
              gcc_assert (new_incr < (1 << INCREASE_BITS));
1887
            }
1888
        }
1889
      regno++;
1890
    }
1891
}
1892
 
1893
/* Update the register pressure info after the birth of pseudo or hard
1894
   register REG in INSN.  CLOBBER_P and UNUSED_P say, respectively,
1895
   that the register is only clobbered, or is set but unused, after the
1896
   insn.  */
1897
static void
1898
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
1899
{
1900
  int regno;
1901
 
1902
  if (GET_CODE (reg) == SUBREG)
1903
    reg = SUBREG_REG (reg);
1904
 
1905
  if (! REG_P (reg))
1906
    return;
1907
 
1908
  regno = REGNO (reg);
1909
  if (regno < FIRST_PSEUDO_REGISTER)
1910
    mark_insn_hard_regno_birth (insn, regno,
1911
                                hard_regno_nregs[regno][GET_MODE (reg)],
1912
                                clobber_p, unused_p);
1913
  else
1914
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
1915
}
1916
 
1917
/* Update the register pressure info after death of pseudo register
1918
   REGNO.  */
1919
static void
1920
mark_pseudo_death (int regno)
1921
{
1922
  int incr;
1923
  enum reg_class cl;
1924
 
1925
  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
1926
  cl = sched_regno_cover_class[regno];
1927
  if (cl != NO_REGS)
1928
    {
1929
      incr = ira_reg_class_nregs[cl][PSEUDO_REGNO_MODE (regno)];
1930
      reg_pressure_info[cl].change -= incr;
1931
    }
1932
}
1933
 
1934
/* Like mark_pseudo_death except that NREGS says how many hard
1935
   registers are involved in the death.  */
1936
static void
1937
mark_hard_regno_death (int regno, int nregs)
1938
{
1939
  enum reg_class cl;
1940
  int last = regno + nregs;
1941
 
1942
  while (regno < last)
1943
    {
1944
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
1945
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
1946
        {
1947
          cl = sched_regno_cover_class[regno];
1948
          if (cl != NO_REGS)
1949
            reg_pressure_info[cl].change -= 1;
1950
        }
1951
      regno++;
1952
    }
1953
}
1954
 
1955
/* Update the register pressure info after death of pseudo or hard
1956
   register REG.  */
1957
static void
1958
mark_reg_death (rtx reg)
1959
{
1960
  int regno;
1961
 
1962
  if (GET_CODE (reg) == SUBREG)
1963
    reg = SUBREG_REG (reg);
1964
 
1965
  if (! REG_P (reg))
1966
    return;
1967
 
1968
  regno = REGNO (reg);
1969
  if (regno < FIRST_PSEUDO_REGISTER)
1970
    mark_hard_regno_death (regno, hard_regno_nregs[regno][GET_MODE (reg)]);
1971
  else
1972
    mark_pseudo_death (regno);
1973
}
1974
 
1975
/* Process SETTER of REG.  DATA is an insn containing the setter.  */
1976
static void
1977
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
1978
{
1979
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
1980
    return;
1981
  mark_insn_reg_birth
1982
    ((rtx) data, reg, false,
1983
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
1984
}
1985
 
1986
/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
1987
static void
1988
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
1989
{
1990
  if (GET_CODE (setter) == CLOBBER)
1991
    mark_insn_reg_birth ((rtx) data, reg, true, false);
1992
}
1993
 
1994
/* Set up reg pressure info related to INSN.  */
1995
static void
1996
setup_insn_reg_pressure_info (rtx insn)
1997
{
1998
  int i, len;
1999
  enum reg_class cl;
2000
  static struct reg_pressure_data *pressure_info;
2001
  rtx link;
2002
 
2003
  gcc_assert (sched_pressure_p);
2004
 
2005
  if (! INSN_P (insn))
2006
    return;
2007
 
2008
  for (i = 0; i < ira_reg_class_cover_size; i++)
2009
    {
2010
      cl = ira_reg_class_cover[i];
2011
      reg_pressure_info[cl].clobber_increase = 0;
2012
      reg_pressure_info[cl].set_increase = 0;
2013
      reg_pressure_info[cl].unused_set_increase = 0;
2014
      reg_pressure_info[cl].change = 0;
2015
    }
2016
 
2017
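  /* Walk the stores in the pattern twice: first account for CLOBBERs,
     then for real SETs; REG_INC side effects are counted as sets below.  */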
  note_stores (PATTERN (insn), mark_insn_reg_clobber, insn);
2018
 
2019
  note_stores (PATTERN (insn), mark_insn_reg_store, insn);
2020
 
2021
#ifdef AUTO_INC_DEC
2022
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2023
    if (REG_NOTE_KIND (link) == REG_INC)
2024
      mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);
2025
#endif
2026
 
2027
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2028
    if (REG_NOTE_KIND (link) == REG_DEAD)
2029
      mark_reg_death (XEXP (link, 0));
2030
 
2031
  len = sizeof (struct reg_pressure_data) * ira_reg_class_cover_size;
2032
  pressure_info
2033
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
2034
  INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_reg_class_cover_size
2035
                                                  * sizeof (int), 1);
2036
  for (i = 0; i < ira_reg_class_cover_size; i++)
2037
    {
2038
      cl = ira_reg_class_cover[i];
2039
      pressure_info[i].clobber_increase
2040
        = reg_pressure_info[cl].clobber_increase;
2041
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
2042
      pressure_info[i].unused_set_increase
2043
        = reg_pressure_info[cl].unused_set_increase;
2044
      pressure_info[i].change = reg_pressure_info[cl].change;
2045
    }
2046
}
2047
 
2048
 
2049
 
2050
 
2051
/* Internal variable for the sched_analyze_[12] () functions.
2052
   If it is true, sched_analyze_[12] is looking at the toplevel
2053
   (outermost) SET.  */
2054
static bool can_start_lhs_rhs_p;
2055
 
2056
/* Extend reg info for the deps context DEPS given that
2057
   we have just generated a register numbered REGNO.  */
2058
static void
2059
extend_deps_reg_info (struct deps_desc *deps, int regno)
2060
{
2061
  int max_regno = regno + 1;
2062
 
2063
  gcc_assert (!reload_completed);
2064
 
2065
  /* In a readonly context, it would not hurt to extend info,
2066
     but it should not be needed.  */
2067
  if (reload_completed && deps->readonly)
2068
    {
2069
      deps->max_reg = max_regno;
2070
      return;
2071
    }
2072
 
2073
  if (max_regno > deps->max_reg)
2074
    {
2075
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
2076
                                   max_regno);
2077
      memset (&deps->reg_last[deps->max_reg],
2078
              0, (max_regno - deps->max_reg)
2079
              * sizeof (struct deps_reg));
2080
      deps->max_reg = max_regno;
2081
    }
2082
}
2083
 
2084
/* Extends REG_INFO_P if needed.  */
2085
void
2086
maybe_extend_reg_info_p (void)
2087
{
2088
  /* Extend REG_INFO_P, if needed.  */
2089
  if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2090
    {
2091
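      /* Grow with some slack so that each newly created pseudo does not
         force another reallocation.  */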
      size_t new_reg_info_p_size = max_regno + 128;
2092
 
2093
      gcc_assert (!reload_completed && sel_sched_p ());
2094
 
2095
      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2096
                                                    new_reg_info_p_size,
2097
                                                    reg_info_p_size,
2098
                                                    sizeof (*reg_info_p));
2099
      reg_info_p_size = new_reg_info_p_size;
2100
    }
2101
}
2102
 
2103
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
2104
   The type of the reference is specified by REF and can be SET,
2105
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */
2106
 
2107
static void
2108
sched_analyze_reg (struct deps_desc *deps, int regno, enum machine_mode mode,
2109
                   enum rtx_code ref, rtx insn)
2110
{
2111
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
2112
  if (!reload_completed && sel_sched_p ()
2113
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
2114
    extend_deps_reg_info (deps, regno);
2115
 
2116
  maybe_extend_reg_info_p ();
2117
 
2118
  /* A hard reg in a wide mode may really be multiple registers.
2119
     If so, mark all of them just like the first.  */
2120
  if (regno < FIRST_PSEUDO_REGISTER)
2121
    {
2122
      int i = hard_regno_nregs[regno][mode];
2123
      if (ref == SET)
2124
        {
2125
          while (--i >= 0)
2126
            note_reg_set (regno + i);
2127
        }
2128
      else if (ref == USE)
2129
        {
2130
          while (--i >= 0)
2131
            note_reg_use (regno + i);
2132
        }
2133
      else
2134
        {
2135
          while (--i >= 0)
2136
            note_reg_clobber (regno + i);
2137
        }
2138
    }
2139
 
2140
  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
2141
     it does not reload.  Ignore these as they have served their
2142
     purpose already.  */
2143
  else if (regno >= deps->max_reg)
2144
    {
2145
      enum rtx_code code = GET_CODE (PATTERN (insn));
2146
      gcc_assert (code == USE || code == CLOBBER);
2147
    }
2148
 
2149
  else
2150
    {
2151
      if (ref == SET)
2152
        note_reg_set (regno);
2153
      else if (ref == USE)
2154
        note_reg_use (regno);
2155
      else
2156
        note_reg_clobber (regno);
2157
 
2158
      /* Pseudos that are REG_EQUIV to something may be replaced
2159
         by that during reloading.  We need only add dependencies for
2160
        the address in the REG_EQUIV note.  */
2161
      if (!reload_completed && get_reg_known_equiv_p (regno))
2162
        {
2163
          rtx t = get_reg_known_value (regno);
2164
          if (MEM_P (t))
2165
            sched_analyze_2 (deps, XEXP (t, 0), insn);
2166
        }
2167
 
2168
      /* Don't let it cross a call after scheduling if it doesn't
2169
         already cross one.  */
2170
      if (REG_N_CALLS_CROSSED (regno) == 0)
2171
        {
2172
          if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
2173
            deps->sched_before_next_call
2174
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
2175
          else
2176
            add_dependence_list (insn, deps->last_function_call, 1,
2177
                                 REG_DEP_ANTI);
2178
        }
2179
    }
2180
}
2181
 
2182
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
2183
   rtx, X, creating all dependencies generated by the write to the
2184
   destination of X, and reads of everything mentioned.  */
2185
 
2186
static void
2187
sched_analyze_1 (struct deps_desc *deps, rtx x, rtx insn)
2188
{
2189
  rtx dest = XEXP (x, 0);
2190
  enum rtx_code code = GET_CODE (x);
2191
  bool cslr_p = can_start_lhs_rhs_p;
2192
 
2193
  can_start_lhs_rhs_p = false;
2194
 
2195
  gcc_assert (dest);
2196
  if (dest == 0)
2197
    return;
2198
 
2199
  if (cslr_p && sched_deps_info->start_lhs)
2200
    sched_deps_info->start_lhs (dest);
2201
 
2202
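  /* A PARALLEL destination sets several pieces at once; analyze each
     piece as if it were clobbered.  */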
  if (GET_CODE (dest) == PARALLEL)
2203
    {
2204
      int i;
2205
 
2206
      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2207
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
2208
          sched_analyze_1 (deps,
2209
                           gen_rtx_CLOBBER (VOIDmode,
2210
                                            XEXP (XVECEXP (dest, 0, i), 0)),
2211
                           insn);
2212
 
2213
      if (cslr_p && sched_deps_info->finish_lhs)
2214
        sched_deps_info->finish_lhs ();
2215
 
2216
      if (code == SET)
2217
        {
2218
          can_start_lhs_rhs_p = cslr_p;
2219
 
2220
          sched_analyze_2 (deps, SET_SRC (x), insn);
2221
 
2222
          can_start_lhs_rhs_p = false;
2223
        }
2224
 
2225
      return;
2226
    }
2227
 
2228
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
2229
         || GET_CODE (dest) == ZERO_EXTRACT)
2230
    {
2231
      if (GET_CODE (dest) == STRICT_LOW_PART
2232
         || GET_CODE (dest) == ZERO_EXTRACT
2233
         || df_read_modify_subreg_p (dest))
2234
        {
2235
          /* These both read and modify the result.  We must handle
2236
             them as writes to get proper dependencies for following
2237
             instructions.  We must handle them as reads to get proper
2238
             dependencies from this to previous instructions.
2239
             Thus we need to call sched_analyze_2.  */
2240
 
2241
          sched_analyze_2 (deps, XEXP (dest, 0), insn);
2242
        }
2243
      if (GET_CODE (dest) == ZERO_EXTRACT)
2244
        {
2245
          /* The second and third arguments are values read by this insn.  */
2246
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
2247
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
2248
        }
2249
      dest = XEXP (dest, 0);
2250
    }
2251
 
2252
  if (REG_P (dest))
2253
    {
2254
      int regno = REGNO (dest);
2255
      enum machine_mode mode = GET_MODE (dest);
2256
 
2257
      sched_analyze_reg (deps, regno, mode, code, insn);
2258
 
2259
#ifdef STACK_REGS
2260
      /* Treat all writes to a stack register as modifying the TOS.  */
2261
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2262
        {
2263
          int nregs;
2264
 
2265
          /* Avoid analyzing the same register twice.  */
2266
          if (regno != FIRST_STACK_REG)
2267
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
2268
 
2269
          nregs = hard_regno_nregs[FIRST_STACK_REG][mode];
2270
          while (--nregs >= 0)
2271
            SET_HARD_REG_BIT (implicit_reg_pending_uses,
2272
                              FIRST_STACK_REG + nregs);
2273
        }
2274
#endif
2275
    }
2276
  else if (MEM_P (dest))
2277
    {
2278
      /* Writing memory.  */
2279
      rtx t = dest;
2280
 
2281
      if (sched_deps_info->use_cselib)
2282
        {
2283
          enum machine_mode address_mode
2284
            = targetm.addr_space.address_mode (MEM_ADDR_SPACE (dest));
2285
 
2286
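          /* Work on a shallow copy of the MEM and substitute cselib values
             into its address, so that equivalent addresses are recognized.  */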
          t = shallow_copy_rtx (dest);
2287
          cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2288
          XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2289
        }
2290
      t = canon_rtx (t);
2291
 
2292
      /* Pending lists can't get larger with a readonly context.  */
2293
      if (!deps->readonly
2294
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
2295
              > MAX_PENDING_LIST_LENGTH))
2296
        {
2297
          /* Flush all pending reads and writes to prevent the pending lists
2298
             from getting any larger.  Insn scheduling runs too slowly when
2299
             these lists get long.  When compiling GCC with itself,
2300
             this flush occurs 8 times for sparc, and 10 times for m88k using
2301
             the default value of 32.  */
2302
          flush_pending_lists (deps, insn, false, true);
2303
        }
2304
      else
2305
        {
2306
          rtx pending, pending_mem;
2307
 
2308
          pending = deps->pending_read_insns;
2309
          pending_mem = deps->pending_read_mems;
2310
          while (pending)
2311
            {
2312
              if (anti_dependence (XEXP (pending_mem, 0), t)
2313
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2314
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2315
                              DEP_ANTI);
2316
 
2317
              pending = XEXP (pending, 1);
2318
              pending_mem = XEXP (pending_mem, 1);
2319
            }
2320
 
2321
          pending = deps->pending_write_insns;
2322
          pending_mem = deps->pending_write_mems;
2323
          while (pending)
2324
            {
2325
              if (output_dependence (XEXP (pending_mem, 0), t)
2326
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2327
                note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2328
                              DEP_OUTPUT);
2329
 
2330
              pending = XEXP (pending, 1);
2331
              pending_mem = XEXP (pending_mem, 1);
2332
            }
2333
 
2334
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2335
                               REG_DEP_ANTI);
2336
 
2337
          if (!deps->readonly)
2338
            add_insn_mem_dependence (deps, false, insn, dest);
2339
        }
2340
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
2341
    }
2342
 
2343
  if (cslr_p && sched_deps_info->finish_lhs)
2344
    sched_deps_info->finish_lhs ();
2345
 
2346
  /* Analyze reads.  */
2347
  if (GET_CODE (x) == SET)
2348
    {
2349
      can_start_lhs_rhs_p = cslr_p;
2350
 
2351
      sched_analyze_2 (deps, SET_SRC (x), insn);
2352
 
2353
      can_start_lhs_rhs_p = false;
2354
    }
2355
}
2356
 
2357
/* Analyze the uses of memory and registers in rtx X in INSN.  */
2358
static void
2359
sched_analyze_2 (struct deps_desc *deps, rtx x, rtx insn)
2360
{
2361
  int i;
2362
  int j;
2363
  enum rtx_code code;
2364
  const char *fmt;
2365
  bool cslr_p = can_start_lhs_rhs_p;
2366
 
2367
  can_start_lhs_rhs_p = false;
2368
 
2369
  gcc_assert (x);
2370
  if (x == 0)
2371
    return;
2372
 
2373
  if (cslr_p && sched_deps_info->start_rhs)
2374
    sched_deps_info->start_rhs (x);
2375
 
2376
  code = GET_CODE (x);
2377
 
2378
  switch (code)
2379
    {
2380
    case CONST_INT:
2381
    case CONST_DOUBLE:
2382
    case CONST_FIXED:
2383
    case CONST_VECTOR:
2384
    case SYMBOL_REF:
2385
    case CONST:
2386
    case LABEL_REF:
2387
      /* Ignore constants.  */
2388
      if (cslr_p && sched_deps_info->finish_rhs)
2389
        sched_deps_info->finish_rhs ();
2390
 
2391
      return;
2392
 
2393
#ifdef HAVE_cc0
2394
    case CC0:
2395
      /* User of CC0 depends on immediately preceding insn.  */
2396
      SCHED_GROUP_P (insn) = 1;
2397
       /* Don't move CC0 setter to another block (it can set up the
2398
        same flag for previous CC0 users which is safe).  */
2399
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;
2400
 
2401
      if (cslr_p && sched_deps_info->finish_rhs)
2402
        sched_deps_info->finish_rhs ();
2403
 
2404
      return;
2405
#endif
2406
 
2407
    case REG:
2408
      {
2409
        int regno = REGNO (x);
2410
        enum machine_mode mode = GET_MODE (x);
2411
 
2412
        sched_analyze_reg (deps, regno, mode, USE, insn);
2413
 
2414
#ifdef STACK_REGS
2415
      /* Treat all reads of a stack register as modifying the TOS.  */
2416
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
2417
        {
2418
          /* Avoid analyzing the same register twice.  */
2419
          if (regno != FIRST_STACK_REG)
2420
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
2421
          sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
2422
        }
2423
#endif
2424
 
2425
        if (cslr_p && sched_deps_info->finish_rhs)
2426
          sched_deps_info->finish_rhs ();
2427
 
2428
        return;
2429
      }
2430
 
2431
    case MEM:
2432
      {
2433
        /* Reading memory.  */
2434
        rtx u;
2435
        rtx pending, pending_mem;
2436
        rtx t = x;
2437
 
2438
        if (sched_deps_info->use_cselib)
2439
          {
2440
            enum machine_mode address_mode
2441
              = targetm.addr_space.address_mode (MEM_ADDR_SPACE (t));
2442
 
2443
            t = shallow_copy_rtx (t);
2444
            cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1, insn);
2445
            XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
2446
          }
2447
 
2448
        if (!DEBUG_INSN_P (insn))
2449
          {
2450
            t = canon_rtx (t);
2451
            pending = deps->pending_read_insns;
2452
            pending_mem = deps->pending_read_mems;
2453
            while (pending)
2454
              {
2455
                if (read_dependence (XEXP (pending_mem, 0), t)
2456
                    && ! sched_insns_conditions_mutex_p (insn,
2457
                                                         XEXP (pending, 0)))
2458
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2459
                                DEP_ANTI);
2460
 
2461
                pending = XEXP (pending, 1);
2462
                pending_mem = XEXP (pending_mem, 1);
2463
              }
2464
 
2465
            pending = deps->pending_write_insns;
2466
            pending_mem = deps->pending_write_mems;
2467
            while (pending)
2468
              {
2469
                if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
2470
                                     t, rtx_varies_p)
2471
                    && ! sched_insns_conditions_mutex_p (insn,
2472
                                                         XEXP (pending, 0)))
2473
                  note_mem_dep (t, XEXP (pending_mem, 0), XEXP (pending, 0),
2474
                                sched_deps_info->generate_spec_deps
2475
                                ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);
2476
 
2477
                pending = XEXP (pending, 1);
2478
                pending_mem = XEXP (pending_mem, 1);
2479
              }
2480
 
2481
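            /* This read must not move above any pending memory flush; for
               flushes caused by jumps, only reads that may trap are
               constrained, possibly via a control-speculative dependence.  */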
            for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2482
              {
2483
                if (! JUMP_P (XEXP (u, 0)))
2484
                  add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2485
                else if (deps_may_trap_p (x))
2486
                  {
2487
                    if ((sched_deps_info->generate_spec_deps)
2488
                        && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
2489
                      {
2490
                        ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
2491
                                                MAX_DEP_WEAK);
2492
 
2493
                        note_dep (XEXP (u, 0), ds);
2494
                      }
2495
                    else
2496
                      add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2497
                  }
2498
              }
2499
          }
2500
 
2501
        /* Always add these dependencies to pending_reads, since
2502
           this insn may be followed by a write.  */
2503
        if (!deps->readonly)
2504
          add_insn_mem_dependence (deps, true, insn, x);
2505
 
2506
        sched_analyze_2 (deps, XEXP (x, 0), insn);
2507
 
2508
        if (cslr_p && sched_deps_info->finish_rhs)
2509
          sched_deps_info->finish_rhs ();
2510
 
2511
        return;
2512
      }
2513
 
2514
    /* Force pending stores to memory in case a trap handler needs them.  */
2515
    case TRAP_IF:
2516
      flush_pending_lists (deps, insn, true, false);
2517
      break;
2518
 
2519
    case PREFETCH:
2520
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
2521
        reg_pending_barrier = TRUE_BARRIER;
2522
      break;
2523
 
2524
    case UNSPEC_VOLATILE:
2525
      flush_pending_lists (deps, insn, true, true);
2526
      /* FALLTHRU */
2527
 
2528
    case ASM_OPERANDS:
2529
    case ASM_INPUT:
2530
      {
2531
        /* Traditional and volatile asm instructions must be considered to use
2532
           and clobber all hard registers, all pseudo-registers and all of
2533
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.
2534
 
2535
           Consider for instance a volatile asm that changes the fpu rounding
2536
           mode.  An insn should not be moved across this even if it only uses
2537
           pseudo-regs because it might give an incorrectly rounded result.  */
2538
        if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
2539
          reg_pending_barrier = TRUE_BARRIER;
2540
 
2541
        /* For all ASM_OPERANDS, we must traverse the vector of input operands.
2542
           We cannot just fall through here since then we would be confused
2543
           by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
2544
           a traditional asm unlike its normal usage.  */
2545
 
2546
        if (code == ASM_OPERANDS)
2547
          {
2548
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
2549
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
2550
 
2551
            if (cslr_p && sched_deps_info->finish_rhs)
2552
              sched_deps_info->finish_rhs ();
2553
 
2554
            return;
2555
          }
2556
        break;
2557
      }
2558
 
2559
    case PRE_DEC:
2560
    case POST_DEC:
2561
    case PRE_INC:
2562
    case POST_INC:
2563
      /* These both read and modify the result.  We must handle them as writes
2564
         to get proper dependencies for following instructions.  We must handle
2565
         them as reads to get proper dependencies from this to previous
2566
         instructions.  Thus we need to pass them to both sched_analyze_1
2567
         and sched_analyze_2.  We must call sched_analyze_2 first in order
2568
         to get the proper antecedent for the read.  */
2569
      sched_analyze_2 (deps, XEXP (x, 0), insn);
2570
      sched_analyze_1 (deps, x, insn);
2571
 
2572
      if (cslr_p && sched_deps_info->finish_rhs)
2573
        sched_deps_info->finish_rhs ();
2574
 
2575
      return;
2576
 
2577
    case POST_MODIFY:
2578
    case PRE_MODIFY:
2579
      /* op0 = op0 + op1 */
2580
      sched_analyze_2 (deps, XEXP (x, 0), insn);
2581
      sched_analyze_2 (deps, XEXP (x, 1), insn);
2582
      sched_analyze_1 (deps, x, insn);
2583
 
2584
      if (cslr_p && sched_deps_info->finish_rhs)
2585
        sched_deps_info->finish_rhs ();
2586
 
2587
      return;
2588
 
2589
    default:
2590
      break;
2591
    }
2592
 
2593
  /* Other cases: walk the insn.  */
2594
  fmt = GET_RTX_FORMAT (code);
2595
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2596
    {
2597
      if (fmt[i] == 'e')
2598
        sched_analyze_2 (deps, XEXP (x, i), insn);
2599
      else if (fmt[i] == 'E')
2600
        for (j = 0; j < XVECLEN (x, i); j++)
2601
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
2602
    }
2603
 
2604
  if (cslr_p && sched_deps_info->finish_rhs)
2605
    sched_deps_info->finish_rhs ();
2606
}
2607
 
2608
/* Analyze an INSN with pattern X to find all dependencies.  */
2609
static void
2610
sched_analyze_insn (struct deps_desc *deps, rtx x, rtx insn)
2611
{
2612
  RTX_CODE code = GET_CODE (x);
2613
  rtx link;
2614
  unsigned i;
2615
  reg_set_iterator rsi;
2616
 
2617
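  /* Before reload, ask IRA which hard registers the insn's alternatives
     implicitly set, and record them as pending implicit clobbers.  */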
  if (! reload_completed)
2618
    {
2619
      HARD_REG_SET temp;
2620
 
2621
      extract_insn (insn);
2622
      preprocess_constraints ();
2623
      ira_implicitly_set_insn_hard_regs (&temp);
2624
      AND_COMPL_HARD_REG_SET (temp, ira_no_alloc_regs);
2625
      IOR_HARD_REG_SET (implicit_reg_pending_clobbers, temp);
2626
    }
2627
 
2628
  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2629
                         && code == SET);
2630
 
2631
  if (may_trap_p (x))
2632
    /* Avoid moving trapping instructions across function calls that might
2633
       not always return.  */
2634
    add_dependence_list (insn, deps->last_function_call_may_noreturn,
2635
                         1, REG_DEP_ANTI);
2636
 
2637
  if (code == COND_EXEC)
2638
    {
2639
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2640
 
2641
      /* ??? Should be recording conditions so we reduce the number of
2642
         false dependencies.  */
2643
      x = COND_EXEC_CODE (x);
2644
      code = GET_CODE (x);
2645
    }
2646
  if (code == SET || code == CLOBBER)
2647
    {
2648
      sched_analyze_1 (deps, x, insn);
2649
 
2650
      /* Bare clobber insns are used for letting life analysis, reg-stack
2651
         and others know that a value is dead.  Depend on the last call
2652
         instruction so that reg-stack won't get confused.  */
2653
      if (code == CLOBBER)
2654
        add_dependence_list (insn, deps->last_function_call, 1,
2655
                             REG_DEP_OUTPUT);
2656
    }
2657
  else if (code == PARALLEL)
2658
    {
2659
      for (i = XVECLEN (x, 0); i--;)
2660
        {
2661
          rtx sub = XVECEXP (x, 0, i);
2662
          code = GET_CODE (sub);
2663
 
2664
          if (code == COND_EXEC)
2665
            {
2666
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2667
              sub = COND_EXEC_CODE (sub);
2668
              code = GET_CODE (sub);
2669
            }
2670
          if (code == SET || code == CLOBBER)
2671
            sched_analyze_1 (deps, sub, insn);
2672
          else
2673
            sched_analyze_2 (deps, sub, insn);
2674
        }
2675
    }
2676
  else
2677
    sched_analyze_2 (deps, x, insn);
2678
 
2679
  /* Mark registers CLOBBERED or used by the called function.  */
2680
  if (CALL_P (insn))
2681
    {
2682
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2683
        {
2684
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2685
            sched_analyze_1 (deps, XEXP (link, 0), insn);
2686
          else
2687
            sched_analyze_2 (deps, XEXP (link, 0), insn);
2688
        }
2689
      if (find_reg_note (insn, REG_SETJMP, NULL))
2690
        reg_pending_barrier = MOVE_BARRIER;
2691
    }
2692
 
2693
  if (JUMP_P (insn))
2694
    {
2695
      rtx next;
2696 378 julius
      next = next_nonnote_nondebug_insn (insn);
2697 280 jeremybenn
      if (next && BARRIER_P (next))
2698
        reg_pending_barrier = MOVE_BARRIER;
2699
      else
2700
        {
2701
          rtx pending, pending_mem;
2702
 
2703
          if (sched_deps_info->compute_jump_reg_dependencies)
2704
            {
2705
              regset_head tmp_uses, tmp_sets;
2706
              INIT_REG_SET (&tmp_uses);
2707
              INIT_REG_SET (&tmp_sets);
2708
 
2709
              (*sched_deps_info->compute_jump_reg_dependencies)
2710
                (insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
2711
              /* Make latency of jump equal to 0 by using anti-dependence.  */
2712
              EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
2713
                {
2714
                  struct deps_reg *reg_last = &deps->reg_last[i];
2715
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2716
                  add_dependence_list (insn, reg_last->implicit_sets,
2717
                                       0, REG_DEP_ANTI);
2718
                  add_dependence_list (insn, reg_last->clobbers, 0,
2719
                                       REG_DEP_ANTI);
2720
 
2721
                  if (!deps->readonly)
2722
                    {
2723
                      reg_last->uses_length++;
2724
                      reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2725
                    }
2726
                }
2727
              IOR_REG_SET (reg_pending_sets, &tmp_sets);
2728
 
2729
              CLEAR_REG_SET (&tmp_uses);
2730
              CLEAR_REG_SET (&tmp_sets);
2731
            }
2732
 
2733
          /* All memory writes and volatile reads must happen before the
2734
             jump.  Non-volatile reads must happen before the jump iff
2735
             the result is needed by the above register used mask.  */
2736
 
2737
          pending = deps->pending_write_insns;
2738
          pending_mem = deps->pending_write_mems;
2739
          while (pending)
2740
            {
2741
              if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2742
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2743
              pending = XEXP (pending, 1);
2744
              pending_mem = XEXP (pending_mem, 1);
2745
            }
2746
 
2747
          pending = deps->pending_read_insns;
2748
          pending_mem = deps->pending_read_mems;
2749
          while (pending)
2750
            {
2751
              if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
2752
                  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
2753
                add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
2754
              pending = XEXP (pending, 1);
2755
              pending_mem = XEXP (pending_mem, 1);
2756
            }
2757
 
2758
          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2759
                               REG_DEP_ANTI);
2760
        }
2761
    }
2762
 
2763
  /* If this instruction can throw an exception, then moving it changes
2764
     where block boundaries fall.  This is mighty confusing elsewhere.
2765
     Therefore, prevent such an instruction from being moved.  Same for
2766
     non-jump instructions that define block boundaries.
2767
     ??? Unclear whether this is still necessary in EBB mode.  If not,
2768
     add_branch_dependences should be adjusted for RGN mode instead.  */
2769
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
2770
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
2771
    reg_pending_barrier = MOVE_BARRIER;
2772
 
2773
  if (sched_pressure_p)
2774
    {
2775
      setup_insn_reg_uses (deps, insn);
2776
      setup_insn_reg_pressure_info (insn);
2777
    }
2778
 
2779
  /* Add register dependencies for insn.  */
2780
  if (DEBUG_INSN_P (insn))
2781
    {
2782
      rtx prev = deps->last_debug_insn;
2783
      rtx u;
2784
 
2785
      if (!deps->readonly)
2786
        deps->last_debug_insn = insn;
2787
 
2788
      if (prev)
2789
        add_dependence (insn, prev, REG_DEP_ANTI);
2790
 
2791
      add_dependence_list (insn, deps->last_function_call, 1,
2792
                           REG_DEP_ANTI);
2793
 
2794
      for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
2795
        if (! JUMP_P (XEXP (u, 0))
2796
            || !sel_sched_p ())
2797
          add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);
2798
 
2799
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2800
        {
2801
          struct deps_reg *reg_last = &deps->reg_last[i];
2802
          add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI);
2803
          add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI);
2804
 
2805
          if (!deps->readonly)
2806
            reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2807
        }
2808
      CLEAR_REG_SET (reg_pending_uses);
2809
 
2810
      /* Quite often, a debug insn will refer to stuff in the
2811
         previous instruction, but the reason we want this
2812
         dependency here is to make sure the scheduler doesn't
2813
         gratuitously move a debug insn ahead.  This could dirty
2814
         DF flags and cause additional analysis that wouldn't have
2815
         occurred in compilation without debug insns, and such
2816
         additional analysis can modify the generated code.  */
2817
      prev = PREV_INSN (insn);
2818
 
2819
      if (prev && NONDEBUG_INSN_P (prev))
2820
        add_dependence (insn, prev, REG_DEP_ANTI);
2821
    }
2822
  else
2823
    {
2824
      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
2825
        {
2826
          struct deps_reg *reg_last = &deps->reg_last[i];
2827
          add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2828
          add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI);
2829
          add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2830
 
2831
          if (!deps->readonly)
2832
            {
2833
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2834
              reg_last->uses_length++;
2835
            }
2836
        }
2837
 
2838
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2839
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
2840
          {
2841
            struct deps_reg *reg_last = &deps->reg_last[i];
2842
            add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
2843
            add_dependence_list (insn, reg_last->implicit_sets, 0,
2844
                                 REG_DEP_ANTI);
2845
            add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
2846
 
2847
            if (!deps->readonly)
2848
              {
2849
                reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
2850
                reg_last->uses_length++;
2851
              }
2852
          }
2853
 
2854
      /* If the current insn is conditional, we can't free any
2855
         of the lists.  */
2856
      if (sched_has_condition_p (insn))
2857
        {
2858
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2859
            {
2860
              struct deps_reg *reg_last = &deps->reg_last[i];
2861
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2862
              add_dependence_list (insn, reg_last->implicit_sets, 0,
2863
                                   REG_DEP_ANTI);
2864
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2865
 
2866
              if (!deps->readonly)
2867
                {
2868
                  reg_last->clobbers
2869
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
2870
                  reg_last->clobbers_length++;
2871
                }
2872
            }
2873
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2874
            {
2875
              struct deps_reg *reg_last = &deps->reg_last[i];
2876
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2877
              add_dependence_list (insn, reg_last->implicit_sets, 0,
2878
                                   REG_DEP_ANTI);
2879
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
2880
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2881
 
2882
              if (!deps->readonly)
2883
                {
2884
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2885
                  SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2886
                }
2887
            }
2888
        }
2889
      else
2890
        {
2891
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
2892
            {
2893
              struct deps_reg *reg_last = &deps->reg_last[i];
2894
              if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
2895
                  || reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
2896
                {
2897
                  add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2898
                                                REG_DEP_OUTPUT);
2899
                  add_dependence_list_and_free (deps, insn,
2900
                                                &reg_last->implicit_sets, 0,
2901
                                                REG_DEP_ANTI);
2902
                  add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2903
                                                REG_DEP_ANTI);
2904
                  add_dependence_list_and_free
2905
                    (deps, insn, &reg_last->clobbers, 0, REG_DEP_OUTPUT);
2906
 
2907
                  if (!deps->readonly)
2908
                    {
2909
                      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2910
                      reg_last->clobbers_length = 0;
2911
                      reg_last->uses_length = 0;
2912
                    }
2913
                }
2914
              else
2915
                {
2916
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
2917
                  add_dependence_list (insn, reg_last->implicit_sets, 0,
2918
                                       REG_DEP_ANTI);
2919
                  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2920
                }
2921
 
2922
              if (!deps->readonly)
2923
                {
2924
                  reg_last->clobbers_length++;
2925
                  reg_last->clobbers
2926
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
2927
                }
2928
            }
2929
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
2930
            {
2931
              struct deps_reg *reg_last = &deps->reg_last[i];
2932
 
2933
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
2934
                                            REG_DEP_OUTPUT);
2935
              add_dependence_list_and_free (deps, insn,
2936
                                            &reg_last->implicit_sets,
2937
                                            0, REG_DEP_ANTI);
2938
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
2939
                                            REG_DEP_OUTPUT);
2940
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
2941
                                            REG_DEP_ANTI);
2942
 
2943
              if (!deps->readonly)
2944
                {
2945
                  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
2946
                  reg_last->uses_length = 0;
2947
                  reg_last->clobbers_length = 0;
2948
                  CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
2949
                }
2950
            }
2951
        }
2952
    }
2953
 
2954
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2955
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2956
      {
2957
        struct deps_reg *reg_last = &deps->reg_last[i];
2958
        add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
2959
        add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
2960
        add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2961
 
2962
        if (!deps->readonly)
2963
          reg_last->implicit_sets
2964
            = alloc_INSN_LIST (insn, reg_last->implicit_sets);
2965
      }
2966
 
2967
  if (!deps->readonly)
2968
    {
2969
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
2970
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
2971
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
2972
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
2973
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i)
2974
            || TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
2975
          SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
2976
 
2977
      /* Set up the pending barrier found.  */
2978
      deps->last_reg_pending_barrier = reg_pending_barrier;
2979
    }
2980
 
2981
  CLEAR_REG_SET (reg_pending_uses);
2982
  CLEAR_REG_SET (reg_pending_clobbers);
2983
  CLEAR_REG_SET (reg_pending_sets);
2984
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
2985
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
2986
 
2987
  /* Add dependencies if a scheduling barrier was found.  */
2988
  if (reg_pending_barrier)
2989
    {
2990
      /* In the case of a barrier, most of the added dependencies are not
2991
         real, so we use anti-dependence here.  */
2992
      if (sched_has_condition_p (insn))
2993
        {
2994
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
2995
            {
2996
              struct deps_reg *reg_last = &deps->reg_last[i];
2997
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
2998
              add_dependence_list (insn, reg_last->sets, 0,
2999
                                   reg_pending_barrier == TRUE_BARRIER
3000
                                   ? REG_DEP_TRUE : REG_DEP_ANTI);
3001
              add_dependence_list (insn, reg_last->implicit_sets, 0,
3002
                                   REG_DEP_ANTI);
3003
              add_dependence_list (insn, reg_last->clobbers, 0,
3004
                                   reg_pending_barrier == TRUE_BARRIER
3005
                                   ? REG_DEP_TRUE : REG_DEP_ANTI);
3006
            }
3007
        }
3008
      else
3009
        {
3010
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3011
            {
3012
              struct deps_reg *reg_last = &deps->reg_last[i];
3013
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
3014
                                            REG_DEP_ANTI);
3015
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
3016
                                            reg_pending_barrier == TRUE_BARRIER
3017
                                            ? REG_DEP_TRUE : REG_DEP_ANTI);
3018
              add_dependence_list_and_free (deps, insn,
3019
                                            &reg_last->implicit_sets, 0,
3020
                                            REG_DEP_ANTI);
3021
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
3022
                                            reg_pending_barrier == TRUE_BARRIER
3023
                                            ? REG_DEP_TRUE : REG_DEP_ANTI);
3024
 
3025
              if (!deps->readonly)
3026
                {
3027
                  reg_last->uses_length = 0;
3028
                  reg_last->clobbers_length = 0;
3029
                }
3030
            }
3031
        }
3032
 
3033
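      /* Make the barrier insn the last recorded setter of every register,
         so that every subsequent insn will depend on it.  */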
      if (!deps->readonly)
3034
        for (i = 0; i < (unsigned)deps->max_reg; i++)
3035
          {
3036
            struct deps_reg *reg_last = &deps->reg_last[i];
3037
            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3038
            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3039
          }
3040
 
3041
      /* Flush pending lists on jumps, but not on speculative checks.  */
3042
      if (JUMP_P (insn) && !(sel_sched_p ()
3043
                             && sel_insn_is_speculation_check (insn)))
3044
        flush_pending_lists (deps, insn, true, true);
3045
 
3046
      if (!deps->readonly)
3047
        CLEAR_REG_SET (&deps->reg_conditional_sets);
3048
      reg_pending_barrier = NOT_A_BARRIER;
3049
    }
3050
 
3051
  /* If a post-call group is still open, see if it should remain so.
3052
     This insn must be a simple move of a hard reg to a pseudo or
3053
     vice-versa.
3054
 
3055
     We must avoid moving these insns for correctness on
3056
     SMALL_REGISTER_CLASS machines, and for special registers like
3057
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
3058
     hard regs for all targets.  */
3059
 
3060
  if (deps->in_post_call_group_p)
3061
    {
3062
      rtx tmp, set = single_set (insn);
3063
      int src_regno, dest_regno;
3064
 
3065
      if (set == NULL)
3066
        {
3067
          if (DEBUG_INSN_P (insn))
3068
            /* We don't want to mark debug insns as part of the same
3069
               sched group.  We know they really aren't, but if we use
3070
               debug insns to tell that a call group is over, we'll
3071
               get different code if debug insns are not there and
3072
               instructions that follow seem like they should be part
3073
               of the call group.
3074
 
3075
               Also, if we did, fixup_sched_groups() would move the
3076
               deps of the debug insn to the call insn, modifying
3077
               non-debug post-dependency counts of the debug insn
3078
               dependencies and otherwise messing with the scheduling
3079
               order.
3080
 
3081
               Instead, let such debug insns be scheduled freely, but
3082
               keep the call group open in case there are insns that
3083
               should be part of it afterwards.  Since we grant debug
3084
               insns higher priority than even sched group insns, it
3085
               will all turn out all right.  */
3086
            goto debug_dont_end_call_group;
3087
          else
3088
            goto end_call_group;
3089
        }
3090
 
3091
      tmp = SET_DEST (set);
3092
      if (GET_CODE (tmp) == SUBREG)
3093
        tmp = SUBREG_REG (tmp);
3094
      if (REG_P (tmp))
3095
        dest_regno = REGNO (tmp);
3096
      else
3097
        goto end_call_group;
3098
 
3099
      tmp = SET_SRC (set);
3100
      if (GET_CODE (tmp) == SUBREG)
3101
        tmp = SUBREG_REG (tmp);
3102
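      /* A stack pointer adjustment (sp = sp +/- ...) right after the call
         is treated like a hard register move and kept in the group.  */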
      if ((GET_CODE (tmp) == PLUS
3103
           || GET_CODE (tmp) == MINUS)
3104
          && REG_P (XEXP (tmp, 0))
3105
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3106
          && dest_regno == STACK_POINTER_REGNUM)
3107
        src_regno = STACK_POINTER_REGNUM;
3108
      else if (REG_P (tmp))
3109
        src_regno = REGNO (tmp);
3110
      else
3111
        goto end_call_group;
3112
 
3113
      if (src_regno < FIRST_PSEUDO_REGISTER
3114
          || dest_regno < FIRST_PSEUDO_REGISTER)
3115
        {
3116
          if (!deps->readonly
3117
              && deps->in_post_call_group_p == post_call_initial)
3118
            deps->in_post_call_group_p = post_call;
3119
 
3120
          if (!sel_sched_p () || sched_emulate_haifa_p)
3121
            {
3122
              SCHED_GROUP_P (insn) = 1;
3123
              CANT_MOVE (insn) = 1;
3124
            }
3125
        }
3126
      else
3127
        {
3128
        end_call_group:
3129
          if (!deps->readonly)
3130
            deps->in_post_call_group_p = not_post_call;
3131
        }
3132
    }
3133
 
3134
 debug_dont_end_call_group:
3135
  if ((current_sched_info->flags & DO_SPECULATION)
3136
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3137
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3138
       be speculated.  */
3139
    {
3140
      if (sel_sched_p ())
3141
        sel_mark_hard_insn (insn);
3142
      else
3143
        {
3144
          sd_iterator_def sd_it;
3145
          dep_t dep;
3146
 
3147
          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3148
               sd_iterator_cond (&sd_it, &dep);)
3149
            change_spec_dep_to_hard (sd_it);
3150
        }
3151
    }
3152
}
3153
 
3154
/* Return TRUE if INSN might not always return normally (e.g. call exit,
3155
   longjmp, loop forever, ...).  */
3156
static bool
3157
call_may_noreturn_p (rtx insn)
3158
{
3159
  rtx call;
3160
 
3161
  /* const or pure calls that aren't looping will always return.  */
3162
  if (RTL_CONST_OR_PURE_CALL_P (insn)
3163
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3164
    return false;
3165
 
3166
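  /* Dig the CALL rtx out of the pattern so we can inspect the callee's
     SYMBOL_REF.  */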
  call = PATTERN (insn);
3167
  if (GET_CODE (call) == PARALLEL)
3168
    call = XVECEXP (call, 0, 0);
3169
  if (GET_CODE (call) == SET)
3170
    call = SET_SRC (call);
3171
  if (GET_CODE (call) == CALL
3172
      && MEM_P (XEXP (call, 0))
3173
      && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3174
    {
3175
      rtx symbol = XEXP (XEXP (call, 0), 0);
3176
      if (SYMBOL_REF_DECL (symbol)
3177
          && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3178
        {
3179
          if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3180
              == BUILT_IN_NORMAL)
3181
            switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3182
              {
3183
              case BUILT_IN_BCMP:
3184
              case BUILT_IN_BCOPY:
3185
              case BUILT_IN_BZERO:
3186
              case BUILT_IN_INDEX:
3187
              case BUILT_IN_MEMCHR:
3188
              case BUILT_IN_MEMCMP:
3189
              case BUILT_IN_MEMCPY:
3190
              case BUILT_IN_MEMMOVE:
3191
              case BUILT_IN_MEMPCPY:
3192
              case BUILT_IN_MEMSET:
3193
              case BUILT_IN_RINDEX:
3194
              case BUILT_IN_STPCPY:
3195
              case BUILT_IN_STPNCPY:
3196
              case BUILT_IN_STRCAT:
3197
              case BUILT_IN_STRCHR:
3198
              case BUILT_IN_STRCMP:
3199
              case BUILT_IN_STRCPY:
3200
              case BUILT_IN_STRCSPN:
3201
              case BUILT_IN_STRLEN:
3202
              case BUILT_IN_STRNCAT:
3203
              case BUILT_IN_STRNCMP:
3204
              case BUILT_IN_STRNCPY:
3205
              case BUILT_IN_STRPBRK:
3206
              case BUILT_IN_STRRCHR:
3207
              case BUILT_IN_STRSPN:
3208
              case BUILT_IN_STRSTR:
3209
                /* Assume certain string/memory builtins always return.  */
3210
                return false;
3211
              default:
3212
                break;
3213
              }
3214
        }
3215
    }
3216
 
3217
  /* For all other calls assume that they might not always return.  */
3218
  return true;
3219
}

/* Analyze INSN with DEPS as a context.  */
void
deps_analyze_insn (struct deps_desc *deps, rtx insn)
{
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn) || JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
      if (!deps->readonly
          && JUMP_P (insn)
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
          if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
            flush_pending_lists (deps, insn, true, true);
          else
            deps->last_pending_memory_flush
              = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
        }

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
          /* Other call-clobbered hard regs may be clobbered.
             Since we only have a choice between 'might be clobbered'
             and 'definitely not clobbered', we must include all
             partly call-clobbered registers here.  */
            else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
                     || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
          /* We don't know what set of fixed registers might be used
             by the function, but it is certain that the stack pointer
             is among them, so be conservative and treat all fixed
             registers as used.  */
            else if (fixed_regs[i])
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
          /* The frame pointer is normally not used by the function
             itself, but by the debugger.  */
          /* ??? MIPS o32 is an exception.  It uses the frame pointer
             in the macro expansion of jal but does not represent this
             fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate the
         convention that sched group insns have dependencies only on the
         previous instruction.

         Of course one can say: "Hey!  What about head of the sched group?"
         And I will answer: "Basic principles (one dep per insn) are always
         the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          if (call_may_noreturn_p (insn))
            {
              /* Remember the last function call that might not always return
                 normally for limiting moves of trapping insns.  */
              free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
              deps->last_function_call_may_noreturn
                = alloc_INSN_LIST (insn, NULL_RTX);
            }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  /* EH_REGION insn notes cannot appear until well after we complete
     scheduling.  */
  if (NOTE_P (insn))
    gcc_assert (NOTE_KIND (insn) != NOTE_INSN_EH_REGION_BEG
                && NOTE_KIND (insn) != NOTE_INSN_EH_REGION_END);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fix up the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && SCHED_GROUP_P (insn) && !sel_sched_p ())
    fixup_sched_groups (insn);
}

/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (struct deps_desc *deps, rtx head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
        deps->in_post_call_group_p = post_call_initial;
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (struct deps_desc *deps, rtx head, rtx tail)
{
  rtx insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {

      if (INSN_P (insn))
        {
          /* And initialize deps_lists.  */
          sd_init_insn (insn);
        }

      deps_analyze_insn (deps, insn);

      if (insn == tail)
        {
          if (sched_deps_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
  gcc_unreachable ();
}
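
/* A rough sketch of the intended calling sequence, using only functions
   defined in this file (the real drivers live in the individual
   schedulers, so details may differ):

     struct deps_desc deps;

     init_deps (&deps, false);           // set up an empty context
     sched_analyze (&deps, head, tail);  // build backward dependencies
     ... run the scheduler proper ...
     sched_free_deps (head, tail, true); // drop dependency lists afterwards
     free_deps (&deps);                  // release the context
*/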

/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}

/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx head, rtx tail, bool resolved_p)
{
  rtx insn;
  rtx next_tail = NEXT_INSN (tail);

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear resolved back deps together with their dep_nodes.  */
        delete_dep_nodes_in_back_deps (insn, resolved_p);

        /* Clear forward deps and leave the dep_nodes to the
           corresponding back_deps list.  */
        if (resolved_p)
          clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
        else
          clear_deps_list (INSN_FORW_DEPS (insn));

        sd_finish_insn (insn);
      }
}

/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of struct deps_desc immediately.  */

void
init_deps (struct deps_desc *deps, bool lazy_reg_last)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);
  INIT_REG_SET (&deps->reg_conditional_sets);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}

/* Initialize only the reg_last field of DEPS; it was not allocated
   earlier because DEPS was initialized lazily.  */
void
init_deps_reg_last (struct deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}


/* Free insn lists found in DEPS.  */

void
free_deps (struct deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
        free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);
  CLEAR_REG_SET (&deps->reg_conditional_sets);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  if (deps->reg_last)
    free (deps->reg_last);
  deps->reg_last = NULL;

  deps = NULL;
}

/* Remove INSN from dependence context DEPS.  Caution: reg_conditional_sets
   is not handled.  */
void
remove_from_deps (struct deps_desc *deps, rtx insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
        remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
        remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
        remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
          && !reg_last->clobbers)
        CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, i);
    }

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
                                   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}

/* Init deps data vector.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1
                 - VEC_length (haifa_deps_insn_data_def, h_d_i_d));
  if (reserve > 0
      && ! VEC_space (haifa_deps_insn_data_def, h_d_i_d, reserve))
    VEC_safe_grow_cleared (haifa_deps_insn_data_def, heap, h_d_i_d,
                           3 * sched_max_luid / 2);
}
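
/* Note: when the vector does need to grow, it is grown to 3/2 of
   sched_max_luid rather than to the exact size required, so that repeated
   calls as new luids appear do not reallocate every time (a simple
   amortized-growth scheme).  */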

/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = create_alloc_pool ("deps_list", sizeof (struct _deps_list),
                                   /* Allocate lists for one block at a time.  */
                                   insns_in_block);
      dn_pool = create_alloc_pool ("dep_node", sizeof (struct _dep_node),
                                   /* Allocate nodes for one block at a time.
                                      We assume that average insn has
                                      5 producers.  */
                                   5 * insns_in_block);
    }
}
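
/* For reference: the "100 * 5" test above means the dependency caches are
   enabled here only when a basic block contains more than 500 insns on
   average (and only in the global, non-selective case); smaller blocks go
   through the uncached paths.  */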


/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
                                          luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
                                            output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
                                          luid);

      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
                                            luid);

      for (i = cache_size; i < luid; i++)
        {
          bitmap_initialize (&true_dependency_cache[i], 0);
          bitmap_initialize (&output_dependency_cache[i], 0);
          bitmap_initialize (&anti_dependency_cache[i], 0);

          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
        }
      cache_size = luid;
    }
}

/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  free_alloc_pool_if_empty (&dn_pool);
  free_alloc_pool_if_empty (&dl_pool);
  gcc_assert (dn_pool == NULL && dl_pool == NULL);

  VEC_free (haifa_deps_insn_data_def, heap, h_d_i_d);
  cache_size = 0;

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
        {
          bitmap_clear (&true_dependency_cache[i]);
          bitmap_clear (&output_dependency_cache[i]);
          bitmap_clear (&anti_dependency_cache[i]);

          if (sched_deps_info->generate_spec_deps)
            bitmap_clear (&spec_dependency_cache[i]);
        }
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
        {
          free (spec_dependency_cache);
          spec_dependency_cache = NULL;
        }

    }
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}

/* Estimate the weakness of dependence between MEM1 and MEM2.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
          && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
           || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
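
/* In other words, the "different addressing modes" case above returns the
   midpoint between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK, i.e. a weakness
   halfway between the uncertain default and the "no dependence" end of
   the scale.  */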

/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle same INSN and ELEM (INSN == ELEM).
   It is a convenience wrapper.  */
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.c, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}

/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}

dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}
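
/* A small sanity-check sketch (hypothetical, not in the original sources):
   set_dep_weak below and get_dep_weak round-trip a weakness value for a
   given speculative type, since one shifts the value into that type's bit
   field and the other shifts it back out:

     ds_t ds = set_dep_weak (0, BEGIN_DATA, UNCERTAIN_DEP_WEAK);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == UNCERTAIN_DEP_WEAK);  */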

/* Return a dep_status that is identical to DS except that the weakness of
   speculative type TYPE is set to DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
        {
          dw_t dw1 = get_dep_weak (ds1, t);
          dw_t dw2 = get_dep_weak (ds2, t);
          ds_t dw;

          if (!max_p)
            {
              dw = ((ds_t) dw1) * ((ds_t) dw2);
              dw /= MAX_DEP_WEAK;
              if (dw < MIN_DEP_WEAK)
                dw = MIN_DEP_WEAK;
            }
          else
            {
              if (dw1 >= dw2)
                dw = dw1;
              else
                dw = dw2;
            }

          ds = set_dep_weak (ds, t, (dw_t) dw);
        }

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
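
/* Worked example of the !MAX_P case above: the weaknesses behave like
   fixed-point probabilities on a 0..MAX_DEP_WEAK scale, so merging two
   statuses that are each about half certain (dw1 == dw2 == MAX_DEP_WEAK / 2)
   yields roughly
   (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK == MAX_DEP_WEAK / 4,
   clamped to MIN_DEP_WEAK from below.  */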

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}

/* Return the join of two dep_statuses DS and DS2.  If both are speculative
   and MEM1 is non-null, use MEM1 and MEM2 to (re)estimate the data
   speculation weakness before merging.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
          || (ds2 && !(ds2 & SPECULATIVE)))
        /* Then this dep can't be speculative.  */
        new_status &= ~SPECULATIVE;
      else
        {
          /* Both are speculative.  Merge the probabilities.  */
          if (mem1)
            {
              dw_t dw;

              dw = estimate_dep_weak (mem1, mem2);
              ds = set_dep_weak (ds, BEGIN_DATA, dw);
            }

          if (!ds)
            new_status = ds2;
          else if (!ds2)
            new_status = ds;
          else
            new_status = ds_merge (ds2, ds);
        }
    }

  return new_status;
}

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}

/* Return the probability of speculation success for the speculation
   status DS.  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
        {
          res *= (ds_t) get_dep_weak (ds, dt);
          n++;
        }

      if (dt == LAST_SPEC_TYPE)
        break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
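
/* I.e. the individual weaknesses are combined as fixed-point probabilities:
   the product of N weaknesses is divided by MAX_DEP_WEAK (N - 1) times so
   that the result stays on the MIN_DEP_WEAK..MAX_DEP_WEAK scale.  */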

/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");

  fprintf (f, "}");
}
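
/* Example output (the weakness value is illustrative): a data-speculative
   true dependence would be printed as

     {BEGIN_DATA: 3; DEP_TRUE; }  */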

/* Dump the dependence status S to stderr.  */
void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}

#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == -1);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
                && (ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that dependence weakness is in proper range.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        {
          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
        }

      /* Check that true and anti dependencies can't have other speculative
         statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif /* ENABLE_CHECKING */

#endif /* INSN_SCHEDULING */
