/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
   Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "toplev.h"
#include "recog.h"
#include "sched-int.h"
#include "params.h"
#include "cselib.h"
#include "df.h"


static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode
{
  NOT_A_BARRIER = 0,
  MOVE_BARRIER,
  TRUE_BARRIER
};

static enum reg_pending_barrier_mode reg_pending_barrier;
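/* Editorial illustration (not part of the original sources): a volatile
   asm or an UNSPEC_VOLATILE raises a TRUE_BARRIER in sched_analyze_2, so
   later insns are linked to it with true dependencies, whereas a setjmp
   call raises a MOVE_BARRIER, which merely pins surrounding insns in
   place with anti-dependencies.  */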
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there are typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  If the bitmap cache for true dependencies is
   allocated, then the caches for the other two dependency kinds are
   allocated as well.  */
static bitmap_head *true_dependency_cache;
static bitmap_head *output_dependency_cache;
static bitmap_head *anti_dependency_cache;
static bitmap_head *spec_dependency_cache;
static int cache_size;

/* To speed up checking consistency of formed forward insn
   dependencies we use the following cache.  Another possible solution
   could be switching off checking duplication of insns in forward
   dependencies.  */
#ifdef ENABLE_CHECKING
static bitmap_head *forward_dependency_cache;
#endif
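/* Editorial note: the caches are indexed by insn LUIDs, so the duplicate
   test in add_or_update_back_dep_1 reduces to a constant-time query such
   as

     bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
		   INSN_LUID (elem))

   instead of a walk over LOG_LINKS.  */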
static int deps_may_trap_p (rtx);
static void add_dependence_list (rtx, rtx, int, enum reg_note);
static void add_dependence_list_and_free (rtx, rtx *, int, enum reg_note);
static void delete_all_dependences (rtx);
static void fixup_sched_groups (rtx);

static void flush_pending_lists (struct deps *, rtx, int, int);
static void sched_analyze_1 (struct deps *, rtx, rtx);
static void sched_analyze_2 (struct deps *, rtx, rtx);
static void sched_analyze_insn (struct deps *, rtx, rtx);

static rtx sched_get_condition (rtx);
static int conditions_mutex_p (rtx, rtx);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_back_dep_1 (rtx, rtx,
			       enum reg_note, ds_t, rtx, rtx, rtx **);
static enum DEPS_ADJUST_RESULT add_or_update_back_dep_1 (rtx, rtx,
			       enum reg_note, ds_t, rtx, rtx, rtx **);
static void add_back_dep (rtx, rtx, enum reg_note, ds_t);

static void adjust_add_sorted_back_dep (rtx, rtx, rtx *);
static void adjust_back_add_forw_dep (rtx, rtx *);
static void delete_forw_dep (rtx, rtx);
static dw_t estimate_dep_weak (rtx, rtx);
#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
static void check_dep_status (enum reg_note, ds_t, bool);
#endif
#endif
/* Return nonzero if a load of the memory reference MEM can cause a trap.  */

static int
deps_may_trap_p (rtx mem)
{
  rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      rtx t = get_reg_known_value (REGNO (addr));
      if (t)
	addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}
/* Return the INSN_LIST containing INSN in LIST, or NULL
   if LIST does not contain INSN.  */

rtx
find_insn_list (rtx insn, rtx list)
{
  while (list)
    {
      if (XEXP (list, 0) == insn)
	return list;
      list = XEXP (list, 1);
    }
  return 0;
}
/* Find the condition under which INSN is executed.  */

static rtx
sched_get_condition (rtx insn)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (pat == 0)
    return 0;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
	return 0;
      return gen_rtx_fmt_ee (revcode, GET_MODE (cond), XEXP (cond, 0),
			     XEXP (cond, 1));
    }

  return 0;
}
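/* Editorial illustration: for a conditional jump whose pattern is
     (set (pc) (if_then_else (ne (reg 1) (const_int 0))
			     (label_ref L1) (pc)))
   sched_get_condition returns (ne (reg 1) (const_int 0)); if the branch
   instead falls through when the condition holds, the reversed
   comparison is built and returned.  */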
/* Return nonzero if conditions COND1 and COND2 can never be both true.  */

static int
conditions_mutex_p (rtx cond1, rtx cond2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) == reversed_comparison_code (cond2, NULL)
      && XEXP (cond1, 0) == XEXP (cond2, 0)
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return 1;
  return 0;
}
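/* For instance (editorial note), (eq (reg 1) (const_int 0)) and
   (ne (reg 1) (const_int 0)) compare the same operands with reversed
   codes, so insns predicated on them can never both execute.  */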
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (rtx insn1, rtx insn2)
{
  rtx cond1, cond2;

  /* flow.c doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition (insn1);
      cond2 = sched_get_condition (insn2);
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
/* Add ELEM wrapped in an INSN_LIST with reg note kind DEP_TYPE to the
   LOG_LINKS of INSN, if it is not already there.  DEP_TYPE indicates the
   type of dependence that this link represents.  DS, if nonzero,
   indicates speculations, through which this dependence can be overcome.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.  The function returns a value indicating if an old entry
   has been changed or a new entry has been added to insn's LOG_LINK.
   In case of a changed entry, CHANGED_LINKPP is set to its address.
   See also the definition of enum DEPS_ADJUST_RESULT in sched-int.h.
   Actual manipulation of dependence data structures is performed in
   add_or_update_back_dep_1.  */

static enum DEPS_ADJUST_RESULT
maybe_add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
				ds_t ds, rtx mem1, rtx mem2,
				rtx **changed_linkpp)
{
  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
#ifdef INSN_SCHEDULING
      if (current_sched_info->flags & DO_SPECULATION)
	/* INSN has an internal dependence, which we can't overcome.  */
	HAS_INTERNAL_DEP (insn) = 1;
#endif
      return 0;
    }

  return add_or_update_back_dep_1 (insn, elem, dep_type,
				   ds, mem1, mem2, changed_linkpp);
}
/* This function has the same meaning of parameters and return values
   as maybe_add_or_update_back_dep_1.  The only difference between these
   two functions is that INSN and ELEM are guaranteed not to be the same
   in this one.  */
static enum DEPS_ADJUST_RESULT
add_or_update_back_dep_1 (rtx insn, rtx elem, enum reg_note dep_type,
			  ds_t ds ATTRIBUTE_UNUSED,
			  rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED,
			  rtx **changed_linkpp ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true, present_p = false;

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

#ifdef INSN_SCHEDULING

#ifdef ENABLE_CHECKING
  check_dep_status (dep_type, ds, mem1 != NULL);
#endif

  /* If we already have a dependency for ELEM, then we do not need to
     do anything.  Avoiding the list walk below can cut compile times
     dramatically for some code.  */
  if (true_dependency_cache != NULL)
    {
      enum reg_note present_dep_type;

      gcc_assert (output_dependency_cache);
      gcc_assert (anti_dependency_cache);
      if (!(current_sched_info->flags & USE_DEPS_LIST))
	{
	  if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem)))
	    present_dep_type = REG_DEP_TRUE;
	  else if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
				 INSN_LUID (elem)))
	    present_dep_type = REG_DEP_OUTPUT;
	  else if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
				 INSN_LUID (elem)))
	    present_dep_type = REG_DEP_ANTI;
	  else
	    maybe_present_p = false;

	  if (maybe_present_p)
	    {
	      if ((int) dep_type >= (int) present_dep_type)
		return DEP_PRESENT;

	      present_p = true;
	    }
	}
      else
	{
	  ds_t present_dep_types = 0;

	  if (bitmap_bit_p (&true_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem)))
	    present_dep_types |= DEP_TRUE;
	  if (bitmap_bit_p (&output_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem)))
	    present_dep_types |= DEP_OUTPUT;
	  if (bitmap_bit_p (&anti_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem)))
	    present_dep_types |= DEP_ANTI;

	  if (present_dep_types)
	    {
	      if (!(current_sched_info->flags & DO_SPECULATION)
		  || !bitmap_bit_p (&spec_dependency_cache[INSN_LUID (insn)],
				    INSN_LUID (elem)))
		{
		  if ((present_dep_types | (ds & DEP_TYPES))
		      == present_dep_types)
		    /* We already have all these bits.  */
		    return DEP_PRESENT;
		}
	      else
		{
		  /* Only true dependencies can be data speculative and
		     only anti dependencies can be control speculative.  */
		  gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
			      == present_dep_types);

		  /* if (additional dep is SPECULATIVE) then
		       we should update DEP_STATUS
		     else
		       we should reset existing dep to non-speculative.  */
		}

	      present_p = true;
	    }
	  else
	    maybe_present_p = false;
	}
    }
#endif

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      rtx *linkp;

      for (linkp = &LOG_LINKS (insn); *linkp; linkp = &XEXP (*linkp, 1))
	{
	  rtx link = *linkp;

	  gcc_assert (true_dependency_cache == 0 || present_p);

	  if (XEXP (link, 0) == elem)
	    {
	      enum DEPS_ADJUST_RESULT changed_p = DEP_PRESENT;

#ifdef INSN_SCHEDULING
	      if (current_sched_info->flags & USE_DEPS_LIST)
		{
		  ds_t new_status = ds | DEP_STATUS (link);

		  if (new_status & SPECULATIVE)
		    {
		      if (!(ds & SPECULATIVE)
			  || !(DEP_STATUS (link) & SPECULATIVE))
			/* Then this dep can't be speculative.  */
			{
			  new_status &= ~SPECULATIVE;
			  if (true_dependency_cache
			      && (DEP_STATUS (link) & SPECULATIVE))
			    bitmap_clear_bit (&spec_dependency_cache
					      [INSN_LUID (insn)],
					      INSN_LUID (elem));
			}
		      else
			{
			  /* Both are speculative.  Merge the
			     probabilities.  */
			  if (mem1)
			    {
			      dw_t dw;

			      dw = estimate_dep_weak (mem1, mem2);
			      ds = set_dep_weak (ds, BEGIN_DATA, dw);
			    }

			  new_status = ds_merge (DEP_STATUS (link), ds);
			}
		    }

		  ds = new_status;
		}

	      /* Clear corresponding cache entry because type of the link
		 may have changed.  Keep them if we use_deps_list.  */
	      if (true_dependency_cache != NULL
		  && !(current_sched_info->flags & USE_DEPS_LIST))
		{
		  enum reg_note kind = REG_NOTE_KIND (link);

		  switch (kind)
		    {
		    case REG_DEP_OUTPUT:
		      bitmap_clear_bit (&output_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		      break;
		    case REG_DEP_ANTI:
		      bitmap_clear_bit (&anti_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		      break;
		    default:
		      gcc_unreachable ();
		    }
		}

	      if ((current_sched_info->flags & USE_DEPS_LIST)
		  && DEP_STATUS (link) != ds)
		{
		  DEP_STATUS (link) = ds;
		  changed_p = DEP_CHANGED;
		}
#endif

	      /* If this is a more restrictive type of dependence than the
		 existing one, then change the existing dependence to this
		 type.  */
	      if ((int) dep_type < (int) REG_NOTE_KIND (link))
		{
		  PUT_REG_NOTE_KIND (link, dep_type);
		  changed_p = DEP_CHANGED;
		}

#ifdef INSN_SCHEDULING
	      /* If we are adding a dependency to INSN's LOG_LINKs, then
		 note that in the bitmap caches of dependency information.  */
	      if (true_dependency_cache != NULL)
		{
		  if (!(current_sched_info->flags & USE_DEPS_LIST))
		    {
		      if (REG_NOTE_KIND (link) == REG_DEP_TRUE)
			bitmap_set_bit (&true_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		      else if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
			bitmap_set_bit (&output_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		      else if (REG_NOTE_KIND (link) == REG_DEP_ANTI)
			bitmap_set_bit (&anti_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		    }
		  else
		    {
		      if (ds & DEP_TRUE)
			bitmap_set_bit (&true_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		      if (ds & DEP_OUTPUT)
			bitmap_set_bit (&output_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		      if (ds & DEP_ANTI)
			bitmap_set_bit (&anti_dependency_cache
					[INSN_LUID (insn)], INSN_LUID (elem));
		      /* Note that a dep can become speculative only
			 at the moment of creation.  Thus, we don't need
			 to check for it here.  */
		    }
		}

	      if (changed_linkpp && changed_p == DEP_CHANGED)
		*changed_linkpp = linkp;
#endif
	      return changed_p;
	    }
	}
      /* We didn't find a dep.  It shouldn't be present in the cache.  */
      gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_back_dep_1.
     Since we made it to add_or_update_back_dep_1, we must create
     (or update) a link.  */

  if (mem1)
    {
      gcc_assert (current_sched_info->flags & DO_SPECULATION);
      ds = set_dep_weak (ds, BEGIN_DATA, estimate_dep_weak (mem1, mem2));
    }

  add_back_dep (insn, elem, dep_type, ds);

  return DEP_CREATED;
}
/* This function creates a link between INSN and ELEM under any
   conditions.  DS describes speculative status of the link.  */
static void
add_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if (current_sched_info->flags & USE_DEPS_LIST)
    LOG_LINKS (insn) = alloc_DEPS_LIST (elem, LOG_LINKS (insn), ds);
  else
    LOG_LINKS (insn) = alloc_INSN_LIST (elem, LOG_LINKS (insn));

  /* Insn dependency, not data dependency.  */
  PUT_REG_NOTE_KIND (LOG_LINKS (insn), dep_type);

#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
  check_dep_status (dep_type, ds, false);
#endif

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    {
      if (!(current_sched_info->flags & USE_DEPS_LIST))
	{
	  if (dep_type == REG_DEP_TRUE)
	    bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem));
	  else if (dep_type == REG_DEP_OUTPUT)
	    bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem));
	  else if (dep_type == REG_DEP_ANTI)
	    bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem));
	}
      else
	{
	  if (ds & DEP_TRUE)
	    bitmap_set_bit (&true_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem));
	  if (ds & DEP_OUTPUT)
	    bitmap_set_bit (&output_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem));
	  if (ds & DEP_ANTI)
	    bitmap_set_bit (&anti_dependency_cache[INSN_LUID (insn)],
			    INSN_LUID (elem));
	  if (ds & SPECULATIVE)
	    {
	      gcc_assert (current_sched_info->flags & DO_SPECULATION);
	      bitmap_set_bit (&spec_dependency_cache[INSN_LUID (insn)],
			      INSN_LUID (elem));
	    }
	}
    }
#endif
}
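/* Editorial illustration: after add_back_dep, LOG_LINKS (insn) is a list
   whose elements are the earlier insns INSN depends on, each tagged via
   REG_NOTE_KIND with REG_DEP_TRUE, REG_DEP_OUTPUT or REG_DEP_ANTI (or
   carrying a full DEP_STATUS when USE_DEPS_LIST is set).  */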
/* A convenience wrapper to operate on an entire list.  */

static void
add_dependence_list (rtx insn, rtx list, int uncond, enum reg_note dep_type)
{
  for (; list; list = XEXP (list, 1))
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
    }
}

/* Similar, but free *LISTP at the same time.  */

static void
add_dependence_list_and_free (rtx insn, rtx *listp, int uncond,
			      enum reg_note dep_type)
{
  rtx list, next;
  for (list = *listp, *listp = NULL; list ; list = next)
    {
      next = XEXP (list, 1);
      if (uncond || ! sched_insns_conditions_mutex_p (insn, XEXP (list, 0)))
	add_dependence (insn, XEXP (list, 0), dep_type);
      free_INSN_LIST_node (list);
    }
}
/* Clear all dependencies for an insn.  */

static void
delete_all_dependences (rtx insn)
{
  /* Clear caches, if they exist, as well as free the dependence.  */

#ifdef INSN_SCHEDULING
  if (true_dependency_cache != NULL)
    {
      bitmap_clear (&true_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&output_dependency_cache[INSN_LUID (insn)]);
      bitmap_clear (&anti_dependency_cache[INSN_LUID (insn)]);
      /* We don't have to clear forward_dependency_cache here,
	 because it is formed later.  */
      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear (&spec_dependency_cache[INSN_LUID (insn)]);
    }
#endif

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    /* In this case LOG_LINKS are formed from the INSN_LISTs,
       not the DEPS_LISTs.  */
    free_INSN_LIST_list (&LOG_LINKS (insn));
  else
    free_DEPS_LIST_list (&LOG_LINKS (insn));
}
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
fixup_sched_groups (rtx insn)
{
  rtx link, prev_nonnote;

  for (link = LOG_LINKS (insn); link ; link = XEXP (link, 1))
    {
      rtx i = insn;
      do
	{
	  i = prev_nonnote_insn (i);

	  if (XEXP (link, 0) == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i));
      if (! sched_insns_conditions_mutex_p (i, XEXP (link, 0)))
	add_dependence (i, XEXP (link, 0), REG_NOTE_KIND (link));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */
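/* A small worked example (editorial, with made-up insns):

     i1: r1 = mem[a]
     i2: mem[a] = r2
     i3: mem[a] = r3
     i4: r4 = mem[a]

   i2 gets an anti dependence on i1 (write follows read), i3 an output
   dependence on i2 (write follows write), and i4 a true dependence on
   i3 (read follows write).  */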
/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  */

static void
add_insn_mem_dependence (struct deps *deps, rtx *insn_list, rtx *mem_list,
			 rtx insn, rtx mem)
{
  rtx link;

  link = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = link;

  if (current_sched_info->use_cselib)
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values (XEXP (mem, 0));
    }
  link = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = link;

  deps->pending_lists_length++;
}
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (struct deps *deps, rtx insn, int for_read,
		     int for_write)
{
  if (for_write)
    {
      add_dependence_list_and_free (insn, &deps->pending_read_insns, 1,
				    REG_DEP_ANTI);
      free_EXPR_LIST_list (&deps->pending_read_mems);
    }

  add_dependence_list_and_free (insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  deps->pending_lists_length = 0;

  add_dependence_list_and_free (insn, &deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT);
  deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
  deps->pending_flush_length = 1;
}
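/* Editorial note: after a flush the pending lists are empty and
   last_pending_memory_flush holds just INSN itself, so every later
   memory reference depends on INSN alone rather than on each entry of
   the old lists.  */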
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (struct deps *deps, int regno, enum machine_mode mode,
		   enum rtx_code ref, rtx insn)
{
  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs[regno][mode];
      if (ref == SET)
	{
	  while (--i >= 0)
	    SET_REGNO_REG_SET (reg_pending_sets, regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    SET_REGNO_REG_SET (reg_pending_uses, regno + i);
	}
      else
	{
	  while (--i >= 0)
	    SET_REGNO_REG_SET (reg_pending_clobbers, regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
	SET_REGNO_REG_SET (reg_pending_sets, regno);
      else if (ref == USE)
	SET_REGNO_REG_SET (reg_pending_uses, regno);
      else
	SET_REGNO_REG_SET (reg_pending_clobbers, regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	 the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (ref == USE)
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI);
	}
    }
}
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (struct deps *deps, rtx x, rtx insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);

  if (dest == 0)
    return;

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (GET_CODE (x) == SET)
	sched_analyze_2 (deps, SET_SRC (x), insn);
      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	  || GET_CODE (dest) == ZERO_EXTRACT
	  || df_read_modify_subreg_p (dest))
	{
	  /* These both read and modify the result.  We must handle
	     them as writes to get proper dependencies for following
	     instructions.  We must handle them as reads to get proper
	     dependencies from this to previous instructions.
	     Thus we need to call sched_analyze_2.  */

	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      enum machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);
	  sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	}
#endif
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (current_sched_info->use_cselib)
	{
	  t = shallow_copy_rtx (dest);
	  cselib_lookup (XEXP (t, 0), Pmode, 1);
	  XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	}
      t = canon_rtx (t);

      if (deps->pending_lists_length > MAX_PENDING_LIST_LENGTH)
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx pending, pending_mem;

	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      if (anti_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (XEXP (pending_mem, 0), t)
		  && ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
		add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);

	      pending = XEXP (pending, 1);
	      pending_mem = XEXP (pending_mem, 1);
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI);

	  add_insn_mem_dependence (deps, &deps->pending_write_insns,
				   &deps->pending_write_mems, insn, dest);
	}
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    sched_analyze_2 (deps, SET_SRC (x), insn);
}
/* Analyze the uses of memory and registers in rtx X in INSN.  */

static void
sched_analyze_2 (struct deps *deps, rtx x, rtx insn)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;

  if (x == 0)
    return;

  code = GET_CODE (x);

  switch (code)
    {
    case CONST_INT:
    case CONST_DOUBLE:
    case CONST_VECTOR:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  Note that we must handle CONST_DOUBLE here
	 because it may have a cc0_rtx in its CONST_DOUBLE_CHAIN field, but
	 this does not mean that this insn is using cc0.  */
      return;

#ifdef HAVE_cc0
    case CC0:
      /* User of CC0 depends on immediately preceding insn.  */
      SCHED_GROUP_P (insn) = 1;
      /* Don't move CC0 setter to another block (it can set up the
	 same flag for previous CC0 users which is safe).  */
      CANT_MOVE (prev_nonnote_insn (insn)) = 1;
      return;
#endif

    case REG:
      {
	int regno = REGNO (x);
	enum machine_mode mode = GET_MODE (x);

	sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
	/* Treat all reads of a stack register as modifying the TOS.  */
	if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	  {
	    /* Avoid analyzing the same register twice.  */
	    if (regno != FIRST_STACK_REG)
	      sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
	  }
#endif
	return;
      }

    case MEM:
      {
	/* Reading memory.  */
	rtx u;
	rtx pending, pending_mem;
	rtx t = x;

	if (current_sched_info->use_cselib)
	  {
	    t = shallow_copy_rtx (t);
	    cselib_lookup (XEXP (t, 0), Pmode, 1);
	    XEXP (t, 0) = cselib_subst_to_values (XEXP (t, 0));
	  }
	t = canon_rtx (t);
	pending = deps->pending_read_insns;
	pending_mem = deps->pending_read_mems;
	while (pending)
	  {
	    if (read_dependence (XEXP (pending_mem, 0), t)
		&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
	      add_dependence (insn, XEXP (pending, 0), REG_DEP_ANTI);

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	pending = deps->pending_write_insns;
	pending_mem = deps->pending_write_mems;
	while (pending)
	  {
	    if (true_dependence (XEXP (pending_mem, 0), VOIDmode,
				 t, rtx_varies_p)
		&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
	      {
		if (current_sched_info->flags & DO_SPECULATION)
		  maybe_add_or_update_back_dep_1 (insn, XEXP (pending, 0),
						  REG_DEP_TRUE,
						  BEGIN_DATA | DEP_TRUE,
						  XEXP (pending_mem, 0), t, 0);
		else
		  add_dependence (insn, XEXP (pending, 0), REG_DEP_TRUE);
	      }

	    pending = XEXP (pending, 1);
	    pending_mem = XEXP (pending_mem, 1);
	  }

	for (u = deps->last_pending_memory_flush; u; u = XEXP (u, 1))
	  if (! JUMP_P (XEXP (u, 0)) || deps_may_trap_p (x))
	    add_dependence (insn, XEXP (u, 0), REG_DEP_ANTI);

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	add_insn_mem_dependence (deps, &deps->pending_read_insns,
				 &deps->pending_read_mems, insn, x);

	/* Take advantage of tail recursion here.  */
	sched_analyze_2 (deps, XEXP (x, 0), insn);
	return;
      }

    /* Force pending stores to memory in case a trap handler needs them.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, false);
      break;

    case ASM_OPERANDS:
    case ASM_INPUT:
    case UNSPEC_VOLATILE:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if (code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	  reg_pending_barrier = TRUE_BARRIER;

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We can not just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
	   traditional asms unlike their normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must handle
	 them as reads to get proper dependencies from this to previous
	 instructions.  Thus we need to pass them to both sched_analyze_1
	 and sched_analyze_2.  We must call sched_analyze_2 first in order
	 to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);
      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }
}
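/* Editorial illustration: for an address like (mem (pre_inc (reg 4))),
   sched_analyze_2 recurses into the address, where the PRE_INC case
   first analyzes (reg 4) as a read and then hands the whole rtx to
   sched_analyze_1, which records the register as written; both sides of
   the read-modify-write are thus covered.  */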
/* Analyze an INSN with pattern X to find all dependencies. */
|
1115 |
|
|
|
1116 |
|
|
static void
|
1117 |
|
|
sched_analyze_insn (struct deps *deps, rtx x, rtx insn)
|
1118 |
|
|
{
|
1119 |
|
|
RTX_CODE code = GET_CODE (x);
|
1120 |
|
|
rtx link;
|
1121 |
|
|
unsigned i;
|
1122 |
|
|
reg_set_iterator rsi;
|
1123 |
|
|
|
1124 |
|
|
if (code == COND_EXEC)
|
1125 |
|
|
{
|
1126 |
|
|
sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
|
1127 |
|
|
|
1128 |
|
|
/* ??? Should be recording conditions so we reduce the number of
|
1129 |
|
|
false dependencies. */
|
1130 |
|
|
x = COND_EXEC_CODE (x);
|
1131 |
|
|
code = GET_CODE (x);
|
1132 |
|
|
}
|
1133 |
|
|
if (code == SET || code == CLOBBER)
|
1134 |
|
|
{
|
1135 |
|
|
sched_analyze_1 (deps, x, insn);
|
1136 |
|
|
|
1137 |
|
|
/* Bare clobber insns are used for letting life analysis, reg-stack
|
1138 |
|
|
and others know that a value is dead. Depend on the last call
|
1139 |
|
|
instruction so that reg-stack won't get confused. */
|
1140 |
|
|
if (code == CLOBBER)
|
1141 |
|
|
add_dependence_list (insn, deps->last_function_call, 1, REG_DEP_OUTPUT);
|
1142 |
|
|
}
|
1143 |
|
|
else if (code == PARALLEL)
|
1144 |
|
|
{
|
1145 |
|
|
for (i = XVECLEN (x, 0); i--;)
|
1146 |
|
|
{
|
1147 |
|
|
rtx sub = XVECEXP (x, 0, i);
|
1148 |
|
|
code = GET_CODE (sub);
|
1149 |
|
|
|
1150 |
|
|
if (code == COND_EXEC)
|
1151 |
|
|
{
|
1152 |
|
|
sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
|
1153 |
|
|
sub = COND_EXEC_CODE (sub);
|
1154 |
|
|
code = GET_CODE (sub);
|
1155 |
|
|
}
|
1156 |
|
|
if (code == SET || code == CLOBBER)
|
1157 |
|
|
sched_analyze_1 (deps, sub, insn);
|
1158 |
|
|
else
|
1159 |
|
|
sched_analyze_2 (deps, sub, insn);
|
1160 |
|
|
}
|
1161 |
|
|
}
|
1162 |
|
|
else
|
1163 |
|
|
sched_analyze_2 (deps, x, insn);
|
1164 |
|
|
|
1165 |
|
|
/* Mark registers CLOBBERED or used by called function. */
|
1166 |
|
|
if (CALL_P (insn))
|
1167 |
|
|
{
|
1168 |
|
|
for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
|
1169 |
|
|
{
|
1170 |
|
|
if (GET_CODE (XEXP (link, 0)) == CLOBBER)
|
1171 |
|
|
sched_analyze_1 (deps, XEXP (link, 0), insn);
|
1172 |
|
|
else
|
1173 |
|
|
sched_analyze_2 (deps, XEXP (link, 0), insn);
|
1174 |
|
|
}
|
1175 |
|
|
if (find_reg_note (insn, REG_SETJMP, NULL))
|
1176 |
|
|
reg_pending_barrier = MOVE_BARRIER;
|
1177 |
|
|
}
|
1178 |
|
|
|
1179 |
|
|
if (JUMP_P (insn))
|
1180 |
|
|
{
|
1181 |
|
|
rtx next;
|
1182 |
|
|
next = next_nonnote_insn (insn);
|
1183 |
|
|
if (next && BARRIER_P (next))
|
1184 |
|
|
reg_pending_barrier = TRUE_BARRIER;
|
1185 |
|
|
else
|
1186 |
|
|
{
|
1187 |
|
|
rtx pending, pending_mem;
|
1188 |
|
|
regset_head tmp_uses, tmp_sets;
|
1189 |
|
|
INIT_REG_SET (&tmp_uses);
|
1190 |
|
|
INIT_REG_SET (&tmp_sets);
|
1191 |
|
|
|
1192 |
|
|
(*current_sched_info->compute_jump_reg_dependencies)
|
1193 |
|
|
(insn, &deps->reg_conditional_sets, &tmp_uses, &tmp_sets);
|
1194 |
|
|
/* Make latency of jump equal to 0 by using anti-dependence. */
|
1195 |
|
|
EXECUTE_IF_SET_IN_REG_SET (&tmp_uses, 0, i, rsi)
|
1196 |
|
|
{
|
1197 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1198 |
|
|
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI);
|
1199 |
|
|
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI);
|
1200 |
|
|
reg_last->uses_length++;
|
1201 |
|
|
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
|
1202 |
|
|
}
|
1203 |
|
|
IOR_REG_SET (reg_pending_sets, &tmp_sets);
|
1204 |
|
|
|
1205 |
|
|
CLEAR_REG_SET (&tmp_uses);
|
1206 |
|
|
CLEAR_REG_SET (&tmp_sets);
|
1207 |
|
|
|
1208 |
|
|
/* All memory writes and volatile reads must happen before the
|
1209 |
|
|
jump. Non-volatile reads must happen before the jump iff
|
1210 |
|
|
the result is needed by the above register used mask. */
|
1211 |
|
|
|
1212 |
|
|
pending = deps->pending_write_insns;
|
1213 |
|
|
pending_mem = deps->pending_write_mems;
|
1214 |
|
|
while (pending)
|
1215 |
|
|
{
|
1216 |
|
|
if (! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
|
1217 |
|
|
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
|
1218 |
|
|
pending = XEXP (pending, 1);
|
1219 |
|
|
pending_mem = XEXP (pending_mem, 1);
|
1220 |
|
|
}
|
1221 |
|
|
|
1222 |
|
|
pending = deps->pending_read_insns;
|
1223 |
|
|
pending_mem = deps->pending_read_mems;
|
1224 |
|
|
while (pending)
|
1225 |
|
|
{
|
1226 |
|
|
if (MEM_VOLATILE_P (XEXP (pending_mem, 0))
|
1227 |
|
|
&& ! sched_insns_conditions_mutex_p (insn, XEXP (pending, 0)))
|
1228 |
|
|
add_dependence (insn, XEXP (pending, 0), REG_DEP_OUTPUT);
|
1229 |
|
|
pending = XEXP (pending, 1);
|
1230 |
|
|
pending_mem = XEXP (pending_mem, 1);
|
1231 |
|
|
}
|
1232 |
|
|
|
1233 |
|
|
add_dependence_list (insn, deps->last_pending_memory_flush, 1,
|
1234 |
|
|
REG_DEP_ANTI);
|
1235 |
|
|
}
|
1236 |
|
|
}
|
1237 |
|
|
|
1238 |
|
|
/* If this instruction can throw an exception, then moving it changes
|
1239 |
|
|
where block boundaries fall. This is mighty confusing elsewhere.
|
1240 |
|
|
Therefore, prevent such an instruction from being moved. Same for
|
1241 |
|
|
non-jump instructions that define block boundaries.
|
1242 |
|
|
??? Unclear whether this is still necessary in EBB mode. If not,
|
1243 |
|
|
add_branch_dependences should be adjusted for RGN mode instead. */
|
1244 |
|
|
if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
|
1245 |
|
|
|| (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
|
1246 |
|
|
reg_pending_barrier = MOVE_BARRIER;
|
1247 |
|
|
|
1248 |
|
|
/* Add dependencies if a scheduling barrier was found. */
|
1249 |
|
|
if (reg_pending_barrier)
|
1250 |
|
|
{
|
1251 |
|
|
/* In the case of barrier the most added dependencies are not
|
1252 |
|
|
real, so we use anti-dependence here. */
|
1253 |
|
|
if (sched_get_condition (insn))
|
1254 |
|
|
{
|
1255 |
|
|
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
|
1256 |
|
|
{
|
1257 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1258 |
|
|
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
|
1259 |
|
|
add_dependence_list
|
1260 |
|
|
(insn, reg_last->sets, 0,
|
1261 |
|
|
reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
|
1262 |
|
|
add_dependence_list
|
1263 |
|
|
(insn, reg_last->clobbers, 0,
|
1264 |
|
|
reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
|
1265 |
|
|
}
|
1266 |
|
|
}
|
1267 |
|
|
else
|
1268 |
|
|
{
|
1269 |
|
|
EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
|
1270 |
|
|
{
|
1271 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1272 |
|
|
add_dependence_list_and_free (insn, ®_last->uses, 0,
|
1273 |
|
|
REG_DEP_ANTI);
|
1274 |
|
|
add_dependence_list_and_free
|
1275 |
|
|
(insn, ®_last->sets, 0,
|
1276 |
|
|
reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
|
1277 |
|
|
add_dependence_list_and_free
|
1278 |
|
|
(insn, ®_last->clobbers, 0,
|
1279 |
|
|
reg_pending_barrier == TRUE_BARRIER ? REG_DEP_TRUE : REG_DEP_ANTI);
|
1280 |
|
|
reg_last->uses_length = 0;
|
1281 |
|
|
reg_last->clobbers_length = 0;
|
1282 |
|
|
}
|
1283 |
|
|
}
|
1284 |
|
|
|
1285 |
|
|
for (i = 0; i < (unsigned)deps->max_reg; i++)
|
1286 |
|
|
{
|
1287 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1288 |
|
|
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
|
1289 |
|
|
SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
|
1290 |
|
|
}
|
1291 |
|
|
|
1292 |
|
|
flush_pending_lists (deps, insn, true, true);
|
1293 |
|
|
CLEAR_REG_SET (&deps->reg_conditional_sets);
|
1294 |
|
|
reg_pending_barrier = NOT_A_BARRIER;
|
1295 |
|
|
}
|
1296 |
|
|
else
|
1297 |
|
|
{
|
1298 |
|
|
/* If the current insn is conditional, we can't free any
|
1299 |
|
|
of the lists. */
|
1300 |
|
|
if (sched_get_condition (insn))
|
1301 |
|
|
{
|
1302 |
|
|
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
|
1303 |
|
|
{
|
1304 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1305 |
|
|
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
|
1306 |
|
|
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
|
1307 |
|
|
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
|
1308 |
|
|
reg_last->uses_length++;
|
1309 |
|
|
}
|
1310 |
|
|
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
|
1311 |
|
|
{
|
1312 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1313 |
|
|
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
|
1314 |
|
|
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
|
1315 |
|
|
reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
|
1316 |
|
|
reg_last->clobbers_length++;
|
1317 |
|
|
}
|
1318 |
|
|
EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
|
1319 |
|
|
{
|
1320 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1321 |
|
|
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
|
1322 |
|
|
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT);
|
1323 |
|
|
add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
|
1324 |
|
|
reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
|
1325 |
|
|
SET_REGNO_REG_SET (&deps->reg_conditional_sets, i);
|
1326 |
|
|
}
|
1327 |
|
|
}
|
1328 |
|
|
else
|
1329 |
|
|
{
|
1330 |
|
|
EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
|
1331 |
|
|
{
|
1332 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1333 |
|
|
add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE);
|
1334 |
|
|
add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE);
|
1335 |
|
|
reg_last->uses_length++;
|
1336 |
|
|
reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
|
1337 |
|
|
}
|
1338 |
|
|
EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
|
1339 |
|
|
{
|
1340 |
|
|
struct deps_reg *reg_last = &deps->reg_last[i];
|
1341 |
|
|
if (reg_last->uses_length > MAX_PENDING_LIST_LENGTH
|
1342 |
|
|
|| reg_last->clobbers_length > MAX_PENDING_LIST_LENGTH)
|
1343 |
|
|
{
|
1344 |
|
|
add_dependence_list_and_free (insn, ®_last->sets, 0,
|
1345 |
|
|
REG_DEP_OUTPUT);
|
1346 |
|
|
		  add_dependence_list_and_free (insn, &reg_last->uses, 0,
						REG_DEP_ANTI);
		  add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
						REG_DEP_OUTPUT);
		  reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
		  reg_last->clobbers_length = 0;
		  reg_last->uses_length = 0;
		}
	      else
		{
		  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT);
		  add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI);
		}
	      reg_last->clobbers_length++;
	      reg_last->clobbers = alloc_INSN_LIST (insn, reg_last->clobbers);
	    }
	  EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[i];
	      add_dependence_list_and_free (insn, &reg_last->sets, 0,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->clobbers, 0,
					    REG_DEP_OUTPUT);
	      add_dependence_list_and_free (insn, &reg_last->uses, 0,
					    REG_DEP_ANTI);
	      reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
	      reg_last->uses_length = 0;
	      reg_last->clobbers_length = 0;
	      CLEAR_REGNO_REG_SET (&deps->reg_conditional_sets, i);
	    }
	}

      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);

  /* If we are currently in a libcall scheduling group, then mark the
     current insn as being in a scheduling group and note that it cannot
     be moved into a different basic block.  */

  if (deps->libcall_block_tail_insn)
    {
      SCHED_GROUP_P (insn) = 1;
      CANT_MOVE (insn) = 1;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on
     SMALL_REGISTER_CLASS machines, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
	goto end_call_group;

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
	dest_regno = REGNO (tmp);
      else
	goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
	tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
	   || GET_CODE (tmp) == MINUS)
	  && REG_P (XEXP (tmp, 0))
	  && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
	  && dest_regno == STACK_POINTER_REGNUM)
	src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
	src_regno = REGNO (tmp);
      else
	goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
	  || dest_regno < FIRST_PSEUDO_REGISTER)
	{
	  if (deps->in_post_call_group_p == post_call_initial)
	    deps->in_post_call_group_p = post_call;

	  SCHED_GROUP_P (insn) = 1;
	  CANT_MOVE (insn) = 1;
	}
      else
	{
	end_call_group:
	  deps->in_post_call_group_p = not_post_call;
	}
    }

  /* Fix up the dependencies in the sched group.  */
  if (SCHED_GROUP_P (insn))
    fixup_sched_groups (insn);
}
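
/* Note (added comment, not in the original source): the end_call_group
   label above sits inside the final else so that every early goto from
   the pattern checks and the all-pseudo fall-through share one exit path:
   any insn that is not a simple hard-reg copy closes the post-call group.  */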

/* Analyze every insn between HEAD and TAIL inclusive, creating LOG_LINKS
   for every dependency.  */

void
sched_analyze (struct deps *deps, rtx head, rtx tail)
{
  rtx insn;

  if (current_sched_info->use_cselib)
    cselib_init (true);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      insn = prev_nonnote_insn (head);
      if (insn && CALL_P (insn))
	deps->in_post_call_group_p = post_call_initial;
    }
  for (insn = head;; insn = NEXT_INSN (insn))
    {
      rtx link, end_seq, r0, set;

      if (NONJUMP_INSN_P (insn) || JUMP_P (insn))
	{
	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  /* Make each JUMP_INSN a scheduling barrier for memory
	     references.  */
	  if (JUMP_P (insn))
	    {
	      /* Keep the list a reasonable size.  */
	      if (deps->pending_flush_length++ > MAX_PENDING_LIST_LENGTH)
		flush_pending_lists (deps, insn, true, true);
	      else
		deps->last_pending_memory_flush
		  = alloc_INSN_LIST (insn, deps->last_pending_memory_flush);
	    }
	  sched_analyze_insn (deps, PATTERN (insn), insn);
	}
      else if (CALL_P (insn))
	{
	  int i;

	  CANT_MOVE (insn) = 1;

	  /* Clear out the stale LOG_LINKS from flow.  */
	  free_INSN_LIST_list (&LOG_LINKS (insn));

	  if (find_reg_note (insn, REG_SETJMP, NULL))
	    {
	      /* This is setjmp.  Assume that all registers, not just
		 hard registers, may be clobbered by this call.  */
	      reg_pending_barrier = MOVE_BARRIER;
	    }
	  else
	    {
	      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
		/* A call may read and modify global register variables.  */
		if (global_regs[i])
		  {
		    SET_REGNO_REG_SET (reg_pending_sets, i);
		    SET_REGNO_REG_SET (reg_pending_uses, i);
		  }
		/* Other call-clobbered hard regs may be clobbered.
		   Since we only have a choice between 'might be clobbered'
		   and 'definitely not clobbered', we must include all
		   partly call-clobbered registers here.  */
		else if (HARD_REGNO_CALL_PART_CLOBBERED (i, reg_raw_mode[i])
			 || TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
		  SET_REGNO_REG_SET (reg_pending_clobbers, i);
		/* We don't know what set of fixed registers might be used
		   by the function, but it is certain that the stack pointer
		   is among them, so be conservative.  */
		else if (fixed_regs[i])
		  SET_REGNO_REG_SET (reg_pending_uses, i);
		/* The frame pointer is normally not used by the function
		   itself, but by the debugger.  */
		/* ??? MIPS o32 is an exception.  It uses the frame pointer
		   in the macro expansion of jal but does not represent this
		   fact in the call_insn rtl.  */
		else if (i == FRAME_POINTER_REGNUM
			 || (i == HARD_FRAME_POINTER_REGNUM
			     && (! reload_completed || frame_pointer_needed)))
		  SET_REGNO_REG_SET (reg_pending_uses, i);
	    }

	  /* For each insn which shouldn't cross a call, add a dependence
	     between that insn and this call insn.  */
	  add_dependence_list_and_free (insn, &deps->sched_before_next_call, 1,
					REG_DEP_ANTI);

	  sched_analyze_insn (deps, PATTERN (insn), insn);

	  /* In the absence of interprocedural alias analysis, we must flush
	     all pending reads and writes, and start new dependencies starting
	     from here.  But only flush writes for constant calls (which may
	     be passed a pointer to something we haven't written yet).  */
	  flush_pending_lists (deps, insn, true, !CONST_OR_PURE_CALL_P (insn));

	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = post_call;
	}

      /* EH_REGION insn notes cannot appear until well after we complete
	 scheduling.  */
      if (NOTE_P (insn))
	gcc_assert (NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_BEG
		    && NOTE_LINE_NUMBER (insn) != NOTE_INSN_EH_REGION_END);

      if (current_sched_info->use_cselib)
	cselib_process_insn (insn);

      /* Now that we have completed handling INSN, check and see if it is
	 a CLOBBER beginning a libcall block.  If it is, record the
	 end of the libcall sequence.

	 We want to schedule libcall blocks as a unit before reload.  While
	 this restricts scheduling, it preserves the meaning of a libcall
	 block.

	 As a side effect, we may get better code due to decreased register
	 pressure as well as less chance of a foreign insn appearing in
	 a libcall block.  */
      if (!reload_completed
	  /* Note we may have nested libcall sequences.  We only care about
	     the outermost libcall sequence.  */
	  && deps->libcall_block_tail_insn == 0
	  /* The sequence must start with a clobber of a register.  */
	  && NONJUMP_INSN_P (insn)
	  && GET_CODE (PATTERN (insn)) == CLOBBER
	  && (r0 = XEXP (PATTERN (insn), 0), REG_P (r0))
	  && REG_P (XEXP (PATTERN (insn), 0))
	  /* The CLOBBER must also have a REG_LIBCALL note attached.  */
	  && (link = find_reg_note (insn, REG_LIBCALL, NULL_RTX)) != 0
	  && (end_seq = XEXP (link, 0)) != 0
	  /* The insn referenced by the REG_LIBCALL note must be a
	     simple nop copy with the same destination as the register
	     mentioned in the clobber.  */
	  && (set = single_set (end_seq)) != 0
	  && SET_DEST (set) == r0 && SET_SRC (set) == r0
	  /* And finally the insn referenced by the REG_LIBCALL must
	     also contain a REG_EQUAL note and a REG_RETVAL note.  */
	  && find_reg_note (end_seq, REG_EQUAL, NULL_RTX) != 0
	  && find_reg_note (end_seq, REG_RETVAL, NULL_RTX) != 0)
	deps->libcall_block_tail_insn = XEXP (link, 0);

      /* If we have reached the end of a libcall block, then close the
	 block.  */
      if (deps->libcall_block_tail_insn == insn)
	deps->libcall_block_tail_insn = 0;

      if (insn == tail)
	{
	  if (current_sched_info->use_cselib)
	    cselib_finish ();
	  return;
	}
    }
  gcc_unreachable ();
}
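
/* Illustrative driver sketch (added comment, not in the original source;
   the real call sites live in the schedulers, e.g. sched-rgn.c, and may
   differ in detail):

     struct deps tmp_deps;
     init_deps (&tmp_deps);                      set up per-block state
     sched_analyze (&tmp_deps, head, tail);      build backward LOG_LINKS
     compute_forward_dependences (head, tail);   invert into INSN_DEPEND
     free_deps (&tmp_deps);                      release the lists  */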


/* The following function adds a forward dependence (FROM, TO) with the
   given DEP_TYPE.  The forward dependence must not exist already.  */

void
add_forw_dep (rtx to, rtx link)
{
  rtx new_link, from;

  from = XEXP (link, 0);

#ifdef ENABLE_CHECKING
  /* If add_dependence is working properly there should never
     be notes, deleted insns or duplicates in the backward
     links.  Thus we need not check for them here.

     However, if we have enabled checking we might as well go
     ahead and verify that add_dependence worked properly.  */
  gcc_assert (INSN_P (from));
  gcc_assert (!INSN_DELETED_P (from));
  if (true_dependency_cache)
    {
      gcc_assert (!bitmap_bit_p (&forward_dependency_cache[INSN_LUID (from)],
				 INSN_LUID (to)));
      bitmap_set_bit (&forward_dependency_cache[INSN_LUID (from)],
		      INSN_LUID (to));
    }
  else
    gcc_assert (!find_insn_list (to, INSN_DEPEND (from)));
#endif

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    new_link = alloc_INSN_LIST (to, INSN_DEPEND (from));
  else
    new_link = alloc_DEPS_LIST (to, INSN_DEPEND (from), DEP_STATUS (link));

  PUT_REG_NOTE_KIND (new_link, REG_NOTE_KIND (link));

  INSN_DEPEND (from) = new_link;
  INSN_DEP_COUNT (to) += 1;
}

/* Examine insns in the range [ HEAD, TAIL ] and use the backward
   dependences from LOG_LINKS to build forward dependences in
   INSN_DEPEND.  */

void
compute_forward_dependences (rtx head, rtx tail)
{
  rtx insn;
  rtx next_tail;

  next_tail = NEXT_INSN (tail);
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      rtx link;

      if (! INSN_P (insn))
	continue;

      if (current_sched_info->flags & DO_SPECULATION)
	{
	  rtx new = 0, link, next;

	  for (link = LOG_LINKS (insn); link; link = next)
	    {
	      next = XEXP (link, 1);
	      adjust_add_sorted_back_dep (insn, link, &new);
	    }

	  LOG_LINKS (insn) = new;
	}

      for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
	add_forw_dep (insn, link);
    }
}
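
/* Worked example (added comment): if LOG_LINKS (I3) holds links to I1 and
   I2, the loop above calls add_forw_dep once per link, prepending I3 to
   INSN_DEPEND (I1) and INSN_DEPEND (I2) and leaving INSN_DEP_COUNT (I3) == 2,
   i.e. I3 still waits on two unscheduled producers.  */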

/* Initialize variables for region data dependence analysis in DEPS.  */

void
init_deps (struct deps *deps)
{
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);
  INIT_REG_SET (&deps->reg_conditional_sets);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_lists_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->sched_before_next_call = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->libcall_block_tail_insn = 0;
}

/* Free insn lists found in DEPS.  */

void
free_deps (struct deps *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);
  CLEAR_REG_SET (&deps->reg_conditional_sets);

  free (deps->reg_last);
}

/* If it is profitable to use them, initialize caches for tracking
   dependency information.  LUID is the number of insns to be scheduled;
   it is used in the estimate of profitability.  */

void
init_dependency_caches (int luid)
{
  /* ?!? We could save some memory by computing a per-region luid mapping
     which could reduce both the number of vectors in the cache and the size
     of each vector.  Instead we just avoid the cache entirely unless the
     average number of instructions in a basic block is very high.  See
     the comment before the declaration of true_dependency_cache for
     what we consider "very high".  */
  if (luid / n_basic_blocks > 100 * 5)
    {
      cache_size = 0;
      extend_dependency_caches (luid, true);
    }
}
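
/* Worked example (added comment): with luid == 42000 insns spread over 60
   basic blocks, 42000 / 60 == 700 > 500, so the caches are allocated; the
   same insns spread over 8000 blocks average about 5 per block and the
   caches are skipped.  The block counts here are illustrative only.  */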

/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);
#ifdef ENABLE_CHECKING
      forward_dependency_cache = XRESIZEVEC (bitmap_head,
					     forward_dependency_cache, luid);
#endif
      if (current_sched_info->flags & DO_SPECULATION)
	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);
#ifdef ENABLE_CHECKING
	  bitmap_initialize (&forward_dependency_cache[i], 0);
#endif
	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}
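
/* Note (added comment): the caches only ever grow.  XRESIZEVEC reallocates
   the arrays and preserves the existing bitmap heads, and only the new
   slots in [cache_size, luid) are initialized above, so extending by N
   leaves previously recorded dependency bits intact.  */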

/* Free the caches allocated in init_dependency_caches.  */

void
free_dependency_caches (void)
{
  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
	{
	  bitmap_clear (&true_dependency_cache[i]);
	  bitmap_clear (&output_dependency_cache[i]);
	  bitmap_clear (&anti_dependency_cache[i]);
#ifdef ENABLE_CHECKING
	  bitmap_clear (&forward_dependency_cache[i]);
#endif
	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_clear (&spec_dependency_cache[i]);
	}
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
#ifdef ENABLE_CHECKING
      free (forward_dependency_cache);
      forward_dependency_cache = NULL;
#endif
      if (current_sched_info->flags & DO_SPECULATION)
	{
	  free (spec_dependency_cache);
	  spec_dependency_cache = NULL;
	}
    }
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
}

/* Insert LINK into the dependence chain pointed to by LINKP and
   maintain the sort order.  */
static void
adjust_add_sorted_back_dep (rtx insn, rtx link, rtx *linkp)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  /* If the insn cannot move speculatively, but the link is speculative,
     make it a hard dependence.  */
  if (HAS_INTERNAL_DEP (insn)
      && (DEP_STATUS (link) & SPECULATIVE))
    {
      DEP_STATUS (link) &= ~SPECULATIVE;

      if (true_dependency_cache)
	bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
			  INSN_LUID (XEXP (link, 0)));
    }

  /* Non-speculative links go at the head of LOG_LINKS, followed by
     speculative links.  */
  if (DEP_STATUS (link) & SPECULATIVE)
    while (*linkp && !(DEP_STATUS (*linkp) & SPECULATIVE))
      linkp = &XEXP (*linkp, 1);

  XEXP (link, 1) = *linkp;
  *linkp = link;
}
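
/* Worked example (added comment): inserting a speculative link S into the
   chain H1 -> H2 -> S1, where the H* links are hard and S1 is speculative,
   walks past H1 and H2 and splices S in front of S1, giving
   H1 -> H2 -> S -> S1.  A hard link is spliced at the head directly.  */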

/* Move the dependence pointed to by LINKP to the back dependencies
   of INSN, and also add this dependence to the forward ones.  All LOG_LINKS
   except the one pointed to by LINKP must already be sorted.  */
static void
adjust_back_add_forw_dep (rtx insn, rtx *linkp)
{
  rtx link;

  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  link = *linkp;
  *linkp = XEXP (*linkp, 1);

  adjust_add_sorted_back_dep (insn, link, &LOG_LINKS (insn));
  add_forw_dep (insn, link);
}

/* Remove forward dependence ELEM from the DEPS_LIST of INSN.  */
static void
delete_forw_dep (rtx insn, rtx elem)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

#ifdef ENABLE_CHECKING
  if (true_dependency_cache)
    bitmap_clear_bit (&forward_dependency_cache[INSN_LUID (elem)],
		      INSN_LUID (insn));
#endif

  remove_free_DEPS_LIST_elem (insn, &INSN_DEPEND (elem));
  INSN_DEP_COUNT (insn)--;
}

/* Estimate the weakness of dependence between MEM1 and MEM2.  */
static dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  rtx r1, r2;

  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  r1 = XEXP (mem1, 0);
  r2 = XEXP (mem2, 0);

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2)
	  && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2))
	   || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - a reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}
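
/* Note (added comment): weakness grows as a dependence becomes less likely,
   from MIN_DEP_WEAK (certain, never speculate) to NO_DEP_WEAK (no
   dependence).  The mixed addressing-mode case above therefore returns the
   midpoint between UNCERTAIN_DEP_WEAK and NO_DEP_WEAK, rating the
   dependence weaker than the fully uncertain case.  */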

/* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
   This function can handle the case where INSN and ELEM are the same
   (INSN == ELEM); it is a convenience wrapper.  */
void
add_dependence (rtx insn, rtx elem, enum reg_note dep_type)
{
  ds_t ds;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_ANTI)
    ds = DEP_ANTI;
  else
    gcc_unreachable ();

  maybe_add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
}

/* Add or update backward dependence between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.
   This function is a convenience wrapper.  */
enum DEPS_ADJUST_RESULT
add_or_update_back_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
  return add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, 0);
}

/* Add or update both backward and forward dependencies between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.  */
void
add_or_update_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type,
			     ds_t ds)
{
  enum DEPS_ADJUST_RESULT res;
  rtx *linkp;

  res = add_or_update_back_dep_1 (insn, elem, dep_type, ds, 0, 0, &linkp);

  if (res == DEP_CHANGED || res == DEP_CREATED)
    {
      if (res == DEP_CHANGED)
	delete_forw_dep (insn, elem);
      else if (res == DEP_CREATED)
	linkp = &LOG_LINKS (insn);

      adjust_back_add_forw_dep (insn, linkp);
    }
}
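
/* Note (added comment): DEP_CHANGED means an existing backward link had its
   status updated, so its stale forward twin is deleted before being
   recreated; DEP_CREATED means add_or_update_back_dep_1 prepended a fresh
   link, which is why LINKP is pointed back at the head of LOG_LINKS before
   adjust_back_add_forw_dep re-sorts it and adds the forward copy.  */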

/* Add both backward and forward dependencies between INSN and ELEM
   with given type DEP_TYPE and dep_status DS.  */
void
add_back_forw_dep (rtx insn, rtx elem, enum reg_note dep_type, ds_t ds)
{
  add_back_dep (insn, elem, dep_type, ds);
  adjust_back_add_forw_dep (insn, &LOG_LINKS (insn));
}

/* Remove both backward and forward dependencies between INSN and ELEM.  */
void
delete_back_forw_dep (rtx insn, rtx elem)
{
  gcc_assert (current_sched_info->flags & DO_SPECULATION);

  if (true_dependency_cache != NULL)
    {
      bitmap_clear_bit (&true_dependency_cache[INSN_LUID (insn)],
			INSN_LUID (elem));
      bitmap_clear_bit (&anti_dependency_cache[INSN_LUID (insn)],
			INSN_LUID (elem));
      bitmap_clear_bit (&output_dependency_cache[INSN_LUID (insn)],
			INSN_LUID (elem));
      bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
			INSN_LUID (elem));
    }

  remove_free_DEPS_LIST_elem (elem, &LOG_LINKS (insn));
  delete_forw_dep (insn, elem);
}

/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  ds = ds & type;
  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  gcc_assert (MIN_DEP_WEAK <= ds && ds <= MAX_DEP_WEAK);
  return (dw_t) ds;
}

/* Return a dep_status that has the same parameters as DS, except that
   speculative type TYPE has weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}
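
/* Worked example (added comment): the weaknesses live in disjoint
   bit-fields of a ds_t, one field per speculative type, so for any w in
   [MIN_DEP_WEAK, MAX_DEP_WEAK]:

     ds_t ds = set_dep_weak (0, BEGIN_DATA, w);
     gcc_assert (get_dep_weak (ds, BEGIN_DATA) == w);

   i.e. set_dep_weak and get_dep_weak are exact inverses on each field.  */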

/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  ds_t dw;

	  dw = ((ds_t) get_dep_weak (ds1, t)) * ((ds_t) get_dep_weak (ds2, t));
	  dw /= MAX_DEP_WEAK;
	  if (dw < MIN_DEP_WEAK)
	    dw = MIN_DEP_WEAK;

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
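
/* Worked example (added comment): when both statuses carry the same
   speculative type, the merged weakness is the scaled product
   w1 * w2 / MAX_DEP_WEAK.  Reading weakness as the probability that the
   dependence is absent, two independent 50% estimates

     (MAX_DEP_WEAK / 2) * (MAX_DEP_WEAK / 2) / MAX_DEP_WEAK

   merge to MAX_DEP_WEAK / 4, i.e. 25%, clamped below at MIN_DEP_WEAK.  */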

#ifdef INSN_SCHEDULING
#ifdef ENABLE_CHECKING
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep_status (enum reg_note dt, ds_t ds, bool relaxed_p)
{
  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
		&& !(ds & DEP_TRUE));
  else
    gcc_assert ((dt == REG_DEP_ANTI)
		&& (ds & DEP_ANTI)
		&& !(ds & (DEP_OUTPUT | DEP_TRUE)));

  /* HARD_DEP cannot appear in the dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!(current_sched_info->flags & DO_SPECULATION))
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
	{
	  ds_t type = FIRST_SPEC_TYPE;

	  /* Check that dependence weakness is in proper range.  */
	  do
	    {
	      if (ds & type)
		get_dep_weak (ds, type);

	      if (type == LAST_SPEC_TYPE)
		break;
	      type <<= SPEC_TYPE_SHIFT;
	    }
	  while (1);
	}

      if (ds & BEGIN_SPEC)
	{
	  /* Only true dependence can be data speculative.  */
	  if (ds & BEGIN_DATA)
	    gcc_assert (ds & DEP_TRUE);

	  /* Control dependencies in the insn scheduler are represented by
	     anti-dependencies, therefore only anti dependence can be
	     control speculative.  */
	  if (ds & BEGIN_CONTROL)
	    gcc_assert (ds & DEP_ANTI);
	}
      else
	{
	  /* Subsequent speculations should resolve true dependencies.  */
	  gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
	}

      /* Check that true and anti dependencies can't have other speculative
	 statuses.  */
      if (ds & DEP_TRUE)
	gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
	gcc_assert (ds & BEGIN_CONTROL);
    }
}
#endif
#endif