/* Instruction scheduling pass.  This file contains definitions used
   internally in the scheduler.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998,
   1999, 2000, 2001, 2003, 2004 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 2, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to the Free
   Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.  */
|
22 |
|
|
|
23 |
|
|
#ifndef GCC_SCHED_INT_H
#define GCC_SCHED_INT_H

/* For state_t.  */
#include "insn-attr.h"
/* For regset_head.  */
#include "basic-block.h"
/* For reg_note.  */
#include "rtl.h"

/* Pointer to data describing the current DFA state.  */
extern state_t curr_state;

/* Forward declaration; the full definition lives in haifa-sched.c.  */
struct ready_list;
|
38 |
|
|
|
39 |
|
|
/* Describe state of dependencies used during sched_analyze phase.  */
struct deps
{
  /* The *_insns and *_mems are paired lists.  Each pending memory operation
     will have a pointer to the MEM rtx on one list and a pointer to the
     containing insn on the other list in the same place in the list.  */

  /* We can't use add_dependence like the old code did, because a single insn
     may have multiple memory accesses, and hence needs to be on the list
     once for each memory access.  Add_dependence won't let you add an insn
     to a list more than once.  */

  /* An INSN_LIST containing all insns with pending read operations.  */
  rtx pending_read_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
  rtx pending_read_mems;

  /* An INSN_LIST containing all insns with pending write operations.  */
  rtx pending_write_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
  rtx pending_write_mems;

  /* Indicates the combined length of the two pending lists.  We must prevent
     these lists from ever growing too large since the number of dependencies
     produced is at least O(N*N), and execution time is at least O(4*N*N), as
     a function of the length of these pending lists.  */
  int pending_lists_length;

  /* Length of the pending memory flush list.  Large functions with no
     calls may build up extremely large lists.  */
  int pending_flush_length;

  /* The last insn upon which all memory references must depend.
     This is an insn which flushed the pending lists, creating a dependency
     between it and all previously pending memory references.  This creates
     a barrier (or a checkpoint) which no memory reference is allowed to cross.

     This includes all non constant CALL_INSNs.  When we do interprocedural
     alias analysis, this restriction can be relaxed.
     This may also be an INSN that writes memory if the pending lists grow
     too large.  */
  rtx last_pending_memory_flush;

  /* A list of the last function calls we have seen.  We use a list to
     represent last function calls from multiple predecessor blocks.
     Used to prevent register lifetimes from expanding unnecessarily.  */
  rtx last_function_call;

  /* A list of insns which use a pseudo register that does not already
     cross a call.  We create dependencies between each of those insn
     and the next call insn, to ensure that they won't cross a call after
     scheduling is done.  */
  rtx sched_before_next_call;

  /* Used to keep post-call pseudo/hard reg movements together with
     the call.  */
  enum { not_post_call, post_call, post_call_initial } in_post_call_group_p;

  /* Set to the tail insn of the outermost libcall block.

     When nonzero, we will mark each insn processed by sched_analyze_insn
     with SCHED_GROUP_P to ensure libcalls are scheduled as a unit.  */
  rtx libcall_block_tail_insn;

  /* The maximum register number for the following arrays.  Before reload
     this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER.  */
  int max_reg;

  /* Element N is the next insn that sets (hard or pseudo) register
     N within the current basic block; or zero, if there is no
     such insn.  Needed for new registers which may be introduced
     by splitting insns.  */
  struct deps_reg
    {
      rtx uses;			/* Insns that use this register.  */
      rtx sets;			/* Insns that set this register.  */
      rtx clobbers;		/* Insns that clobber this register.  */
      int uses_length;		/* Length of the uses list.  */
      int clobbers_length;	/* Length of the clobbers list.  */
    } *reg_last;

  /* Element N is set for each register that has any nonzero element
     in reg_last[N].{uses,sets,clobbers}.  */
  regset_head reg_last_in_use;

  /* Element N is set for each register that is conditionally set.  */
  regset_head reg_conditional_sets;
};
|
129 |
|
|
|
130 |
|
|
/* This structure holds some state of the current scheduling pass, and
   contains some function pointers that abstract out some of the non-generic
   functionality from functions such as schedule_block or schedule_insn.
   There is one global variable, current_sched_info, which points to the
   sched_info structure currently in use.  */
struct sched_info
{
  /* Add all insns that are initially ready to the ready list.  Called once
     before scheduling a set of insns.  */
  void (*init_ready_list) (struct ready_list *);
  /* Called after taking an insn from the ready list.  Returns nonzero if
     this insn can be scheduled, zero if we should silently discard it.  */
  int (*can_schedule_ready_p) (rtx);
  /* Return nonzero if there are more insns that should be scheduled.  */
  int (*schedule_more_p) (void);
  /* Called after an insn has all its dependencies resolved.  Return nonzero
     if it should be moved to the ready list or the queue, or zero if we
     should silently discard it.  */
  int (*new_ready) (rtx);
  /* Compare priority of two insns.  Return a positive number if the second
     insn is to be preferred for scheduling, and a negative one if the first
     is to be preferred.  Zero if they are equally good.  */
  int (*rank) (rtx, rtx);
  /* Return a string that contains the insn uid and optionally anything else
     necessary to identify this insn in an output.  It's valid to use a
     static buffer for this.  The ALIGNED parameter should cause the string
     to be formatted so that multiple output lines will line up nicely.  */
  const char *(*print_insn) (rtx, int);
  /* Return nonzero if an insn should be included in priority
     calculations.  */
  int (*contributes_to_priority) (rtx, rtx);
  /* Called when computing dependencies for a JUMP_INSN.  This function
     should store the set of registers that must be considered as set by
     the jump in the regset.  */
  void (*compute_jump_reg_dependencies) (rtx, regset, regset, regset);

  /* The boundaries of the set of insns to be scheduled.  */
  rtx prev_head, next_tail;

  /* Filled in after the schedule is finished; the first and last scheduled
     insns.  */
  rtx head, tail;

  /* If nonzero, enables an additional sanity check in schedule_block.  */
  unsigned int queue_must_finish_empty:1;
  /* Nonzero if we should use cselib for better alias analysis.  This
     must be 0 if the dependency information is used after sched_analyze
     has completed, e.g. if we're using it to initialize state for successor
     blocks in region scheduling.  */
  unsigned int use_cselib:1;

  /* Maximum priority that has been assigned to an insn.  */
  int sched_max_insns_priority;
};
|
184 |
|
|
|
185 |
|
|
/* The sched_info structure for the scheduling pass currently running.  */
extern struct sched_info *current_sched_info;
|
186 |
|
|
|
187 |
|
|
/* Indexed by INSN_UID, the collection of all data associated with
   a single instruction.  */

struct haifa_insn_data
{
  /* A list of insns which depend on the instruction.  Unlike LOG_LINKS,
     it represents forward dependencies.  */
  rtx depend;

  /* The line number note in effect for each insn.  For line number
     notes, this indicates whether the note may be reused.  */
  rtx line_note;

  /* Logical uid gives the original ordering of the insns.  */
  int luid;

  /* A priority for each insn.  */
  int priority;

  /* The number of incoming edges in the forward dependency graph.
     As scheduling proceeds, counts are decreased.  An insn moves to
     the ready queue when its counter reaches zero.  */
  int dep_count;

  /* Number of instructions referring to this insn.  */
  int ref_count;

  /* The minimum clock tick at which the insn becomes ready.  This is
     used to note timing constraints for the insns in the pending list.  */
  int tick;

  /* Cost of the insn; accessed through the INSN_COST macro.  */
  short cost;

  /* This weight is an estimation of the insn's contribution to
     register pressure.  */
  short reg_weight;

  /* Some insns (e.g. call) are not allowed to move across blocks.  */
  unsigned int cant_move : 1;

  /* Set if there's DEF-USE dependence between some speculatively
     moved load insn and this one.  */
  unsigned int fed_by_spec_load : 1;
  /* Nonzero if this insn is a load.  */
  unsigned int is_load_insn : 1;

  /* Nonzero if priority has been computed already.  */
  unsigned int priority_known : 1;
};
|
235 |
|
|
|
236 |
|
|
/* Table of per-insn scheduling data, indexed by INSN_UID.  */
extern struct haifa_insn_data *h_i_d;

/* Accessor macros for h_i_d.  There are more in haifa-sched.c and
   sched-rgn.c.  Each takes an insn rtx and indexes h_i_d by its
   INSN_UID.  (CANT_MOVE's parameter renamed to INSN for consistency
   with its siblings; purely cosmetic.)  */
#define INSN_DEPEND(INSN)	(h_i_d[INSN_UID (INSN)].depend)
#define INSN_LUID(INSN)		(h_i_d[INSN_UID (INSN)].luid)
#define CANT_MOVE(INSN)		(h_i_d[INSN_UID (INSN)].cant_move)
#define INSN_DEP_COUNT(INSN)	(h_i_d[INSN_UID (INSN)].dep_count)
#define INSN_PRIORITY(INSN)	(h_i_d[INSN_UID (INSN)].priority)
#define INSN_PRIORITY_KNOWN(INSN) (h_i_d[INSN_UID (INSN)].priority_known)
#define INSN_COST(INSN)		(h_i_d[INSN_UID (INSN)].cost)
#define INSN_REG_WEIGHT(INSN)	(h_i_d[INSN_UID (INSN)].reg_weight)
|
248 |
|
|
|
249 |
|
|
/* Debug dump stream and verbosity level for the scheduler.  */
extern FILE *sched_dump;
extern int sched_verbose;
|
251 |
|
|
|
252 |
|
|
/* Exception Free Loads:

   We define five classes of speculative loads: IFREE, IRISKY,
   PFREE, PRISKY, and MFREE.

   IFREE loads are loads that are proved to be exception-free, just
   by examining the load insn.  Examples for such loads are loads
   from TOC and loads of global data.

   IRISKY loads are loads that are proved to be exception-risky,
   just by examining the load insn.  Examples for such loads are
   volatile loads and loads from shared memory.

   PFREE loads are loads for which we can prove, by examining other
   insns, that they are exception-free.  Currently, this class consists
   of loads for which we are able to find a "similar load", either in
   the target block, or, if only one split-block exists, in that split
   block.  Load2 is similar to load1 if both have same single base
   register.  We identify only part of the similar loads, by finding
   an insn upon which both load1 and load2 have a DEF-USE dependence.

   PRISKY loads are loads for which we can prove, by examining other
   insns, that they are exception-risky.  Currently we have two proofs for
   such loads.  The first proof detects loads that are probably guarded by a
   test on the memory address.  This proof is based on the
   backward and forward data dependence information for the region.
   Let load-insn be the examined load.
   Load-insn is PRISKY iff ALL the following hold:

   - insn1 is not in the same block as load-insn
   - there is a DEF-USE dependence chain (insn1, ..., load-insn)
   - test-insn is either a compare or a branch, not in the same block
     as load-insn
   - load-insn is reachable from test-insn
   - there is a DEF-USE dependence chain (insn1, ..., test-insn)

   This proof might fail when the compare and the load are fed
   by an insn not in the region.  To solve this, we will add to this
   group all loads that have no input DEF-USE dependence.

   The second proof detects loads that are directly or indirectly
   fed by a speculative load.  This proof is affected by the
   scheduling process.  We will use the flag fed_by_spec_load.
   Initially, all insns have this flag reset.  After a speculative
   motion of an insn, if insn is either a load, or marked as
   fed_by_spec_load, we will also mark as fed_by_spec_load every
   insn1 for which a DEF-USE dependence (insn, insn1) exists.  A
   load which is fed_by_spec_load is also PRISKY.

   MFREE (maybe-free) loads are all the remaining loads.  They may be
   exception-free, but we cannot prove it.

   Now, all loads in IFREE and PFREE classes are considered
   exception-free, while all loads in IRISKY and PRISKY classes are
   considered exception-risky.  As for loads in the MFREE class,
   these are considered either exception-free or exception-risky,
   depending on whether we are pessimistic or optimistic.  We have
   to take the pessimistic approach to assure the safety of
   speculative scheduling, but we can take the optimistic approach
   by invoking the -fsched_spec_load_dangerous option.  */
|
312 |
|
|
|
313 |
|
|
/* Trap classification of an insn (see the "Exception Free Loads"
   discussion above).  Values increase with riskiness, so WORST_CLASS
   below can combine two classes by taking the numeric maximum.  */
enum INSN_TRAP_CLASS
{
  TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
  PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
};
|
318 |
|
|
|
319 |
|
|
/* Combine two trap classes, yielding the riskier (numerically larger)
   of the two.  Every use of the parameters is parenthesized so that
   operator arguments such as WORST_CLASS (x ? a : b, c) expand
   correctly; note the winning argument is still evaluated twice, so
   avoid side effects in the operands.  */
#define WORST_CLASS(class1, class2) \
(((class1) > (class2)) ? (class1) : (class2))
|
321 |
|
|
|
322 |
|
|
/* Non-GCC compilers may not recognize the GNU __inline keyword;
   make it expand to nothing for them.  */
#ifndef __GNUC__
#define __inline
#endif

/* Inlining annotation for scheduler helpers; may be predefined
   (see the #ifndef guard) to override the default.  */
#ifndef HAIFA_INLINE
#define HAIFA_INLINE __inline
#endif
|
329 |
|
|
|
330 |
|
|
/* Functions in sched-vis.c.  */
extern void print_insn (char *, rtx, int);

/* Functions in sched-deps.c.  */
extern bool sched_insns_conditions_mutex_p (rtx, rtx);
extern int add_dependence (rtx, rtx, enum reg_note);
extern void sched_analyze (struct deps *, rtx, rtx);
extern void init_deps (struct deps *);
extern void free_deps (struct deps *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
extern void add_forward_dependence (rtx, rtx, enum reg_note);
extern void compute_forward_dependences (rtx, rtx);
extern rtx find_insn_list (rtx, rtx);
extern void init_dependency_caches (int);
extern void free_dependency_caches (void);

/* Functions in haifa-sched.c.  */
extern int haifa_classify_insn (rtx);
extern void get_block_head_tail (int, rtx *, rtx *);
extern int no_real_insns_p (rtx, rtx);

/* Line-number note bookkeeping.  */
extern void rm_line_notes (rtx, rtx);
extern void save_line_notes (int, rtx, rtx);
extern void restore_line_notes (rtx, rtx);
extern void rm_redundant_line_notes (void);
extern void rm_other_notes (rtx, rtx);

extern int insn_cost (rtx, rtx, rtx);
extern int set_priorities (rtx, rtx);

/* Main entry points: per-block scheduling plus whole-pass setup/teardown.  */
extern void schedule_block (int, int);
extern void sched_init (FILE *);
extern void sched_finish (void);

extern void ready_add (struct ready_list *, rtx);

#endif /* GCC_SCHED_INT_H */
|