/* Instruction scheduling pass.  This file contains definitions used
   internally in the scheduler.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
   2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_SCHED_INT_H
#define GCC_SCHED_INT_H

#ifdef INSN_SCHEDULING

/* For state_t.  */
#include "insn-attr.h"
#include "df.h"
#include "basic-block.h"

/* For VEC (int, heap).  */
#include "vecprim.h"

/* Identifier of a scheduler pass.  */
enum sched_pass_id_t { SCHED_PASS_UNKNOWN, SCHED_RGN_PASS, SCHED_EBB_PASS,
                       SCHED_SMS_PASS, SCHED_SEL_PASS };

typedef VEC (basic_block, heap) *bb_vec_t;
typedef VEC (rtx, heap) *insn_vec_t;
typedef VEC (rtx, heap) *rtx_vec_t;

extern void sched_init_bbs (void);

extern void sched_extend_luids (void);
extern void sched_init_insn_luid (rtx);
extern void sched_init_luids (bb_vec_t);
extern void sched_finish_luids (void);

extern void sched_extend_target (void);

extern void haifa_init_h_i_d (bb_vec_t);
extern void haifa_finish_h_i_d (void);

/* Hooks that are common to all the schedulers.  */
struct common_sched_info_def
{
  /* Called after blocks were rearranged due to movement of jump instruction.
     The first parameter - index of basic block, in which jump currently is.
     The second parameter - index of basic block, in which jump used
     to be.
     The third parameter - index of basic block, that follows the second
     parameter.  */
  void (*fix_recovery_cfg) (int, int, int);

  /* Called to notify the frontend that a new basic block is being added.
     The first parameter - new basic block.
     The second parameter - block, after which new basic block is being added,
     or EXIT_BLOCK_PTR, if recovery block is being added,
     or NULL, if standalone block is being added.  */
  void (*add_block) (basic_block, basic_block);

  /* Estimate number of insns in the basic block.  */
  int (*estimate_number_of_insns) (basic_block);

  /* Given a non-insn (!INSN_P (x)) return
     -1 - if this rtx doesn't need a luid.
     0 - if it should have the same luid as the previous insn.
     1 - if it needs a separate luid.  */
  int (*luid_for_non_insn) (rtx);

  /* Scheduler pass identifier.  It is preferably used in assertions.  */
  enum sched_pass_id_t sched_pass_id;
};

extern struct common_sched_info_def *common_sched_info;

extern const struct common_sched_info_def haifa_common_sched_info;

/* Return true if selective scheduling pass is working.  */
static inline bool
sel_sched_p (void)
{
  return common_sched_info->sched_pass_id == SCHED_SEL_PASS;
}

/* Returns maximum priority that an insn was assigned to.  */
extern int get_rgn_sched_max_insns_priority (void);

/* Increases effective priority for INSN by AMOUNT.  */
extern void sel_add_to_insn_priority (rtx, int);

/* True if during selective scheduling we need to emulate some of haifa
   scheduler behaviour.  */
extern int sched_emulate_haifa_p;

/* Mapping from INSN_UID to INSN_LUID.  In the end all other per insn data
   structures should be indexed by luid.  */
extern VEC (int, heap) *sched_luids;
#define INSN_LUID(INSN) (VEC_index (int, sched_luids, INSN_UID (INSN)))
#define LUID_BY_UID(UID) (VEC_index (int, sched_luids, UID))

#define SET_INSN_LUID(INSN, LUID) \
(VEC_replace (int, sched_luids, INSN_UID (INSN), (LUID)))

/* The highest INSN_LUID.  */
extern int sched_max_luid;

extern int insn_luid (rtx);
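
/* A minimal usage sketch (illustrative only, not part of the original
   interface): a client pass that wants its own per-insn table can size it
   from sched_max_luid and index it through INSN_LUID.  "my_data" is a
   hypothetical name, and the sketch assumes luids lie in [0, sched_max_luid).

     int *my_data = XCNEWVEC (int, sched_max_luid);
     my_data[INSN_LUID (insn)] = 1;
     ...
     free (my_data);  */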

/* This list holds ripped off notes from the current block.  These notes will
   be attached to the beginning of the block when its scheduling is
   finished.  */
extern rtx note_list;

extern void remove_notes (rtx, rtx);
extern rtx restore_other_notes (rtx, basic_block);
extern void sched_insns_init (rtx);
extern void sched_insns_finish (void);

extern void *xrecalloc (void *, size_t, size_t, size_t);

extern void reemit_notes (rtx);

/* Functions in haifa-sched.c.  */
extern int haifa_classify_insn (const_rtx);

/* Functions in sel-sched-ir.c.  */
extern void sel_find_rgns (void);
extern void sel_mark_hard_insn (rtx);

extern size_t dfa_state_size;

extern void advance_state (state_t);

extern void setup_sched_dump (void);
extern void sched_init (void);
extern void sched_finish (void);

extern bool sel_insn_is_speculation_check (rtx);

/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.
   N_DEBUG determines how many debug insns are on the ready list.  */
struct ready_list
{
  rtx *vec;
  int veclen;
  int first;
  int n_ready;
  int n_debug;
};

extern char *ready_try;
extern struct ready_list ready;
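
/* A minimal sketch of the layout described above (illustrative only; the
   real accessors live in haifa-sched.c):

     rtx best_insn  = ready.vec[ready.first];
     rtx worst_insn = ready.vec[ready.first - ready.n_ready + 1];

   "best_insn" here is the highest-priority element and "worst_insn" the
   lowest-priority one still on the list, because elements are stored in
   ascending priority order ending at FIRST.  */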

extern int max_issue (struct ready_list *, int, state_t, bool, int *);

extern void ebb_compute_jump_reg_dependencies (rtx, regset);

extern edge find_fallthru_edge_from (basic_block);

extern void (* sched_init_only_bb) (basic_block, basic_block);
extern basic_block (* sched_split_block) (basic_block, rtx);
extern basic_block sched_split_block_1 (basic_block, rtx);
extern basic_block (* sched_create_empty_bb) (basic_block);
extern basic_block sched_create_empty_bb_1 (basic_block);

extern basic_block sched_create_recovery_block (basic_block *);
extern void sched_create_recovery_edges (basic_block, basic_block,
                                         basic_block);

/* Pointer to data describing the current DFA state.  */
extern state_t curr_state;

/* Type to represent status of a dependence.  */
typedef int ds_t;

/* Type to represent weakness of speculative dependence.  */
typedef int dw_t;

extern enum reg_note ds_to_dk (ds_t);
extern ds_t dk_to_ds (enum reg_note);

/* Information about the dependency.  */
struct _dep
{
  /* Producer.  */
  rtx pro;

  /* Consumer.  */
  rtx con;

  /* Dependency major type.  This field is superseded by STATUS below.
     Though, it is still in place because some targets use it.  */
  enum reg_note type;

  /* Dependency status.  This field holds all dependency types and additional
     information for speculative dependencies.  */
  ds_t status;

  /* Cached cost of the dependency.  */
  int cost;
};

typedef struct _dep dep_def;
typedef dep_def *dep_t;

#define DEP_PRO(D) ((D)->pro)
#define DEP_CON(D) ((D)->con)
#define DEP_TYPE(D) ((D)->type)
#define DEP_STATUS(D) ((D)->status)
#define DEP_COST(D) ((D)->cost)

#define UNKNOWN_DEP_COST INT_MIN

/* Functions to work with dep.  */

extern void init_dep_1 (dep_t, rtx, rtx, enum reg_note, ds_t);
extern void init_dep (dep_t, rtx, rtx, enum reg_note);

extern void sd_debug_dep (dep_t);
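
/* A minimal sketch of filling in a dependence with the helpers above
   (illustrative only; "producer" and "consumer" stand for two insn rtxes
   already known to the caller):

     dep_def tmp;
     init_dep (&tmp, producer, consumer, REG_DEP_TRUE);
     gcc_assert (DEP_PRO (&tmp) == producer && DEP_CON (&tmp) == consumer);  */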

/* Definition of this struct resides below.  */
struct _dep_node;
typedef struct _dep_node *dep_node_t;

/* A link in the dependency list.  This is essentially an equivalent of a
   single {INSN, DEPS}_LIST rtx.  */
struct _dep_link
{
  /* Dep node with all the data.  */
  dep_node_t node;

  /* Next link in the list.  For the last one it is NULL.  */
  struct _dep_link *next;

  /* Pointer to the next field of the previous link in the list.
     For the first link this points to the deps_list->first.

     With help of this field it is easy to remove and insert links to the
     list.  */
  struct _dep_link **prev_nextp;
};
typedef struct _dep_link *dep_link_t;

#define DEP_LINK_NODE(N) ((N)->node)
#define DEP_LINK_NEXT(N) ((N)->next)
#define DEP_LINK_PREV_NEXTP(N) ((N)->prev_nextp)

/* Macros to work with dep_link.  For most use cases only part of the
   dependency information is needed.  These macros conveniently provide
   that piece of information.  */

#define DEP_LINK_DEP(N) (DEP_NODE_DEP (DEP_LINK_NODE (N)))
#define DEP_LINK_PRO(N) (DEP_PRO (DEP_LINK_DEP (N)))
#define DEP_LINK_CON(N) (DEP_CON (DEP_LINK_DEP (N)))
#define DEP_LINK_TYPE(N) (DEP_TYPE (DEP_LINK_DEP (N)))
#define DEP_LINK_STATUS(N) (DEP_STATUS (DEP_LINK_DEP (N)))

/* A list of dep_links.  */
struct _deps_list
{
  /* First element.  */
  dep_link_t first;

  /* Total number of elements in the list.  */
  int n_links;
};
typedef struct _deps_list *deps_list_t;

#define DEPS_LIST_FIRST(L) ((L)->first)
#define DEPS_LIST_N_LINKS(L) ((L)->n_links)
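
/* A minimal iteration sketch using the accessors above (illustrative only;
   "list" stands for a valid deps_list_t, and the scheduler's preferred way
   of walking dependencies is the iterator interface declared later in this
   header):

     dep_link_t link;
     for (link = DEPS_LIST_FIRST (list); link != NULL;
          link = DEP_LINK_NEXT (link))
       {
         dep_t dep = DEP_LINK_DEP (link);
         ... use DEP_PRO (dep), DEP_CON (dep), DEP_TYPE (dep) ...
       }  */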

/* Suppose we have a dependence Y between insn pro1 and con1, where pro1 has
   additional dependents con0 and con2, and con1 is dependent on additional
   insns pro0 and pro1:

   .con0 pro0
   . ^ |
   . | |
   . | |
   . X A
   . | |
   . | |
   . | V
   .pro1--Y-->con1
   . | ^
   . | |
   . | |
   . Z B
   . | |
   . | |
   . V |
   .con2 pro2

   This is represented using a "dep_node" for each dependence arc, which are
   connected as follows (diagram is centered around Y which is fully shown;
   other dep_nodes shown partially):

   . +------------+ +--------------+ +------------+
   . : dep_node X : | dep_node Y | : dep_node Z :
   . : : | | : :
   . : : | | : :
   . : forw : | forw | : forw :
   . : +--------+ : | +--------+ | : +--------+ :
   forw_deps : |dep_link| : | |dep_link| | : |dep_link| :
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   . ^ ^ : | ^ | : | | ^ | | : | | :
   . | | : | | | : | | | | | : | | :
   . | +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
   . | : | | | : | | | | | : | | | :
   . | : | +----+ | : | | +----+ | | : | +----+ | :
   . | : | |prev| | : | | |prev| | | : | |prev| | :
   . | : | |next| | : | | |next| | | : | |next| | :
   . | : | +----+ | : | | +----+ | | : | +----+ | :
   . | : | | :<-+ | | | |<-+ : | | :<-+
   . | : | +----+ | : | | | +----+ | | | : | +----+ | : |
   . | : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
   . | : | +----+ | : | | +----+ | | : | +----+ | :
   . | : | | : | | | | : | | :
   . | : +--------+ : | +--------+ | : +--------+ :
   . | : : | | : :
   . | : SAME pro1 : | +--------+ | : SAME pro1 :
   . | : DIFF con0 : | |dep | | : DIFF con2 :
   . | : : | | | | : :
   . | | | +----+ | |
   .RTX<------------------------+--+-|pro1| | |
   .pro1 | | +----+ | |
   . | | | |
   . | | +----+ | |
   .RTX<------------------------+--+-|con1| | |
   .con1 | | +----+ | |
   . | | | | |
   . | | | +----+ | |
   . | | | |kind| | |
   . | | | +----+ | |
   . | : : | | |stat| | | : :
   . | : DIFF pro0 : | | +----+ | | : DIFF pro2 :
   . | : SAME con1 : | | | | : SAME con1 :
   . | : : | +--------+ | : :
   . | : : | | : :
   . | : back : | back | : back :
   . v : +--------+ : | +--------+ | : +--------+ :
   back_deps : |dep_link| : | |dep_link| | : |dep_link| :
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
   +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
   . ^ : | ^ | : | | ^ | | : | | :
   . | : | | | : | | | | | : | | :
   . +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
   . : | | | : | | | | | : | | | :
   . : | +----+ | : | | +----+ | | : | +----+ | :
   . : | |prev| | : | | |prev| | | : | |prev| | :
   . : | |next| | : | | |next| | | : | |next| | :
   . : | +----+ | : | | +----+ | | : | +----+ | :
   . : | | :<-+ | | | |<-+ : | | :<-+
   . : | +----+ | : | | | +----+ | | | : | +----+ | : |
   . : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
   . : | +----+ | : | | +----+ | | : | +----+ | :
   . : | | : | | | | : | | :
   . : +--------+ : | +--------+ | : +--------+ :
   . : : | | : :
   . : dep_node A : | dep_node Y | : dep_node B :
   . +------------+ +--------------+ +------------+
*/

struct _dep_node
{
  /* Backward link.  */
  struct _dep_link back;

  /* The dep.  */
  struct _dep dep;

  /* Forward link.  */
  struct _dep_link forw;
};

#define DEP_NODE_BACK(N) (&(N)->back)
#define DEP_NODE_DEP(N) (&(N)->dep)
#define DEP_NODE_FORW(N) (&(N)->forw)

/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode
{
  NOT_A_BARRIER = 0,
  MOVE_BARRIER,
  TRUE_BARRIER
};

/* Whether a register movement is associated with a call.  */
enum post_call_group
{
  not_post_call,
  post_call,
  post_call_initial
};

/* Insns which affect pseudo-registers.  */
struct deps_reg
{
  rtx uses;
  rtx sets;
  rtx implicit_sets;
  rtx control_uses;
  rtx clobbers;
  int uses_length;
  int clobbers_length;
};

/* Describe state of dependencies used during sched_analyze phase.  */
struct deps_desc
{
  /* The *_insns and *_mems are paired lists.  Each pending memory operation
     will have a pointer to the MEM rtx on one list and a pointer to the
     containing insn on the other list in the same place in the list.  */

  /* We can't use add_dependence like the old code did, because a single insn
     may have multiple memory accesses, and hence needs to be on the list
     once for each memory access.  Add_dependence won't let you add an insn
     to a list more than once.  */

  /* An INSN_LIST containing all insns with pending read operations.  */
  rtx pending_read_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending reads.  */
  rtx pending_read_mems;

  /* An INSN_LIST containing all insns with pending write operations.  */
  rtx pending_write_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
  rtx pending_write_mems;

  /* An INSN_LIST containing all jump insns.  */
  rtx pending_jump_insns;

  /* We must prevent the above lists from ever growing too large since
     the number of dependencies produced is at least O(N*N),
     and execution time is at least O(4*N*N), as a function of the
     length of these pending lists.  */

  /* Indicates the length of the pending_read list.  */
  int pending_read_list_length;

  /* Indicates the length of the pending_write list.  */
  int pending_write_list_length;

  /* Length of the pending memory flush list plus the length of the pending
     jump insn list.  Large functions with no calls may build up extremely
     large lists.  */
  int pending_flush_length;

  /* The last insn upon which all memory references must depend.
     This is an insn which flushed the pending lists, creating a dependency
     between it and all previously pending memory references.  This creates
     a barrier (or a checkpoint) which no memory reference is allowed to cross.

     This includes all non constant CALL_INSNs.  When we do interprocedural
     alias analysis, this restriction can be relaxed.
     This may also be an INSN that writes memory if the pending lists grow
     too large.  */
  rtx last_pending_memory_flush;

  /* A list of the last function calls we have seen.  We use a list to
     represent last function calls from multiple predecessor blocks.
     Used to prevent register lifetimes from expanding unnecessarily.  */
  rtx last_function_call;

  /* A list of the last function calls that may not return normally
     we have seen.  We use a list to represent last function calls from
     multiple predecessor blocks.  Used to prevent moving trapping insns
     across such calls.  */
  rtx last_function_call_may_noreturn;

  /* A list of insns which use a pseudo register that does not already
     cross a call.  We create dependencies between each of those insns
     and the next call insn, to ensure that they won't cross a call after
     scheduling is done.  */
  rtx sched_before_next_call;

  /* Similarly, a list of insns which should not cross a branch.  */
  rtx sched_before_next_jump;

  /* Used to keep post-call pseudo/hard reg movements together with
     the call.  */
  enum post_call_group in_post_call_group_p;

  /* The last debug insn we've seen.  */
  rtx last_debug_insn;

  /* The maximum register number for the following arrays.  Before reload
     this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER.  */
  int max_reg;

  /* Element N is the next insn that sets (hard or pseudo) register
     N within the current basic block; or zero, if there is no
     such insn.  Needed for new registers which may be introduced
     by splitting insns.  */
  struct deps_reg *reg_last;

  /* Element N is set for each register that has any nonzero element
     in reg_last[N].{uses,sets,clobbers}.  */
  regset_head reg_last_in_use;

  /* Shows the last value of reg_pending_barrier associated with the insn.  */
  enum reg_pending_barrier_mode last_reg_pending_barrier;

  /* True when this context should be treated as readonly by
     the analysis.  */
  BOOL_BITFIELD readonly : 1;
};

typedef struct deps_desc *deps_t;

/* This structure holds some state of the current scheduling pass, and
   contains some function pointers that abstract out some of the non-generic
   functionality from functions such as schedule_block or schedule_insn.
   There is one global variable, current_sched_info, which points to the
   sched_info structure currently in use.  */
struct haifa_sched_info
{
  /* Add all insns that are initially ready to the ready list.  Called once
     before scheduling a set of insns.  */
  void (*init_ready_list) (void);
  /* Called after taking an insn from the ready list.  Returns nonzero if
     this insn can be scheduled, zero if we should silently discard it.  */
  int (*can_schedule_ready_p) (rtx);
  /* Return nonzero if there are more insns that should be scheduled.  */
  int (*schedule_more_p) (void);
  /* Called after an insn has all its hard dependencies resolved.
     Adjusts status of instruction (which is passed through second parameter)
     to indicate if instruction should be moved to the ready list or the
     queue, or if it should be silently discarded (until next resolved
     dependence).  */
  ds_t (*new_ready) (rtx, ds_t);
  /* Compare priority of two insns.  Return a positive number if the second
     insn is to be preferred for scheduling, and a negative one if the first
     is to be preferred.  Zero if they are equally good.  */
  int (*rank) (rtx, rtx);
  /* Return a string that contains the insn uid and optionally anything else
     necessary to identify this insn in an output.  It's valid to use a
     static buffer for this.  The ALIGNED parameter should cause the string
     to be formatted so that multiple output lines will line up nicely.  */
  const char *(*print_insn) (const_rtx, int);
  /* Return nonzero if an insn should be included in priority
     calculations.  */
  int (*contributes_to_priority) (rtx, rtx);

  /* Return true if scheduling insn (passed as the parameter) will trigger
     finish of scheduling current block.  */
  bool (*insn_finishes_block_p) (rtx);

  /* The boundaries of the set of insns to be scheduled.  */
  rtx prev_head, next_tail;

  /* Filled in after the schedule is finished; the first and last scheduled
     insns.  */
  rtx head, tail;

  /* If nonzero, enables an additional sanity check in schedule_block.  */
  unsigned int queue_must_finish_empty:1;

  /* Maximum priority that has been assigned to an insn.  */
  int sched_max_insns_priority;

  /* Hooks to support speculative scheduling.  */

  /* Called to notify frontend that instruction is being added (second
     parameter == 0) or removed (second parameter == 1).  */
  void (*add_remove_insn) (rtx, int);

  /* Called to notify the frontend that instruction INSN is being
     scheduled.  */
  void (*begin_schedule_ready) (rtx insn);

  /* Called to notify the frontend that an instruction INSN is about to be
     moved to its correct place in the final schedule.  This is done for all
     insns in order of the schedule.  LAST indicates the last scheduled
     instruction.  */
  void (*begin_move_insn) (rtx insn, rtx last);

  /* If the second parameter is not NULL, return a nonnull value if the
     basic block should be advanced.
     If the second parameter is NULL, return the next basic block in EBB.
     The first parameter is the current basic block in EBB.  */
  basic_block (*advance_target_bb) (basic_block, rtx);

  /* Allocate memory, store the frontend scheduler state in it, and
     return it.  */
  void *(*save_state) (void);
  /* Restore frontend scheduler state from the argument, and free the
     memory.  */
  void (*restore_state) (void *);

  /* ??? FIXME: should use straight bitfields inside sched_info instead of
     this flag field.  */
  unsigned int flags;
};

/* This structure holds description of the properties for speculative
   scheduling.  */
struct spec_info_def
{
  /* Holds types of allowed speculations: BEGIN_{DATA|CONTROL},
     BE_IN_{DATA|CONTROL}.  */
  int mask;

  /* A dump file for additional information on speculative scheduling.  */
  FILE *dump;

  /* Minimal cumulative weakness of speculative instruction's
     dependencies, so that insn will be scheduled.  */
  dw_t data_weakness_cutoff;

  /* Minimal usefulness of speculative instruction to be considered for
     scheduling.  */
  int control_weakness_cutoff;

  /* Flags from the enum SPEC_SCHED_FLAGS.  */
  int flags;
};
typedef struct spec_info_def *spec_info_t;

extern spec_info_t spec_info;

extern struct haifa_sched_info *current_sched_info;

/* Do register pressure sensitive insn scheduling if the flag is set
   up.  */
extern bool sched_pressure_p;

/* Map regno -> its pressure class.  The map is defined only when
   SCHED_PRESSURE_P is true.  */
extern enum reg_class *sched_regno_pressure_class;

/* Indexed by INSN_UID, the collection of all data associated with
   a single instruction.  */

struct _haifa_deps_insn_data
{
  /* The number of incoming edges in the forward dependency graph.
     As scheduling proceeds, counts are decreased.  An insn moves to
     the ready queue when its counter reaches zero.  */
  int dep_count;

  /* Nonzero if instruction has internal dependence
     (e.g. add_dependence was invoked with (insn == elem)).  */
  unsigned int has_internal_dep;

  /* NB: We can't place 'struct _deps_list' here instead of deps_list_t into
     h_i_d because when h_i_d extends, addresses of the deps_list->first
     change without updating deps_list->first->next->prev_nextp.  Thus
     BACK_DEPS and RESOLVED_BACK_DEPS are allocated on the heap and FORW_DEPS
     list is allocated on the obstack.  */

  /* A list of hard backward dependencies.  The insn is a consumer of all the
     deps mentioned here.  */
  deps_list_t hard_back_deps;

  /* A list of speculative (weak) dependencies.  The insn is a consumer of all
     the deps mentioned here.  */
  deps_list_t spec_back_deps;

  /* A list of insns which depend on the instruction.  Unlike 'back_deps',
     it represents forward dependencies.  */
  deps_list_t forw_deps;

  /* A list of scheduled producers of the instruction.  Links are being moved
     from 'back_deps' to 'resolved_back_deps' while scheduling.  */
  deps_list_t resolved_back_deps;

  /* A list of scheduled consumers of the instruction.  Links are being moved
     from 'forw_deps' to 'resolved_forw_deps' while scheduling to speed up the
     search in 'forw_deps'.  */
  deps_list_t resolved_forw_deps;

  /* If the insn is conditional (either through COND_EXEC, or because
     it is a conditional branch), this records the condition.  NULL
     for insns that haven't been seen yet or don't have a condition;
     const_true_rtx to mark an insn without a condition, or with a
     condition that has been clobbered by a subsequent insn.  */
  rtx cond;

  /* For a conditional insn, a list of insns that could set the condition
     register.  Used when generating control dependencies.  */
  rtx cond_deps;

  /* True if the condition in 'cond' should be reversed to get the actual
     condition.  */
  unsigned int reverse_cond : 1;

  /* Some insns (e.g. call) are not allowed to move across blocks.  */
  unsigned int cant_move : 1;
};

/* Bits used for storing values of the fields in the following
   structure.  */
#define INCREASE_BITS 8

/* The structure describes how the corresponding insn increases the
   register pressure for each pressure class.  */
struct reg_pressure_data
{
  /* Pressure increase for given class because of clobber.  */
  unsigned int clobber_increase : INCREASE_BITS;
  /* Increase in register pressure for given class because of register
     sets.  */
  unsigned int set_increase : INCREASE_BITS;
  /* Pressure increase for given class because of unused register
     set.  */
  unsigned int unused_set_increase : INCREASE_BITS;
  /* Pressure change: #sets - #deaths.  */
  int change : INCREASE_BITS;
};

/* The following structure describes usage of registers by insns.  */
struct reg_use_data
{
  /* Regno used in the insn.  */
  int regno;
  /* Insn using the regno.  */
  rtx insn;
  /* Cyclic list of elements with the same regno.  */
  struct reg_use_data *next_regno_use;
  /* List of elements with the same insn.  */
  struct reg_use_data *next_insn_use;
};

/* The following structure describes used sets of registers by insns.
   Registers are pseudos whose pressure class is not NO_REGS or hard
   registers available for allocations.  */
struct reg_set_data
{
  /* Regno used in the insn.  */
  int regno;
  /* Insn setting the regno.  */
  rtx insn;
  /* List of elements with the same insn.  */
  struct reg_set_data *next_insn_set;
};

struct _haifa_insn_data
{
  /* We can't place 'struct _deps_list' into h_i_d instead of deps_list_t
     because when h_i_d extends, addresses of the deps_list->first
     change without updating deps_list->first->next->prev_nextp.  */

  /* Logical uid gives the original ordering of the insns.  */
  int luid;

  /* A priority for each insn.  */
  int priority;

  /* The minimum clock tick at which the insn becomes ready.  This is
     used to note timing constraints for the insns in the pending list.  */
  int tick;

  /* For insns that are scheduled at a fixed difference from another,
     this records the tick in which they must be ready.  */
  int exact_tick;

  /* INTER_TICK is used to adjust INSN_TICKs of instructions from the
     subsequent blocks in a region.  */
  int inter_tick;

  /* Used temporarily to estimate an INSN_TICK value for an insn given
     current knowledge.  */
  int tick_estimate;

  /* See comment on QUEUE_INDEX macro in haifa-sched.c.  */
  int queue_index;

  short cost;

  /* Set if there's DEF-USE dependence between some speculatively
     moved load insn and this one.  */
  unsigned int fed_by_spec_load : 1;
  unsigned int is_load_insn : 1;
  /* Nonzero if this insn has negative-cost forward dependencies against
     an already scheduled insn.  */
  unsigned int feeds_backtrack_insn : 1;

  /* Nonzero if this insn is a shadow of another, scheduled after a fixed
     delay.  We only emit shadows at the end of a cycle, with no other
     real insns following them.  */
  unsigned int shadow_p : 1;

  /* Used internally in unschedule_insns_until to mark insns that must have
     their TODO_SPEC recomputed.  */
  unsigned int must_recompute_spec : 1;

  /* '> 0' if priority is valid,
     '== 0' if priority was not yet computed,
     '< 0' if priority is invalid and should be recomputed.  */
  signed char priority_status;

  /* What speculations are necessary to apply to schedule the instruction.  */
  ds_t todo_spec;

  /* What speculations were already applied.  */
  ds_t done_spec;

  /* What speculations are checked by this instruction.  */
  ds_t check_spec;

  /* Recovery block for speculation checks.  */
  basic_block recovery_block;

  /* Original pattern of the instruction.  */
  rtx orig_pat;

  /* For insns with DEP_CONTROL dependencies, the predicated pattern if it
     was ever successfully constructed.  */
  rtx predicated_pat;

  /* The following array contains info how the insn increases register
     pressure.  There is an element for each cover class of pseudos
     referenced in insns.  */
  struct reg_pressure_data *reg_pressure;
  /* The following array contains maximal reg pressure between last
     scheduled insn and given insn.  There is an element for each
     pressure class of pseudos referenced in insns.  This info is updated
     after scheduling each insn for each insn between the two
     mentioned insns.  */
  int *max_reg_pressure;
  /* The following list contains info about used pseudos and hard
     registers available for allocation.  */
  struct reg_use_data *reg_use_list;
  /* The following list contains info about set pseudos and hard
     registers available for allocation.  */
  struct reg_set_data *reg_set_list;
  /* Info about how scheduling the insn changes cost of register
     pressure excess (between source and target).  */
  int reg_pressure_excess_cost_change;
};

typedef struct _haifa_insn_data haifa_insn_data_def;
typedef haifa_insn_data_def *haifa_insn_data_t;

DEF_VEC_O (haifa_insn_data_def);
DEF_VEC_ALLOC_O (haifa_insn_data_def, heap);

extern VEC(haifa_insn_data_def, heap) *h_i_d;

#define HID(INSN) (VEC_index (haifa_insn_data_def, h_i_d, INSN_UID (INSN)))

/* Accessor macros for h_i_d.  There are more in haifa-sched.c and
   sched-rgn.c.  */
#define INSN_PRIORITY(INSN) (HID (INSN)->priority)
#define INSN_REG_PRESSURE(INSN) (HID (INSN)->reg_pressure)
#define INSN_MAX_REG_PRESSURE(INSN) (HID (INSN)->max_reg_pressure)
#define INSN_REG_USE_LIST(INSN) (HID (INSN)->reg_use_list)
#define INSN_REG_SET_LIST(INSN) (HID (INSN)->reg_set_list)
#define INSN_REG_PRESSURE_EXCESS_COST_CHANGE(INSN) \
  (HID (INSN)->reg_pressure_excess_cost_change)
#define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
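
/* A minimal usage sketch for the accessors above (illustrative only;
   "insn" stands for any insn already covered by h_i_d):

     if (INSN_PRIORITY_STATUS (insn) > 0)
       fprintf (sched_dump, ";;   priority of insn %d is %d\n",
                INSN_UID (insn), INSN_PRIORITY (insn));  */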

typedef struct _haifa_deps_insn_data haifa_deps_insn_data_def;
typedef haifa_deps_insn_data_def *haifa_deps_insn_data_t;

DEF_VEC_O (haifa_deps_insn_data_def);
DEF_VEC_ALLOC_O (haifa_deps_insn_data_def, heap);

extern VEC(haifa_deps_insn_data_def, heap) *h_d_i_d;

#define HDID(INSN) (VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
                               INSN_LUID (INSN)))
#define INSN_DEP_COUNT(INSN) (HDID (INSN)->dep_count)
#define HAS_INTERNAL_DEP(INSN) (HDID (INSN)->has_internal_dep)
#define INSN_FORW_DEPS(INSN) (HDID (INSN)->forw_deps)
#define INSN_RESOLVED_BACK_DEPS(INSN) (HDID (INSN)->resolved_back_deps)
#define INSN_RESOLVED_FORW_DEPS(INSN) (HDID (INSN)->resolved_forw_deps)
#define INSN_HARD_BACK_DEPS(INSN) (HDID (INSN)->hard_back_deps)
#define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
#define INSN_CACHED_COND(INSN) (HDID (INSN)->cond)
#define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
#define INSN_COND_DEPS(INSN) (HDID (INSN)->cond_deps)
#define CANT_MOVE(INSN) (HDID (INSN)->cant_move)
#define CANT_MOVE_BY_LUID(LUID) (VEC_index (haifa_deps_insn_data_def, h_d_i_d, \
                                            LUID)->cant_move)


#define INSN_PRIORITY(INSN) (HID (INSN)->priority)
#define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
#define INSN_PRIORITY_KNOWN(INSN) (INSN_PRIORITY_STATUS (INSN) > 0)
#define TODO_SPEC(INSN) (HID (INSN)->todo_spec)
#define DONE_SPEC(INSN) (HID (INSN)->done_spec)
#define CHECK_SPEC(INSN) (HID (INSN)->check_spec)
#define RECOVERY_BLOCK(INSN) (HID (INSN)->recovery_block)
#define ORIG_PAT(INSN) (HID (INSN)->orig_pat)
#define PREDICATED_PAT(INSN) (HID (INSN)->predicated_pat)

/* INSN is either a simple or a branchy speculation check.  */
#define IS_SPECULATION_CHECK_P(INSN) \
  (sel_sched_p () ? sel_insn_is_speculation_check (INSN) : RECOVERY_BLOCK (INSN) != NULL)

/* INSN is a speculation check that will simply reexecute the speculatively
   scheduled instruction if the speculation fails.  */
#define IS_SPECULATION_SIMPLE_CHECK_P(INSN) \
  (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR)

/* INSN is a speculation check that will branch to RECOVERY_BLOCK if the
   speculation fails.  Insns in that block will reexecute the speculatively
   scheduled code and then will return immediately after INSN thus preserving
   semantics of the program.  */
#define IS_SPECULATION_BRANCHY_CHECK_P(INSN) \
  (RECOVERY_BLOCK (INSN) != NULL && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR)

/* Dep status (aka ds_t) of the link encapsulates the information that is
   needed for speculative scheduling.  Namely, it is 4 integers in the range
   [0, MAX_DEP_WEAK] and 3 bits.
   The integers correspond to the probability of the dependence to *not*
   exist, i.e. the probability that overcoming this dependence will
   not be followed by execution of the recovery code.  Nevertheless,
   however high the probability of success is, recovery code should still
   be generated to preserve semantics of the program.  To find a way to
   get/set these integers, please refer to the {get, set}_dep_weak ()
   functions in sched-deps.c .
   The 3 bits in the DEP_STATUS correspond to 3 dependence types: true-,
   output- and anti- dependence.  It is not enough for speculative scheduling
   to know just the major type of all the dependence between two instructions,
   as only true dependence can be overcome.
   There also is the 4-th bit in the DEP_STATUS (HARD_DEP), that is reserved
   for using to describe instruction's status.  It is set whenever instruction
   has at least one dependence that cannot be overcome.
   See also: check_dep_status () in sched-deps.c .  */

/* We exclude sign bit.  */
#define BITS_PER_DEP_STATUS (HOST_BITS_PER_INT - 1)

/* First '6' stands for 4 dep type bits and the HARD_DEP and DEP_CANCELLED
   bits.
   Second '4' stands for BEGIN_{DATA, CONTROL}, BE_IN_{DATA, CONTROL}
   dep weakness.  */
#define BITS_PER_DEP_WEAK ((BITS_PER_DEP_STATUS - 6) / 4)

/* Mask of speculative weakness in dep_status.  */
#define DEP_WEAK_MASK ((1 << BITS_PER_DEP_WEAK) - 1)

/* This constant means that dependence is fake with 99.999...% probability.
   This is the maximum value, that can appear in dep_status.
   Note, that we don't want MAX_DEP_WEAK to be the same as DEP_WEAK_MASK for
   debugging reasons.  Though, it can be set to DEP_WEAK_MASK, and, when
   done so, we'll get fast (mul for)/(div by) NO_DEP_WEAK.  */
#define MAX_DEP_WEAK (DEP_WEAK_MASK - 1)

/* This constant means that dependence is 99.999...% real and it is a really
   bad idea to overcome it (though this can be done, preserving program
   semantics).  */
#define MIN_DEP_WEAK 1

/* This constant represents 100% probability.
   E.g. it is used to represent weakness of dependence, that doesn't exist.  */
#define NO_DEP_WEAK (MAX_DEP_WEAK + MIN_DEP_WEAK)

/* Default weakness of speculative dependence.  Used when we can say
   neither bad nor good about the dependence.  */
#define UNCERTAIN_DEP_WEAK (MAX_DEP_WEAK - MAX_DEP_WEAK / 4)

/* Offset for speculative weaknesses in dep_status.  */
enum SPEC_TYPES_OFFSETS {
  BEGIN_DATA_BITS_OFFSET = 0,
  BE_IN_DATA_BITS_OFFSET = BEGIN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
  BEGIN_CONTROL_BITS_OFFSET = BE_IN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
  BE_IN_CONTROL_BITS_OFFSET = BEGIN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK
};
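
/* A minimal sketch of how one weakness field is packed into a ds_t using the
   offsets above (illustrative only; get_dep_weak/set_dep_weak, declared at
   the end of this header and defined in sched-deps.c, are the real
   accessors; "ds" stands for any ds_t value):

     dw_t w = (ds >> BEGIN_DATA_BITS_OFFSET) & DEP_WEAK_MASK;

   i.e. each speculation type occupies BITS_PER_DEP_WEAK bits starting at
   its *_BITS_OFFSET.  */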

/* The following defines provide numerous constants used to distinguish between
   different types of speculative dependencies.  */

/* Dependence can be overcome with generation of new data speculative
   instruction.  */
#define BEGIN_DATA (((ds_t) DEP_WEAK_MASK) << BEGIN_DATA_BITS_OFFSET)

/* This dependence is to the instruction in the recovery block, that was
   formed to recover after data-speculation failure.
   Thus, this dependence can be overcome by generating a copy of
   this instruction in the recovery block.  */
#define BE_IN_DATA (((ds_t) DEP_WEAK_MASK) << BE_IN_DATA_BITS_OFFSET)

/* Dependence can be overcome with generation of new control speculative
   instruction.  */
#define BEGIN_CONTROL (((ds_t) DEP_WEAK_MASK) << BEGIN_CONTROL_BITS_OFFSET)

/* This dependence is to the instruction in the recovery block, that was
   formed to recover after control-speculation failure.
   Thus, this dependence can be overcome by generating a copy of
   this instruction in the recovery block.  */
#define BE_IN_CONTROL (((ds_t) DEP_WEAK_MASK) << BE_IN_CONTROL_BITS_OFFSET)

/* A few convenient combinations.  */
#define BEGIN_SPEC (BEGIN_DATA | BEGIN_CONTROL)
#define DATA_SPEC (BEGIN_DATA | BE_IN_DATA)
#define CONTROL_SPEC (BEGIN_CONTROL | BE_IN_CONTROL)
#define SPECULATIVE (DATA_SPEC | CONTROL_SPEC)
#define BE_IN_SPEC (BE_IN_DATA | BE_IN_CONTROL)

/* Constants that are helpful in iterating through dep_status.  */
#define FIRST_SPEC_TYPE BEGIN_DATA
#define LAST_SPEC_TYPE BE_IN_CONTROL
#define SPEC_TYPE_SHIFT BITS_PER_DEP_WEAK

/* Dependence on instruction can be of multiple types
   (e.g. true and output).  These fields enhance REG_NOTE_KIND information
   of the dependence.  */
#define DEP_TRUE (((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK))
#define DEP_OUTPUT (DEP_TRUE << 1)
#define DEP_ANTI (DEP_OUTPUT << 1)
#define DEP_CONTROL (DEP_ANTI << 1)

#define DEP_TYPES (DEP_TRUE | DEP_OUTPUT | DEP_ANTI | DEP_CONTROL)

/* Instruction has non-speculative dependence.  This bit represents the
   property of an instruction - not the one of a dependence.
   Therefore, it can appear only in the TODO_SPEC field of an instruction.  */
#define HARD_DEP (DEP_CONTROL << 1)

#define DEP_CANCELLED (HARD_DEP << 1)

/* This represents the results of calling sched-deps.c functions,
   which modify dependencies.  */
enum DEPS_ADJUST_RESULT {
  /* No dependence needed (e.g. producer == consumer).  */
  DEP_NODEP,
  /* Dependence is already present and wasn't modified.  */
  DEP_PRESENT,
  /* Existing dependence was modified to include additional information.  */
  DEP_CHANGED,
  /* New dependence has been created.  */
  DEP_CREATED
};

/* Represents the bits that can be set in the flags field of the
   sched_info structure.  */
enum SCHED_FLAGS {
  /* If set, generate links between instructions as DEPS_LIST.
     Otherwise, generate usual INSN_LIST links.  */
  USE_DEPS_LIST = 1,
  /* Perform data or control (or both) speculation.
     Results in generation of data and control speculative dependencies.
     Requires USE_DEPS_LIST set.  */
  DO_SPECULATION = USE_DEPS_LIST << 1,
  DO_BACKTRACKING = DO_SPECULATION << 1,
  DO_PREDICATION = DO_BACKTRACKING << 1,
  SCHED_RGN = DO_PREDICATION << 1,
  SCHED_EBB = SCHED_RGN << 1,
  /* Scheduler can possibly create new basic blocks.  Used for assertions.  */
  NEW_BBS = SCHED_EBB << 1,
  SEL_SCHED = NEW_BBS << 1
};

enum SPEC_SCHED_FLAGS {
  COUNT_SPEC_IN_CRITICAL_PATH = 1,
  PREFER_NON_DATA_SPEC = COUNT_SPEC_IN_CRITICAL_PATH << 1,
  PREFER_NON_CONTROL_SPEC = PREFER_NON_DATA_SPEC << 1,
  SEL_SCHED_SPEC_DONT_CHECK_CONTROL = PREFER_NON_CONTROL_SPEC << 1
};

#define NOTE_NOT_BB_P(NOTE) (NOTE_P (NOTE) && (NOTE_KIND (NOTE) \
                                               != NOTE_INSN_BASIC_BLOCK))

extern FILE *sched_dump;
extern int sched_verbose;

extern spec_info_t spec_info;
extern bool haifa_recovery_bb_ever_added_p;

/* Exception Free Loads:

   We define five classes of speculative loads: IFREE, IRISKY,
   PFREE, PRISKY, and MFREE.

   IFREE loads are loads that are proved to be exception-free, just
   by examining the load insn.  Examples for such loads are loads
   from TOC and loads of global data.

   IRISKY loads are loads that are proved to be exception-risky,
   just by examining the load insn.  Examples for such loads are
   volatile loads and loads from shared memory.

   PFREE loads are loads for which we can prove, by examining other
   insns, that they are exception-free.  Currently, this class consists
   of loads for which we are able to find a "similar load", either in
   the target block, or, if only one split-block exists, in that split
   block.  Load2 is similar to load1 if both have same single base
   register.  We identify only part of the similar loads, by finding
   an insn upon which both load1 and load2 have a DEF-USE dependence.

   PRISKY loads are loads for which we can prove, by examining other
   insns, that they are exception-risky.  Currently we have two proofs for
   such loads.  The first proof detects loads that are probably guarded by a
   test on the memory address.  This proof is based on the
   backward and forward data dependence information for the region.
   Let load-insn be the examined load.
   Load-insn is PRISKY iff ALL the following hold:

   - insn1 is not in the same block as load-insn
   - there is a DEF-USE dependence chain (insn1, ..., load-insn)
   - test-insn is either a compare or a branch, not in the same block
     as load-insn
   - load-insn is reachable from test-insn
   - there is a DEF-USE dependence chain (insn1, ..., test-insn)

   This proof might fail when the compare and the load are fed
   by an insn not in the region.  To solve this, we will add to this
   group all loads that have no input DEF-USE dependence.

   The second proof detects loads that are directly or indirectly
   fed by a speculative load.  This proof is affected by the
   scheduling process.  We will use the flag fed_by_spec_load.
   Initially, all insns have this flag reset.  After a speculative
   motion of an insn, if insn is either a load, or marked as
   fed_by_spec_load, we will also mark as fed_by_spec_load every
   insn1 for which a DEF-USE dependence (insn, insn1) exists.  A
   load which is fed_by_spec_load is also PRISKY.

   MFREE (maybe-free) loads are all the remaining loads.  They may be
   exception-free, but we cannot prove it.

   Now, all loads in IFREE and PFREE classes are considered
   exception-free, while all loads in IRISKY and PRISKY classes are
   considered exception-risky.  As for loads in the MFREE class,
   these are considered either exception-free or exception-risky,
   depending on whether we are pessimistic or optimistic.  We have
   to take the pessimistic approach to assure the safety of
   speculative scheduling, but we can take the optimistic approach
   by invoking the -fsched-spec-load-dangerous option.  */

enum INSN_TRAP_CLASS
{
  TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
  PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
};
|
| 1156 |
|
|
|
| 1157 |
|
|
#define WORST_CLASS(class1, class2) \
|
| 1158 |
|
|
((class1 > class2) ? class1 : class2)

#ifndef __GNUC__
#define __inline
#endif

#ifndef HAIFA_INLINE
#define HAIFA_INLINE __inline
#endif

struct sched_deps_info_def
{
  /* Called when computing dependencies for a JUMP_INSN.  This function
     should store the set of registers that must be considered as set by
     the jump in the regset.  */
  void (*compute_jump_reg_dependencies) (rtx, regset);

  /* Start analyzing insn.  */
  void (*start_insn) (rtx);

  /* Finish analyzing insn.  */
  void (*finish_insn) (void);

  /* Start analyzing insn LHS (Left Hand Side).  */
  void (*start_lhs) (rtx);

  /* Finish analyzing insn LHS.  */
  void (*finish_lhs) (void);

  /* Start analyzing insn RHS (Right Hand Side).  */
  void (*start_rhs) (rtx);

  /* Finish analyzing insn RHS.  */
  void (*finish_rhs) (void);

  /* Note set of the register.  */
  void (*note_reg_set) (int);

  /* Note clobber of the register.  */
  void (*note_reg_clobber) (int);

  /* Note use of the register.  */
  void (*note_reg_use) (int);

  /* Note memory dependence of type DS between MEM1 and MEM2 (which is
     in the INSN2).  */
  void (*note_mem_dep) (rtx mem1, rtx mem2, rtx insn2, ds_t ds);

  /* Note a dependence of type DS from the INSN.  */
  void (*note_dep) (rtx insn, ds_t ds);

  /* Nonzero if we should use cselib for better alias analysis.  This
     must be 0 if the dependency information is used after sched_analyze
     has completed, e.g. if we're using it to initialize state for successor
     blocks in region scheduling.  */
  unsigned int use_cselib : 1;

  /* If set, generate links between instructions as DEPS_LIST.
     Otherwise, generate usual INSN_LIST links.  */
  unsigned int use_deps_list : 1;

  /* Generate data and control speculative dependencies.
     Requires USE_DEPS_LIST set.  */
  unsigned int generate_spec_deps : 1;
};

extern struct sched_deps_info_def *sched_deps_info;
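
/* Illustrative sketch only: a scheduling front end is expected to point
   sched_deps_info at its own hook table before running dependence
   analysis.  The table below is hypothetical -- it is not one of the real
   haifa/selective-scheduler tables -- and merely shows the shape of such
   an initialization, reusing the haifa_note_reg_* callbacks declared
   later in this file and leaving the remaining hooks NULL:

     static struct sched_deps_info_def my_sched_deps_info =
       {
	 NULL,                    -- compute_jump_reg_dependencies
	 NULL, NULL,              -- start_insn, finish_insn
	 NULL, NULL, NULL, NULL,  -- start/finish lhs and rhs hooks
	 haifa_note_reg_set,
	 haifa_note_reg_clobber,
	 haifa_note_reg_use,
	 NULL, NULL,              -- note_mem_dep, note_dep
	 0, 0, 0                  -- use_cselib, use_deps_list,
				     generate_spec_deps
       };

     sched_deps_info = &my_sched_deps_info;  */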


/* Functions in sched-deps.c.  */
extern rtx sched_get_reverse_condition_uncached (const_rtx);
extern bool sched_insns_conditions_mutex_p (const_rtx, const_rtx);
extern bool sched_insn_is_legitimate_for_speculation_p (const_rtx, ds_t);
extern void add_dependence (rtx, rtx, enum reg_note);
extern void sched_analyze (struct deps_desc *, rtx, rtx);
extern void init_deps (struct deps_desc *, bool);
extern void init_deps_reg_last (struct deps_desc *);
extern void free_deps (struct deps_desc *);
extern void init_deps_global (void);
extern void finish_deps_global (void);
extern void deps_analyze_insn (struct deps_desc *, rtx);
extern void remove_from_deps (struct deps_desc *, rtx);
extern void init_insn_reg_pressure_info (rtx);

extern dw_t get_dep_weak_1 (ds_t, ds_t);
extern dw_t get_dep_weak (ds_t, ds_t);
extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
extern dw_t estimate_dep_weak (rtx, rtx);
extern ds_t ds_merge (ds_t, ds_t);
extern ds_t ds_full_merge (ds_t, ds_t, rtx, rtx);
extern ds_t ds_max_merge (ds_t, ds_t);
extern dw_t ds_weak (ds_t);
extern ds_t ds_get_speculation_types (ds_t);
extern ds_t ds_get_max_dep_weak (ds_t);

extern void sched_deps_init (bool);
extern void sched_deps_finish (void);

extern void haifa_note_reg_set (int);
extern void haifa_note_reg_clobber (int);
extern void haifa_note_reg_use (int);

extern void maybe_extend_reg_info_p (void);

extern void deps_start_bb (struct deps_desc *, rtx);
extern enum reg_note ds_to_dt (ds_t);

extern bool deps_pools_are_empty_p (void);
extern void sched_free_deps (rtx, rtx, bool);
extern void extend_dependency_caches (int, bool);

extern void debug_ds (ds_t);


/* Functions in haifa-sched.c.  */
extern void sched_init_region_reg_pressure_info (void);
extern int haifa_classify_insn (const_rtx);
extern void get_ebb_head_tail (basic_block, basic_block, rtx *, rtx *);
extern int no_real_insns_p (const_rtx, const_rtx);

extern int insn_cost (rtx);
extern int dep_cost_1 (dep_t, dw_t);
extern int dep_cost (dep_t);
extern int set_priorities (rtx, rtx);

extern void sched_setup_bb_reg_pressure_info (basic_block, rtx);
extern bool schedule_block (basic_block *);

extern int cycle_issued_insns;
extern int issue_rate;
extern int dfa_lookahead;

extern void ready_sort (struct ready_list *);
extern rtx ready_element (struct ready_list *, int);
extern rtx *ready_lastpos (struct ready_list *);

extern int try_ready (rtx);
extern void sched_extend_ready_list (int);
extern void sched_finish_ready_list (void);
extern void sched_change_pattern (rtx, rtx);
extern int sched_speculate_insn (rtx, ds_t, rtx *);
extern void unlink_bb_notes (basic_block, basic_block);
extern void add_block (basic_block, basic_block);
extern rtx bb_note (basic_block);
extern void concat_note_lists (rtx, rtx *);
extern rtx sched_emit_insn (rtx);
extern rtx get_ready_element (int);
extern int number_in_ready (void);

/* Types and functions in sched-ebb.c.  */

extern basic_block schedule_ebb (rtx, rtx, bool);
extern void schedule_ebbs_init (void);
extern void schedule_ebbs_finish (void);

/* Types and functions in sched-rgn.c.  */

/* A region is the main entity for interblock scheduling: insns
   are allowed to move between blocks in the same region, along
   control flow graph edges, in the 'up' direction.  */
typedef struct
{
  /* Number of extended basic blocks in region.  */
  int rgn_nr_blocks;
  /* Index in rgn_bb_table of the region's first block.  */
  int rgn_blocks;
  /* Dependencies for this region are already computed.  Basically,
     indicates that this is a recovery block.  */
  unsigned int dont_calc_deps : 1;
  /* This region has at least one non-trivial ebb.  */
  unsigned int has_real_ebb : 1;
}
region;

extern int nr_regions;
extern region *rgn_table;
extern int *rgn_bb_table;
extern int *block_to_bb;
extern int *containing_rgn;

/* Often used short-hand in the scheduler.  The rest of the compiler uses
   BLOCK_FOR_INSN(INSN) and an indirect reference to get the basic block
   number ("index").  For historical reasons, the scheduler does not.  */
#define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0)

#define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
#define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
#define RGN_DONT_CALC_DEPS(rgn) (rgn_table[rgn].dont_calc_deps)
#define RGN_HAS_REAL_EBB(rgn) (rgn_table[rgn].has_real_ebb)
#define BLOCK_TO_BB(block) (block_to_bb[block])
#define CONTAINING_RGN(block) (containing_rgn[block])
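
/* Illustrative sketch only (not code from sched-rgn.c): given a region
   number RGN, its blocks occupy a contiguous slice of rgn_bb_table
   starting at RGN_BLOCKS (RGN), so a typical walk over them looks like:

     int bb;

     for (bb = 0; bb < RGN_NR_BLOCKS (rgn); bb++)
       {
	 int block = rgn_bb_table[RGN_BLOCKS (rgn) + bb];

	 gcc_assert (CONTAINING_RGN (block) == rgn);
	 ...
       }
*/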

/* The mapping from ebb to block.  */
extern int *ebb_head;
#define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]])
#define EBB_FIRST_BB(ebb) BASIC_BLOCK (BB_TO_BLOCK (ebb))
#define EBB_LAST_BB(ebb) BASIC_BLOCK (rgn_bb_table[ebb_head[ebb + 1] - 1])
#define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
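
/* Illustrative sketch only: within the current region the extended basic
   blocks are numbered 0 .. current_nr_blocks - 1 (current_nr_blocks is
   declared just below), and the macros above recover their first and
   last basic blocks, e.g.:

     int ebb;

     for (ebb = 0; ebb < current_nr_blocks; ebb++)
       {
	 basic_block first = EBB_FIRST_BB (ebb);
	 basic_block last = EBB_LAST_BB (ebb);
	 ...
       }
*/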

extern int current_nr_blocks;
extern int current_blocks;
extern int target_bb;
extern bool sched_no_dce;

extern void set_modulo_params (int, int, int, int);
extern void record_delay_slot_pair (rtx, rtx, int, int);
extern rtx real_insn_for_shadow (rtx);
extern void discard_delay_pairs_above (int);
extern void free_delay_pairs (void);
extern void add_delay_dependencies (rtx);
extern bool sched_is_disabled_for_current_region_p (void);
extern void sched_rgn_init (bool);
extern void sched_rgn_finish (void);
extern void rgn_setup_region (int);
extern void sched_rgn_compute_dependencies (int);
extern void sched_rgn_local_init (int);
extern void sched_rgn_local_finish (void);
extern void sched_rgn_local_free (void);
extern void extend_regions (void);
extern void rgn_make_new_region_out_of_new_block (basic_block);

extern void compute_priorities (void);
extern void increase_insn_priority (rtx, int);
extern void debug_rgn_dependencies (int);
extern void debug_dependencies (rtx, rtx);
extern void free_rgn_deps (void);
extern int contributes_to_priority (rtx, rtx);
extern void extend_rgns (int *, int *, sbitmap, int *);
extern void deps_join (struct deps_desc *, struct deps_desc *);

extern void rgn_setup_common_sched_info (void);
extern void rgn_setup_sched_infos (void);

extern void debug_regions (void);
extern void debug_region (int);
extern void dump_region_dot (FILE *, int);
extern void dump_region_dot_file (const char *, int);

extern void haifa_sched_init (void);
extern void haifa_sched_finish (void);

/* sched-deps.c interface to walk, add, search, update, resolve, delete
   and debug instruction dependencies.  */

/* Constants defining dependences lists.  */

/* No list.  */
#define SD_LIST_NONE (0)

/* hard_back_deps.  */
#define SD_LIST_HARD_BACK (1)

/* spec_back_deps.  */
#define SD_LIST_SPEC_BACK (2)

/* forw_deps.  */
#define SD_LIST_FORW (4)

/* resolved_back_deps.  */
#define SD_LIST_RES_BACK (8)

/* resolved_forw_deps.  */
#define SD_LIST_RES_FORW (16)

#define SD_LIST_BACK (SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK)

/* A type to hold above flags.  */
typedef int sd_list_types_def;
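
/* Illustrative note: the SD_LIST_* values are single bits, so any
   combination of lists can be requested at once by OR-ing them, just as
   SD_LIST_BACK combines the hard and speculative backward lists.  For
   example (a sketch, not code from the scheduler):

     if (sd_lists_empty_p (insn, SD_LIST_BACK | SD_LIST_RES_BACK))
       ...;   -- no backward dependencies of any kind recorded for INSN

   sd_lists_empty_p and the other sd_* entry points are declared further
   below.  */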

extern void sd_next_list (const_rtx, sd_list_types_def *, deps_list_t *, bool *);

/* Iterator to walk through, resolve and delete dependencies.  */
struct _sd_iterator
{
  /* What lists to walk.  Can be any combination of SD_LIST_* flags.  */
  sd_list_types_def types;

  /* Instruction whose dependence lists are being walked.  */
  rtx insn;

  /* Pointer to the next field of the previous element.  This is not
     simply a pointer to the next element to allow easy deletion from the
     list.  When a dep is being removed from the list the iterator
     will automatically advance because the value in *linkp will start
     referring to the next element.  */
  dep_link_t *linkp;

  /* True if the current list is a resolved one.  */
  bool resolved_p;
};

typedef struct _sd_iterator sd_iterator_def;

/* ??? We can move some definitions that are used in below inline functions
   out of sched-int.h to sched-deps.c provided that the below functions will
   become global externals.
   These definitions include:
   * struct _deps_list: opaque pointer is needed at global scope.
   * struct _dep_link: opaque pointer is needed at scope of sd_iterator_def.
   * struct _dep_node: opaque pointer is needed at scope of
     struct _dep_link.  */

/* Return initialized iterator.  */
static inline sd_iterator_def
sd_iterator_start (rtx insn, sd_list_types_def types)
{
  /* A dep_link such that a pointer to it dereferences to NULL.  */
  static dep_link_t null_link = NULL;

  sd_iterator_def i;

  i.types = types;
  i.insn = insn;
  i.linkp = &null_link;

  /* Avoid 'uninitialized' warning.  */
  i.resolved_p = false;

  return i;
}

/* Return the current element.  */
static inline bool
sd_iterator_cond (sd_iterator_def *it_ptr, dep_t *dep_ptr)
{
  dep_link_t link = *it_ptr->linkp;

  if (link != NULL)
    {
      *dep_ptr = DEP_LINK_DEP (link);
      return true;
    }
  else
    {
      sd_list_types_def types = it_ptr->types;

      if (types != SD_LIST_NONE)
	/* Switch to next list.  */
	{
	  deps_list_t list;

	  sd_next_list (it_ptr->insn,
			&it_ptr->types, &list, &it_ptr->resolved_p);

	  it_ptr->linkp = &DEPS_LIST_FIRST (list);

	  if (list)
	    return sd_iterator_cond (it_ptr, dep_ptr);
	}

      *dep_ptr = NULL;
      return false;
    }
}

/* Advance iterator.  */
static inline void
sd_iterator_next (sd_iterator_def *it_ptr)
{
  it_ptr->linkp = &DEP_LINK_NEXT (*it_ptr->linkp);
}

/* A loop wrapper: walk all dependencies of INSN that live on the lists
   selected by LIST_TYPES.  */
#define FOR_EACH_DEP(INSN, LIST_TYPES, ITER, DEP) \
  for ((ITER) = sd_iterator_start ((INSN), (LIST_TYPES)); \
       sd_iterator_cond (&(ITER), &(DEP)); \
       sd_iterator_next (&(ITER)))
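
/* Illustrative sketch only (not code lifted from the scheduler): a
   typical walk over the hard and speculative backward dependencies of
   an insn, using the DEP_PRO accessor from the dependence representation
   earlier in this header to reach each producer:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       {
	 rtx pro = DEP_PRO (dep);
	 ...
       }
*/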

#define IS_DISPATCH_ON 1
#define IS_CMP 2
#define DISPATCH_VIOLATION 3
#define FITS_DISPATCH_WINDOW 4
#define DISPATCH_INIT 5
#define ADD_TO_DISPATCH_WINDOW 6

extern int sd_lists_size (const_rtx, sd_list_types_def);
extern bool sd_lists_empty_p (const_rtx, sd_list_types_def);
extern void sd_init_insn (rtx);
extern void sd_finish_insn (rtx);
extern dep_t sd_find_dep_between (rtx, rtx, bool);
extern void sd_add_dep (dep_t, bool);
extern enum DEPS_ADJUST_RESULT sd_add_or_update_dep (dep_t, bool);
extern void sd_resolve_dep (sd_iterator_def);
extern void sd_unresolve_dep (sd_iterator_def);
extern void sd_copy_back_deps (rtx, rtx, bool);
extern void sd_delete_dep (sd_iterator_def);
extern void sd_debug_lists (rtx, sd_list_types_def);
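
/* Illustrative sketch only: because the iterator keeps a pointer to the
   previous element's next field (see struct _sd_iterator above), deleting
   the current dependence implicitly advances the walk, so the usual
   pattern advances the iterator explicitly only when nothing was deleted:

     sd_iterator_def sd_it;
     dep_t dep;

     for (sd_it = sd_iterator_start (insn, SD_LIST_FORW);
	  sd_iterator_cond (&sd_it, &dep);)
       {
	 if (some_condition_p (dep))   -- hypothetical predicate
	   sd_delete_dep (sd_it);
	 else
	   sd_iterator_next (&sd_it);
       }
*/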

#endif /* INSN_SCHEDULING */

/* Functions in sched-vis.c.  These must be outside INSN_SCHEDULING as
   sched-vis.c is always compiled.  */
extern void print_insn (char *, const_rtx, int);
extern void print_pattern (char *, const_rtx, int);
extern void print_value (char *, const_rtx, int);

#endif /* GCC_SCHED_INT_H */