/* Instruction scheduling pass.  Selective scheduler and pipeliner.
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "toplev.h"
#include "rtl.h"
#include "tm_p.h"
#include "hard-reg-set.h"
#include "regs.h"
#include "function.h"
#include "flags.h"
#include "insn-config.h"
#include "insn-attr.h"
#include "except.h"
#include "recog.h"
#include "params.h"
#include "target.h"
#include "output.h"
#include "timevar.h"
#include "tree-pass.h"
#include "sched-int.h"
#include "ggc.h"
#include "tree.h"
#include "vec.h"
#include "langhooks.h"
#include "rtlhooks-def.h"

#ifdef INSN_SCHEDULING
#include "sel-sched-ir.h"
#include "sel-sched-dump.h"
#include "sel-sched.h"
#include "dbgcnt.h"

/* Implementation of selective scheduling approach.
   The below implementation follows the original approach with the following
   changes:

   o the scheduler works after register allocation (but can also be tuned
   to work before RA);
   o some instructions are not copied or register renamed;
   o conditional jumps are not moved with code duplication;
   o several jumps in one parallel group are not supported;
   o when pipelining outer loops, code motion through inner loops
   is not supported;
   o control and data speculation are supported;
   o some improvements for better compile time/performance were made.

   Terminology
   ===========

   A vinsn, or virtual insn, is an insn with additional data characterizing
   the insn pattern, such as LHS, RHS, register sets used/set/clobbered, etc.
   Vinsns also act as smart pointers to save memory by reusing them in
   different expressions.  A vinsn is described by the vinsn_t type.

   An expression is a vinsn with additional data characterizing its properties
   at some point in the control flow graph.  The data may be its usefulness,
   priority, speculative status, whether it was renamed/substituted, etc.
   An expression is described by the expr_t type.

   An availability set (av_set) is a set of expressions at a given control
   flow point.  It is represented as av_set_t.  The expressions in av sets
   are kept sorted in terms of the expr_greater_p function, which allows
   truncating the set while keeping the best expressions.

   A fence is a point through which code motion is prohibited.  On each step,
   we gather a parallel group of insns at a fence.  It is possible to have
   multiple fences.  A fence is represented via fence_t.

   A boundary is the border between the fence group and the rest of the code.
   Currently, we never have more than one boundary per fence, as we finalize
   the fence group when a jump is scheduled.  A boundary is represented
   via bnd_t.

   High-level overview
   ===================

   The scheduler finds regions to schedule, schedules each one, and finalizes.
   The regions are formed starting from innermost loops, so that when an inner
   loop is pipelined, its prologue can be scheduled together with the yet
   unprocessed outer loop.  The remaining acyclic regions are found using
   extend_rgns: the blocks that are not yet allocated to any region are
   traversed in top-down order, and a block is added to the region to which
   all its predecessors belong; otherwise, the block starts its own region.

   The main scheduling loop (sel_sched_region_2) consists of just
   scheduling on each fence and updating fences.  For each fence,
   we fill a parallel group of insns (fill_insns) while insns can be added.
   First, we compute the available exprs (av-set) at the boundary of the
   current group.  Second, we choose the best expression from it.  If a
   stall is required to schedule any of the expressions, we advance the
   current cycle appropriately.  So, the final group does not exactly
   correspond to a VLIW word.  Third, we move the chosen expression to the
   boundary (move_op) and update the intermediate av sets and liveness sets.
   We quit fill_insns when either no insns are left for scheduling or we
   have scheduled enough insns to feel like advancing a scheduling point.

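   In pseudocode, one round of scheduling looks roughly like this (a
   simplified sketch of the flow described above, not the exact code):

     sel_sched_region_2:
       while (fences remain)
         for each fence F:
           fill_insns (F):
             loop:
               av <- compute_av_set (boundary of F)
               best <- the best expression from av, advancing the
                       current cycle if a stall is required
               if (av is empty or we scheduled enough insns)
                 break
               move_op (best)  -- move it to the boundary, updating
                                  the av and liveness sets on the way
         advance or regenerate the fences (extract_new_fences_from)
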
   Computing available expressions
   ===============================

   The computation (compute_av_set) is a bottom-up traversal.  At each insn,
   we're moving the union of its successors' sets through it via
   moveup_expr_set.  The dependent expressions are removed.  Local
   transformations (substitution, speculation) are applied to move more
   exprs.  Then the expr corresponding to the current insn is added.
   The result is saved on each basic block header.

   When traversing the CFG, we're moving down for no more than max_ws insns.
   Also, we do not move down to ineligible successors (is_ineligible_successor),
   which include moving along a back-edge, moving to already scheduled code,
   and moving to another fence.  The first two restrictions are lifted during
   pipelining, which allows us to move insns along a back-edge.  We always have
   an acyclic region for scheduling because we forbid motion through fences.

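   Schematically (a simplified sketch; the real compute_av_set also honors
   max_ws, ineligible successors, and the caches described below):

     compute_av_set (insn):
       if (the av set saved at insn's bb header is valid)
         return it
       av <- union over normal successors succ of compute_av_set (succ)
       remove from av the expressions that cannot be moved through insn;
       transform the others where possible (moveup_expr_set)
       add the expression corresponding to insn itself
       save av at the bb header if insn heads a basic block
       return av
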
   Choosing the best expression
   ============================

   We sort the final availability set via sel_rank_for_schedule, then we
   remove expressions which are not yet ready (tick_check_p) or whose dest
   registers cannot be used.  For some of them, we choose another register
   via find_best_reg.  To do this, we run find_used_regs to calculate the
   set of registers which cannot be used.  The find_used_regs function
   performs a traversal of code motion paths for an expr.  We consider for
   renaming only registers which are from the same regclass as the original
   one and whose use does not interfere with any live ranges.  Finally, we
   convert the resulting set to the ready list format and use the max_issue
   and reorder* hooks similarly to the Haifa scheduler.

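   Schematically, the selection step is (a condensed sketch of the above):

     sort the av set with sel_rank_for_schedule
     drop exprs not ready on this cycle (tick_check_p)
         and exprs whose dest register cannot be used or renamed
     for candidates needing a new register:
       find_used_regs  -- collect regs busy on all code motion paths
       find_best_reg   -- pick a same-class, non-interfering register
     convert the remaining exprs to the ready list;
     use the max_issue and reorder* hooks as the Haifa scheduler does
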
   Scheduling the best expression
   ==============================

   We run the move_op routine to perform the same type of code motion path
   traversal as in find_used_regs.  (Both work via the same driver,
   code_motion_path_driver.)  When moving down the CFG, we look for the
   original instruction that gave birth to the chosen expression.  We undo
   the transformations performed on the expression via the history saved in
   it.  When found, we remove the instruction or leave a reg-reg copy or
   speculation check if needed.  On the way up, we insert bookkeeping copies
   at each join point.  If a copy is not needed, it will be removed later
   during this traversal.  We update the saved av sets and liveness sets on
   the way up, too.

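   For example (an illustrative sketch): suppose the insn "r1 = r2" sits in
   a block bb3 that joins two paths, one through bb1 and one through bb2,
   and move_op moves the expression up only along the path through bb1.
   The original insn is removed from bb3, and on the way up a bookkeeping
   copy "r1 = r2" is inserted at the join point on the bb2 side, so the
   value is still computed on the path the expression was not moved along.
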
   Finalizing the schedule
   =======================

   When pipelining, we reschedule the blocks from which insns were pipelined
   to get a tighter schedule.  On Itanium, we also perform bundling via
   the same routine from ia64.c.

   Dependence analysis changes
   ===========================

   We augmented sched-deps.c with hooks that get called when a particular
   dependence is found in a particular part of an insn.  Using these hooks,
   we can do several actions such as: determine whether an insn can be moved
   through another (has_dependence_p, moveup_expr); find out whether an insn
   can be scheduled on the current cycle (tick_check_p); find out the
   registers that are set/used/clobbered by an insn and find out all the
   strange stuff that restricts its movement, like SCHED_GROUP_P or
   CANT_MOVE (done in init_global_and_expr_for_insn).

   Initialization changes
   ======================

   There are parts of haifa-sched.c, sched-deps.c, and sched-rgn.c that are
   reused in all of the schedulers.  We have split up the initialization of
   data of such parts into different functions prefixed with the scheduler
   type and postfixed with the type of data initialized:
   {,sel_,haifa_}sched_{init,finish}, sched_rgn_init/finish,
   sched_deps_init/finish, sched_init_{luids/bbs}, etc.  The same splitting
   is done with the current_sched_info structure: dependence-related parts
   are in sched_deps_info, the common part is in common_sched_info, and the
   haifa/sel/etc. part is in current_sched_info.

   Target contexts
   ===============

   As we now have multiple-point scheduling, this does not work with
   backends that save some of the scheduler state to use it in the target
   hooks.  For this purpose, we introduce the concept of target contexts,
   which encapsulate such information.  The backend should implement simple
   routines for allocating/freeing/setting such a context.  The scheduler
   calls these as target hooks and handles the target context as an opaque
   pointer (similar to the DFA state type, state_t).

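   In this GCC version these routines correspond to the sched target hooks
   TARGET_SCHED_{ALLOC,INIT,SET,CLEAR,FREE}_SCHED_CONTEXT.  A backend
   sketch (for a hypothetical backend "foo"; ia64 is the real user) could
   look like:

     static void *
     foo_alloc_sched_context (void)
     {
       return xmalloc (sizeof (struct foo_sched_context));
     }

     static void
     foo_set_sched_context (void *ctx)
     {
       ... copy the state in ctx back into the backend's globals ...
     }

   with the scheduler treating the returned pointer as opaque.
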
   Various speedups
   ================

   As the correct data dependence graph is not supported during scheduling
   (which is to be changed in the mid-term), we cache as much of the
   dependence analysis results as possible to avoid reanalyzing.  This
   includes: bitmap caches on each insn in the stream of the region saying
   yes/no for a query with a pair of UIDs; hashtables with the previously
   done transformations on each insn in the stream; and a vector keeping a
   history of transformations on each expr.

   Also, we try to minimize the dependence context used on each fence to
   check whether the given expression is ready for scheduling by removing
   from it insns that have definitely completed execution.  The results of
   tick_check_p checks are also cached in a vector on each fence.

   We keep a valid liveness set on each insn in a region to avoid the high
   cost of recomputation on large basic blocks.

   Finally, we try to minimize the number of needed updates to the
   availability sets.  The updates happen in two cases: when fill_insns
   terminates, we advance all fences and increase the stage number to show
   that the region has changed and the sets are to be recomputed; and when
   the next iteration of a loop in fill_insns happens (but this one reuses
   the saved av sets on bb headers).  Thus, we try to break the fill_insns
   loop only when a "significant" number of insns from the current
   scheduling window has been scheduled.  This should be made a target
   param.


   TODO: correctly support the data dependence graph at all stages and get
   rid of all caches.  This should speed up the scheduler.
   TODO: implement moving cond jumps with bookkeeping copies on both targets.
   TODO: tune the scheduler before RA so it does not create too many pseudos.


   References:
   S.-M. Moon and K. Ebcioglu.  Parallelizing nonnumerical code with
   selective scheduling and software pipelining.
   ACM TOPLAS, Vol. 19, No. 6, pages 853--898, Nov. 1997.

   Andrey Belevantsev, Maxim Kuvyrkov, Vladimir Makarov, Dmitry Melnik,
   and Dmitry Zhurikhin.  An interblock VLIW-targeted instruction scheduler
   for GCC.  In Proceedings of GCC Developers' Summit 2006.

   Arutyun Avetisyan, Andrey Belevantsev, and Dmitry Melnik.  GCC Instruction
   Scheduler and Software Pipeliner on the Itanium Platform.  EPIC-7 Workshop.
   http://rogue.colorado.edu/EPIC7/.
*/

/* True when pipelining is enabled.  */
bool pipelining_p;

/* True if bookkeeping is enabled.  */
bool bookkeeping_p;

/* Maximum number of insns that are eligible for renaming.  */
int max_insns_to_rename;


/* Definitions of local types and macros.  */

/* Represents possible outcomes of moving an expression through an insn.  */
enum MOVEUP_EXPR_CODE
  {
    /* The expression is not changed.  */
    MOVEUP_EXPR_SAME,

    /* Not changed, but requires a new destination register.  */
    MOVEUP_EXPR_AS_RHS,

    /* Cannot be moved.  */
    MOVEUP_EXPR_NULL,

    /* Changed (substituted or speculated).  */
    MOVEUP_EXPR_CHANGED
  };

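/* For illustration (hypothetical RTL, not from a real target): moving
   "r1 = r2 + r3" up through "r4 = r5" leaves it intact (MOVEUP_EXPR_SAME);
   moving it through "r1 = r5" can keep the rhs "r2 + r3" alive but requires
   a destination other than r1 (MOVEUP_EXPR_AS_RHS); moving it through the
   copy "r2 = r5" may substitute to "r1 = r5 + r3" (MOVEUP_EXPR_CHANGED);
   and a true dependence with no applicable transformation blocks the
   motion (MOVEUP_EXPR_NULL).  */
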
/* The container to be passed into rtx search & replace functions.  */
struct rtx_search_arg
{
  /* What we are searching for.  */
  rtx x;

  /* The occurrence counter.  */
  int n;
};

typedef struct rtx_search_arg *rtx_search_arg_p;

/* This struct contains precomputed hard reg sets that are needed when
   computing registers available for renaming.  */
struct hard_regs_data
{
  /* For every mode, this stores registers available for use with
     that mode.  */
  HARD_REG_SET regs_for_mode[NUM_MACHINE_MODES];

  /* True when regs_for_mode[mode] is initialized.  */
  bool regs_for_mode_ok[NUM_MACHINE_MODES];

  /* For every register, it has regs that are ok to rename into it.
     The register in question is always set.  If not, this means
     that the whole set is not computed yet.  */
  HARD_REG_SET regs_for_rename[FIRST_PSEUDO_REGISTER];

  /* For every mode, this stores registers not available due to
     call clobbering.  */
  HARD_REG_SET regs_for_call_clobbered[NUM_MACHINE_MODES];

  /* All registers that are used or call used.  */
  HARD_REG_SET regs_ever_used;

#ifdef STACK_REGS
  /* Stack registers.  */
  HARD_REG_SET stack_regs;
#endif
};

/* Holds the results of computing the hard registers that are available
   for renaming and those that are unavailable.  */
struct reg_rename
{
  /* These are unavailable due to calls crossing, globalness, etc.  */
  HARD_REG_SET unavailable_hard_regs;

  /* These are *available* for renaming.  */
  HARD_REG_SET available_for_renaming;

  /* Whether this code motion path crosses a call.  */
  bool crosses_call;
};

/* A global structure that contains the needed information about hard
   regs.  */
static struct hard_regs_data sel_hrd;


/* This structure holds local data used in code_motion_path_driver hooks on
   the same or adjacent levels of recursion.  Here we keep those parameters
   that are not used in the code_motion_path_driver routine itself, but only
   in its hooks.  Moreover, all parameters that can be modified in hooks are
   in this structure, so all other parameters passed explicitly to hooks are
   read-only.  */
struct cmpd_local_params
{
  /* Local params used in move_op_* functions.  */

  /* Edges for bookkeeping generation.  */
  edge e1, e2;

  /* C_EXPR merged from all successors and locally allocated temporary C_EXPR.  */
  expr_t c_expr_merged, c_expr_local;

  /* Local params used in fur_* functions.  */
  /* Copy of the ORIGINAL_INSN list, stores the original insns already
     found before entering the current level of code_motion_path_driver.  */
  def_list_t old_original_insns;

  /* Local params used in move_op_* functions.  */
  /* True when we have removed the last insn in the block which was
     also a boundary.  Do not update anything or create bookkeeping copies.  */
  BOOL_BITFIELD removed_last_insn : 1;
};

/* Stores the static parameters for move_op_* calls.  */
struct moveop_static_params
{
  /* Destination register.  */
  rtx dest;

  /* Current C_EXPR.  */
  expr_t c_expr;

  /* A UID of expr_vliw which is to be moved up.  If we find other exprs,
     they are to be removed.  */
  int uid;

#ifdef ENABLE_CHECKING
  /* This is initialized to the insn on which the driver stopped its traversal.  */
  insn_t failed_insn;
#endif

  /* True if we scheduled an insn with a different register.  */
  bool was_renamed;
};

/* Stores the static parameters for fur_* calls.  */
struct fur_static_params
{
  /* Set of registers unavailable on the code motion path.  */
  regset used_regs;

  /* Pointer to the list of original insns definitions.  */
  def_list_t *original_insns;

  /* True if a code motion path contains a CALL insn.  */
  bool crosses_call;
};

typedef struct fur_static_params *fur_static_params_p;
typedef struct cmpd_local_params *cmpd_local_params_p;
typedef struct moveop_static_params *moveop_static_params_p;

/* Set of hooks and parameters that determine behaviour specific to
   move_op or find_used_regs functions.  */
struct code_motion_path_driver_info_def
{
  /* Called on entry to a basic block.  */
  int (*on_enter) (insn_t, cmpd_local_params_p, void *, bool);

  /* Called when the original expr is found.  */
  void (*orig_expr_found) (insn_t, expr_t, cmpd_local_params_p, void *);

  /* Called while descending the current basic block if the current insn is
     not the original EXPR we're searching for.  */
  bool (*orig_expr_not_found) (insn_t, av_set_t, void *);

  /* Function to merge C_EXPRes from different successors.  */
  void (*merge_succs) (insn_t, insn_t, int, cmpd_local_params_p, void *);

  /* Function to finalize merge from different successors and possibly
     deallocate temporary data structures used for merging.  */
  void (*after_merge_succs) (cmpd_local_params_p, void *);

  /* Called on the backward stage of recursion to do moveup_expr.
     Used only with move_op_*.  */
  void (*ascend) (insn_t, void *);

  /* Called on the ascending pass, before returning from the current basic
     block or from the whole traversal.  */
  void (*at_first_insn) (insn_t, cmpd_local_params_p, void *);

  /* When processing successors in move_op we need only descend into
     SUCCS_NORMAL successors, while in find_used_regs we need SUCCS_ALL.  */
  int succ_flags;

  /* The routine name to print in dumps ("move_op" or "find_used_regs").  */
  const char *routine_name;
};

/* Global pointer to current hooks, either points to MOVE_OP_HOOKS or
   FUR_HOOKS.  */
struct code_motion_path_driver_info_def *code_motion_path_driver_info;

/* Set of hooks for performing move_op and find_used_regs routines with
   code_motion_path_driver.  */
extern struct code_motion_path_driver_info_def move_op_hooks, fur_hooks;

/* True if/when we want to emulate the Haifa scheduler in the common code.
   This is used in sched_rgn_local_init and in various places in
   sched-deps.c.  */
int sched_emulate_haifa_p;

/* GLOBAL_LEVEL is used to discard information stored in basic block headers'
   av_sets.  The av_set of a bb header is valid if the bb header's level is
   equal to GLOBAL_LEVEL, and invalid if it is less.  This is primarily used
   to advance the scheduling window.  */
int global_level;

/* Current fences.  */
flist_t fences;

/* True when separable insns should be scheduled as RHSes.  */
static bool enable_schedule_as_rhs_p;

/* Used in verify_target_availability to assert that the target reg is
   reported unavailable by both TARGET_UNAVAILABLE and find_used_regs only
   if we haven't scheduled anything on the previous fence.
   If scheduled_something_on_previous_fence is true, TARGET_UNAVAILABLE can
   have a more conservative value than the one returned by find_used_regs,
   thus we shouldn't assert that these values are equal.  */
static bool scheduled_something_on_previous_fence;

/* All newly emitted insns will have their uids greater than this value.  */
static int first_emitted_uid;

/* Set of basic blocks that are forced to start new ebbs.  This is a subset
   of all the ebb heads.  */
static bitmap_head _forced_ebb_heads;
bitmap_head *forced_ebb_heads = &_forced_ebb_heads;

/* Blocks that need to be rescheduled after pipelining.  */
bitmap blocks_to_reschedule = NULL;

/* True when the first lv set should be ignored when updating liveness.  */
static bool ignore_first = false;

/* Number of insns max_issue has initialized data structures for.  */
static int max_issue_size = 0;

/* Whether we can issue more instructions.  */
static int can_issue_more;

/* Maximum software lookahead window size, reduced when rescheduling after
   pipelining.  */
static int max_ws;

/* Number of insns scheduled in the current region.  */
static int num_insns_scheduled;

/* A vector of expressions, used so that expressions can be sorted.  */
DEF_VEC_P(expr_t);
DEF_VEC_ALLOC_P(expr_t,heap);
static VEC(expr_t, heap) *vec_av_set = NULL;

/* A vector of vinsns is used to hold temporary lists of vinsns.  */
DEF_VEC_P(vinsn_t);
DEF_VEC_ALLOC_P(vinsn_t,heap);
typedef VEC(vinsn_t, heap) *vinsn_vec_t;

/* This vector has the exprs which may still be present in av_sets, but
   actually can't be moved up due to bookkeeping created during code motion
   to another fence.  See the comment near the call to
   update_and_record_unavailable_insns for a detailed explanation.  */
static vinsn_vec_t vec_bookkeeping_blocked_vinsns = NULL;

/* This vector has vinsns which are scheduled with renaming on the first
   fence and then seen on the second.  For expressions with such vinsns,
   target availability information may be wrong.  */
static vinsn_vec_t vec_target_unavailable_vinsns = NULL;

/* Vector to store temporary nops inserted in move_op to prevent removal
   of empty bbs.  */
DEF_VEC_P(insn_t);
DEF_VEC_ALLOC_P(insn_t,heap);
static VEC(insn_t, heap) *vec_temp_moveop_nops = NULL;

/* These bitmaps record original instructions scheduled on the current
   iteration and bookkeeping copies created by them.  */
static bitmap current_originators = NULL;
static bitmap current_copies = NULL;

/* This bitmap marks the blocks visited by code_motion_path_driver so we
   don't visit them afterwards.  */
static bitmap code_motion_visited_blocks = NULL;

/* Variables to accumulate different statistics.  */

/* The number of bookkeeping copies created.  */
static int stat_bookkeeping_copies;

/* The number of insns that required bookkeeping for their scheduling.  */
static int stat_insns_needed_bookkeeping;

/* The number of insns that got renamed.  */
static int stat_renamed_scheduled;

/* The number of substitutions made during scheduling.  */
static int stat_substitutions_total;


/* Forward declarations of static functions.  */
static bool rtx_ok_for_substitution_p (rtx, rtx);
static int sel_rank_for_schedule (const void *, const void *);
static av_set_t find_sequential_best_exprs (bnd_t, expr_t, bool);
static basic_block find_block_for_bookkeeping (edge e1, edge e2, bool lax);

static rtx get_dest_from_orig_ops (av_set_t);
static basic_block generate_bookkeeping_insn (expr_t, edge, edge);
static bool find_used_regs (insn_t, av_set_t, regset, struct reg_rename *,
                            def_list_t *);
static bool move_op (insn_t, av_set_t, expr_t, rtx, expr_t, bool*);
static int code_motion_path_driver (insn_t, av_set_t, ilist_t,
                                    cmpd_local_params_p, void *);
static void sel_sched_region_1 (void);
static void sel_sched_region_2 (int);
static av_set_t compute_av_set_inside_bb (insn_t, ilist_t, int, bool);

static void debug_state (state_t);


/* Functions that work with fences.  */

/* Advance one cycle on FENCE.  */
static void
advance_one_cycle (fence_t fence)
{
  unsigned i;
  int cycle;
  rtx insn;

  advance_state (FENCE_STATE (fence));
  cycle = ++FENCE_CYCLE (fence);
  FENCE_ISSUED_INSNS (fence) = 0;
  FENCE_STARTS_CYCLE_P (fence) = 1;
  can_issue_more = issue_rate;
  FENCE_ISSUE_MORE (fence) = can_issue_more;

  for (i = 0; VEC_iterate (rtx, FENCE_EXECUTING_INSNS (fence), i, insn); )
    {
      if (INSN_READY_CYCLE (insn) < cycle)
        {
          remove_from_deps (FENCE_DC (fence), insn);
          VEC_unordered_remove (rtx, FENCE_EXECUTING_INSNS (fence), i);
          continue;
        }
      i++;
    }
  if (sched_verbose >= 2)
    {
      sel_print ("Finished a cycle.  Current cycle = %d\n", FENCE_CYCLE (fence));
      debug_state (FENCE_STATE (fence));
    }
}

/* Returns true when SUCC is in a fallthru bb of INSN, possibly
   skipping empty basic blocks.  */
static bool
in_fallthru_bb_p (rtx insn, rtx succ)
{
  basic_block bb = BLOCK_FOR_INSN (insn);

  if (bb == BLOCK_FOR_INSN (succ))
    return true;

  if (find_fallthru_edge (bb))
    bb = find_fallthru_edge (bb)->dest;
  else
    return false;

  while (sel_bb_empty_p (bb))
    bb = bb->next_bb;

  return bb == BLOCK_FOR_INSN (succ);
}

/* Construct successor fences from OLD_FENCEs and put them in NEW_FENCES.
   When a successor will continue an ebb, transfer all parameters of a fence
   to the new fence.  ORIG_MAX_SEQNO is the maximal seqno before this round
   of scheduling, helping to distinguish between the old and the new code.  */
static void
extract_new_fences_from (flist_t old_fences, flist_tail_t new_fences,
                         int orig_max_seqno)
{
  bool was_here_p = false;
  insn_t insn = NULL_RTX;
  insn_t succ;
  succ_iterator si;
  ilist_iterator ii;
  fence_t fence = FLIST_FENCE (old_fences);
  basic_block bb;

  /* Get the only element of FENCE_BNDS (fence).  */
  FOR_EACH_INSN (insn, ii, FENCE_BNDS (fence))
    {
      gcc_assert (!was_here_p);
      was_here_p = true;
    }
  gcc_assert (was_here_p && insn != NULL_RTX);

  /* When in the "middle" of the block, just move this fence
     to the new list.  */
  bb = BLOCK_FOR_INSN (insn);
  if (! sel_bb_end_p (insn)
      || (single_succ_p (bb)
          && single_pred_p (single_succ (bb))))
    {
      insn_t succ;

      succ = (sel_bb_end_p (insn)
              ? sel_bb_head (single_succ (bb))
              : NEXT_INSN (insn));

      if (INSN_SEQNO (succ) > 0
          && INSN_SEQNO (succ) <= orig_max_seqno
          && INSN_SCHED_TIMES (succ) <= 0)
        {
          FENCE_INSN (fence) = succ;
          move_fence_to_fences (old_fences, new_fences);

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state continue)\n",
                       INSN_UID (insn), INSN_UID (succ), BLOCK_NUM (succ));
        }
      return;
    }

  /* Otherwise copy the fence's structures to (possibly) multiple successors.  */
  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      int seqno = INSN_SEQNO (succ);

      if (0 < seqno && seqno <= orig_max_seqno
          && (pipelining_p || INSN_SCHED_TIMES (succ) <= 0))
        {
          bool b = (in_same_ebb_p (insn, succ)
                    || in_fallthru_bb_p (insn, succ));

          if (sched_verbose >= 1)
            sel_print ("Fence %d continues as %d[%d] (state %s)\n",
                       INSN_UID (insn), INSN_UID (succ),
                       BLOCK_NUM (succ), b ? "continue" : "reset");

          if (b)
            add_dirty_fence_to_fences (new_fences, succ, fence);
          else
            {
              /* Mark the block of SUCC as the head of a new ebb.  */
              bitmap_set_bit (forced_ebb_heads, BLOCK_NUM (succ));
              add_clean_fence_to_fences (new_fences, succ, fence);
            }
        }
    }
}


/* Functions to support substitution.  */

/* Returns whether INSN with dependence status DS is eligible for
   substitution, i.e. it's a copy operation x := y, and the RHS that is
   moved up through this insn should be substituted.  */
static bool
can_substitute_through_p (insn_t insn, ds_t ds)
{
  /* We can substitute only true dependencies.  */
  if ((ds & DEP_OUTPUT)
      || (ds & DEP_ANTI)
      || ! INSN_RHS (insn)
      || ! INSN_LHS (insn))
    return false;

  /* Now we just need to make sure the INSN_RHS consists of only one
     simple REG rtx.  */
  if (REG_P (INSN_LHS (insn))
      && REG_P (INSN_RHS (insn)))
    return true;
  return false;
}

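/* For example (an illustrative sketch): when the expression "r5 = r1 + 8"
   is moved up through the copy insn "r1 = r2", the true dependence on r1
   is resolved by substitution, and the expression continues upward as
   "r5 = r2 + 8".  */
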
/* Substitute all occurrences of INSN's destination in EXPR's vinsn with
   INSN's source (if INSN is eligible for substitution).  Returns TRUE if
   the substitution was actually performed, FALSE otherwise.  Substitution
   might not be performed because either EXPR's vinsn doesn't contain INSN's
   destination or the resulting insn is invalid for the target machine.
   When UNDO is true, perform unsubstitution instead (the difference is in
   the part of the rtx on which validate_replace_rtx is called).  */
static bool
substitute_reg_in_expr (expr_t expr, insn_t insn, bool undo)
{
  rtx *where;
  bool new_insn_valid;
  vinsn_t *vi = &EXPR_VINSN (expr);
  bool has_rhs = VINSN_RHS (*vi) != NULL;
  rtx old, new_rtx;

  /* Do not try to replace in SET_DEST.  Although we'll choose a new
     register for the RHS, we don't want to change the RHS' original reg.
     If the insn is not a SET, we may still be able to substitute something
     in it, and if we're here (don't have deps), it doesn't write INSN's
     dest.  */
  where = (has_rhs
           ? &VINSN_RHS (*vi)
           : &PATTERN (VINSN_INSN_RTX (*vi)));
  old = undo ? INSN_RHS (insn) : INSN_LHS (insn);

  /* Substitute if INSN has a form of x:=y and LHS(INSN) occurs in *VI.  */
  if (rtx_ok_for_substitution_p (old, *where))
    {
      rtx new_insn;
      rtx *where_replace;

      /* We should copy these rtxes before substitution.  */
      new_rtx = copy_rtx (undo ? INSN_LHS (insn) : INSN_RHS (insn));
      new_insn = create_copy_of_insn_rtx (VINSN_INSN_RTX (*vi));

      /* Where we'll replace.
         WHERE_REPLACE should point inside NEW_INSN, so INSN_RHS couldn't be
         used instead of SET_SRC.  */
      where_replace = (has_rhs
                       ? &SET_SRC (PATTERN (new_insn))
                       : &PATTERN (new_insn));

      new_insn_valid
        = validate_replace_rtx_part_nosimplify (old, new_rtx, where_replace,
                                                new_insn);

      /* ??? Actually, the constrain_operands result depends upon the choice
         of the destination register.  E.g. if we allow a single register to
         be an rhs, and we try to move dx=ax (as an rhs) through ax=dx, we'll
         end up with the invalid insn dx=dx, so we'll lose this rhs here.
         Just can't come up with a significant testcase for this, so leaving
         it for now.  */
      if (new_insn_valid)
        {
          change_vinsn_in_expr (expr,
                                create_vinsn_from_insn_rtx (new_insn, false));

          /* Do not allow clobbering the address register of speculative
             insns.  */
          if ((EXPR_SPEC_DONE_DS (expr) & SPECULATIVE)
              && bitmap_bit_p (VINSN_REG_USES (EXPR_VINSN (expr)),
                               expr_dest_regno (expr)))
            EXPR_TARGET_AVAILABLE (expr) = false;

          return true;
        }
      else
        return false;
    }
  else
    return false;
}

/* Helper function for count_occurrences_equiv.  */
static int
count_occurrences_1 (rtx *cur_rtx, void *arg)
{
  rtx_search_arg_p p = (rtx_search_arg_p) arg;

  /* The last param FOR_GCSE is true, because otherwise it performs excessive
     substitutions like
         r8 = r33
         r16 = r33
     for the last insn it presumes r33 equivalent to r8, so it changes it to
     r33.  Actually, there's no change, but it spoils debugging.  */
  if (exp_equiv_p (*cur_rtx, p->x, 0, true))
    {
      /* Bail out if we occupy more than one register.  */
      if (REG_P (*cur_rtx)
          && HARD_REGISTER_P (*cur_rtx)
          && hard_regno_nregs[REGNO (*cur_rtx)][GET_MODE (*cur_rtx)] > 1)
        {
          p->n = 0;
          return 1;
        }

      p->n++;

      /* Do not traverse subexprs.  */
      return -1;
    }

  if (GET_CODE (*cur_rtx) == SUBREG
      && REG_P (p->x)
      && REGNO (SUBREG_REG (*cur_rtx)) == REGNO (p->x))
    {
      /* ??? Do not support substituting regs inside subregs.  In that case,
         simplify_subreg will be called by validate_replace_rtx, and
         unsubstitution will fail later.  */
      p->n = 0;
      return 1;
    }

  /* Continue search.  */
  return 0;
}

/* Return the number of places WHAT appears within WHERE.
   Bail out when we find a reference occupying several hard registers.  */
static int
count_occurrences_equiv (rtx what, rtx where)
{
  struct rtx_search_arg arg;

  arg.x = what;
  arg.n = 0;

  for_each_rtx (&where, &count_occurrences_1, (void *) &arg);

  return arg.n;
}

/* Returns TRUE if WHAT is found in the WHERE rtx tree.  */
static bool
rtx_ok_for_substitution_p (rtx what, rtx where)
{
  return (count_occurrences_equiv (what, where) > 0);
}


/* Functions to support register renaming.  */

/* Substitute VI's set source with RHS_RTX.  Returns the newly created
   insn rtx that has RHS_RTX as its source.  */
static rtx
create_insn_rtx_with_rhs (vinsn_t vi, rtx rhs_rtx)
{
  rtx lhs_rtx;
  rtx pattern;
  rtx insn_rtx;

  lhs_rtx = copy_rtx (VINSN_LHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Returns whether INSN's src can be replaced with the register NEW_SRC_REG.
   E.g. the following insn is valid for i386:

    (insn:HI 2205 6585 2207 727 ../../gcc/libiberty/regex.c:3337
      (set (mem/s:QI (plus:SI (plus:SI (reg/f:SI 7 sp)
                        (reg:SI 0 ax [orig:770 c1 ] [770]))
                    (const_int 288 [0x120])) [0 str S1 A8])
            (const_int 0 [0x0])) 43 {*movqi_1} (nil)
        (nil))

  But if we change (const_int 0 [0x0]) to (reg:QI 4 si), it will be invalid
  because of operand constraints:

    (define_insn "*movqi_1"
      [(set (match_operand:QI 0 "nonimmediate_operand" "=q,q ,q ,r,r ,?r,m")
            (match_operand:QI 1 "general_operand"      " q,qn,qm,q,rn,qm,qn")
            )]

  So do constrain_operands here, before choosing NEW_SRC_REG as the best
  reg for the rhs.  */

static bool
replace_src_with_reg_ok_p (insn_t insn, rtx new_src_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  enum machine_mode mode;
  rtx dst_loc;
  bool res;

  gcc_assert (VINSN_SEPARABLE_P (vi));

  get_dest_and_mode (insn, &dst_loc, &mode);
  gcc_assert (mode == GET_MODE (new_src_reg));

  if (REG_P (dst_loc) && REGNO (new_src_reg) == REGNO (dst_loc))
    return true;

  /* See whether SET_SRC can be replaced with this register.  */
  validate_change (insn, &SET_SRC (PATTERN (insn)), new_src_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Returns whether INSN will still be valid after replacing its DEST with
   the register NEW_REG.  */
static bool
replace_dest_with_reg_ok_p (insn_t insn, rtx new_reg)
{
  vinsn_t vi = INSN_VINSN (insn);
  bool res;

  /* We should deal here only with separable insns.  */
  gcc_assert (VINSN_SEPARABLE_P (vi));
  gcc_assert (GET_MODE (VINSN_LHS (vi)) == GET_MODE (new_reg));

  /* See whether SET_DEST can be replaced with this register.  */
  validate_change (insn, &SET_DEST (PATTERN (insn)), new_reg, 1);
  res = verify_changes (0);
  cancel_changes (0);

  return res;
}

/* Create a pattern with the rhs of VI and the lhs of LHS_RTX.  */
static rtx
create_insn_rtx_with_lhs (vinsn_t vi, rtx lhs_rtx)
{
  rtx rhs_rtx;
  rtx pattern;
  rtx insn_rtx;

  rhs_rtx = copy_rtx (VINSN_RHS (vi));

  pattern = gen_rtx_SET (VOIDmode, lhs_rtx, rhs_rtx);
  insn_rtx = create_insn_rtx_from_pattern (pattern, NULL_RTX);

  return insn_rtx;
}

/* Substitute the lhs in the given expression EXPR with the register NEW_REG.
   SET_DEST may be an arbitrary rtx, not only a register.  */
static void
replace_dest_with_reg_in_expr (expr_t expr, rtx new_reg)
{
  rtx insn_rtx;
  vinsn_t vinsn;

  insn_rtx = create_insn_rtx_with_lhs (EXPR_VINSN (expr), new_reg);
  vinsn = create_vinsn_from_insn_rtx (insn_rtx, false);

  change_vinsn_in_expr (expr, vinsn);
  EXPR_WAS_RENAMED (expr) = 1;
  EXPR_TARGET_AVAILABLE (expr) = 1;
}

/* Returns whether VI writes either one of the USED_REGS registers or,
   if a register is a hard one, one of the UNAVAILABLE_HARD_REGS registers.  */
static bool
vinsn_writes_one_of_regs_p (vinsn_t vi, regset used_regs,
                            HARD_REG_SET unavailable_hard_regs)
{
  unsigned regno;
  reg_set_iterator rsi;

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
        return true;
    }

  EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (vi), 0, regno, rsi)
    {
      if (REGNO_REG_SET_P (used_regs, regno))
        return true;
      if (HARD_REGISTER_NUM_P (regno)
          && TEST_HARD_REG_BIT (unavailable_hard_regs, regno))
        return true;
    }

  return false;
}

/* Returns the register class of the output register in INSN.
   Returns NO_REGS for call insns because some targets have constraints on
   the destination register of a call insn.

   Code adapted from regrename.c::build_def_use.  */
static enum reg_class
get_reg_class (rtx insn)
{
  int alt, i, n_ops;

  extract_insn (insn);
  if (! constrain_operands (1))
    fatal_insn_not_found (insn);
  preprocess_constraints ();
  alt = which_alternative;
  n_ops = recog_data.n_operands;

  for (i = 0; i < n_ops; ++i)
    {
      int matches = recog_op_alt[i][alt].matches;
      if (matches >= 0)
        recog_op_alt[i][alt].cl = recog_op_alt[matches][alt].cl;
    }

  if (asm_noperands (PATTERN (insn)) > 0)
    {
      for (i = 0; i < n_ops; i++)
        if (recog_data.operand_type[i] == OP_OUT)
          {
            rtx *loc = recog_data.operand_loc[i];
            rtx op = *loc;
            enum reg_class cl = recog_op_alt[i][alt].cl;

            if (REG_P (op)
                && REGNO (op) == ORIGINAL_REGNO (op))
              continue;

            return cl;
          }
    }
  else if (!CALL_P (insn))
    {
      for (i = 0; i < n_ops + recog_data.n_dups; i++)
        {
          int opn = i < n_ops ? i : recog_data.dup_num[i - n_ops];
          enum reg_class cl = recog_op_alt[opn][alt].cl;

          if (recog_data.operand_type[opn] == OP_OUT
              || recog_data.operand_type[opn] == OP_INOUT)
            return cl;
        }
    }

  /* Insns like
     (insn (set (reg:CCZ 17 flags) (compare:CCZ ...)))
     may result in returning NO_REGS, because flags are written implicitly
     by the CMP insn, which has no OP_OUT | OP_INOUT operands.  */
  return NO_REGS;
}

#ifdef HARD_REGNO_RENAME_OK
/* Calculate HARD_REGNO_RENAME_OK data for REGNO.  */
static void
init_hard_regno_rename (int regno)
{
  int cur_reg;

  SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], regno);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      /* We are not interested in renaming into regs that are never used.  */
      if (!TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg))
        continue;

      if (HARD_REGNO_RENAME_OK (regno, cur_reg))
        SET_HARD_REG_BIT (sel_hrd.regs_for_rename[regno], cur_reg);
    }
}
#endif

/* A wrapper around HARD_REGNO_RENAME_OK that will look into the hard regs
   data first.  */
static inline bool
sel_hard_regno_rename_ok (int from ATTRIBUTE_UNUSED, int to ATTRIBUTE_UNUSED)
{
#ifdef HARD_REGNO_RENAME_OK
  /* Check whether this is all calculated.  */
  if (TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], from))
    return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);

  init_hard_regno_rename (from);

  return TEST_HARD_REG_BIT (sel_hrd.regs_for_rename[from], to);
#else
  return true;
#endif
}

/* Calculate the set of registers that are capable of holding MODE.  */
static void
init_regs_for_mode (enum machine_mode mode)
{
  int cur_reg;

  CLEAR_HARD_REG_SET (sel_hrd.regs_for_mode[mode]);
  CLEAR_HARD_REG_SET (sel_hrd.regs_for_call_clobbered[mode]);

  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    {
      int nregs = hard_regno_nregs[cur_reg][mode];
      int i;

      for (i = nregs - 1; i >= 0; --i)
        if (fixed_regs[cur_reg + i]
            || global_regs[cur_reg + i]
            /* Can't use regs which aren't saved by
               the prologue.  */
            || !TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg + i)
#ifdef LEAF_REGISTERS
            /* We can't use a non-leaf register if we're in a
               leaf function.  */
            || (current_function_is_leaf
                && !LEAF_REGISTERS[cur_reg + i])
#endif
            )
          break;

      if (i >= 0)
        continue;

      /* See whether it accepts all modes that occur in
         original insns.  */
      if (! HARD_REGNO_MODE_OK (cur_reg, mode))
        continue;

      if (HARD_REGNO_CALL_PART_CLOBBERED (cur_reg, mode))
        SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
                          cur_reg);

      /* If CUR_REG passed all the checks above,
         then it's ok.  */
      SET_HARD_REG_BIT (sel_hrd.regs_for_mode[mode], cur_reg);
    }

  sel_hrd.regs_for_mode_ok[mode] = true;
}

/* Init all register sets gathered in HRD.  */
static void
init_hard_regs_data (void)
{
  int cur_reg = 0;
  int cur_mode = 0;

  CLEAR_HARD_REG_SET (sel_hrd.regs_ever_used);
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    if (df_regs_ever_live_p (cur_reg) || call_used_regs[cur_reg])
      SET_HARD_REG_BIT (sel_hrd.regs_ever_used, cur_reg);

  /* Initialize registers that are valid based on mode when this is
     really needed.  */
  for (cur_mode = 0; cur_mode < NUM_MACHINE_MODES; cur_mode++)
    sel_hrd.regs_for_mode_ok[cur_mode] = false;

  /* Mark that the HARD_REGNO_RENAME_OK data is not calculated yet.  */
  for (cur_reg = 0; cur_reg < FIRST_PSEUDO_REGISTER; cur_reg++)
    CLEAR_HARD_REG_SET (sel_hrd.regs_for_rename[cur_reg]);

#ifdef STACK_REGS
  CLEAR_HARD_REG_SET (sel_hrd.stack_regs);

  for (cur_reg = FIRST_STACK_REG; cur_reg <= LAST_STACK_REG; cur_reg++)
    SET_HARD_REG_BIT (sel_hrd.stack_regs, cur_reg);
#endif
}

/* Mark hardware regs in REG_RENAME_P that are not suitable
   for renaming the rhs in INSN due to hardware restrictions (register class,
   mode compatibility, etc.).  This doesn't affect the original insn's dest
   reg, if it isn't in USED_REGS.  DEF is a definition insn of the rhs for
   which the destination register is sought.  LHS (DEF->ORIG_INSN) may be
   REG or MEM.  Registers that are in used_regs are always marked in
   unavailable_hard_regs as well.  */

static void
mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
                            regset used_regs ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  enum reg_class cl = NO_REGS;
  rtx orig_dest;
  unsigned cur_reg, regno;
  hard_reg_set_iterator hrsi;

  gcc_assert (GET_CODE (PATTERN (def->orig_insn)) == SET);
  gcc_assert (reg_rename_p);

  orig_dest = SET_DEST (PATTERN (def->orig_insn));

  /* We have decided not to rename 'mem = something;' insns, as 'something'
     is usually a register.  */
  if (!REG_P (orig_dest))
    return;

  regno = REGNO (orig_dest);

  /* If before reload, don't try to work with pseudos.  */
  if (!reload_completed && !HARD_REGISTER_NUM_P (regno))
    return;

  if (reload_completed)
    cl = get_reg_class (def->orig_insn);

  /* Stop if the original register is one of the fixed_regs, global_regs or
     the frame pointer, or we could not discover its class.  */
  if (fixed_regs[regno]
      || global_regs[regno]
#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
      || (frame_pointer_needed && regno == HARD_FRAME_POINTER_REGNUM)
#else
      || (frame_pointer_needed && regno == FRAME_POINTER_REGNUM)
#endif
      || (reload_completed && cl == NO_REGS))
    {
      SET_HARD_REG_SET (reg_rename_p->unavailable_hard_regs);

      /* Give a chance to the original register, if it isn't in used_regs.  */
      if (!def->crosses_call)
        CLEAR_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno);

      return;
    }

  /* If something is allocated on the stack in this function, mark the frame
     pointer register unavailable, considering also modes.
     FIXME: it is enough to do this once for all original defs.  */
  if (frame_pointer_needed)
    {
      int i;

      for (i = hard_regno_nregs[FRAME_POINTER_REGNUM][Pmode]; i--;)
        SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
                          FRAME_POINTER_REGNUM + i);

#if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM
      for (i = hard_regno_nregs[HARD_FRAME_POINTER_REGNUM][Pmode]; i--;)
        SET_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
                          HARD_FRAME_POINTER_REGNUM + i);
#endif
    }

#ifdef STACK_REGS
  /* For the stack registers, the presence of FIRST_STACK_REG in USED_REGS is
     equivalent to all stack regs being in this set.
     I.e. no stack register can be renamed, and even if it's the original
     register here, we make sure it won't be lifted over its previous def
     (its previous def will appear as if it were a FIRST_STACK_REG def).
     HARD_REGNO_RENAME_OK covers the other cases in the condition below.  */
  if (IN_RANGE (REGNO (orig_dest), FIRST_STACK_REG, LAST_STACK_REG)
      && REGNO_REG_SET_P (used_regs, FIRST_STACK_REG))
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      sel_hrd.stack_regs);
#endif

  /* If there's a call on this path, make regs from call_used_reg_set
     unavailable.  */
  if (def->crosses_call)
    IOR_HARD_REG_SET (reg_rename_p->unavailable_hard_regs,
                      call_used_reg_set);

  /* Stop here before reload: we need FRAME_REGS, STACK_REGS, and crosses_call,
     but not register classes.  */
  if (!reload_completed)
    return;

  /* Leave regs as 'available' only from the current
     register class.  */
  COPY_HARD_REG_SET (reg_rename_p->available_for_renaming,
                     reg_class_contents[cl]);

  mode = GET_MODE (orig_dest);

  /* Leave only registers available for this mode.  */
  if (!sel_hrd.regs_for_mode_ok[mode])
    init_regs_for_mode (mode);
  AND_HARD_REG_SET (reg_rename_p->available_for_renaming,
                    sel_hrd.regs_for_mode[mode]);

  /* Exclude registers that are partially call clobbered.  */
  if (def->crosses_call
      && ! HARD_REGNO_CALL_PART_CLOBBERED (regno, mode))
    AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                            sel_hrd.regs_for_call_clobbered[mode]);

  /* Leave only those that are ok to rename.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    {
      int nregs;
      int i;

      nregs = hard_regno_nregs[cur_reg][mode];
      gcc_assert (nregs > 0);

      for (i = nregs - 1; i >= 0; --i)
        if (! sel_hard_regno_rename_ok (regno + i, cur_reg + i))
          break;

      if (i >= 0)
        CLEAR_HARD_REG_BIT (reg_rename_p->available_for_renaming,
                            cur_reg);
    }

  AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
                          reg_rename_p->unavailable_hard_regs);

  /* REGNO is always ok from the renaming point of view, but it really
     could be in *unavailable_hard_regs already, so set it here instead
     of there.  */
  SET_HARD_REG_BIT (reg_rename_p->available_for_renaming, regno);
}

/* reg_rename_tick[REG1] > reg_rename_tick[REG2] if REG1 was chosen as the
1345
   best register more recently than REG2.  */
1346
static int reg_rename_tick[FIRST_PSEUDO_REGISTER];
1347
 
1348
/* Indicates the number of times renaming happened before the current one.  */
1349
static int reg_rename_this_tick;
1350
 
1351
/* Choose the register among free, that is suitable for storing
1352
   the rhs value.
1353
 
1354
   ORIGINAL_INSNS is the list of insns where the operation (rhs)
1355
   originally appears.  There could be multiple original operations
1356
   for single rhs since we moving it up and merging along different
1357
   paths.
1358
 
1359
   Some code is adapted from regrename.c (regrename_optimize).
1360
   If original register is available, function returns it.
1361
   Otherwise it performs the checks, so the new register should
1362
   comply with the following:
1363
    - it should not violate any live ranges (such registers are in
1364
      REG_RENAME_P->available_for_renaming set);
1365
    - it should not be in the HARD_REGS_USED regset;
1366
    - it should be in the class compatible with original uses;
1367
    - it should not be clobbered through reference with different mode;
1368
    - if we're in the leaf function, then the new register should
1369
      not be in the LEAF_REGISTERS;
1370
    - etc.
1371
 
1372
   If several registers meet the conditions, the register with smallest
1373
   tick is returned to achieve more even register allocation.
1374
 
1375
   If original register seems to be ok, we set *IS_ORIG_REG_P_PTR to true.
1376
 
1377
   If no register satisfies the above conditions, NULL_RTX is returned.  */
static rtx
choose_best_reg_1 (HARD_REG_SET hard_regs_used,
                   struct reg_rename *reg_rename_p,
                   def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  int best_new_reg;
  unsigned cur_reg;
  enum machine_mode mode = VOIDmode;
  unsigned regno, i, n;
  hard_reg_set_iterator hrsi;
  def_list_iterator di;
  def_t def;

  /* If original register is available, return it.  */
  *is_orig_reg_p_ptr = true;

  FOR_EACH_DEF (def, di, original_insns)
    {
      rtx orig_dest = SET_DEST (PATTERN (def->orig_insn));

      gcc_assert (REG_P (orig_dest));

      /* Check that all original operations have the same mode.
         This is done for the next loop; if we'd return from this
         loop, we'd check only part of them, but in this case
         it doesn't matter.  */
      if (mode == VOIDmode)
        mode = GET_MODE (orig_dest);
      gcc_assert (mode == GET_MODE (orig_dest));

      regno = REGNO (orig_dest);
      for (i = 0, n = hard_regno_nregs[regno][mode]; i < n; i++)
        if (TEST_HARD_REG_BIT (hard_regs_used, regno + i))
          break;

      /* All hard registers are available.  */
      if (i == n)
        {
          gcc_assert (mode != VOIDmode);

          /* Hard registers should not be shared.  */
          return gen_rtx_REG (mode, regno);
        }
    }

  *is_orig_reg_p_ptr = false;
  best_new_reg = -1;

  /* Among all available regs choose the register that was
     allocated earliest.  */
  EXECUTE_IF_SET_IN_HARD_REG_SET (reg_rename_p->available_for_renaming,
                                  0, cur_reg, hrsi)
    if (! TEST_HARD_REG_BIT (hard_regs_used, cur_reg))
      {
        /* Check that all hard regs for mode are available.  */
        for (i = 1, n = hard_regno_nregs[cur_reg][mode]; i < n; i++)
          if (TEST_HARD_REG_BIT (hard_regs_used, cur_reg + i)
              || !TEST_HARD_REG_BIT (reg_rename_p->available_for_renaming,
                                     cur_reg + i))
            break;

        if (i < n)
          continue;

        /* All hard registers are available.  */
        if (best_new_reg < 0
            || reg_rename_tick[cur_reg] < reg_rename_tick[best_new_reg])
          {
            best_new_reg = cur_reg;

            /* Return immediately when we know there's no better reg.  */
            if (! reg_rename_tick[best_new_reg])
              break;
          }
      }

  if (best_new_reg >= 0)
    {
      /* Use the check from the above loop.  */
      gcc_assert (mode != VOIDmode);
      return gen_rtx_REG (mode, best_new_reg);
    }

  return NULL_RTX;
}
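
/* A simplified model of the tick-based choice above, for illustration
   only (a hypothetical helper, not part of the scheduler): given an
   array of ticks and a free-register predicate, the loop is a
   min-search with an early exit on a never-used register.

     static int
     pick_min_tick_reg (const int *tick, const bool *free_p, int n)
     {
       int best = -1, i;

       for (i = 0; i < n; i++)
         if (free_p[i] && (best < 0 || tick[i] < tick[best]))
           {
             best = i;
             if (tick[i] == 0)
               break;
           }
       return best;
     }

   Preferring the smallest tick reuses the register renamed longest
   ago, which spreads renaming evenly across the register file; a zero
   tick cannot be beaten, hence the early break.  */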

/* A wrapper around choose_best_reg_1 () to verify that we make correct
   assumptions about available registers in the function.  */
static rtx
choose_best_reg (HARD_REG_SET hard_regs_used, struct reg_rename *reg_rename_p,
                 def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  rtx best_reg = choose_best_reg_1 (hard_regs_used, reg_rename_p,
                                    original_insns, is_orig_reg_p_ptr);

  /* FIXME loop over hard_regno_nregs here.  */
  gcc_assert (best_reg == NULL_RTX
              || TEST_HARD_REG_BIT (sel_hrd.regs_ever_used, REGNO (best_reg)));

  return best_reg;
}

/* Choose the pseudo register for storing the rhs value.  As this is supposed
   to work before reload, we return either the original register or make
   a new one.  The parameters are the same as in choose_best_reg_1,
   except that USED_REGS may contain pseudos.
   If we work with hard regs, check also REG_RENAME_P->UNAVAILABLE_HARD_REGS.

   TODO: take register pressure into account while doing this.  Up to this
   moment, this function would never return NULL for pseudos, but we should
   not rely on this.  */
static rtx
choose_best_pseudo_reg (regset used_regs,
                        struct reg_rename *reg_rename_p,
                        def_list_t original_insns, bool *is_orig_reg_p_ptr)
{
  def_list_iterator i;
  def_t def;
  enum machine_mode mode = VOIDmode;
  bool bad_hard_regs = false;

  /* We should not use this after reload.  */
  gcc_assert (!reload_completed);

  /* If original register is available, return it.  */
  *is_orig_reg_p_ptr = true;

  FOR_EACH_DEF (def, i, original_insns)
    {
      rtx dest = SET_DEST (PATTERN (def->orig_insn));
      int orig_regno;

      gcc_assert (REG_P (dest));

      /* Check that all original operations have the same mode.  */
      if (mode == VOIDmode)
        mode = GET_MODE (dest);
      else
        gcc_assert (mode == GET_MODE (dest));
      orig_regno = REGNO (dest);

      if (!REGNO_REG_SET_P (used_regs, orig_regno))
        {
          if (orig_regno < FIRST_PSEUDO_REGISTER)
            {
              gcc_assert (df_regs_ever_live_p (orig_regno));

              /* For hard registers, we have to check hardware imposed
                 limitations (frame/stack registers, calls crossed).  */
              if (!TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs,
                                      orig_regno))
                {
                  /* Don't let register cross a call if it doesn't already
                     cross one.  This condition is written in accordance with
                     that in sched-deps.c sched_analyze_reg().  */
                  if (!reg_rename_p->crosses_call
                      || REG_N_CALLS_CROSSED (orig_regno) > 0)
                    return gen_rtx_REG (mode, orig_regno);
                }

              bad_hard_regs = true;
            }
          else
            return dest;
        }
    }

  *is_orig_reg_p_ptr = false;

  /* We had some original hard registers that couldn't be used.
     Those were likely special.  Don't try to create a pseudo.  */
  if (bad_hard_regs)
    return NULL_RTX;

  /* We haven't found a register from original operations.  Get a new one.
     FIXME: control register pressure somehow.  */
  {
    rtx new_reg = gen_reg_rtx (mode);

    gcc_assert (mode != VOIDmode);

    max_regno = max_reg_num ();
    maybe_extend_reg_info_p ();
    REG_N_CALLS_CROSSED (REGNO (new_reg)) = reg_rename_p->crosses_call ? 1 : 0;

    return new_reg;
  }
}
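
/* For illustration only, a hypothetical scenario for the call-crossing
   rule above.  Suppose the original operation "r = a + b" is moved up
   through a call:

       r = a + b;     <-- candidate location after code motion
       call foo ();
       r = a + b;     <-- original location

   After the motion, r's live range crosses the call.  Reusing r is only
   allowed if REG_N_CALLS_CROSSED (r) > 0, i.e. r already crossed a call
   before scheduling; otherwise the original register is rejected, which
   mirrors the condition used by sched_analyze_reg in sched-deps.c.  */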

/* Verify that the target availability of EXPR, as recorded in
   EXPR_TARGET_AVAILABLE, is consistent with USED_REGS and
   REG_RENAME_P->UNAVAILABLE_HARD_REGS.  */
static void
verify_target_availability (expr_t expr, regset used_regs,
                            struct reg_rename *reg_rename_p)
{
  unsigned n, i, regno;
  enum machine_mode mode;
  bool target_available, live_available, hard_available;

  if (!REG_P (EXPR_LHS (expr)) || EXPR_TARGET_AVAILABLE (expr) < 0)
    return;

  regno = expr_dest_regno (expr);
  mode = GET_MODE (EXPR_LHS (expr));
  target_available = EXPR_TARGET_AVAILABLE (expr) == 1;
  n = reload_completed ? hard_regno_nregs[regno][mode] : 1;

  live_available = hard_available = true;
  for (i = 0; i < n; i++)
    {
      if (bitmap_bit_p (used_regs, regno + i))
        live_available = false;
      if (TEST_HARD_REG_BIT (reg_rename_p->unavailable_hard_regs, regno + i))
        hard_available = false;
    }

  /* When target is not available, it may be due to hard register
     restrictions, e.g. crosses calls, so we check hard_available too.  */
  if (target_available)
    gcc_assert (live_available);
  else
    /* Check only if we haven't scheduled something on the previous fence,
       because, due to MAX_SOFTWARE_LOOKAHEAD_WINDOW_SIZE issues
       and having more than one fence, we may end up with the target
       unavailable in a block in which the successor's target register
       is actually available.

       The last condition handles the case when a dependence from a call insn
       was created in sched-deps.c for insns with destination registers that
       never crossed a call before, but do cross one after our code motion.

       FIXME: in the latter case, we just uselessly called find_used_regs,
       because we can't move this expression with any other register
       either.  */
    gcc_assert (scheduled_something_on_previous_fence || !live_available
                || !hard_available
                || (!reload_completed && reg_rename_p->crosses_call
                    && REG_N_CALLS_CROSSED (regno) == 0));
}

/* Collect registers that are unavailable for EXPR due to liveness from BNDS
   into USED_REGS.  Save additional information about registers available
   for renaming, and about registers unavailable due to hardware
   restrictions, into the REG_RENAME_P structure.  Save original insns
   into the ORIGINAL_INSNS list.  */
static void
collect_unavailable_regs_from_bnds (expr_t expr, blist_t bnds, regset used_regs,
                                    struct reg_rename *reg_rename_p,
                                    def_list_t *original_insns)
{
  for (; bnds; bnds = BLIST_NEXT (bnds))
    {
      bool res;
      av_set_t orig_ops = NULL;
      bnd_t bnd = BLIST_BND (bnds);

      /* If the chosen best expr doesn't belong to current boundary,
         skip it.  */
      if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr)))
        continue;

      /* Put in ORIG_OPS all exprs from this boundary that became
         RES on top.  */
      orig_ops = find_sequential_best_exprs (bnd, expr, false);

      /* Compute used regs and OR it into the USED_REGS.  */
      res = find_used_regs (BND_TO (bnd), orig_ops, used_regs,
                            reg_rename_p, original_insns);

      /* FIXME: the assert is true until we'd have several boundaries.  */
      gcc_assert (res);
      av_set_clear (&orig_ops);
    }
}

/* Return TRUE if it is possible to replace LHSes of ORIG_INSNS with BEST_REG.
   If BEST_REG is valid, replace LHS of EXPR with it.  */
static bool
try_replace_dest_reg (ilist_t orig_insns, rtx best_reg, expr_t expr)
{
  /* Try whether we'll be able to generate the insn
     'dest := best_reg' at the place of the original operation.  */
  for (; orig_insns; orig_insns = ILIST_NEXT (orig_insns))
    {
      insn_t orig_insn = DEF_LIST_DEF (orig_insns)->orig_insn;

      gcc_assert (EXPR_SEPARABLE_P (INSN_EXPR (orig_insn)));

      if (REGNO (best_reg) != REGNO (INSN_LHS (orig_insn))
          && (! replace_src_with_reg_ok_p (orig_insn, best_reg)
              || ! replace_dest_with_reg_ok_p (orig_insn, best_reg)))
        return false;
    }

  /* Make sure that EXPR has the right destination
     register.  */
  if (expr_dest_regno (expr) != REGNO (best_reg))
    replace_dest_with_reg_in_expr (expr, best_reg);
  else
    EXPR_TARGET_AVAILABLE (expr) = 1;

  return true;
}
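
/* For illustration only, renaming with a hypothetical register r2.
   If the original operation is "r1 = x + y" and r1 is unavailable along
   the move-up path, the moved expression becomes "r2 = x + y" and the
   copy "r1 = r2" must be generatable at the original location.  The
   replace_src_with_reg_ok_p / replace_dest_with_reg_ok_p checks above
   verify that both rewrites yield valid insns for the target.  */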

/* Select and assign best register to EXPR searching from BNDS.
   Set *IS_ORIG_REG_P to TRUE if original register was selected.
   Return FALSE if no register can be chosen, which could happen when:
   * EXPR_SEPARABLE_P is true but we were unable to find suitable register;
   * EXPR_SEPARABLE_P is false but the insn sets/clobbers one of the registers
     that are used on the moving path.  */
static bool
find_best_reg_for_expr (expr_t expr, blist_t bnds, bool *is_orig_reg_p)
{
  static struct reg_rename reg_rename_data;

  regset used_regs;
  def_list_t original_insns = NULL;
  bool reg_ok;

  *is_orig_reg_p = false;

  /* Don't bother to do anything if this insn doesn't set any registers.  */
  if (bitmap_empty_p (VINSN_REG_SETS (EXPR_VINSN (expr)))
      && bitmap_empty_p (VINSN_REG_CLOBBERS (EXPR_VINSN (expr))))
    return true;

  used_regs = get_clear_regset_from_pool ();
  CLEAR_HARD_REG_SET (reg_rename_data.unavailable_hard_regs);

  collect_unavailable_regs_from_bnds (expr, bnds, used_regs, &reg_rename_data,
                                      &original_insns);

#ifdef ENABLE_CHECKING
  /* If after reload, make sure we're working with hard regs here.  */
  if (reload_completed)
    {
      reg_set_iterator rsi;
      unsigned i;

      EXECUTE_IF_SET_IN_REG_SET (used_regs, FIRST_PSEUDO_REGISTER, i, rsi)
        gcc_unreachable ();
    }
#endif

  if (EXPR_SEPARABLE_P (expr))
    {
      rtx best_reg = NULL_RTX;
      /* Check that we have computed availability of a target register
         correctly.  */
      verify_target_availability (expr, used_regs, &reg_rename_data);

      /* Turn everything into hard regs after reload.  */
      if (reload_completed)
        {
          HARD_REG_SET hard_regs_used;
          REG_SET_TO_HARD_REG_SET (hard_regs_used, used_regs);

          /* Join hard registers unavailable due to register class
             restrictions and live range intersection.  */
          IOR_HARD_REG_SET (hard_regs_used,
                            reg_rename_data.unavailable_hard_regs);

          best_reg = choose_best_reg (hard_regs_used, &reg_rename_data,
                                      original_insns, is_orig_reg_p);
        }
      else
        best_reg = choose_best_pseudo_reg (used_regs, &reg_rename_data,
                                           original_insns, is_orig_reg_p);

      if (!best_reg)
        reg_ok = false;
      else if (*is_orig_reg_p)
        {
          /* In case of unification BEST_REG may be different from EXPR's LHS
             when EXPR's LHS is unavailable, and there is another LHS among
             ORIGINAL_INSNS.  */
          reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
        }
      else
        {
          /* Forbid renaming of low-cost insns.  */
          if (sel_vinsn_cost (EXPR_VINSN (expr)) < 2)
            reg_ok = false;
          else
            reg_ok = try_replace_dest_reg (original_insns, best_reg, expr);
        }
    }
  else
    {
      /* If !EXPR_SCHEDULE_AS_RHS (EXPR), just make sure INSN doesn't set
         any of the HARD_REGS_USED set.  */
      if (vinsn_writes_one_of_regs_p (EXPR_VINSN (expr), used_regs,
                                      reg_rename_data.unavailable_hard_regs))
        {
          reg_ok = false;
          gcc_assert (EXPR_TARGET_AVAILABLE (expr) <= 0);
        }
      else
        {
          reg_ok = true;
          gcc_assert (EXPR_TARGET_AVAILABLE (expr) != 0);
        }
    }

  ilist_clear (&original_insns);
  return_regset_to_pool (used_regs);

  return reg_ok;
}


/* Return true if the dependence described by DS can be overcome.  */
static bool
can_speculate_dep_p (ds_t ds)
{
  if (spec_info == NULL)
    return false;

  /* Leave only speculative data.  */
  ds &= SPECULATIVE;

  if (ds == 0)
    return false;

  {
    /* FIXME: make sched-deps.c produce only those non-hard dependencies
       that we can overcome.  */
    ds_t spec_mask = spec_info->mask;

    if ((ds & spec_mask) != ds)
      return false;
  }

  if (ds_weak (ds) < spec_info->data_weakness_cutoff)
    return false;

  return true;
}
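
/* A hypothetical walk through the filter above, assuming a target whose
   spec_info->mask permits only data speculation.  For a status
   ds = BEGIN_DATA | BEGIN_CONTROL, "ds &= SPECULATIVE" keeps both bits,
   but BEGIN_CONTROL is outside spec_mask, so "(ds & spec_mask) != ds"
   holds and the dependence is rejected.  A plain ds = BEGIN_DATA passes
   both tests and is accepted iff ds_weak (ds) reaches
   spec_info->data_weakness_cutoff.  */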

/* Get a speculation check instruction.
   C_EXPR is a speculative expression,
   CHECK_DS describes speculations that should be checked,
   ORIG_INSN is the original non-speculative insn in the stream.  */
static insn_t
create_speculation_check (expr_t c_expr, ds_t check_ds, insn_t orig_insn)
{
  rtx check_pattern;
  rtx insn_rtx;
  insn_t insn;
  basic_block recovery_block;
  rtx label;

  /* Create a recovery block if target is going to emit branchy check, or if
     ORIG_INSN was speculative already.  */
  if (targetm.sched.needs_block_p (check_ds)
      || EXPR_SPEC_DONE_DS (INSN_EXPR (orig_insn)) != 0)
    {
      recovery_block = sel_create_recovery_block (orig_insn);
      label = BB_HEAD (recovery_block);
    }
  else
    {
      recovery_block = NULL;
      label = NULL_RTX;
    }

  /* Get pattern of the check.  */
  check_pattern = targetm.sched.gen_spec_check (EXPR_INSN_RTX (c_expr), label,
                                                check_ds);

  gcc_assert (check_pattern != NULL);

  /* Emit check.  */
  insn_rtx = create_insn_rtx_from_pattern (check_pattern, label);

  insn = sel_gen_insn_from_rtx_after (insn_rtx, INSN_EXPR (orig_insn),
                                      INSN_SEQNO (orig_insn), orig_insn);

  /* Make the check non-speculative.  */
  EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
  INSN_SPEC_CHECKED_DS (insn) = check_ds;

  /* Decrease priority of check by difference of load/check instruction
     latencies.  */
  EXPR_PRIORITY (INSN_EXPR (insn)) -= (sel_vinsn_cost (INSN_VINSN (orig_insn))
                                       - sel_vinsn_cost (INSN_VINSN (insn)));

  /* Emit copy of original insn (though with replaced target register,
     if needed) to the recovery block.  */
  if (recovery_block != NULL)
    {
      rtx twin_rtx;

      twin_rtx = copy_rtx (PATTERN (EXPR_INSN_RTX (c_expr)));
      twin_rtx = create_insn_rtx_from_pattern (twin_rtx, NULL_RTX);
      sel_gen_recovery_insn_from_rtx_after (twin_rtx,
                                            INSN_EXPR (orig_insn),
                                            INSN_SEQNO (insn),
                                            bb_note (recovery_block));
    }

  /* If we've generated a data speculation check, make sure
     that all the bookkeeping instructions we'll create during
     this move_op () will allocate an ALAT entry so that the
     check won't fail.
     In case of control speculation we must convert C_EXPR to control
     speculative mode, because failing to do so will bring us an exception
     thrown by the non-control-speculative load.  */
  check_ds = ds_get_max_dep_weak (check_ds);
  speculate_expr (c_expr, check_ds);

  return insn;
}

/* True when INSN is a "regN = regN" copy.  */
static bool
identical_copy_p (rtx insn)
{
  rtx lhs, rhs, pat;

  pat = PATTERN (insn);

  if (GET_CODE (pat) != SET)
    return false;

  lhs = SET_DEST (pat);
  if (!REG_P (lhs))
    return false;

  rhs = SET_SRC (pat);
  if (!REG_P (rhs))
    return false;

  return REGNO (lhs) == REGNO (rhs);
}
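
/* For example, identical_copy_p holds for an insn whose pattern is
   (set (reg:SI 5) (reg:SI 5)), fails for (set (reg:SI 5) (reg:SI 6)),
   and fails for any insn whose pattern is not a single SET.  */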

/* Undo all transformations on *AV_PTR that were done when
   moving through INSN.  */
static void
undo_transformations (av_set_t *av_ptr, rtx insn)
{
  av_set_iterator av_iter;
  expr_t expr;
  av_set_t new_set = NULL;

  /* First, kill any EXPR that uses registers set by the insn.  This is
     required for correctness.  */
  FOR_EACH_EXPR_1 (expr, av_iter, av_ptr)
    if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (expr))
        && bitmap_intersect_p (INSN_REG_SETS (insn),
                               VINSN_REG_USES (EXPR_VINSN (expr)))
        /* When an insn looks like 'r1 = r1', we could substitute through
           it, but the above condition will still hold.  This happened with
           gcc.c-torture/execute/961125-1.c.  */
        && !identical_copy_p (insn))
      {
        if (sched_verbose >= 6)
          sel_print ("Expr %d removed due to use/set conflict\n",
                     INSN_UID (EXPR_INSN_RTX (expr)));
        av_set_iter_remove (&av_iter);
      }

  /* Undo transformations looking at the history vector.  */
  FOR_EACH_EXPR (expr, av_iter, *av_ptr)
    {
      int index = find_in_history_vect (EXPR_HISTORY_OF_CHANGES (expr),
                                        insn, EXPR_VINSN (expr), true);

      if (index >= 0)
        {
          expr_history_def *phist;

          phist = VEC_index (expr_history_def,
                             EXPR_HISTORY_OF_CHANGES (expr),
                             index);

          switch (phist->type)
            {
            case TRANS_SPECULATION:
              {
                ds_t old_ds, new_ds;

                /* Compute the difference between old and new speculative
                   statuses: that's what we need to check.
                   Earlier we used to assert that the status will really
                   change.  This no longer works because only the probability
                   bits in the status may have changed during compute_av_set,
                   and in the case of merging different probabilities of the
                   same speculative status along different paths we do not
                   record this in the history vector.  */
                old_ds = phist->spec_ds;
                new_ds = EXPR_SPEC_DONE_DS (expr);

                old_ds &= SPECULATIVE;
                new_ds &= SPECULATIVE;
                new_ds &= ~old_ds;

                EXPR_SPEC_TO_CHECK_DS (expr) |= new_ds;
                break;
              }
            case TRANS_SUBSTITUTION:
              {
                expr_def _tmp_expr, *tmp_expr = &_tmp_expr;
                vinsn_t new_vi;
                bool add = true;

                new_vi = phist->old_expr_vinsn;

                gcc_assert (VINSN_SEPARABLE_P (new_vi)
                            == EXPR_SEPARABLE_P (expr));
                copy_expr (tmp_expr, expr);

                if (vinsn_equal_p (phist->new_expr_vinsn,
                                   EXPR_VINSN (tmp_expr)))
                  change_vinsn_in_expr (tmp_expr, new_vi);
                else
                  /* This happens when we're unsubstituting on a bookkeeping
                     copy, which was in turn substituted.  The history is wrong
                     in this case.  Do it the hard way.  */
                  add = substitute_reg_in_expr (tmp_expr, insn, true);
                if (add)
                  av_set_add (&new_set, tmp_expr);
                clear_expr (tmp_expr);
                break;
              }
            default:
              gcc_unreachable ();
            }
        }
    }

  av_set_union_and_clear (av_ptr, &new_set, NULL);
}


/* Moveup_* helpers for code motion and computing av sets.  */

/* Propagates EXPR inside an insn group through THROUGH_INSN.
   The difference from the below function is that only substitution is
   performed.  */
static enum MOVEUP_EXPR_CODE
moveup_expr_inside_insn_group (expr_t expr, insn_t through_insn)
{
  vinsn_t vi = EXPR_VINSN (expr);
  ds_t *has_dep_p;
  ds_t full_ds;

  /* Do this only inside an insn group.  */
  gcc_assert (INSN_SCHED_CYCLE (through_insn) > 0);

  full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
  if (full_ds == 0)
    return MOVEUP_EXPR_SAME;

  /* Substitution is the possible choice in this case.  */
  if (has_dep_p[DEPS_IN_RHS])
    {
      /* Can't substitute UNIQUE VINSNs.  */
      gcc_assert (!VINSN_UNIQUE_P (vi));

      if (can_substitute_through_p (through_insn,
                                    has_dep_p[DEPS_IN_RHS])
          && substitute_reg_in_expr (expr, through_insn, false))
        {
          EXPR_WAS_SUBSTITUTED (expr) = true;
          return MOVEUP_EXPR_CHANGED;
        }

      /* Don't care about this, as even true dependencies may be allowed
         in an insn group.  */
      return MOVEUP_EXPR_SAME;
    }

  /* This can catch output dependencies in COND_EXECs.  */
  if (has_dep_p[DEPS_IN_INSN])
    return MOVEUP_EXPR_NULL;

  /* This is either an output or an anti dependence, which usually has
     zero latency.  Allow it here; if we're wrong, tick_check_p
     will fix it.  */
  gcc_assert (has_dep_p[DEPS_IN_LHS]);
  return MOVEUP_EXPR_AS_RHS;
}

/* True when a trapping EXPR cannot be moved through THROUGH_INSN.  */
#define CANT_MOVE_TRAPPING(expr, through_insn)                \
  (VINSN_MAY_TRAP_P (EXPR_VINSN (expr))                       \
   && !sel_insn_has_single_succ_p ((through_insn), SUCCS_ALL) \
   && !sel_insn_is_speculation_check (through_insn))
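
/* For illustration only: why multiple successors matter here.  Moving a
   possibly-trapping load "r = [p]" up through a conditional jump makes
   it execute on both outcomes of the branch:

       r = [p];                 <-- candidate location: the load now runs
       if (p == NULL) goto L;       even when the guard would skip it
       r = [p];                 <-- original, guarded location

   Hence the motion is refused unless THROUGH_INSN has a single
   successor or is itself a speculation check.  */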

/* True when a conflict on a target register was found during moveup_expr.  */
static bool was_target_conflict = false;

/* Return true when moving a debug INSN across THROUGH_INSN will
   create a bookkeeping block.  We don't want to create such blocks,
   for they would cause codegen differences between compilations with
   and without debug info.  */

static bool
moving_insn_creates_bookkeeping_block_p (insn_t insn,
                                         insn_t through_insn)
{
  basic_block bbi, bbt;
  edge e1, e2;
  edge_iterator ei1, ei2;

  if (!bookkeeping_can_be_created_if_moved_through_p (through_insn))
    {
      if (sched_verbose >= 9)
        sel_print ("no bookkeeping required: ");
      return FALSE;
    }

  bbi = BLOCK_FOR_INSN (insn);

  if (EDGE_COUNT (bbi->preds) == 1)
    {
      if (sched_verbose >= 9)
        sel_print ("only one pred edge: ");
      return TRUE;
    }

  bbt = BLOCK_FOR_INSN (through_insn);

  FOR_EACH_EDGE (e1, ei1, bbt->succs)
    {
      FOR_EACH_EDGE (e2, ei2, bbi->preds)
        {
          if (find_block_for_bookkeeping (e1, e2, TRUE))
            {
              if (sched_verbose >= 9)
                sel_print ("found existing block: ");
              return FALSE;
            }
        }
    }

  if (sched_verbose >= 9)
    sel_print ("would create bookkeeping block: ");

  return TRUE;
}

/* Modifies EXPR so it can be moved through the THROUGH_INSN,
   performing necessary transformations.  Record the type of transformation
   made in PTRANS_TYPE, when it is not NULL.  When INSIDE_INSN_GROUP,
   permit all dependencies except true ones, and try to remove those
   too via forward substitution.  All cases when a non-eliminable
   non-zero cost dependency exists inside an insn group will be fixed
   in tick_check_p instead.  */
static enum MOVEUP_EXPR_CODE
moveup_expr (expr_t expr, insn_t through_insn, bool inside_insn_group,
            enum local_trans_type *ptrans_type)
{
  vinsn_t vi = EXPR_VINSN (expr);
  insn_t insn = VINSN_INSN_RTX (vi);
  bool was_changed = false;
  bool as_rhs = false;
  ds_t *has_dep_p;
  ds_t full_ds;

  /* When inside_insn_group, delegate to the helper.  */
  if (inside_insn_group)
    return moveup_expr_inside_insn_group (expr, through_insn);

  /* Deal with unique insns and control dependencies.  */
  if (VINSN_UNIQUE_P (vi))
    {
      /* We can move jumps without side effects or jumps that are
         mutually exclusive with instruction THROUGH_INSN (in all cases
         where the dependencies allow doing so and the jump is not
         speculative).  */
      if (control_flow_insn_p (insn))
        {
          basic_block fallthru_bb;

          /* Do not move checks and do not move jumps through other
             jumps.  */
          if (control_flow_insn_p (through_insn)
              || sel_insn_is_speculation_check (insn))
            return MOVEUP_EXPR_NULL;

          /* Don't move jumps through CFG joins.  */
          if (bookkeeping_can_be_created_if_moved_through_p (through_insn))
            return MOVEUP_EXPR_NULL;

          /* The jump should have a clear fallthru block, and
             this block should be in the current region.  */
          if ((fallthru_bb = fallthru_bb_of_jump (insn)) == NULL
              || ! in_current_region_p (fallthru_bb))
            return MOVEUP_EXPR_NULL;

          /* And it should be mutually exclusive with through_insn, or
             be an unconditional jump.  */
          if (! any_uncondjump_p (insn)
              && ! sched_insns_conditions_mutex_p (insn, through_insn)
              && ! DEBUG_INSN_P (through_insn))
            return MOVEUP_EXPR_NULL;
        }

      /* Don't move what we can't move.  */
      if (EXPR_CANT_MOVE (expr)
          && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn))
        return MOVEUP_EXPR_NULL;

      /* Don't move SCHED_GROUP instruction through anything.
         If we don't force this, then it will be possible to start
         scheduling a sched_group before all its dependencies are
         resolved.
         ??? Haifa deals with this issue by delaying the SCHED_GROUP
         as late as possible through rank_for_schedule.  */
      if (SCHED_GROUP_P (insn))
        return MOVEUP_EXPR_NULL;
    }
  else
    gcc_assert (!control_flow_insn_p (insn));

  /* Don't move debug insns if this would require bookkeeping.  */
  if (DEBUG_INSN_P (insn)
      && BLOCK_FOR_INSN (through_insn) != BLOCK_FOR_INSN (insn)
      && moving_insn_creates_bookkeeping_block_p (insn, through_insn))
    return MOVEUP_EXPR_NULL;

  /* Deal with data dependencies.  */
  was_target_conflict = false;
  full_ds = has_dependence_p (expr, through_insn, &has_dep_p);
  if (full_ds == 0)
    {
      if (!CANT_MOVE_TRAPPING (expr, through_insn))
        return MOVEUP_EXPR_SAME;
    }
  else
    {
      /* We can move a UNIQUE insn up only as a whole and unchanged,
         so it shouldn't have any dependencies.  */
      if (VINSN_UNIQUE_P (vi))
        return MOVEUP_EXPR_NULL;
    }

  if (full_ds != 0 && can_speculate_dep_p (full_ds))
    {
      int res;

      res = speculate_expr (expr, full_ds);
      if (res >= 0)
        {
          /* Speculation was successful.  */
          full_ds = 0;
          was_changed = (res > 0);
          if (res == 2)
            was_target_conflict = true;
          if (ptrans_type)
            *ptrans_type = TRANS_SPECULATION;
          sel_clear_has_dependence ();
        }
    }

  if (has_dep_p[DEPS_IN_INSN])
    /* We have some dependency that cannot be discarded.  */
    return MOVEUP_EXPR_NULL;

  if (has_dep_p[DEPS_IN_LHS])
    {
      /* Only separable insns can be moved up with the new register.
         In any case, we should mark that the original register is
         unavailable.  */
      if (!enable_schedule_as_rhs_p || !EXPR_SEPARABLE_P (expr))
        return MOVEUP_EXPR_NULL;

      EXPR_TARGET_AVAILABLE (expr) = false;
      was_target_conflict = true;
      as_rhs = true;
    }

  /* At this point we have either separable insns that will be lifted
     up only as RHSes, or non-separable insns with no dependency in the lhs.
     If the dependency is in the RHS, then try to perform substitution and
     move up the substituted RHS:

      Ex. 1:                              Ex.2
        y = x;                              y = x;
        z = y*2;                            y = y*2;

    In Ex.1 y*2 can be substituted for x*2 and the whole operation can be
    moved above y=x assignment as z=x*2.

    In Ex.2 y*2 also can be substituted for x*2, but only the right hand
    side can be moved because of the output dependency.  The operation was
    cropped to its rhs above.  */
  if (has_dep_p[DEPS_IN_RHS])
    {
      ds_t *rhs_dsp = &has_dep_p[DEPS_IN_RHS];

      /* Can't substitute UNIQUE VINSNs.  */
      gcc_assert (!VINSN_UNIQUE_P (vi));

      if (can_speculate_dep_p (*rhs_dsp))
        {
          int res;

          res = speculate_expr (expr, *rhs_dsp);
          if (res >= 0)
            {
              /* Speculation was successful.  */
              *rhs_dsp = 0;
              was_changed = (res > 0);
              if (res == 2)
                was_target_conflict = true;
              if (ptrans_type)
                *ptrans_type = TRANS_SPECULATION;
            }
          else
            return MOVEUP_EXPR_NULL;
        }
      else if (can_substitute_through_p (through_insn,
                                         *rhs_dsp)
               && substitute_reg_in_expr (expr, through_insn, false))
        {
          /* ??? We cannot perform substitution AND speculation on the same
             insn.  */
          gcc_assert (!was_changed);
          was_changed = true;
          if (ptrans_type)
            *ptrans_type = TRANS_SUBSTITUTION;
          EXPR_WAS_SUBSTITUTED (expr) = true;
        }
      else
        return MOVEUP_EXPR_NULL;
    }

  /* Don't move trapping insns through jumps.
     This check should be at the end to give a chance to control speculation
     to perform its duties.  */
  if (CANT_MOVE_TRAPPING (expr, through_insn))
    return MOVEUP_EXPR_NULL;

  return (was_changed
          ? MOVEUP_EXPR_CHANGED
          : (as_rhs
             ? MOVEUP_EXPR_AS_RHS
             : MOVEUP_EXPR_SAME));
}
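
/* To summarize, moveup_expr maps each (EXPR, THROUGH_INSN) pair to one
   of four outcomes:

     MOVEUP_EXPR_NULL     - EXPR cannot be moved through THROUGH_INSN;
     MOVEUP_EXPR_SAME     - EXPR moves through unchanged;
     MOVEUP_EXPR_AS_RHS   - only the rhs may move; the original target
                            register is marked unavailable;
     MOVEUP_EXPR_CHANGED  - EXPR moves, but was transformed on the way
                            (substitution or speculation).  */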

/* Try to look at the bitmap caches for the EXPR and INSN pair; return true
   if successful.  When INSIDE_INSN_GROUP, also try to ignore dependencies
   that can exist within a parallel group.  Write to RES the resulting
   code for moveup_expr.  */
static bool
try_bitmap_cache (expr_t expr, insn_t insn,
                  bool inside_insn_group,
                  enum MOVEUP_EXPR_CODE *res)
{
  int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));

  /* First check whether we've analyzed this situation already.  */
  if (bitmap_bit_p (INSN_ANALYZED_DEPS (insn), expr_uid))
    {
      if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
        {
          if (sched_verbose >= 6)
            sel_print ("removed (cached)\n");
          *res = MOVEUP_EXPR_NULL;
          return true;
        }
      else
        {
          if (sched_verbose >= 6)
            sel_print ("unchanged (cached)\n");
          *res = MOVEUP_EXPR_SAME;
          return true;
        }
    }
  else if (bitmap_bit_p (INSN_FOUND_DEPS (insn), expr_uid))
    {
      if (inside_insn_group)
        {
          if (sched_verbose >= 6)
            sel_print ("unchanged (as RHS, cached, inside insn group)\n");
          *res = MOVEUP_EXPR_SAME;
          return true;
        }
      else
        EXPR_TARGET_AVAILABLE (expr) = false;

      /* This is the only case when propagation result can change over time,
         as we can dynamically switch off scheduling as RHS.  In this case,
         just check the flag to reach the correct decision.  */
      if (enable_schedule_as_rhs_p)
        {
          if (sched_verbose >= 6)
            sel_print ("unchanged (as RHS, cached)\n");
          *res = MOVEUP_EXPR_AS_RHS;
          return true;
        }
      else
        {
          if (sched_verbose >= 6)
            sel_print ("removed (cached as RHS, but renaming"
                       " is now disabled)\n");
          *res = MOVEUP_EXPR_NULL;
          return true;
        }
    }

  return false;
}

/* Try to look at the hashtable of transformed insns for the EXPR and INSN
   pair; return true if successful.  Write to RES the resulting code for
   moveup_expr.  */
static bool
try_transformation_cache (expr_t expr, insn_t insn,
                          enum MOVEUP_EXPR_CODE *res)
{
  struct transformed_insns *pti
    = (struct transformed_insns *)
    htab_find_with_hash (INSN_TRANSFORMED_INSNS (insn),
                         &EXPR_VINSN (expr),
                         VINSN_HASH_RTX (EXPR_VINSN (expr)));
  if (pti)
    {
      /* This EXPR was already moved through this insn and was
         changed as a result.  Fetch the proper data from
         the hashtable.  */
      insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
                              INSN_UID (insn), pti->type,
                              pti->vinsn_old, pti->vinsn_new,
                              EXPR_SPEC_DONE_DS (expr));

      if (INSN_IN_STREAM_P (VINSN_INSN_RTX (pti->vinsn_new)))
        pti->vinsn_new = vinsn_copy (pti->vinsn_new, true);
      change_vinsn_in_expr (expr, pti->vinsn_new);
      if (pti->was_target_conflict)
        EXPR_TARGET_AVAILABLE (expr) = false;
      if (pti->type == TRANS_SPECULATION)
        {
          EXPR_SPEC_DONE_DS (expr) = pti->ds;
          EXPR_NEEDS_SPEC_CHECK_P (expr) |= pti->needs_check;
        }

      if (sched_verbose >= 6)
        {
          sel_print ("changed (cached): ");
          dump_expr (expr);
          sel_print ("\n");
        }

      *res = MOVEUP_EXPR_CHANGED;
      return true;
    }

  return false;
}

/* Update bitmap caches on INSN with result RES of propagating EXPR.  */
static void
update_bitmap_cache (expr_t expr, insn_t insn, bool inside_insn_group,
                     enum MOVEUP_EXPR_CODE res)
{
  int expr_uid = INSN_UID (EXPR_INSN_RTX (expr));

  /* Do not cache result of propagating jumps through an insn group,
     as it is always true, which is not useful outside the group.  */
  if (inside_insn_group)
    return;

  if (res == MOVEUP_EXPR_NULL)
    {
      bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
      bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
    }
  else if (res == MOVEUP_EXPR_SAME)
    {
      bitmap_set_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
      bitmap_clear_bit (INSN_FOUND_DEPS (insn), expr_uid);
    }
  else if (res == MOVEUP_EXPR_AS_RHS)
    {
      bitmap_clear_bit (INSN_ANALYZED_DEPS (insn), expr_uid);
      bitmap_set_bit (INSN_FOUND_DEPS (insn), expr_uid);
    }
  else
    gcc_unreachable ();
}
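
/* The two bitmaps updated above encode the three cacheable outcomes
   per expr uid:

     ANALYZED_DEPS  FOUND_DEPS   cached result
          1              1       MOVEUP_EXPR_NULL
          1              0       MOVEUP_EXPR_SAME
          0              1       MOVEUP_EXPR_AS_RHS
          0              0       nothing cached yet

   MOVEUP_EXPR_CHANGED results go to the transformation hashtable
   instead, as they must also record the new vinsn.  */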

/* Update hashtable on INSN with changed EXPR, old EXPR_OLD_VINSN
   and transformation type TRANS_TYPE.  */
static void
update_transformation_cache (expr_t expr, insn_t insn,
                             bool inside_insn_group,
                             enum local_trans_type trans_type,
                             vinsn_t expr_old_vinsn)
{
  struct transformed_insns *pti;

  if (inside_insn_group)
    return;

  pti = XNEW (struct transformed_insns);
  pti->vinsn_old = expr_old_vinsn;
  pti->vinsn_new = EXPR_VINSN (expr);
  pti->type = trans_type;
  pti->was_target_conflict = was_target_conflict;
  pti->ds = EXPR_SPEC_DONE_DS (expr);
  pti->needs_check = EXPR_NEEDS_SPEC_CHECK_P (expr);
  vinsn_attach (pti->vinsn_old);
  vinsn_attach (pti->vinsn_new);
  *((struct transformed_insns **)
    htab_find_slot_with_hash (INSN_TRANSFORMED_INSNS (insn),
                              pti, VINSN_HASH_RTX (expr_old_vinsn),
                              INSERT)) = pti;
}

/* Same as moveup_expr, but first looks up the result of
   transformation in caches.  */
static enum MOVEUP_EXPR_CODE
moveup_expr_cached (expr_t expr, insn_t insn, bool inside_insn_group)
{
  enum MOVEUP_EXPR_CODE res;
  bool got_answer = false;

  if (sched_verbose >= 6)
    {
      sel_print ("Moving ");
      dump_expr (expr);
      sel_print (" through %d: ", INSN_UID (insn));
    }

  if (DEBUG_INSN_P (EXPR_INSN_RTX (expr))
      && (sel_bb_head (BLOCK_FOR_INSN (EXPR_INSN_RTX (expr)))
          == EXPR_INSN_RTX (expr)))
    /* Don't use cached information for debug insns that are heads of
       basic blocks.  */;
  else if (try_bitmap_cache (expr, insn, inside_insn_group, &res))
    /* When inside an insn group, we do not want to remove stores conflicting
       with previously issued loads.  */
    got_answer = ! inside_insn_group || res != MOVEUP_EXPR_NULL;
  else if (try_transformation_cache (expr, insn, &res))
    got_answer = true;

  if (! got_answer)
    {
      /* Invoke moveup_expr and record the results.  */
      vinsn_t expr_old_vinsn = EXPR_VINSN (expr);
      ds_t expr_old_spec_ds = EXPR_SPEC_DONE_DS (expr);
      int expr_uid = INSN_UID (VINSN_INSN_RTX (expr_old_vinsn));
      bool unique_p = VINSN_UNIQUE_P (expr_old_vinsn);
      enum local_trans_type trans_type = TRANS_SUBSTITUTION;

      /* ??? Invent something better than this.  We can't allow old_vinsn
         to go, we need it for the history vector.  */
      vinsn_attach (expr_old_vinsn);

      res = moveup_expr (expr, insn, inside_insn_group,
                         &trans_type);
      switch (res)
        {
        case MOVEUP_EXPR_NULL:
          update_bitmap_cache (expr, insn, inside_insn_group, res);
          if (sched_verbose >= 6)
            sel_print ("removed\n");
          break;

        case MOVEUP_EXPR_SAME:
          update_bitmap_cache (expr, insn, inside_insn_group, res);
          if (sched_verbose >= 6)
            sel_print ("unchanged\n");
          break;

        case MOVEUP_EXPR_AS_RHS:
          gcc_assert (!unique_p || inside_insn_group);
          update_bitmap_cache (expr, insn, inside_insn_group, res);
          if (sched_verbose >= 6)
            sel_print ("unchanged (as RHS)\n");
          break;

        case MOVEUP_EXPR_CHANGED:
          gcc_assert (INSN_UID (EXPR_INSN_RTX (expr)) != expr_uid
                      || EXPR_SPEC_DONE_DS (expr) != expr_old_spec_ds);
          insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (expr),
                                  INSN_UID (insn), trans_type,
                                  expr_old_vinsn, EXPR_VINSN (expr),
                                  expr_old_spec_ds);
          update_transformation_cache (expr, insn, inside_insn_group,
                                       trans_type, expr_old_vinsn);
          if (sched_verbose >= 6)
            {
              sel_print ("changed: ");
              dump_expr (expr);
              sel_print ("\n");
            }
          break;
        default:
          gcc_unreachable ();
        }

      vinsn_detach (expr_old_vinsn);
    }

  return res;
}

/* Moves an av set AVP up through INSN, performing necessary
   transformations.  */
static void
moveup_set_expr (av_set_t *avp, insn_t insn, bool inside_insn_group)
{
  av_set_iterator i;
  expr_t expr;

  FOR_EACH_EXPR_1 (expr, i, avp)
    {
      switch (moveup_expr_cached (expr, insn, inside_insn_group))
        {
        case MOVEUP_EXPR_SAME:
        case MOVEUP_EXPR_AS_RHS:
          break;

        case MOVEUP_EXPR_NULL:
          av_set_iter_remove (&i);
          break;

        case MOVEUP_EXPR_CHANGED:
          expr = merge_with_other_exprs (avp, &i, expr);
          break;

        default:
          gcc_unreachable ();
        }
    }
}

/* Moves AVP set along PATH.  */
static void
moveup_set_inside_insn_group (av_set_t *avp, ilist_t path)
{
  int last_cycle;

  if (sched_verbose >= 6)
    sel_print ("Moving expressions up in the insn group...\n");
  if (! path)
    return;
  last_cycle = INSN_SCHED_CYCLE (ILIST_INSN (path));
  while (path
         && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
    {
      moveup_set_expr (avp, ILIST_INSN (path), true);
      path = ILIST_NEXT (path);
    }
}

/* Returns true if after moving EXPR along PATH it equals EXPR_VLIW.  */
static bool
equal_after_moveup_path_p (expr_t expr, ilist_t path, expr_t expr_vliw)
{
  expr_def _tmp, *tmp = &_tmp;
  int last_cycle;
  bool res = true;

  copy_expr_onside (tmp, expr);
  last_cycle = path ? INSN_SCHED_CYCLE (ILIST_INSN (path)) : 0;
  while (path
         && res
         && INSN_SCHED_CYCLE (ILIST_INSN (path)) == last_cycle)
    {
      res = (moveup_expr_cached (tmp, ILIST_INSN (path), true)
             != MOVEUP_EXPR_NULL);
      path = ILIST_NEXT (path);
    }

  if (res)
    {
      vinsn_t tmp_vinsn = EXPR_VINSN (tmp);
      vinsn_t expr_vliw_vinsn = EXPR_VINSN (expr_vliw);

      if (tmp_vinsn != expr_vliw_vinsn)
        res = vinsn_equal_p (tmp_vinsn, expr_vliw_vinsn);
    }

  clear_expr (tmp);
  return res;
}


/* Functions that compute av and lv sets.  */

/* Returns true if INSN is not a downward continuation of the given path P in
   the current stage.  */
static bool
is_ineligible_successor (insn_t insn, ilist_t p)
{
  insn_t prev_insn;

  /* Check if insn is not deleted.  */
  if (PREV_INSN (insn) && NEXT_INSN (PREV_INSN (insn)) != insn)
    gcc_unreachable ();
  else if (NEXT_INSN (insn) && PREV_INSN (NEXT_INSN (insn)) != insn)
    gcc_unreachable ();

  /* If it's the first insn visited, then the successor is ok.  */
  if (!p)
    return false;

  prev_insn = ILIST_INSN (p);

  if (/* a backward edge.  */
      INSN_SEQNO (insn) < INSN_SEQNO (prev_insn)
      /* is already visited.  */
      || (INSN_SEQNO (insn) == INSN_SEQNO (prev_insn)
          && (ilist_is_in_p (p, insn)
              /* We can reach another fence here and still seqno of insn
                 would be equal to seqno of prev_insn.  This is possible
                 when prev_insn is a previously created bookkeeping copy.
                 In that case it'd get a seqno of insn.  Thus, check here
                 whether insn is in current fence too.  */
              || IN_CURRENT_FENCE_P (insn)))
      /* Was already scheduled on this round.  */
      || (INSN_SEQNO (insn) > INSN_SEQNO (prev_insn)
          && IN_CURRENT_FENCE_P (insn))
      /* An insn from another fence could also be
         scheduled earlier even if this insn is not in
         a fence list right now.  Check INSN_SCHED_CYCLE instead.  */
      || (!pipelining_p
          && INSN_SCHED_TIMES (insn) > 0))
    return true;
  else
    return false;
}

/* Computes the av_set below the last bb insn INSN, doing all the 'dirty work'
   of handling multiple successors and properly merging its av_sets.  P is
   the current path traversed.  WS is the size of the lookahead window.
   Return the av set computed.  */
static av_set_t
compute_av_set_at_bb_end (insn_t insn, ilist_t p, int ws)
{
  struct succs_info *sinfo;
  av_set_t expr_in_all_succ_branches = NULL;
  int is;
  insn_t succ, zero_succ = NULL;
  av_set_t av1 = NULL;

  gcc_assert (sel_bb_end_p (insn));

  /* Find the different kinds of successors needed for correct computing of
     SPEC and TARGET_AVAILABLE attributes.  */
  sinfo = compute_succs_info (insn, SUCCS_NORMAL);

  /* Debug output.  */
  if (sched_verbose >= 6)
    {
      sel_print ("successors of bb end (%d): ", INSN_UID (insn));
      dump_insn_vector (sinfo->succs_ok);
      sel_print ("\n");
      if (sinfo->succs_ok_n != sinfo->all_succs_n)
        sel_print ("real successors num: %d\n", sinfo->all_succs_n);
    }

  /* Add insn to the tail of the current path.  */
  ilist_add (&p, insn);

  for (is = 0; VEC_iterate (rtx, sinfo->succs_ok, is, succ); is++)
    {
      av_set_t succ_set;

      /* We will edit SUCC_SET and EXPR_SPEC field of its elements.  */
      succ_set = compute_av_set_inside_bb (succ, p, ws, true);

      av_set_split_usefulness (succ_set,
                               VEC_index (int, sinfo->probs_ok, is),
                               sinfo->all_prob);

      if (sinfo->all_succs_n > 1)
        {
          /* Find EXPR'es that came from *all* successors and save them
             into expr_in_all_succ_branches.  This set will be used later
             for calculating speculation attributes of EXPR'es.  */
          if (is == 0)
            {
              expr_in_all_succ_branches = av_set_copy (succ_set);

              /* Remember the first successor for later.  */
              zero_succ = succ;
            }
          else
            {
              av_set_iterator i;
              expr_t expr;

              FOR_EACH_EXPR_1 (expr, i, &expr_in_all_succ_branches)
                if (!av_set_is_in_p (succ_set, EXPR_VINSN (expr)))
                  av_set_iter_remove (&i);
            }
        }

      /* Union the av_sets.  Check liveness restrictions on target registers
         in special case of two successors.  */
      if (sinfo->succs_ok_n == 2 && is == 1)
        {
          basic_block bb0 = BLOCK_FOR_INSN (zero_succ);
          basic_block bb1 = BLOCK_FOR_INSN (succ);

          gcc_assert (BB_LV_SET_VALID_P (bb0) && BB_LV_SET_VALID_P (bb1));
          av_set_union_and_live (&av1, &succ_set,
                                 BB_LV_SET (bb0),
                                 BB_LV_SET (bb1),
                                 insn);
        }
      else
        av_set_union_and_clear (&av1, &succ_set, insn);
    }

  /* Check liveness restrictions the hard way when there are more than
     two successors.  */
  if (sinfo->succs_ok_n > 2)
    for (is = 0; VEC_iterate (rtx, sinfo->succs_ok, is, succ); is++)
      {
        basic_block succ_bb = BLOCK_FOR_INSN (succ);

        gcc_assert (BB_LV_SET_VALID_P (succ_bb));
        mark_unavailable_targets (av1, BB_AV_SET (succ_bb),
                                  BB_LV_SET (succ_bb));
      }

  /* Finally, check liveness restrictions on paths leaving the region.  */
  if (sinfo->all_succs_n > sinfo->succs_ok_n)
    for (is = 0; VEC_iterate (rtx, sinfo->succs_other, is, succ); is++)
      mark_unavailable_targets
        (av1, NULL, BB_LV_SET (BLOCK_FOR_INSN (succ)));

  if (sinfo->all_succs_n > 1)
    {
      av_set_iterator i;
      expr_t expr;

      /* Increase the spec attribute of all EXPR'es that didn't come
         from all successors.  */
      FOR_EACH_EXPR (expr, i, av1)
        if (!av_set_is_in_p (expr_in_all_succ_branches, EXPR_VINSN (expr)))
          EXPR_SPEC (expr)++;

      av_set_clear (&expr_in_all_succ_branches);

      /* Do not move conditional branches through other
         conditional branches.  So, remove all conditional
         branches from av_set if current operator is a conditional
         branch.  */
      av_set_substract_cond_branches (&av1);
    }

  ilist_remove (&p);
  free_succs_info (sinfo);

  if (sched_verbose >= 6)
    {
      sel_print ("av_succs (%d): ", INSN_UID (insn));
      dump_av_set (av1);
      sel_print ("\n");
    }

  return av1;
}
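
/* For illustration only: the EXPR_SPEC bookkeeping above on a
   hypothetical two-way branch.  If insn I ends a block with successors
   S1 and S2, an expression available in both av(S1) and av(S2) survives
   in expr_in_all_succ_branches and keeps its spec count, whereas an
   expression available only in av(S1) gets EXPR_SPEC incremented:
   moving it up past I would be control-speculative with respect to the
   path through S2.  */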

/* This function computes the av_set for FIRST_INSN by dragging a valid
   av_set through all basic block insns either from the end of the basic
   block (computed using compute_av_set_at_bb_end) or from the insn on
   which MAX_WS was exceeded.  It uses compute_av_set_at_bb_end to compute
   the av_set below the basic block and to handle conditional branches.
   FIRST_INSN - the basic block head, P - path consisting of the insns
   traversed on the way to the FIRST_INSN (the path is sparse, only bb heads
   and bb ends are added to the path), WS - current window size,
   NEED_COPY_P - true if we'll make a copy of av_set before returning it.  */
static av_set_t
compute_av_set_inside_bb (insn_t first_insn, ilist_t p, int ws,
                          bool need_copy_p)
{
  insn_t cur_insn;
  int end_ws = ws;
  insn_t bb_end = sel_bb_end (BLOCK_FOR_INSN (first_insn));
  insn_t after_bb_end = NEXT_INSN (bb_end);
  insn_t last_insn;
  av_set_t av = NULL;
  basic_block cur_bb = BLOCK_FOR_INSN (first_insn);

  /* Return NULL if insn is not on the legitimate downward path.  */
  if (is_ineligible_successor (first_insn, p))
    {
      if (sched_verbose >= 6)
        sel_print ("Insn %d is ineligible_successor\n", INSN_UID (first_insn));

      return NULL;
    }

  /* If insn already has valid av(insn) computed, just return it.  */
  if (AV_SET_VALID_P (first_insn))
    {
      av_set_t av_set;

      if (sel_bb_head_p (first_insn))
        av_set = BB_AV_SET (BLOCK_FOR_INSN (first_insn));
      else
        av_set = NULL;

      if (sched_verbose >= 6)
        {
          sel_print ("Insn %d has a valid av set: ", INSN_UID (first_insn));
          dump_av_set (av_set);
          sel_print ("\n");
        }

      return need_copy_p ? av_set_copy (av_set) : av_set;
    }

  ilist_add (&p, first_insn);

  /* After this loop has completed, LAST_INSN will hold the insn from
     which to start the backward computation of the av_set: the set is
     either NULL because the window size was exceeded on that insn, or
     a valid av_set as returned by compute_av_set for the last insn of
     the basic block.  */
  for (last_insn = first_insn; last_insn != after_bb_end;
       last_insn = NEXT_INSN (last_insn))
    {
      /* We may encounter valid av_set not only on bb_head, but also on
         those insns on which previously MAX_WS was exceeded.  */
      if (AV_SET_VALID_P (last_insn))
        {
          if (sched_verbose >= 6)
            sel_print ("Insn %d has a valid empty av set\n", INSN_UID (last_insn));
          break;
        }

      /* The special case: the last insn of the BB may be an
         ineligible_successor due to its SEQ_NO, which was set on
         it during bookkeeping.  */
      if (last_insn != first_insn
          && is_ineligible_successor (last_insn, p))
        {
          if (sched_verbose >= 6)
            sel_print ("Insn %d is ineligible_successor\n", INSN_UID (last_insn));
          break;
        }

      if (DEBUG_INSN_P (last_insn))
        continue;

      if (end_ws > max_ws)
        {
          /* We can reach max lookahead size at bb_header, so clean av_set
             first.  */
          INSN_WS_LEVEL (last_insn) = global_level;

          if (sched_verbose >= 6)
            sel_print ("Insn %d is beyond the software lookahead window size\n",
                       INSN_UID (last_insn));
          break;
        }

      end_ws++;
    }

2939
  /* Get the valid av_set into AV above the LAST_INSN to start backward
2940
     computation from.  It either will be empty av_set or av_set computed from
2941
     the successors on the last insn of the current bb.  */
2942
  if (last_insn != after_bb_end)
2943
    {
2944
      av = NULL;
2945
 
2946
      /* This is needed only to obtain av_sets that are identical to
2947
         those computed by the old compute_av_set version.  */
2948
      if (last_insn == first_insn && !INSN_NOP_P (last_insn))
2949
        av_set_add (&av, INSN_EXPR (last_insn));
2950
    }
2951
  else
2952
    /* END_WS is always already increased by 1 if LAST_INSN == AFTER_BB_END.  */
2953
    av = compute_av_set_at_bb_end (bb_end, p, end_ws);
2954
 
2955
  /* Compute av_set in AV starting from below the LAST_INSN up to
2956
     location above the FIRST_INSN.  */
2957
  for (cur_insn = PREV_INSN (last_insn); cur_insn != PREV_INSN (first_insn);
2958
       cur_insn = PREV_INSN (cur_insn))
2959
    if (!INSN_NOP_P (cur_insn))
2960
      {
2961
        expr_t expr;
2962
 
2963
        moveup_set_expr (&av, cur_insn, false);
2964
 
2965
        /* If the expression for CUR_INSN is already in the set,
2966
           replace it by the new one.  */
2967
        expr = av_set_lookup (av, INSN_VINSN (cur_insn));
2968
        if (expr != NULL)
2969
          {
2970
            clear_expr (expr);
2971
            copy_expr (expr, INSN_EXPR (cur_insn));
2972
          }
2973
        else
2974
          av_set_add (&av, INSN_EXPR (cur_insn));
2975
      }
2976
 
2977
  /* Clear stale bb_av_set.  */
2978
  if (sel_bb_head_p (first_insn))
2979
    {
2980
      av_set_clear (&BB_AV_SET (cur_bb));
2981
      BB_AV_SET (cur_bb) = need_copy_p ? av_set_copy (av) : av;
2982
      BB_AV_LEVEL (cur_bb) = global_level;
2983
    }
2984
 
2985
  if (sched_verbose >= 6)
2986
    {
2987
      sel_print ("Computed av set for insn %d: ", INSN_UID (first_insn));
2988
      dump_av_set (av);
2989
      sel_print ("\n");
2990
    }
2991
 
2992
  ilist_remove (&p);
2993
  return av;
2994
}
2995
 
2996
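/* A standalone, illustrative sketch of the backward pass above: walk a
   straight-line block from its last insn to its first, dropping expressions
   blocked by each insn and then adding the insn's own expression.  The toy
   types and the "moveup" test below are hypothetical stand-ins for GCC's
   vinsn/av_set machinery (which also performs substitution and renaming,
   and replaces rather than duplicates equal expressions); anti-dependences
   are ignored for brevity.  Guarded out of this translation unit; compile
   separately to experiment.  */
#if 0
#include <stdio.h>

#define MAX_EXPRS 16

struct toy_insn { int uid; int sets_reg; int uses_reg; };

/* An av "set" as a fixed array of expressions (one per source insn).  */
struct toy_av { struct toy_insn exprs[MAX_EXPRS]; int n; };

/* Toy moveup: EXPR survives moving up through INSN only if INSN writes
   neither the register EXPR sets nor the register EXPR uses.  */
static int
toy_moveup_ok (struct toy_insn expr, struct toy_insn insn)
{
  return insn.sets_reg != expr.sets_reg && insn.sets_reg != expr.uses_reg;
}

static void
toy_compute_av (struct toy_insn *block, int n, struct toy_av *av)
{
  int i, j;

  av->n = 0;
  for (i = n - 1; i >= 0; i--)
    {
      /* Filter out expressions blocked by BLOCK[i], swapping with the
         last element as VEC_unordered_remove would.  */
      for (j = 0; j < av->n; )
        if (!toy_moveup_ok (av->exprs[j], block[i]))
          av->exprs[j] = av->exprs[--av->n];
        else
          j++;
      /* Add the insn's own expression, as av_set_add does above.  */
      if (av->n < MAX_EXPRS)
        av->exprs[av->n++] = block[i];
    }
}

int
main (void)
{
  /* r1 = r2; r3 = r4; r5 = r3.  The third insn depends on the second,
     so only the first two are available at the block head.  */
  struct toy_insn block[] = { {1, 1, 2}, {2, 3, 4}, {3, 5, 3} };
  struct toy_av av;
  int i;

  toy_compute_av (block, 3, &av);
  for (i = 0; i < av.n; i++)
    printf ("available: insn %d\n", av.exprs[i].uid);
  return 0;
}
#endif
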
/* Compute av set before INSN.
   INSN - the current operation (actual rtx INSN)
   P - the current path, which is the list of insns visited so far
   WS - software lookahead window size.
   UNIQUE_P - TRUE if the returned av_set will be changed, hence
   if we want to save the computed av_set in s_i_d, we should make a copy
   of it.

   In the resulting set we will have only expressions that don't have delay
   stalls and nonsubstitutable dependences.  */
static av_set_t
compute_av_set (insn_t insn, ilist_t p, int ws, bool unique_p)
{
  return compute_av_set_inside_bb (insn, p, ws, unique_p);
}

/* Propagate a liveness set LV through INSN.  */
static void
propagate_lv_set (regset lv, insn_t insn)
{
  gcc_assert (INSN_P (insn));

  if (INSN_NOP_P (insn))
    return;

  df_simulate_one_insn_backwards (BLOCK_FOR_INSN (insn), insn, lv);
}

/* Return the liveness set at the end of BB.  */
static regset
compute_live_after_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;
  regset lv = get_clear_regset_from_pool ();

  gcc_assert (!ignore_first);

  FOR_EACH_EDGE (e, ei, bb->succs)
    if (sel_bb_empty_p (e->dest))
      {
        if (! BB_LV_SET_VALID_P (e->dest))
          {
            gcc_unreachable ();
            gcc_assert (BB_LV_SET (e->dest) == NULL);
            BB_LV_SET (e->dest) = compute_live_after_bb (e->dest);
            BB_LV_SET_VALID_P (e->dest) = true;
          }
        IOR_REG_SET (lv, BB_LV_SET (e->dest));
      }
    else
      IOR_REG_SET (lv, compute_live (sel_bb_head (e->dest)));

  return lv;
}

/* Compute the set of all live registers at the point before INSN and save
   it at INSN if INSN is a bb header.  */
regset
compute_live (insn_t insn)
{
  basic_block bb = BLOCK_FOR_INSN (insn);
  insn_t final, temp;
  regset lv;

  /* Return the valid set if we're already on it.  */
  if (!ignore_first)
    {
      regset src = NULL;

      if (sel_bb_head_p (insn) && BB_LV_SET_VALID_P (bb))
        src = BB_LV_SET (bb);
      else
        {
          gcc_assert (in_current_region_p (bb));
          if (INSN_LIVE_VALID_P (insn))
            src = INSN_LIVE (insn);
        }

      if (src)
        {
          lv = get_regset_from_pool ();
          COPY_REG_SET (lv, src);

          if (sel_bb_head_p (insn) && ! BB_LV_SET_VALID_P (bb))
            {
              COPY_REG_SET (BB_LV_SET (bb), lv);
              BB_LV_SET_VALID_P (bb) = true;
            }

          return_regset_to_pool (lv);
          return lv;
        }
    }

  /* We've skipped the wrong lv_set.  Don't skip the right one.  */
  ignore_first = false;
  gcc_assert (in_current_region_p (bb));

  /* Find a valid LV set in this block or below, if needed.
     Start searching from the next insn: either ignore_first is true, or
     INSN doesn't have a correct live set.  */
  temp = NEXT_INSN (insn);
  final = NEXT_INSN (BB_END (bb));
  while (temp != final && ! INSN_LIVE_VALID_P (temp))
    temp = NEXT_INSN (temp);
  if (temp == final)
    {
      lv = compute_live_after_bb (bb);
      temp = PREV_INSN (temp);
    }
  else
    {
      lv = get_regset_from_pool ();
      COPY_REG_SET (lv, INSN_LIVE (temp));
    }

  /* Put correct lv sets on the insns which have bad sets.  */
  final = PREV_INSN (insn);
  while (temp != final)
    {
      propagate_lv_set (lv, temp);
      COPY_REG_SET (INSN_LIVE (temp), lv);
      INSN_LIVE_VALID_P (temp) = true;
      temp = PREV_INSN (temp);
    }

  /* Also put it in a BB.  */
  if (sel_bb_head_p (insn))
    {
      basic_block bb = BLOCK_FOR_INSN (insn);

      COPY_REG_SET (BB_LV_SET (bb), lv);
      BB_LV_SET_VALID_P (bb) = true;
    }

  /* We return LV to the pool, but will not clear it there.  Thus we can
     legitimately use LV until the next use of regset_pool_get ().  */
  return_regset_to_pool (lv);
  return lv;
}

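/* A standalone sketch of the pool idiom used by compute_live above:
   returning a set to the pool does not clear it, so the caller may keep
   reading the returned pointer until the next allocation hands the same
   storage out again.  Toy code with hypothetical names, not the real
   regset pool (which is not strictly LIFO); compile separately.  */
#if 0
#include <assert.h>
#include <string.h>

#define POOL_SIZE 4
#define SET_WORDS 8

static unsigned toy_pool[POOL_SIZE][SET_WORDS];
static int toy_top;            /* Number of sets currently handed out.  */

static unsigned *
toy_get (void)
{
  assert (toy_top < POOL_SIZE);
  return toy_pool[toy_top++];
}

static void
toy_put (unsigned *set)
{
  /* Only the most recently allocated set can be returned in this toy.  */
  assert (set == toy_pool[toy_top - 1]);
  toy_top--;                   /* Contents left intact, as in the pass.  */
}

int
main (void)
{
  unsigned *lv = toy_get ();

  memset (lv, 0xff, sizeof (unsigned) * SET_WORDS);
  toy_put (lv);

  /* Still valid to read: the pool did not clear it...  */
  assert (lv[0] == ~0u);

  /* ...but the next allocation may reuse the same storage.  */
  assert (toy_get () == lv);
  return 0;
}
#endif
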
/* Update liveness sets for INSN.  */
static inline void
update_liveness_on_insn (rtx insn)
{
  ignore_first = true;
  compute_live (insn);
}

/* Compute liveness below INSN and write it into REGS.  */
static inline void
compute_live_below_insn (rtx insn, regset regs)
{
  rtx succ;
  succ_iterator si;

  FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
    IOR_REG_SET (regs, compute_live (succ));
}

/* Update the data gathered in av and lv sets starting from INSN.  */
static void
update_data_sets (rtx insn)
{
  update_liveness_on_insn (insn);
  if (sel_bb_head_p (insn))
    {
      gcc_assert (AV_LEVEL (insn) != 0);
      BB_AV_LEVEL (BLOCK_FOR_INSN (insn)) = -1;
      compute_av_set (insn, NULL, 0, 0);
    }
}


/* Helper for move_op () and find_used_regs ().
   Return the speculation type for which a check should be created at the
   place of INSN.  EXPR is one of the original ops we are searching for.  */
static ds_t
get_spec_check_type_for_insn (insn_t insn, expr_t expr)
{
  ds_t to_check_ds;
  ds_t already_checked_ds = EXPR_SPEC_DONE_DS (INSN_EXPR (insn));

  to_check_ds = EXPR_SPEC_TO_CHECK_DS (expr);

  if (targetm.sched.get_insn_checked_ds)
    already_checked_ds |= targetm.sched.get_insn_checked_ds (insn);

  if (spec_info != NULL
      && (spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL))
    already_checked_ds |= BEGIN_CONTROL;

  already_checked_ds = ds_get_speculation_types (already_checked_ds);

  to_check_ds &= ~already_checked_ds;

  return to_check_ds;
}

/* Find the set of registers that are unavailable for storing exprs
   while moving ORIG_OPS up on the path starting from INSN due to
   liveness (USED_REGS) or hardware restrictions (REG_RENAME_P).

   All the original operations found during the traversal are saved in the
   ORIGINAL_INSNS list.

   REG_RENAME_P denotes the set of hardware registers that
   cannot be used with renaming due to the register class restrictions,
   mode restrictions and others (the register we'll choose should be of a
   class compatible with the original uses, shouldn't be in call_used_regs,
   should satisfy HARD_REGNO_RENAME_OK, etc.).

   Returns TRUE if we've found all original insns, FALSE otherwise.

   This function utilizes code_motion_path_driver (formerly find_used_regs_1)
   to traverse the code motion paths.  This helper function finds registers
   that are not available for storing exprs while moving ORIG_OPS up on the
   path starting from INSN.  A register is considered used on the moving path
   if one of the following conditions is not satisfied:

      (1) a register not set or read on any path from xi to an instance of
          the original operation,
      (2) not among the live registers of the point immediately following the
          first original operation on a given downward path, except for the
          original target register of the operation,
      (3) not live on the other path of any conditional branch that is passed
          by the operation, in case original operations are not present on
          both paths of the conditional branch.

   REG_RENAME_P->CROSSES_CALL is true if there is a call insn on the path
   from INSN to the original insn.  In this case CALL_USED_REG_SET will be
   added to unavailable hard regs at the point the original operation is
   found.  */

static bool
find_used_regs (insn_t insn, av_set_t orig_ops, regset used_regs,
                struct reg_rename  *reg_rename_p, def_list_t *original_insns)
{
  def_list_iterator i;
  def_t def;
  int res;
  bool needs_spec_check_p = false;
  expr_t expr;
  av_set_iterator expr_iter;
  struct fur_static_params sparams;
  struct cmpd_local_params lparams;

  /* We haven't visited any blocks yet.  */
  bitmap_clear (code_motion_visited_blocks);

  /* Init parameters for code_motion_path_driver.  */
  sparams.crosses_call = false;
  sparams.original_insns = original_insns;
  sparams.used_regs = used_regs;

  /* Set the appropriate hooks and data.  */
  code_motion_path_driver_info = &fur_hooks;

  res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);

  reg_rename_p->crosses_call |= sparams.crosses_call;

  gcc_assert (res == 1);
  gcc_assert (original_insns && *original_insns);

  /* ??? We calculate whether an expression needs a check when computing
     av sets.  This information is not as precise as it could be due to
     merging this bit in merge_expr.  We can do better in find_used_regs,
     but we want to avoid multiple traversals of the same code motion
     paths.  */
  FOR_EACH_EXPR (expr, expr_iter, orig_ops)
    needs_spec_check_p |= EXPR_NEEDS_SPEC_CHECK_P (expr);

  /* Mark hardware regs in REG_RENAME_P that are not suitable
     for renaming expr in INSN due to hardware restrictions (register class,
     modes compatibility etc).  */
  FOR_EACH_DEF (def, i, *original_insns)
    {
      vinsn_t vinsn = INSN_VINSN (def->orig_insn);

      if (VINSN_SEPARABLE_P (vinsn))
        mark_unavailable_hard_regs (def, reg_rename_p, used_regs);

      /* Do not allow clobbering of ld.[sa] address in case some of the
         original operations need a check.  */
      if (needs_spec_check_p)
        IOR_REG_SET (used_regs, VINSN_REG_USES (vinsn));
    }

  return true;
}


/* Functions to choose the best insn from available ones.  */

/* Adjusts the priority for EXPR using the backend *_adjust_priority hook.  */
static int
sel_target_adjust_priority (expr_t expr)
{
  int priority = EXPR_PRIORITY (expr);
  int new_priority;

  if (targetm.sched.adjust_priority)
    new_priority = targetm.sched.adjust_priority (EXPR_INSN_RTX (expr), priority);
  else
    new_priority = priority;

  /* If the priority has changed, adjust EXPR_PRIORITY_ADJ accordingly.  */
  EXPR_PRIORITY_ADJ (expr) = new_priority - EXPR_PRIORITY (expr);

  gcc_assert (EXPR_PRIORITY_ADJ (expr) >= 0);

  if (sched_verbose >= 4)
    sel_print ("sel_target_adjust_priority: insn %d,  %d+%d = %d.\n",
               INSN_UID (EXPR_INSN_RTX (expr)), EXPR_PRIORITY (expr),
               EXPR_PRIORITY_ADJ (expr), new_priority);

  return new_priority;
}

/* Rank two available exprs for scheduling.  Never return 0 here.  */
static int
sel_rank_for_schedule (const void *x, const void *y)
{
  expr_t tmp = *(const expr_t *) y;
  expr_t tmp2 = *(const expr_t *) x;
  insn_t tmp_insn, tmp2_insn;
  vinsn_t tmp_vinsn, tmp2_vinsn;
  int val;

  tmp_vinsn = EXPR_VINSN (tmp);
  tmp2_vinsn = EXPR_VINSN (tmp2);
  tmp_insn = EXPR_INSN_RTX (tmp);
  tmp2_insn = EXPR_INSN_RTX (tmp2);

  /* Schedule debug insns as early as possible.  */
  if (DEBUG_INSN_P (tmp_insn) && !DEBUG_INSN_P (tmp2_insn))
    return -1;
  else if (DEBUG_INSN_P (tmp2_insn))
    return 1;

  /* Prefer SCHED_GROUP_P insns to any others.  */
  if (SCHED_GROUP_P (tmp_insn) != SCHED_GROUP_P (tmp2_insn))
    {
      if (VINSN_UNIQUE_P (tmp_vinsn) && VINSN_UNIQUE_P (tmp2_vinsn))
        return SCHED_GROUP_P (tmp2_insn) ? 1 : -1;

      /* Now uniqueness means SCHED_GROUP_P is set, because schedule groups
         cannot be cloned.  */
      if (VINSN_UNIQUE_P (tmp2_vinsn))
        return 1;
      return -1;
    }

  /* Discourage scheduling of speculative checks.  */
  val = (sel_insn_is_speculation_check (tmp_insn)
         - sel_insn_is_speculation_check (tmp2_insn));
  if (val)
    return val;

  /* Prefer a not-yet-scheduled insn over a scheduled one.  */
  if (EXPR_SCHED_TIMES (tmp) > 0 || EXPR_SCHED_TIMES (tmp2) > 0)
    {
      val = EXPR_SCHED_TIMES (tmp) - EXPR_SCHED_TIMES (tmp2);
      if (val)
        return val;
    }

  /* Prefer a jump over a non-jump instruction.  */
  if (control_flow_insn_p (tmp_insn) && !control_flow_insn_p (tmp2_insn))
    return -1;
  else if (control_flow_insn_p (tmp2_insn) && !control_flow_insn_p (tmp_insn))
    return 1;

  /* Prefer an expr with greater priority.  */
  if (EXPR_USEFULNESS (tmp) != 0 && EXPR_USEFULNESS (tmp2) != 0)
    {
      int p2 = EXPR_PRIORITY (tmp2) + EXPR_PRIORITY_ADJ (tmp2),
          p1 = EXPR_PRIORITY (tmp) + EXPR_PRIORITY_ADJ (tmp);

      val = p2 * EXPR_USEFULNESS (tmp2) - p1 * EXPR_USEFULNESS (tmp);
    }
  else
    val = EXPR_PRIORITY (tmp2) - EXPR_PRIORITY (tmp)
          + EXPR_PRIORITY_ADJ (tmp2) - EXPR_PRIORITY_ADJ (tmp);
  if (val)
    return val;

  if (spec_info != NULL && spec_info->mask != 0)
    /* This code was taken from haifa-sched.c: rank_for_schedule ().  */
    {
      ds_t ds1, ds2;
      dw_t dw1, dw2;
      int dw;

      ds1 = EXPR_SPEC_DONE_DS (tmp);
      if (ds1)
        dw1 = ds_weak (ds1);
      else
        dw1 = NO_DEP_WEAK;

      ds2 = EXPR_SPEC_DONE_DS (tmp2);
      if (ds2)
        dw2 = ds_weak (ds2);
      else
        dw2 = NO_DEP_WEAK;

      dw = dw2 - dw1;
      if (dw > (NO_DEP_WEAK / 8) || dw < -(NO_DEP_WEAK / 8))
        return dw;
    }

  /* Prefer an old insn to a bookkeeping insn.  */
  if (INSN_UID (tmp_insn) < first_emitted_uid
      && INSN_UID (tmp2_insn) >= first_emitted_uid)
    return -1;
  if (INSN_UID (tmp_insn) >= first_emitted_uid
      && INSN_UID (tmp2_insn) < first_emitted_uid)
    return 1;

  /* Prefer an insn with smaller UID, as a last resort.
     We can't safely use INSN_LUID as it is defined only for those insns
     that are in the stream.  */
  return INSN_UID (tmp_insn) - INSN_UID (tmp2_insn);
}

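/* A standalone sketch of the "never return 0" contract above: qsort is not
   stable, so ties are broken by a unique id, which makes the resulting
   order deterministic across runs.  Note that the real comparator reads Y
   into TMP and X into TMP2, so the most desirable expressions end up at
   the end of the vector (position 0 of the ready-list view); this toy
   sorts best-first for simplicity.  Hypothetical toy types; compile
   separately.  */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct toy_expr { int uid; int priority; };

static int
toy_rank (const void *x, const void *y)
{
  const struct toy_expr *a = (const struct toy_expr *) x;
  const struct toy_expr *b = (const struct toy_expr *) y;

  /* Higher priority first...  */
  if (b->priority != a->priority)
    return b->priority - a->priority;
  /* ...and a unique tie-break, so 0 is never returned.  */
  return a->uid - b->uid;
}

int
main (void)
{
  struct toy_expr v[] = { {3, 5}, {1, 7}, {2, 5} };
  int i;

  qsort (v, 3, sizeof v[0], toy_rank);
  for (i = 0; i < 3; i++)
    printf ("uid %d (prio %d)\n", v[i].uid, v[i].priority);
  return 0;
}
#endif
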
/* Filter out expressions from the av set pointed to by AV_PTR
   that are pipelined too many times.  */
static void
process_pipelined_exprs (av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator si;

  /* Don't pipeline already pipelined code as that would increase
     the number of unnecessary register moves.  */
  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      if (EXPR_SCHED_TIMES (expr)
          >= PARAM_VALUE (PARAM_SELSCHED_MAX_SCHED_TIMES))
        av_set_iter_remove (&si);
    }
}

/* Filter speculative insns from AV_PTR if we don't want them.  */
static void
process_spec_exprs (av_set_t *av_ptr)
{
  bool try_data_p = true;
  bool try_control_p = true;
  expr_t expr;
  av_set_iterator si;

  if (spec_info == NULL)
    return;

  /* Scan *AV_PTR to find out if we want to consider speculative
     instructions for scheduling.  */
  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      ds_t ds;

      ds = EXPR_SPEC_DONE_DS (expr);

      /* The probability of success is too low - don't speculate.  */
      if ((ds & SPECULATIVE)
          && (ds_weak (ds) < spec_info->data_weakness_cutoff
              || EXPR_USEFULNESS (expr) < spec_info->control_weakness_cutoff
              || (pipelining_p && false
                  && (ds & DATA_SPEC)
                  && (ds & CONTROL_SPEC))))
        {
          av_set_iter_remove (&si);
          continue;
        }

      if ((spec_info->flags & PREFER_NON_DATA_SPEC)
          && !(ds & BEGIN_DATA))
        try_data_p = false;

      if ((spec_info->flags & PREFER_NON_CONTROL_SPEC)
          && !(ds & BEGIN_CONTROL))
        try_control_p = false;
    }

  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      ds_t ds;

      ds = EXPR_SPEC_DONE_DS (expr);

      if (ds & SPECULATIVE)
        {
          if ((ds & BEGIN_DATA) && !try_data_p)
            /* We don't want any data speculative instructions right
               now.  */
            av_set_iter_remove (&si);

          if ((ds & BEGIN_CONTROL) && !try_control_p)
            /* We don't want any control speculative instructions right
               now.  */
            av_set_iter_remove (&si);
        }
    }
}

/* Search for any use-like insns in AV_PTR and decide on scheduling
   them.  Return one when found, and NULL otherwise.
   Note that we check here whether a USE could be scheduled to avoid
   an infinite loop later.  */
static expr_t
process_use_exprs (av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator si;
  bool uses_present_p = false;
  bool try_uses_p = true;

  FOR_EACH_EXPR_1 (expr, si, av_ptr)
    {
      /* This will also initialize INSN_CODE for later use.  */
      if (recog_memoized (EXPR_INSN_RTX (expr)) < 0)
        {
          /* If we have a USE in *AV_PTR that was not scheduled yet,
             schedule it, as it can only do good.  */
          if (EXPR_SCHED_TIMES (expr) <= 0)
            {
              if (EXPR_TARGET_AVAILABLE (expr) == 1)
                return expr;

              av_set_iter_remove (&si);
            }
          else
            {
              gcc_assert (pipelining_p);

              uses_present_p = true;
            }
        }
      else
        try_uses_p = false;
    }

  if (uses_present_p)
    {
      /* If we don't want to schedule any USEs right now and we have some
         in *AV_PTR, remove them; else just return the first one found.  */
      if (!try_uses_p)
        {
          FOR_EACH_EXPR_1 (expr, si, av_ptr)
            if (INSN_CODE (EXPR_INSN_RTX (expr)) < 0)
              av_set_iter_remove (&si);
        }
      else
        {
          FOR_EACH_EXPR_1 (expr, si, av_ptr)
            {
              gcc_assert (INSN_CODE (EXPR_INSN_RTX (expr)) < 0);

              if (EXPR_TARGET_AVAILABLE (expr) == 1)
                return expr;

              av_set_iter_remove (&si);
            }
        }
    }

  return NULL;
}

/* Lookup EXPR in VINSN_VEC and return TRUE if found.  */
static bool
vinsn_vec_has_expr_p (vinsn_vec_t vinsn_vec, expr_t expr)
{
  vinsn_t vinsn;
  int n;

  for (n = 0; VEC_iterate (vinsn_t, vinsn_vec, n, vinsn); n++)
    if (VINSN_SEPARABLE_P (vinsn))
      {
        if (vinsn_equal_p (vinsn, EXPR_VINSN (expr)))
          return true;
      }
    else
      {
        /* For non-separable instructions, the blocking insn can have
           another pattern due to substitution, and we can't choose a
           different register as in the above case.  Check all registers
           being written instead.  */
        if (bitmap_intersect_p (VINSN_REG_SETS (vinsn),
                                VINSN_REG_SETS (EXPR_VINSN (expr))))
          return true;
      }

  return false;
}

#ifdef ENABLE_CHECKING
/* Return true if either of the expressions from ORIG_OPS can be blocked
   by previously created bookkeeping code.  STATIC_PARAMS points to static
   parameters of move_op.  */
static bool
av_set_could_be_blocked_by_bookkeeping_p (av_set_t orig_ops, void *static_params)
{
  expr_t expr;
  av_set_iterator iter;
  moveop_static_params_p sparams;

  /* This checks that expressions in ORIG_OPS are not blocked by bookkeeping
     created while scheduling on another fence.  */
  FOR_EACH_EXPR (expr, iter, orig_ops)
    if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
      return true;

  gcc_assert (code_motion_path_driver_info == &move_op_hooks);
  sparams = (moveop_static_params_p) static_params;

  /* Expressions can also be blocked by bookkeeping created during the
     current move_op.  */
  if (bitmap_bit_p (current_copies, INSN_UID (sparams->failed_insn)))
    FOR_EACH_EXPR (expr, iter, orig_ops)
      if (moveup_expr_cached (expr, sparams->failed_insn, false) != MOVEUP_EXPR_NULL)
        return true;

  /* Expressions in ORIG_OPS may have wrong destination register due to
     renaming.  Check with the right register instead.  */
  if (sparams->dest && REG_P (sparams->dest))
    {
      unsigned regno = REGNO (sparams->dest);
      vinsn_t failed_vinsn = INSN_VINSN (sparams->failed_insn);

      if (bitmap_bit_p (VINSN_REG_SETS (failed_vinsn), regno)
          || bitmap_bit_p (VINSN_REG_USES (failed_vinsn), regno)
          || bitmap_bit_p (VINSN_REG_CLOBBERS (failed_vinsn), regno))
        return true;
    }

  return false;
}
#endif

/* Clear VINSN_VEC and detach vinsns.  */
static void
vinsn_vec_clear (vinsn_vec_t *vinsn_vec)
{
  unsigned len = VEC_length (vinsn_t, *vinsn_vec);
  if (len > 0)
    {
      vinsn_t vinsn;
      int n;

      for (n = 0; VEC_iterate (vinsn_t, *vinsn_vec, n, vinsn); n++)
        vinsn_detach (vinsn);
      VEC_block_remove (vinsn_t, *vinsn_vec, 0, len);
    }
}

/* Add the vinsn of EXPR to the VINSN_VEC.  */
static void
vinsn_vec_add (vinsn_vec_t *vinsn_vec, expr_t expr)
{
  vinsn_attach (EXPR_VINSN (expr));
  VEC_safe_push (vinsn_t, heap, *vinsn_vec, EXPR_VINSN (expr));
}

/* Free the vector representing blocked expressions.  */
static void
vinsn_vec_free (vinsn_vec_t *vinsn_vec)
{
  if (*vinsn_vec)
    VEC_free (vinsn_t, heap, *vinsn_vec);
}

/* Increase EXPR_PRIORITY_ADJ for INSN by AMOUNT.  */

void
sel_add_to_insn_priority (rtx insn, int amount)
{
  EXPR_PRIORITY_ADJ (INSN_EXPR (insn)) += amount;

  if (sched_verbose >= 2)
    sel_print ("sel_add_to_insn_priority: insn %d, by %d (now %d+%d).\n",
               INSN_UID (insn), amount, EXPR_PRIORITY (INSN_EXPR (insn)),
               EXPR_PRIORITY_ADJ (INSN_EXPR (insn)));
}

/* Turn AV into a vector, filter inappropriate insns and sort it.  Return
   true if there is something to schedule.  BNDS and FENCE are current
   boundaries and fence, respectively.  If we need to stall for some cycles
   before an expr from AV would become available, write this number to
   *PNEED_STALL.  */
static bool
fill_vec_av_set (av_set_t av, blist_t bnds, fence_t fence,
                 int *pneed_stall)
{
  av_set_iterator si;
  expr_t expr;
  int sched_next_worked = 0, stalled, n;
  static int av_max_prio, est_ticks_till_branch;
  int min_need_stall = -1;
  deps_t dc = BND_DC (BLIST_BND (bnds));

  /* Bail out early when the ready list contained only USEs/CLOBBERs that are
     already scheduled.  */
  if (av == NULL)
    return false;

  /* Empty the vector from the previous call.  */
  if (VEC_length (expr_t, vec_av_set) > 0)
    VEC_block_remove (expr_t, vec_av_set, 0, VEC_length (expr_t, vec_av_set));

  /* Turn the set into a vector for sorting and call sel_target_adjust_priority
     for each insn.  */
  gcc_assert (VEC_empty (expr_t, vec_av_set));
  FOR_EACH_EXPR (expr, si, av)
    {
      VEC_safe_push (expr_t, heap, vec_av_set, expr);

      gcc_assert (EXPR_PRIORITY_ADJ (expr) == 0 || *pneed_stall);

      /* Adjust priority using target backend hook.  */
      sel_target_adjust_priority (expr);
    }

  /* Sort the vector.  */
  qsort (VEC_address (expr_t, vec_av_set), VEC_length (expr_t, vec_av_set),
         sizeof (expr_t), sel_rank_for_schedule);

  /* We record the maximal priority of insns in the av set for the current
     instruction group.  */
  if (FENCE_STARTS_CYCLE_P (fence))
    av_max_prio = est_ticks_till_branch = INT_MIN;

  /* Filter out inappropriate expressions.  The loop's direction is reversed
     so that the "best" instructions are visited first.  We assume that
     VEC_unordered_remove moves the last element into the place of the one
     being deleted.  */
  for (n = VEC_length (expr_t, vec_av_set) - 1, stalled = 0; n >= 0; n--)
    {
      expr_t expr = VEC_index (expr_t, vec_av_set, n);
      insn_t insn = EXPR_INSN_RTX (expr);
      char target_available;
      bool is_orig_reg_p = true;
      int need_cycles, new_prio;

      /* Don't allow any insns other than from SCHED_GROUP if we have one.  */
      if (FENCE_SCHED_NEXT (fence) && insn != FENCE_SCHED_NEXT (fence))
        {
          VEC_unordered_remove (expr_t, vec_av_set, n);
          continue;
        }

      /* Count the sched_next insns (just in case there
         could be several).  */
      if (FENCE_SCHED_NEXT (fence))
        sched_next_worked++;

      /* Check all liveness requirements and try renaming.
         FIXME: try to minimize calls to this.  */
      target_available = EXPR_TARGET_AVAILABLE (expr);

      /* If the insn was already scheduled on the current fence,
         set TARGET_AVAILABLE to -1 no matter what the expr's attribute
         says.  */
      if (vinsn_vec_has_expr_p (vec_target_unavailable_vinsns, expr))
        target_available = -1;

      /* If the availability of the EXPR is invalidated by the insertion of
         bookkeeping earlier, make sure that we won't choose this expr for
         scheduling if it's not separable, and if it is separable, then
         we have to recompute the set of available registers for it.  */
      if (vinsn_vec_has_expr_p (vec_bookkeeping_blocked_vinsns, expr))
        {
          VEC_unordered_remove (expr_t, vec_av_set, n);
          if (sched_verbose >= 4)
            sel_print ("Expr %d is blocked by bookkeeping inserted earlier\n",
                       INSN_UID (insn));
          continue;
        }

      if (target_available == true)
        {
          /* Do nothing -- we can use an existing register.  */
          is_orig_reg_p = EXPR_SEPARABLE_P (expr);
        }
      else if (/* A non-separable instruction will never
                  get another register.  */
               (target_available == false
                && !EXPR_SEPARABLE_P (expr))
               /* Don't try to find a register for a low-priority
                  expression.  */
               || (int) VEC_length (expr_t, vec_av_set) - 1 - n >= max_insns_to_rename
               /* ??? FIXME: Don't try to rename data speculation.  */
               || (EXPR_SPEC_DONE_DS (expr) & BEGIN_DATA)
               || ! find_best_reg_for_expr (expr, bnds, &is_orig_reg_p))
        {
          VEC_unordered_remove (expr_t, vec_av_set, n);
          if (sched_verbose >= 4)
            sel_print ("Expr %d has no suitable target register\n",
                       INSN_UID (insn));
          continue;
        }

      /* Filter expressions that need to be renamed or speculated when
         pipelining, because compensating register copies or speculation
         checks are likely to be placed near the beginning of the loop,
         causing a stall.  */
      if (pipelining_p && EXPR_ORIG_SCHED_CYCLE (expr) > 0
          && (!is_orig_reg_p || EXPR_SPEC_DONE_DS (expr) != 0))
        {
          /* Estimation of number of cycles until loop branch for
             renaming/speculation to be successful.  */
          int need_n_ticks_till_branch = sel_vinsn_cost (EXPR_VINSN (expr));

          if ((int) current_loop_nest->ninsns < 9)
            {
              VEC_unordered_remove (expr_t, vec_av_set, n);
              if (sched_verbose >= 4)
                sel_print ("Pipelining expr %d will likely cause stall\n",
                           INSN_UID (insn));
              continue;
            }

          if ((int) current_loop_nest->ninsns - num_insns_scheduled
              < need_n_ticks_till_branch * issue_rate / 2
              && est_ticks_till_branch < need_n_ticks_till_branch)
             {
               VEC_unordered_remove (expr_t, vec_av_set, n);
               if (sched_verbose >= 4)
                 sel_print ("Pipelining expr %d will likely cause stall\n",
                            INSN_UID (insn));
               continue;
             }
        }

      /* We want to schedule speculation checks as late as possible.  Discard
         them from the av set if there are instructions with higher
         priority.  */
      if (sel_insn_is_speculation_check (insn)
          && EXPR_PRIORITY (expr) < av_max_prio)
        {
          stalled++;
          min_need_stall = min_need_stall < 0 ? 1 : MIN (min_need_stall, 1);
          VEC_unordered_remove (expr_t, vec_av_set, n);
          if (sched_verbose >= 4)
            sel_print ("Delaying speculation check %d until its first use\n",
                       INSN_UID (insn));
          continue;
        }

      /* Ignore EXPRs available from pipelining when updating AV_MAX_PRIO.  */
      if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
        av_max_prio = MAX (av_max_prio, EXPR_PRIORITY (expr));

      /* Don't allow any insns whose data is not yet ready.
         Check first whether we've already tried them and failed.  */
      if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
        {
          need_cycles = (FENCE_READY_TICKS (fence)[INSN_UID (insn)]
                         - FENCE_CYCLE (fence));
          if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
            est_ticks_till_branch = MAX (est_ticks_till_branch,
                                         EXPR_PRIORITY (expr) + need_cycles);

          if (need_cycles > 0)
            {
              stalled++;
              min_need_stall = (min_need_stall < 0
                                ? need_cycles
                                : MIN (min_need_stall, need_cycles));
              VEC_unordered_remove (expr_t, vec_av_set, n);

              if (sched_verbose >= 4)
                sel_print ("Expr %d is not ready until cycle %d (cached)\n",
                           INSN_UID (insn),
                           FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
              continue;
            }
        }

      /* Now resort to dependence analysis to find whether EXPR might be
         stalled due to dependencies from FENCE's context.  */
      need_cycles = tick_check_p (expr, dc, fence);
      new_prio = EXPR_PRIORITY (expr) + EXPR_PRIORITY_ADJ (expr) + need_cycles;

      if (EXPR_ORIG_SCHED_CYCLE (expr) <= 0)
        est_ticks_till_branch = MAX (est_ticks_till_branch,
                                     new_prio);

      if (need_cycles > 0)
        {
          if (INSN_UID (insn) >= FENCE_READY_TICKS_SIZE (fence))
            {
              int new_size = INSN_UID (insn) * 3 / 2;

              FENCE_READY_TICKS (fence)
                = (int *) xrecalloc (FENCE_READY_TICKS (fence),
                                     new_size, FENCE_READY_TICKS_SIZE (fence),
                                     sizeof (int));
            }
          FENCE_READY_TICKS (fence)[INSN_UID (insn)]
            = FENCE_CYCLE (fence) + need_cycles;

          stalled++;
          min_need_stall = (min_need_stall < 0
                            ? need_cycles
                            : MIN (min_need_stall, need_cycles));

          VEC_unordered_remove (expr_t, vec_av_set, n);

          if (sched_verbose >= 4)
            sel_print ("Expr %d is not ready yet until cycle %d\n",
                       INSN_UID (insn),
                       FENCE_READY_TICKS (fence)[INSN_UID (insn)]);
          continue;
        }

      if (sched_verbose >= 4)
        sel_print ("Expr %d is ok\n", INSN_UID (insn));
      min_need_stall = 0;
    }

  /* Clear SCHED_NEXT.  */
  if (FENCE_SCHED_NEXT (fence))
    {
      gcc_assert (sched_next_worked == 1);
      FENCE_SCHED_NEXT (fence) = NULL_RTX;
    }

  /* No need to stall if this variable was not initialized.  */
  if (min_need_stall < 0)
    min_need_stall = 0;

  if (VEC_empty (expr_t, vec_av_set))
    {
      /* We need to set *pneed_stall here, because later we skip this code
         when the ready list is empty.  */
      *pneed_stall = min_need_stall;
      return false;
    }
  else
    gcc_assert (min_need_stall == 0);

  /* Sort the vector.  */
  qsort (VEC_address (expr_t, vec_av_set), VEC_length (expr_t, vec_av_set),
         sizeof (expr_t), sel_rank_for_schedule);

  if (sched_verbose >= 4)
    {
      sel_print ("Total ready exprs: %d, stalled: %d\n",
                 VEC_length (expr_t, vec_av_set), stalled);
      sel_print ("Sorted av set (%d): ", VEC_length (expr_t, vec_av_set));
      for (n = 0; VEC_iterate (expr_t, vec_av_set, n, expr); n++)
        dump_expr (expr);
      sel_print ("\n");
    }

  *pneed_stall = 0;
  return true;
}

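/* A standalone sketch of the stall accounting above: each filtered expr
   that is merely not ready yet contributes its needed cycle count, and the
   fence should stall for the minimum positive requirement, or not at all
   when at least one expr is ready now.  Hypothetical helper; compile
   separately.  */
#if 0
#include <assert.h>

/* NEED[i] is the number of cycles until expr I is ready; 0 = ready.  */
static int
toy_min_need_stall (const int *need, int n)
{
  int i, min_need = -1;

  for (i = 0; i < n; i++)
    {
      if (need[i] == 0)
        return 0;               /* Something can be issued right away.  */
      min_need = (min_need < 0 || need[i] < min_need) ? need[i] : min_need;
    }
  /* No need to stall if the minimum was never initialized.  */
  return min_need < 0 ? 0 : min_need;
}

int
main (void)
{
  int all_stalled[] = { 3, 2, 5 };
  int one_ready[] = { 3, 0, 5 };

  assert (toy_min_need_stall (all_stalled, 3) == 2);
  assert (toy_min_need_stall (one_ready, 3) == 0);
  assert (toy_min_need_stall (all_stalled, 0) == 0);
  return 0;
}
#endif
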
/* Convert a vectored and sorted av set to the ready list that
   the rest of the backend wants to see.  */
static void
convert_vec_av_set_to_ready (void)
{
  int n;
  expr_t expr;

  /* Allocate and fill the ready list from the sorted vector.  */
  ready.n_ready = VEC_length (expr_t, vec_av_set);
  ready.first = ready.n_ready - 1;

  gcc_assert (ready.n_ready > 0);

  if (ready.n_ready > max_issue_size)
    {
      max_issue_size = ready.n_ready;
      sched_extend_ready_list (ready.n_ready);
    }

  for (n = 0; VEC_iterate (expr_t, vec_av_set, n, expr); n++)
    {
      vinsn_t vi = EXPR_VINSN (expr);
      insn_t insn = VINSN_INSN_RTX (vi);

      ready_try[n] = 0;
      ready.vec[n] = insn;
    }
}

/* Initialize ready list from *AV_PTR for the max_issue () call.
   If any unrecognizable insn is found in *AV_PTR, return it (and skip
   max_issue).  BND and FENCE are current boundary and fence,
   respectively.  If we need to stall for some cycles before an expr
   from *AV_PTR would become available, write this number to *PNEED_STALL.  */
static expr_t
fill_ready_list (av_set_t *av_ptr, blist_t bnds, fence_t fence,
                 int *pneed_stall)
{
  expr_t expr;

  /* We do not support multiple boundaries per fence.  */
  gcc_assert (BLIST_NEXT (bnds) == NULL);

  /* Process expressions that require special handling first, i.e.
     pipelined, speculative and recog() < 0 expressions.  */
  process_pipelined_exprs (av_ptr);
  process_spec_exprs (av_ptr);

  /* A USE could be scheduled immediately.  */
  expr = process_use_exprs (av_ptr);
  if (expr)
    {
      *pneed_stall = 0;
      return expr;
    }

  /* Turn the av set into a vector for sorting.  */
  if (! fill_vec_av_set (*av_ptr, bnds, fence, pneed_stall))
    {
      ready.n_ready = 0;
      return NULL;
    }

  /* Build the final ready list.  */
  convert_vec_av_set_to_ready ();
  return NULL;
}

/* Wrapper for dfa_new_cycle ().  Returns TRUE if the cycle was advanced.  */
static bool
sel_dfa_new_cycle (insn_t insn, fence_t fence)
{
  int last_scheduled_cycle = FENCE_LAST_SCHEDULED_INSN (fence)
                             ? INSN_SCHED_CYCLE (FENCE_LAST_SCHEDULED_INSN (fence))
                             : FENCE_CYCLE (fence) - 1;
  bool res = false;
  int sort_p = 0;

  if (!targetm.sched.dfa_new_cycle)
    return false;

  memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);

  while (!sort_p && targetm.sched.dfa_new_cycle (sched_dump, sched_verbose,
                                                 insn, last_scheduled_cycle,
                                                 FENCE_CYCLE (fence), &sort_p))
    {
      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
      advance_one_cycle (fence);
      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
      res = true;
    }

  return res;
}

/* Invoke reorder* target hooks on the ready list.  Return the number of insns
   we can issue.  FENCE is the current fence.  */
static int
invoke_reorder_hooks (fence_t fence)
{
  int issue_more;
  bool ran_hook = false;

  /* Call the reorder hook at the beginning of the cycle, and call
     the reorder2 hook in the middle of the cycle.  */
  if (FENCE_ISSUED_INSNS (fence) == 0)
    {
      if (targetm.sched.reorder
          && !SCHED_GROUP_P (ready_element (&ready, 0))
          && ready.n_ready > 1)
        {
          /* Don't give reorder the most prioritized insn as it can break
             pipelining.  */
          if (pipelining_p)
            --ready.n_ready;

          issue_more
            = targetm.sched.reorder (sched_dump, sched_verbose,
                                     ready_lastpos (&ready),
                                     &ready.n_ready, FENCE_CYCLE (fence));

          if (pipelining_p)
            ++ready.n_ready;

          ran_hook = true;
        }
      else
        /* Initialize can_issue_more for variable_issue.  */
        issue_more = issue_rate;
    }
  else if (targetm.sched.reorder2
           && !SCHED_GROUP_P (ready_element (&ready, 0)))
    {
      if (ready.n_ready == 1)
        issue_more =
          targetm.sched.reorder2 (sched_dump, sched_verbose,
                                  ready_lastpos (&ready),
                                  &ready.n_ready, FENCE_CYCLE (fence));
      else
        {
          if (pipelining_p)
            --ready.n_ready;

          issue_more =
            targetm.sched.reorder2 (sched_dump, sched_verbose,
                                    ready.n_ready
                                    ? ready_lastpos (&ready) : NULL,
                                    &ready.n_ready, FENCE_CYCLE (fence));

          if (pipelining_p)
            ++ready.n_ready;
        }

      ran_hook = true;
    }
  else
    issue_more = FENCE_ISSUE_MORE (fence);

  /* Ensure that ready list and vec_av_set are in line with each other,
     i.e. vec_av_set[i] == ready_element (&ready, i).  */
  if (issue_more && ran_hook)
    {
      int i, j, n;
      rtx *arr = ready.vec;
      expr_t *vec = VEC_address (expr_t, vec_av_set);

      for (i = 0, n = ready.n_ready; i < n; i++)
        if (EXPR_INSN_RTX (vec[i]) != arr[i])
          {
            expr_t tmp;

            for (j = i; j < n; j++)
              if (EXPR_INSN_RTX (vec[j]) == arr[i])
                break;
            gcc_assert (j < n);

            tmp = vec[i];
            vec[i] = vec[j];
            vec[j] = tmp;
          }
    }

  return issue_more;
}

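/* A standalone sketch of the re-synchronization loop above: after a target
   hook permutes the insn array ARR, the parallel expr array VEC is put
   back in line by searching, for each mismatching position, the matching
   element further right and swapping it in.  Toy ints stand in for
   insns/exprs; compile separately.  */
#if 0
#include <assert.h>

static void
toy_resync (int *vec, const int *arr, int n)
{
  int i, j, tmp;

  for (i = 0; i < n; i++)
    if (vec[i] != arr[i])
      {
        for (j = i; j < n; j++)
          if (vec[j] == arr[i])
            break;
        assert (j < n);         /* ARR is a permutation of VEC.  */
        tmp = vec[i], vec[i] = vec[j], vec[j] = tmp;
      }
}

int
main (void)
{
  int arr[] = { 3, 1, 2 };      /* Order chosen by the reorder hook.  */
  int vec[] = { 1, 2, 3 };      /* Parallel array to fix up.  */
  int i;

  toy_resync (vec, arr, 3);
  for (i = 0; i < 3; i++)
    assert (vec[i] == arr[i]);
  return 0;
}
#endif
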
/* Return an EXPR corresponding to the INDEX element of the ready list, if
   FOLLOW_READY_ELEMENT is true (i.e., the expr of
   ready_element (&ready, INDEX) will be returned), and to the INDEX element
   of ready.vec otherwise.  */
static inline expr_t
find_expr_for_ready (int index, bool follow_ready_element)
{
  expr_t expr;
  int real_index;

  real_index = follow_ready_element ? ready.first - index : index;

  expr = VEC_index (expr_t, vec_av_set, real_index);
  gcc_assert (ready.vec[real_index] == EXPR_INSN_RTX (expr));

  return expr;
}

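/* A standalone sketch of the index mapping above: the ready list counts
   from the most prioritized element, so its element I lives at raw vector
   position FIRST - I, with FIRST == N_READY - 1.  Toy ints; illustrative
   only; compile separately.  */
#if 0
#include <assert.h>

int
main (void)
{
  int vec[] = { 10, 20, 30 };   /* Raw ready.vec order.  */
  int n_ready = 3, first = n_ready - 1, i;
  int ready_view[3];

  /* The ready-list view reverses the vector: element 0 is vec[first].  */
  for (i = 0; i < n_ready; i++)
    ready_view[i] = vec[first - i];

  assert (ready_view[0] == 30 && ready_view[2] == 10);
  return 0;
}
#endif
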
/* Calculate the insns worth trying via the lookahead_guard hook.  Return
   the number of such insns found.  */
static int
invoke_dfa_lookahead_guard (void)
{
  int i, n;
  bool have_hook
    = targetm.sched.first_cycle_multipass_dfa_lookahead_guard != NULL;

  if (sched_verbose >= 2)
    sel_print ("ready after reorder: ");

  for (i = 0, n = 0; i < ready.n_ready; i++)
    {
      expr_t expr;
      insn_t insn;
      int r;

      /* In this loop INSN is the Ith element of the ready list given by
         ready_element, not the Ith element of ready.vec.  */
      insn = ready_element (&ready, i);

      if (! have_hook || i == 0)
        r = 0;
      else
        r = !targetm.sched.first_cycle_multipass_dfa_lookahead_guard (insn);

      gcc_assert (INSN_CODE (insn) >= 0);

      /* Only insns with ready_try = 0 can get here
         from fill_ready_list.  */
      gcc_assert (ready_try [i] == 0);
      ready_try[i] = r;
      if (!r)
        n++;

      expr = find_expr_for_ready (i, true);

      if (sched_verbose >= 2)
        {
          dump_vinsn (EXPR_VINSN (expr));
          sel_print (":%d; ", ready_try[i]);
        }
    }

  if (sched_verbose >= 2)
    sel_print ("\n");
  return n;
}

/* Calculate the number of privileged insns and return it.  */
static int
calculate_privileged_insns (void)
{
  expr_t cur_expr, min_spec_expr = NULL;
  int privileged_n = 0, i;

  for (i = 0; i < ready.n_ready; i++)
    {
      if (ready_try[i])
        continue;

      if (! min_spec_expr)
        min_spec_expr = find_expr_for_ready (i, true);

      cur_expr = find_expr_for_ready (i, true);

      if (EXPR_SPEC (cur_expr) > EXPR_SPEC (min_spec_expr))
        break;

      ++privileged_n;
    }

  if (i == ready.n_ready)
    privileged_n = 0;

  if (sched_verbose >= 2)
    sel_print ("privileged_n: %d insns with SPEC %d\n",
               privileged_n, privileged_n ? EXPR_SPEC (min_spec_expr) : -1);
  return privileged_n;
}

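/* A standalone sketch of the privileged-insn count above: among the insns
   that passed the lookahead guard, count the leading run whose
   speculativeness equals that of the first one; if the run covers the
   whole list, nothing is privileged.  Toy SPEC values with a -1 sentinel;
   illustrative only; compile separately.  */
#if 0
#include <assert.h>

static int
toy_privileged (const int *spec, const int *skip, int n)
{
  int i, privileged_n = 0, min_spec = -1;

  for (i = 0; i < n; i++)
    {
      if (skip[i])              /* Models ready_try[i] != 0.  */
        continue;
      if (min_spec < 0)
        min_spec = spec[i];
      if (spec[i] > min_spec)
        break;
      ++privileged_n;
    }
  /* If no later insn is more speculative, nothing is privileged.  */
  return i == n ? 0 : privileged_n;
}

int
main (void)
{
  int spec1[] = { 0, 0, 2, 2 }, spec2[] = { 1, 1, 1, 1 };
  int none[] = { 0, 0, 0, 0 };

  assert (toy_privileged (spec1, none, 4) == 2);
  assert (toy_privileged (spec2, none, 4) == 0);
  return 0;
}
#endif
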
/* Call the rest of the hooks after the choice was made.  Return
   the number of insns that can still be issued given that the current
   number is ISSUE_MORE.  FENCE and BEST_INSN are the current fence
   and the insn chosen for scheduling, respectively.  */
static int
invoke_aftermath_hooks (fence_t fence, rtx best_insn, int issue_more)
{
  gcc_assert (INSN_P (best_insn));

  /* First, call dfa_new_cycle, and then variable_issue, if available.  */
  sel_dfa_new_cycle (best_insn, fence);

  if (targetm.sched.variable_issue)
    {
      memcpy (curr_state, FENCE_STATE (fence), dfa_state_size);
      issue_more =
        targetm.sched.variable_issue (sched_dump, sched_verbose, best_insn,
                                      issue_more);
      memcpy (FENCE_STATE (fence), curr_state, dfa_state_size);
    }
  else if (GET_CODE (PATTERN (best_insn)) != USE
           && GET_CODE (PATTERN (best_insn)) != CLOBBER)
    issue_more--;

  return issue_more;
}

/* Estimate the cost of issuing INSN on DFA state STATE.  */
static int
estimate_insn_cost (rtx insn, state_t state)
{
  static state_t temp = NULL;
  int cost;

  if (!temp)
    temp = xmalloc (dfa_state_size);

  memcpy (temp, state, dfa_state_size);
  cost = state_transition (temp, insn);

  if (cost < 0)
    return 0;
  else if (cost == 0)
    return 1;
  return cost;
}

/* Return the cost of issuing EXPR on the FENCE as estimated by DFA.
   This function properly handles ASMs, USEs etc.  */
static int
get_expr_cost (expr_t expr, fence_t fence)
{
  rtx insn = EXPR_INSN_RTX (expr);

  if (recog_memoized (insn) < 0)
    {
      if (!FENCE_STARTS_CYCLE_P (fence)
          && INSN_ASM_P (insn))
        /* This is an asm insn that we tried to issue on a cycle other
           than the first.  Issue it on the next cycle.  */
        return 1;
      else
        /* A USE insn, or something else we don't need to
           understand.  We can't pass these directly to
           state_transition because it will trigger a
           fatal error for unrecognizable insns.  */
        return 0;
    }
  else
    return estimate_insn_cost (insn, FENCE_STATE (fence));
}

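/* A standalone sketch of the cost policy above: unrecognized insns are
   either an asm (delayed to the next cycle unless the cycle just started)
   or cost-free pseudo-insns like USEs; recognized insns get the DFA
   estimate, where a negative state_transition result means "fits in the
   current cycle".  Toy values, not the real DFA; compile separately.  */
#if 0
#include <assert.h>

static int
toy_map_transition (int t)
{
  /* Mirrors estimate_insn_cost: negative -> free, zero -> one cycle.  */
  if (t < 0)
    return 0;
  return t == 0 ? 1 : t;
}

static int
toy_expr_cost (int recognized, int is_asm, int at_cycle_start, int t)
{
  if (!recognized)
    return (!at_cycle_start && is_asm) ? 1 : 0;
  return toy_map_transition (t);
}

int
main (void)
{
  assert (toy_expr_cost (1, 0, 1, -1) == 0);  /* Fits this cycle.  */
  assert (toy_expr_cost (1, 0, 1, 0) == 1);   /* Next cycle.  */
  assert (toy_expr_cost (0, 1, 0, 0) == 1);   /* Asm mid-cycle.  */
  assert (toy_expr_cost (0, 1, 1, 0) == 0);   /* Asm at cycle start.  */
  assert (toy_expr_cost (0, 0, 1, 0) == 0);   /* USE-like insn.  */
  return 0;
}
#endif
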
/* Find the best insn for scheduling, either via max_issue or just take
   the most prioritized available.  */
static int
choose_best_insn (fence_t fence, int privileged_n, int *index)
{
  int can_issue = 0;

  if (dfa_lookahead > 0)
    {
      cycle_issued_insns = FENCE_ISSUED_INSNS (fence);
      can_issue = max_issue (&ready, privileged_n,
                             FENCE_STATE (fence), index);
      if (sched_verbose >= 2)
        sel_print ("max_issue: we can issue %d insns, already did %d insns\n",
                   can_issue, FENCE_ISSUED_INSNS (fence));
    }
  else
    {
      /* We can't use max_issue; just return the first available element.  */
      int i;

      for (i = 0; i < ready.n_ready; i++)
        {
          expr_t expr = find_expr_for_ready (i, true);

          if (get_expr_cost (expr, fence) < 1)
            {
              can_issue = can_issue_more;
              *index = i;

              if (sched_verbose >= 2)
                sel_print ("using %dth insn from the ready list\n", i + 1);

              break;
            }
        }

      if (i == ready.n_ready)
        {
          can_issue = 0;
          *index = -1;
        }
    }

  return can_issue;
}

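/* A standalone sketch of the no-lookahead fallback above: without
   dfa_lookahead, simply take the first ready element whose estimated cost
   is zero, i.e. the first that fits in the current cycle.  Toy cost array;
   illustrative only; compile separately.  */
#if 0
#include <assert.h>

static int
toy_choose (const int *cost, int n, int *index)
{
  int i;

  for (i = 0; i < n; i++)
    if (cost[i] < 1)
      {
        *index = i;
        return 1;               /* Stands in for can_issue_more.  */
      }
  *index = -1;
  return 0;
}

int
main (void)
{
  int cost_a[] = { 2, 0, 0 }, cost_b[] = { 2, 3, 1 };
  int idx;

  assert (toy_choose (cost_a, 3, &idx) == 1 && idx == 1);
  assert (toy_choose (cost_b, 3, &idx) == 0 && idx == -1);
  return 0;
}
#endif
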
/* Choose the best expr from *AV_VLIW_PTR and a suitable register for it.
4361
   BNDS and FENCE are current boundaries and scheduling fence respectively.
4362
   Return the expr found and NULL if nothing can be issued atm.
4363
   Write to PNEED_STALL the number of cycles to stall if no expr was found.  */
4364
static expr_t
4365
find_best_expr (av_set_t *av_vliw_ptr, blist_t bnds, fence_t fence,
4366
                int *pneed_stall)
4367
{
4368
  expr_t best;
4369
 
4370
  /* Choose the best insn for scheduling via:
4371
     1) sorting the ready list based on priority;
4372
     2) calling the reorder hook;
4373
     3) calling max_issue.  */
4374
  best = fill_ready_list (av_vliw_ptr, bnds, fence, pneed_stall);
4375
  if (best == NULL && ready.n_ready > 0)
4376
    {
4377
      int privileged_n, index;
4378
 
4379
      can_issue_more = invoke_reorder_hooks (fence);
4380
      if (can_issue_more > 0)
4381
        {
4382
          /* Try choosing the best insn until we find one that is could be
4383
             scheduled due to liveness restrictions on its destination register.
4384
             In the future, we'd like to choose once and then just probe insns
4385
             in the order of their priority.  */
4386
          invoke_dfa_lookahead_guard ();
4387
          privileged_n = calculate_privileged_insns ();
4388
          can_issue_more = choose_best_insn (fence, privileged_n, &index);
4389
          if (can_issue_more)
4390
            best = find_expr_for_ready (index, true);
4391
        }
4392
      /* We had some available insns, so if we can't issue them,
4393
         we have a stall.  */
4394
      if (can_issue_more == 0)
4395
        {
4396
          best = NULL;
4397
          *pneed_stall = 1;
4398
        }
4399
    }
4400
 
4401
  if (best != NULL)
4402
    {
4403
      can_issue_more = invoke_aftermath_hooks (fence, EXPR_INSN_RTX (best),
4404
                                               can_issue_more);
4405
      if (can_issue_more == 0)
4406
        *pneed_stall = 1;
4407
    }
4408
 
4409
  if (sched_verbose >= 2)
4410
    {
4411
      if (best != NULL)
4412
        {
4413
          sel_print ("Best expression (vliw form): ");
4414
          dump_expr (best);
4415
          sel_print ("; cycle %d\n", FENCE_CYCLE (fence));
4416
        }
4417
      else
4418
        sel_print ("No best expr found!\n");
4419
    }
4420
 
4421
  return best;
4422
}
4423
 
4424
 
4425
/* Functions that implement the core of the scheduler.  */
4426
 
4427
 
4428
/* Emit an instruction from EXPR with SEQNO and VINSN after
4429
   PLACE_TO_INSERT.  */
4430
static insn_t
4431
emit_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno,
4432
                           insn_t place_to_insert)
4433
{
4434
  /* This assert fails when we have identical instructions
4435
     one of which dominates the other.  In this case move_op ()
4436
     finds the first instruction and doesn't search for second one.
4437
     The solution would be to compute av_set after the first found
4438
     insn and, if insn present in that set, continue searching.
4439
     For now we workaround this issue in move_op.  */
4440
  gcc_assert (!INSN_IN_STREAM_P (EXPR_INSN_RTX (expr)));
4441
 
4442
  if (EXPR_WAS_RENAMED (expr))
4443
    {
4444
      unsigned regno = expr_dest_regno (expr);
4445
 
4446
      if (HARD_REGISTER_NUM_P (regno))
4447
        {
4448
          df_set_regs_ever_live (regno, true);
4449
          reg_rename_tick[regno] = ++reg_rename_this_tick;
4450
        }
4451
    }
4452
 
4453
  return sel_gen_insn_from_expr_after (expr, vinsn, seqno,
4454
                                       place_to_insert);
4455
}
4456
 
4457
/* Return TRUE if BB can hold bookkeeping code.  */
4458
static bool
4459
block_valid_for_bookkeeping_p (basic_block bb)
4460
{
4461
  insn_t bb_end = BB_END (bb);
4462
 
4463
  if (!in_current_region_p (bb) || EDGE_COUNT (bb->succs) > 1)
4464
    return false;
4465
 
4466
  if (INSN_P (bb_end))
4467
    {
4468
      if (INSN_SCHED_TIMES (bb_end) > 0)
4469
        return false;
4470
    }
4471
  else
4472
    gcc_assert (NOTE_INSN_BASIC_BLOCK_P (bb_end));
4473
 
4474
  return true;
4475
}
4476
 
4477
/* Attempt to find a block that can hold bookkeeping code for path(s) incoming
   into E2->dest, except from E1->src (there may be a sequence of empty basic
   blocks between E1->src and E2->dest).  Return the found block, or NULL if a
   new one must be created.  If LAX holds, don't assume there is a simple path
   from E1->src to E2->dest.  */
static basic_block
find_block_for_bookkeeping (edge e1, edge e2, bool lax)
{
  basic_block candidate_block = NULL;
  edge e;

  /* Loop over edges from E1 to E2, inclusive.  */
  for (e = e1; !lax || e->dest != EXIT_BLOCK_PTR; e = EDGE_SUCC (e->dest, 0))
    {
      if (EDGE_COUNT (e->dest->preds) == 2)
        {
          if (candidate_block == NULL)
            candidate_block = (EDGE_PRED (e->dest, 0) == e
                               ? EDGE_PRED (e->dest, 1)->src
                               : EDGE_PRED (e->dest, 0)->src);
          else
            /* Found an additional edge leading into the path from e1
               to e2 from the side.  */
            return NULL;
        }
      else if (EDGE_COUNT (e->dest->preds) > 2)
        /* Several edges lead into the path from e1 to e2 from the side.  */
        return NULL;

      if (e == e2)
        return ((!lax || candidate_block)
                && block_valid_for_bookkeeping_p (candidate_block)
                ? candidate_block
                : NULL);

      if (lax && EDGE_COUNT (e->dest->succs) != 1)
        return NULL;
    }

  if (lax)
    return NULL;

  gcc_unreachable ();
}
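
/* Note (added for illustration; not in the original sources): in the
   common diamond below, an expression moved up along E1 must get a
   bookkeeping copy on the other path into the join point, so the
   candidate block found above is B:

       E1->src        B
           \         /
            \       / E2
             \     /
            E2->dest

   The loop walks the (possibly empty) chain of blocks from E1 towards
   E2 and gives up whenever extra edges enter that chain from the side,
   since a single bookkeeping copy would then not cover all incoming
   paths.  */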
 
/* Create a new basic block for bookkeeping code for path(s) incoming into
   E2->dest, except from E1->src.  Return the created block.  */
static basic_block
create_block_for_bookkeeping (edge e1, edge e2)
{
  basic_block new_bb, bb = e2->dest;

  /* Check that we don't spoil the loop structure.  */
  if (current_loop_nest)
    {
      basic_block latch = current_loop_nest->latch;

      /* We do not split the header.  */
      gcc_assert (e2->dest != current_loop_nest->header);

      /* We do not redirect the only edge to the latch block.  */
      gcc_assert (e1->dest != latch
                  || !single_pred_p (latch)
                  || e1 != single_pred_edge (latch));
    }

  /* Split BB to insert BOOK_INSN there.  */
  new_bb = sched_split_block (bb, NULL);

  /* Move note_list from the upper bb.  */
  gcc_assert (BB_NOTE_LIST (new_bb) == NULL_RTX);
  BB_NOTE_LIST (new_bb) = BB_NOTE_LIST (bb);
  BB_NOTE_LIST (bb) = NULL_RTX;

  gcc_assert (e2->dest == bb);

  /* Skip block for bookkeeping copy when leaving E1->src.  */
  if (e1->flags & EDGE_FALLTHRU)
    sel_redirect_edge_and_branch_force (e1, new_bb);
  else
    sel_redirect_edge_and_branch (e1, new_bb);

  gcc_assert (e1->dest == new_bb);
  gcc_assert (sel_bb_empty_p (bb));

  /* To keep basic block numbers in sync between debug and non-debug
     compilations, we have to rotate blocks here.  Consider that we
     started from (a,b)->d, (c,d)->e, and d contained only debug
     insns.  It would have been removed before if the debug insns
     weren't there, so we'd have split e rather than d.  So what we do
     now is to swap the block numbers of new_bb and
     single_succ(new_bb) == e, so that the insns that were in e before
     get the new block number.  */

  if (MAY_HAVE_DEBUG_INSNS)
    {
      basic_block succ;
      insn_t insn = sel_bb_head (new_bb);
      insn_t last;

      if (DEBUG_INSN_P (insn)
          && single_succ_p (new_bb)
          && (succ = single_succ (new_bb))
          && succ != EXIT_BLOCK_PTR
          && DEBUG_INSN_P ((last = sel_bb_end (new_bb))))
        {
          while (insn != last && (DEBUG_INSN_P (insn) || NOTE_P (insn)))
            insn = NEXT_INSN (insn);

          if (insn == last)
            {
              sel_global_bb_info_def gbi;
              sel_region_bb_info_def rbi;
              int i;

              if (sched_verbose >= 2)
                sel_print ("Swapping block ids %i and %i\n",
                           new_bb->index, succ->index);

              i = new_bb->index;
              new_bb->index = succ->index;
              succ->index = i;

              SET_BASIC_BLOCK (new_bb->index, new_bb);
              SET_BASIC_BLOCK (succ->index, succ);

              memcpy (&gbi, SEL_GLOBAL_BB_INFO (new_bb), sizeof (gbi));
              memcpy (SEL_GLOBAL_BB_INFO (new_bb), SEL_GLOBAL_BB_INFO (succ),
                      sizeof (gbi));
              memcpy (SEL_GLOBAL_BB_INFO (succ), &gbi, sizeof (gbi));

              memcpy (&rbi, SEL_REGION_BB_INFO (new_bb), sizeof (rbi));
              memcpy (SEL_REGION_BB_INFO (new_bb), SEL_REGION_BB_INFO (succ),
                      sizeof (rbi));
              memcpy (SEL_REGION_BB_INFO (succ), &rbi, sizeof (rbi));

              i = BLOCK_TO_BB (new_bb->index);
              BLOCK_TO_BB (new_bb->index) = BLOCK_TO_BB (succ->index);
              BLOCK_TO_BB (succ->index) = i;

              i = CONTAINING_RGN (new_bb->index);
              CONTAINING_RGN (new_bb->index) = CONTAINING_RGN (succ->index);
              CONTAINING_RGN (succ->index) = i;

              for (i = 0; i < current_nr_blocks; i++)
                if (BB_TO_BLOCK (i) == succ->index)
                  BB_TO_BLOCK (i) = new_bb->index;
                else if (BB_TO_BLOCK (i) == new_bb->index)
                  BB_TO_BLOCK (i) = succ->index;

              FOR_BB_INSNS (new_bb, insn)
                if (INSN_P (insn))
                  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = new_bb->index;

              FOR_BB_INSNS (succ, insn)
                if (INSN_P (insn))
                  EXPR_ORIG_BB_INDEX (INSN_EXPR (insn)) = succ->index;

              if (bitmap_bit_p (code_motion_visited_blocks, new_bb->index))
                {
                  bitmap_set_bit (code_motion_visited_blocks, succ->index);
                  bitmap_clear_bit (code_motion_visited_blocks, new_bb->index);
                }

              gcc_assert (LABEL_P (BB_HEAD (new_bb))
                          && LABEL_P (BB_HEAD (succ)));

              if (sched_verbose >= 4)
                sel_print ("Swapping code labels %i and %i\n",
                           CODE_LABEL_NUMBER (BB_HEAD (new_bb)),
                           CODE_LABEL_NUMBER (BB_HEAD (succ)));

              i = CODE_LABEL_NUMBER (BB_HEAD (new_bb));
              CODE_LABEL_NUMBER (BB_HEAD (new_bb))
                = CODE_LABEL_NUMBER (BB_HEAD (succ));
              CODE_LABEL_NUMBER (BB_HEAD (succ)) = i;
            }
        }
    }

  return bb;
}

/* Return the insn after which we must insert bookkeeping code for path(s)
   incoming into E2->dest, except from E1->src.  */
static insn_t
find_place_for_bookkeeping (edge e1, edge e2)
{
  insn_t place_to_insert;
  /* Find a basic block that can hold bookkeeping.  If it can be found, do not
     create a new basic block, but insert bookkeeping there.  */
  basic_block book_block = find_block_for_bookkeeping (e1, e2, FALSE);

  if (book_block)
    {
      place_to_insert = BB_END (book_block);

      /* Don't use a block containing only debug insns for
         bookkeeping, this causes scheduling differences between debug
         and non-debug compilations, for the block would have been
         removed already.  */
      if (DEBUG_INSN_P (place_to_insert))
        {
          rtx insn = sel_bb_head (book_block);

          while (insn != place_to_insert &&
                 (DEBUG_INSN_P (insn) || NOTE_P (insn)))
            insn = NEXT_INSN (insn);

          if (insn == place_to_insert)
            book_block = NULL;
        }
    }

  if (!book_block)
    {
      book_block = create_block_for_bookkeeping (e1, e2);
      place_to_insert = BB_END (book_block);
      if (sched_verbose >= 9)
        sel_print ("New block is %i, split from bookkeeping block %i\n",
                   EDGE_SUCC (book_block, 0)->dest->index, book_block->index);
    }
  else
    {
      if (sched_verbose >= 9)
        sel_print ("Pre-existing bookkeeping block is %i\n", book_block->index);
    }

  /* If the basic block ends with a jump, insert bookkeeping code right before it.  */
  if (INSN_P (place_to_insert) && control_flow_insn_p (place_to_insert))
    place_to_insert = PREV_INSN (place_to_insert);

  return place_to_insert;
}

/* Find a proper seqno for the bookkeeping insn inserted at PLACE_TO_INSERT
   for JOIN_POINT.  */
static int
find_seqno_for_bookkeeping (insn_t place_to_insert, insn_t join_point)
{
  int seqno;
  rtx next;

  /* Check if we are about to insert the bookkeeping copy before a jump, and
     use the jump's seqno for the copy; otherwise, use JOIN_POINT's seqno.  */
  next = NEXT_INSN (place_to_insert);
  if (INSN_P (next)
      && JUMP_P (next)
      && BLOCK_FOR_INSN (next) == BLOCK_FOR_INSN (place_to_insert))
    {
      gcc_assert (INSN_SCHED_TIMES (next) == 0);
      seqno = INSN_SEQNO (next);
    }
  else if (INSN_SEQNO (join_point) > 0)
    seqno = INSN_SEQNO (join_point);
  else
    {
      seqno = get_seqno_by_preds (place_to_insert);

      /* Sometimes the fences can move in such a way that there will be
         no instructions with positive seqno around this bookkeeping.
         This means that there will be no way to get to it by a regular
         fence movement.  Never mind, because we pick up such pieces for
         rescheduling anyway, so any positive value will do for now.  */
      if (seqno < 0)
        {
          gcc_assert (pipelining_p);
          seqno = 1;
        }
    }

  gcc_assert (seqno > 0);
  return seqno;
}
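
/* Note (added for clarity): the seqno of a bookkeeping copy thus comes
   from one of three sources, tried in order: an unscheduled jump right
   after the insertion point, the join point itself while its seqno is
   still positive, or the predecessors via get_seqno_by_preds, forced to
   1 when pipelining has left no positive seqno nearby.  */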
 
/* Insert a bookkeeping copy of C_EXPR's insn after PLACE_TO_INSERT, assigning
   NEW_SEQNO to it.  Return the created insn.  */
static insn_t
emit_bookkeeping_insn (insn_t place_to_insert, expr_t c_expr, int new_seqno)
{
  rtx new_insn_rtx = create_copy_of_insn_rtx (EXPR_INSN_RTX (c_expr));

  vinsn_t new_vinsn
    = create_vinsn_from_insn_rtx (new_insn_rtx,
                                  VINSN_UNIQUE_P (EXPR_VINSN (c_expr)));

  insn_t new_insn = emit_insn_from_expr_after (c_expr, new_vinsn, new_seqno,
                                               place_to_insert);

  INSN_SCHED_TIMES (new_insn) = 0;
  bitmap_set_bit (current_copies, INSN_UID (new_insn));

  return new_insn;
}

/* Generate a bookkeeping copy of C_EXPR's insn for path(s) incoming into
   E2->dest, except from E1->src (there may be a sequence of empty blocks
   between E1->src and E2->dest).  Return the block containing the copy.
   All scheduler data is initialized for the newly created insn.  */
static basic_block
generate_bookkeeping_insn (expr_t c_expr, edge e1, edge e2)
{
  insn_t join_point, place_to_insert, new_insn;
  int new_seqno;
  bool need_to_exchange_data_sets;

  if (sched_verbose >= 4)
    sel_print ("Generating bookkeeping insn (%d->%d)\n", e1->src->index,
               e2->dest->index);

  join_point = sel_bb_head (e2->dest);
  place_to_insert = find_place_for_bookkeeping (e1, e2);
  if (!place_to_insert)
    return NULL;
  new_seqno = find_seqno_for_bookkeeping (place_to_insert, join_point);
  need_to_exchange_data_sets
    = sel_bb_empty_p (BLOCK_FOR_INSN (place_to_insert));

  new_insn = emit_bookkeeping_insn (place_to_insert, c_expr, new_seqno);

  /* When inserting a bookkeeping insn in a new block, av sets should be as
     follows: the old basic block (that now holds the bookkeeping) keeps the
     data sets it had before generation of bookkeeping, and the new basic
     block (that now holds all other insns of the old basic block) gets
     invalid data sets.  So exchange the data sets for these basic blocks,
     as sel_split_block mistakenly exchanges them in this case.  This cannot
     be done earlier, because when a single instruction is added to a new
     basic block it should hold a NULL lv_set.  */
  if (need_to_exchange_data_sets)
    exchange_data_sets (BLOCK_FOR_INSN (new_insn),
                        BLOCK_FOR_INSN (join_point));

  stat_bookkeeping_copies++;
  return BLOCK_FOR_INSN (new_insn);
}
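
/* Note (added for clarity): generating bookkeeping is a four-step
   pipeline: find or create a block on the other path(s) into the join
   point, pick a seqno for the copy, emit the copy there, and finally
   repair the av/lv data sets that splitting an empty block may have
   swapped.  */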
 
/* Remove from AV_PTR all insns that may need bookkeeping when scheduling
   on FENCE, but which we are unable to copy.  */
static void
remove_insns_that_need_bookkeeping (fence_t fence, av_set_t *av_ptr)
{
  expr_t expr;
  av_set_iterator i;

  /* An expression does not need bookkeeping if it is available on all paths
     from the current block to the original block, and the current block
     dominates the original block.  We check availability on all paths by
     examining EXPR_SPEC; this is not equivalent, because it may be positive
     even if expr is available on all paths (but if expr is not available on
     any path, EXPR_SPEC will be positive).  */

  FOR_EACH_EXPR_1 (expr, i, av_ptr)
    {
      if (!control_flow_insn_p (EXPR_INSN_RTX (expr))
          && (!bookkeeping_p || VINSN_UNIQUE_P (EXPR_VINSN (expr)))
          && (EXPR_SPEC (expr)
              || !EXPR_ORIG_BB_INDEX (expr)
              || !dominated_by_p (CDI_DOMINATORS,
                                  BASIC_BLOCK (EXPR_ORIG_BB_INDEX (expr)),
                                  BLOCK_FOR_INSN (FENCE_INSN (fence)))))
        {
          if (sched_verbose >= 4)
            sel_print ("Expr %d removed because it would need bookkeeping, which "
                       "cannot be created\n", INSN_UID (EXPR_INSN_RTX (expr)));
          av_set_iter_remove (&i);
        }
    }
}

/* Move a conditional jump through some instructions.

   Consider this example:

       ...                     <- current scheduling point
       NOTE BASIC BLOCK:       <- bb header
       (p8)  add r14=r14+0x9;;
       (p8)  mov [r14]=r23
       (!p8) jump L1;;
       NOTE BASIC BLOCK:
       ...

   We can schedule the jump one cycle earlier than the mov, because they
   cannot be executed together, as their predicates are mutually exclusive.

   This is done as follows: first, a new fallthrough basic block is created
   after the jump (this can always be done, because there already should be
   a fallthrough block, where control flow goes in case the predicate is
   true - in our example; otherwise there would be a dependence between
   those instructions and the jump, and we could not schedule the jump right
   now); next, all instructions between the jump and the current scheduling
   point are moved to this new block.  And the result is this:

      NOTE BASIC BLOCK:
      (!p8) jump L1           <- current scheduling point
      NOTE BASIC BLOCK:       <- bb header
      (p8)  add r14=r14+0x9;;
      (p8)  mov [r14]=r23
      NOTE BASIC BLOCK:
      ...
*/
static void
move_cond_jump (rtx insn, bnd_t bnd)
{
  edge ft_edge;
  basic_block block_from, block_next, block_new;
  rtx next, prev, link;

  /* BLOCK_FROM holds the basic block of the jump.  */
  block_from = BLOCK_FOR_INSN (insn);

  /* Moving the jump should not cross any other jumps or beginnings
     of new basic blocks.  */
  gcc_assert (block_from == BLOCK_FOR_INSN (BND_TO (bnd)));

  /* The jump is moved to the boundary.  */
  prev = BND_TO (bnd);
  next = PREV_INSN (insn);
  BND_TO (bnd) = insn;

  ft_edge = find_fallthru_edge (block_from);
  block_next = ft_edge->dest;
  /* There must be a fallthrough block (or where else would control
     flow go in case of a false jump predicate?).  */
  gcc_assert (block_next);

  /* Create a new empty basic block after the source block.  */
  block_new = sel_split_edge (ft_edge);
  gcc_assert (block_new->next_bb == block_next
              && block_from->next_bb == block_new);

  gcc_assert (BB_END (block_from) == insn);

  /* Move all instructions except INSN from BLOCK_FROM to
     BLOCK_NEW.  */
  for (link = prev; link != insn; link = NEXT_INSN (link))
    {
      EXPR_ORIG_BB_INDEX (INSN_EXPR (link)) = block_new->index;
      df_insn_change_bb (link, block_new);
    }

  /* Set correct basic block and instruction properties.  */
  BB_END (block_new) = PREV_INSN (insn);

  NEXT_INSN (PREV_INSN (prev)) = insn;
  PREV_INSN (insn) = PREV_INSN (prev);

  /* Assert there is no jump to BLOCK_NEW, only a fallthrough edge.  */
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (block_new)));
  PREV_INSN (prev) = BB_HEAD (block_new);
  NEXT_INSN (next) = NEXT_INSN (BB_HEAD (block_new));
  NEXT_INSN (BB_HEAD (block_new)) = prev;
  PREV_INSN (NEXT_INSN (next)) = next;

  gcc_assert (!sel_bb_empty_p (block_from)
              && !sel_bb_empty_p (block_new));

  /* Update data sets for BLOCK_NEW to represent that INSN and
     instructions from the other branch of INSN are no longer
     available at BLOCK_NEW.  */
  BB_AV_LEVEL (block_new) = global_level;
  gcc_assert (BB_LV_SET (block_new) == NULL);
  BB_LV_SET (block_new) = get_clear_regset_from_pool ();
  update_data_sets (sel_bb_head (block_new));

  /* INSN is a new basic block header - so prepare its data
     structures and update availability and liveness sets.  */
  update_data_sets (insn);

  if (sched_verbose >= 4)
    sel_print ("Moving jump %d\n", INSN_UID (insn));
}

/* Remove nops generated during move_op to prevent removal of empty
   basic blocks.  */
static void
remove_temp_moveop_nops (bool full_tidying)
{
  int i;
  insn_t insn;

  for (i = 0; VEC_iterate (insn_t, vec_temp_moveop_nops, i, insn); i++)
    {
      gcc_assert (INSN_NOP_P (insn));
      return_nop_to_pool (insn, full_tidying);
    }

  /* Empty the vector.  */
  if (VEC_length (insn_t, vec_temp_moveop_nops) > 0)
    VEC_block_remove (insn_t, vec_temp_moveop_nops, 0,
                      VEC_length (insn_t, vec_temp_moveop_nops));
}

/* Records the maximal UID before moving up an instruction.  Used for
   distinguishing between bookkeeping copies and original insns.  */
static int max_uid_before_move_op = 0;

/* Remove from AV_VLIW_P all instructions but the next one when the debug
   counter tells us so.  The next instruction is fetched from BNDS.  */
static void
remove_insns_for_debug (blist_t bnds, av_set_t *av_vliw_p)
{
  if (! dbg_cnt (sel_sched_insn_cnt))
    /* Leave only the next insn in av_vliw.  */
    {
      av_set_iterator av_it;
      expr_t expr;
      bnd_t bnd = BLIST_BND (bnds);
      insn_t next = BND_TO (bnd);

      gcc_assert (BLIST_NEXT (bnds) == NULL);

      FOR_EACH_EXPR_1 (expr, av_it, av_vliw_p)
        if (EXPR_INSN_RTX (expr) != next)
          av_set_iter_remove (&av_it);
    }
}

/* Compute available instructions on BNDS.  FENCE is the current fence.  Write
   the computed set to *AV_VLIW_P.  */
static void
compute_av_set_on_boundaries (fence_t fence, blist_t bnds, av_set_t *av_vliw_p)
{
  if (sched_verbose >= 2)
    {
      sel_print ("Boundaries: ");
      dump_blist (bnds);
      sel_print ("\n");
    }

  for (; bnds; bnds = BLIST_NEXT (bnds))
    {
      bnd_t bnd = BLIST_BND (bnds);
      av_set_t av1_copy;
      insn_t bnd_to = BND_TO (bnd);

      /* Rewind BND->TO to the basic block header in case some bookkeeping
         instructions were inserted before BND->TO and it needs to be
         adjusted.  */
      if (sel_bb_head_p (bnd_to))
        gcc_assert (INSN_SCHED_TIMES (bnd_to) == 0);
      else
        while (INSN_SCHED_TIMES (PREV_INSN (bnd_to)) == 0)
          {
            bnd_to = PREV_INSN (bnd_to);
            if (sel_bb_head_p (bnd_to))
              break;
          }

      if (BND_TO (bnd) != bnd_to)
        {
          gcc_assert (FENCE_INSN (fence) == BND_TO (bnd));
          FENCE_INSN (fence) = bnd_to;
          BND_TO (bnd) = bnd_to;
        }

      av_set_clear (&BND_AV (bnd));
      BND_AV (bnd) = compute_av_set (BND_TO (bnd), NULL, 0, true);

      av_set_clear (&BND_AV1 (bnd));
      BND_AV1 (bnd) = av_set_copy (BND_AV (bnd));

      moveup_set_inside_insn_group (&BND_AV1 (bnd), NULL);

      av1_copy = av_set_copy (BND_AV1 (bnd));
      av_set_union_and_clear (av_vliw_p, &av1_copy, NULL);
    }

  if (sched_verbose >= 2)
    {
      sel_print ("Available exprs (vliw form): ");
      dump_av_set (*av_vliw_p);
      sel_print ("\n");
    }
}
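
/* Note (added for clarity): *AV_VLIW_P thus becomes the union over all
   boundaries of BND_AV1, i.e. of each boundary's available set after
   moving it up through the current insn group; with a single boundary
   per fence this is just that boundary's BND_AV1.  */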
 
/* Calculate the sequential av set on BND corresponding to the EXPR_VLIW
   expression.  When FOR_MOVEOP is true, also replace the register of
   expressions found with the register from EXPR_VLIW.  */
static av_set_t
find_sequential_best_exprs (bnd_t bnd, expr_t expr_vliw, bool for_moveop)
{
  av_set_t expr_seq = NULL;
  expr_t expr;
  av_set_iterator i;

  FOR_EACH_EXPR (expr, i, BND_AV (bnd))
    {
      if (equal_after_moveup_path_p (expr, NULL, expr_vliw))
        {
          if (for_moveop)
            {
              /* The sequential expression has the right form to pass
                 to move_op except when renaming happened.  Put the
                 correct register in EXPR then.  */
              if (EXPR_SEPARABLE_P (expr) && REG_P (EXPR_LHS (expr)))
                {
                  if (expr_dest_regno (expr) != expr_dest_regno (expr_vliw))
                    {
                      replace_dest_with_reg_in_expr (expr, EXPR_LHS (expr_vliw));
                      stat_renamed_scheduled++;
                    }
                  /* Also put the correct TARGET_AVAILABLE bit on the expr.
                     This is needed when renaming came up with the original
                     register.  */
                  else if (EXPR_TARGET_AVAILABLE (expr)
                           != EXPR_TARGET_AVAILABLE (expr_vliw))
                    {
                      gcc_assert (EXPR_TARGET_AVAILABLE (expr_vliw) == 1);
                      EXPR_TARGET_AVAILABLE (expr) = 1;
                    }
                }
              if (EXPR_WAS_SUBSTITUTED (expr))
                stat_substitutions_total++;
            }

          av_set_add (&expr_seq, expr);

          /* With substitution inside an insn group, it is possible
             that more than one expression in expr_seq will correspond
             to expr_vliw.  In this case, choose one, as the attempt to
             move both leads to miscompiles.  */
          break;
        }
    }

  if (for_moveop && sched_verbose >= 2)
    {
      sel_print ("Best expression(s) (sequential form): ");
      dump_av_set (expr_seq);
      sel_print ("\n");
    }

  return expr_seq;
}


/* Move nop to previous block.  */
static void ATTRIBUTE_UNUSED
move_nop_to_previous_block (insn_t nop, basic_block prev_bb)
{
  insn_t prev_insn, next_insn, note;

  gcc_assert (sel_bb_head_p (nop)
              && prev_bb == BLOCK_FOR_INSN (nop)->prev_bb);
  note = bb_note (BLOCK_FOR_INSN (nop));
  prev_insn = sel_bb_end (prev_bb);
  next_insn = NEXT_INSN (nop);
  gcc_assert (prev_insn != NULL_RTX
              && PREV_INSN (note) == prev_insn);

  NEXT_INSN (prev_insn) = nop;
  PREV_INSN (nop) = prev_insn;

  PREV_INSN (note) = nop;
  NEXT_INSN (note) = next_insn;

  NEXT_INSN (nop) = note;
  PREV_INSN (next_insn) = note;

  BB_END (prev_bb) = nop;
  BLOCK_FOR_INSN (nop) = prev_bb;
}

/* Prepare a place to insert the chosen expression on BND.  */
static insn_t
prepare_place_to_insert (bnd_t bnd)
{
  insn_t place_to_insert;

  /* Init place_to_insert before calling move_op, as the latter
     can possibly remove BND_TO (bnd).  */
  if (/* If this is not the first insn scheduled.  */
      BND_PTR (bnd))
    {
      /* Add it after the last scheduled insn.  */
      place_to_insert = ILIST_INSN (BND_PTR (bnd));
      if (DEBUG_INSN_P (place_to_insert))
        {
          ilist_t l = BND_PTR (bnd);
          while ((l = ILIST_NEXT (l)) &&
                 DEBUG_INSN_P (ILIST_INSN (l)))
            ;
          if (!l)
            place_to_insert = NULL;
        }
    }
  else
    place_to_insert = NULL;

  if (!place_to_insert)
    {
      /* Add it before BND_TO.  The difference is in the
         basic block, where INSN will be added.  */
      place_to_insert = get_nop_from_pool (BND_TO (bnd));
      gcc_assert (BLOCK_FOR_INSN (place_to_insert)
                  == BLOCK_FOR_INSN (BND_TO (bnd)));
    }

  return place_to_insert;
}

/* Find original instructions for EXPR_SEQ and move them to the BND boundary.
   Return the expression to emit in C_EXPR.  */
static bool
move_exprs_to_boundary (bnd_t bnd, expr_t expr_vliw,
                        av_set_t expr_seq, expr_t c_expr)
{
  bool b, should_move;
  unsigned book_uid;
  bitmap_iterator bi;
  int n_bookkeeping_copies_before_moveop;

  /* Make a move.  This call will remove the original operation,
     insert all necessary bookkeeping instructions and update the
     data sets.  After that all we have to do is add the operation
     before BND_TO (BND).  */
  n_bookkeeping_copies_before_moveop = stat_bookkeeping_copies;
  max_uid_before_move_op = get_max_uid ();
  bitmap_clear (current_copies);
  bitmap_clear (current_originators);

  b = move_op (BND_TO (bnd), expr_seq, expr_vliw,
               get_dest_from_orig_ops (expr_seq), c_expr, &should_move);

  /* We should be able to find the expression we've chosen for
     scheduling.  */
  gcc_assert (b);

  if (stat_bookkeeping_copies > n_bookkeeping_copies_before_moveop)
    stat_insns_needed_bookkeeping++;

  EXECUTE_IF_SET_IN_BITMAP (current_copies, 0, book_uid, bi)
    {
      unsigned uid;
      bitmap_iterator bi;

      /* We allocate these bitmaps lazily.  */
      if (! INSN_ORIGINATORS_BY_UID (book_uid))
        INSN_ORIGINATORS_BY_UID (book_uid) = BITMAP_ALLOC (NULL);

      bitmap_copy (INSN_ORIGINATORS_BY_UID (book_uid),
                   current_originators);

      /* Transitively add all originators' originators.  */
      EXECUTE_IF_SET_IN_BITMAP (current_originators, 0, uid, bi)
        if (INSN_ORIGINATORS_BY_UID (uid))
          bitmap_ior_into (INSN_ORIGINATORS_BY_UID (book_uid),
                           INSN_ORIGINATORS_BY_UID (uid));
    }

  return should_move;
}


/* Debug a DFA state as an array of bytes.  */
static void
debug_state (state_t state)
{
  unsigned char *p;
  unsigned int i, size = dfa_state_size;

  sel_print ("state (%u):", size);
  for (i = 0, p = (unsigned char *) state; i < size; i++)
    sel_print (" %d", p[i]);
  sel_print ("\n");
}

/* Advance state on FENCE with INSN.  Return true if INSN is
   an ASM, and we should advance state once more.  */
static bool
advance_state_on_fence (fence_t fence, insn_t insn)
{
  bool asm_p;

  if (recog_memoized (insn) >= 0)
    {
      int res;
      state_t temp_state = alloca (dfa_state_size);

      gcc_assert (!INSN_ASM_P (insn));
      asm_p = false;

      memcpy (temp_state, FENCE_STATE (fence), dfa_state_size);
      res = state_transition (FENCE_STATE (fence), insn);
      gcc_assert (res < 0);

      if (memcmp (temp_state, FENCE_STATE (fence), dfa_state_size))
        {
          FENCE_ISSUED_INSNS (fence)++;

          /* We should never issue more than issue_rate insns.  */
          if (FENCE_ISSUED_INSNS (fence) > issue_rate)
            gcc_unreachable ();
        }
    }
  else
    {
      /* This could be an ASM insn which we'd like to schedule
         on the next cycle.  */
      asm_p = INSN_ASM_P (insn);
      if (!FENCE_STARTS_CYCLE_P (fence) && asm_p)
        advance_one_cycle (fence);
    }

  if (sched_verbose >= 2)
    debug_state (FENCE_STATE (fence));
  if (!DEBUG_INSN_P (insn))
    FENCE_STARTS_CYCLE_P (fence) = 0;
  FENCE_ISSUE_MORE (fence) = can_issue_more;
  return asm_p;
}
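
/* Note (added for clarity): state_transition is expected to accept the
   insn in the current DFA state (a negative result), which the assert
   above enforces; the insn is counted against FENCE_ISSUED_INSNS only
   when it actually changed the automaton state.  */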
 
/* Update FENCE on which INSN was scheduled and this INSN, too.  NEED_STALL
   is nonzero if we need to stall after issuing INSN.  */
static void
update_fence_and_insn (fence_t fence, insn_t insn, int need_stall)
{
  bool asm_p;

  /* First, reflect that something is scheduled on this fence.  */
  asm_p = advance_state_on_fence (fence, insn);
  FENCE_LAST_SCHEDULED_INSN (fence) = insn;
  VEC_safe_push (rtx, gc, FENCE_EXECUTING_INSNS (fence), insn);
  if (SCHED_GROUP_P (insn))
    {
      FENCE_SCHED_NEXT (fence) = INSN_SCHED_NEXT (insn);
      SCHED_GROUP_P (insn) = 0;
    }
  else
    FENCE_SCHED_NEXT (fence) = NULL_RTX;
  if (INSN_UID (insn) < FENCE_READY_TICKS_SIZE (fence))
    FENCE_READY_TICKS (fence) [INSN_UID (insn)] = 0;

  /* Set instruction scheduling info.  This will be used in bundling,
     pipelining, tick computations etc.  */
  ++INSN_SCHED_TIMES (insn);
  EXPR_TARGET_AVAILABLE (INSN_EXPR (insn)) = true;
  EXPR_ORIG_SCHED_CYCLE (INSN_EXPR (insn)) = FENCE_CYCLE (fence);
  INSN_AFTER_STALL_P (insn) = FENCE_AFTER_STALL_P (fence);
  INSN_SCHED_CYCLE (insn) = FENCE_CYCLE (fence);

  /* This does not account for adjust_cost hooks, just add the biggest
     constant the hook may add to the latency.  TODO: make this
     a target dependent constant.  */
  INSN_READY_CYCLE (insn)
    = INSN_SCHED_CYCLE (insn) + (INSN_CODE (insn) < 0
                                 ? 1
                                 : maximal_insn_latency (insn) + 1);

  /* Change these fields last, as they're used above.  */
  FENCE_AFTER_STALL_P (fence) = 0;
  if (asm_p || need_stall)
    advance_one_cycle (fence);

  /* Indicate that we've scheduled something on this fence.  */
  FENCE_SCHEDULED_P (fence) = true;
  scheduled_something_on_previous_fence = true;

  /* Print debug information when insn's fields are updated.  */
  if (sched_verbose >= 2)
    {
      sel_print ("Scheduling insn: ");
      dump_insn_1 (insn, 1);
      sel_print ("\n");
    }
}

/* Update boundary BND (and, if needed, FENCE) with INSN, remove the
   old boundary from BNDSP, add new boundaries to BNDS_TAILP and
   return it.  */
static blist_t *
update_boundaries (fence_t fence, bnd_t bnd, insn_t insn, blist_t *bndsp,
                   blist_t *bnds_tailp)
{
  succ_iterator si;
  insn_t succ;

  advance_deps_context (BND_DC (bnd), insn);
  FOR_EACH_SUCC_1 (succ, si, insn,
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
    {
      ilist_t ptr = ilist_copy (BND_PTR (bnd));

      ilist_add (&ptr, insn);

      if (DEBUG_INSN_P (insn) && sel_bb_end_p (insn)
          && is_ineligible_successor (succ, ptr))
        {
          ilist_clear (&ptr);
          continue;
        }

      if (FENCE_INSN (fence) == insn && !sel_bb_end_p (insn))
        {
          if (sched_verbose >= 9)
            sel_print ("Updating fence insn from %i to %i\n",
                       INSN_UID (insn), INSN_UID (succ));
          FENCE_INSN (fence) = succ;
        }
      blist_add (bnds_tailp, succ, ptr, BND_DC (bnd));
      bnds_tailp = &BLIST_NEXT (*bnds_tailp);
    }

  blist_remove (bndsp);
  return bnds_tailp;
}
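
/* Note (added for clarity): after an insn is issued, its boundary is
   replaced by one new boundary per eligible successor; each new boundary
   remembers the path of scheduled insns (BND_PTR) and shares the advanced
   dependence context, so the group can keep growing past the insn.  */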
 
/* Schedule EXPR_VLIW on BND.  Return the insn emitted.  */
static insn_t
schedule_expr_on_boundary (bnd_t bnd, expr_t expr_vliw, int seqno)
{
  av_set_t expr_seq;
  expr_t c_expr = XALLOCA (expr_def);
  insn_t place_to_insert;
  insn_t insn;
  bool should_move;

  expr_seq = find_sequential_best_exprs (bnd, expr_vliw, true);

  /* In case of scheduling a jump skipping some other instructions,
     prepare the CFG.  After this, the jump is at the boundary and can be
     scheduled as a usual insn by MOVE_OP.  */
  if (vinsn_cond_branch_p (EXPR_VINSN (expr_vliw)))
    {
      insn = EXPR_INSN_RTX (expr_vliw);

      /* Speculative jumps are not handled.  */
      if (insn != BND_TO (bnd)
          && !sel_insn_is_speculation_check (insn))
        move_cond_jump (insn, bnd);
    }

  /* Find a place for C_EXPR to schedule.  */
  place_to_insert = prepare_place_to_insert (bnd);
  should_move = move_exprs_to_boundary (bnd, expr_vliw, expr_seq, c_expr);
  clear_expr (c_expr);

  /* Add the instruction.  The corner case to care about is when
     the expr_seq set has more than one expr, and we chose the one that
     is not equal to expr_vliw.  Then expr_vliw may be an insn in the
     stream, and we can't use it.  Generate a new vinsn.  */
  if (INSN_IN_STREAM_P (EXPR_INSN_RTX (expr_vliw)))
    {
      vinsn_t vinsn_new;

      vinsn_new = vinsn_copy (EXPR_VINSN (expr_vliw), false);
      change_vinsn_in_expr (expr_vliw, vinsn_new);
      should_move = false;
    }
  if (should_move)
    insn = sel_move_insn (expr_vliw, seqno, place_to_insert);
  else
    insn = emit_insn_from_expr_after (expr_vliw, NULL, seqno,
                                      place_to_insert);

  /* Return the nops generated for preserving data sets back
     into the pool.  */
  if (INSN_NOP_P (place_to_insert))
    return_nop_to_pool (place_to_insert, !DEBUG_INSN_P (insn));
  remove_temp_moveop_nops (!DEBUG_INSN_P (insn));

  av_set_clear (&expr_seq);

  /* Save the expression scheduled so as to reset target availability if
     we'll meet it later on the same fence.  */
  if (EXPR_WAS_RENAMED (expr_vliw))
    vinsn_vec_add (&vec_target_unavailable_vinsns, INSN_EXPR (insn));

  /* Check that the recent movement didn't destroy the loop
     structure.  */
  gcc_assert (!pipelining_p
              || current_loop_nest == NULL
              || loop_latch_edge (current_loop_nest));
  return insn;
}

/* Stall for N cycles on FENCE.  */
static void
stall_for_cycles (fence_t fence, int n)
{
  int could_more;

  could_more = n > 1 || FENCE_ISSUED_INSNS (fence) < issue_rate;
  while (n--)
    advance_one_cycle (fence);
  if (could_more)
    FENCE_AFTER_STALL_P (fence) = 1;
}
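
/* Note (added for clarity): FENCE_AFTER_STALL_P is set when we stalled
   for more than one cycle or while the fence could still issue insns on
   the current cycle; update_fence_and_insn later copies it into
   INSN_AFTER_STALL_P of the first insn scheduled after the stall.  */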
 
/* Gather a parallel group of insns at FENCE and assign their seqno
   to SEQNO.  All scheduled insns are gathered in SCHEDULED_INSNS_TAILPP
   list for later recalculation of seqnos.  */
static void
fill_insns (fence_t fence, int seqno, ilist_t **scheduled_insns_tailpp)
{
  blist_t bnds = NULL, *bnds_tailp;
  av_set_t av_vliw = NULL;
  insn_t insn = FENCE_INSN (fence);

  if (sched_verbose >= 2)
    sel_print ("Starting fill_insns for insn %d, cycle %d\n",
               INSN_UID (insn), FENCE_CYCLE (fence));

  blist_add (&bnds, insn, NULL, FENCE_DC (fence));
  bnds_tailp = &BLIST_NEXT (bnds);
  set_target_context (FENCE_TC (fence));
  can_issue_more = FENCE_ISSUE_MORE (fence);
  target_bb = INSN_BB (insn);

  /* Do while we can add any operation to the current group.  */
  do
    {
      blist_t *bnds_tailp1, *bndsp;
      expr_t expr_vliw;
      int need_stall;
      int was_stall = 0, scheduled_insns = 0, stall_iterations = 0;
      int max_insns = pipelining_p ? issue_rate : 2 * issue_rate;
      int max_stall = pipelining_p ? 1 : 3;
      bool last_insn_was_debug = false;
      bool was_debug_bb_end_p = false;

      compute_av_set_on_boundaries (fence, bnds, &av_vliw);
      remove_insns_that_need_bookkeeping (fence, &av_vliw);
      remove_insns_for_debug (bnds, &av_vliw);

      /* Return early if we have nothing to schedule.  */
      if (av_vliw == NULL)
        break;

      /* Choose the best expression and, if needed, destination register
         for it.  */
      do
        {
          expr_vliw = find_best_expr (&av_vliw, bnds, fence, &need_stall);
          if (!expr_vliw && need_stall)
            {
              /* All expressions required a stall.  Do not recompute av sets
                 as we'll get the same answer (modulo the insns between
                 the fence and its boundary, which will not be available for
                 pipelining).  */
              gcc_assert (! expr_vliw && stall_iterations < 2);
              was_stall++;
              /* If we are going to stall for too long, break to recompute av
                 sets and bring more insns for pipelining.  */
              if (need_stall <= 3)
                stall_for_cycles (fence, need_stall);
              else
                {
                  stall_for_cycles (fence, 1);
                  break;
                }
            }
        }
      while (! expr_vliw && need_stall);

      /* Now either we've selected expr_vliw or we have nothing to schedule.  */
      if (!expr_vliw)
        {
          av_set_clear (&av_vliw);
          break;
        }

      bndsp = &bnds;
      bnds_tailp1 = bnds_tailp;

      do
        /* This code will be executed only once until we'd have several
           boundaries per fence.  */
        {
          bnd_t bnd = BLIST_BND (*bndsp);

          if (!av_set_is_in_p (BND_AV1 (bnd), EXPR_VINSN (expr_vliw)))
            {
              bndsp = &BLIST_NEXT (*bndsp);
              continue;
            }

          insn = schedule_expr_on_boundary (bnd, expr_vliw, seqno);
          last_insn_was_debug = DEBUG_INSN_P (insn);
          if (last_insn_was_debug)
            was_debug_bb_end_p = (insn == BND_TO (bnd) && sel_bb_end_p (insn));
          update_fence_and_insn (fence, insn, need_stall);
          bnds_tailp = update_boundaries (fence, bnd, insn, bndsp, bnds_tailp);

          /* Add insn to the list of instructions scheduled on this cycle.  */
          ilist_add (*scheduled_insns_tailpp, insn);
          *scheduled_insns_tailpp = &ILIST_NEXT (**scheduled_insns_tailpp);
        }
      while (*bndsp != *bnds_tailp1);

      av_set_clear (&av_vliw);
      if (!last_insn_was_debug)
        scheduled_insns++;

      /* We currently support information about candidate blocks only for
         one 'target_bb' block.  Hence we can't schedule after a jump insn,
         as this will bring two boundaries and, hence, the necessity to handle
         information for two or more blocks concurrently.  */
      if ((last_insn_was_debug ? was_debug_bb_end_p : sel_bb_end_p (insn))
          || (was_stall
              && (was_stall >= max_stall
                  || scheduled_insns >= max_insns)))
        break;
    }
  while (bnds);

  gcc_assert (!FENCE_BNDS (fence));

  /* Update boundaries of the FENCE.  */
  while (bnds)
    {
      ilist_t ptr = BND_PTR (BLIST_BND (bnds));

      if (ptr)
        {
          insn = ILIST_INSN (ptr);

          if (!ilist_is_in_p (FENCE_BNDS (fence), insn))
            ilist_add (&FENCE_BNDS (fence), insn);
        }

      blist_remove (&bnds);
    }

  /* Update the target context on the fence.  */
  reset_target_context (FENCE_TC (fence), false);
}
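
/* Note (added for clarity): fill_insns is the per-fence core loop.  Each
   iteration (1) computes the vliw-form availability set on the current
   boundaries, (2) asks find_best_expr for an expression, stalling when
   every candidate needs it, (3) moves the chosen expression to its
   boundary and emits it there, and (4) advances the boundaries; the loop
   stops at a bb end or when the stall/insn budget of the group runs
   out.  */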
 
/* All exprs in ORIG_OPS must have the same destination register or memory.
   Return that destination.  */
static rtx
get_dest_from_orig_ops (av_set_t orig_ops)
{
  rtx dest = NULL_RTX;
  av_set_iterator av_it;
  expr_t expr;
  bool first_p = true;

  FOR_EACH_EXPR (expr, av_it, orig_ops)
    {
      rtx x = EXPR_LHS (expr);

      if (first_p)
        {
          first_p = false;
          dest = x;
        }
      else
        gcc_assert (dest == x
                    || (dest != NULL_RTX && x != NULL_RTX
                        && rtx_equal_p (dest, x)));
    }

  return dest;
}

/* Update data sets for the bookkeeping block and record those expressions
   which become no longer available after inserting this bookkeeping.  */
static void
update_and_record_unavailable_insns (basic_block book_block)
{
  av_set_iterator i;
  av_set_t old_av_set = NULL;
  expr_t cur_expr;
  rtx bb_end = sel_bb_end (book_block);

  /* First, get correct liveness in the bookkeeping block.  The problem is
     the range between the bookkeeping insn and the end of the block.  */
  update_liveness_on_insn (bb_end);
  if (control_flow_insn_p (bb_end))
    update_liveness_on_insn (PREV_INSN (bb_end));

  /* If there's a valid av_set on BOOK_BLOCK, then there might exist another
     fence above, where we may choose to schedule an insn which is
     actually blocked from moving up with the bookkeeping we create here.  */
  if (AV_SET_VALID_P (sel_bb_head (book_block)))
    {
      old_av_set = av_set_copy (BB_AV_SET (book_block));
      update_data_sets (sel_bb_head (book_block));

      /* Traverse all the expressions in the old av_set and check whether
         CUR_EXPR is in the new AV_SET.  */
      FOR_EACH_EXPR (cur_expr, i, old_av_set)
        {
          expr_t new_expr = av_set_lookup (BB_AV_SET (book_block),
                                           EXPR_VINSN (cur_expr));

          if (! new_expr
              /* In this case, we can just turn off the E_T_A bit, but we can't
                 represent this information with the current vector.  */
              || EXPR_TARGET_AVAILABLE (new_expr)
                 != EXPR_TARGET_AVAILABLE (cur_expr))
            /* Unfortunately, the below code could also be fired up on
               separable insns.
               FIXME: add an example of how this could happen.  */
            vinsn_vec_add (&vec_bookkeeping_blocked_vinsns, cur_expr);
        }

      av_set_clear (&old_av_set);
    }
}

/* The main effect of this function is that sparams->c_expr is merged
   with (or copied to) lparams->c_expr_merged.  If there's only one successor,
   we avoid merging anything by copying sparams->c_expr to lparams->c_expr_merged.
   lparams->c_expr_merged is copied back to sparams->c_expr after all
   successors have been traversed.  lparams->c_expr_local is an expr allocated
   on the stack in the caller function, and is used if there is more than one
   successor.

   SUCC is one of the SUCCS_NORMAL successors of INSN,
   MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on SUCC,
   LPARAMS and STATIC_PARAMS contain the parameters described above.  */
static void
move_op_merge_succs (insn_t insn ATTRIBUTE_UNUSED,
                     insn_t succ ATTRIBUTE_UNUSED,
                     int moveop_drv_call_res,
                     cmpd_local_params_p lparams, void *static_params)
{
  moveop_static_params_p sparams = (moveop_static_params_p) static_params;

  /* Nothing to do, if the original expr wasn't found below.  */
  if (moveop_drv_call_res != 1)
    return;

  /* If this is the first successor.  */
  if (!lparams->c_expr_merged)
    {
      lparams->c_expr_merged = sparams->c_expr;
      sparams->c_expr = lparams->c_expr_local;
    }
  else
    {
      /* We must merge all found expressions to get reasonable
         EXPR_SPEC_DONE_DS for the resulting insn.  If we don't
         do so then we can first find the expr with epsilon
         speculation success probability and only then with the
         good probability.  As a result the insn will get epsilon
         probability and will never be scheduled because of
         weakness_cutoff in find_best_expr.

         We call merge_expr_data here instead of merge_expr
         because due to speculation C_EXPR and X may have the
         same insns with different speculation types.  And as of
         now such insns are considered non-equal.

         However, EXPR_SCHED_TIMES is different -- we must get
         SCHED_TIMES from a real insn, not a bookkeeping copy.
         We force this here.  Instead, we may consider merging
         SCHED_TIMES to the maximum instead of minimum in the
         below function.  */
      int old_times = EXPR_SCHED_TIMES (lparams->c_expr_merged);

      merge_expr_data (lparams->c_expr_merged, sparams->c_expr, NULL);
      if (EXPR_SCHED_TIMES (sparams->c_expr) == 0)
        EXPR_SCHED_TIMES (lparams->c_expr_merged) = old_times;

      clear_expr (sparams->c_expr);
    }
}

/* Add used regs for the successor SUCC into SPARAMS->USED_REGS.

   SUCC is one of the SUCCS_NORMAL successors of INSN,
   MOVEOP_DRV_CALL_RES is the result of calling code_motion_path_driver on
     SUCC, or 0 if SUCC is one of SUCCS_BACK or SUCCS_OUT.
   STATIC_PARAMS contains the USED_REGS set.  */
static void
fur_merge_succs (insn_t insn ATTRIBUTE_UNUSED, insn_t succ,
                 int moveop_drv_call_res,
                 cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
                 void *static_params)
{
  regset succ_live;
  fur_static_params_p sparams = (fur_static_params_p) static_params;

  /* Here we compute live regsets only for branches that do not lie
     on the code motion paths.  These branches correspond to the value
     MOVEOP_DRV_CALL_RES==0 and include SUCCS_BACK and SUCCS_OUT, though
     for such branches code_motion_path_driver is not called.  */
  if (moveop_drv_call_res != 0)
    return;

  /* Mark all registers that do not meet the following condition:
     (3) not live on the other path of any conditional branch
     that is passed by the operation, in case original
     operations are not present on both paths of the
     conditional branch.  */
  succ_live = compute_live (succ);
  IOR_REG_SET (sparams->used_regs, succ_live);
}

/* This function is called after the last successor.  Copies LP->C_EXPR_MERGED
   into SP->C_EXPR.  */
static void
move_op_after_merge_succs (cmpd_local_params_p lp, void *sparams)
{
  moveop_static_params_p sp = (moveop_static_params_p) sparams;

  sp->c_expr = lp->c_expr_merged;
}

/* Track bookkeeping copies created, insns scheduled, and blocks for
   rescheduling when INSN is found by move_op.  */
static void
track_scheduled_insns_and_blocks (rtx insn)
{
  /* Even if this insn can be a copy that will be removed during the current
     move_op, we still need to count it as an originator.  */
  bitmap_set_bit (current_originators, INSN_UID (insn));

  if (!bitmap_bit_p (current_copies, INSN_UID (insn)))
    {
      /* Note that the original block needs to be rescheduled, as we pulled an
         instruction out of it.  */
      if (INSN_SCHED_TIMES (insn) > 0)
        bitmap_set_bit (blocks_to_reschedule, BLOCK_FOR_INSN (insn)->index);
      else if (INSN_UID (insn) < first_emitted_uid && !DEBUG_INSN_P (insn))
        num_insns_scheduled++;
    }
  else
    bitmap_clear_bit (current_copies, INSN_UID (insn));

  /* For instructions we must immediately remove the insn from the
     stream, so a subsequent update_data_sets () won't include this
     insn into av_set.
     For an expr we must make the insn look like "INSN_REG (insn) := c_expr".  */
  if (INSN_UID (insn) > max_uid_before_move_op)
    stat_bookkeeping_copies--;
}

/* Emit a register-register copy for INSN if needed.  Return true if
   emitted one.  PARAMS is the move_op static parameters.  */
static bool
maybe_emit_renaming_copy (rtx insn,
                          moveop_static_params_p params)
{
  bool insn_emitted = false;
  rtx cur_reg;

  /* Bail out early when the expression cannot be renamed at all.  */
  if (!EXPR_SEPARABLE_P (params->c_expr))
    return false;

  cur_reg = expr_dest_reg (params->c_expr);
  gcc_assert (cur_reg && params->dest && REG_P (params->dest));

  /* If the original operation has an expr, and the register chosen for
     that expr is not the original operation's dest reg, substitute the
     operation's right hand side with the register chosen.  */
  if (REGNO (params->dest) != REGNO (cur_reg))
    {
      insn_t reg_move_insn, reg_move_insn_rtx;

      reg_move_insn_rtx = create_insn_rtx_with_rhs (INSN_VINSN (insn),
                                                    params->dest);
      reg_move_insn = sel_gen_insn_from_rtx_after (reg_move_insn_rtx,
                                                   INSN_EXPR (insn),
                                                   INSN_SEQNO (insn),
                                                   insn);
      EXPR_SPEC_DONE_DS (INSN_EXPR (reg_move_insn)) = 0;
      replace_dest_with_reg_in_expr (params->c_expr, params->dest);

      insn_emitted = true;
      params->was_renamed = true;
    }

  return insn_emitted;
}
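
/* For illustration (register numbers are made up, not from the original
   sources): if the expression being moved up was "r108 = a + b" and
   renaming chose r117, the copy emitted here at the original location
   is "r108 = r117", while the expression scheduled at the boundary
   becomes "r117 = a + b".  */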
 
/* Emit a speculative check for INSN speculated as EXPR if needed.
   Return true if we've emitted one.  PARAMS is the move_op static
   parameters.  */
static bool
maybe_emit_speculative_check (rtx insn, expr_t expr,
                              moveop_static_params_p params)
{
  bool insn_emitted = false;
  insn_t x;
  ds_t check_ds;

  check_ds = get_spec_check_type_for_insn (insn, expr);
  if (check_ds != 0)
    {
      /* A speculation check should be inserted.  */
      x = create_speculation_check (params->c_expr, check_ds, insn);
      insn_emitted = true;
    }
  else
    {
      EXPR_SPEC_DONE_DS (INSN_EXPR (insn)) = 0;
      x = insn;
    }

  gcc_assert (EXPR_SPEC_DONE_DS (INSN_EXPR (x)) == 0
              && EXPR_SPEC_TO_CHECK_DS (INSN_EXPR (x)) == 0);
  return insn_emitted;
}

/* Handle transformations that leave an insn in place of the original
   insn, such as renaming or speculation.  Return true if one of such
   transformations actually happened, and we have emitted this insn.  */
static bool
handle_emitting_transformations (rtx insn, expr_t expr,
                                 moveop_static_params_p params)
{
  bool insn_emitted = false;

  insn_emitted = maybe_emit_renaming_copy (insn, params);
  insn_emitted |= maybe_emit_speculative_check (insn, expr, params);

  return insn_emitted;
}

/* If INSN is the only insn in the basic block (not counting JUMP,
   which may be a jump to next insn, and DEBUG_INSNs), we want to
   leave a NOP there till the return to fill_insns.  */

static bool
need_nop_to_preserve_insn_bb (rtx insn)
{
  insn_t bb_head, bb_end, bb_next, in_next;
  basic_block bb = BLOCK_FOR_INSN (insn);

  bb_head = sel_bb_head (bb);
  bb_end = sel_bb_end (bb);

  if (bb_head == bb_end)
    return true;

  while (bb_head != bb_end && DEBUG_INSN_P (bb_head))
    bb_head = NEXT_INSN (bb_head);

  if (bb_head == bb_end)
    return true;

  while (bb_head != bb_end && DEBUG_INSN_P (bb_end))
    bb_end = PREV_INSN (bb_end);

  if (bb_head == bb_end)
    return true;

  bb_next = NEXT_INSN (bb_head);
  while (bb_next != bb_end && DEBUG_INSN_P (bb_next))
    bb_next = NEXT_INSN (bb_next);

  if (bb_next == bb_end && JUMP_P (bb_end))
    return true;

  in_next = NEXT_INSN (insn);
  while (DEBUG_INSN_P (in_next))
    in_next = NEXT_INSN (in_next);

  if (IN_CURRENT_FENCE_P (in_next))
    return true;

  return false;
}
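
/* Note (added for clarity): the checks above return true when the block
   is empty apart from debug insns, when INSN is accompanied only by a
   trailing jump, or when the next real insn belongs to a current fence,
   i.e. whenever removing INSN outright could delete a block that the
   scheduler still refers to.  */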
 
/* Remove INSN from the stream.  When ONLY_DISCONNECT is true, its data
   is not removed but reused when INSN is re-emitted.  */
static void
remove_insn_from_stream (rtx insn, bool only_disconnect)
{
  /* If there's only one insn in the BB, make sure that a nop is
     inserted into it, so the basic block won't disappear when we
     delete INSN below with sel_remove_insn.  It should also survive
     till the return to fill_insns.  */
  if (need_nop_to_preserve_insn_bb (insn))
    {
      insn_t nop = get_nop_from_pool (insn);
      gcc_assert (INSN_NOP_P (nop));
      VEC_safe_push (insn_t, heap, vec_temp_moveop_nops, nop);
    }

  sel_remove_insn (insn, only_disconnect, false);
}

/* This function is called when original expr is found.
5956
   INSN - current insn traversed, EXPR - the corresponding expr found.
5957
   LPARAMS is the local parameters of code modion driver, STATIC_PARAMS
5958
   is static parameters of move_op.  */
5959
static void
5960
move_op_orig_expr_found (insn_t insn, expr_t expr,
5961
                         cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
5962
                         void *static_params)
5963
{
5964
  bool only_disconnect, insn_emitted;
5965
  moveop_static_params_p params = (moveop_static_params_p) static_params;
5966
 
5967
  copy_expr_onside (params->c_expr, INSN_EXPR (insn));
5968
  track_scheduled_insns_and_blocks (insn);
5969
  insn_emitted = handle_emitting_transformations (insn, expr, params);
5970
  only_disconnect = (params->uid == INSN_UID (insn)
5971
                     && ! insn_emitted  && ! EXPR_WAS_CHANGED (expr));
5972
 
5973
  /* Mark that we've disconnected an insn.  */
5974
  if (only_disconnect)
5975
    params->uid = -1;
5976
  remove_insn_from_stream (insn, only_disconnect);
5977
}
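/* A minimal standalone sketch of the ONLY_DISCONNECT decision above, using
   hypothetical toy values rather than real insns; it is kept under #if 0 so
   the translation unit is unaffected and can be extracted and compiled on
   its own.  The insn's data is merely disconnected -- and thus reused when
   the insn is re-emitted -- only when this is the very insn chosen for
   scheduling and no renaming, speculative check, or other change happened.  */
#if 0
#include <assert.h>
#include <stdbool.h>

static bool
only_disconnect_p (int chosen_uid, int insn_uid,
                   bool insn_emitted, bool expr_was_changed)
{
  return chosen_uid == insn_uid && !insn_emitted && !expr_was_changed;
}

int
main (void)
{
  assert (only_disconnect_p (42, 42, false, false));   /* reuse insn data */
  assert (!only_disconnect_p (42, 43, false, false));  /* a copy: delete it */
  assert (!only_disconnect_p (42, 42, true, false));   /* renamed or checked */
  return 0;
}
#endif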
5978
 
5979
/* This function is called when the original expr is found.
5980
   INSN is the current insn traversed, EXPR is the corresponding expr found;
5981
   crosses_call and original_insns in STATIC_PARAMS are updated.  */
5982
static void
5983
fur_orig_expr_found (insn_t insn, expr_t expr ATTRIBUTE_UNUSED,
5984
                     cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
5985
                     void *static_params)
5986
{
5987
  fur_static_params_p params = (fur_static_params_p) static_params;
5988
  regset tmp;
5989
 
5990
  if (CALL_P (insn))
5991
    params->crosses_call = true;
5992
 
5993
  def_list_add (params->original_insns, insn, params->crosses_call);
5994
 
5995
  /* Mark the registers that do not meet the following condition:
5996
    (2) not among the live registers of the point
5997
        immediately following the first original operation on
5998
        a given downward path, except for the original target
5999
        register of the operation.  */
6000
  tmp = get_clear_regset_from_pool ();
6001
  compute_live_below_insn (insn, tmp);
6002
  AND_COMPL_REG_SET (tmp, INSN_REG_SETS (insn));
6003
  AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS (insn));
6004
  IOR_REG_SET (params->used_regs, tmp);
6005
  return_regset_to_pool (tmp);
6006
 
6007
  /* (*1) We need to add to USED_REGS registers that are read by
6008
     INSN's lhs; otherwise we may choose a wrong src register.
6009
     E.g. (scheduling const expr enabled):
6010
 
6011
        429: ax=0x0     <- Can't use AX for this expr (0x0)
6012
        433: dx=[bp-0x18]
6013
        427: [ax+dx+0x1]=ax
6014
          REG_DEAD: ax
6015
        168: di=dx
6016
          REG_DEAD: dx
6017
     */
6018
  /* FIXME: see comment above and enable MEM_P
6019
     in vinsn_separable_p.  */
6020
  gcc_assert (!VINSN_SEPARABLE_P (INSN_VINSN (insn))
6021
              || !MEM_P (INSN_LHS (insn)));
6022
}
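/* A minimal standalone sketch of the regset arithmetic above, using a
   hypothetical 64-register machine modelled as a bitmask instead of real
   regsets; it is kept under #if 0 so the translation unit is unaffected
   and can be extracted and compiled on its own.  Registers live just below
   the insn, minus those the insn itself sets or clobbers, are accumulated
   into USED_REGS.  */
#if 0
#include <assert.h>
#include <stdint.h>

/* Miniature of: tmp = live_below & ~sets & ~clobbers; used |= tmp.  */
static void
mark_used_below (uint64_t *used_regs, uint64_t live_below,
                 uint64_t insn_sets, uint64_t insn_clobbers)
{
  uint64_t tmp = live_below;
  tmp &= ~insn_sets;      /* AND_COMPL_REG_SET (tmp, INSN_REG_SETS)  */
  tmp &= ~insn_clobbers;  /* AND_COMPL_REG_SET (tmp, INSN_REG_CLOBBERS)  */
  *used_regs |= tmp;      /* IOR_REG_SET (used_regs, tmp)  */
}

int
main (void)
{
  uint64_t used = 0;
  /* r0 and r1 are live below; the insn itself sets r0, so only r1 is
     marked as unavailable for renaming.  */
  mark_used_below (&used, 0x3 /* live: r0,r1 */, 0x1 /* sets r0 */, 0);
  assert (used == 0x2);
  return 0;
}
#endif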
6023
 
6024
/* This function is called on the ascending pass, before returning from
6025
   current basic block.  */
6026
static void
6027
move_op_at_first_insn (insn_t insn, cmpd_local_params_p lparams,
6028
                       void *static_params)
6029
{
6030
  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6031
  basic_block book_block = NULL;
6032
 
6033
  /* When we have removed the boundary insn for scheduling, which also
6034
     happened to be the end insn in its bb, we don't need to update sets.  */
6035
  if (!lparams->removed_last_insn
6036
      && lparams->e1
6037
      && sel_bb_head_p (insn))
6038
    {
6039
      /* We should generate bookkeeping code only if we are not at the
6040
         top level of the move_op.  */
6041
      if (sel_num_cfg_preds_gt_1 (insn))
6042
        book_block = generate_bookkeeping_insn (sparams->c_expr,
6043
                                                lparams->e1, lparams->e2);
6044
      /* Update data sets for the current insn.  */
6045
      update_data_sets (insn);
6046
    }
6047
 
6048
  /* If bookkeeping code was inserted, we need to update av sets of basic
6049
     block that received bookkeeping.  After generation of the bookkeeping insn,
6050
     the bookkeeping block does not contain a valid av set, because we are not
6051
     following the original algorithm in every detail with regard to e.g. renaming
6052
     simple reg-reg copies.  Consider this example:
6053
 
6054
     bookkeeping block           scheduling fence
6055
     \            /
6056
      \    join  /
6057
       ----------
6058
       |        |
6059
       ----------
6060
      /           \
6061
     /             \
6062
     r1 := r2          r1 := r3
6063
 
6064
     We try to schedule insn "r1 := r3" on the current
6065
     scheduling fence.  Also, note that the av set of the bookkeeping block
6066
     contains both insns "r1 := r2" and "r1 := r3".  When the insn has
6067
     been scheduled, the CFG is as follows:
6068
 
6069
     r1 := r3               r1 := r3
6070
     bookkeeping block           scheduling fence
6071
     \            /
6072
      \    join  /
6073
       ----------
6074
       |        |
6075
       ----------
6076
      /          \
6077
     /            \
6078
     r1 := r2
6079
 
6080
     Here, insn "r1 := r3" was scheduled at the current scheduling point
6081
     and bookkeeping code was generated at the bookkeeping block.  This
6082
     way insn "r1 := r2" is no longer available as a whole instruction
6083
     (but only as an expr) ahead of insn "r1 := r3" in the bookkeeping block.
6084
     This situation is handled by calling update_data_sets.
6085
 
6086
     Since update_data_sets is called only on the bookkeeping block, which
6087
     may also have predecessors with av_sets containing instructions that
6088
     are no longer available, we save all such expressions that become
6089
     unavailable during data sets update on the bookkeeping block in
6090
     VEC_BOOKKEEPING_BLOCKED_VINSNS.  Later we avoid selecting such
6091
     expressions for scheduling.  This allows us to avoid recomputation of
6092
     av_sets outside the code motion path.  */
6093
 
6094
  if (book_block)
6095
    update_and_record_unavailable_insns (book_block);
6096
 
6097
  /* If INSN was previously marked for deletion, it's time to do it.  */
6098
  if (lparams->removed_last_insn)
6099
    insn = PREV_INSN (insn);
6100
 
6101
  /* Do not tidy control flow at the topmost moveop, as we can erroneously
6102
     kill a block with a single nop in which the insn should be emitted.  */
6103
  if (lparams->e1)
6104
    tidy_control_flow (BLOCK_FOR_INSN (insn), true);
6105
}
6106
 
6107
/* This function is called on the ascending pass, before returning from the
6108
   current basic block.  */
6109
static void
6110
fur_at_first_insn (insn_t insn,
6111
                   cmpd_local_params_p lparams ATTRIBUTE_UNUSED,
6112
                   void *static_params ATTRIBUTE_UNUSED)
6113
{
6114
  gcc_assert (!sel_bb_head_p (insn) || AV_SET_VALID_P (insn)
6115
              || AV_LEVEL (insn) == -1);
6116
}
6117
 
6118
/* Called on the backward stage of recursion to call moveup_expr for insn
6119
   and sparams->c_expr.  */
6120
static void
6121
move_op_ascend (insn_t insn, void *static_params)
6122
{
6123
  enum MOVEUP_EXPR_CODE res;
6124
  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6125
 
6126
  if (! INSN_NOP_P (insn))
6127
    {
6128
      res = moveup_expr_cached (sparams->c_expr, insn, false);
6129
      gcc_assert (res != MOVEUP_EXPR_NULL);
6130
    }
6131
 
6132
  /* Update liveness for this insn as it was invalidated.  */
6133
  update_liveness_on_insn (insn);
6134
}
6135
 
6136
/* This function is called on enter to the basic block.
6137
   Returns TRUE if this block has already been visited and
6138
   code_motion_path_driver should return 1, FALSE otherwise.  */
6139
static int
6140
fur_on_enter (insn_t insn ATTRIBUTE_UNUSED, cmpd_local_params_p local_params,
6141
              void *static_params, bool visited_p)
6142
{
6143
  fur_static_params_p sparams = (fur_static_params_p) static_params;
6144
 
6145
  if (visited_p)
6146
    {
6147
      /* If we have found something below this block, there should be at
6148
         least one insn in ORIGINAL_INSNS.  */
6149
      gcc_assert (*sparams->original_insns);
6150
 
6151
      /* Adjust CROSSES_CALL, since we may have come to this block along
6152
         a different path.  */
6153
      DEF_LIST_DEF (*sparams->original_insns)->crosses_call
6154
          |= sparams->crosses_call;
6155
    }
6156
  else
6157
    local_params->old_original_insns = *sparams->original_insns;
6158
 
6159
  return 1;
6160
}
6161
 
6162
/* Same as above but for move_op.   */
6163
static int
6164
move_op_on_enter (insn_t insn ATTRIBUTE_UNUSED,
6165
                  cmpd_local_params_p local_params ATTRIBUTE_UNUSED,
6166
                  void *static_params ATTRIBUTE_UNUSED, bool visited_p)
6167
{
6168
  if (visited_p)
6169
    return -1;
6170
  return 1;
6171
}
6172
 
6173
/* This function is called while descending current basic block if current
6174
   insn is not the original EXPR we're searching for.
6175
 
6176
   Return value: FALSE, if code_motion_path_driver should perform a local
6177
                        cleanup and return 0 itself;
6178
                 TRUE, if code_motion_path_driver should continue.  */
6179
static bool
6180
move_op_orig_expr_not_found (insn_t insn, av_set_t orig_ops ATTRIBUTE_UNUSED,
6181
                            void *static_params)
6182
{
6183
  moveop_static_params_p sparams = (moveop_static_params_p) static_params;
6184
 
6185
#ifdef ENABLE_CHECKING
6186
  sparams->failed_insn = insn;
6187
#endif
6188
 
6189
  /* If we're scheduling separate expr, in order to generate correct code
6190
     we need to stop the search at bookkeeping code generated with the
6191
     same destination register or memory.  */
6192
  if (lhs_of_insn_equals_to_dest_p (insn, sparams->dest))
6193
    return false;
6194
  return true;
6195
}
6196
 
6197
/* This function is called while descending current basic block if current
6198
   insn is not the original EXPR we're searching for.
6199
 
6200
   Return value: TRUE (code_motion_path_driver should continue).  */
6201
static bool
6202
fur_orig_expr_not_found (insn_t insn, av_set_t orig_ops, void *static_params)
6203
{
6204
  bool mutexed;
6205
  expr_t r;
6206
  av_set_iterator avi;
6207
  fur_static_params_p sparams = (fur_static_params_p) static_params;
6208
 
6209
  if (CALL_P (insn))
6210
    sparams->crosses_call = true;
6211
  else if (DEBUG_INSN_P (insn))
6212
    return true;
6213
 
6214
  /* If current insn we are looking at cannot be executed together
6215
     with original insn, then we can skip it safely.
6216
 
6217
     Example: ORIG_OPS = { (p6) r14 = sign_extend (r15); }
6218
              INSN = (!p6) r14 = r14 + 1;
6219
 
6220
     Here we can schedule ORIG_OP with lhs = r14, though only
6221
     looking at the set of used and set registers of INSN we must
6222
     forbid it.  So, add set/used in INSN registers to the
6223
     untouchable set only if there is an insn in ORIG_OPS that can
6224
     affect INSN.  */
6225
  mutexed = true;
6226
  FOR_EACH_EXPR (r, avi, orig_ops)
6227
    if (!sched_insns_conditions_mutex_p (insn, EXPR_INSN_RTX (r)))
6228
      {
6229
        mutexed = false;
6230
        break;
6231
      }
6232
 
6233
  /* Mark all registers that do not meet the following condition:
6234
     (1) Not set or read on any path from xi to an instance of the
6235
         original operation.  */
6236
  if (!mutexed)
6237
    {
6238
      IOR_REG_SET (sparams->used_regs, INSN_REG_SETS (insn));
6239
      IOR_REG_SET (sparams->used_regs, INSN_REG_USES (insn));
6240
      IOR_REG_SET (sparams->used_regs, INSN_REG_CLOBBERS (insn));
6241
    }
6242
 
6243
  return true;
6244
}
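/* A minimal standalone sketch of the mutex test above, using hypothetical
   toy predicates instead of the real sched_insns_conditions_mutex_p; it is
   kept under #if 0 so the translation unit is unaffected and can be
   extracted and compiled on its own.  Registers of INSN are added to
   USED_REGS only if some ORIG_OPS insn can execute together with it, i.e.
   their guarding predicates are not mutually exclusive -- mirroring the
   (p6)/(!p6) example in the comment above.  */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct toy_insn { int pred; bool negated; uint64_t regs; };

/* Complementary guards on the same predicate register never co-execute.  */
static bool
mutex_p (const struct toy_insn *a, const struct toy_insn *b)
{
  return a->pred == b->pred && a->negated != b->negated;
}

int
main (void)
{
  struct toy_insn orig = { 6, false, 1u << 14 };  /* (p6)  r14 = ...      */
  struct toy_insn insn = { 6, true,  1u << 14 };  /* (!p6) r14 = r14 + 1  */
  uint64_t used_regs = 0;

  if (!mutex_p (&orig, &insn))
    used_regs |= insn.regs;

  assert (used_regs == 0);  /* r14 stays available for renaming */
  return 0;
}
#endif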
6245
 
6246
/* Hooks and data to perform move_op operations with code_motion_path_driver.  */
6247
struct code_motion_path_driver_info_def move_op_hooks = {
6248
  move_op_on_enter,
6249
  move_op_orig_expr_found,
6250
  move_op_orig_expr_not_found,
6251
  move_op_merge_succs,
6252
  move_op_after_merge_succs,
6253
  move_op_ascend,
6254
  move_op_at_first_insn,
6255
  SUCCS_NORMAL,
6256
  "move_op"
6257
};
6258
 
6259
/* Hooks and data to perform find_used_regs operations
6260
   with code_motion_path_driver.  */
6261
struct code_motion_path_driver_info_def fur_hooks = {
6262
  fur_on_enter,
6263
  fur_orig_expr_found,
6264
  fur_orig_expr_not_found,
6265
  fur_merge_succs,
6266
  NULL, /* fur_after_merge_succs */
6267
  NULL, /* fur_ascend */
6268
  fur_at_first_insn,
6269
  SUCCS_ALL,
6270
  "find_used_regs"
6271
};
6272
 
6273
/* Traverse all successors of INSN.  For each SUCCS_NORMAL successor,
6274
   code_motion_path_driver is called recursively.  The original operation
6275
   was found on at least one path starting with one of INSN's
6276
   successors (this fact is asserted).  ORIG_OPS is the set of expressions
6277
   we're looking for, PATH is the path traversed so far, and STATIC_PARAMS
6278
   are the parameters of either move_op or find_used_regs, depending on the caller.
6279
 
6280
   Return 0 if we haven't found the expression, 1 if we found it, -1 if we don't
6281
   know for sure at this point.  */
6282
static int
6283
code_motion_process_successors (insn_t insn, av_set_t orig_ops,
6284
                                ilist_t path, void *static_params)
6285
{
6286
  int res = 0;
6287
  succ_iterator succ_i;
6288
  rtx succ;
6289
  basic_block bb;
6290
  int old_index;
6291
  unsigned old_succs;
6292
 
6293
  struct cmpd_local_params lparams;
6294
  expr_def _x;
6295
 
6296
  lparams.c_expr_local = &_x;
6297
  lparams.c_expr_merged = NULL;
6298
 
6299
  /* We need to process only NORMAL succs for move_op, and collect live
6300
     registers from ALL branches (including those leading out of the
6301
     region) for find_used_regs.
6302
 
6303
     In move_op, there can be a case when insn's bb number has changed
6304
     due to created bookkeeping.  This happens very rarely, as we need to
6305
     move an expression from the beginning to the end of the same block.
6306
     Rescan successors in this case.  */
6307
 
6308
 rescan:
6309
  bb = BLOCK_FOR_INSN (insn);
6310
  old_index = bb->index;
6311
  old_succs = EDGE_COUNT (bb->succs);
6312
 
6313
  FOR_EACH_SUCC_1 (succ, succ_i, insn, code_motion_path_driver_info->succ_flags)
6314
    {
6315
      int b;
6316
 
6317
      lparams.e1 = succ_i.e1;
6318
      lparams.e2 = succ_i.e2;
6319
 
6320
      /* Go deep into recursion only for NORMAL edges (non-backedges within the
6321
         current region).  */
6322
      if (succ_i.current_flags == SUCCS_NORMAL)
6323
        b = code_motion_path_driver (succ, orig_ops, path, &lparams,
6324
                                     static_params);
6325
      else
6326
        b = 0;
6327
 
6328
      /* Merge the c_exprs found or unify live register sets from different
6329
         successors.  */
6330
      code_motion_path_driver_info->merge_succs (insn, succ, b, &lparams,
6331
                                                 static_params);
6332
      if (b == 1)
6333
        res = b;
6334
      else if (b == -1 && res != 1)
6335
        res = b;
6336
 
6337
      /* We have simplified the control flow below this point.  In this case,
6338
         the iterator becomes invalid.  We need to try again.  */
6339
      if (BLOCK_FOR_INSN (insn)->index != old_index
6340
          || EDGE_COUNT (bb->succs) != old_succs)
6341
        goto rescan;
6342
    }
6343
 
6344
#ifdef ENABLE_CHECKING
6345
  /* Here, RES==1 if the original expr was found for at least one of the
6346
     successors.  After the loop, RES may happen to have zero value
6347
     only if at some point the expr searched is present in av_set, but is
6348
     not found below.  In most cases, this situation is an error.
6349
     The exception is when the original operation is blocked by
6350
     bookkeeping generated for another fence or for another path in current
6351
     move_op.  */
6352
  gcc_assert (res == 1
6353
              || (res == 0
6354
                  && av_set_could_be_blocked_by_bookkeeping_p (orig_ops,
6355
                                                               static_params))
6356
              || res == -1);
6357
#endif
6358
 
6359
  /* Merge data, clean up, etc.  */
6360
  if (res != -1 && code_motion_path_driver_info->after_merge_succs)
6361
    code_motion_path_driver_info->after_merge_succs (&lparams, static_params);
6362
 
6363
  return res;
6364
}
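/* A minimal standalone sketch of how the three-valued results above
   combine (not from this file; kept under #if 0 so the translation unit is
   unaffected and can be extracted and compiled on its own): 1 ("found")
   wins over -1 ("don't know yet"), which wins over 0 ("not found").  */
#if 0
#include <assert.h>

static int
merge_result (int res, int b)
{
  if (b == 1)
    return 1;
  if (b == -1 && res != 1)
    return -1;
  return res;
}

int
main (void)
{
  assert (merge_result (0, 1) == 1);    /* found on one successor */
  assert (merge_result (1, -1) == 1);   /* "found" is sticky */
  assert (merge_result (0, -1) == -1);  /* uncertainty propagates */
  assert (merge_result (0, 0) == 0);    /* nothing found anywhere */
  return 0;
}
#endif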
6365
 
6366
 
6367
/* Perform a cleanup when the driver is about to terminate.  ORIG_OPS_P
6368
   is the pointer to the av set with expressions we were looking for,
6369
   PATH_P is the pointer to the traversed path.  */
6370
static inline void
6371
code_motion_path_driver_cleanup (av_set_t *orig_ops_p, ilist_t *path_p)
6372
{
6373
  ilist_remove (path_p);
6374
  av_set_clear (orig_ops_p);
6375
}
6376
 
6377
/* The driver function that implements move_op or find_used_regs
6378
   functionality, depending on whether code_motion_path_driver_info is set to
6379
   &MOVE_OP_HOOKS or &FUR_HOOKS.  This function implements the common parts
6380
   of code (CFG traversal etc) that are shared among both functions.  INSN
6381
   is the insn we're starting the search from, ORIG_OPS are the expressions
6382
   we're searching for, PATH is the traversed path, LOCAL_PARAMS_IN are the
6383
   local parameters of the driver, and STATIC_PARAMS are the static parameters of
6384
   the caller.
6385
 
6386
   Returns whether original instructions were found.  Note that top-level
6387
   code_motion_path_driver always returns true.  */
6388
static int
6389
code_motion_path_driver (insn_t insn, av_set_t orig_ops, ilist_t path,
6390
                         cmpd_local_params_p local_params_in,
6391
                         void *static_params)
6392
{
6393
  expr_t expr = NULL;
6394
  basic_block bb = BLOCK_FOR_INSN (insn);
6395
  insn_t first_insn, bb_tail, before_first;
6396
  bool removed_last_insn = false;
6397
 
6398
  if (sched_verbose >= 6)
6399
    {
6400
      sel_print ("%s (", code_motion_path_driver_info->routine_name);
6401
      dump_insn (insn);
6402
      sel_print (",");
6403
      dump_av_set (orig_ops);
6404
      sel_print (")\n");
6405
    }
6406
 
6407
  gcc_assert (orig_ops);
6408
 
6409
  /* If no original operations exist below this insn, return immediately.  */
6410
  if (is_ineligible_successor (insn, path))
6411
    {
6412
      if (sched_verbose >= 6)
6413
        sel_print ("Insn %d is ineligible successor\n", INSN_UID (insn));
6414
      return false;
6415
    }
6416
 
6417
  /* The block can have an invalid av set, in which case it was created earlier
6418
     during move_op.  Return immediately.  */
6419
  if (sel_bb_head_p (insn))
6420
    {
6421
      if (! AV_SET_VALID_P (insn))
6422
        {
6423
          if (sched_verbose >= 6)
6424
            sel_print ("Returned from block %d as it had invalid av set\n",
6425
                       bb->index);
6426
          return false;
6427
        }
6428
 
6429
      if (bitmap_bit_p (code_motion_visited_blocks, bb->index))
6430
        {
6431
          /* We have already found an original operation on this branch, do not
6432
             go any further and just return TRUE here.  If we don't stop here,
6433
             function can have exponential behaviour even on the small code
6434
             with many different paths (e.g. with data speculation and
6435
             recovery blocks).  */
6436
          if (sched_verbose >= 6)
6437
            sel_print ("Block %d already visited in this traversal\n", bb->index);
6438
          if (code_motion_path_driver_info->on_enter)
6439
            return code_motion_path_driver_info->on_enter (insn,
6440
                                                           local_params_in,
6441
                                                           static_params,
6442
                                                           true);
6443
        }
6444
    }
6445
 
6446
  if (code_motion_path_driver_info->on_enter)
6447
    code_motion_path_driver_info->on_enter (insn, local_params_in,
6448
                                            static_params, false);
6449
  orig_ops = av_set_copy (orig_ops);
6450
 
6451
  /* Filter the orig_ops set.  */
6452
  if (AV_SET_VALID_P (insn))
6453
    av_set_intersect (&orig_ops, AV_SET (insn));
6454
 
6455
  /* If no more original ops, return immediately.  */
6456
  if (!orig_ops)
6457
    {
6458
      if (sched_verbose >= 6)
6459
        sel_print ("No intersection with av set of block %d\n", bb->index);
6460
      return false;
6461
    }
6462
 
6463
  /* For non-speculative insns we have to leave only one form of the
6464
     original operation, because if we don't, we may end up with
6465
     different C_EXPRs and, consequently, with bookkeeping copies for different
6466
     expression forms along the same code motion path.  That may lead to
6467
     generation of incorrect code.  So for each code motion we stick to
6468
     a single form of the instruction, except for speculative insns
6469
     which we need to keep in different forms with all speculation
6470
     types.  */
6471
  av_set_leave_one_nonspec (&orig_ops);
6472
 
6473
  /* It is not possible that all ORIG_OPS are filtered out.  */
6474
  gcc_assert (orig_ops);
6475
 
6476
  /* It is enough to place only heads and tails of visited basic blocks into
6477
     the PATH.  */
6478
  ilist_add (&path, insn);
6479
  first_insn = insn;
6480
  bb_tail = sel_bb_end (bb);
6481
 
6482
  /* Descend the basic block in search of the original expr; this part
6483
     corresponds to the part of the original move_op procedure executed
6484
     before the recursive call.  */
6485
  for (;;)
6486
    {
6487
      /* Look at the insn and decide if it could be an ancestor of currently
6488
         scheduling operation.  If it is so, then the insn "dest = op" could
6489
         either be replaced with "dest = reg", because REG now holds the result
6490
         of OP, or just removed, if we've scheduled the insn as a whole.
6491
 
6492
         If this insn doesn't contain currently scheduling OP, then proceed
6493
         with searching and look at its successors.  Operations we're searching
6494
         for could have changed when moving up through this insn via
6495
         substituting.  In this case, perform unsubstitution on them first.
6496
 
6497
         When traversing the DAG below this insn is finished, insert
6498
         bookkeeping code, if the insn is a join point, and remove
6499
         leftovers.  */
6500
 
6501
      expr = av_set_lookup (orig_ops, INSN_VINSN (insn));
6502
      if (expr)
6503
        {
6504
          insn_t last_insn = PREV_INSN (insn);
6505
 
6506
          /* We have found the original operation.   */
6507
          if (sched_verbose >= 6)
6508
            sel_print ("Found original operation at insn %d\n", INSN_UID (insn));
6509
 
6510
          code_motion_path_driver_info->orig_expr_found
6511
            (insn, expr, local_params_in, static_params);
6512
 
6513
          /* Step back, so on the way back we'll start traversing from the
6514
             previous insn (or we'll see that it's bb_note and skip that
6515
             loop).  */
6516
          if (insn == first_insn)
6517
            {
6518
              first_insn = NEXT_INSN (last_insn);
6519
              removed_last_insn = sel_bb_end_p (last_insn);
6520
            }
6521
          insn = last_insn;
6522
          break;
6523
        }
6524
      else
6525
        {
6526
          /* We haven't found the original expr, continue descending the basic
6527
             block.  */
6528
          if (code_motion_path_driver_info->orig_expr_not_found
6529
              (insn, orig_ops, static_params))
6530
            {
6531
              /* Av set ops could have been changed when moving through this
6532
                 insn.  To find them below it, we have to un-substitute them.  */
6533
              undo_transformations (&orig_ops, insn);
6534
            }
6535
          else
6536
            {
6537
              /* Clean up and return, if the hook tells us to do so.  It may
6538
                 happen if we've encountered the previously created
6539
                 bookkeeping.  */
6540
              code_motion_path_driver_cleanup (&orig_ops, &path);
6541
              return -1;
6542
            }
6543
 
6544
          gcc_assert (orig_ops);
6545
        }
6546
 
6547
      /* Stop at insn if we got to the end of BB.  */
6548
      if (insn == bb_tail)
6549
        break;
6550
 
6551
      insn = NEXT_INSN (insn);
6552
    }
6553
 
6554
  /* Here INSN either points to the insn before the original insn (may be
6555
     bb_note, if original insn was a bb_head) or to the bb_end.  */
6556
  if (!expr)
6557
    {
6558
      int res;
6559
 
6560
      gcc_assert (insn == sel_bb_end (bb));
6561
 
6562
      /* Add bb tail to PATH (but it doesn't make any sense if it's a bb_head -
6563
         it's already in PATH then).  */
6564
      if (insn != first_insn)
6565
        ilist_add (&path, insn);
6566
 
6567
      /* code_motion_process_successors should be able to find at least one
6568
         successor for which code_motion_path_driver returns TRUE.  */
6569
      res = code_motion_process_successors (insn, orig_ops,
6570
                                            path, static_params);
6571
 
6572
      /* Remove bb tail from path.  */
6573
      if (insn != first_insn)
6574
        ilist_remove (&path);
6575
 
6576
      if (res != 1)
6577
        {
6578
          /* This is the case when one of the original exprs is no longer available
6579
             due to bookkeeping created on this branch with the same register.
6580
             In the original algorithm, which doesn't have update_data_sets call
6581
             on a bookkeeping block, it would simply result in returning
6582
             FALSE when we've encountered a previously generated bookkeeping
6583
             insn in moveop_orig_expr_not_found.  */
6584
          code_motion_path_driver_cleanup (&orig_ops, &path);
6585
          return res;
6586
        }
6587
    }
6588
 
6589
  /* Don't need it any more.  */
6590
  av_set_clear (&orig_ops);
6591
 
6592
  /* Backward pass: now, when we have C_EXPR computed, we'll drag it to
6593
     the beginning of the basic block.  */
6594
  before_first = PREV_INSN (first_insn);
6595
  while (insn != before_first)
6596
    {
6597
      if (code_motion_path_driver_info->ascend)
6598
        code_motion_path_driver_info->ascend (insn, static_params);
6599
 
6600
      insn = PREV_INSN (insn);
6601
    }
6602
 
6603
  /* Now we're at the bb head.  */
6604
  insn = first_insn;
6605
  ilist_remove (&path);
6606
  local_params_in->removed_last_insn = removed_last_insn;
6607
  code_motion_path_driver_info->at_first_insn (insn, local_params_in, static_params);
6608
 
6609
  /* This should be the very last operation as at bb head we could change
6610
     the numbering by creating bookkeeping blocks.  */
6611
  if (removed_last_insn)
6612
    insn = PREV_INSN (insn);
6613
  bitmap_set_bit (code_motion_visited_blocks, BLOCK_FOR_INSN (insn)->index);
6614
  return true;
6615
}
6616
 
6617
/* Move up the operations from ORIG_OPS set traversing the dag starting
6618
   from INSN.  PATH represents the edges traversed so far.
6619
   DEST is the register chosen for scheduling the current expr.  Insert
6620
   bookkeeping code in the join points.  EXPR_VLIW is the chosen expression,
6621
   C_EXPR is how it looks like at the given cfg point.
6622
   Set *SHOULD_MOVE to indicate whether we have only disconnected
6623
   one of the insns found.
6624
 
6625
   Returns whether original instructions were found, which is asserted
6626
   to be true in the caller.  */
6627
static bool
6628
move_op (insn_t insn, av_set_t orig_ops, expr_t expr_vliw,
6629
         rtx dest, expr_t c_expr, bool *should_move)
6630
{
6631
  struct moveop_static_params sparams;
6632
  struct cmpd_local_params lparams;
6633
  bool res;
6634
 
6635
  /* Init params for code_motion_path_driver.  */
6636
  sparams.dest = dest;
6637
  sparams.c_expr = c_expr;
6638
  sparams.uid = INSN_UID (EXPR_INSN_RTX (expr_vliw));
6639
#ifdef ENABLE_CHECKING
6640
  sparams.failed_insn = NULL;
6641
#endif
6642
  sparams.was_renamed = false;
6643
  lparams.e1 = NULL;
6644
 
6645
  /* We haven't visited any blocks yet.  */
6646
  bitmap_clear (code_motion_visited_blocks);
6647
 
6648
  /* Set appropriate hooks and data.  */
6649
  code_motion_path_driver_info = &move_op_hooks;
6650
  res = code_motion_path_driver (insn, orig_ops, NULL, &lparams, &sparams);
6651
 
6652
  if (sparams.was_renamed)
6653
    EXPR_WAS_RENAMED (expr_vliw) = true;
6654
 
6655
  *should_move = (sparams.uid == -1);
6656
 
6657
  return res;
6658
}
6659
 
6660
 
6661
/* Functions that work with regions.  */
6662
 
6663
/* Current number of seqno used in init_seqno and init_seqno_1.  */
6664
static int cur_seqno;
6665
 
6666
/* A helper for init_seqno.  Traverse the region starting from BB and
6667
   compute seqnos for visited insns, marking visited bbs in VISITED_BBS.
6668
   Clear visited blocks from BLOCKS_TO_RESCHEDULE.  */
6669
static void
6670
init_seqno_1 (basic_block bb, sbitmap visited_bbs, bitmap blocks_to_reschedule)
6671
{
6672
  int bbi = BLOCK_TO_BB (bb->index);
6673
  insn_t insn, note = bb_note (bb);
6674
  insn_t succ_insn;
6675
  succ_iterator si;
6676
 
6677
  SET_BIT (visited_bbs, bbi);
6678
  if (blocks_to_reschedule)
6679
    bitmap_clear_bit (blocks_to_reschedule, bb->index);
6680
 
6681
  FOR_EACH_SUCC_1 (succ_insn, si, BB_END (bb),
6682
                   SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
6683
    {
6684
      basic_block succ = BLOCK_FOR_INSN (succ_insn);
6685
      int succ_bbi = BLOCK_TO_BB (succ->index);
6686
 
6687
      gcc_assert (in_current_region_p (succ));
6688
 
6689
      if (!TEST_BIT (visited_bbs, succ_bbi))
6690
        {
6691
          gcc_assert (succ_bbi > bbi);
6692
 
6693
          init_seqno_1 (succ, visited_bbs, blocks_to_reschedule);
6694
        }
6695
    }
6696
 
6697
  for (insn = BB_END (bb); insn != note; insn = PREV_INSN (insn))
6698
    INSN_SEQNO (insn) = cur_seqno--;
6699
}
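/* A minimal standalone sketch of the numbering scheme above on a
   hypothetical three-block chain (not from this file; kept under #if 0 so
   the translation unit is unaffected and can be extracted and compiled on
   its own).  Successors are numbered first from the top of the range, and
   each block is walked back to front, so seqnos end up increasing in
   program order and the region head gets seqno 1.  */
#if 0
#include <assert.h>
#include <stdbool.h>

#define NBLOCKS 3

static const int succ[NBLOCKS] = { 1, 2, -1 };   /* linear chain 0->1->2 */
static const int ninsns[NBLOCKS] = { 2, 1, 2 };  /* insns per block */
static int seqno[NBLOCKS][2];
static bool visited[NBLOCKS];
static int cur_seqno = 5;                        /* total insn count */

static void
number_block (int bb)
{
  visited[bb] = true;
  if (succ[bb] >= 0 && !visited[succ[bb]])
    number_block (succ[bb]);
  /* Walk back to front, like the BB_END .. bb_note loop above.  */
  for (int i = ninsns[bb] - 1; i >= 0; i--)
    seqno[bb][i] = cur_seqno--;
}

int
main (void)
{
  number_block (0);
  assert (cur_seqno == 0);                 /* mirrors init_seqno's assert */
  assert (seqno[0][0] == 1 && seqno[0][1] == 2);
  assert (seqno[1][0] == 3);
  assert (seqno[2][0] == 4 && seqno[2][1] == 5);
  return 0;
}
#endif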
6700
 
6701
/* Initialize seqnos for the current region.  NUMBER_OF_INSNS is the number
6702
   of instructions in the region, BLOCKS_TO_RESCHEDULE contains blocks on
6703
   which we're rescheduling when pipelining, FROM is the block where
6704
   traversing region begins (it may not be the head of the region when
6705
   pipelining, but the head of the loop instead).
6706
 
6707
   Returns the maximal seqno found.  */
6708
static int
6709
init_seqno (int number_of_insns, bitmap blocks_to_reschedule, basic_block from)
6710
{
6711
  sbitmap visited_bbs;
6712
  bitmap_iterator bi;
6713
  unsigned bbi;
6714
 
6715
  visited_bbs = sbitmap_alloc (current_nr_blocks);
6716
 
6717
  if (blocks_to_reschedule)
6718
    {
6719
      sbitmap_ones (visited_bbs);
6720
      EXECUTE_IF_SET_IN_BITMAP (blocks_to_reschedule, 0, bbi, bi)
6721
        {
6722
          gcc_assert (BLOCK_TO_BB (bbi) < current_nr_blocks);
6723
          RESET_BIT (visited_bbs, BLOCK_TO_BB (bbi));
6724
        }
6725
    }
6726
  else
6727
    {
6728
      sbitmap_zero (visited_bbs);
6729
      from = EBB_FIRST_BB (0);
6730
    }
6731
 
6732
  cur_seqno = number_of_insns > 0 ? number_of_insns : sched_max_luid - 1;
6733
  init_seqno_1 (from, visited_bbs, blocks_to_reschedule);
6734
  gcc_assert (cur_seqno == 0 || number_of_insns == 0);
6735
 
6736
  sbitmap_free (visited_bbs);
6737
  return sched_max_luid - 1;
6738
}
6739
 
6740
/* Initialize scheduling parameters for current region.  */
6741
static void
6742
sel_setup_region_sched_flags (void)
6743
{
6744
  enable_schedule_as_rhs_p = 1;
6745
  bookkeeping_p = 1;
6746
  pipelining_p = (bookkeeping_p
6747
                  && (flag_sel_sched_pipelining != 0)
6748
                  && current_loop_nest != NULL);
6749
  max_insns_to_rename = PARAM_VALUE (PARAM_SELSCHED_INSNS_TO_RENAME);
6750
  max_ws = MAX_WS;
6751
}
6752
 
6753
/* Return true if all basic blocks of current region are empty.  */
6754
static bool
6755
current_region_empty_p (void)
6756
{
6757
  int i;
6758
  for (i = 0; i < current_nr_blocks; i++)
6759
    if (! sel_bb_empty_p (BASIC_BLOCK (BB_TO_BLOCK (i))))
6760
      return false;
6761
 
6762
  return true;
6763
}
6764
 
6765
/* Prepare and verify loop nest for pipelining.  */
6766
static void
6767
setup_current_loop_nest (int rgn)
6768
{
6769
  current_loop_nest = get_loop_nest_for_rgn (rgn);
6770
 
6771
  if (!current_loop_nest)
6772
    return;
6773
 
6774
  /* If this loop has any saved loop preheaders from nested loops,
6775
     add these basic blocks to the current region.  */
6776
  sel_add_loop_preheaders ();
6777
 
6778
  /* Check that we're starting with valid information.  */
6779
  gcc_assert (loop_latch_edge (current_loop_nest));
6780
  gcc_assert (LOOP_MARKED_FOR_PIPELINING_P (current_loop_nest));
6781
}
6782
 
6783
/* Compute instruction priorities for current region.  */
6784
static void
6785
sel_compute_priorities (int rgn)
6786
{
6787
  sched_rgn_compute_dependencies (rgn);
6788
 
6789
  /* Compute insn priorities in haifa style.  Then free haifa style
6790
     dependencies that we've calculated for this.  */
6791
  compute_priorities ();
6792
 
6793
  if (sched_verbose >= 5)
6794
    debug_rgn_dependencies (0);
6795
 
6796
  free_rgn_deps ();
6797
}
6798
 
6799
/* Init scheduling data for RGN.  Returns true when this region should not
6800
   be scheduled.  */
6801
static bool
6802
sel_region_init (int rgn)
6803
{
6804
  int i;
6805
  bb_vec_t bbs;
6806
 
6807
  rgn_setup_region (rgn);
6808
 
6809
  /* Even if sched_is_disabled_for_current_region_p() is true, we still
6810
     do region initialization here so the region can be bundled correctly,
6811
     but we'll skip the scheduling in sel_sched_region ().  */
6812
  if (current_region_empty_p ())
6813
    return true;
6814
 
6815
  if (flag_sel_sched_pipelining)
6816
    setup_current_loop_nest (rgn);
6817
 
6818
  sel_setup_region_sched_flags ();
6819
 
6820
  bbs = VEC_alloc (basic_block, heap, current_nr_blocks);
6821
 
6822
  for (i = 0; i < current_nr_blocks; i++)
6823
    VEC_quick_push (basic_block, bbs, BASIC_BLOCK (BB_TO_BLOCK (i)));
6824
 
6825
  sel_init_bbs (bbs, NULL);
6826
 
6827
  /* Initialize luids and dependence analysis which both sel-sched and haifa
6828
     need.  */
6829
  sched_init_luids (bbs, NULL, NULL, NULL);
6830
  sched_deps_init (false);
6831
 
6832
  /* Initialize haifa data.  */
6833
  rgn_setup_sched_infos ();
6834
  sel_set_sched_flags ();
6835
  haifa_init_h_i_d (bbs, NULL, NULL, NULL);
6836
 
6837
  sel_compute_priorities (rgn);
6838
  init_deps_global ();
6839
 
6840
  /* Main initialization.  */
6841
  sel_setup_sched_infos ();
6842
  sel_init_global_and_expr (bbs);
6843
 
6844
  VEC_free (basic_block, heap, bbs);
6845
 
6846
  blocks_to_reschedule = BITMAP_ALLOC (NULL);
6847
 
6848
  /* Init correct liveness sets on each instruction of a single-block loop.
6849
     This is the only situation when we can't update liveness when calling
6850
     compute_live for the first insn of the loop.  */
6851
  if (current_loop_nest)
6852
    {
6853
      int header = (sel_is_loop_preheader_p (BASIC_BLOCK (BB_TO_BLOCK (0)))
6854
                    ? 1
6855
                    : 0);
6856
 
6857
      if (current_nr_blocks == header + 1)
6858
        update_liveness_on_insn
6859
          (sel_bb_head (BASIC_BLOCK (BB_TO_BLOCK (header))));
6860
    }
6861
 
6862
  /* Set hooks so that no newly generated insn will go out unnoticed.  */
6863
  sel_register_cfg_hooks ();
6864
 
6865
  /* !!! We call targetm.sched.md_init () for the whole region, but we invoke
6866
     targetm.sched.md_finish () for every ebb.  */
6867
  if (targetm.sched.md_init)
6868
    /* None of the arguments are actually used in any target.  */
6869
    targetm.sched.md_init (sched_dump, sched_verbose, -1);
6870
 
6871
  first_emitted_uid = get_max_uid () + 1;
6872
  preheader_removed = false;
6873
 
6874
  /* Reset register allocation ticks array.  */
6875
  memset (reg_rename_tick, 0, sizeof reg_rename_tick);
6876
  reg_rename_this_tick = 0;
6877
 
6878
  bitmap_initialize (forced_ebb_heads, 0);
6879
  bitmap_clear (forced_ebb_heads);
6880
 
6881
  setup_nop_vinsn ();
6882
  current_copies = BITMAP_ALLOC (NULL);
6883
  current_originators = BITMAP_ALLOC (NULL);
6884
  code_motion_visited_blocks = BITMAP_ALLOC (NULL);
6885
 
6886
  return false;
6887
}
6888
 
6889
/* Simplify insns after the scheduling.  */
6890
static void
6891
simplify_changed_insns (void)
6892
{
6893
  int i;
6894
 
6895
  for (i = 0; i < current_nr_blocks; i++)
6896
    {
6897
      basic_block bb = BASIC_BLOCK (BB_TO_BLOCK (i));
6898
      rtx insn;
6899
 
6900
      FOR_BB_INSNS (bb, insn)
6901
        if (INSN_P (insn))
6902
          {
6903
            expr_t expr = INSN_EXPR (insn);
6904
 
6905
            if (EXPR_WAS_SUBSTITUTED (expr))
6906
              validate_simplify_insn (insn);
6907
          }
6908
    }
6909
}
6910
 
6911
/* Find boundaries of the EBB starting from basic block BB, marking blocks of
6912
   this EBB in SCHEDULED_BLOCKS and appropriately filling in HEAD, TAIL,
6913
   PREV_HEAD, and NEXT_TAIL fields of the CURRENT_SCHED_INFO structure.  */
6914
static void
6915
find_ebb_boundaries (basic_block bb, bitmap scheduled_blocks)
6916
{
6917
  insn_t head, tail;
6918
  basic_block bb1 = bb;
6919
  if (sched_verbose >= 2)
6920
    sel_print ("Finishing schedule in bbs: ");
6921
 
6922
  do
6923
    {
6924
      bitmap_set_bit (scheduled_blocks, BLOCK_TO_BB (bb1->index));
6925
 
6926
      if (sched_verbose >= 2)
6927
        sel_print ("%d; ", bb1->index);
6928
    }
6929
  while (!bb_ends_ebb_p (bb1) && (bb1 = bb_next_bb (bb1)));
6930
 
6931
  if (sched_verbose >= 2)
6932
    sel_print ("\n");
6933
 
6934
  get_ebb_head_tail (bb, bb1, &head, &tail);
6935
 
6936
  current_sched_info->head = head;
6937
  current_sched_info->tail = tail;
6938
  current_sched_info->prev_head = PREV_INSN (head);
6939
  current_sched_info->next_tail = NEXT_INSN (tail);
6940
}
6941
 
6942
/* Regenerate INSN_SCHED_CYCLEs for insns of current EBB.  */
6943
static void
6944
reset_sched_cycles_in_current_ebb (void)
6945
{
6946
  int last_clock = 0;
6947
  int haifa_last_clock = -1;
6948
  int haifa_clock = 0;
6949
  insn_t insn;
6950
 
6951
  if (targetm.sched.md_init)
6952
    {
6953
      /* None of the arguments are actually used in any target.
6954
         NB: We should have md_reset () hook for cases like this.  */
6955
      targetm.sched.md_init (sched_dump, sched_verbose, -1);
6956
    }
6957
 
6958
  state_reset (curr_state);
6959
  advance_state (curr_state);
6960
 
6961
  for (insn = current_sched_info->head;
6962
       insn != current_sched_info->next_tail;
6963
       insn = NEXT_INSN (insn))
6964
    {
6965
      int cost, haifa_cost;
6966
      int sort_p;
6967
      bool asm_p, real_insn, after_stall;
6968
      int clock;
6969
 
6970
      if (!INSN_P (insn))
6971
        continue;
6972
 
6973
      asm_p = false;
6974
      real_insn = recog_memoized (insn) >= 0;
6975
      clock = INSN_SCHED_CYCLE (insn);
6976
 
6977
      cost = clock - last_clock;
6978
 
6979
      /* Initialize HAIFA_COST.  */
6980
      if (! real_insn)
6981
        {
6982
          asm_p = INSN_ASM_P (insn);
6983
 
6984
          if (asm_p)
6985
            /* This is asm insn which *had* to be scheduled first
6986
               on the cycle.  */
6987
            haifa_cost = 1;
6988
          else
6989
            /* This is a use/clobber insn.  It should not change
6990
               cost.  */
6991
            haifa_cost = 0;
6992
        }
6993
      else
6994
        haifa_cost = estimate_insn_cost (insn, curr_state);
6995
 
6996
      /* Stall for whatever cycles we've stalled before.  */
6997
      after_stall = 0;
6998
      if (INSN_AFTER_STALL_P (insn) && cost > haifa_cost)
6999
        {
7000
          haifa_cost = cost;
7001
          after_stall = 1;
7002
        }
7003
 
7004
      if (haifa_cost > 0)
7005
        {
7006
          int i = 0;
7007
 
7008
          while (haifa_cost--)
7009
            {
7010
              advance_state (curr_state);
7011
              i++;
7012
 
7013
              if (sched_verbose >= 2)
7014
                {
7015
                  sel_print ("advance_state (state_transition)\n");
7016
                  debug_state (curr_state);
7017
                }
7018
 
7019
              /* The DFA may report that e.g. insn requires 2 cycles to be
7020
                 issued, but on the next cycle it says that insn is ready
7021
                 to go.  Check this here.  */
7022
              if (!after_stall
7023
                  && real_insn
7024
                  && haifa_cost > 0
7025
                  && estimate_insn_cost (insn, curr_state) == 0)
7026
                break;
7027
            }
7028
 
7029
          haifa_clock += i;
7030
        }
7031
      else
7032
        gcc_assert (haifa_cost == 0);
7033
 
7034
      if (sched_verbose >= 2)
7035
        sel_print ("Haifa cost for insn %d: %d\n", INSN_UID (insn), haifa_cost);
7036
 
7037
      if (targetm.sched.dfa_new_cycle)
7038
        while (targetm.sched.dfa_new_cycle (sched_dump, sched_verbose, insn,
7039
                                            haifa_last_clock, haifa_clock,
7040
                                            &sort_p))
7041
          {
7042
            advance_state (curr_state);
7043
            haifa_clock++;
7044
            if (sched_verbose >= 2)
7045
              {
7046
                sel_print ("advance_state (dfa_new_cycle)\n");
7047
                debug_state (curr_state);
7048
              }
7049
          }
7050
 
7051
      if (real_insn)
7052
        {
7053
          cost = state_transition (curr_state, insn);
7054
 
7055
          if (sched_verbose >= 2)
7056
            debug_state (curr_state);
7057
 
7058
          gcc_assert (cost < 0);
7059
        }
7060
 
7061
      if (targetm.sched.variable_issue)
7062
        targetm.sched.variable_issue (sched_dump, sched_verbose, insn, 0);
7063
 
7064
      INSN_SCHED_CYCLE (insn) = haifa_clock;
7065
 
7066
      last_clock = clock;
7067
      haifa_last_clock = haifa_clock;
7068
    }
7069
}
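/* A minimal standalone sketch of the clock accounting above, using a
   hypothetical fixed per-insn cost instead of the real DFA and ignoring
   the early-exit and dfa_new_cycle details; it is kept under #if 0 so the
   translation unit is unaffected and can be extracted and compiled on its
   own.  Before each insn the emulated state is advanced once per remaining
   cost unit, and the insn's INSN_SCHED_CYCLE is rewritten with the
   emulated clock.  */
#if 0
#include <assert.h>

int
main (void)
{
  int haifa_cost[] = { 1, 0, 2 };  /* per-insn issue delay */
  int haifa_clock = 0;
  int sched_cycle[3];

  for (int i = 0; i < 3; i++)
    {
      for (int c = haifa_cost[i]; c > 0; c--)
        haifa_clock++;             /* stands in for advance_state ()  */
      sched_cycle[i] = haifa_clock;
    }

  assert (sched_cycle[0] == 1 && sched_cycle[1] == 1 && sched_cycle[2] == 3);
  return 0;
}
#endif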
7070
 
7071
/* Put TImode markers on insns starting a new issue group.  */
7072
static void
7073
put_TImodes (void)
7074
{
7075
  int last_clock = -1;
7076
  insn_t insn;
7077
 
7078
  for (insn = current_sched_info->head; insn != current_sched_info->next_tail;
7079
       insn = NEXT_INSN (insn))
7080
    {
7081
      int cost, clock;
7082
 
7083
      if (!INSN_P (insn))
7084
        continue;
7085
 
7086
      clock = INSN_SCHED_CYCLE (insn);
7087
      cost = (last_clock == -1) ? 1 : clock - last_clock;
7088
 
7089
      gcc_assert (cost >= 0);
7090
 
7091
      if (issue_rate > 1
7092
          && GET_CODE (PATTERN (insn)) != USE
7093
          && GET_CODE (PATTERN (insn)) != CLOBBER)
7094
        {
7095
          if (reload_completed && cost > 0)
7096
            PUT_MODE (insn, TImode);
7097
 
7098
          last_clock = clock;
7099
        }
7100
 
7101
      if (sched_verbose >= 2)
7102
        sel_print ("Cost for insn %d is %d\n", INSN_UID (insn), cost);
7103
    }
7104
}
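/* A minimal standalone sketch of the marking rule above, with hypothetical
   cycle numbers and without the USE/CLOBBER, reload_completed, and
   issue_rate guards of the real code; it is kept under #if 0 so the
   translation unit is unaffected and can be extracted and compiled on its
   own.  An insn whose cycle differs from the previous real insn's cycle
   starts a new issue group and would receive the TImode mark.  */
#if 0
#include <assert.h>
#include <stdbool.h>

int
main (void)
{
  int cycles[] = { 0, 0, 1, 1, 2 };
  bool marked[5];
  int last_clock = -1;

  for (int i = 0; i < 5; i++)
    {
      int cost = (last_clock == -1) ? 1 : cycles[i] - last_clock;
      marked[i] = cost > 0;   /* PUT_MODE (insn, TImode) in the real code */
      last_clock = cycles[i];
    }

  assert (marked[0] && !marked[1] && marked[2] && !marked[3] && marked[4]);
  return 0;
}
#endif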
7105
 
7106
/* Perform MD_FINISH on EBBs comprising current region.  When
7107
   RESET_SCHED_CYCLES_P is true, run a pass emulating the scheduler
7108
   to produce correct sched cycles on insns.  */
7109
static void
7110
sel_region_target_finish (bool reset_sched_cycles_p)
7111
{
7112
  int i;
7113
  bitmap scheduled_blocks = BITMAP_ALLOC (NULL);
7114
 
7115
  for (i = 0; i < current_nr_blocks; i++)
7116
    {
7117
      if (bitmap_bit_p (scheduled_blocks, i))
7118
        continue;
7119
 
7120
      /* While pipelining outer loops, skip bundling for loop
7121
         preheaders.  Those will be rescheduled in the outer loop.  */
7122
      if (sel_is_loop_preheader_p (EBB_FIRST_BB (i)))
7123
        continue;
7124
 
7125
      find_ebb_boundaries (EBB_FIRST_BB (i), scheduled_blocks);
7126
 
7127
      if (no_real_insns_p (current_sched_info->head, current_sched_info->tail))
7128
        continue;
7129
 
7130
      if (reset_sched_cycles_p)
7131
        reset_sched_cycles_in_current_ebb ();
7132
 
7133
      if (targetm.sched.md_init)
7134
        targetm.sched.md_init (sched_dump, sched_verbose, -1);
7135
 
7136
      put_TImodes ();
7137
 
7138
      if (targetm.sched.md_finish)
7139
        {
7140
          targetm.sched.md_finish (sched_dump, sched_verbose);
7141
 
7142
          /* Extend luids so that insns generated by the target will
7143
             get zero luid.  */
7144
          sched_init_luids (NULL, NULL, NULL, NULL);
7145
        }
7146
    }
7147
 
7148
  BITMAP_FREE (scheduled_blocks);
7149
}
7150
 
7151
/* Free the scheduling data for the current region.  When RESET_SCHED_CYCLES_P
7152
   is true, make an additional pass emulating scheduler to get correct insn
7153
   cycles for md_finish calls.  */
7154
static void
7155
sel_region_finish (bool reset_sched_cycles_p)
7156
{
7157
  simplify_changed_insns ();
7158
  sched_finish_ready_list ();
7159
  free_nop_pool ();
7160
 
7161
  /* Free the vectors.  */
7162
  if (vec_av_set)
7163
    VEC_free (expr_t, heap, vec_av_set);
7164
  BITMAP_FREE (current_copies);
7165
  BITMAP_FREE (current_originators);
7166
  BITMAP_FREE (code_motion_visited_blocks);
7167
  vinsn_vec_free (&vec_bookkeeping_blocked_vinsns);
7168
  vinsn_vec_free (&vec_target_unavailable_vinsns);
7169
 
7170
  /* If LV_SET of the region head should be updated, do it now because
7171
     there will be no other chance.  */
7172
  {
7173
    succ_iterator si;
7174
    insn_t insn;
7175
 
7176
    FOR_EACH_SUCC_1 (insn, si, bb_note (EBB_FIRST_BB (0)),
7177
                     SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS)
7178
      {
7179
        basic_block bb = BLOCK_FOR_INSN (insn);
7180
 
7181
        if (!BB_LV_SET_VALID_P (bb))
7182
          compute_live (insn);
7183
      }
7184
  }
7185
 
7186
  /* Emulate the Haifa scheduler for bundling.  */
7187
  if (reload_completed)
7188
    sel_region_target_finish (reset_sched_cycles_p);
7189
 
7190
  sel_finish_global_and_expr ();
7191
 
7192
  bitmap_clear (forced_ebb_heads);
7193
 
7194
  free_nop_vinsn ();
7195
 
7196
  finish_deps_global ();
7197
  sched_finish_luids ();
7198
 
7199
  sel_finish_bbs ();
7200
  BITMAP_FREE (blocks_to_reschedule);
7201
 
7202
  sel_unregister_cfg_hooks ();
7203
 
7204
  max_issue_size = 0;
7205
}
7206
 
7207
 
7208
/* Functions that implement the scheduler driver.  */
7209
 
7210
/* Schedule a parallel instruction group on each of FENCES.  MAX_SEQNO
7211
   is the current maximum seqno.  SCHEDULED_INSNS_TAILPP is the list
7212
   of scheduled insns -- these will be postprocessed later.  */
7213
static void
7214
schedule_on_fences (flist_t fences, int max_seqno,
7215
                    ilist_t **scheduled_insns_tailpp)
7216
{
7217
  flist_t old_fences = fences;
7218
 
7219
  if (sched_verbose >= 1)
7220
    {
7221
      sel_print ("\nScheduling on fences: ");
7222
      dump_flist (fences);
7223
      sel_print ("\n");
7224
    }
7225
 
7226
  scheduled_something_on_previous_fence = false;
7227
  for (; fences; fences = FLIST_NEXT (fences))
7228
    {
7229
      fence_t fence = NULL;
7230
      int seqno = 0;
7231
      flist_t fences2;
7232
      bool first_p = true;
7233
 
7234
      /* Choose the next fence group to schedule.
7235
         The fact that an insn can be scheduled only once
7236
         on a cycle is guaranteed by two properties:
7237
         1. seqnos of parallel groups decrease with each iteration.
7238
         2. If is_ineligible_successor () sees the larger seqno, it
7239
         checks if candidate insn is_in_current_fence_p ().  */
7240
      for (fences2 = old_fences; fences2; fences2 = FLIST_NEXT (fences2))
7241
        {
7242
          fence_t f = FLIST_FENCE (fences2);
7243
 
7244
          if (!FENCE_PROCESSED_P (f))
7245
            {
7246
              int i = INSN_SEQNO (FENCE_INSN (f));
7247
 
7248
              if (first_p || i > seqno)
7249
                {
7250
                  seqno = i;
7251
                  fence = f;
7252
                  first_p = false;
7253
                }
7254
              else
7255
                /* ??? Seqnos of different groups should be different.  */
7256
                gcc_assert (1 || i != seqno);
7257
            }
7258
        }
7259
 
7260
      gcc_assert (fence);
7261
 
7262
      /* As FENCE is nonnull, SEQNO is initialized.  */
7263
      seqno -= max_seqno + 1;
7264
      fill_insns (fence, seqno, scheduled_insns_tailpp);
7265
      FENCE_PROCESSED_P (fence) = true;
7266
    }
7267
 
7268
  /* All av_sets are invalidated by GLOBAL_LEVEL increase, thus we
7269
     don't need to keep bookkeeping-invalidated and target-unavailable
7270
     vinsns any more.  */
7271
  vinsn_vec_clear (&vec_bookkeeping_blocked_vinsns);
7272
  vinsn_vec_clear (&vec_target_unavailable_vinsns);
7273
}
7274
 
7275
/* Calculate MIN_SEQNO and MAX_SEQNO.  */
7276
static void
7277
find_min_max_seqno (flist_t fences, int *min_seqno, int *max_seqno)
7278
{
7279
  *min_seqno = *max_seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7280
 
7281
  /* The first element is already processed.  */
7282
  while ((fences = FLIST_NEXT (fences)))
7283
    {
7284
      int seqno = INSN_SEQNO (FENCE_INSN (FLIST_FENCE (fences)));
7285
 
7286
      if (*min_seqno > seqno)
7287
        *min_seqno = seqno;
7288
      else if (*max_seqno < seqno)
7289
        *max_seqno = seqno;
7290
    }
7291
}
7292
 
7293
/* Calculate new fences from FENCES.  */
7294
static flist_t
7295
calculate_new_fences (flist_t fences, int orig_max_seqno)
7296
{
7297
  flist_t old_fences = fences;
7298
  struct flist_tail_def _new_fences, *new_fences = &_new_fences;
7299
 
7300
  flist_tail_init (new_fences);
7301
  for (; fences; fences = FLIST_NEXT (fences))
7302
    {
7303
      fence_t fence = FLIST_FENCE (fences);
7304
      insn_t insn;
7305
 
7306
      if (!FENCE_BNDS (fence))
7307
        {
7308
          /* This fence doesn't have any successors.  */
7309
          if (!FENCE_SCHEDULED_P (fence))
7310
            {
7311
              /* Nothing was scheduled on this fence.  */
7312
              int seqno;
7313
 
7314
              insn = FENCE_INSN (fence);
7315
              seqno = INSN_SEQNO (insn);
7316
              gcc_assert (seqno > 0 && seqno <= orig_max_seqno);
7317
 
7318
              if (sched_verbose >= 1)
7319
                sel_print ("Fence %d[%d] has not changed\n",
7320
                           INSN_UID (insn),
7321
                           BLOCK_NUM (insn));
7322
              move_fence_to_fences (fences, new_fences);
7323
            }
7324
        }
7325
      else
7326
        extract_new_fences_from (fences, new_fences, orig_max_seqno);
7327
    }
7328
 
7329
  flist_clear (&old_fences);
7330
  return FLIST_TAIL_HEAD (new_fences);
7331
}
7332
 
7333
/* Update seqnos of insns given by PSCHEDULED_INSNS.  MIN_SEQNO and MAX_SEQNO
7334
   are the minimum and maximum seqnos of the group, HIGHEST_SEQNO_IN_USE is
7335
   the highest seqno used in a region.  Return the updated highest seqno.  */
7336
static int
7337
update_seqnos_and_stage (int min_seqno, int max_seqno,
7338
                         int highest_seqno_in_use,
7339
                         ilist_t *pscheduled_insns)
7340
{
7341
  int new_hs;
7342
  ilist_iterator ii;
7343
  insn_t insn;
7344
 
7345
  /* Actually, new_hs is the seqno of the instruction that was
7346
     scheduled first (i.e. it is the first one in SCHEDULED_INSNS).  */
7347
  if (*pscheduled_insns)
7348
    {
7349
      new_hs = (INSN_SEQNO (ILIST_INSN (*pscheduled_insns))
7350
                + highest_seqno_in_use + max_seqno - min_seqno + 2);
7351
      gcc_assert (new_hs > highest_seqno_in_use);
7352
    }
7353
  else
7354
    new_hs = highest_seqno_in_use;
7355
 
7356
  FOR_EACH_INSN (insn, ii, *pscheduled_insns)
7357
    {
7358
      gcc_assert (INSN_SEQNO (insn) < 0);
7359
      INSN_SEQNO (insn) += highest_seqno_in_use + max_seqno - min_seqno + 2;
7360
      gcc_assert (INSN_SEQNO (insn) <= new_hs);
7361
 
7362
      /* When not pipelining, purge unneeded insn info on the scheduled insns.
7363
         For example, having reg_last array of INSN_DEPS_CONTEXT in memory may
7364
         require > 1GB of memory e.g. on limit-fnargs.c.  */
7365
      if (! pipelining_p)
7366
        free_data_for_scheduled_insn (insn);
7367
    }
7368
 
7369
  ilist_clear (pscheduled_insns);
7370
  global_level++;
7371
 
7372
  return new_hs;
7373
}
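/* A minimal standalone sketch of the renumbering arithmetic above, using
   hypothetical seqno values; it is kept under #if 0 so the translation
   unit is unaffected and can be extracted and compiled on its own.  During
   scheduling each insn received seqno - (max_seqno + 1), i.e. a negative
   value, and here the same shift places everything back above
   HIGHEST_SEQNO_IN_USE.  */
#if 0
#include <assert.h>
#include <stdio.h>

int
main (void)
{
  int min_seqno = 3, max_seqno = 5, highest_seqno_in_use = 10;
  /* Seqnos as assigned in schedule_on_fences: seqno -= max_seqno + 1.  */
  int scheduled[] = { 5 - (max_seqno + 1), 4 - (max_seqno + 1) }; /* -1, -2 */
  int shift = highest_seqno_in_use + max_seqno - min_seqno + 2;   /* 14 */
  int new_hs = scheduled[0] + shift;                              /* 13 */

  assert (new_hs > highest_seqno_in_use);
  for (int i = 0; i < 2; i++)
    {
      scheduled[i] += shift;
      assert (scheduled[i] <= new_hs);
      printf ("insn %d: new seqno %d\n", i, scheduled[i]);
    }
  return 0;
}
#endif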
7374
 
7375
/* The main driver for scheduling a region.  This function is responsible
7376
   for correct propagation of fences (i.e. scheduling points) and creating
7377
   a group of parallel insns at each of them.  It also supports
7378
   pipelining.  ORIG_MAX_SEQNO is the maximal seqno before this pass
7379
   of scheduling.  */
7380
static void
7381
sel_sched_region_2 (int orig_max_seqno)
7382
{
7383
  int highest_seqno_in_use = orig_max_seqno;
7384
 
7385
  stat_bookkeeping_copies = 0;
7386
  stat_insns_needed_bookkeeping = 0;
7387
  stat_renamed_scheduled = 0;
7388
  stat_substitutions_total = 0;
7389
  num_insns_scheduled = 0;
7390
 
7391
  while (fences)
7392
    {
7393
      int min_seqno, max_seqno;
7394
      ilist_t scheduled_insns = NULL;
7395
      ilist_t *scheduled_insns_tailp = &scheduled_insns;
7396
 
7397
      find_min_max_seqno (fences, &min_seqno, &max_seqno);
7398
      schedule_on_fences (fences, max_seqno, &scheduled_insns_tailp);
7399
      fences = calculate_new_fences (fences, orig_max_seqno);
7400
      highest_seqno_in_use = update_seqnos_and_stage (min_seqno, max_seqno,
7401
                                                      highest_seqno_in_use,
7402
                                                      &scheduled_insns);
7403
    }
7404
 
7405
  if (sched_verbose >= 1)
7406
    sel_print ("Scheduled %d bookkeeping copies, %d insns needed "
7407
               "bookkeeping, %d insns renamed, %d insns substituted\n",
7408
               stat_bookkeeping_copies,
7409
               stat_insns_needed_bookkeeping,
7410
               stat_renamed_scheduled,
7411
               stat_substitutions_total);
7412
}
7413
 
7414
/* Schedule a region.  When pipelining, search for possibly never scheduled
7415
   bookkeeping code and schedule it.  Afterwards, reschedule the
7416
   pipelined code without pipelining.  */
7417
static void
7418
sel_sched_region_1 (void)
7419
{
7420
  int number_of_insns;
7421
  int orig_max_seqno;
7422
 
7423
  /* Remove empty blocks that might be in the region from the beginning.
7424
     We need to save sched_max_luid before that, as it actually shows
7425
     the number of insns in the region, and purge_empty_blocks can
7426
     alter it.  */
7427
  number_of_insns = sched_max_luid - 1;
7428
  purge_empty_blocks ();
7429
 
7430
  orig_max_seqno = init_seqno (number_of_insns, NULL, NULL);
7431
  gcc_assert (orig_max_seqno >= 1);
7432
 
7433
  /* When pipelining outer loops, create fences on the loop header,
7434
     not preheader.  */
7435
  fences = NULL;
7436
  if (current_loop_nest)
7437
    init_fences (BB_END (EBB_FIRST_BB (0)));
7438
  else
7439
    init_fences (bb_note (EBB_FIRST_BB (0)));
7440
  global_level = 1;
7441
 
7442
  sel_sched_region_2 (orig_max_seqno);
7443
 
7444
  gcc_assert (fences == NULL);
7445
 
7446
  if (pipelining_p)
7447
    {
7448
      int i;
7449
      basic_block bb;
7450
      struct flist_tail_def _new_fences;
7451
      flist_tail_t new_fences = &_new_fences;
7452
      bool do_p = true;
7453
 
7454
      pipelining_p = false;
7455
      max_ws = MIN (max_ws, issue_rate * 3 / 2);
7456
      bookkeeping_p = false;
7457
      enable_schedule_as_rhs_p = false;
7458
 
7459
      /* Schedule newly created code, that has not been scheduled yet.  */
7460
      do_p = true;
7461
 
7462
      while (do_p)
7463
        {
7464
          do_p = false;
7465
 
7466
          for (i = 0; i < current_nr_blocks; i++)
7467
            {
7468
              basic_block bb = EBB_FIRST_BB (i);
7469
 
7470
              if (sel_bb_empty_p (bb))
7471
                {
7472
                  bitmap_clear_bit (blocks_to_reschedule, bb->index);
7473
                  continue;
7474
                }
7475
 
7476
              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
7477
                {
7478
                  clear_outdated_rtx_info (bb);
7479
                  if (sel_insn_is_speculation_check (BB_END (bb))
7480
                      && JUMP_P (BB_END (bb)))
7481
                    bitmap_set_bit (blocks_to_reschedule,
7482
                                    BRANCH_EDGE (bb)->dest->index);
7483
                }
7484
              else if (INSN_SCHED_TIMES (sel_bb_head (bb)) <= 0)
7485
                bitmap_set_bit (blocks_to_reschedule, bb->index);
7486
            }
7487
 
7488
          for (i = 0; i < current_nr_blocks; i++)
            {
              bb = EBB_FIRST_BB (i);

              /* While pipelining outer loops, skip bundling for loop
                 preheaders.  Those will be rescheduled in the outer
                 loop.  */
              if (sel_is_loop_preheader_p (bb))
                {
                  clear_outdated_rtx_info (bb);
                  continue;
                }

              if (bitmap_bit_p (blocks_to_reschedule, bb->index))
                {
                  flist_tail_init (new_fences);

                  orig_max_seqno = init_seqno (0, blocks_to_reschedule, bb);

                  /* Mark BB as head of the new ebb.  */
                  bitmap_set_bit (forced_ebb_heads, bb->index);

                  bitmap_clear_bit (blocks_to_reschedule, bb->index);

                  gcc_assert (fences == NULL);

                  init_fences (bb_note (bb));

                  sel_sched_region_2 (orig_max_seqno);

                  do_p = true;
                  break;
                }
            }
        }
    }
}

/* Schedule the RGN region.  */
void
sel_sched_region (int rgn)
{
  bool schedule_p;
  bool reset_sched_cycles_p;

  if (sel_region_init (rgn))
    return;

  if (sched_verbose >= 1)
    sel_print ("Scheduling region %d\n", rgn);

  schedule_p = (!sched_is_disabled_for_current_region_p ()
                && dbg_cnt (sel_sched_region_cnt));
  reset_sched_cycles_p = pipelining_p;
  if (schedule_p)
    sel_sched_region_1 ();
  else
    /* Force initialization of INSN_SCHED_CYCLEs for correct bundling.  */
    reset_sched_cycles_p = true;

  sel_region_finish (reset_sched_cycles_p);
}

/* Perform global init for the scheduler.  */
static void
sel_global_init (void)
{
  calculate_dominance_info (CDI_DOMINATORS);
  alloc_sched_pools ();

  /* Setup the infos for sched_init.  */
  sel_setup_sched_infos ();
  setup_sched_dump ();

  sched_rgn_init (false);
  sched_init ();

  sched_init_bbs ();
  /* Reset AFTER_RECOVERY if it has been set by the 1st scheduler pass.  */
  after_recovery = 0;
  can_issue_more = issue_rate;

  sched_extend_target ();
  sched_deps_init (true);
  setup_nop_and_exit_insns ();
  sel_extend_global_bb_info ();
  init_lv_sets ();
  init_hard_regs_data ();
}

/* Free the global data of the scheduler.  */
static void
sel_global_finish (void)
{
  free_bb_note_pool ();
  free_lv_sets ();
  sel_finish_global_bb_info ();

  free_regset_pool ();
  free_nop_and_exit_insns ();

  sched_rgn_finish ();
  sched_deps_finish ();
  sched_finish ();

  if (current_loops)
    sel_finish_pipelining ();

  free_sched_pools ();
  free_dominance_info (CDI_DOMINATORS);
}

/* Return true when we need to skip selective scheduling.  Used for debugging.  */
bool
maybe_skip_selective_scheduling (void)
{
  return ! dbg_cnt (sel_sched_cnt);
}

/* The entry point.  */
void
run_selective_scheduling (void)
{
  int rgn;

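  /* A CFG consisting only of the fixed entry and exit blocks contains no
     real insns to schedule.  */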
  if (n_basic_blocks == NUM_FIXED_BLOCKS)
    return;

  sel_global_init ();

  for (rgn = 0; rgn < nr_regions; rgn++)
    sel_sched_region (rgn);

  sel_global_finish ();
}

#endif
