/* Instruction scheduling pass.  This file contains definitions used
   internally in the scheduler.
   Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_SEL_SCHED_IR_H
#define GCC_SEL_SCHED_IR_H

/* For state_t.  */
#include "insn-attr.h"
#include "regset.h"
#include "basic-block.h"
/* For reg_note.  */
#include "rtl.h"
#include "ggc.h"
#include "bitmap.h"
#include "vecprim.h"
#include "sched-int.h"
#include "cfgloop.h"

/* tc_t is short for target context.  This is a state of the target
   backend.  */
typedef void *tc_t;

/* List data types used for av sets, fences, paths, and boundaries.  */

/* Forward declarations for types that are part of some list nodes.  */
struct _list_node;

/* List backend.  */
typedef struct _list_node *_list_t;
#define _LIST_NEXT(L) ((L)->next)

/* Instruction data that is part of vinsn type.  */
struct idata_def;
typedef struct idata_def *idata_t;

/* A virtual instruction, i.e. an instruction as seen by the scheduler.  */
struct vinsn_def;
typedef struct vinsn_def *vinsn_t;

/* RTX list.
   This type is the backend for ilist.  */
typedef _list_t _xlist_t;
#define _XLIST_X(L) ((L)->u.x)
#define _XLIST_NEXT(L) (_LIST_NEXT (L))

/* Instruction.  */
typedef rtx insn_t;

/* List of insns.  */
typedef _xlist_t ilist_t;
#define ILIST_INSN(L) (_XLIST_X (L))
#define ILIST_NEXT(L) (_XLIST_NEXT (L))

/* This lists the possible transformations that are done locally, i.e. in
   moveup_expr.  */
enum local_trans_type
  {
    TRANS_SUBSTITUTION,
    TRANS_SPECULATION
  };

/* This struct is used to record the history of an expression's
   transformations.  */
struct expr_history_def_1
{
  /* UID of the insn.  */
  unsigned uid;

  /* What the expression looked like before the transformation.  */
  vinsn_t old_expr_vinsn;

  /* What the expression looks like after the transformation.  */
  vinsn_t new_expr_vinsn;

  /* And its speculative status.  */
  ds_t spec_ds;

  /* Type of the transformation.  */
  enum local_trans_type type;
};

typedef struct expr_history_def_1 expr_history_def;

DEF_VEC_O (expr_history_def);
DEF_VEC_ALLOC_O (expr_history_def, heap);

/* Expression information.  */
struct _expr
{
  /* Insn description.  */
  vinsn_t vinsn;

  /* SPEC is the degree of speculativeness.
     FIXME: now spec is increased when an rhs is moved through a
     conditional, thus showing only control speculativeness.  In the
     future we'd like to count data spec separately to allow better
     control of scheduling.  */
  int spec;

  /* Degree of speculativeness measured as the probability of executing
     the instruction's original basic block, relative to the current
     scheduling point.  */
  int usefulness;

  /* A priority of this expression.  */
  int priority;

  /* A priority adjustment of this expression.  */
  int priority_adj;

  /* Number of times the insn was scheduled.  */
  int sched_times;

  /* Index of the basic block this expression originated from.  Zero when
     there is more than one originator.  */
  int orig_bb_index;

  /* Instruction should be of SPEC_DONE_DS type in order to be moved to this
     point.  */
  ds_t spec_done_ds;

  /* SPEC_TO_CHECK_DS holds speculation types that should be checked
     (used only during move_op ()).  */
  ds_t spec_to_check_ds;

  /* Cycle on which the original insn was scheduled.  Zero when it has not
     yet been scheduled or when there is more than one originator.  */
  int orig_sched_cycle;

  /* This vector contains the history of the insn's transformations.  */
  VEC(expr_history_def, heap) *history_of_changes;

  /* True (1) when the original target (register or memory) of this
     instruction is available for scheduling, false otherwise.  -1 means
     we're not sure; please run find_used_regs to clarify.  */
  signed char target_available;

  /* True when this expression needs a speculation check to be scheduled.
     This is used during find_used_regs.  */
  BOOL_BITFIELD needs_spec_check_p : 1;

  /* True when the expression was substituted.  Used for statistical
     purposes.  */
  BOOL_BITFIELD was_substituted : 1;

  /* True when the expression was renamed.  */
  BOOL_BITFIELD was_renamed : 1;

  /* True when the expression can't be moved.  */
  BOOL_BITFIELD cant_move : 1;
};

typedef struct _expr expr_def;
typedef expr_def *expr_t;

#define EXPR_VINSN(EXPR) ((EXPR)->vinsn)
#define EXPR_INSN_RTX(EXPR) (VINSN_INSN_RTX (EXPR_VINSN (EXPR)))
#define EXPR_PATTERN(EXPR) (VINSN_PATTERN (EXPR_VINSN (EXPR)))
#define EXPR_LHS(EXPR) (VINSN_LHS (EXPR_VINSN (EXPR)))
#define EXPR_RHS(EXPR) (VINSN_RHS (EXPR_VINSN (EXPR)))
#define EXPR_TYPE(EXPR) (VINSN_TYPE (EXPR_VINSN (EXPR)))
#define EXPR_SEPARABLE_P(EXPR) (VINSN_SEPARABLE_P (EXPR_VINSN (EXPR)))

#define EXPR_SPEC(EXPR) ((EXPR)->spec)
#define EXPR_USEFULNESS(EXPR) ((EXPR)->usefulness)
#define EXPR_PRIORITY(EXPR) ((EXPR)->priority)
#define EXPR_PRIORITY_ADJ(EXPR) ((EXPR)->priority_adj)
#define EXPR_SCHED_TIMES(EXPR) ((EXPR)->sched_times)
#define EXPR_ORIG_BB_INDEX(EXPR) ((EXPR)->orig_bb_index)
#define EXPR_ORIG_SCHED_CYCLE(EXPR) ((EXPR)->orig_sched_cycle)
#define EXPR_SPEC_DONE_DS(EXPR) ((EXPR)->spec_done_ds)
#define EXPR_SPEC_TO_CHECK_DS(EXPR) ((EXPR)->spec_to_check_ds)
#define EXPR_HISTORY_OF_CHANGES(EXPR) ((EXPR)->history_of_changes)
#define EXPR_TARGET_AVAILABLE(EXPR) ((EXPR)->target_available)
#define EXPR_NEEDS_SPEC_CHECK_P(EXPR) ((EXPR)->needs_spec_check_p)
#define EXPR_WAS_SUBSTITUTED(EXPR) ((EXPR)->was_substituted)
#define EXPR_WAS_RENAMED(EXPR) ((EXPR)->was_renamed)
#define EXPR_CANT_MOVE(EXPR) ((EXPR)->cant_move)

#define EXPR_WAS_CHANGED(EXPR) (VEC_length (expr_history_def, \
                                            EXPR_HISTORY_OF_CHANGES (EXPR)) > 0)
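
/* Example (an illustrative sketch, not part of the original interface):
   the accessors above are the sanctioned way to inspect an expr.  A
   hypothetical helper that filters out unpromising candidates might
   combine them like this:

     static bool
     expr_worth_scheduling_p (expr_t expr)
     {
       // EXPR_WAS_CHANGED summarizes the history vector; an expr that
       // was already transformed and cannot move is a poor candidate.
       if (EXPR_WAS_CHANGED (expr) && EXPR_CANT_MOVE (expr))
         return false;

       // target_available > 0 means the original target register or
       // memory is known to be free at the scheduling point.
       return EXPR_TARGET_AVAILABLE (expr) > 0;
     }
*/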

/* Insn definition for the list of original insns in find_used_regs.  */
struct _def
{
  insn_t orig_insn;

  /* FIXME: Get rid of CROSSES_CALL in each def, since if we're moving up
     an rhs from two different places, but only one of the code motion paths
     crosses a call, we can't use any of the call_used_regs, no matter which
     path or whether all paths cross a call.  Thus we should move CROSSES_CALL
     to static params.  */
  bool crosses_call;
};
typedef struct _def *def_t;


/* Availability sets are sets of expressions we're scheduling.  */
typedef _list_t av_set_t;
#define _AV_SET_EXPR(L) (&(L)->u.expr)
#define _AV_SET_NEXT(L) (_LIST_NEXT (L))


/* Boundary of the current fence group.  */
struct _bnd
{
  /* The actual boundary instruction.  */
  insn_t to;

  /* Its path to the fence.  */
  ilist_t ptr;

  /* Availability set at the boundary.  */
  av_set_t av;

  /* This set is moved to the fence.  */
  av_set_t av1;

  /* Deps context at this boundary.  As long as we have one boundary per fence,
     this is just a pointer to the same deps context as in the corresponding
     fence.  */
  deps_t dc;
};
typedef struct _bnd *bnd_t;
#define BND_TO(B) ((B)->to)

/* PTR stands not for pointer, as you might think, but for Path To Root of
   the current instruction group from boundary B.  */
#define BND_PTR(B) ((B)->ptr)
#define BND_AV(B) ((B)->av)
#define BND_AV1(B) ((B)->av1)
#define BND_DC(B) ((B)->dc)

/* List of boundaries.  */
typedef _list_t blist_t;
#define BLIST_BND(L) (&(L)->u.bnd)
#define BLIST_NEXT(L) (_LIST_NEXT (L))


/* Fence information.  A fence represents the current scheduling point and
   also blocks code motion through it when pipelining.  */
struct _fence
{
  /* Insn before which we gather an instruction group.  */
  insn_t insn;

  /* Modeled state of the processor pipeline.  */
  state_t state;

  /* Current cycle that is being scheduled on this fence.  */
  int cycle;

  /* Number of insns that were scheduled on the current cycle.
     This information has to be local to a fence.  */
  int cycle_issued_insns;

  /* At the end of fill_insns () this field holds the list of the instructions
     that are inner boundaries of the scheduled parallel group.  */
  ilist_t bnds;

  /* Deps context at this fence.  It is used to model dependencies at the
     fence so that insn ticks can be properly evaluated.  */
  deps_t dc;

  /* Target context at this fence.  Used to save and load any local target
     scheduling information when changing fences.  */
  tc_t tc;

  /* A vector of insns that are scheduled but not yet completed.  */
  VEC (rtx,gc) *executing_insns;

  /* A vector indexed by UIDs that caches the earliest cycle on which
     an insn can be scheduled on this fence.  */
  int *ready_ticks;

  /* Its size.  */
  int ready_ticks_size;

  /* Insn which has been scheduled last on this fence.  */
  rtx last_scheduled_insn;

  /* The last value of the can_issue_more variable on this fence.  */
  int issue_more;

  /* If non-NULL force the next scheduled insn to be SCHED_NEXT.  */
  rtx sched_next;

  /* True if fill_insns processed this fence.  */
  BOOL_BITFIELD processed_p : 1;

  /* True if fill_insns actually scheduled something on this fence.  */
  BOOL_BITFIELD scheduled_p : 1;

  /* True when the next insn scheduled here would start a cycle.  */
  BOOL_BITFIELD starts_cycle_p : 1;

  /* True when the next insn scheduled here would be scheduled after a stall.  */
  BOOL_BITFIELD after_stall_p : 1;
};
typedef struct _fence *fence_t;

#define FENCE_INSN(F) ((F)->insn)
#define FENCE_STATE(F) ((F)->state)
#define FENCE_BNDS(F) ((F)->bnds)
#define FENCE_PROCESSED_P(F) ((F)->processed_p)
#define FENCE_SCHEDULED_P(F) ((F)->scheduled_p)
#define FENCE_ISSUED_INSNS(F) ((F)->cycle_issued_insns)
#define FENCE_CYCLE(F) ((F)->cycle)
#define FENCE_STARTS_CYCLE_P(F) ((F)->starts_cycle_p)
#define FENCE_AFTER_STALL_P(F) ((F)->after_stall_p)
#define FENCE_DC(F) ((F)->dc)
#define FENCE_TC(F) ((F)->tc)
#define FENCE_LAST_SCHEDULED_INSN(F) ((F)->last_scheduled_insn)
#define FENCE_ISSUE_MORE(F) ((F)->issue_more)
#define FENCE_EXECUTING_INSNS(F) ((F)->executing_insns)
#define FENCE_READY_TICKS(F) ((F)->ready_ticks)
#define FENCE_READY_TICKS_SIZE(F) ((F)->ready_ticks_size)
#define FENCE_SCHED_NEXT(F) ((F)->sched_next)

/* List of fences.  */
typedef _list_t flist_t;
#define FLIST_FENCE(L) (&(L)->u.fence)
#define FLIST_NEXT(L) (_LIST_NEXT (L))
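
/* Example (an illustrative sketch, not part of the original interface):
   a fence list is walked with the two macros above; flist_lookup,
   declared near the end of this header, is essentially this loop:

     static fence_t
     find_fence (flist_t l, insn_t insn)
     {
       for (; l; l = FLIST_NEXT (l))
         if (FENCE_INSN (FLIST_FENCE (l)) == insn)
           return FLIST_FENCE (l);
       return NULL;
     }
*/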

/* List of fences with a pointer to the tail node.  */
struct flist_tail_def
{
  flist_t head;
  flist_t *tailp;
};

typedef struct flist_tail_def *flist_tail_t;
#define FLIST_TAIL_HEAD(L) ((L)->head)
#define FLIST_TAIL_TAILP(L) ((L)->tailp)

/* List node information.  A list node can be any of the types above.  */
struct _list_node
{
  _list_t next;

  union
  {
    rtx x;
    struct _bnd bnd;
    expr_def expr;
    struct _fence fence;
    struct _def def;
    void *data;
  } u;
};


/* _list_t functions.
   All of the _*list_* functions are used through accessor macros, thus
   we can't move them to sel-sched-ir.c.  */
extern alloc_pool sched_lists_pool;

static inline _list_t
_list_alloc (void)
{
  return (_list_t) pool_alloc (sched_lists_pool);
}

static inline void
_list_add (_list_t *lp)
{
  _list_t l = _list_alloc ();

  _LIST_NEXT (l) = *lp;
  *lp = l;
}

static inline void
_list_remove_nofree (_list_t *lp)
{
  _list_t n = *lp;

  *lp = _LIST_NEXT (n);
}

static inline void
_list_remove (_list_t *lp)
{
  _list_t n = *lp;

  *lp = _LIST_NEXT (n);
  pool_free (sched_lists_pool, n);
}

static inline void
_list_clear (_list_t *l)
{
  while (*l)
    _list_remove (l);
}
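
/* Example (an illustrative sketch, not part of the original interface):
   the list backend works on a pointer-to-head, so pushing and draining
   an rtx list looks like this (insn1/insn2 are hypothetical insns;
   _xlist_add is defined a little further below):

     _xlist_t l = NULL;

     _xlist_add (&l, insn1);    // l: insn1
     _xlist_add (&l, insn2);    // l: insn2, insn1 (pushed at the front)

     if (_xlist_is_in_p (l, insn1))
       _xlist_clear (&l);       // returns every node to sched_lists_pool
*/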


/* List iterator backend.  */
typedef struct
{
  /* The list we're iterating.  */
  _list_t *lp;

  /* True when this iterator supports removing.  */
  bool can_remove_p;

  /* True when we've actually removed something.  */
  bool removed_p;
} _list_iterator;

static inline void
_list_iter_start (_list_iterator *ip, _list_t *lp, bool can_remove_p)
{
  ip->lp = lp;
  ip->can_remove_p = can_remove_p;
  ip->removed_p = false;
}

static inline void
_list_iter_next (_list_iterator *ip)
{
  if (!ip->removed_p)
    ip->lp = &_LIST_NEXT (*ip->lp);
  else
    ip->removed_p = false;
}

static inline void
_list_iter_remove (_list_iterator *ip)
{
  gcc_assert (!ip->removed_p && ip->can_remove_p);
  _list_remove (ip->lp);
  ip->removed_p = true;
}

static inline void
_list_iter_remove_nofree (_list_iterator *ip)
{
  gcc_assert (!ip->removed_p && ip->can_remove_p);
  _list_remove_nofree (ip->lp);
  ip->removed_p = true;
}

/* General macros to traverse a list.  FOR_EACH_* interfaces are
   implemented using these.  */
#define _FOR_EACH(TYPE, ELEM, I, L)                             \
  for (_list_iter_start (&(I), &(L), false);                    \
       _list_iter_cond_##TYPE (*(I).lp, &(ELEM));               \
       _list_iter_next (&(I)))

#define _FOR_EACH_1(TYPE, ELEM, I, LP)                              \
  for (_list_iter_start (&(I), (LP), true);                         \
       _list_iter_cond_##TYPE (*(I).lp, &(ELEM));                   \
       _list_iter_next (&(I)))


/* _xlist_t functions.  */

static inline void
_xlist_add (_xlist_t *lp, rtx x)
{
  _list_add (lp);
  _XLIST_X (*lp) = x;
}

#define _xlist_remove(LP) (_list_remove (LP))
#define _xlist_clear(LP) (_list_clear (LP))

static inline bool
_xlist_is_in_p (_xlist_t l, rtx x)
{
  while (l)
    {
      if (_XLIST_X (l) == x)
        return true;
      l = _XLIST_NEXT (l);
    }

  return false;
}

/* Used through _FOR_EACH.  */
static inline bool
_list_iter_cond_x (_xlist_t l, rtx *xp)
{
  if (l)
    {
      *xp = _XLIST_X (l);
      return true;
    }

  return false;
}

#define _xlist_iter_remove(IP) (_list_iter_remove (IP))

typedef _list_iterator _xlist_iterator;
#define _FOR_EACH_X(X, I, L) _FOR_EACH (x, (X), (I), (L))
#define _FOR_EACH_X_1(X, I, LP) _FOR_EACH_1 (x, (X), (I), (LP))


/* ilist_t functions.  Instruction lists are simply RTX lists.  */

#define ilist_add(LP, INSN) (_xlist_add ((LP), (INSN)))
#define ilist_remove(LP) (_xlist_remove (LP))
#define ilist_clear(LP) (_xlist_clear (LP))
#define ilist_is_in_p(L, INSN) (_xlist_is_in_p ((L), (INSN)))
#define ilist_iter_remove(IP) (_xlist_iter_remove (IP))

typedef _xlist_iterator ilist_iterator;
#define FOR_EACH_INSN(INSN, I, L) _FOR_EACH_X (INSN, I, L)
#define FOR_EACH_INSN_1(INSN, I, LP) _FOR_EACH_X_1 (INSN, I, LP)
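
/* Example (an illustrative sketch, not part of the original interface):
   FOR_EACH_INSN declares nothing itself, so the element and the iterator
   must be declared up front.  The _1 variant takes a pointer to the list
   head and permits removal through the iterator:

     insn_t insn;
     ilist_iterator i;

     FOR_EACH_INSN (insn, i, BND_PTR (bnd))    // read-only walk
       note_insn (insn);                       // hypothetical callback

     FOR_EACH_INSN_1 (insn, i, &path)          // 'path' is an ilist_t
       if (insn == junk_insn)                  // hypothetical predicate
         ilist_iter_remove (&i);
*/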


/* Av set iterators.  */
typedef _list_iterator av_set_iterator;
#define FOR_EACH_EXPR(EXPR, I, AV) _FOR_EACH (expr, (EXPR), (I), (AV))
#define FOR_EACH_EXPR_1(EXPR, I, AV) _FOR_EACH_1 (expr, (EXPR), (I), (AV))

/* Used through _FOR_EACH.  */
static bool
_list_iter_cond_expr (av_set_t av, expr_t *exprp)
{
  if (av)
    {
      *exprp = _AV_SET_EXPR (av);
      return true;
    }

  return false;
}
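
/* Example (an illustrative sketch, not part of the original interface):
   filtering an availability set in place; av_set_iter_remove is declared
   near the end of this header:

     expr_t expr;
     av_set_iterator i;

     FOR_EACH_EXPR_1 (expr, i, &av)    // 'av' is an av_set_t
       if (EXPR_CANT_MOVE (expr))
         av_set_iter_remove (&i);
*/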


/* Def list iterators.  */
typedef _list_t def_list_t;
typedef _list_iterator def_list_iterator;

#define DEF_LIST_NEXT(L) (_LIST_NEXT (L))
#define DEF_LIST_DEF(L) (&(L)->u.def)

#define FOR_EACH_DEF(DEF, I, DEF_LIST) _FOR_EACH (def, (DEF), (I), (DEF_LIST))

/* Used through _FOR_EACH.  */
static inline bool
_list_iter_cond_def (def_list_t def_list, def_t *def)
{
  if (def_list)
    {
      *def = DEF_LIST_DEF (def_list);
      return true;
    }

  return false;
}


/* InstructionData.  Contains information about the insn pattern.  */
struct idata_def
{
  /* Type of the insn.
     o CALL_INSN - Call insn
     o JUMP_INSN - Jump insn
     o INSN - INSN that cannot be cloned
     o USE - INSN that can be cloned
     o SET - INSN that can be cloned and is separable into lhs and rhs
     o PC - simplejump.  Insns that simply redirect control flow should not
     have any dependencies.  Sched-deps.c, though, might consider them as
     producers or consumers of certain registers.  To avoid that we handle
     dependency for simple jumps ourselves.  */
  int type;

  /* If insn is a SET, this is its left hand side.  */
  rtx lhs;

  /* If insn is a SET, this is its right hand side.  */
  rtx rhs;

  /* Registers that are set/used by this insn.  This info is now gathered
     via sched-deps.c.  The downside of this is that we also use live info
     from flow that is accumulated in the basic blocks.  These two infos
     can be slightly inconsistent, hence in the beginning we make a pass
     through the CFG and calculate the conservative solution for the info in
     basic blocks.  When this scheduler is switched to use dataflow,
     this can be unified as df gives us both per basic block and per
     instruction info.  Actually, we don't do that pass and just hope
     for the best.  */
  regset reg_sets;

  regset reg_clobbers;

  regset reg_uses;
};

#define IDATA_TYPE(ID) ((ID)->type)
#define IDATA_LHS(ID) ((ID)->lhs)
#define IDATA_RHS(ID) ((ID)->rhs)
#define IDATA_REG_SETS(ID) ((ID)->reg_sets)
#define IDATA_REG_USES(ID) ((ID)->reg_uses)
#define IDATA_REG_CLOBBERS(ID) ((ID)->reg_clobbers)

/* Type to represent all the info needed to emit an insn.
   This is a virtual equivalent of the insn.
   Every insn in the stream has an associated vinsn.  This is used
   to reduce memory consumption, basing on the fact that many insns
   don't change through the scheduler.

   A vinsn can be either normal or unique.
   * A normal vinsn is one that can be cloned multiple times and typically
   corresponds to a normal instruction.

   * A unique vinsn derives from a CALL, an ASM, a JUMP (for a while) and
   other unusual stuff.  Such a vinsn is described by its INSN field, which
   is a reference to the original instruction.  */
struct vinsn_def
{
  /* Associated insn.  */
  rtx insn_rtx;

  /* Its description.  */
  struct idata_def id;

  /* Hash of vinsn.  It is computed either from pattern or from rhs using
     hash_rtx.  It is not placed in ID for faster compares.  */
  unsigned hash;

  /* Hash of the insn_rtx pattern.  */
  unsigned hash_rtx;

  /* Smart pointer counter.  */
  int count;

  /* Cached cost of the vinsn.  To access it please use vinsn_cost ().  */
  int cost;

  /* Mark insns that may trap so we don't move them through jumps.  */
  bool may_trap_p;
};

#define VINSN_INSN_RTX(VI) ((VI)->insn_rtx)
#define VINSN_PATTERN(VI) (PATTERN (VINSN_INSN_RTX (VI)))

#define VINSN_ID(VI) (&((VI)->id))
#define VINSN_HASH(VI) ((VI)->hash)
#define VINSN_HASH_RTX(VI) ((VI)->hash_rtx)
#define VINSN_TYPE(VI) (IDATA_TYPE (VINSN_ID (VI)))
#define VINSN_SEPARABLE_P(VI) (VINSN_TYPE (VI) == SET)
#define VINSN_CLONABLE_P(VI) (VINSN_SEPARABLE_P (VI) || VINSN_TYPE (VI) == USE)
#define VINSN_UNIQUE_P(VI) (!VINSN_CLONABLE_P (VI))
#define VINSN_LHS(VI) (IDATA_LHS (VINSN_ID (VI)))
#define VINSN_RHS(VI) (IDATA_RHS (VINSN_ID (VI)))
#define VINSN_REG_SETS(VI) (IDATA_REG_SETS (VINSN_ID (VI)))
#define VINSN_REG_USES(VI) (IDATA_REG_USES (VINSN_ID (VI)))
#define VINSN_REG_CLOBBERS(VI) (IDATA_REG_CLOBBERS (VINSN_ID (VI)))
#define VINSN_COUNT(VI) ((VI)->count)
#define VINSN_MAY_TRAP_P(VI) ((VI)->may_trap_p)
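
/* Example (an illustrative sketch, not part of the original interface):
   vinsns are reference counted through VINSN_COUNT, so code that stores
   a vinsn beyond the current statement pairs attach/detach calls
   (both declared near the end of this header):

     vinsn_attach (vi);          // VINSN_COUNT (vi) goes up
     ... keep vi somewhere ...
     vinsn_detach (vi);          // vinsn is freed when the count hits zero
*/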


/* An entry of the hashtable describing transformations that happened when
   moving up through an insn.  */
struct transformed_insns
{
  /* Previous vinsn.  Used to find the proper element.  */
  vinsn_t vinsn_old;

  /* A new vinsn.  */
  vinsn_t vinsn_new;

  /* Speculative status.  */
  ds_t ds;

  /* Type of the transformation that happened.  */
  enum local_trans_type type;

  /* Whether a conflict on the target register happened.  */
  BOOL_BITFIELD was_target_conflict : 1;

  /* Whether a check was needed.  */
  BOOL_BITFIELD needs_check : 1;
};

/* Indexed by INSN_LUID, the collection of all data associated with
   a single instruction that is in the stream.  */
struct _sel_insn_data
{
  /* The expression that contains vinsn for this insn and some
     flow-sensitive data like priority.  */
  expr_def expr;

  /* If (WS_LEVEL == GLOBAL_LEVEL) then AV is empty.  */
  int ws_level;

  /* A number that helps in defining a traversing order for a region.  */
  int seqno;

  /* Liveness data computed above this insn.  */
  regset live;

  /* An INSN_UID bit is set when the deps analysis result is already known.  */
  bitmap analyzed_deps;

  /* An INSN_UID bit is set when a hard dep was found, not set when
     no dependence is found.  This is meaningful only when the analyzed_deps
     bitmap has its bit set.  */
  bitmap found_deps;

  /* An INSN_UID bit is set when this is a bookkeeping insn generated from
     a parent with this uid.  If a parent is a bookkeeping copy, all its
     originators are transitively included in this set.  */
  bitmap originators;

  /* A hashtable caching the result of insn transformations through this one.  */
  htab_t transformed_insns;

  /* A context encapsulating this insn.  */
  struct deps_desc deps_context;

  /* This field is initialized at the beginning of scheduling and is used
     to handle sched group instructions.  If it is non-null, then it points
     to the instruction, which should be forced to schedule next.  Such
     instructions are unique.  */
  insn_t sched_next;

  /* Cycle at which insn was scheduled.  It is greater than zero if insn was
     scheduled.  This is used for bundling.  */
  int sched_cycle;

  /* Cycle at which insn's data will be fully ready.  */
  int ready_cycle;

  /* Speculations that are being checked by this insn.  */
  ds_t spec_checked_ds;

  /* Whether the live set is valid or not.  */
  BOOL_BITFIELD live_valid_p : 1;
  /* Insn is an ASM.  */
  BOOL_BITFIELD asm_p : 1;

  /* True when an insn is scheduled after we've determined that a stall is
     required.
     This is used when emulating the Haifa scheduler for bundling.  */
  BOOL_BITFIELD after_stall_p : 1;
};

typedef struct _sel_insn_data sel_insn_data_def;
typedef sel_insn_data_def *sel_insn_data_t;

DEF_VEC_O (sel_insn_data_def);
DEF_VEC_ALLOC_O (sel_insn_data_def, heap);
extern VEC (sel_insn_data_def, heap) *s_i_d;

/* Accessor macros for s_i_d.  */
#define SID(INSN) (VEC_index (sel_insn_data_def, s_i_d, INSN_LUID (INSN)))
#define SID_BY_UID(UID) (VEC_index (sel_insn_data_def, s_i_d, LUID_BY_UID (UID)))

extern sel_insn_data_def insn_sid (insn_t);

#define INSN_ASM_P(INSN) (SID (INSN)->asm_p)
#define INSN_SCHED_NEXT(INSN) (SID (INSN)->sched_next)
#define INSN_ANALYZED_DEPS(INSN) (SID (INSN)->analyzed_deps)
#define INSN_FOUND_DEPS(INSN) (SID (INSN)->found_deps)
#define INSN_DEPS_CONTEXT(INSN) (SID (INSN)->deps_context)
#define INSN_ORIGINATORS(INSN) (SID (INSN)->originators)
#define INSN_ORIGINATORS_BY_UID(UID) (SID_BY_UID (UID)->originators)
#define INSN_TRANSFORMED_INSNS(INSN) (SID (INSN)->transformed_insns)

#define INSN_EXPR(INSN) (&SID (INSN)->expr)
#define INSN_LIVE(INSN) (SID (INSN)->live)
#define INSN_LIVE_VALID_P(INSN) (SID (INSN)->live_valid_p)
#define INSN_VINSN(INSN) (EXPR_VINSN (INSN_EXPR (INSN)))
#define INSN_TYPE(INSN) (VINSN_TYPE (INSN_VINSN (INSN)))
#define INSN_SIMPLEJUMP_P(INSN) (INSN_TYPE (INSN) == PC)
#define INSN_LHS(INSN) (VINSN_LHS (INSN_VINSN (INSN)))
#define INSN_RHS(INSN) (VINSN_RHS (INSN_VINSN (INSN)))
#define INSN_REG_SETS(INSN) (VINSN_REG_SETS (INSN_VINSN (INSN)))
#define INSN_REG_CLOBBERS(INSN) (VINSN_REG_CLOBBERS (INSN_VINSN (INSN)))
#define INSN_REG_USES(INSN) (VINSN_REG_USES (INSN_VINSN (INSN)))
#define INSN_SCHED_TIMES(INSN) (EXPR_SCHED_TIMES (INSN_EXPR (INSN)))
#define INSN_SEQNO(INSN) (SID (INSN)->seqno)
#define INSN_AFTER_STALL_P(INSN) (SID (INSN)->after_stall_p)
#define INSN_SCHED_CYCLE(INSN) (SID (INSN)->sched_cycle)
#define INSN_READY_CYCLE(INSN) (SID (INSN)->ready_cycle)
#define INSN_SPEC_CHECKED_DS(INSN) (SID (INSN)->spec_checked_ds)
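
/* Example (an illustrative sketch, not part of the original interface):
   the INSN_* macros chain through SID, so per-insn data reads like plain
   field access.  A hypothetical debug helper:

     static void
     dump_insn_sched_info (insn_t insn)
     {
       fprintf (stderr, "uid=%d seqno=%d cycle=%d times=%d\n",
                INSN_UID (insn), INSN_SEQNO (insn),
                INSN_SCHED_CYCLE (insn), INSN_SCHED_TIMES (insn));
     }
*/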

/* The global level shows whether an insn is valid or not.  */
extern int global_level;

#define INSN_WS_LEVEL(INSN) (SID (INSN)->ws_level)

extern av_set_t get_av_set (insn_t);
extern int get_av_level (insn_t);

#define AV_SET(INSN) (get_av_set (INSN))
#define AV_LEVEL(INSN) (get_av_level (INSN))
#define AV_SET_VALID_P(INSN) (AV_LEVEL (INSN) == global_level)

/* A list of fences currently in the works.  */
extern flist_t fences;

/* A NOP pattern used as a placeholder for real insns.  */
extern rtx nop_pattern;

/* An insn that is 'contained' in the EXIT block.  */
extern rtx exit_insn;

/* Provide a separate luid for the insn.  */
#define INSN_INIT_TODO_LUID (1)

/* Initialize s_s_i_d.  */
#define INSN_INIT_TODO_SSID (2)

/* Initialize data for a simplejump.  */
#define INSN_INIT_TODO_SIMPLEJUMP (4)
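
/* Example (an illustrative sketch, not part of the original interface):
   the INSN_INIT_TODO_* values are bit flags and can be OR'ed together
   when requesting initialization work for new insns, e.g.:

     int todo = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID;
*/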

/* Return true if INSN is a local NOP.  The nop is local in the sense that
   it was emitted by the scheduler as a temporary insn and will soon be
   deleted.  These nops are identified by their pattern.  */
#define INSN_NOP_P(INSN) (PATTERN (INSN) == nop_pattern)

/* Return true if INSN is linked into the instruction stream.
   NB: It is impossible for INSN to have one field null and the other not
   null: gcc_assert ((PREV_INSN (INSN) == NULL_RTX)
   == (NEXT_INSN (INSN) == NULL_RTX)) is valid.  */
#define INSN_IN_STREAM_P(INSN) (PREV_INSN (INSN) && NEXT_INSN (INSN))

/* Return true if INSN is in the current fence.  */
#define IN_CURRENT_FENCE_P(INSN) (flist_lookup (fences, INSN) != NULL)

/* Marks a loop as being considered for pipelining.  */
#define MARK_LOOP_FOR_PIPELINING(LOOP) ((LOOP)->aux = (void *)(size_t)(1))
#define LOOP_MARKED_FOR_PIPELINING_P(LOOP) ((size_t)((LOOP)->aux))

/* Saved loop preheader to transfer when scheduling the loop.  */
#define LOOP_PREHEADER_BLOCKS(LOOP) ((size_t)((LOOP)->aux) == 1         \
                                     ? NULL                             \
                                     : ((VEC(basic_block, heap) *) (LOOP)->aux))
#define SET_LOOP_PREHEADER_BLOCKS(LOOP,BLOCKS) ((LOOP)->aux             \
                                                = (BLOCKS != NULL       \
                                                   ? BLOCKS             \
                                                   : (LOOP)->aux))
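
/* An illustrative note on the encoding above (not part of the original
   comments): the loop's aux field is overloaded.  MARK_LOOP_FOR_PIPELINING
   stores the sentinel value 1, so LOOP_PREHEADER_BLOCKS treats aux == 1 as
   "no saved preheader" and any other non-null value as a vector of
   preheader blocks:

     MARK_LOOP_FOR_PIPELINING (loop);             // aux = 1
     ...
     SET_LOOP_PREHEADER_BLOCKS (loop, blocks);    // aux = blocks vector
     VEC (basic_block, heap) *pre = LOOP_PREHEADER_BLOCKS (loop);
*/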

extern bitmap blocks_to_reschedule;


/* A variable to track which part of rtx we are scanning in
   sched-deps.c: sched_analyze_insn ().  */
enum deps_where_def
  {
    DEPS_IN_INSN,
    DEPS_IN_LHS,
    DEPS_IN_RHS,
    DEPS_IN_NOWHERE
  };
typedef enum deps_where_def deps_where_t;


/* Per basic block data for the whole CFG.  */
typedef struct
{
  /* For each bb header this field contains a set of live registers.
     For all other insns this field is NULL.
     We also need to know LV sets for the instructions that are immediately
     after the border of the region.  */
  regset lv_set;

  /* Status of LV_SET.
     true - block has a usable LV_SET.
     false - block's LV_SET should be recomputed.  */
  bool lv_set_valid_p;
} sel_global_bb_info_def;

typedef sel_global_bb_info_def *sel_global_bb_info_t;

DEF_VEC_O (sel_global_bb_info_def);
DEF_VEC_ALLOC_O (sel_global_bb_info_def, heap);

/* Per basic block data.  This array is indexed by basic block index.  */
extern VEC (sel_global_bb_info_def, heap) *sel_global_bb_info;

extern void sel_extend_global_bb_info (void);
extern void sel_finish_global_bb_info (void);

/* Get data for BB.  */
#define SEL_GLOBAL_BB_INFO(BB)                                  \
  (VEC_index (sel_global_bb_info_def, sel_global_bb_info, (BB)->index))

/* Access macros.  */
#define BB_LV_SET(BB) (SEL_GLOBAL_BB_INFO (BB)->lv_set)
#define BB_LV_SET_VALID_P(BB) (SEL_GLOBAL_BB_INFO (BB)->lv_set_valid_p)

/* Per basic block data for the region.  */
typedef struct
{
  /* This insn stream is constructed in such a way that it should be
     traversed by the PREV_INSN field - (*not* NEXT_INSN).  */
  rtx note_list;

  /* Cached availability set at the beginning of a block.
     See also AV_LEVEL () for conditions when this av_set can be used.  */
  av_set_t av_set;

  /* If (AV_LEVEL == GLOBAL_LEVEL) then AV is valid.  */
  int av_level;
} sel_region_bb_info_def;

typedef sel_region_bb_info_def *sel_region_bb_info_t;

DEF_VEC_O (sel_region_bb_info_def);
DEF_VEC_ALLOC_O (sel_region_bb_info_def, heap);

/* Per basic block data.  This array is indexed by basic block index.  */
extern VEC (sel_region_bb_info_def, heap) *sel_region_bb_info;

/* Get data for BB.  */
#define SEL_REGION_BB_INFO(BB) (VEC_index (sel_region_bb_info_def,      \
                                           sel_region_bb_info, (BB)->index))

/* Get BB's note_list.
   A note_list is a list of various notes that were scattered across BB
   before scheduling, and will be appended at the beginning of BB after
   scheduling is finished.  */
#define BB_NOTE_LIST(BB) (SEL_REGION_BB_INFO (BB)->note_list)

#define BB_AV_SET(BB) (SEL_REGION_BB_INFO (BB)->av_set)
#define BB_AV_LEVEL(BB) (SEL_REGION_BB_INFO (BB)->av_level)
#define BB_AV_SET_VALID_P(BB) (BB_AV_LEVEL (BB) == global_level)

/* Used in bb_in_ebb_p.  */
extern bitmap_head *forced_ebb_heads;

/* The loop nest being pipelined.  */
extern struct loop *current_loop_nest;

/* Saves pipelined blocks.  Bitmap is indexed by bb->index.  */
extern sbitmap bbs_pipelined;

/* Various flags.  */
extern bool enable_moveup_set_path_p;
extern bool pipelining_p;
extern bool bookkeeping_p;
extern int max_insns_to_rename;
extern bool preheader_removed;

/* Software lookahead window size.
   According to the results in Nakatani and Ebcioglu [1993], a window size
   of 16 is enough to extract most ILP in integer code.  */
#define MAX_WS (PARAM_VALUE (PARAM_SELSCHED_MAX_LOOKAHEAD))

extern regset sel_all_regs;


/* Successor iterator backend.  */
typedef struct
{
  /* True if we're at BB end.  */
  bool bb_end;

  /* An edge on which we're iterating.  */
  edge e1;

  /* The previous edge saved after skipping empty blocks.  */
  edge e2;

  /* Edge iterator used when there are successors in other basic blocks.  */
  edge_iterator ei;

  /* Successor block we're traversing.  */
  basic_block bb;

  /* Flags that are passed to the iterator.  We return only successors
     that comply to these flags.  */
  short flags;

  /* When flags include SUCCS_ALL, this will be set to the exact type
     of the successor we're traversing now.  */
  short current_flags;

  /* When skipping to loop exits, information about the loop exits is
     saved here.  */
  int current_exit;
  VEC (edge, heap) *loop_exits;
} succ_iterator;

/* A structure returning all of a successor's information.  */
struct succs_info
{
  /* Flags that these successors were computed with.  */
  short flags;

  /* Successors that correspond to the flags.  */
  insn_vec_t succs_ok;

  /* Their probabilities.  As of now, we don't need this for other
     successors.  */
  VEC(int,heap) *probs_ok;

  /* Other successors.  */
  insn_vec_t succs_other;

  /* Probability of all successors.  */
  int all_prob;

  /* The number of all successors.  */
  int all_succs_n;

  /* The number of good successors.  */
  int succs_ok_n;
};

/* Some needed definitions.  */
extern basic_block after_recovery;

extern insn_t sel_bb_head (basic_block);
extern insn_t sel_bb_end (basic_block);
extern bool sel_bb_empty_p (basic_block);
extern bool in_current_region_p (basic_block);

/* True when BB is a header of an inner loop.  */
static inline bool
inner_loop_header_p (basic_block bb)
{
  struct loop *inner_loop;

  if (!current_loop_nest)
    return false;

  if (bb == EXIT_BLOCK_PTR)
    return false;

  inner_loop = bb->loop_father;
  if (inner_loop == current_loop_nest)
    return false;

  /* If the successor belongs to another loop.  */
  if (bb == inner_loop->header
      && flow_bb_inside_loop_p (current_loop_nest, bb))
    {
      /* Could be '=' here because of wrong loop depths.  */
      gcc_assert (loop_depth (inner_loop) >= loop_depth (current_loop_nest));
      return true;
    }

  return false;
}

/* Return exit edges of LOOP, filtering out edges with the same dest bb.  */
static inline VEC (edge, heap) *
get_loop_exit_edges_unique_dests (const struct loop *loop)
{
  VEC (edge, heap) *edges = NULL;
  struct loop_exit *exit;

  gcc_assert (loop->latch != EXIT_BLOCK_PTR
              && current_loops->state & LOOPS_HAVE_RECORDED_EXITS);

  for (exit = loop->exits->next; exit->e; exit = exit->next)
    {
      int i;
      edge e;
      bool was_dest = false;

      for (i = 0; VEC_iterate (edge, edges, i, e); i++)
        if (e->dest == exit->e->dest)
          {
            was_dest = true;
            break;
          }

      if (!was_dest)
        VEC_safe_push (edge, heap, edges, exit->e);
    }
  return edges;
}

/* Return true when BB is empty or consists of a single local NOP.  */
static bool
sel_bb_empty_or_nop_p (basic_block bb)
{
  insn_t first = sel_bb_head (bb), last;

  if (first == NULL_RTX)
    return true;

  if (!INSN_NOP_P (first))
    return false;

  if (bb == EXIT_BLOCK_PTR)
    return false;

  last = sel_bb_end (bb);
  if (first != last)
    return false;

  return true;
}

/* Collect all loop exits recursively, skipping empty BBs between them.
   E.g. if BB is a loop header which has several loop exits,
   traverse all of them and if any of them turns out to be another loop header
   (after skipping empty BBs), add its loop exits to the resulting vector
   as well.  */
static inline VEC(edge, heap) *
get_all_loop_exits (basic_block bb)
{
  VEC(edge, heap) *exits = NULL;

  /* If bb is empty, and we're skipping to loop exits, then
     consider bb as a possible gate to the inner loop now.  */
  while (sel_bb_empty_or_nop_p (bb)
         && in_current_region_p (bb)
         && EDGE_COUNT (bb->succs) > 0)
    {
      bb = single_succ (bb);

      /* This empty block could only lead outside the region.  */
      gcc_assert (! in_current_region_p (bb));
    }

  /* And now check whether we should skip over an inner loop.  */
  if (inner_loop_header_p (bb))
    {
      struct loop *this_loop;
      struct loop *pred_loop = NULL;
      int i;
      edge e;

      for (this_loop = bb->loop_father;
           this_loop && this_loop != current_loop_nest;
           this_loop = loop_outer (this_loop))
        pred_loop = this_loop;

      this_loop = pred_loop;
      gcc_assert (this_loop != NULL);

      exits = get_loop_exit_edges_unique_dests (this_loop);

      /* Traverse all loop headers.  */
      for (i = 0; VEC_iterate (edge, exits, i, e); i++)
        if (in_current_region_p (e->dest)
            || inner_loop_header_p (e->dest))
          {
            VEC(edge, heap) *next_exits = get_all_loop_exits (e->dest);

            if (next_exits)
              {
                int j;
                edge ne;

                /* Add all loop exits for the current edge into the
                   resulting vector.  */
                for (j = 0; VEC_iterate (edge, next_exits, j, ne); j++)
                  VEC_safe_push (edge, heap, exits, ne);

                /* Remove the original edge.  */
                VEC_ordered_remove (edge, exits, i);

                /* Decrease the loop counter so we won't skip anything.  */
                i--;
                continue;
              }
          }
    }

  return exits;
}

/* Flags to pass to compute_succs_info and FOR_EACH_SUCC.
   Any successor will fall into exactly one category.  */

/* Include normal successors.  */
#define SUCCS_NORMAL (1)

/* Include back-edge successors.  */
#define SUCCS_BACK (2)

/* Include successors that are outside of the current region.  */
#define SUCCS_OUT (4)

/* When pipelining of the outer loops is enabled, skip innermost loops
   to their exits.  */
#define SUCCS_SKIP_TO_LOOP_EXITS (8)

/* Include all successors.  */
#define SUCCS_ALL (SUCCS_NORMAL | SUCCS_BACK | SUCCS_OUT)

/* We need to return a succ_iterator to avoid an 'uninitialized' warning
   during bootstrap.  */
static inline succ_iterator
_succ_iter_start (insn_t *succp, insn_t insn, int flags)
{
  succ_iterator i;

  basic_block bb = BLOCK_FOR_INSN (insn);

  gcc_assert (INSN_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn));

  i.flags = flags;

  /* Avoid 'uninitialized' warning.  */
  *succp = NULL;
  i.e1 = NULL;
  i.e2 = NULL;
  i.bb = bb;
  i.current_flags = 0;
  i.current_exit = -1;
  i.loop_exits = NULL;

  if (bb != EXIT_BLOCK_PTR && BB_END (bb) != insn)
    {
      i.bb_end = false;

      /* Avoid 'uninitialized' warning.  */
      i.ei.index = 0;
      i.ei.container = NULL;
    }
  else
    {
      i.ei = ei_start (bb->succs);
      i.bb_end = true;
    }

  return i;
}

static inline bool
_succ_iter_cond (succ_iterator *ip, rtx *succp, rtx insn,
                 bool check (edge, succ_iterator *))
{
  if (!ip->bb_end)
    {
      /* When we're in the middle of a basic block, return
         the next insn immediately, but only when SUCCS_NORMAL is set.  */
      if (*succp != NULL || (ip->flags & SUCCS_NORMAL) == 0)
        return false;

      *succp = NEXT_INSN (insn);
      ip->current_flags = SUCCS_NORMAL;
      return true;
    }
  else
    {
      while (1)
        {
          edge e_tmp = NULL;

          /* First, try loop exits, if we have them.  */
          if (ip->loop_exits)
            {
              do
                {
                  VEC_iterate (edge, ip->loop_exits,
                               ip->current_exit, e_tmp);
                  ip->current_exit++;
                }
              while (e_tmp && !check (e_tmp, ip));

              if (!e_tmp)
                VEC_free (edge, heap, ip->loop_exits);
            }

          /* If we have found a successor, then great.  */
          if (e_tmp)
            {
              ip->e1 = e_tmp;
              break;
            }

          /* If not, then try the next edge.  */
          while (ei_cond (ip->ei, &(ip->e1)))
            {
              basic_block bb = ip->e1->dest;

              /* Consider bb as a possible loop header.  */
              if ((ip->flags & SUCCS_SKIP_TO_LOOP_EXITS)
                  && flag_sel_sched_pipelining_outer_loops
                  && (!in_current_region_p (bb)
                      || BLOCK_TO_BB (ip->bb->index)
                         < BLOCK_TO_BB (bb->index)))
                {
                  /* Get all loop exits recursively.  */
                  ip->loop_exits = get_all_loop_exits (bb);

                  if (ip->loop_exits)
                    {
                      ip->current_exit = 0;
                      /* Move the iterator now, because we won't do
                         succ_iter_next until the loop exits end.  */
                      ei_next (&(ip->ei));
                      break;
                    }
                }

              /* bb is not a loop header, check as usual.  */
              if (check (ip->e1, ip))
                break;

              ei_next (&(ip->ei));
            }

          /* If loop_exits are non-null, we have found an inner loop;
             do one more iteration to fetch an edge from these exits.  */
          if (ip->loop_exits)
            continue;

          /* Otherwise, we've found an edge in the usual way.  Break now.  */
          break;
        }

      if (ip->e1)
        {
          basic_block bb = ip->e2->dest;

          if (bb == EXIT_BLOCK_PTR || bb == after_recovery)
            *succp = exit_insn;
          else
            {
              *succp = sel_bb_head (bb);

              gcc_assert (ip->flags != SUCCS_NORMAL
                          || *succp == NEXT_INSN (bb_note (bb)));
              gcc_assert (BLOCK_FOR_INSN (*succp) == bb);
            }

          return true;
        }
      else
        return false;
    }
}

static inline void
_succ_iter_next (succ_iterator *ip)
{
  gcc_assert (!ip->e2 || ip->e1);

  if (ip->bb_end && ip->e1 && !ip->loop_exits)
    ei_next (&(ip->ei));
}

/* Returns true when E1 is an eligible successor edge, possibly skipping
   empty blocks.  When E2P is not null, the resulting edge is written there.
   FLAGS are used to specify whether back edges and out-of-region edges
   should be considered.  */
static inline bool
_eligible_successor_edge_p (edge e1, succ_iterator *ip)
{
  edge e2 = e1;
  basic_block bb;
  int flags = ip->flags;
  bool src_outside_rgn = !in_current_region_p (e1->src);

  gcc_assert (flags != 0);

  if (src_outside_rgn)
    {
      /* Any successor of a block that is outside the current region is
         ineligible, except when we're skipping to loop exits.  */
      gcc_assert (flags & (SUCCS_OUT | SUCCS_SKIP_TO_LOOP_EXITS));

      if (flags & SUCCS_OUT)
        return false;
    }

  bb = e2->dest;

  /* Skip empty blocks, but be careful not to leave the region.  */
  while (1)
    {
      if (!sel_bb_empty_p (bb))
        {
          edge ne;
          basic_block nbb;

          if (!sel_bb_empty_or_nop_p (bb))
            break;

          ne = EDGE_SUCC (bb, 0);
          nbb = ne->dest;

          if (!in_current_region_p (nbb)
              && !(flags & SUCCS_OUT))
            break;

          e2 = ne;
          bb = nbb;
          continue;
        }

      if (!in_current_region_p (bb)
          && !(flags & SUCCS_OUT))
        return false;

      if (EDGE_COUNT (bb->succs) == 0)
        return false;

      e2 = EDGE_SUCC (bb, 0);
      bb = e2->dest;
    }

  /* Save the second edge for later checks.  */
  ip->e2 = e2;

  if (in_current_region_p (bb))
    {
      /* BLOCK_TO_BB sets the topological order of the region here.
         It is important to use the real predecessor here, which is ip->bb,
         as we may well have e1->src outside the current region,
         when skipping to loop exits.  */
      bool succeeds_in_top_order = (BLOCK_TO_BB (ip->bb->index)
                                    < BLOCK_TO_BB (bb->index));

      /* This is true for all cases except the last one.  */
      ip->current_flags = SUCCS_NORMAL;

      /* We are advancing forward in the region, as usual.  */
      if (succeeds_in_top_order)
        {
          /* We are skipping to loop exits here.  */
          gcc_assert (!src_outside_rgn
                      || flag_sel_sched_pipelining_outer_loops);
          return !!(flags & SUCCS_NORMAL);
        }

      /* This is a back edge.  During pipelining we ignore back edges,
         but only when they lead to the same loop.  A back edge can lead to
         the header of the outer loop, which will also be the preheader of
         the current loop.  */
      if (pipelining_p
           && e1->src->loop_father == bb->loop_father)
        return !!(flags & SUCCS_NORMAL);

      /* A back edge should be requested explicitly.  */
      ip->current_flags = SUCCS_BACK;
      return !!(flags & SUCCS_BACK);
    }

  ip->current_flags = SUCCS_OUT;
  return !!(flags & SUCCS_OUT);
}

#define FOR_EACH_SUCC_1(SUCC, ITER, INSN, FLAGS)                        \
  for ((ITER) = _succ_iter_start (&(SUCC), (INSN), (FLAGS));            \
       _succ_iter_cond (&(ITER), &(SUCC), (INSN), _eligible_successor_edge_p); \
       _succ_iter_next (&(ITER)))

#define FOR_EACH_SUCC(SUCC, ITER, INSN)                 \
  FOR_EACH_SUCC_1 (SUCC, ITER, INSN, SUCCS_NORMAL)

/* Return the current edge along which a successor was built.  */
#define SUCC_ITER_EDGE(ITER) ((ITER)->e1)
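
/* Example (an illustrative sketch, not part of the original interface):
   walking all eligible successors of INSN, including back edges and
   out-of-region ones, and dispatching on the category that matched:

     insn_t succ;
     succ_iterator si;

     FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL)
       {
         if (si.current_flags == SUCCS_BACK)
           ;  // successor reached over a back edge
         else if (si.current_flags == SUCCS_OUT)
           ;  // successor outside the current region
         else
           process_succ (succ);  // hypothetical handler for normal succs
       }
*/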

/* Return the next block of BB without running into inconsistencies.  */
static inline basic_block
bb_next_bb (basic_block bb)
{
  switch (EDGE_COUNT (bb->succs))
    {
    case 0:
      return bb->next_bb;

    case 1:
      return single_succ (bb);

    case 2:
      return FALLTHRU_EDGE (bb)->dest;

    default:
      return bb->next_bb;
    }

  gcc_unreachable ();
}



/* Functions that are used in sel-sched.c.  */

/* List functions.  */
extern ilist_t ilist_copy (ilist_t);
extern ilist_t ilist_invert (ilist_t);
extern void blist_add (blist_t *, insn_t, ilist_t, deps_t);
extern void blist_remove (blist_t *);
extern void flist_tail_init (flist_tail_t);

extern fence_t flist_lookup (flist_t, insn_t);
extern void flist_clear (flist_t *);
extern void def_list_add (def_list_t *, insn_t, bool);

/* Target context functions.  */
extern tc_t create_target_context (bool);
extern void set_target_context (tc_t);
extern void reset_target_context (tc_t, bool);

/* Deps context functions.  */
extern void advance_deps_context (deps_t, insn_t);

/* Fence functions.  */
extern void init_fences (insn_t);
extern void add_clean_fence_to_fences (flist_tail_t, insn_t, fence_t);
extern void add_dirty_fence_to_fences (flist_tail_t, insn_t, fence_t);
extern void move_fence_to_fences (flist_t, flist_tail_t);

/* Pool functions.  */
extern regset get_regset_from_pool (void);
extern regset get_clear_regset_from_pool (void);
extern void return_regset_to_pool (regset);
extern void free_regset_pool (void);

extern insn_t get_nop_from_pool (insn_t);
extern void return_nop_to_pool (insn_t, bool);
extern void free_nop_pool (void);

/* Vinsn functions.  */
extern bool vinsn_separable_p (vinsn_t);
extern bool vinsn_cond_branch_p (vinsn_t);
extern void recompute_vinsn_lhs_rhs (vinsn_t);
extern int sel_vinsn_cost (vinsn_t);
extern insn_t sel_gen_insn_from_rtx_after (rtx, expr_t, int, insn_t);
extern insn_t sel_gen_recovery_insn_from_rtx_after (rtx, expr_t, int, insn_t);
extern insn_t sel_gen_insn_from_expr_after (expr_t, vinsn_t, int, insn_t);
extern insn_t sel_move_insn (expr_t, int, insn_t);
extern void vinsn_attach (vinsn_t);
extern void vinsn_detach (vinsn_t);
extern vinsn_t vinsn_copy (vinsn_t, bool);
extern bool vinsn_equal_p (vinsn_t, vinsn_t);

/* EXPR functions.  */
extern void copy_expr (expr_t, expr_t);
extern void copy_expr_onside (expr_t, expr_t);
extern void merge_expr_data (expr_t, expr_t, insn_t);
extern void merge_expr (expr_t, expr_t, insn_t);
extern void clear_expr (expr_t);
extern unsigned expr_dest_regno (expr_t);
extern rtx expr_dest_reg (expr_t);
extern int find_in_history_vect (VEC(expr_history_def, heap) *,
                                 rtx, vinsn_t, bool);
extern void insert_in_history_vect (VEC(expr_history_def, heap) **,
                                    unsigned, enum local_trans_type,
                                    vinsn_t, vinsn_t, ds_t);
extern void mark_unavailable_targets (av_set_t, av_set_t, regset);
extern int speculate_expr (expr_t, ds_t);

/* Av set functions.  */
extern void av_set_add (av_set_t *, expr_t);
extern void av_set_iter_remove (av_set_iterator *);
extern expr_t av_set_lookup (av_set_t, vinsn_t);
extern expr_t merge_with_other_exprs (av_set_t *, av_set_iterator *, expr_t);
extern bool av_set_is_in_p (av_set_t, vinsn_t);
extern av_set_t av_set_copy (av_set_t);
extern void av_set_union_and_clear (av_set_t *, av_set_t *, insn_t);
extern void av_set_union_and_live (av_set_t *, av_set_t *, regset, regset, insn_t);
extern void av_set_clear (av_set_t *);
extern void av_set_leave_one_nonspec (av_set_t *);
extern expr_t av_set_element (av_set_t, int);
extern void av_set_substract_cond_branches (av_set_t *);
extern void av_set_split_usefulness (av_set_t, int, int);
extern void av_set_code_motion_filter (av_set_t *, av_set_t);

extern void sel_save_haifa_priorities (void);

extern void sel_init_global_and_expr (bb_vec_t);
extern void sel_finish_global_and_expr (void);

extern regset compute_live (insn_t);
extern bool register_unavailable_p (regset, rtx);

/* Dependence analysis functions.  */
extern void sel_clear_has_dependence (void);
extern ds_t has_dependence_p (expr_t, insn_t, ds_t **);

extern int tick_check_p (expr_t, deps_t, fence_t);

/* Functions to work with insns.  */
extern bool lhs_of_insn_equals_to_dest_p (insn_t, rtx);
extern bool insn_eligible_for_subst_p (insn_t);
extern void get_dest_and_mode (rtx, rtx *, enum machine_mode *);

extern bool bookkeeping_can_be_created_if_moved_through_p (insn_t);
extern bool sel_remove_insn (insn_t, bool, bool);
extern bool bb_header_p (insn_t);
extern void sel_init_invalid_data_sets (insn_t);
extern bool insn_at_boundary_p (insn_t);

/* Basic block and CFG functions.  */

extern insn_t sel_bb_head (basic_block);
extern bool sel_bb_head_p (insn_t);
extern insn_t sel_bb_end (basic_block);
extern bool sel_bb_end_p (insn_t);
extern bool sel_bb_empty_p (basic_block);

extern bool in_current_region_p (basic_block);
extern basic_block fallthru_bb_of_jump (rtx);

extern void sel_init_bbs (bb_vec_t);
extern void sel_finish_bbs (void);

extern struct succs_info * compute_succs_info (insn_t, short);
extern void free_succs_info (struct succs_info *);
extern bool sel_insn_has_single_succ_p (insn_t, int);
extern bool sel_num_cfg_preds_gt_1 (insn_t);
extern int get_seqno_by_preds (rtx);

extern bool bb_ends_ebb_p (basic_block);
extern bool in_same_ebb_p (insn_t, insn_t);

extern bool tidy_control_flow (basic_block, bool);
extern void free_bb_note_pool (void);

extern void purge_empty_blocks (void);
extern basic_block sel_split_edge (edge);
extern basic_block sel_create_recovery_block (insn_t);
extern bool sel_redirect_edge_and_branch (edge, basic_block);
extern void sel_redirect_edge_and_branch_force (edge, basic_block);
extern void sel_init_pipelining (void);
extern void sel_finish_pipelining (void);
extern void sel_sched_region (int);
extern loop_p get_loop_nest_for_rgn (unsigned int);
extern bool considered_for_pipelining_p (struct loop *);
extern void make_region_from_loop_preheader (VEC(basic_block, heap) **);
extern void sel_add_loop_preheaders (bb_vec_t *);
extern bool sel_is_loop_preheader_p (basic_block);
extern void clear_outdated_rtx_info (basic_block);
extern void free_data_sets (basic_block);
extern void exchange_data_sets (basic_block, basic_block);
extern void copy_data_sets (basic_block, basic_block);

extern void sel_register_cfg_hooks (void);
extern void sel_unregister_cfg_hooks (void);

/* Expression transformation routines.  */
extern rtx create_insn_rtx_from_pattern (rtx, rtx);
extern vinsn_t create_vinsn_from_insn_rtx (rtx, bool);
extern rtx create_copy_of_insn_rtx (rtx);
extern void change_vinsn_in_expr (expr_t, vinsn_t);

/* Various initialization functions.  */
extern void init_lv_sets (void);
extern void free_lv_sets (void);
extern void setup_nop_and_exit_insns (void);
extern void free_nop_and_exit_insns (void);
extern void free_data_for_scheduled_insn (insn_t);
extern void setup_nop_vinsn (void);
extern void free_nop_vinsn (void);
extern void sel_set_sched_flags (void);
extern void sel_setup_sched_infos (void);
extern void alloc_sched_pools (void);
extern void free_sched_pools (void);

#endif /* GCC_SEL_SCHED_IR_H */