/* Vectorizer
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_TREE_VECTORIZER_H
#define GCC_TREE_VECTORIZER_H

#include "tree-data-ref.h"

typedef source_location LOC;
#define UNKNOWN_LOC UNKNOWN_LOCATION
#define EXPR_LOC(e) EXPR_LOCATION(e)
#define LOC_FILE(l) LOCATION_FILE (l)
#define LOC_LINE(l) LOCATION_LINE (l)

/* Used for naming of new temporaries.  */
enum vect_var_kind {
  vect_simple_var,
  vect_pointer_var,
  vect_scalar_var
};

/* Defines type of operation.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};

/* Define type of available alignment support.  */
enum dr_alignment_support {
  dr_unaligned_unsupported,
  dr_unaligned_supported,
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  dr_aligned
};

/* Define type of def-use cross-iteration cycle.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,
  vect_external_def,
  vect_internal_def,
  vect_induction_def,
  vect_reduction_def,
  vect_double_reduction_def,
  vect_nested_cycle,
  vect_unknown_def_type
};

#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))

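/* Illustrative sketch (not part of the original header): a caller would
   typically obtain a vect_def_type by classifying an operand with
   vect_is_simple_use (declared further below) and then test it against the
   cycle kinds, e.g.

     enum vect_def_type dt;
     tree def;
     gimple def_stmt;
     if (vect_is_simple_use (op, loop_vinfo, NULL, &def_stmt, &def, &dt)
         && VECTORIZABLE_CYCLE_DEF (dt))
       ...   op is defined by a reduction or a nested cycle ...

   The variables op and loop_vinfo above are hypothetical.  */
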
/* Define verbosity levels.  */
74
enum verbosity_levels {
75
  REPORT_NONE,
76
  REPORT_VECTORIZED_LOCATIONS,
77
  REPORT_UNVECTORIZED_LOCATIONS,
78
  REPORT_COST,
79
  REPORT_ALIGNMENT,
80
  REPORT_DR_DETAILS,
81
  REPORT_BAD_FORM_LOOPS,
82
  REPORT_OUTER_LOOPS,
83
  REPORT_SLP,
84
  REPORT_DETAILS,
85
  /* New verbosity levels should be added before this one.  */
86
  MAX_VERBOSITY_LEVEL
87
};
88
 
89
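/* Illustrative sketch (not part of the original header): dump statements in
   the vectorizer are normally guarded by vect_print_dump_info (declared near
   the end of this file) at one of the levels above, e.g.

     if (vect_print_dump_info (REPORT_DETAILS))
       fprintf (vect_dump, "=== analyzing loop ===");

   The message text is hypothetical; vect_dump is the dump FILE declared
   below.  */
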
/************************************************************************
  SLP
 ************************************************************************/

/* A computation tree of an SLP instance. Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
typedef struct _slp_tree {
  /* Only binary and unary operations are supported. LEFT child corresponds to
     the first operand and RIGHT child to the second if the operation is
     binary.  */
  struct _slp_tree *left;
  struct _slp_tree *right;
  /* A group of scalar stmts to be vectorized together.  */
  VEC (gimple, heap) *stmts;
  /* Vectorized stmt/s.  */
  VEC (gimple, heap) *vec_stmts;
  /* Number of vector stmts that are created to replace the group of scalar
     stmts. It is calculated during the transformation phase as the number of
     scalar elements in one scalar iteration (GROUP_SIZE) multiplied by VF
     divided by vector size.  */
  unsigned int vec_stmts_size;
  /* Vectorization costs associated with SLP node.  */
  struct
  {
    int outside_of_loop;     /* Statements generated outside loop.  */
    int inside_of_loop;      /* Statements generated inside loop.  */
  } cost;
} *slp_tree;

DEF_VEC_P(slp_tree);
DEF_VEC_ALLOC_P(slp_tree, heap);

/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef struct _slp_instance {
  /* The root of SLP tree.  */
  slp_tree root;

  /* Size of groups of scalar stmts that will be replaced by SIMD stmt/s.  */
  unsigned int group_size;

  /* The unrolling factor required to vectorize this SLP instance.  */
  unsigned int unrolling_factor;

  /* Vectorization costs associated with SLP instance.  */
  struct
  {
    int outside_of_loop;     /* Statements generated outside loop.  */
    int inside_of_loop;      /* Statements generated inside loop.  */
  } cost;

  /* Load permutation relative to the stores; NULL if there is no
     permutation.  */
  VEC (int, heap) *load_permutation;

  /* The group of nodes that contain loads of this SLP instance.  */
  VEC (slp_tree, heap) *loads;

  /* The first scalar load of the instance. The created vector loads will be
     inserted before this statement.  */
  gimple first_load;
} *slp_instance;

DEF_VEC_P(slp_instance);
DEF_VEC_ALLOC_P(slp_instance, heap);

/* Access Functions.  */
#define SLP_INSTANCE_TREE(S)                     (S)->root
#define SLP_INSTANCE_GROUP_SIZE(S)               (S)->group_size
#define SLP_INSTANCE_UNROLLING_FACTOR(S)         (S)->unrolling_factor
#define SLP_INSTANCE_OUTSIDE_OF_LOOP_COST(S)     (S)->cost.outside_of_loop
#define SLP_INSTANCE_INSIDE_OF_LOOP_COST(S)      (S)->cost.inside_of_loop
#define SLP_INSTANCE_LOAD_PERMUTATION(S)         (S)->load_permutation
#define SLP_INSTANCE_LOADS(S)                    (S)->loads
#define SLP_INSTANCE_FIRST_LOAD_STMT(S)          (S)->first_load

#define SLP_TREE_LEFT(S)                         (S)->left
#define SLP_TREE_RIGHT(S)                        (S)->right
#define SLP_TREE_SCALAR_STMTS(S)                 (S)->stmts
#define SLP_TREE_VEC_STMTS(S)                    (S)->vec_stmts
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)          (S)->vec_stmts_size
#define SLP_TREE_OUTSIDE_OF_LOOP_COST(S)         (S)->cost.outside_of_loop
#define SLP_TREE_INSIDE_OF_LOOP_COST(S)          (S)->cost.inside_of_loop

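/* Illustrative sketch (not part of the original header): because an SLP node
   has at most two children, a recursive walk over an instance typically looks
   like the following (the function name walk_slp_node is hypothetical):

     static void
     walk_slp_node (slp_tree node)
     {
       int i;
       gimple stmt;

       if (!node)
         return;
       walk_slp_node (SLP_TREE_LEFT (node));
       walk_slp_node (SLP_TREE_RIGHT (node));
       for (i = 0; VEC_iterate (gimple, SLP_TREE_SCALAR_STMTS (node), i, stmt); i++)
         ...   process each scalar stmt of the group ...
     }

   starting from SLP_INSTANCE_TREE (instance).  */
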
/*-----------------------------------------------------------------*/
/* Info on vectorized loops.                                       */
/*-----------------------------------------------------------------*/
typedef struct _loop_vec_info {

  /* The loop to which this info struct refers.  */
  struct loop *loop;

  /* The loop basic blocks.  */
  basic_block *bbs;

  /* Number of iterations.  */
  tree num_iters;
  tree num_iters_unchanged;

  /* Minimum number of iterations below which vectorization is expected to
     not be profitable (as estimated by the cost model).
     -1 indicates that vectorization will not be profitable.
     FORNOW: This field is an int. Will be a tree in the future, to represent
             values unknown at compile time.  */
  int min_profitable_iters;

  /* Is the loop vectorizable? */
  bool vectorizable;

  /* Unrolling factor  */
  int vectorization_factor;

  /* Unknown DR according to which the loop was peeled.  */
  struct data_reference *unaligned_dr;

  /* peeling_for_alignment indicates whether peeling for alignment will take
     place, and what the peeling factor should be:
     peeling_for_alignment = X means:
        If X=0: Peeling for alignment will not be applied.
        If X>0: Peel first X iterations.
        If X=-1: Generate a runtime test to calculate the number of iterations
                 to be peeled, using the dataref recorded in the field
                 unaligned_dr.  */
  int peeling_for_alignment;

  /* The mask used to check the alignment of pointers or arrays.  */
  int ptr_mask;

  /* All data references in the loop.  */
  VEC (data_reference_p, heap) *datarefs;

  /* All data dependences in the loop.  */
  VEC (ddr_p, heap) *ddrs;

  /* Data Dependence Relations defining address ranges that are candidates
     for a run-time aliasing check.  */
  VEC (ddr_p, heap) *may_alias_ddrs;

  /* Statements in the loop that have data references that are candidates for a
     runtime (loop versioning) misalignment check.  */
  VEC(gimple,heap) *may_misalign_stmts;

  /* The loop location in the source.  */
  LOC loop_line_number;

  /* All interleaving chains of stores in the loop, represented by the first
     stmt in the chain.  */
  VEC(gimple, heap) *strided_stores;

  /* All SLP instances in the loop. This is a subset of the set of STRIDED_STORES
     of the loop.  */
  VEC(slp_instance, heap) *slp_instances;

  /* The unrolling factor needed to SLP the loop. In case pure SLP is
     applied to the loop, i.e., no unrolling is needed, this is 1.  */
  unsigned slp_unrolling_factor;
} *loop_vec_info;

/* Access Functions.  */
#define LOOP_VINFO_LOOP(L)                 (L)->loop
#define LOOP_VINFO_BBS(L)                  (L)->bbs
#define LOOP_VINFO_NITERS(L)               (L)->num_iters
/* Since LOOP_VINFO_NITERS can change after prologue peeling
   retain total unchanged scalar loop iterations for cost model.  */
#define LOOP_VINFO_NITERS_UNCHANGED(L)     (L)->num_iters_unchanged
#define LOOP_VINFO_COST_MODEL_MIN_ITERS(L) (L)->min_profitable_iters
#define LOOP_VINFO_VECTORIZABLE_P(L)       (L)->vectorizable
#define LOOP_VINFO_VECT_FACTOR(L)          (L)->vectorization_factor
#define LOOP_VINFO_PTR_MASK(L)             (L)->ptr_mask
#define LOOP_VINFO_DATAREFS(L)             (L)->datarefs
#define LOOP_VINFO_DDRS(L)                 (L)->ddrs
#define LOOP_VINFO_INT_NITERS(L)           (TREE_INT_CST_LOW ((L)->num_iters))
#define LOOP_PEELING_FOR_ALIGNMENT(L)      (L)->peeling_for_alignment
#define LOOP_VINFO_UNALIGNED_DR(L)         (L)->unaligned_dr
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
#define LOOP_VINFO_LOC(L)                  (L)->loop_line_number
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)       (L)->may_alias_ddrs
#define LOOP_VINFO_STRIDED_STORES(L)       (L)->strided_stores
#define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
VEC_length (gimple, (L)->may_misalign_stmts) > 0
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)     \
VEC_length (ddr_p, (L)->may_alias_ddrs) > 0

#define NITERS_KNOWN_P(n)                     \
(host_integerp ((n),0)                        \
&& TREE_INT_CST_LOW ((n)) > 0)

#define LOOP_VINFO_NITERS_KNOWN_P(L)          \
NITERS_KNOWN_P((L)->num_iters)

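/* Illustrative sketch (not part of the original header): a common guard before
   relying on a compile-time iteration count, e.g. to compare it against the
   vectorization factor (loop_vinfo is a hypothetical variable):

     int vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
     if (LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
         && LOOP_VINFO_INT_NITERS (loop_vinfo) < vf)
       ...   too few iterations, vectorization is not worthwhile ...
*/
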
static inline loop_vec_info
loop_vec_info_for_loop (struct loop *loop)
{
  return (loop_vec_info) loop->aux;
}

static inline bool
nested_in_vect_loop_p (struct loop *loop, gimple stmt)
{
  return (loop->inner
          && (loop->inner == (gimple_bb (stmt))->loop_father));
}

typedef struct _bb_vec_info {

  basic_block bb;
  /* All interleaving chains of stores in the basic block, represented by the
     first stmt in the chain.  */
  VEC(gimple, heap) *strided_stores;

  /* All SLP instances in the basic block. This is a subset of the set of
     STRIDED_STORES of the basic block.  */
  VEC(slp_instance, heap) *slp_instances;

  /* All data references in the basic block.  */
  VEC (data_reference_p, heap) *datarefs;

  /* All data dependences in the basic block.  */
  VEC (ddr_p, heap) *ddrs;
} *bb_vec_info;

#define BB_VINFO_BB(B)              (B)->bb
#define BB_VINFO_STRIDED_STORES(B)  (B)->strided_stores
#define BB_VINFO_SLP_INSTANCES(B)   (B)->slp_instances
#define BB_VINFO_DATAREFS(B)        (B)->datarefs
#define BB_VINFO_DDRS(B)            (B)->ddrs

static inline bb_vec_info
vec_info_for_bb (basic_block bb)
{
  return (bb_vec_info) bb->aux;
}

/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  loop_exit_ctrl_vec_info_type
};

/* Indicates whether/how a variable is used in the scope of loop/basic
   block.  */
enum vect_relevant {
  vect_unused_in_scope = 0,
  /* The def is in the inner loop, and the use is in the outer loop, and the
     use is a reduction stmt.  */
  vect_used_in_outer_by_reduction,
  /* The def is in the inner loop, and the use is in the outer loop (and is
     not part of reduction).  */
  vect_used_in_outer,

  /* Defs that feed computations that end up (only) in a reduction. These
     defs may be used by non-reduction stmts, but eventually, any
     computations/values that are affected by these defs are used to compute
     a reduction (i.e. they don't get stored to memory, for example). We use
     this to identify computations whose evaluation order we are free to
     change.  */
  vect_used_by_reduction,

  vect_used_in_scope
};

/* The type of vectorization that can be applied to the stmt: regular loop-based
   vectorization; pure SLP - the stmt is a part of SLP instances and does not
   have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
   a part of an SLP instance and also must be loop-based vectorized, since it
   has uses outside SLP sequences.

   In the loop context the meanings of pure and hybrid SLP are slightly
   different. By saying that pure SLP is applied to the loop, we mean that we
   exploit only intra-iteration parallelism in the loop; i.e., the loop can be
   vectorized without doing any conceptual unrolling, because we don't pack
   together stmts from different iterations, only within a single iteration.
   Loop hybrid SLP means that we exploit both intra-iteration and
   inter-iteration parallelism (e.g., number of elements in the vector is 4
   and the slp-group-size is 2, in which case we don't have enough parallelism
   within an iteration, so we obtain the rest of the parallelism from subsequent
   iterations by unrolling the loop by 2).  */
enum slp_vect_type {
  loop_vect = 0,
  pure_slp,
  hybrid
};


typedef struct data_reference *dr_p;
DEF_VEC_P(dr_p);
DEF_VEC_ALLOC_P(dr_p,heap);

typedef struct _stmt_vec_info {

  enum stmt_vec_info_type type;

  /* The stmt to which this info struct refers.  */
  gimple stmt;

  /* The loop_vec_info with respect to which STMT is vectorized.  */
  loop_vec_info loop_vinfo;

  /* Not all stmts in the loop need to be vectorized, e.g., the increment
     of the loop induction variable and computation of array indexes.
     RELEVANT indicates whether the stmt needs to be vectorized.  */
  enum vect_relevant relevant;

  /* Indicates whether this stmt is part of a computation whose result is
     used outside the loop.  */
  bool live;

  /* The vector type to be used.  */
  tree vectype;

  /* The vectorized version of the stmt.  */
  gimple vectorized_stmt;


  /** The following is relevant only for stmts that contain a non-scalar
     data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
     at most one such data-ref.  **/

  /* Information about the data-ref (access function, etc),
     relative to the inner-most containing loop.  */
  struct data_reference *data_ref_info;

  /* Information about the data-ref relative to this loop
     nest (the loop that is being considered for vectorization).  */
  tree dr_base_address;
  tree dr_init;
  tree dr_offset;
  tree dr_step;
  tree dr_aligned_to;

  /* Stmt is part of some pattern (computation idiom).  */
  bool in_pattern_p;

  /* Used for various bookkeeping purposes, generally holding a pointer to
     some other stmt S that is in some way "related" to this stmt.
     Current use of this field is:
        If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
        true): S is the "pattern stmt" that represents (and replaces) the
        sequence of stmts that constitutes the pattern.  Similarly, the
        related_stmt of the "pattern stmt" points back to this stmt (which is
        the last stmt in the original sequence of stmts that constitutes the
        pattern).  */
  gimple related_stmt;

  /* List of datarefs that are known to have the same alignment as the dataref
     of this stmt.  */
  VEC(dr_p,heap) *same_align_refs;

  /* Classify the def of this stmt.  */
  enum vect_def_type def_type;

  /* Interleaving info.  */
  /* First data-ref in the interleaving group.  */
  gimple first_dr;
  /* Pointer to the next data-ref in the group.  */
  gimple next_dr;
  /* The size of the interleaving group.  */
  unsigned int size;
  /* For stores, number of stores from this group seen. We vectorize the last
     one.  */
  unsigned int store_count;
  /* For loads only, the gap from the previous load. For consecutive loads, GAP
     is 1.  */
  unsigned int gap;
  /* In case two or more stmts share a data-ref, this is the pointer to the
     previously detected stmt with the same dr.  */
  gimple same_dr_stmt;
  /* For loads only, if there is a store with the same location, this field is
     TRUE.  */
  bool read_write_dep;

  /* Vectorization costs associated with statement.  */
  struct
  {
    int outside_of_loop;     /* Statements generated outside loop.  */
    int inside_of_loop;      /* Statements generated inside loop.  */
  } cost;

  /* Whether the stmt is SLPed, loop-based vectorized, or both.  */
  enum slp_vect_type slp_type;

  /* The bb_vec_info with respect to which STMT is vectorized.  */
  bb_vec_info bb_vinfo;
} *stmt_vec_info;

/* Access Functions.  */
#define STMT_VINFO_TYPE(S)                 (S)->type
#define STMT_VINFO_STMT(S)                 (S)->stmt
#define STMT_VINFO_LOOP_VINFO(S)           (S)->loop_vinfo
#define STMT_VINFO_BB_VINFO(S)             (S)->bb_vinfo
#define STMT_VINFO_RELEVANT(S)             (S)->relevant
#define STMT_VINFO_LIVE_P(S)               (S)->live
#define STMT_VINFO_VECTYPE(S)              (S)->vectype
#define STMT_VINFO_VEC_STMT(S)             (S)->vectorized_stmt
#define STMT_VINFO_DATA_REF(S)             (S)->data_ref_info

#define STMT_VINFO_DR_BASE_ADDRESS(S)      (S)->dr_base_address
#define STMT_VINFO_DR_INIT(S)              (S)->dr_init
#define STMT_VINFO_DR_OFFSET(S)            (S)->dr_offset
#define STMT_VINFO_DR_STEP(S)              (S)->dr_step
#define STMT_VINFO_DR_ALIGNED_TO(S)        (S)->dr_aligned_to

#define STMT_VINFO_IN_PATTERN_P(S)         (S)->in_pattern_p
#define STMT_VINFO_RELATED_STMT(S)         (S)->related_stmt
#define STMT_VINFO_SAME_ALIGN_REFS(S)      (S)->same_align_refs
#define STMT_VINFO_DEF_TYPE(S)             (S)->def_type
#define STMT_VINFO_DR_GROUP_FIRST_DR(S)    (S)->first_dr
#define STMT_VINFO_DR_GROUP_NEXT_DR(S)     (S)->next_dr
#define STMT_VINFO_DR_GROUP_SIZE(S)        (S)->size
#define STMT_VINFO_DR_GROUP_STORE_COUNT(S) (S)->store_count
#define STMT_VINFO_DR_GROUP_GAP(S)         (S)->gap
#define STMT_VINFO_DR_GROUP_SAME_DR_STMT(S)(S)->same_dr_stmt
#define STMT_VINFO_DR_GROUP_READ_WRITE_DEPENDENCE(S)  (S)->read_write_dep
#define STMT_VINFO_STRIDED_ACCESS(S)      ((S)->first_dr != NULL)

#define DR_GROUP_FIRST_DR(S)               (S)->first_dr
#define DR_GROUP_NEXT_DR(S)                (S)->next_dr
#define DR_GROUP_SIZE(S)                   (S)->size
#define DR_GROUP_STORE_COUNT(S)            (S)->store_count
#define DR_GROUP_GAP(S)                    (S)->gap
#define DR_GROUP_SAME_DR_STMT(S)           (S)->same_dr_stmt
#define DR_GROUP_READ_WRITE_DEPENDENCE(S)  (S)->read_write_dep

#define STMT_VINFO_RELEVANT_P(S)          ((S)->relevant != vect_unused_in_scope)
#define STMT_VINFO_OUTSIDE_OF_LOOP_COST(S) (S)->cost.outside_of_loop
#define STMT_VINFO_INSIDE_OF_LOOP_COST(S)  (S)->cost.inside_of_loop

#define HYBRID_SLP_STMT(S)                ((S)->slp_type == hybrid)
#define PURE_SLP_STMT(S)                  ((S)->slp_type == pure_slp)
#define STMT_SLP_TYPE(S)                   (S)->slp_type

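/* Illustrative sketch (not part of the original header): the usual access
   pattern is to go from a gimple stmt to its stmt_vec_info via vinfo_for_stmt
   (defined below) and then use the accessors above (stmt is a hypothetical
   variable):

     stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
     if (STMT_VINFO_RELEVANT_P (stmt_info)
         && STMT_VINFO_DATA_REF (stmt_info)
         && STMT_VINFO_STRIDED_ACCESS (stmt_info))
       ...   stmt is a relevant strided (interleaved) access ...
*/
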
/* These are some defines for the initial implementation of the vectorizer's
   cost model.  These will later be target specific hooks.  */

/* Cost of conditional taken branch.  */
#ifndef TARG_COND_TAKEN_BRANCH_COST
#define TARG_COND_TAKEN_BRANCH_COST        3
#endif

/* Cost of conditional not taken branch.  */
#ifndef TARG_COND_NOT_TAKEN_BRANCH_COST
#define TARG_COND_NOT_TAKEN_BRANCH_COST        1
#endif

/* Cost of any scalar operation, excluding load and store.  */
#ifndef TARG_SCALAR_STMT_COST
#define TARG_SCALAR_STMT_COST           1
#endif

/* Cost of scalar load.  */
#ifndef TARG_SCALAR_LOAD_COST
#define TARG_SCALAR_LOAD_COST           1
#endif

/* Cost of scalar store.  */
#ifndef TARG_SCALAR_STORE_COST
#define TARG_SCALAR_STORE_COST           1
#endif

/* Cost of any vector operation, excluding load, store or vector to scalar
   operation.  */
#ifndef TARG_VEC_STMT_COST
#define TARG_VEC_STMT_COST           1
#endif

/* Cost of vector to scalar operation.  */
#ifndef TARG_VEC_TO_SCALAR_COST
#define TARG_VEC_TO_SCALAR_COST      1
#endif

/* Cost of scalar to vector operation.  */
#ifndef TARG_SCALAR_TO_VEC_COST
#define TARG_SCALAR_TO_VEC_COST      1
#endif

/* Cost of aligned vector load.  */
#ifndef TARG_VEC_LOAD_COST
#define TARG_VEC_LOAD_COST           1
#endif

/* Cost of misaligned vector load.  */
#ifndef TARG_VEC_UNALIGNED_LOAD_COST
#define TARG_VEC_UNALIGNED_LOAD_COST 2
#endif

/* Cost of vector store.  */
#ifndef TARG_VEC_STORE_COST
#define TARG_VEC_STORE_COST          1
#endif

/* Cost of vector permutation.  */
#ifndef TARG_VEC_PERMUTE_COST
#define TARG_VEC_PERMUTE_COST          1
#endif

/* The maximum number of intermediate steps required in multi-step type
   conversion.  */
#define MAX_INTERM_CVT_STEPS         3

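/* Illustrative sketch (not part of the original header): with the default
   values above, the inside-of-loop cost recorded for a simple vectorized
   a[i] = b[i] + c[i] statement group would come out roughly as

     2 * TARG_VEC_LOAD_COST + TARG_VEC_STMT_COST + TARG_VEC_STORE_COST = 4

   and a misaligned load would instead contribute TARG_VEC_UNALIGNED_LOAD_COST,
   i.e. twice the aligned cost.  The actual accounting is done by the
   vect_model_*_cost routines declared below.  */
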
/* Avoid GTY(()) on stmt_vec_info.  */
typedef void *vec_void_p;
DEF_VEC_P (vec_void_p);
DEF_VEC_ALLOC_P (vec_void_p, heap);

extern VEC(vec_void_p,heap) *stmt_vec_info_vec;

void init_stmt_vec_info_vec (void);
void free_stmt_vec_info_vec (void);

static inline stmt_vec_info
vinfo_for_stmt (gimple stmt)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    return NULL;

  gcc_assert (uid <= VEC_length (vec_void_p, stmt_vec_info_vec));
  return (stmt_vec_info) VEC_index (vec_void_p, stmt_vec_info_vec, uid - 1);
}

static inline void
set_vinfo_for_stmt (gimple stmt, stmt_vec_info info)
{
  unsigned int uid = gimple_uid (stmt);
  if (uid == 0)
    {
      gcc_assert (info);
      uid = VEC_length (vec_void_p, stmt_vec_info_vec) + 1;
      gimple_set_uid (stmt, uid);
      VEC_safe_push (vec_void_p, heap, stmt_vec_info_vec, (vec_void_p) info);
    }
  else
    VEC_replace (vec_void_p, stmt_vec_info_vec, uid - 1, (vec_void_p) info);
}

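/* Illustrative sketch (not part of the original header): the gimple uid is
   used as a 1-based index into stmt_vec_info_vec (0 means "no info"), so a
   pass typically creates and attaches the info like this (stmt and loop_vinfo
   are hypothetical variables):

     set_vinfo_for_stmt (stmt, new_stmt_vec_info (stmt, loop_vinfo, NULL));
     ...
     stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

   new_stmt_vec_info is declared later in this file.  */
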
static inline gimple
get_earlier_stmt (gimple stmt1, gimple stmt2)
{
  unsigned int uid1, uid2;

  if (stmt1 == NULL)
    return stmt2;

  if (stmt2 == NULL)
    return stmt1;

  uid1 = gimple_uid (stmt1);
  uid2 = gimple_uid (stmt2);

  if (uid1 == 0 || uid2 == 0)
    return NULL;

  gcc_assert (uid1 <= VEC_length (vec_void_p, stmt_vec_info_vec));
  gcc_assert (uid2 <= VEC_length (vec_void_p, stmt_vec_info_vec));

  if (uid1 < uid2)
    return stmt1;
  else
    return stmt2;
}

static inline bool
is_pattern_stmt_p (stmt_vec_info stmt_info)
{
  gimple related_stmt;
  stmt_vec_info related_stmt_info;

  related_stmt = STMT_VINFO_RELATED_STMT (stmt_info);
  if (related_stmt
      && (related_stmt_info = vinfo_for_stmt (related_stmt))
      && STMT_VINFO_IN_PATTERN_P (related_stmt_info))
    return true;

  return false;
}

static inline bool
is_loop_header_bb_p (basic_block bb)
{
  if (bb == (bb->loop_father)->header)
    return true;
  gcc_assert (EDGE_COUNT (bb->preds) == 1);
  return false;
}

static inline void
stmt_vinfo_set_inside_of_loop_cost (stmt_vec_info stmt_info, slp_tree slp_node,
                                    int cost)
{
  if (slp_node)
    SLP_TREE_INSIDE_OF_LOOP_COST (slp_node) = cost;
  else
    STMT_VINFO_INSIDE_OF_LOOP_COST (stmt_info) = cost;
}

static inline void
stmt_vinfo_set_outside_of_loop_cost (stmt_vec_info stmt_info, slp_tree slp_node,
                                     int cost)
{
  if (slp_node)
    SLP_TREE_OUTSIDE_OF_LOOP_COST (slp_node) = cost;
  else
    STMT_VINFO_OUTSIDE_OF_LOOP_COST (stmt_info) = cost;
}

static inline int
vect_pow2 (int x)
{
  int i, res = 1;

  for (i = 0; i < x; i++)
    res *= 2;

  return res;
}

/*-----------------------------------------------------------------*/
/* Info on data references alignment.                              */
/*-----------------------------------------------------------------*/

/* Reflects actual alignment of first access in the vectorized loop,
   taking into account peeling/versioning if applied.  */
#define DR_MISALIGNMENT(DR)   ((int) (size_t) (DR)->aux)
#define SET_DR_MISALIGNMENT(DR, VAL)   ((DR)->aux = (void *) (size_t) (VAL))

static inline bool
aligned_access_p (struct data_reference *data_ref_info)
{
  return (DR_MISALIGNMENT (data_ref_info) == 0);
}

static inline bool
known_alignment_for_access_p (struct data_reference *data_ref_info)
{
  return (DR_MISALIGNMENT (data_ref_info) != -1);
}

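/* Illustrative sketch (not part of the original header): the misalignment is
   stashed directly in the data_reference's aux pointer, with -1 meaning
   "unknown" and 0 meaning "aligned", so typical uses look like (dr is a
   hypothetical variable):

     SET_DR_MISALIGNMENT (dr, -1);              mark alignment as unknown
     if (known_alignment_for_access_p (dr)
         && !aligned_access_p (dr))
       ...   known but nonzero misalignment, in bytes ...
*/
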
/* vect_dump will be set to stderr or to dump_file if it exists.  */
extern FILE *vect_dump;
extern LOC vect_loop_location;

/*-----------------------------------------------------------------*/
/* Function prototypes.                                            */
/*-----------------------------------------------------------------*/

/* Simple loop peeling and versioning utilities for vectorizer's purposes -
   in tree-vect-loop-manip.c.  */
extern void slpeel_make_loop_iterate_ntimes (struct loop *, tree);
extern bool slpeel_can_duplicate_loop_p (const struct loop *, const_edge);
extern void vect_loop_versioning (loop_vec_info, bool, tree *, gimple_seq *);
extern void vect_do_peeling_for_loop_bound (loop_vec_info, tree *,
                                            tree, gimple_seq);
extern void vect_do_peeling_for_alignment (loop_vec_info);
extern LOC find_loop_location (struct loop *);
extern bool vect_can_advance_ivs_p (loop_vec_info);

/* In tree-vect-stmts.c.  */
extern tree get_vectype_for_scalar_type (tree);
extern bool vect_is_simple_use (tree, loop_vec_info, bb_vec_info, gimple *,
                                tree *,  enum vect_def_type *);
extern bool supportable_widening_operation (enum tree_code, gimple, tree,
                                            tree *, tree *, enum tree_code *,
                                            enum tree_code *, int *,
                                            VEC (tree, heap) **);
extern bool supportable_narrowing_operation (enum tree_code, const_gimple,
                                             tree, enum tree_code *, int *,
                                             VEC (tree, heap) **);
extern stmt_vec_info new_stmt_vec_info (gimple stmt, loop_vec_info,
                                        bb_vec_info);
extern void free_stmt_vec_info (gimple stmt);
extern tree vectorizable_function (gimple, tree, tree);
extern void vect_model_simple_cost (stmt_vec_info, int, enum vect_def_type *,
                                    slp_tree);
extern void vect_model_store_cost (stmt_vec_info, int, enum vect_def_type,
                                   slp_tree);
extern void vect_model_load_cost (stmt_vec_info, int, slp_tree);
extern void vect_finish_stmt_generation (gimple, gimple,
                                         gimple_stmt_iterator *);
extern bool vect_mark_stmts_to_be_vectorized (loop_vec_info);
extern int cost_for_stmt (gimple);
extern tree vect_get_vec_def_for_operand (tree, gimple, tree *);
extern tree vect_init_vector (gimple, tree, tree,
                              gimple_stmt_iterator *);
extern tree vect_get_vec_def_for_stmt_copy (enum vect_def_type, tree);
extern bool vect_transform_stmt (gimple, gimple_stmt_iterator *,
                                 bool *, slp_tree, slp_instance);
extern void vect_remove_stores (gimple);
extern bool vect_analyze_stmt (gimple, bool *, slp_tree);
extern bool vectorizable_condition (gimple, gimple_stmt_iterator *, gimple *,
                                    tree, int);

/* In tree-vect-data-refs.c.  */
extern bool vect_can_force_dr_alignment_p (const_tree, unsigned int);
extern enum dr_alignment_support vect_supportable_dr_alignment
                                           (struct data_reference *);
extern tree vect_get_smallest_scalar_type (gimple, HOST_WIDE_INT *,
                                           HOST_WIDE_INT *);
extern bool vect_analyze_data_ref_dependences (loop_vec_info, bb_vec_info);
extern bool vect_enhance_data_refs_alignment (loop_vec_info);
extern bool vect_analyze_data_refs_alignment (loop_vec_info, bb_vec_info);
extern bool vect_verify_datarefs_alignment (loop_vec_info, bb_vec_info);
extern bool vect_analyze_data_ref_accesses (loop_vec_info, bb_vec_info);
extern bool vect_prune_runtime_alias_test_list (loop_vec_info);
extern bool vect_analyze_data_refs (loop_vec_info, bb_vec_info);
extern tree vect_create_data_ref_ptr (gimple, struct loop *, tree, tree *,
                                      gimple *, bool, bool *);
extern tree bump_vector_ptr (tree, gimple, gimple_stmt_iterator *, gimple, tree);
extern tree vect_create_destination_var (tree, tree);
extern bool vect_strided_store_supported (tree);
extern bool vect_strided_load_supported (tree);
extern bool vect_permute_store_chain (VEC(tree,heap) *,unsigned int, gimple,
                                    gimple_stmt_iterator *, VEC(tree,heap) **);
extern tree vect_setup_realignment (gimple, gimple_stmt_iterator *, tree *,
                                    enum dr_alignment_support, tree,
                                    struct loop **);
extern bool vect_permute_load_chain (VEC(tree,heap) *,unsigned int, gimple,
                                    gimple_stmt_iterator *, VEC(tree,heap) **);
extern bool vect_transform_strided_load (gimple, VEC(tree,heap) *, int,
                                         gimple_stmt_iterator *);
extern int vect_get_place_in_interleaving_chain (gimple, gimple);
extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
extern tree vect_create_addr_base_for_vector_ref (gimple, gimple_seq *,
                                                  tree, struct loop *);

/* In tree-vect-loop.c.  */
/* FORNOW: Used in tree-parloops.c.  */
extern void destroy_loop_vec_info (loop_vec_info, bool);
extern gimple vect_is_simple_reduction (loop_vec_info, gimple, bool, bool *);
/* Drive for loop analysis stage.  */
extern loop_vec_info vect_analyze_loop (struct loop *);
/* Drive for loop transformation stage.  */
extern void vect_transform_loop (loop_vec_info);
extern loop_vec_info vect_analyze_loop_form (struct loop *);
extern bool vectorizable_live_operation (gimple, gimple_stmt_iterator *,
                                         gimple *);
extern bool vectorizable_reduction (gimple, gimple_stmt_iterator *, gimple *);
extern bool vectorizable_induction (gimple, gimple_stmt_iterator *, gimple *);
extern int vect_estimate_min_profitable_iters (loop_vec_info);
extern tree get_initial_def_for_reduction (gimple, tree, tree *);
extern int vect_min_worthwhile_factor (enum tree_code);


/* In tree-vect-slp.c.  */
extern void vect_free_slp_instance (slp_instance);
extern bool vect_transform_slp_perm_load (gimple, VEC (tree, heap) *,
                                          gimple_stmt_iterator *, int,
                                          slp_instance, bool);
extern bool vect_schedule_slp (loop_vec_info, bb_vec_info);
extern void vect_update_slp_costs_according_to_vf (loop_vec_info);
extern bool vect_analyze_slp (loop_vec_info, bb_vec_info);
extern void vect_make_slp_decision (loop_vec_info);
extern void vect_detect_hybrid_slp (loop_vec_info);
extern void vect_get_slp_defs (slp_tree, VEC (tree,heap) **,
                               VEC (tree,heap) **);
extern LOC find_bb_location (basic_block);
extern bb_vec_info vect_slp_analyze_bb (basic_block);
extern void vect_slp_transform_bb (basic_block);

/* In tree-vect-patterns.c.  */
/* Pattern recognition functions.
   Additional pattern recognition functions can (and will) be added
   in the future.  */
typedef gimple (* vect_recog_func_ptr) (gimple, tree *, tree *);
#define NUM_PATTERNS 4
void vect_pattern_recog (loop_vec_info);

/* In tree-vectorizer.c.  */
unsigned vectorize_loops (void);
/* Vectorization debug information.  */
extern bool vect_print_dump_info (enum verbosity_levels);

#endif  /* GCC_TREE_VECTORIZER_H  */
