gnu-dev/or1k-gcc/gcc/omp-low.c  (openrisc trunk, revision 685)

/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic-core.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
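
/* As a rough illustration of these two phases (a sketch only, not a
   compiler dump): a trivial region such as

        #pragma omp parallel shared (a)
          a++;

   ends up approximately as

        .omp_data_o.a = a;
        __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o, 0);
        foo._omp_fn.0 (&.omp_data_o);
        __builtin_GOMP_parallel_end ();
        a = .omp_data_o.a;

   where foo._omp_fn.0 is the outlined child function that reaches the
   shared variable through 'struct .omp_data_s'.  The exact temporaries,
   copy-in/copy-out decisions and call arguments depend on the clauses
   and on use_pointer_for_field below.  */
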
/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
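
/* For example, determine_parallel_type below uses this to decide whether
   a combined parallel+workshare call makes sense:

        tree c = find_omp_clause (gimple_omp_for_clauses (ws_stmt),
                                  OMP_CLAUSE_SCHEDULE);

   Note that only the given clause chain is searched; clauses on
   enclosing constructs are not considered.  */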

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered
          || fd->collapse > 1)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;


      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, 1);
          else
            loop->n2 = fold_build2_loc (loc,
                                    PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                    build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build_pointer_plus_hwi_loc (loc, loop->n2, -1);
          else
            loop->n2 = fold_build2_loc (loc,
                                    MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                    build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
        case POINTER_PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                    NEGATE_EXPR, TREE_TYPE (loop->step),
                                    loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                 PLUS_EXPR, TREE_TYPE (loop->v),
                                 loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                    PLUS_EXPR, TREE_TYPE (loop->v),
                                    loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                    MINUS_EXPR, TREE_TYPE (loop->v),
                                    loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          if ((i == 0 || count != NULL_TREE)
              && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
              && TREE_CONSTANT (loop->n1)
              && TREE_CONSTANT (loop->n2)
              && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype
                  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                               PLUS_EXPR, itype,
                               fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                               fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                               fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                 fold_build1_loc (loc, NEGATE_EXPR, itype,
                                              fold_convert_loc (loc, itype,
                                                                loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                 fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                     MULT_EXPR, long_long_unsigned_type_node,
                                     count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else
            count = NULL_TREE;
        }
    }

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}
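
/* Informally, for a single non-collapsed loop such as

        #pragma omp for schedule (dynamic, 16)
        for (i = 0; i < n; i += 4)

   the extracted data is roughly fd.loop.v = i, fd.loop.n1 = 0,
   fd.loop.n2 = n, fd.loop.step = 4, fd.loop.cond_code = LT_EXPR,
   fd.sched_kind = OMP_CLAUSE_SCHEDULE_DYNAMIC, fd.chunk_size = 16,
   fd.collapse = 1.  LE/GE conditions are normalized to LT/GT by
   adjusting N2 as done above.  */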


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

        #pragma omp parallel for schedule (guided, i * 4)
        for (j ...)

   Is lowered into:

        # BLOCK 2 (PAR_ENTRY_BB)
        .omp_data_o.i = i;
        #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)

        # BLOCK 3 (WS_ENTRY_BB)
        .omp_data_i = &.omp_data_o;
        D.1667 = .omp_data_i->i;
        D.1598 = D.1667 * 4;
        #pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of any
   statement in WS_ENTRY_BB.  If so, then we cannot emit the combined
   call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static VEC(tree,gc) *
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  VEC(tree,gc) *ws_args;

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = VEC_alloc (tree, gc, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      VEC_quick_push (tree, ws_args, t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      VEC_quick_push (tree, ws_args, t);

      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          VEC_quick_push (tree, ws_args, t);
        }

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      ws_args = VEC_alloc (tree, gc, 1);
      VEC_quick_push (tree, ws_args, t);
      return ws_args;
    }

  gcc_unreachable ();
}
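
/* These values are stashed in region->ws_args by determine_parallel_type
   below and are intended to become the extra trailing arguments of the
   combined parallel+workshare library call built during expansion:
   lower bound, upper bound, step and (if present) chunk size for a loop,
   or the section count for a sections region.  */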


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Lookup variables in the decl or field splay trees.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                goto maybe_mark_addressable_and_ret;
            }
        }

      /* For tasks avoid using copy-in/out, unless they are readonly
         (in which case just copy-in is used).  As tasks can be
         deferred or executed in a different thread, when GOMP_task
         returns, the task hasn't necessarily terminated.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
        {
          tree outer;
        maybe_mark_addressable_and_ret:
          outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}
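
/* For example, given

        int x = 0;
        #pragma omp task shared (x)
          x++;

   X is not readonly and the sharing context is a task, so the function
   returns true: the task receives the address of X (and X is marked
   addressable above) rather than a copy whose update could be lost when
   the deferred task finally runs.  */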

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}
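
/* MASK encodes which record(s) receive the new field: bit 0 selects the
   receiver-side record (record_type / field_map), bit 1 the sender-side
   record (srecord_type / sfield_map).  scan_sharing_clauses below uses
   e.g.

        install_var_field (decl, by_ref, 3, ctx);

   for ordinary shared variables, and the separate 1 / 2 calls for task
   firstprivate variables whose sender and receiver layouts differ.  */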

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Return the parallel region associated with STMT.  */

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}
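
/* For a parallel containing one workshare loop the dump looks roughly
   like

        bb 2: gimple_omp_parallel
            bb 3: gimple_omp_for
            bb 5: GIMPLE_OMP_CONTINUE
            bb 6: GIMPLE_OMP_RETURN
        bb 7: GIMPLE_OMP_RETURN

   (block numbers are only an example); the region type is printed via
   gimple_code_name, and the CONTINUE/RETURN lines appear only when the
   corresponding blocks were recorded.  */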

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at STMT inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_get_node (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          DECL_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
        case OMP_CLAUSE_FINAL:
        case OMP_CLAUSE_MERGEABLE:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}
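
/* For instance, for '#pragma omp parallel shared (a) firstprivate (b)'
   with A and B both local, fixed-size and not addressable: A gets a
   field in the .omp_data_s record (mask 3) plus a remapped local in the
   child, B goes through the do_private path and likewise gets a field
   and a private local, and any global mentioned in a shared clause is
   skipped here because the child function uses it directly.  */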
1531
 
1532
/* Create a new name for omp child function.  Returns an identifier.  */
1533
 
1534
static GTY(()) unsigned int tmp_ompfn_id_num;
1535
 
1536
static tree
1537
create_omp_child_function_name (bool task_copy)
1538
{
1539
  return (clone_function_name (current_function_decl,
1540
                               task_copy ? "_omp_cpyfn" : "_omp_fn"));
1541
}
1542
 
1543
/* Build a decl for the omp child function.  It'll not contain a body
1544
   yet, just the bare decl.  */
1545
 
1546
static void
1547
create_omp_child_function (omp_context *ctx, bool task_copy)
1548
{
1549
  tree decl, type, name, t;
1550
 
1551
  name = create_omp_child_function_name (task_copy);
1552
  if (task_copy)
1553
    type = build_function_type_list (void_type_node, ptr_type_node,
1554
                                     ptr_type_node, NULL_TREE);
1555
  else
1556
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
1557
 
1558
  decl = build_decl (gimple_location (ctx->stmt),
1559
                     FUNCTION_DECL, name, type);
1560
 
1561
  if (!task_copy)
1562
    ctx->cb.dst_fn = decl;
1563
  else
1564
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);
1565
 
1566
  TREE_STATIC (decl) = 1;
1567
  TREE_USED (decl) = 1;
1568
  DECL_ARTIFICIAL (decl) = 1;
1569
  DECL_NAMELESS (decl) = 1;
1570
  DECL_IGNORED_P (decl) = 0;
1571
  TREE_PUBLIC (decl) = 0;
1572
  DECL_UNINLINABLE (decl) = 1;
1573
  DECL_EXTERNAL (decl) = 0;
1574
  DECL_CONTEXT (decl) = NULL_TREE;
1575
  DECL_INITIAL (decl) = make_node (BLOCK);
1576
 
1577
  t = build_decl (DECL_SOURCE_LOCATION (decl),
1578
                  RESULT_DECL, NULL_TREE, void_type_node);
1579
  DECL_ARTIFICIAL (t) = 1;
1580
  DECL_IGNORED_P (t) = 1;
1581
  DECL_CONTEXT (t) = decl;
1582
  DECL_RESULT (decl) = t;
1583
 
1584
  t = build_decl (DECL_SOURCE_LOCATION (decl),
1585
                  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
1586
  DECL_ARTIFICIAL (t) = 1;
1587
  DECL_NAMELESS (t) = 1;
1588
  DECL_ARG_TYPE (t) = ptr_type_node;
1589
  DECL_CONTEXT (t) = current_function_decl;
1590
  TREE_USED (t) = 1;
1591
  DECL_ARGUMENTS (decl) = t;
1592
  if (!task_copy)
1593
    ctx->receiver_decl = t;
1594
  else
1595
    {
1596
      t = build_decl (DECL_SOURCE_LOCATION (decl),
1597
                      PARM_DECL, get_identifier (".omp_data_o"),
1598
                      ptr_type_node);
1599
      DECL_ARTIFICIAL (t) = 1;
1600
      DECL_NAMELESS (t) = 1;
1601
      DECL_ARG_TYPE (t) = ptr_type_node;
1602
      DECL_CONTEXT (t) = current_function_decl;
1603
      TREE_USED (t) = 1;
1604
      TREE_ADDRESSABLE (t) = 1;
1605
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
1606
      DECL_ARGUMENTS (decl) = t;
1607
    }
1608
 
1609
  /* Allocate memory for the function structure.  The call to
1610
     allocate_struct_function clobbers CFUN, so we need to restore
1611
     it afterward.  */
1612
  push_struct_function (decl);
1613
  cfun->function_end_locus = gimple_location (ctx->stmt);
1614
  pop_cfun ();
1615
}
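
/* Illustrative sketch (the names foo and foo._omp_fn.0 are assumed, not
   produced by this file): for a parallel region encountered in a
   function foo, the decl built above corresponds roughly to

       static void foo._omp_fn.0 (void *.omp_data_i);

   i.e. a static, artificial, uninlinable function taking a single
   pointer to the shared-data record (the task copy function gets a
   second pointer, .omp_data_o).  The body is attached later, when the
   region itself is expanded.  */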
1616
 
1617
 
1618
/* Scan an OpenMP parallel directive.  */
1619
 
1620
static void
1621
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1622
{
1623
  omp_context *ctx;
1624
  tree name;
1625
  gimple stmt = gsi_stmt (*gsi);
1626
 
1627
  /* Ignore parallel directives with empty bodies, unless there
1628
     are copyin clauses.  */
1629
  if (optimize > 0
1630
      && empty_body_p (gimple_omp_body (stmt))
1631
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
1632
                          OMP_CLAUSE_COPYIN) == NULL)
1633
    {
1634
      gsi_replace (gsi, gimple_build_nop (), false);
1635
      return;
1636
    }
1637
 
1638
  ctx = new_omp_context (stmt, outer_ctx);
1639
  if (taskreg_nesting_level > 1)
1640
    ctx->is_nested = true;
1641
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1642
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1643
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1644
  name = create_tmp_var_name (".omp_data_s");
1645
  name = build_decl (gimple_location (stmt),
1646
                     TYPE_DECL, name, ctx->record_type);
1647
  DECL_ARTIFICIAL (name) = 1;
1648
  DECL_NAMELESS (name) = 1;
1649
  TYPE_NAME (ctx->record_type) = name;
1650
  create_omp_child_function (ctx, false);
1651
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
1652
 
1653
  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
1654
  scan_omp (gimple_omp_body (stmt), ctx);
1655
 
1656
  if (TYPE_FIELDS (ctx->record_type) == NULL)
1657
    ctx->record_type = ctx->receiver_decl = NULL;
1658
  else
1659
    {
1660
      layout_type (ctx->record_type);
1661
      fixup_child_record_type (ctx);
1662
    }
1663
}
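
/* Illustrative example (the source line below is assumed):

       #pragma omp parallel shared(a) firstprivate(b)

   The scan above builds a ".omp_data_s" RECORD_TYPE whose fields are
   installed by scan_sharing_clauses for each variable that must travel
   from the encountering thread to the children (here a and b), points
   the statement's child function at the decl just created, and lays
   the record out.  If no field turns out to be necessary, record_type
   and receiver_decl are cleared and no structure is passed at all.  */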
1664
 
1665
/* Scan an OpenMP task directive.  */
1666
 
1667
static void
1668
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
1669
{
1670
  omp_context *ctx;
1671
  tree name, t;
1672
  gimple stmt = gsi_stmt (*gsi);
1673
  location_t loc = gimple_location (stmt);
1674
 
1675
  /* Ignore task directives with empty bodies.  */
1676
  if (optimize > 0
1677
      && empty_body_p (gimple_omp_body (stmt)))
1678
    {
1679
      gsi_replace (gsi, gimple_build_nop (), false);
1680
      return;
1681
    }
1682
 
1683
  ctx = new_omp_context (stmt, outer_ctx);
1684
  if (taskreg_nesting_level > 1)
1685
    ctx->is_nested = true;
1686
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1687
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
1688
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1689
  name = create_tmp_var_name (".omp_data_s");
1690
  name = build_decl (gimple_location (stmt),
1691
                     TYPE_DECL, name, ctx->record_type);
1692
  DECL_ARTIFICIAL (name) = 1;
1693
  DECL_NAMELESS (name) = 1;
1694
  TYPE_NAME (ctx->record_type) = name;
1695
  create_omp_child_function (ctx, false);
1696
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1697
 
1698
  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1699
 
1700
  if (ctx->srecord_type)
1701
    {
1702
      name = create_tmp_var_name (".omp_data_a");
1703
      name = build_decl (gimple_location (stmt),
1704
                         TYPE_DECL, name, ctx->srecord_type);
1705
      DECL_ARTIFICIAL (name) = 1;
1706
      DECL_NAMELESS (name) = 1;
1707
      TYPE_NAME (ctx->srecord_type) = name;
1708
      create_omp_child_function (ctx, true);
1709
    }
1710
 
1711
  scan_omp (gimple_omp_body (stmt), ctx);
1712
 
1713
  if (TYPE_FIELDS (ctx->record_type) == NULL)
1714
    {
1715
      ctx->record_type = ctx->receiver_decl = NULL;
1716
      t = build_int_cst (long_integer_type_node, 0);
1717
      gimple_omp_task_set_arg_size (stmt, t);
1718
      t = build_int_cst (long_integer_type_node, 1);
1719
      gimple_omp_task_set_arg_align (stmt, t);
1720
    }
1721
  else
1722
    {
1723
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
1724
      /* Move VLA fields to the end.  */
1725
      p = &TYPE_FIELDS (ctx->record_type);
1726
      while (*p)
1727
        if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
1728
            || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
1729
          {
1730
            *q = *p;
1731
            *p = TREE_CHAIN (*p);
1732
            TREE_CHAIN (*q) = NULL_TREE;
1733
            q = &TREE_CHAIN (*q);
1734
          }
1735
        else
1736
          p = &DECL_CHAIN (*p);
1737
      *p = vla_fields;
1738
      layout_type (ctx->record_type);
1739
      fixup_child_record_type (ctx);
1740
      if (ctx->srecord_type)
1741
        layout_type (ctx->srecord_type);
1742
      t = fold_convert_loc (loc, long_integer_type_node,
1743
                        TYPE_SIZE_UNIT (ctx->record_type));
1744
      gimple_omp_task_set_arg_size (stmt, t);
1745
      t = build_int_cst (long_integer_type_node,
1746
                         TYPE_ALIGN_UNIT (ctx->record_type));
1747
      gimple_omp_task_set_arg_align (stmt, t);
1748
    }
1749
}
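
/* Illustrative note: a task whose record stays empty gets arg_size 0
   and arg_align 1, so GOMP_task has nothing to copy.  Otherwise the
   record is laid out with variable-sized fields moved last, e.g.
   (sketch)

       struct .omp_data_s { int n; ...; char vla[n]; }

   presumably so the fixed-size members keep constant offsets, and
   arg_size / arg_align are taken from TYPE_SIZE_UNIT / TYPE_ALIGN_UNIT
   of the laid-out record.  */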
1750
 
1751
 
1752
/* Scan an OpenMP loop directive.  */
1753
 
1754
static void
1755
scan_omp_for (gimple stmt, omp_context *outer_ctx)
1756
{
1757
  omp_context *ctx;
1758
  size_t i;
1759
 
1760
  ctx = new_omp_context (stmt, outer_ctx);
1761
 
1762
  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);
1763
 
1764
  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
1765
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
1766
    {
1767
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
1768
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
1769
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
1770
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
1771
    }
1772
  scan_omp (gimple_omp_body (stmt), ctx);
1773
}
1774
 
1775
/* Scan an OpenMP sections directive.  */
1776
 
1777
static void
1778
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
1779
{
1780
  omp_context *ctx;
1781
 
1782
  ctx = new_omp_context (stmt, outer_ctx);
1783
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
1784
  scan_omp (gimple_omp_body (stmt), ctx);
1785
}
1786
 
1787
/* Scan an OpenMP single directive.  */
1788
 
1789
static void
1790
scan_omp_single (gimple stmt, omp_context *outer_ctx)
1791
{
1792
  omp_context *ctx;
1793
  tree name;
1794
 
1795
  ctx = new_omp_context (stmt, outer_ctx);
1796
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1797
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1798
  name = create_tmp_var_name (".omp_copy_s");
1799
  name = build_decl (gimple_location (stmt),
1800
                     TYPE_DECL, name, ctx->record_type);
1801
  TYPE_NAME (ctx->record_type) = name;
1802
 
1803
  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
1804
  scan_omp (gimple_omp_body (stmt), ctx);
1805
 
1806
  if (TYPE_FIELDS (ctx->record_type) == NULL)
1807
    ctx->record_type = NULL;
1808
  else
1809
    layout_type (ctx->record_type);
1810
}
1811
 
1812
 
1813
/* Check OpenMP nesting restrictions.  */
1814
static void
1815
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
1816
{
1817
  switch (gimple_code (stmt))
1818
    {
1819
    case GIMPLE_OMP_FOR:
1820
    case GIMPLE_OMP_SECTIONS:
1821
    case GIMPLE_OMP_SINGLE:
1822
    case GIMPLE_CALL:
1823
      for (; ctx != NULL; ctx = ctx->outer)
1824
        switch (gimple_code (ctx->stmt))
1825
          {
1826
          case GIMPLE_OMP_FOR:
1827
          case GIMPLE_OMP_SECTIONS:
1828
          case GIMPLE_OMP_SINGLE:
1829
          case GIMPLE_OMP_ORDERED:
1830
          case GIMPLE_OMP_MASTER:
1831
          case GIMPLE_OMP_TASK:
1832
            if (is_gimple_call (stmt))
1833
              {
1834
                warning (0, "barrier region may not be closely nested inside "
1835
                            "of work-sharing, critical, ordered, master or "
1836
                            "explicit task region");
1837
                return;
1838
              }
1839
            warning (0, "work-sharing region may not be closely nested inside "
1840
                        "of work-sharing, critical, ordered, master or explicit "
1841
                        "task region");
1842
            return;
1843
          case GIMPLE_OMP_PARALLEL:
1844
            return;
1845
          default:
1846
            break;
1847
          }
1848
      break;
1849
    case GIMPLE_OMP_MASTER:
1850
      for (; ctx != NULL; ctx = ctx->outer)
1851
        switch (gimple_code (ctx->stmt))
1852
          {
1853
          case GIMPLE_OMP_FOR:
1854
          case GIMPLE_OMP_SECTIONS:
1855
          case GIMPLE_OMP_SINGLE:
1856
          case GIMPLE_OMP_TASK:
1857
            warning (0, "master region may not be closely nested inside "
1858
                        "of work-sharing or explicit task region");
1859
            return;
1860
          case GIMPLE_OMP_PARALLEL:
1861
            return;
1862
          default:
1863
            break;
1864
          }
1865
      break;
1866
    case GIMPLE_OMP_ORDERED:
1867
      for (; ctx != NULL; ctx = ctx->outer)
1868
        switch (gimple_code (ctx->stmt))
1869
          {
1870
          case GIMPLE_OMP_CRITICAL:
1871
          case GIMPLE_OMP_TASK:
1872
            warning (0, "ordered region may not be closely nested inside "
1873
                        "of critical or explicit task region");
1874
            return;
1875
          case GIMPLE_OMP_FOR:
1876
            if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
1877
                                 OMP_CLAUSE_ORDERED) == NULL)
1878
              warning (0, "ordered region must be closely nested inside "
1879
                          "a loop region with an ordered clause");
1880
            return;
1881
          case GIMPLE_OMP_PARALLEL:
1882
            return;
1883
          default:
1884
            break;
1885
          }
1886
      break;
1887
    case GIMPLE_OMP_CRITICAL:
1888
      for (; ctx != NULL; ctx = ctx->outer)
1889
        if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
1890
            && (gimple_omp_critical_name (stmt)
1891
                == gimple_omp_critical_name (ctx->stmt)))
1892
          {
1893
            warning (0, "critical region may not be nested inside a critical "
1894
                        "region with the same name");
1895
            return;
1896
          }
1897
      break;
1898
    default:
1899
      break;
1900
    }
1901
}
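
/* Illustrative example of a nesting the checks above warn about (the
   source is assumed):

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp for          <- work-sharing closely nested in
           for (j = 0; j < n; j++)  <- another work-sharing region
             body (i, j);
         }

   Wrapping the inner loop in its own #pragma omp parallel makes the
   nesting legal again, which is why each walk stops as soon as a
   GIMPLE_OMP_PARALLEL context is reached.  */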
1902
 
1903
 
1904
/* Helper function for scan_omp.
1905
 
1906
   Callback for walk_tree, and for operands in walk_gimple_stmt, used to
1907
   scan for OpenMP directives in TP.  */
1908
 
1909
static tree
1910
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
1911
{
1912
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
1913
  omp_context *ctx = (omp_context *) wi->info;
1914
  tree t = *tp;
1915
 
1916
  switch (TREE_CODE (t))
1917
    {
1918
    case VAR_DECL:
1919
    case PARM_DECL:
1920
    case LABEL_DECL:
1921
    case RESULT_DECL:
1922
      if (ctx)
1923
        *tp = remap_decl (t, &ctx->cb);
1924
      break;
1925
 
1926
    default:
1927
      if (ctx && TYPE_P (t))
1928
        *tp = remap_type (t, &ctx->cb);
1929
      else if (!DECL_P (t))
1930
        {
1931
          *walk_subtrees = 1;
1932
          if (ctx)
1933
            {
1934
              tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
1935
              if (tem != TREE_TYPE (t))
1936
                {
1937
                  if (TREE_CODE (t) == INTEGER_CST)
1938
                    *tp = build_int_cst_wide (tem,
1939
                                              TREE_INT_CST_LOW (t),
1940
                                              TREE_INT_CST_HIGH (t));
1941
                  else
1942
                    TREE_TYPE (t) = tem;
1943
                }
1944
            }
1945
        }
1946
      break;
1947
    }
1948
 
1949
  return NULL_TREE;
1950
}
1951
 
1952
 
1953
/* Helper function for scan_omp.
1954
 
1955
   Callback for walk_gimple_stmt used to scan for OpenMP directives in
1956
   the current statement in GSI.  */
1957
 
1958
static tree
1959
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
1960
                 struct walk_stmt_info *wi)
1961
{
1962
  gimple stmt = gsi_stmt (*gsi);
1963
  omp_context *ctx = (omp_context *) wi->info;
1964
 
1965
  if (gimple_has_location (stmt))
1966
    input_location = gimple_location (stmt);
1967
 
1968
  /* Check the OpenMP nesting restrictions.  */
1969
  if (ctx != NULL)
1970
    {
1971
      if (is_gimple_omp (stmt))
1972
        check_omp_nesting_restrictions (stmt, ctx);
1973
      else if (is_gimple_call (stmt))
1974
        {
1975
          tree fndecl = gimple_call_fndecl (stmt);
1976
          if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
1977
              && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
1978
            check_omp_nesting_restrictions (stmt, ctx);
1979
        }
1980
    }
1981
 
1982
  *handled_ops_p = true;
1983
 
1984
  switch (gimple_code (stmt))
1985
    {
1986
    case GIMPLE_OMP_PARALLEL:
1987
      taskreg_nesting_level++;
1988
      scan_omp_parallel (gsi, ctx);
1989
      taskreg_nesting_level--;
1990
      break;
1991
 
1992
    case GIMPLE_OMP_TASK:
1993
      taskreg_nesting_level++;
1994
      scan_omp_task (gsi, ctx);
1995
      taskreg_nesting_level--;
1996
      break;
1997
 
1998
    case GIMPLE_OMP_FOR:
1999
      scan_omp_for (stmt, ctx);
2000
      break;
2001
 
2002
    case GIMPLE_OMP_SECTIONS:
2003
      scan_omp_sections (stmt, ctx);
2004
      break;
2005
 
2006
    case GIMPLE_OMP_SINGLE:
2007
      scan_omp_single (stmt, ctx);
2008
      break;
2009
 
2010
    case GIMPLE_OMP_SECTION:
2011
    case GIMPLE_OMP_MASTER:
2012
    case GIMPLE_OMP_ORDERED:
2013
    case GIMPLE_OMP_CRITICAL:
2014
      ctx = new_omp_context (stmt, ctx);
2015
      scan_omp (gimple_omp_body (stmt), ctx);
2016
      break;
2017
 
2018
    case GIMPLE_BIND:
2019
      {
2020
        tree var;
2021
 
2022
        *handled_ops_p = false;
2023
        if (ctx)
2024
          for (var = gimple_bind_vars (stmt); var ; var = DECL_CHAIN (var))
2025
            insert_decl_map (&ctx->cb, var, var);
2026
      }
2027
      break;
2028
    default:
2029
      *handled_ops_p = false;
2030
      break;
2031
    }
2032
 
2033
  return NULL_TREE;
2034
}
2035
 
2036
 
2037
/* Scan all the statements starting at the current statement.  CTX
2038
   contains context information about the OpenMP directives and
2039
   clauses found during the scan.  */
2040
 
2041
static void
2042
scan_omp (gimple_seq body, omp_context *ctx)
2043
{
2044
  location_t saved_location;
2045
  struct walk_stmt_info wi;
2046
 
2047
  memset (&wi, 0, sizeof (wi));
2048
  wi.info = ctx;
2049
  wi.want_locations = true;
2050
 
2051
  saved_location = input_location;
2052
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
2053
  input_location = saved_location;
2054
}
2055
 
2056
/* Re-gimplification and code generation routines.  */
2057
 
2058
/* Build a call to GOMP_barrier.  */
2059
 
2060
static tree
2061
build_omp_barrier (void)
2062
{
2063
  return build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_BARRIER), 0);
2064
}
2065
 
2066
/* If a context was created for STMT when it was scanned, return it.  */
2067
 
2068
static omp_context *
2069
maybe_lookup_ctx (gimple stmt)
2070
{
2071
  splay_tree_node n;
2072
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
2073
  return n ? (omp_context *) n->value : NULL;
2074
}
2075
 
2076
 
2077
/* Find the mapping for DECL in CTX or the immediately enclosing
2078
   context that has a mapping for DECL.
2079
 
2080
   If CTX is a nested parallel directive, we may have to use the decl
2081
   mappings created in CTX's parent context.  Suppose that we have the
2082
   following parallel nesting (variable UIDs shown for clarity):
2083
 
2084
        iD.1562 = 0;
2085
        #omp parallel shared(iD.1562)           -> outer parallel
2086
          iD.1562 = iD.1562 + 1;
2087
 
2088
          #omp parallel shared (iD.1562)        -> inner parallel
2089
             iD.1562 = iD.1562 - 1;
2090
 
2091
   Each parallel structure will create a distinct .omp_data_s structure
2092
   for copying iD.1562 in/out of the directive:
2093
 
2094
        outer parallel          .omp_data_s.1.i -> iD.1562
2095
        inner parallel          .omp_data_s.2.i -> iD.1562
2096
 
2097
   A shared variable mapping will produce a copy-out operation before
2098
   the parallel directive and a copy-in operation after it.  So, in
2099
   this case we would have:
2100
 
2101
        iD.1562 = 0;
2102
        .omp_data_o.1.i = iD.1562;
2103
        #omp parallel shared(iD.1562)           -> outer parallel
2104
          .omp_data_i.1 = &.omp_data_o.1
2105
          .omp_data_i.1->i = .omp_data_i.1->i + 1;
2106
 
2107
          .omp_data_o.2.i = iD.1562;            -> **
2108
          #omp parallel shared(iD.1562)         -> inner parallel
2109
            .omp_data_i.2 = &.omp_data_o.2
2110
            .omp_data_i.2->i = .omp_data_i.2->i - 1;
2111
 
2112
 
2113
    ** This is a problem.  The symbol iD.1562 cannot be referenced
2114
       inside the body of the outer parallel region.  But since we are
2115
       emitting this copy operation while expanding the inner parallel
2116
       directive, we need to access the CTX structure of the outer
2117
       parallel directive to get the correct mapping:
2118
 
2119
          .omp_data_o.2.i = .omp_data_i.1->i
2120
 
2121
    Since there may be other workshare or parallel directives enclosing
2122
    the parallel directive, it may be necessary to walk up the context
2123
    parent chain.  This is not a problem in general because nested
2124
    parallelism happens only rarely.  */
2125
 
2126
static tree
2127
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2128
{
2129
  tree t;
2130
  omp_context *up;
2131
 
2132
  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2133
    t = maybe_lookup_decl (decl, up);
2134
 
2135
  gcc_assert (!ctx->is_nested || t || is_global_var (decl));
2136
 
2137
  return t ? t : decl;
2138
}
2139
 
2140
 
2141
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
2142
   in outer contexts.  */
2143
 
2144
static tree
2145
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
2146
{
2147
  tree t = NULL;
2148
  omp_context *up;
2149
 
2150
  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
2151
    t = maybe_lookup_decl (decl, up);
2152
 
2153
  return t ? t : decl;
2154
}
2155
 
2156
 
2157
/* Construct the initialization value for reduction CLAUSE.  */
2158
 
2159
tree
2160
omp_reduction_init (tree clause, tree type)
2161
{
2162
  location_t loc = OMP_CLAUSE_LOCATION (clause);
2163
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
2164
    {
2165
    case PLUS_EXPR:
2166
    case MINUS_EXPR:
2167
    case BIT_IOR_EXPR:
2168
    case BIT_XOR_EXPR:
2169
    case TRUTH_OR_EXPR:
2170
    case TRUTH_ORIF_EXPR:
2171
    case TRUTH_XOR_EXPR:
2172
    case NE_EXPR:
2173
      return build_zero_cst (type);
2174
 
2175
    case MULT_EXPR:
2176
    case TRUTH_AND_EXPR:
2177
    case TRUTH_ANDIF_EXPR:
2178
    case EQ_EXPR:
2179
      return fold_convert_loc (loc, type, integer_one_node);
2180
 
2181
    case BIT_AND_EXPR:
2182
      return fold_convert_loc (loc, type, integer_minus_one_node);
2183
 
2184
    case MAX_EXPR:
2185
      if (SCALAR_FLOAT_TYPE_P (type))
2186
        {
2187
          REAL_VALUE_TYPE max, min;
2188
          if (HONOR_INFINITIES (TYPE_MODE (type)))
2189
            {
2190
              real_inf (&max);
2191
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
2192
            }
2193
          else
2194
            real_maxval (&min, 1, TYPE_MODE (type));
2195
          return build_real (type, min);
2196
        }
2197
      else
2198
        {
2199
          gcc_assert (INTEGRAL_TYPE_P (type));
2200
          return TYPE_MIN_VALUE (type);
2201
        }
2202
 
2203
    case MIN_EXPR:
2204
      if (SCALAR_FLOAT_TYPE_P (type))
2205
        {
2206
          REAL_VALUE_TYPE max;
2207
          if (HONOR_INFINITIES (TYPE_MODE (type)))
2208
            real_inf (&max);
2209
          else
2210
            real_maxval (&max, 0, TYPE_MODE (type));
2211
          return build_real (type, max);
2212
        }
2213
      else
2214
        {
2215
          gcc_assert (INTEGRAL_TYPE_P (type));
2216
          return TYPE_MAX_VALUE (type);
2217
        }
2218
 
2219
    default:
2220
      gcc_unreachable ();
2221
    }
2222
}
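
/* Illustrative summary of the initializers chosen above, assuming a
   plain arithmetic TYPE:

       reduction(+:x)   0      reduction(*:x)    1
       reduction(-:x)   0      reduction(&&:x)   1
       reduction(|:x)   0      reduction(&:x)    ~0
       reduction(^:x)   0      reduction(max:x)  minimum of TYPE
       reduction(||:x)  0      reduction(min:x)  maximum of TYPE

   For floating-point max/min the extreme is -inf/+inf when the mode
   honors infinities, and the most negative/positive finite value
   otherwise.  */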
2223
 
2224
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
2225
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
2226
   private variables.  Initialization statements go in ILIST, while calls
2227
   to destructors go in DLIST.  */
2228
 
2229
static void
2230
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
2231
                         omp_context *ctx)
2232
{
2233
  gimple_stmt_iterator diter;
2234
  tree c, dtor, copyin_seq, x, ptr;
2235
  bool copyin_by_ref = false;
2236
  bool lastprivate_firstprivate = false;
2237
  int pass;
2238
 
2239
  *dlist = gimple_seq_alloc ();
2240
  diter = gsi_start (*dlist);
2241
  copyin_seq = NULL;
2242
 
2243
  /* Do all the fixed sized types in the first pass, and the variable sized
2244
     types in the second pass.  This makes sure that the scalar arguments to
2245
     the variable sized types are processed before we use them in the
2246
     variable sized operations.  */
2247
  for (pass = 0; pass < 2; ++pass)
2248
    {
2249
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2250
        {
2251
          enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
2252
          tree var, new_var;
2253
          bool by_ref;
2254
          location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2255
 
2256
          switch (c_kind)
2257
            {
2258
            case OMP_CLAUSE_PRIVATE:
2259
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
2260
                continue;
2261
              break;
2262
            case OMP_CLAUSE_SHARED:
2263
              if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
2264
                {
2265
                  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
2266
                  continue;
2267
                }
2268
            case OMP_CLAUSE_FIRSTPRIVATE:
2269
            case OMP_CLAUSE_COPYIN:
2270
            case OMP_CLAUSE_REDUCTION:
2271
              break;
2272
            case OMP_CLAUSE_LASTPRIVATE:
2273
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2274
                {
2275
                  lastprivate_firstprivate = true;
2276
                  if (pass != 0)
2277
                    continue;
2278
                }
2279
              break;
2280
            default:
2281
              continue;
2282
            }
2283
 
2284
          new_var = var = OMP_CLAUSE_DECL (c);
2285
          if (c_kind != OMP_CLAUSE_COPYIN)
2286
            new_var = lookup_decl (var, ctx);
2287
 
2288
          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
2289
            {
2290
              if (pass != 0)
2291
                continue;
2292
            }
2293
          else if (is_variable_sized (var))
2294
            {
2295
              /* For variable sized types, we need to allocate the
2296
                 actual storage here.  Call alloca and store the
2297
                 result in the pointer decl that we created elsewhere.  */
2298
              if (pass == 0)
2299
                continue;
2300
 
2301
              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
2302
                {
2303
                  gimple stmt;
2304
                  tree tmp, atmp;
2305
 
2306
                  ptr = DECL_VALUE_EXPR (new_var);
2307
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
2308
                  ptr = TREE_OPERAND (ptr, 0);
2309
                  gcc_assert (DECL_P (ptr));
2310
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
2311
 
2312
                  /* void *tmp = __builtin_alloca */
2313
                  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2314
                  stmt = gimple_build_call (atmp, 1, x);
2315
                  tmp = create_tmp_var_raw (ptr_type_node, NULL);
2316
                  gimple_add_tmp_var (tmp);
2317
                  gimple_call_set_lhs (stmt, tmp);
2318
 
2319
                  gimple_seq_add_stmt (ilist, stmt);
2320
 
2321
                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
2322
                  gimplify_assign (ptr, x, ilist);
2323
                }
2324
            }
2325
          else if (is_reference (var))
2326
            {
2327
              /* For references that are being privatized for Fortran,
2328
                 allocate new backing storage for the new pointer
2329
                 variable.  This allows us to avoid changing all the
2330
                 code that expects a pointer to something that expects
2331
                 a direct variable.  Note that this doesn't apply to
2332
                 C++, since reference types are disallowed in data
2333
                 sharing clauses there, except for NRV optimized
2334
                 return values.  */
2335
              if (pass == 0)
2336
                continue;
2337
 
2338
              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
2339
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
2340
                {
2341
                  x = build_receiver_ref (var, false, ctx);
2342
                  x = build_fold_addr_expr_loc (clause_loc, x);
2343
                }
2344
              else if (TREE_CONSTANT (x))
2345
                {
2346
                  const char *name = NULL;
2347
                  if (DECL_NAME (var))
2348
                    name = IDENTIFIER_POINTER (DECL_NAME (new_var));
2349
 
2350
                  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
2351
                                          name);
2352
                  gimple_add_tmp_var (x);
2353
                  TREE_ADDRESSABLE (x) = 1;
2354
                  x = build_fold_addr_expr_loc (clause_loc, x);
2355
                }
2356
              else
2357
                {
2358
                  tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA);
2359
                  x = build_call_expr_loc (clause_loc, atmp, 1, x);
2360
                }
2361
 
2362
              x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
2363
              gimplify_assign (new_var, x, ilist);
2364
 
2365
              new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2366
            }
2367
          else if (c_kind == OMP_CLAUSE_REDUCTION
2368
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2369
            {
2370
              if (pass == 0)
2371
                continue;
2372
            }
2373
          else if (pass != 0)
2374
            continue;
2375
 
2376
          switch (OMP_CLAUSE_CODE (c))
2377
            {
2378
            case OMP_CLAUSE_SHARED:
2379
              /* Shared global vars are just accessed directly.  */
2380
              if (is_global_var (new_var))
2381
                break;
2382
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
2383
                 needs to be delayed until after fixup_child_record_type so
2384
                 that we get the correct type during the dereference.  */
2385
              by_ref = use_pointer_for_field (var, ctx);
2386
              x = build_receiver_ref (var, by_ref, ctx);
2387
              SET_DECL_VALUE_EXPR (new_var, x);
2388
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2389
 
2390
              /* ??? If VAR is not passed by reference, and the variable
2391
                 hasn't been initialized yet, then we'll get a warning for
2392
                 the store into the omp_data_s structure.  Ideally, we'd be
2393
                 able to notice this and not store anything at all, but
2394
                 we're generating code too early.  Suppress the warning.  */
2395
              if (!by_ref)
2396
                TREE_NO_WARNING (var) = 1;
2397
              break;
2398
 
2399
            case OMP_CLAUSE_LASTPRIVATE:
2400
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2401
                break;
2402
              /* FALLTHRU */
2403
 
2404
            case OMP_CLAUSE_PRIVATE:
2405
              if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
2406
                x = build_outer_var_ref (var, ctx);
2407
              else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2408
                {
2409
                  if (is_task_ctx (ctx))
2410
                    x = build_receiver_ref (var, false, ctx);
2411
                  else
2412
                    x = build_outer_var_ref (var, ctx);
2413
                }
2414
              else
2415
                x = NULL;
2416
              x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
2417
              if (x)
2418
                gimplify_and_add (x, ilist);
2419
              /* FALLTHRU */
2420
 
2421
            do_dtor:
2422
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
2423
              if (x)
2424
                {
2425
                  gimple_seq tseq = NULL;
2426
 
2427
                  dtor = x;
2428
                  gimplify_stmt (&dtor, &tseq);
2429
                  gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
2430
                }
2431
              break;
2432
 
2433
            case OMP_CLAUSE_FIRSTPRIVATE:
2434
              if (is_task_ctx (ctx))
2435
                {
2436
                  if (is_reference (var) || is_variable_sized (var))
2437
                    goto do_dtor;
2438
                  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
2439
                                                                          ctx))
2440
                           || use_pointer_for_field (var, NULL))
2441
                    {
2442
                      x = build_receiver_ref (var, false, ctx);
2443
                      SET_DECL_VALUE_EXPR (new_var, x);
2444
                      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
2445
                      goto do_dtor;
2446
                    }
2447
                }
2448
              x = build_outer_var_ref (var, ctx);
2449
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
2450
              gimplify_and_add (x, ilist);
2451
              goto do_dtor;
2452
              break;
2453
 
2454
            case OMP_CLAUSE_COPYIN:
2455
              by_ref = use_pointer_for_field (var, NULL);
2456
              x = build_receiver_ref (var, by_ref, ctx);
2457
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
2458
              append_to_statement_list (x, &copyin_seq);
2459
              copyin_by_ref |= by_ref;
2460
              break;
2461
 
2462
            case OMP_CLAUSE_REDUCTION:
2463
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2464
                {
2465
                  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2466
                  x = build_outer_var_ref (var, ctx);
2467
 
2468
                  if (is_reference (var))
2469
                    x = build_fold_addr_expr_loc (clause_loc, x);
2470
                  SET_DECL_VALUE_EXPR (placeholder, x);
2471
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2472
                  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
2473
                  gimple_seq_add_seq (ilist,
2474
                                      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
2475
                  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
2476
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
2477
                }
2478
              else
2479
                {
2480
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
2481
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
2482
                  gimplify_assign (new_var, x, ilist);
2483
                }
2484
              break;
2485
 
2486
            default:
2487
              gcc_unreachable ();
2488
            }
2489
        }
2490
    }
2491
 
2492
  /* The copyin sequence is not to be executed by the main thread, since
2493
     that would result in self-copies.  Perhaps not visible to scalars,
2494
     but it certainly is to C++ operator=.  */
2495
  if (copyin_seq)
2496
    {
2497
      x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
2498
                           0);
2499
      x = build2 (NE_EXPR, boolean_type_node, x,
2500
                  build_int_cst (TREE_TYPE (x), 0));
2501
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
2502
      gimplify_and_add (x, ilist);
2503
    }
2504
 
2505
  /* If any copyin variable is passed by reference, we must ensure the
2506
     master thread doesn't modify it before it is copied over in all
2507
     threads.  Similarly for variables in both firstprivate and
2508
     lastprivate clauses we need to ensure the lastprivate copying
2509
     happens after firstprivate copying in all threads.  */
2510
  if (copyin_by_ref || lastprivate_firstprivate)
2511
    gimplify_and_add (build_omp_barrier (), ilist);
2512
}
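
/* Illustrative sketch of the receiver-side code built above (variable
   names are assumed): for "firstprivate (b) copyin (t)" on a parallel,
   ILIST ends up containing roughly

       b' = .omp_data_i->b;                       <- copy constructor
       if (__builtin_omp_get_thread_num () != 0)
         t = .omp_data_i->t;                      <- copyin, master skips it

   while DLIST collects the matching destructor calls and a
   GOMP_barrier () is appended whenever a by-reference copyin or a
   combined firstprivate/lastprivate variable requires one.  */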
2513
 
2514
 
2515
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
2516
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
2517
   always true.   */
2518
 
2519
static void
2520
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
2521
                            omp_context *ctx)
2522
{
2523
  tree x, c, label = NULL;
2524
  bool par_clauses = false;
2525
 
2526
  /* Early exit if there are no lastprivate clauses.  */
2527
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
2528
  if (clauses == NULL)
2529
    {
2530
      /* If this was a workshare clause, see if it had been combined
2531
         with its parallel.  In that case, look for the clauses on the
2532
         parallel statement itself.  */
2533
      if (is_parallel_ctx (ctx))
2534
        return;
2535
 
2536
      ctx = ctx->outer;
2537
      if (ctx == NULL || !is_parallel_ctx (ctx))
2538
        return;
2539
 
2540
      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2541
                                 OMP_CLAUSE_LASTPRIVATE);
2542
      if (clauses == NULL)
2543
        return;
2544
      par_clauses = true;
2545
    }
2546
 
2547
  if (predicate)
2548
    {
2549
      gimple stmt;
2550
      tree label_true, arm1, arm2;
2551
 
2552
      label = create_artificial_label (UNKNOWN_LOCATION);
2553
      label_true = create_artificial_label (UNKNOWN_LOCATION);
2554
      arm1 = TREE_OPERAND (predicate, 0);
2555
      arm2 = TREE_OPERAND (predicate, 1);
2556
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
2557
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
2558
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
2559
                                label_true, label);
2560
      gimple_seq_add_stmt (stmt_list, stmt);
2561
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
2562
    }
2563
 
2564
  for (c = clauses; c ;)
2565
    {
2566
      tree var, new_var;
2567
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2568
 
2569
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
2570
        {
2571
          var = OMP_CLAUSE_DECL (c);
2572
          new_var = lookup_decl (var, ctx);
2573
 
2574
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
2575
            {
2576
              lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
2577
              gimple_seq_add_seq (stmt_list,
2578
                                  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
2579
            }
2580
          OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
2581
 
2582
          x = build_outer_var_ref (var, ctx);
2583
          if (is_reference (var))
2584
            new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2585
          x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
2586
          gimplify_and_add (x, stmt_list);
2587
        }
2588
      c = OMP_CLAUSE_CHAIN (c);
2589
      if (c == NULL && !par_clauses)
2590
        {
2591
          /* If this was a workshare clause, see if it had been combined
2592
             with its parallel.  In that case, continue looking for the
2593
             clauses also on the parallel statement itself.  */
2594
          if (is_parallel_ctx (ctx))
2595
            break;
2596
 
2597
          ctx = ctx->outer;
2598
          if (ctx == NULL || !is_parallel_ctx (ctx))
2599
            break;
2600
 
2601
          c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
2602
                               OMP_CLAUSE_LASTPRIVATE);
2603
          par_clauses = true;
2604
        }
2605
    }
2606
 
2607
  if (label)
2608
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
2609
}
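
/* Illustrative sketch (names assumed): when PREDICATE is given, the
   copy-back is guarded so that only the thread which ran the
   sequentially last iteration publishes its value, roughly

       if (D.iter == D.last)
         {
           x = x';                  <- one omp_clause_assign_op per clause
         }

   using the condition and the two labels built above.  */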
2610
 
2611
 
2612
/* Generate code to implement the REDUCTION clauses.  */
2613
 
2614
static void
2615
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
2616
{
2617
  gimple_seq sub_seq = NULL;
2618
  gimple stmt;
2619
  tree x, c;
2620
  int count = 0;
2621
 
2622
  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
2623
     update in that case, otherwise use a lock.  */
2624
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
2625
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
2626
      {
2627
        if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2628
          {
2629
            /* Never use OMP_ATOMIC for array reductions.  */
2630
            count = -1;
2631
            break;
2632
          }
2633
        count++;
2634
      }
2635
 
2636
  if (count == 0)
2637
    return;
2638
 
2639
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2640
    {
2641
      tree var, ref, new_var;
2642
      enum tree_code code;
2643
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2644
 
2645
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
2646
        continue;
2647
 
2648
      var = OMP_CLAUSE_DECL (c);
2649
      new_var = lookup_decl (var, ctx);
2650
      if (is_reference (var))
2651
        new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2652
      ref = build_outer_var_ref (var, ctx);
2653
      code = OMP_CLAUSE_REDUCTION_CODE (c);
2654
 
2655
      /* reduction(-:var) sums up the partial results, so it acts
2656
         identically to reduction(+:var).  */
2657
      if (code == MINUS_EXPR)
2658
        code = PLUS_EXPR;
2659
 
2660
      if (count == 1)
2661
        {
2662
          tree addr = build_fold_addr_expr_loc (clause_loc, ref);
2663
 
2664
          addr = save_expr (addr);
2665
          ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
2666
          x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
2667
          x = build2 (OMP_ATOMIC, void_type_node, addr, x);
2668
          gimplify_and_add (x, stmt_seqp);
2669
          return;
2670
        }
2671
 
2672
      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
2673
        {
2674
          tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
2675
 
2676
          if (is_reference (var))
2677
            ref = build_fold_addr_expr_loc (clause_loc, ref);
2678
          SET_DECL_VALUE_EXPR (placeholder, ref);
2679
          DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
2680
          lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
2681
          gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
2682
          OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
2683
          OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
2684
        }
2685
      else
2686
        {
2687
          x = build2 (code, TREE_TYPE (ref), ref, new_var);
2688
          ref = build_outer_var_ref (var, ctx);
2689
          gimplify_assign (ref, x, &sub_seq);
2690
        }
2691
    }
2692
 
2693
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
2694
                            0);
2695
  gimple_seq_add_stmt (stmt_seqp, stmt);
2696
 
2697
  gimple_seq_add_seq (stmt_seqp, sub_seq);
2698
 
2699
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
2700
                            0);
2701
  gimple_seq_add_stmt (stmt_seqp, stmt);
2702
}
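
/* Illustrative sketch (names assumed): a single scalar clause such as
   reduction(+:s) is merged with one atomic update, conceptually

       #pragma omp atomic
       s = s + s';

   whereas two or more clauses, or any array/placeholder reduction, are
   merged under the global lock instead:

       GOMP_atomic_start ();
       s1 = s1 + s1';
       s2 = s2 * s2';
       GOMP_atomic_end ();  */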
2703
 
2704
 
2705
/* Generate code to implement the COPYPRIVATE clauses.  */
2706
 
2707
static void
2708
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
2709
                            omp_context *ctx)
2710
{
2711
  tree c;
2712
 
2713
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2714
    {
2715
      tree var, new_var, ref, x;
2716
      bool by_ref;
2717
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2718
 
2719
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
2720
        continue;
2721
 
2722
      var = OMP_CLAUSE_DECL (c);
2723
      by_ref = use_pointer_for_field (var, NULL);
2724
 
2725
      ref = build_sender_ref (var, ctx);
2726
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
2727
      if (by_ref)
2728
        {
2729
          x = build_fold_addr_expr_loc (clause_loc, new_var);
2730
          x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
2731
        }
2732
      gimplify_assign (ref, x, slist);
2733
 
2734
      ref = build_receiver_ref (var, false, ctx);
2735
      if (by_ref)
2736
        {
2737
          ref = fold_convert_loc (clause_loc,
2738
                                  build_pointer_type (TREE_TYPE (new_var)),
2739
                                  ref);
2740
          ref = build_fold_indirect_ref_loc (clause_loc, ref);
2741
        }
2742
      if (is_reference (var))
2743
        {
2744
          ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
2745
          ref = build_simple_mem_ref_loc (clause_loc, ref);
2746
          new_var = build_simple_mem_ref_loc (clause_loc, new_var);
2747
        }
2748
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
2749
      gimplify_and_add (x, rlist);
2750
    }
2751
}
2752
 
2753
 
2754
/* Generate code to implement the FIRSTPRIVATE, COPYIN, LASTPRIVATE
2755
   and REDUCTION clauses from the sender (aka parent) side.  */
2756
 
2757
static void
2758
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
2759
                    omp_context *ctx)
2760
{
2761
  tree c;
2762
 
2763
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
2764
    {
2765
      tree val, ref, x, var;
2766
      bool by_ref, do_in = false, do_out = false;
2767
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
2768
 
2769
      switch (OMP_CLAUSE_CODE (c))
2770
        {
2771
        case OMP_CLAUSE_PRIVATE:
2772
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
2773
            break;
2774
          continue;
2775
        case OMP_CLAUSE_FIRSTPRIVATE:
2776
        case OMP_CLAUSE_COPYIN:
2777
        case OMP_CLAUSE_LASTPRIVATE:
2778
        case OMP_CLAUSE_REDUCTION:
2779
          break;
2780
        default:
2781
          continue;
2782
        }
2783
 
2784
      val = OMP_CLAUSE_DECL (c);
2785
      var = lookup_decl_in_outer_ctx (val, ctx);
2786
 
2787
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
2788
          && is_global_var (var))
2789
        continue;
2790
      if (is_variable_sized (val))
2791
        continue;
2792
      by_ref = use_pointer_for_field (val, NULL);
2793
 
2794
      switch (OMP_CLAUSE_CODE (c))
2795
        {
2796
        case OMP_CLAUSE_PRIVATE:
2797
        case OMP_CLAUSE_FIRSTPRIVATE:
2798
        case OMP_CLAUSE_COPYIN:
2799
          do_in = true;
2800
          break;
2801
 
2802
        case OMP_CLAUSE_LASTPRIVATE:
2803
          if (by_ref || is_reference (val))
2804
            {
2805
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
2806
                continue;
2807
              do_in = true;
2808
            }
2809
          else
2810
            {
2811
              do_out = true;
2812
              if (lang_hooks.decls.omp_private_outer_ref (val))
2813
                do_in = true;
2814
            }
2815
          break;
2816
 
2817
        case OMP_CLAUSE_REDUCTION:
2818
          do_in = true;
2819
          do_out = !(by_ref || is_reference (val));
2820
          break;
2821
 
2822
        default:
2823
          gcc_unreachable ();
2824
        }
2825
 
2826
      if (do_in)
2827
        {
2828
          ref = build_sender_ref (val, ctx);
2829
          x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
2830
          gimplify_assign (ref, x, ilist);
2831
          if (is_task_ctx (ctx))
2832
            DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
2833
        }
2834
 
2835
      if (do_out)
2836
        {
2837
          ref = build_sender_ref (val, ctx);
2838
          gimplify_assign (var, ref, olist);
2839
        }
2840
    }
2841
}
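
/* Illustrative sketch of the sender side (names assumed): for
   "firstprivate (b) lastprivate (x)" on a parallel, ILIST receives the
   copy-in stores and OLIST the copy-out loads, roughly

       .omp_data_o.b = b;        <- emitted before the region starts
       ...
       x = .omp_data_o.x;        <- emitted after the region ends

   with the address of the variable stored instead whenever
   use_pointer_for_field decided the field is passed by reference.  */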
2842
 
2843
/* Generate code to implement SHARED from the sender (aka parent)
2844
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
2845
   list things that got automatically shared.  */
2846
 
2847
static void
2848
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
2849
{
2850
  tree var, ovar, nvar, f, x, record_type;
2851
 
2852
  if (ctx->record_type == NULL)
2853
    return;
2854
 
2855
  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
2856
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
2857
    {
2858
      ovar = DECL_ABSTRACT_ORIGIN (f);
2859
      nvar = maybe_lookup_decl (ovar, ctx);
2860
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
2861
        continue;
2862
 
2863
      /* If CTX is a nested parallel directive, find the immediately
2864
         enclosing parallel or workshare construct that contains a
2865
         mapping for OVAR.  */
2866
      var = lookup_decl_in_outer_ctx (ovar, ctx);
2867
 
2868
      if (use_pointer_for_field (ovar, ctx))
2869
        {
2870
          x = build_sender_ref (ovar, ctx);
2871
          var = build_fold_addr_expr (var);
2872
          gimplify_assign (x, var, ilist);
2873
        }
2874
      else
2875
        {
2876
          x = build_sender_ref (ovar, ctx);
2877
          gimplify_assign (x, var, ilist);
2878
 
2879
          if (!TREE_READONLY (var)
2880
              /* We don't need to receive a new reference to a result
2881
                 or parm decl.  In fact we may not store to it as we will
2882
                 invalidate any pending RSO and generate wrong gimple
2883
                 during inlining.  */
2884
              && !((TREE_CODE (var) == RESULT_DECL
2885
                    || TREE_CODE (var) == PARM_DECL)
2886
                   && DECL_BY_REFERENCE (var)))
2887
            {
2888
              x = build_sender_ref (ovar, ctx);
2889
              gimplify_assign (var, x, olist);
2890
            }
2891
        }
2892
    }
2893
}
2894
 
2895
 
2896
/* A convenience function to build an empty GIMPLE_COND with just the
2897
   condition.  */
2898
 
2899
static gimple
2900
gimple_build_cond_empty (tree cond)
2901
{
2902
  enum tree_code pred_code;
2903
  tree lhs, rhs;
2904
 
2905
  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
2906
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
2907
}
2908
 
2909
 
2910
/* Build the function calls to GOMP_parallel_start etc to actually
2911
   generate the parallel operation.  REGION is the parallel region
2912
   being expanded.  BB is the block where the code is to be inserted.  WS_ARGS
2913
   will be set if this is a call to a combined parallel+workshare
2914
   construct; it contains the list of additional arguments needed by
2915
   the workshare construct.  */
2916
 
2917
static void
2918
expand_parallel_call (struct omp_region *region, basic_block bb,
2919
                      gimple entry_stmt, VEC(tree,gc) *ws_args)
2920
{
2921
  tree t, t1, t2, val, cond, c, clauses;
2922
  gimple_stmt_iterator gsi;
2923
  gimple stmt;
2924
  enum built_in_function start_ix;
2925
  int start_ix2;
2926
  location_t clause_loc;
2927
  VEC(tree,gc) *args;
2928
 
2929
  clauses = gimple_omp_parallel_clauses (entry_stmt);
2930
 
2931
  /* Determine what flavor of GOMP_parallel_start we will be
2932
     emitting.  */
2933
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
2934
  if (is_combined_parallel (region))
2935
    {
2936
      switch (region->inner->type)
2937
        {
2938
        case GIMPLE_OMP_FOR:
2939
          gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
2940
          start_ix2 = ((int)BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
2941
                       + (region->inner->sched_kind
2942
                          == OMP_CLAUSE_SCHEDULE_RUNTIME
2943
                          ? 3 : region->inner->sched_kind));
2944
          start_ix = (enum built_in_function)start_ix2;
2945
          break;
2946
        case GIMPLE_OMP_SECTIONS:
2947
          start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
2948
          break;
2949
        default:
2950
          gcc_unreachable ();
2951
        }
2952
    }
2953
 
2954
  /* By default, the value of NUM_THREADS is zero (selected at run time)
2955
     and there is no conditional.  */
2956
  cond = NULL_TREE;
2957
  val = build_int_cst (unsigned_type_node, 0);
2958
 
2959
  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
2960
  if (c)
2961
    cond = OMP_CLAUSE_IF_EXPR (c);
2962
 
2963
  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
2964
  if (c)
2965
    {
2966
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
2967
      clause_loc = OMP_CLAUSE_LOCATION (c);
2968
    }
2969
  else
2970
    clause_loc = gimple_location (entry_stmt);
2971
 
2972
  /* Ensure 'val' is of the correct type.  */
2973
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);
2974
 
2975
  /* If we found the clause 'if (cond)', build either
2976
     (cond != 0) or (cond ? val : 1u).  */
2977
  if (cond)
2978
    {
2979
      gimple_stmt_iterator gsi;
2980
 
2981
      cond = gimple_boolify (cond);
2982
 
2983
      if (integer_zerop (val))
2984
        val = fold_build2_loc (clause_loc,
2985
                           EQ_EXPR, unsigned_type_node, cond,
2986
                           build_int_cst (TREE_TYPE (cond), 0));
2987
      else
2988
        {
2989
          basic_block cond_bb, then_bb, else_bb;
2990
          edge e, e_then, e_else;
2991
          tree tmp_then, tmp_else, tmp_join, tmp_var;
2992
 
2993
          tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
2994
          if (gimple_in_ssa_p (cfun))
2995
            {
2996
              tmp_then = make_ssa_name (tmp_var, NULL);
2997
              tmp_else = make_ssa_name (tmp_var, NULL);
2998
              tmp_join = make_ssa_name (tmp_var, NULL);
2999
            }
3000
          else
3001
            {
3002
              tmp_then = tmp_var;
3003
              tmp_else = tmp_var;
3004
              tmp_join = tmp_var;
3005
            }
3006
 
3007
          e = split_block (bb, NULL);
3008
          cond_bb = e->src;
3009
          bb = e->dest;
3010
          remove_edge (e);
3011
 
3012
          then_bb = create_empty_bb (cond_bb);
3013
          else_bb = create_empty_bb (then_bb);
3014
          set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
3015
          set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);
3016
 
3017
          stmt = gimple_build_cond_empty (cond);
3018
          gsi = gsi_start_bb (cond_bb);
3019
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3020
 
3021
          gsi = gsi_start_bb (then_bb);
3022
          stmt = gimple_build_assign (tmp_then, val);
3023
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3024
 
3025
          gsi = gsi_start_bb (else_bb);
3026
          stmt = gimple_build_assign
3027
                   (tmp_else, build_int_cst (unsigned_type_node, 1));
3028
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3029
 
3030
          make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
3031
          make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
3032
          e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
3033
          e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);
3034
 
3035
          if (gimple_in_ssa_p (cfun))
3036
            {
3037
              gimple phi = create_phi_node (tmp_join, bb);
3038
              SSA_NAME_DEF_STMT (tmp_join) = phi;
3039
              add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
3040
              add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
3041
            }
3042
 
3043
          val = tmp_join;
3044
        }
3045
 
3046
      gsi = gsi_start_bb (bb);
3047
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
3048
                                      false, GSI_CONTINUE_LINKING);
3049
    }
3050
 
3051
  gsi = gsi_last_bb (bb);
3052
  t = gimple_omp_parallel_data_arg (entry_stmt);
3053
  if (t == NULL)
3054
    t1 = null_pointer_node;
3055
  else
3056
    t1 = build_fold_addr_expr (t);
3057
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));
3058
 
3059
  args = VEC_alloc (tree, gc, 3 + VEC_length (tree, ws_args));
3060
  VEC_quick_push (tree, args, t2);
3061
  VEC_quick_push (tree, args, t1);
3062
  VEC_quick_push (tree, args, val);
3063
  VEC_splice (tree, args, ws_args);
3064
 
3065
  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
3066
                               builtin_decl_explicit (start_ix), args);
3067
 
3068
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3069
                            false, GSI_CONTINUE_LINKING);
3070
 
3071
  t = gimple_omp_parallel_data_arg (entry_stmt);
3072
  if (t == NULL)
3073
    t = null_pointer_node;
3074
  else
3075
    t = build_fold_addr_expr (t);
3076
  t = build_call_expr_loc (gimple_location (entry_stmt),
3077
                           gimple_omp_parallel_child_fn (entry_stmt), 1, t);
3078
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3079
                            false, GSI_CONTINUE_LINKING);
3080
 
3081
  t = build_call_expr_loc (gimple_location (entry_stmt),
3082
                           builtin_decl_explicit (BUILT_IN_GOMP_PARALLEL_END),
3083
                           0);
3084
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3085
                            false, GSI_CONTINUE_LINKING);
3086
}
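
/* Illustrative sketch of the sequence built above (names assumed):

       __builtin_GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o.1, D.val);
       foo._omp_fn.0 (&.omp_data_o.1);     <- encountering thread joins in
       __builtin_GOMP_parallel_end ();

   D.val is 0 (number of threads selected at run time) unless an if or
   num_threads clause said otherwise, and for combined
   parallel+workshare regions the start call becomes one of the
   GOMP_parallel_loop_*_start / GOMP_parallel_sections_start entry
   points with the extra WS_ARGS appended.  */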
3087
 
3088
 
3089
/* Build the function call to GOMP_task to actually
3090
   generate the task operation.  BB is the block where the code is to be inserted.  */
3091
 
3092
static void
3093
expand_task_call (basic_block bb, gimple entry_stmt)
3094
{
3095
  tree t, t1, t2, t3, flags, cond, c, c2, clauses;
3096
  gimple_stmt_iterator gsi;
3097
  location_t loc = gimple_location (entry_stmt);
3098
 
3099
  clauses = gimple_omp_task_clauses (entry_stmt);
3100
 
3101
  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
3102
  if (c)
3103
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
3104
  else
3105
    cond = boolean_true_node;
3106
 
3107
  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
3108
  c2 = find_omp_clause (clauses, OMP_CLAUSE_MERGEABLE);
3109
  flags = build_int_cst (unsigned_type_node,
3110
                         (c ? 1 : 0) + (c2 ? 4 : 0));
3111
 
3112
  c = find_omp_clause (clauses, OMP_CLAUSE_FINAL);
3113
  if (c)
3114
    {
3115
      c = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (c));
3116
      c = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, c,
3117
                           build_int_cst (unsigned_type_node, 2),
3118
                           build_int_cst (unsigned_type_node, 0));
3119
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, c);
3120
    }
3121
 
3122
  gsi = gsi_last_bb (bb);
3123
  t = gimple_omp_task_data_arg (entry_stmt);
3124
  if (t == NULL)
3125
    t2 = null_pointer_node;
3126
  else
3127
    t2 = build_fold_addr_expr_loc (loc, t);
3128
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
3129
  t = gimple_omp_task_copy_fn (entry_stmt);
3130
  if (t == NULL)
3131
    t3 = null_pointer_node;
3132
  else
3133
    t3 = build_fold_addr_expr_loc (loc, t);
3134
 
3135
  t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
3136
                       7, t1, t2, t3,
3137
                       gimple_omp_task_arg_size (entry_stmt),
3138
                       gimple_omp_task_arg_align (entry_stmt), cond, flags);
3139
 
3140
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3141
                            false, GSI_CONTINUE_LINKING);
3142
}
3143
 
3144
 
3145
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3146
   catch handler and return it.  This prevents programs from violating the
3147
   structured block semantics with throws.  */
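 
/* Schematically, the result is
 
       GIMPLE_TRY_CATCH (BODY,
                         GIMPLE_EH_MUST_NOT_THROW (<failure callback>))
 
   so an exception trying to escape the structured block reaches the
   MUST_NOT_THROW handler instead of propagating out of it.  The failure
   callback is whatever eh_protect_cleanup_actions returns (e.g. the C++
   terminate routine), or __builtin_trap as the fallback.  */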
3148
 
3149
static gimple_seq
3150
maybe_catch_exception (gimple_seq body)
3151
{
3152
  gimple g;
3153
  tree decl;
3154
 
3155
  if (!flag_exceptions)
3156
    return body;
3157
 
3158
  if (lang_hooks.eh_protect_cleanup_actions != NULL)
3159
    decl = lang_hooks.eh_protect_cleanup_actions ();
3160
  else
3161
    decl = builtin_decl_explicit (BUILT_IN_TRAP);
3162
 
3163
  g = gimple_build_eh_must_not_throw (decl);
3164
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
3165
                        GIMPLE_TRY_CATCH);
3166
 
3167
  return gimple_seq_alloc_with_stmt (g);
3168
}
3169
 
3170
/* Chain all the DECLs in V together by their DECL_CHAIN fields.  */
3171
 
3172
static tree
3173
vec2chain (VEC(tree,gc) *v)
3174
{
3175
  tree chain = NULL_TREE, t;
3176
  unsigned ix;
3177
 
3178
  FOR_EACH_VEC_ELT_REVERSE (tree, v, ix, t)
3179
    {
3180
      DECL_CHAIN (t) = chain;
3181
      chain = t;
3182
    }
3183
 
3184
  return chain;
3185
}
3186
 
3187
 
3188
/* Remove barriers in REGION->EXIT's block.  Note that this is only
3189
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
3190
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
3191
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
3192
   removed.  */
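 
/* For example, in
 
       #pragma omp parallel
       {
         #pragma omp for
         for (i = 0; i < n; i++)
           ...
       }
 
   the barrier at the end of the worksharing loop is immediately followed
   by the implicit barrier of the parallel region, so the former can be
   turned into a nowait return -- unless queued tasks might still be
   using shared locals, which is what the check below guards against.  */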
3193
 
3194
static void
3195
remove_exit_barrier (struct omp_region *region)
3196
{
3197
  gimple_stmt_iterator gsi;
3198
  basic_block exit_bb;
3199
  edge_iterator ei;
3200
  edge e;
3201
  gimple stmt;
3202
  int any_addressable_vars = -1;
3203
 
3204
  exit_bb = region->exit;
3205
 
3206
  /* If the parallel region doesn't return, we don't have a REGION->EXIT
3207
     block at all.  */
3208
  if (! exit_bb)
3209
    return;
3210
 
3211
  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
3212
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
3213
     statements that can appear in between are extremely limited -- no
3214
     memory operations at all.  Here, we allow nothing at all, so the
3215
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
3216
  gsi = gsi_last_bb (exit_bb);
3217
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3218
  gsi_prev (&gsi);
3219
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
3220
    return;
3221
 
3222
  FOR_EACH_EDGE (e, ei, exit_bb->preds)
3223
    {
3224
      gsi = gsi_last_bb (e->src);
3225
      if (gsi_end_p (gsi))
3226
        continue;
3227
      stmt = gsi_stmt (gsi);
3228
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
3229
          && !gimple_omp_return_nowait_p (stmt))
3230
        {
3231
          /* OpenMP 3.0 tasks unfortunately prevent this optimization
3232
             in many cases.  If there could be tasks queued, the barrier
3233
             might be needed to let the tasks run before some local
3234
             variable of the parallel that the task uses as shared
3235
             runs out of scope.  The task can be spawned either
3236
             from within the current function (this would be easy to check)
3237
             or from some function it calls and gets passed an address
3238
             of such a variable.  */
3239
          if (any_addressable_vars < 0)
3240
            {
3241
              gimple parallel_stmt = last_stmt (region->entry);
3242
              tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
3243
              tree local_decls, block, decl;
3244
              unsigned ix;
3245
 
3246
              any_addressable_vars = 0;
3247
              FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
3248
                if (TREE_ADDRESSABLE (decl))
3249
                  {
3250
                    any_addressable_vars = 1;
3251
                    break;
3252
                  }
3253
              for (block = gimple_block (stmt);
3254
                   !any_addressable_vars
3255
                   && block
3256
                   && TREE_CODE (block) == BLOCK;
3257
                   block = BLOCK_SUPERCONTEXT (block))
3258
                {
3259
                  for (local_decls = BLOCK_VARS (block);
3260
                       local_decls;
3261
                       local_decls = DECL_CHAIN (local_decls))
3262
                    if (TREE_ADDRESSABLE (local_decls))
3263
                      {
3264
                        any_addressable_vars = 1;
3265
                        break;
3266
                      }
3267
                  if (block == gimple_block (parallel_stmt))
3268
                    break;
3269
                }
3270
            }
3271
          if (!any_addressable_vars)
3272
            gimple_omp_return_set_nowait (stmt);
3273
        }
3274
    }
3275
}
3276
 
3277
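/* Walk REGION and every region nested within it, removing the redundant
   exit barrier from each GIMPLE_OMP_PARALLEL region found.  */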
static void
3278
remove_exit_barriers (struct omp_region *region)
3279
{
3280
  if (region->type == GIMPLE_OMP_PARALLEL)
3281
    remove_exit_barrier (region);
3282
 
3283
  if (region->inner)
3284
    {
3285
      region = region->inner;
3286
      remove_exit_barriers (region);
3287
      while (region->next)
3288
        {
3289
          region = region->next;
3290
          remove_exit_barriers (region);
3291
        }
3292
    }
3293
}
3294
 
3295
/* Optimize omp_get_thread_num () and omp_get_num_threads ()
3296
   calls.  These can't be declared as const functions, but
3297
   within one parallel body they are constant, so they can be
3298
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
3299
   which are declared const.  Similarly for a task body, except
3300
   that in an untied task omp_get_thread_num () can change at any task
3301
   scheduling point.  */
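 
/* E.g. within one parallel body,
 
       a = omp_get_thread_num ();
       ...
       b = omp_get_thread_num ();
 
   both calls are redirected to the const __builtin_omp_get_thread_num (),
   allowing later passes to CSE them into a single call.  (Illustrative
   only; the transformation is exactly the fndecl swap done below.)  */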
3302
 
3303
static void
3304
optimize_omp_library_calls (gimple entry_stmt)
3305
{
3306
  basic_block bb;
3307
  gimple_stmt_iterator gsi;
3308
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3309
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
3310
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3311
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
3312
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
3313
                      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
3314
                                          OMP_CLAUSE_UNTIED) != NULL);
3315
 
3316
  FOR_EACH_BB (bb)
3317
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
3318
      {
3319
        gimple call = gsi_stmt (gsi);
3320
        tree decl;
3321
 
3322
        if (is_gimple_call (call)
3323
            && (decl = gimple_call_fndecl (call))
3324
            && DECL_EXTERNAL (decl)
3325
            && TREE_PUBLIC (decl)
3326
            && DECL_INITIAL (decl) == NULL)
3327
          {
3328
            tree built_in;
3329
 
3330
            if (DECL_NAME (decl) == thr_num_id)
3331
              {
3332
                /* In #pragma omp task untied omp_get_thread_num () can change
3333
                   during the execution of the task region.  */
3334
                if (untied_task)
3335
                  continue;
3336
                built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
3337
              }
3338
            else if (DECL_NAME (decl) == num_thr_id)
3339
              built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
3340
            else
3341
              continue;
3342
 
3343
            if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
3344
                || gimple_call_num_args (call) != 0)
3345
              continue;
3346
 
3347
            if (flag_exceptions && !TREE_NOTHROW (decl))
3348
              continue;
3349
 
3350
            if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
3351
                || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
3352
                                        TREE_TYPE (TREE_TYPE (built_in))))
3353
              continue;
3354
 
3355
            gimple_call_set_fndecl (call, built_in);
3356
          }
3357
      }
3358
}
3359
 
3360
/* Expand the OpenMP parallel or task directive starting at REGION.  */
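 
/* As a rough illustration, given
 
       #pragma omp parallel shared(x)
         x++;
 
   the body has already been outlined into a child function along the
   lines of
 
       static void foo._omp_fn.0 (struct .omp_data_s.0 *p)
       {
         p->x = p->x + 1;
       }
 
   This function moves the region's blocks into that child's CFG, fixes
   up the data-sharing argument, registers the child with the callgraph,
   and finally emits the runtime call (expand_parallel_call or
   expand_task_call) in the parent.  Names like foo._omp_fn.0 and
   .omp_data_s.0 merely illustrate the compiler's internal naming.  */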
3361
 
3362
static void
3363
expand_omp_taskreg (struct omp_region *region)
3364
{
3365
  basic_block entry_bb, exit_bb, new_bb;
3366
  struct function *child_cfun;
3367
  tree child_fn, block, t;
3368
  tree save_current;
3369
  gimple_stmt_iterator gsi;
3370
  gimple entry_stmt, stmt;
3371
  edge e;
3372
  VEC(tree,gc) *ws_args;
3373
 
3374
  entry_stmt = last_stmt (region->entry);
3375
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
3376
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
3377
  /* If this function has already been instrumented, make sure
3378
     the child function isn't instrumented again.  */
3379
  child_cfun->after_tree_profile = cfun->after_tree_profile;
3380
 
3381
  entry_bb = region->entry;
3382
  exit_bb = region->exit;
3383
 
3384
  if (is_combined_parallel (region))
3385
    ws_args = region->ws_args;
3386
  else
3387
    ws_args = NULL;
3388
 
3389
  if (child_cfun->cfg)
3390
    {
3391
      /* Due to inlining, it may happen that we have already outlined
3392
         the region, in which case all we need to do is make the
3393
         sub-graph unreachable and emit the parallel call.  */
3394
      edge entry_succ_e, exit_succ_e;
3395
      gimple_stmt_iterator gsi;
3396
 
3397
      entry_succ_e = single_succ_edge (entry_bb);
3398
 
3399
      gsi = gsi_last_bb (entry_bb);
3400
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
3401
                  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
3402
      gsi_remove (&gsi, true);
3403
 
3404
      new_bb = entry_bb;
3405
      if (exit_bb)
3406
        {
3407
          exit_succ_e = single_succ_edge (exit_bb);
3408
          make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
3409
        }
3410
      remove_edge_and_dominated_blocks (entry_succ_e);
3411
    }
3412
  else
3413
    {
3414
      unsigned srcidx, dstidx, num;
3415
 
3416
      /* If the parallel region needs data sent from the parent
3417
         function, then the very first statement (except possible
3418
         tree profile counter updates) of the parallel body
3419
         is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
3420
         &.OMP_DATA_O is passed as an argument to the child function,
3421
         we need to replace it with the argument as seen by the child
3422
         function.
3423
 
3424
         In most cases, this will end up being the identity assignment
3425
         .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
3426
         a function call that has been inlined, the original PARM_DECL
3427
         .OMP_DATA_I may have been converted into a different local
3428
         variable, in which case we need to keep the assignment.  */
3429
      if (gimple_omp_taskreg_data_arg (entry_stmt))
3430
        {
3431
          basic_block entry_succ_bb = single_succ (entry_bb);
3432
          gimple_stmt_iterator gsi;
3433
          tree arg, narg;
3434
          gimple parcopy_stmt = NULL;
3435
 
3436
          for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
3437
            {
3438
              gimple stmt;
3439
 
3440
              gcc_assert (!gsi_end_p (gsi));
3441
              stmt = gsi_stmt (gsi);
3442
              if (gimple_code (stmt) != GIMPLE_ASSIGN)
3443
                continue;
3444
 
3445
              if (gimple_num_ops (stmt) == 2)
3446
                {
3447
                  tree arg = gimple_assign_rhs1 (stmt);
3448
 
3449
                  /* We're ignoring the subcode because we're
3450
                     effectively doing a STRIP_NOPS.  */
3451
 
3452
                  if (TREE_CODE (arg) == ADDR_EXPR
3453
                      && TREE_OPERAND (arg, 0)
3454
                        == gimple_omp_taskreg_data_arg (entry_stmt))
3455
                    {
3456
                      parcopy_stmt = stmt;
3457
                      break;
3458
                    }
3459
                }
3460
            }
3461
 
3462
          gcc_assert (parcopy_stmt != NULL);
3463
          arg = DECL_ARGUMENTS (child_fn);
3464
 
3465
          if (!gimple_in_ssa_p (cfun))
3466
            {
3467
              if (gimple_assign_lhs (parcopy_stmt) == arg)
3468
                gsi_remove (&gsi, true);
3469
              else
3470
                {
3471
                  /* ?? Is setting the subcode really necessary ??  */
3472
                  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
3473
                  gimple_assign_set_rhs1 (parcopy_stmt, arg);
3474
                }
3475
            }
3476
          else
3477
            {
3478
              /* If we are in ssa form, we must load the value from the default
3479
                 definition of the argument.  That should not be defined now,
3480
                 since the argument is not used uninitialized.  */
3481
              gcc_assert (gimple_default_def (cfun, arg) == NULL);
3482
              narg = make_ssa_name (arg, gimple_build_nop ());
3483
              set_default_def (arg, narg);
3484
              /* ?? Is setting the subcode really necessary ??  */
3485
              gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
3486
              gimple_assign_set_rhs1 (parcopy_stmt, narg);
3487
              update_stmt (parcopy_stmt);
3488
            }
3489
        }
3490
 
3491
      /* Declare local variables needed in CHILD_CFUN.  */
3492
      block = DECL_INITIAL (child_fn);
3493
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
3494
      /* The gimplifier could record temporaries in the parallel/task block
3495
         rather than in the containing function's local_decls chain,
3496
         which would mean cgraph missed finalizing them.  Do it now.  */
3497
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
3498
        if (TREE_CODE (t) == VAR_DECL
3499
            && TREE_STATIC (t)
3500
            && !DECL_EXTERNAL (t))
3501
          varpool_finalize_decl (t);
3502
      DECL_SAVED_TREE (child_fn) = NULL;
3503
      gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
3504
      TREE_USED (block) = 1;
3505
 
3506
      /* Reset DECL_CONTEXT on function arguments.  */
3507
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
3508
        DECL_CONTEXT (t) = child_fn;
3509
 
3510
      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
3511
         so that it can be moved to the child function.  */
3512
      gsi = gsi_last_bb (entry_bb);
3513
      stmt = gsi_stmt (gsi);
3514
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
3515
                           || gimple_code (stmt) == GIMPLE_OMP_TASK));
3516
      gsi_remove (&gsi, true);
3517
      e = split_block (entry_bb, stmt);
3518
      entry_bb = e->dest;
3519
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
3520
 
3521
      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
3522
      if (exit_bb)
3523
        {
3524
          gsi = gsi_last_bb (exit_bb);
3525
          gcc_assert (!gsi_end_p (gsi)
3526
                      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
3527
          stmt = gimple_build_return (NULL);
3528
          gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
3529
          gsi_remove (&gsi, true);
3530
        }
3531
 
3532
      /* Move the parallel region into CHILD_CFUN.  */
3533
 
3534
      if (gimple_in_ssa_p (cfun))
3535
        {
3536
          push_cfun (child_cfun);
3537
          init_tree_ssa (child_cfun);
3538
          init_ssa_operands ();
3539
          cfun->gimple_df->in_ssa_p = true;
3540
          pop_cfun ();
3541
          block = NULL_TREE;
3542
        }
3543
      else
3544
        block = gimple_block (entry_stmt);
3545
 
3546
      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
3547
      if (exit_bb)
3548
        single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
3549
 
3550
      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
3551
      num = VEC_length (tree, child_cfun->local_decls);
3552
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
3553
        {
3554
          t = VEC_index (tree, child_cfun->local_decls, srcidx);
3555
          if (DECL_CONTEXT (t) == cfun->decl)
3556
            continue;
3557
          if (srcidx != dstidx)
3558
            VEC_replace (tree, child_cfun->local_decls, dstidx, t);
3559
          dstidx++;
3560
        }
3561
      if (dstidx != num)
3562
        VEC_truncate (tree, child_cfun->local_decls, dstidx);
3563
 
3564
      /* Inform the callgraph about the new function.  */
3565
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
3566
        = cfun->curr_properties;
3567
      cgraph_add_new_function (child_fn, true);
3568
 
3569
      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
3570
         fixed in a following pass.  */
3571
      push_cfun (child_cfun);
3572
      save_current = current_function_decl;
3573
      current_function_decl = child_fn;
3574
      if (optimize)
3575
        optimize_omp_library_calls (entry_stmt);
3576
      rebuild_cgraph_edges ();
3577
 
3578
      /* Some EH regions might become dead, see PR34608.  If
3579
         pass_cleanup_cfg isn't the first pass to happen with the
3580
         new child, these dead EH edges might cause problems.
3581
         Clean them up now.  */
3582
      if (flag_exceptions)
3583
        {
3584
          basic_block bb;
3585
          bool changed = false;
3586
 
3587
          FOR_EACH_BB (bb)
3588
            changed |= gimple_purge_dead_eh_edges (bb);
3589
          if (changed)
3590
            cleanup_tree_cfg ();
3591
        }
3592
      if (gimple_in_ssa_p (cfun))
3593
        update_ssa (TODO_update_ssa);
3594
      current_function_decl = save_current;
3595
      pop_cfun ();
3596
    }
3597
 
3598
  /* Emit a library call to launch the children threads.  */
3599
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
3600
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
3601
  else
3602
    expand_task_call (new_bb, entry_stmt);
3603
  update_ssa (TODO_update_ssa_only_virtuals);
3604
}
3605
 
3606
 
3607
/* A subroutine of expand_omp_for.  Generate code for a parallel
3608
   loop with any schedule.  Given parameters:
3609
 
3610
        for (V = N1; V cond N2; V += STEP) BODY;
3611
 
3612
   where COND is "<" or ">", we generate pseudocode
3613
 
3614
        more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
3615
        if (more) goto L0; else goto L3;
3616
    L0:
3617
        V = istart0;
3618
        iend = iend0;
3619
    L1:
3620
        BODY;
3621
        V += STEP;
3622
        if (V cond iend) goto L1; else goto L2;
3623
    L2:
3624
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3625
    L3:
3626
 
3627
    If this is a combined omp parallel loop, instead of the call to
3628
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.
3629
 
3630
    For collapsed loops, given parameters:
3631
      collapse(3)
3632
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
3633
        for (V2 = N21; V2 cond2 N22; V2 += STEP2)
3634
          for (V3 = N31; V3 cond3 N32; V3 += STEP3)
3635
            BODY;
3636
 
3637
    we generate pseudocode
3638
 
3639
        if (cond3 is <)
3640
          adj = STEP3 - 1;
3641
        else
3642
          adj = STEP3 + 1;
3643
        count3 = (adj + N32 - N31) / STEP3;
3644
        if (cond2 is <)
3645
          adj = STEP2 - 1;
3646
        else
3647
          adj = STEP2 + 1;
3648
        count2 = (adj + N22 - N21) / STEP2;
3649
        if (cond1 is <)
3650
          adj = STEP1 - 1;
3651
        else
3652
          adj = STEP1 + 1;
3653
        count1 = (adj + N12 - N11) / STEP1;
3654
        count = count1 * count2 * count3;
3655
        more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
3656
        if (more) goto L0; else goto L3;
3657
    L0:
3658
        V = istart0;
3659
        T = V;
3660
        V3 = N31 + (T % count3) * STEP3;
3661
        T = T / count3;
3662
        V2 = N21 + (T % count2) * STEP2;
3663
        T = T / count2;
3664
        V1 = N11 + T * STEP1;
3665
        iend = iend0;
3666
    L1:
3667
        BODY;
3668
        V += 1;
3669
        if (V < iend) goto L10; else goto L2;
3670
    L10:
3671
        V3 += STEP3;
3672
        if (V3 cond3 N32) goto L1; else goto L11;
3673
    L11:
3674
        V3 = N31;
3675
        V2 += STEP2;
3676
        if (V2 cond2 N22) goto L1; else goto L12;
3677
    L12:
3678
        V2 = N21;
3679
        V1 += STEP1;
3680
        goto L1;
3681
    L2:
3682
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
3683
    L3:
3684
 
3685
      */
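 
/* To make the collapsed-loop mapping above concrete: with collapse(2),
   count2 = 4 and count1 = 3, the 12 logical iterations are numbered
   T = 0..11, and e.g. T = 7 recovers the original indices as
 
       V2 = N21 + (7 % 4) * STEP2;   -- inner index 3
       T  = 7 / 4;                   -- i.e. 1
       V1 = N11 + 1 * STEP1;         -- outer index 1
 
   START_FN and NEXT_FN are the schedule-specific libgomp entry points,
   e.g. GOMP_loop_dynamic_start / GOMP_loop_dynamic_next for
   schedule(dynamic).  (Worked example only, not generated code.)  */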
3686
 
3687
static void
3688
expand_omp_for_generic (struct omp_region *region,
3689
                        struct omp_for_data *fd,
3690
                        enum built_in_function start_fn,
3691
                        enum built_in_function next_fn)
3692
{
3693
  tree type, istart0, iend0, iend;
3694
  tree t, vmain, vback, bias = NULL_TREE;
3695
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
3696
  basic_block l2_bb = NULL, l3_bb = NULL;
3697
  gimple_stmt_iterator gsi;
3698
  gimple stmt;
3699
  bool in_combined_parallel = is_combined_parallel (region);
3700
  bool broken_loop = region->cont == NULL;
3701
  edge e, ne;
3702
  tree *counts = NULL;
3703
  int i;
3704
 
3705
  gcc_assert (!broken_loop || !in_combined_parallel);
3706
  gcc_assert (fd->iter_type == long_integer_type_node
3707
              || !in_combined_parallel);
3708
 
3709
  type = TREE_TYPE (fd->loop.v);
3710
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
3711
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
3712
  TREE_ADDRESSABLE (istart0) = 1;
3713
  TREE_ADDRESSABLE (iend0) = 1;
3714
  if (gimple_in_ssa_p (cfun))
3715
    {
3716
      add_referenced_var (istart0);
3717
      add_referenced_var (iend0);
3718
    }
3719
 
3720
  /* See if we need to bias by LLONG_MIN.  */
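  /* The GOMP_loop_ull_* runtime interface takes unsigned long long bounds,
     so when the loop variable is a signed type whose bounds may be (or may
     become) negative, both bounds are shifted by TYPE_MIN_VALUE here and
     the shift is undone when istart0/iend0 are copied back into V.  */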
3721
  if (fd->iter_type == long_long_unsigned_type_node
3722
      && TREE_CODE (type) == INTEGER_TYPE
3723
      && !TYPE_UNSIGNED (type))
3724
    {
3725
      tree n1, n2;
3726
 
3727
      if (fd->loop.cond_code == LT_EXPR)
3728
        {
3729
          n1 = fd->loop.n1;
3730
          n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
3731
        }
3732
      else
3733
        {
3734
          n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
3735
          n2 = fd->loop.n1;
3736
        }
3737
      if (TREE_CODE (n1) != INTEGER_CST
3738
          || TREE_CODE (n2) != INTEGER_CST
3739
          || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
3740
        bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
3741
    }
3742
 
3743
  entry_bb = region->entry;
3744
  cont_bb = region->cont;
3745
  collapse_bb = NULL;
3746
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
3747
  gcc_assert (broken_loop
3748
              || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
3749
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
3750
  l1_bb = single_succ (l0_bb);
3751
  if (!broken_loop)
3752
    {
3753
      l2_bb = create_empty_bb (cont_bb);
3754
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
3755
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
3756
    }
3757
  else
3758
    l2_bb = NULL;
3759
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
3760
  exit_bb = region->exit;
3761
 
3762
  gsi = gsi_last_bb (entry_bb);
3763
 
3764
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
3765
  if (fd->collapse > 1)
3766
    {
3767
      /* Collapsed loops need work for expansion in SSA form.  */
3768
      gcc_assert (!gimple_in_ssa_p (cfun));
3769
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
3770
      for (i = 0; i < fd->collapse; i++)
3771
        {
3772
          tree itype = TREE_TYPE (fd->loops[i].v);
3773
 
3774
          if (POINTER_TYPE_P (itype))
3775
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
3776
          t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
3777
                                     ? -1 : 1));
3778
          t = fold_build2 (PLUS_EXPR, itype,
3779
                           fold_convert (itype, fd->loops[i].step), t);
3780
          t = fold_build2 (PLUS_EXPR, itype, t,
3781
                           fold_convert (itype, fd->loops[i].n2));
3782
          t = fold_build2 (MINUS_EXPR, itype, t,
3783
                           fold_convert (itype, fd->loops[i].n1));
3784
          if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
3785
            t = fold_build2 (TRUNC_DIV_EXPR, itype,
3786
                             fold_build1 (NEGATE_EXPR, itype, t),
3787
                             fold_build1 (NEGATE_EXPR, itype,
3788
                                          fold_convert (itype,
3789
                                                        fd->loops[i].step)));
3790
          else
3791
            t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
3792
                             fold_convert (itype, fd->loops[i].step));
3793
          t = fold_convert (type, t);
3794
          if (TREE_CODE (t) == INTEGER_CST)
3795
            counts[i] = t;
3796
          else
3797
            {
3798
              counts[i] = create_tmp_var (type, ".count");
3799
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3800
                                            true, GSI_SAME_STMT);
3801
              stmt = gimple_build_assign (counts[i], t);
3802
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3803
            }
3804
          if (SSA_VAR_P (fd->loop.n2))
3805
            {
3806
              if (i == 0)
3807
                t = counts[0];
3808
              else
3809
                {
3810
                  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
3811
                  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3812
                                                true, GSI_SAME_STMT);
3813
                }
3814
              stmt = gimple_build_assign (fd->loop.n2, t);
3815
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3816
            }
3817
        }
3818
    }
3819
  if (in_combined_parallel)
3820
    {
3821
      /* In a combined parallel loop, emit a call to
3822
         GOMP_loop_foo_next.  */
3823
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
3824
                           build_fold_addr_expr (istart0),
3825
                           build_fold_addr_expr (iend0));
3826
    }
3827
  else
3828
    {
3829
      tree t0, t1, t2, t3, t4;
3830
      /* If this is not a combined parallel loop, emit a call to
3831
         GOMP_loop_foo_start in ENTRY_BB.  */
3832
      t4 = build_fold_addr_expr (iend0);
3833
      t3 = build_fold_addr_expr (istart0);
3834
      t2 = fold_convert (fd->iter_type, fd->loop.step);
3835
      if (POINTER_TYPE_P (type)
3836
          && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
3837
        {
3838
          /* Avoid casting pointers to integer of a different size.  */
3839
          tree itype
3840
            = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
3841
          t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
3842
          t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
3843
        }
3844
      else
3845
        {
3846
          t1 = fold_convert (fd->iter_type, fd->loop.n2);
3847
          t0 = fold_convert (fd->iter_type, fd->loop.n1);
3848
        }
3849
      if (bias)
3850
        {
3851
          t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
3852
          t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
3853
        }
3854
      if (fd->iter_type == long_integer_type_node)
3855
        {
3856
          if (fd->chunk_size)
3857
            {
3858
              t = fold_convert (fd->iter_type, fd->chunk_size);
3859
              t = build_call_expr (builtin_decl_explicit (start_fn),
3860
                                   6, t0, t1, t2, t, t3, t4);
3861
            }
3862
          else
3863
            t = build_call_expr (builtin_decl_explicit (start_fn),
3864
                                 5, t0, t1, t2, t3, t4);
3865
        }
3866
      else
3867
        {
3868
          tree t5;
3869
          tree c_bool_type;
3870
          tree bfn_decl;
3871
 
3872
          /* The GOMP_loop_ull_*start functions have an additional boolean
3873
             argument, true for < loops and false for > loops.
3874
             In Fortran, the C bool type can be different from
3875
             boolean_type_node.  */
3876
          bfn_decl = builtin_decl_explicit (start_fn);
3877
          c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
3878
          t5 = build_int_cst (c_bool_type,
3879
                              fd->loop.cond_code == LT_EXPR ? 1 : 0);
3880
          if (fd->chunk_size)
3881
            {
3882
              tree bfn_decl = builtin_decl_explicit (start_fn);
3883
              t = fold_convert (fd->iter_type, fd->chunk_size);
3884
              t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
3885
            }
3886
          else
3887
            t = build_call_expr (builtin_decl_explicit (start_fn),
3888
                                 6, t5, t0, t1, t2, t3, t4);
3889
        }
3890
    }
3891
  if (TREE_TYPE (t) != boolean_type_node)
3892
    t = fold_build2 (NE_EXPR, boolean_type_node,
3893
                     t, build_int_cst (TREE_TYPE (t), 0));
3894
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3895
                                true, GSI_SAME_STMT);
3896
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
3897
 
3898
  /* Remove the GIMPLE_OMP_FOR statement.  */
3899
  gsi_remove (&gsi, true);
3900
 
3901
  /* Iteration setup for sequential loop goes in L0_BB.  */
3902
  gsi = gsi_start_bb (l0_bb);
3903
  t = istart0;
3904
  if (bias)
3905
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3906
  if (POINTER_TYPE_P (type))
3907
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3908
                                                      0), t);
3909
  t = fold_convert (type, t);
3910
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3911
                                false, GSI_CONTINUE_LINKING);
3912
  stmt = gimple_build_assign (fd->loop.v, t);
3913
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3914
 
3915
  t = iend0;
3916
  if (bias)
3917
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
3918
  if (POINTER_TYPE_P (type))
3919
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
3920
                                                      0), t);
3921
  t = fold_convert (type, t);
3922
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
3923
                                   false, GSI_CONTINUE_LINKING);
3924
  if (fd->collapse > 1)
3925
    {
3926
      tree tem = create_tmp_var (type, ".tem");
3927
 
3928
      stmt = gimple_build_assign (tem, fd->loop.v);
3929
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3930
      for (i = fd->collapse - 1; i >= 0; i--)
3931
        {
3932
          tree vtype = TREE_TYPE (fd->loops[i].v), itype;
3933
          itype = vtype;
3934
          if (POINTER_TYPE_P (vtype))
3935
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
3936
          t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
3937
          t = fold_convert (itype, t);
3938
          t = fold_build2 (MULT_EXPR, itype, t,
3939
                           fold_convert (itype, fd->loops[i].step));
3940
          if (POINTER_TYPE_P (vtype))
3941
            t = fold_build_pointer_plus (fd->loops[i].n1, t);
3942
          else
3943
            t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
3944
          t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3945
                                        false, GSI_CONTINUE_LINKING);
3946
          stmt = gimple_build_assign (fd->loops[i].v, t);
3947
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3948
          if (i != 0)
3949
            {
3950
              t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
3951
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3952
                                            false, GSI_CONTINUE_LINKING);
3953
              stmt = gimple_build_assign (tem, t);
3954
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
3955
            }
3956
        }
3957
    }
3958
 
3959
  if (!broken_loop)
3960
    {
3961
      /* Code to control the increment and predicate for the sequential
3962
         loop goes in the CONT_BB.  */
3963
      gsi = gsi_last_bb (cont_bb);
3964
      stmt = gsi_stmt (gsi);
3965
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
3966
      vmain = gimple_omp_continue_control_use (stmt);
3967
      vback = gimple_omp_continue_control_def (stmt);
3968
 
3969
      if (POINTER_TYPE_P (type))
3970
        t = fold_build_pointer_plus (vmain, fd->loop.step);
3971
      else
3972
        t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
3973
      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
3974
                                    true, GSI_SAME_STMT);
3975
      stmt = gimple_build_assign (vback, t);
3976
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3977
 
3978
      t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
3979
      stmt = gimple_build_cond_empty (t);
3980
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
3981
 
3982
      /* Remove GIMPLE_OMP_CONTINUE.  */
3983
      gsi_remove (&gsi, true);
3984
 
3985
      if (fd->collapse > 1)
3986
        {
3987
          basic_block last_bb, bb;
3988
 
3989
          last_bb = cont_bb;
3990
          for (i = fd->collapse - 1; i >= 0; i--)
3991
            {
3992
              tree vtype = TREE_TYPE (fd->loops[i].v);
3993
 
3994
              bb = create_empty_bb (last_bb);
3995
              gsi = gsi_start_bb (bb);
3996
 
3997
              if (i < fd->collapse - 1)
3998
                {
3999
                  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
4000
                  e->probability = REG_BR_PROB_BASE / 8;
4001
 
4002
                  t = fd->loops[i + 1].n1;
4003
                  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4004
                                                false, GSI_CONTINUE_LINKING);
4005
                  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
4006
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4007
                }
4008
              else
4009
                collapse_bb = bb;
4010
 
4011
              set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);
4012
 
4013
              if (POINTER_TYPE_P (vtype))
4014
                t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
4015
              else
4016
                t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
4017
                                 fd->loops[i].step);
4018
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4019
                                            false, GSI_CONTINUE_LINKING);
4020
              stmt = gimple_build_assign (fd->loops[i].v, t);
4021
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4022
 
4023
              if (i > 0)
4024
                {
4025
                  t = fd->loops[i].n2;
4026
                  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4027
                                                false, GSI_CONTINUE_LINKING);
4028
                  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
4029
                                   fd->loops[i].v, t);
4030
                  stmt = gimple_build_cond_empty (t);
4031
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4032
                  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
4033
                  e->probability = REG_BR_PROB_BASE * 7 / 8;
4034
                }
4035
              else
4036
                make_edge (bb, l1_bb, EDGE_FALLTHRU);
4037
              last_bb = bb;
4038
            }
4039
        }
4040
 
4041
      /* Emit code to get the next parallel iteration in L2_BB.  */
4042
      gsi = gsi_start_bb (l2_bb);
4043
 
4044
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
4045
                           build_fold_addr_expr (istart0),
4046
                           build_fold_addr_expr (iend0));
4047
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4048
                                    false, GSI_CONTINUE_LINKING);
4049
      if (TREE_TYPE (t) != boolean_type_node)
4050
        t = fold_build2 (NE_EXPR, boolean_type_node,
4051
                         t, build_int_cst (TREE_TYPE (t), 0));
4052
      stmt = gimple_build_cond_empty (t);
4053
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4054
    }
4055
 
4056
  /* Add the loop cleanup function.  */
4057
  gsi = gsi_last_bb (exit_bb);
4058
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4059
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
4060
  else
4061
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
4062
  stmt = gimple_build_call (t, 0);
4063
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
4064
  gsi_remove (&gsi, true);
4065
 
4066
  /* Connect the new blocks.  */
4067
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
4068
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;
4069
 
4070
  if (!broken_loop)
4071
    {
4072
      gimple_seq phis;
4073
 
4074
      e = find_edge (cont_bb, l3_bb);
4075
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);
4076
 
4077
      phis = phi_nodes (l3_bb);
4078
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
4079
        {
4080
          gimple phi = gsi_stmt (gsi);
4081
          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
4082
                   PHI_ARG_DEF_FROM_EDGE (phi, e));
4083
        }
4084
      remove_edge (e);
4085
 
4086
      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
4087
      if (fd->collapse > 1)
4088
        {
4089
          e = find_edge (cont_bb, l1_bb);
4090
          remove_edge (e);
4091
          e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
4092
        }
4093
      else
4094
        {
4095
          e = find_edge (cont_bb, l1_bb);
4096
          e->flags = EDGE_TRUE_VALUE;
4097
        }
4098
      e->probability = REG_BR_PROB_BASE * 7 / 8;
4099
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
4100
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);
4101
 
4102
      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
4103
                               recompute_dominator (CDI_DOMINATORS, l2_bb));
4104
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
4105
                               recompute_dominator (CDI_DOMINATORS, l3_bb));
4106
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
4107
                               recompute_dominator (CDI_DOMINATORS, l0_bb));
4108
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
4109
                               recompute_dominator (CDI_DOMINATORS, l1_bb));
4110
    }
4111
}
4112
 
4113
 
4114
/* A subroutine of expand_omp_for.  Generate code for a parallel
4115
   loop with static schedule and no specified chunk size.  Given
4116
   parameters:
4117
 
4118
        for (V = N1; V cond N2; V += STEP) BODY;
4119
 
4120
   where COND is "<" or ">", we generate pseudocode
4121
 
4122
        if (cond is <)
4123
          adj = STEP - 1;
4124
        else
4125
          adj = STEP + 1;
4126
        if ((__typeof (V)) -1 > 0 && cond is >)
4127
          n = -(adj + N2 - N1) / -STEP;
4128
        else
4129
          n = (adj + N2 - N1) / STEP;
4130
        q = n / nthreads;
4131
        tt = n % nthreads;
4132
        if (threadid < tt) goto L3; else goto L4;
4133
    L3:
4134
        tt = 0;
4135
        q = q + 1;
4136
    L4:
4137
        s0 = q * threadid + tt;
4138
        e0 = s0 + q;
4139
        V = s0 * STEP + N1;
4140
        if (s0 >= e0) goto L2; else goto L0;
4141
    L0:
4142
        e = e0 * STEP + N1;
4143
    L1:
4144
        BODY;
4145
        V += STEP;
4146
        if (V cond e) goto L1;
4147
    L2:
4148
*/
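 
/* Worked example of the partitioning above: n = 10 iterations over
   nthreads = 4 gives q = 2, tt = 2.  Threads 0 and 1 (threadid < tt)
   bump q to 3 and clear tt; the rest keep q = 2 and tt = 2:
 
       thread 0:  s0 = 0,  e0 = 3
       thread 1:  s0 = 3,  e0 = 6
       thread 2:  s0 = 6,  e0 = 8
       thread 3:  s0 = 8,  e0 = 10
 
   so the leftover iterations go to the lowest-numbered threads and each
   iteration is assigned exactly once.  */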
4149
 
4150
static void
4151
expand_omp_for_static_nochunk (struct omp_region *region,
4152
                               struct omp_for_data *fd)
4153
{
4154
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
4155
  tree type, itype, vmain, vback;
4156
  basic_block entry_bb, second_bb, third_bb, exit_bb, seq_start_bb;
4157
  basic_block body_bb, cont_bb;
4158
  basic_block fin_bb;
4159
  gimple_stmt_iterator gsi;
4160
  gimple stmt;
4161
  edge ep;
4162
 
4163
  itype = type = TREE_TYPE (fd->loop.v);
4164
  if (POINTER_TYPE_P (type))
4165
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4166
 
4167
  entry_bb = region->entry;
4168
  cont_bb = region->cont;
4169
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
4170
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
4171
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
4172
  body_bb = single_succ (seq_start_bb);
4173
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4174
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4175
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4176
  exit_bb = region->exit;
4177
 
4178
  /* Iteration space partitioning goes in ENTRY_BB.  */
4179
  gsi = gsi_last_bb (entry_bb);
4180
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4181
 
4182
  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4183
  t = fold_convert (itype, t);
4184
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4185
                                       true, GSI_SAME_STMT);
4186
 
4187
  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4188
  t = fold_convert (itype, t);
4189
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4190
                                       true, GSI_SAME_STMT);
4191
 
4192
  fd->loop.n1
4193
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
4194
                                true, NULL_TREE, true, GSI_SAME_STMT);
4195
  fd->loop.n2
4196
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
4197
                                true, NULL_TREE, true, GSI_SAME_STMT);
4198
  fd->loop.step
4199
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
4200
                                true, NULL_TREE, true, GSI_SAME_STMT);
4201
 
4202
  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4203
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4204
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4205
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4206
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4207
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
4208
                     fold_build1 (NEGATE_EXPR, itype, t),
4209
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4210
  else
4211
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4212
  t = fold_convert (itype, t);
4213
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4214
 
4215
  q = create_tmp_var (itype, "q");
4216
  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
4217
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4218
  gsi_insert_before (&gsi, gimple_build_assign (q, t), GSI_SAME_STMT);
4219
 
4220
  tt = create_tmp_var (itype, "tt");
4221
  t = fold_build2 (TRUNC_MOD_EXPR, itype, n, nthreads);
4222
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE, true, GSI_SAME_STMT);
4223
  gsi_insert_before (&gsi, gimple_build_assign (tt, t), GSI_SAME_STMT);
4224
 
4225
  t = build2 (LT_EXPR, boolean_type_node, threadid, tt);
4226
  stmt = gimple_build_cond_empty (t);
4227
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4228
 
4229
  second_bb = split_block (entry_bb, stmt)->dest;
4230
  gsi = gsi_last_bb (second_bb);
4231
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4232
 
4233
  gsi_insert_before (&gsi, gimple_build_assign (tt, build_int_cst (itype, 0)),
4234
                     GSI_SAME_STMT);
4235
  stmt = gimple_build_assign_with_ops (PLUS_EXPR, q, q,
4236
                                       build_int_cst (itype, 1));
4237
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4238
 
4239
  third_bb = split_block (second_bb, stmt)->dest;
4240
  gsi = gsi_last_bb (third_bb);
4241
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
4242
 
4243
  t = build2 (MULT_EXPR, itype, q, threadid);
4244
  t = build2 (PLUS_EXPR, itype, t, tt);
4245
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4246
 
4247
  t = fold_build2 (PLUS_EXPR, itype, s0, q);
4248
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);
4249
 
4250
  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
4251
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4252
 
4253
  /* Remove the GIMPLE_OMP_FOR statement.  */
4254
  gsi_remove (&gsi, true);
4255
 
4256
  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
4257
  gsi = gsi_start_bb (seq_start_bb);
4258
 
4259
  t = fold_convert (itype, s0);
4260
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4261
  if (POINTER_TYPE_P (type))
4262
    t = fold_build_pointer_plus (fd->loop.n1, t);
4263
  else
4264
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4265
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4266
                                false, GSI_CONTINUE_LINKING);
4267
  stmt = gimple_build_assign (fd->loop.v, t);
4268
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
4269
 
4270
  t = fold_convert (itype, e0);
4271
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4272
  if (POINTER_TYPE_P (type))
4273
    t = fold_build_pointer_plus (fd->loop.n1, t);
4274
  else
4275
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4276
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
4277
                                false, GSI_CONTINUE_LINKING);
4278
 
4279
  /* The code controlling the sequential loop replaces the
4280
     GIMPLE_OMP_CONTINUE.  */
4281
  gsi = gsi_last_bb (cont_bb);
4282
  stmt = gsi_stmt (gsi);
4283
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4284
  vmain = gimple_omp_continue_control_use (stmt);
4285
  vback = gimple_omp_continue_control_def (stmt);
4286
 
4287
  if (POINTER_TYPE_P (type))
4288
    t = fold_build_pointer_plus (vmain, fd->loop.step);
4289
  else
4290
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
4291
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
4292
                                true, GSI_SAME_STMT);
4293
  stmt = gimple_build_assign (vback, t);
4294
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
4295
 
4296
  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
4297
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);
4298
 
4299
  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
4300
  gsi_remove (&gsi, true);
4301
 
4302
  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
4303
  gsi = gsi_last_bb (exit_bb);
4304
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
4305
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
4306
                              false, GSI_SAME_STMT);
4307
  gsi_remove (&gsi, true);
4308
 
4309
  /* Connect all the blocks.  */
4310
  ep = make_edge (entry_bb, third_bb, EDGE_FALSE_VALUE);
4311
  ep->probability = REG_BR_PROB_BASE / 4 * 3;
4312
  ep = find_edge (entry_bb, second_bb);
4313
  ep->flags = EDGE_TRUE_VALUE;
4314
  ep->probability = REG_BR_PROB_BASE / 4;
4315
  find_edge (third_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
4316
  find_edge (third_bb, fin_bb)->flags = EDGE_TRUE_VALUE;
4317
 
4318
  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4319
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4320
 
4321
  set_immediate_dominator (CDI_DOMINATORS, second_bb, entry_bb);
4322
  set_immediate_dominator (CDI_DOMINATORS, third_bb, entry_bb);
4323
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, third_bb);
4324
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
4325
                           recompute_dominator (CDI_DOMINATORS, body_bb));
4326
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4327
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
4328
}
4329
 
4330
 
4331
/* A subroutine of expand_omp_for.  Generate code for a parallel
4332
   loop with static schedule and a specified chunk size.  Given
4333
   parameters:
4334
 
4335
        for (V = N1; V cond N2; V += STEP) BODY;
4336
 
4337
   where COND is "<" or ">", we generate pseudocode
4338
 
4339
        if (cond is <)
4340
          adj = STEP - 1;
4341
        else
4342
          adj = STEP + 1;
4343
        if ((__typeof (V)) -1 > 0 && cond is >)
4344
          n = -(adj + N2 - N1) / -STEP;
4345
        else
4346
          n = (adj + N2 - N1) / STEP;
4347
        trip = 0;
4348
        V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
4349
                                              here so that V is defined
4350
                                              if the loop is not entered
4351
    L0:
4352
        s0 = (trip * nthreads + threadid) * CHUNK;
4353
        e0 = min(s0 + CHUNK, n);
4354
        if (s0 < n) goto L1; else goto L4;
4355
    L1:
4356
        V = s0 * STEP + N1;
4357
        e = e0 * STEP + N1;
4358
    L2:
4359
        BODY;
4360
        V += STEP;
4361
        if (V cond e) goto L2; else goto L3;
4362
    L3:
4363
        trip += 1;
4364
        goto L0;
4365
    L4:
4366
*/
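 
/* Worked example: n = 10, nthreads = 2, CHUNK = 2.  Chunks are handed
   out round-robin, indexed by the trip count:
 
       trip 0:  thread 0 -> [0, 2),   thread 1 -> [2, 4)
       trip 1:  thread 0 -> [4, 6),   thread 1 -> [6, 8)
       trip 2:  thread 0 -> [8, 10),  thread 1 -> s0 = 10 >= n, done
 
   using s0 = (trip * nthreads + threadid) * CHUNK and
   e0 = min (s0 + CHUNK, n) from the pseudocode above.  */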
4367
 
4368
static void
4369
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
4370
{
4371
  tree n, s0, e0, e, t;
4372
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
4373
  tree type, itype, v_main, v_back, v_extra;
4374
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
4375
  basic_block trip_update_bb, cont_bb, fin_bb;
4376
  gimple_stmt_iterator si;
4377
  gimple stmt;
4378
  edge se;
4379
 
4380
  itype = type = TREE_TYPE (fd->loop.v);
4381
  if (POINTER_TYPE_P (type))
4382
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
4383
 
4384
  entry_bb = region->entry;
4385
  se = split_block (entry_bb, last_stmt (entry_bb));
4386
  entry_bb = se->src;
4387
  iter_part_bb = se->dest;
4388
  cont_bb = region->cont;
4389
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
4390
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
4391
              == FALLTHRU_EDGE (cont_bb)->dest);
4392
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
4393
  body_bb = single_succ (seq_start_bb);
4394
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
4395
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
4396
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
4397
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
4398
  exit_bb = region->exit;
4399
 
4400
  /* Trip and adjustment setup goes in ENTRY_BB.  */
4401
  si = gsi_last_bb (entry_bb);
4402
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);
4403
 
4404
  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS), 0);
4405
  t = fold_convert (itype, t);
4406
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4407
                                       true, GSI_SAME_STMT);
4408
 
4409
  t = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM), 0);
4410
  t = fold_convert (itype, t);
4411
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4412
                                       true, GSI_SAME_STMT);
4413
 
4414
  fd->loop.n1
4415
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
4416
                                true, NULL_TREE, true, GSI_SAME_STMT);
4417
  fd->loop.n2
4418
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
4419
                                true, NULL_TREE, true, GSI_SAME_STMT);
4420
  fd->loop.step
4421
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
4422
                                true, NULL_TREE, true, GSI_SAME_STMT);
4423
  fd->chunk_size
4424
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
4425
                                true, NULL_TREE, true, GSI_SAME_STMT);
4426
 
4427
  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
4428
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
4429
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
4430
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
4431
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
4432
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
4433
                     fold_build1 (NEGATE_EXPR, itype, t),
4434
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
4435
  else
4436
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
4437
  t = fold_convert (itype, t);
4438
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4439
                                true, GSI_SAME_STMT);
4440
 
4441
  trip_var = create_tmp_var (itype, ".trip");
4442
  if (gimple_in_ssa_p (cfun))
4443
    {
4444
      add_referenced_var (trip_var);
4445
      trip_init = make_ssa_name (trip_var, NULL);
4446
      trip_main = make_ssa_name (trip_var, NULL);
4447
      trip_back = make_ssa_name (trip_var, NULL);
4448
    }
4449
  else
4450
    {
4451
      trip_init = trip_var;
4452
      trip_main = trip_var;
4453
      trip_back = trip_var;
4454
    }
4455
 
4456
  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
4457
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4458
 
4459
  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
4460
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4461
  if (POINTER_TYPE_P (type))
4462
    t = fold_build_pointer_plus (fd->loop.n1, t);
4463
  else
4464
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4465
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4466
                                      true, GSI_SAME_STMT);
4467
 
4468
  /* Remove the GIMPLE_OMP_FOR.  */
4469
  gsi_remove (&si, true);
4470
 
4471
  /* Iteration space partitioning goes in ITER_PART_BB.  */
4472
  si = gsi_last_bb (iter_part_bb);
4473
 
4474
  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
4475
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
4476
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
4477
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4478
                                 false, GSI_CONTINUE_LINKING);
4479
 
4480
  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
4481
  t = fold_build2 (MIN_EXPR, itype, t, n);
4482
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4483
                                 false, GSI_CONTINUE_LINKING);
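  /* S0 and E0 delimit the logical iterations of this thread's current chunk.
     For instance, with nthreads == 4, chunk_size == 2 and threadid == 1,
     trip 0 covers iterations [2, 4) and trip 1 covers [10, 12), with the
     end clamped to N by the MIN_EXPR above.  */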
4484
 
4485
  t = build2 (LT_EXPR, boolean_type_node, s0, n);
4486
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);
4487
 
4488
  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
4489
  si = gsi_start_bb (seq_start_bb);
4490
 
4491
  t = fold_convert (itype, s0);
4492
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4493
  if (POINTER_TYPE_P (type))
4494
    t = fold_build_pointer_plus (fd->loop.n1, t);
4495
  else
4496
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4497
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
4498
                                false, GSI_CONTINUE_LINKING);
4499
  stmt = gimple_build_assign (fd->loop.v, t);
4500
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4501
 
4502
  t = fold_convert (itype, e0);
4503
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
4504
  if (POINTER_TYPE_P (type))
4505
    t = fold_build_pointer_plus (fd->loop.n1, t);
4506
  else
4507
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
4508
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
4509
                                false, GSI_CONTINUE_LINKING);
4510
 
4511
  /* The code controlling the sequential loop goes in CONT_BB,
4512
     replacing the GIMPLE_OMP_CONTINUE.  */
4513
  si = gsi_last_bb (cont_bb);
4514
  stmt = gsi_stmt (si);
4515
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
4516
  v_main = gimple_omp_continue_control_use (stmt);
4517
  v_back = gimple_omp_continue_control_def (stmt);
4518
 
4519
  if (POINTER_TYPE_P (type))
4520
    t = fold_build_pointer_plus (v_main, fd->loop.step);
4521
  else
4522
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
4523
  stmt = gimple_build_assign (v_back, t);
4524
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
4525
 
4526
  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
4527
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);
4528
 
4529
  /* Remove GIMPLE_OMP_CONTINUE.  */
4530
  gsi_remove (&si, true);
4531
 
4532
  /* Trip update code goes into TRIP_UPDATE_BB.  */
4533
  si = gsi_start_bb (trip_update_bb);
4534
 
4535
  t = build_int_cst (itype, 1);
4536
  t = build2 (PLUS_EXPR, itype, trip_main, t);
4537
  stmt = gimple_build_assign (trip_back, t);
4538
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4539
 
4540
  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
4541
  si = gsi_last_bb (exit_bb);
4542
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
4543
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4544
                              false, GSI_SAME_STMT);
4545
  gsi_remove (&si, true);
4546
 
4547
  /* Connect the new blocks.  */
4548
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
4549
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;
4550
 
4551
  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
4552
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;
4553
 
4554
  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);
4555
 
4556
  if (gimple_in_ssa_p (cfun))
4557
    {
4558
      gimple_stmt_iterator psi;
4559
      gimple phi;
4560
      edge re, ene;
4561
      edge_var_map_vector head;
4562
      edge_var_map *vm;
4563
      size_t i;
4564
 
4565
      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
4566
         remove arguments of the phi nodes in fin_bb.  We need to create
4567
         appropriate phi nodes in iter_part_bb instead.  */
4568
      se = single_pred_edge (fin_bb);
4569
      re = single_succ_edge (trip_update_bb);
4570
      head = redirect_edge_var_map_vector (re);
4571
      ene = single_succ_edge (entry_bb);
4572
 
4573
      psi = gsi_start_phis (fin_bb);
4574
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
4575
           gsi_next (&psi), ++i)
4576
        {
4577
          gimple nphi;
4578
          source_location locus;
4579
 
4580
          phi = gsi_stmt (psi);
4581
          t = gimple_phi_result (phi);
4582
          gcc_assert (t == redirect_edge_var_map_result (vm));
4583
          nphi = create_phi_node (t, iter_part_bb);
4584
          SSA_NAME_DEF_STMT (t) = nphi;
4585
 
4586
          t = PHI_ARG_DEF_FROM_EDGE (phi, se);
4587
          locus = gimple_phi_arg_location_from_edge (phi, se);
4588
 
4589
          /* A special case -- fd->loop.v is not yet computed in
4590
             iter_part_bb; we need to use v_extra instead.  */
4591
          if (t == fd->loop.v)
4592
            t = v_extra;
4593
          add_phi_arg (nphi, t, ene, locus);
4594
          locus = redirect_edge_var_map_location (vm);
4595
          add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4596
        }
4597
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4598
      redirect_edge_var_map_clear (re);
4599
      while (1)
4600
        {
4601
          psi = gsi_start_phis (fin_bb);
4602
          if (gsi_end_p (psi))
4603
            break;
4604
          remove_phi_node (&psi, false);
4605
        }
4606
 
4607
      /* Make phi node for trip.  */
4608
      phi = create_phi_node (trip_main, iter_part_bb);
4609
      SSA_NAME_DEF_STMT (trip_main) = phi;
4610
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4611
                   UNKNOWN_LOCATION);
4612
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4613
                   UNKNOWN_LOCATION);
4614
    }
4615
 
4616
  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4617
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4618
                           recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4619
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4620
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
4621
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4622
                           recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4623
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
4624
                           recompute_dominator (CDI_DOMINATORS, body_bb));
4625
}
4626
 
4627
 
4628
/* Expand the OpenMP loop defined by REGION.  */
4629
 
4630
static void
4631
expand_omp_for (struct omp_region *region)
4632
{
4633
  struct omp_for_data fd;
4634
  struct omp_for_data_loop *loops;
4635
 
4636
  loops
4637
    = (struct omp_for_data_loop *)
4638
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
4639
              * sizeof (struct omp_for_data_loop));
4640
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
4641
  region->sched_kind = fd.sched_kind;
4642
 
4643
  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
4644
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4645
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
4646
  if (region->cont)
4647
    {
4648
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
4649
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4650
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
4651
    }
4652
 
4653
  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
4654
      && !fd.have_ordered
4655
      && fd.collapse == 1
4656
      && region->cont != NULL)
4657
    {
4658
      if (fd.chunk_size == NULL)
4659
        expand_omp_for_static_nochunk (region, &fd);
4660
      else
4661
        expand_omp_for_static_chunk (region, &fd);
4662
    }
4663
  else
4664
    {
4665
      int fn_index, start_ix, next_ix;
4666
 
4667
      if (fd.chunk_size == NULL
4668
          && fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
4669
        fd.chunk_size = integer_zero_node;
4670
      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
4671
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
4672
                  ? 3 : fd.sched_kind;
4673
      fn_index += fd.have_ordered * 4;
4674
      start_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_START) + fn_index;
4675
      next_ix = ((int)BUILT_IN_GOMP_LOOP_STATIC_NEXT) + fn_index;
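      /* For instance, schedule(dynamic) without an ordered clause gives
         fn_index 1, so START_IX and NEXT_IX select GOMP_loop_dynamic_start
         and GOMP_loop_dynamic_next.  The arithmetic assumes the
         BUILT_IN_GOMP_LOOP_* builtins are declared consecutively in
         schedule order (static, dynamic, guided, runtime).  */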
4676
      if (fd.iter_type == long_long_unsigned_type_node)
4677
        {
4678
          start_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_START
4679
                        - (int)BUILT_IN_GOMP_LOOP_STATIC_START);
4680
          next_ix += ((int)BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
4681
                      - (int)BUILT_IN_GOMP_LOOP_STATIC_NEXT);
4682
        }
4683
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
4684
                              (enum built_in_function) next_ix);
4685
    }
4686
 
4687
  update_ssa (TODO_update_ssa_only_virtuals);
4688
}
4689
 
4690
 
4691
/* Expand code for an OpenMP sections directive.  In pseudo code, we generate
4692
 
4693
        v = GOMP_sections_start (n);
4694
    L0:
4695
        switch (v)
4696
          {
4697
          case 0:
4698
            goto L2;
4699
          case 1:
4700
            section 1;
4701
            goto L1;
4702
          case 2:
4703
            ...
4704
          case n:
4705
            ...
4706
          default:
4707
            abort ();
4708
          }
4709
    L1:
4710
        v = GOMP_sections_next ();
4711
        goto L0;
4712
    L2:
4713
        reduction;
4714
 
4715
    If this is a combined parallel sections, replace the call to
4716
    GOMP_sections_start with a call to GOMP_sections_next.  */
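 
/* As a small source-level illustration of the scheme above,
 
        #pragma omp sections
          {
          #pragma omp section
            foo ();
          #pragma omp section
            bar ();
          }
 
   becomes a switch with cases 1 and 2 invoking the two section bodies;
   case 0 is taken once GOMP_sections_start/GOMP_sections_next report
   that no sections remain.  */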
4717
 
4718
static void
4719
expand_omp_sections (struct omp_region *region)
4720
{
4721
  tree t, u, vin = NULL, vmain, vnext, l2;
4722
  VEC (tree,heap) *label_vec;
4723
  unsigned len;
4724
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
4725
  gimple_stmt_iterator si, switch_si;
4726
  gimple sections_stmt, stmt, cont;
4727
  edge_iterator ei;
4728
  edge e;
4729
  struct omp_region *inner;
4730
  unsigned i, casei;
4731
  bool exit_reachable = region->cont != NULL;
4732
 
4733
  gcc_assert (exit_reachable == (region->exit != NULL));
4734
  entry_bb = region->entry;
4735
  l0_bb = single_succ (entry_bb);
4736
  l1_bb = region->cont;
4737
  l2_bb = region->exit;
4738
  if (exit_reachable)
4739
    {
4740
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
4741
        l2 = gimple_block_label (l2_bb);
4742
      else
4743
        {
4744
          /* This can happen if there are reductions.  */
4745
          len = EDGE_COUNT (l0_bb->succs);
4746
          gcc_assert (len > 0);
4747
          e = EDGE_SUCC (l0_bb, len - 1);
4748
          si = gsi_last_bb (e->dest);
4749
          l2 = NULL_TREE;
4750
          if (gsi_end_p (si)
4751
              || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4752
            l2 = gimple_block_label (e->dest);
4753
          else
4754
            FOR_EACH_EDGE (e, ei, l0_bb->succs)
4755
              {
4756
                si = gsi_last_bb (e->dest);
4757
                if (gsi_end_p (si)
4758
                    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
4759
                  {
4760
                    l2 = gimple_block_label (e->dest);
4761
                    break;
4762
                  }
4763
              }
4764
        }
4765
      default_bb = create_empty_bb (l1_bb->prev_bb);
4766
    }
4767
  else
4768
    {
4769
      default_bb = create_empty_bb (l0_bb);
4770
      l2 = gimple_block_label (default_bb);
4771
    }
4772
 
4773
  /* We will build a switch() with enough cases for all the
4774
     GIMPLE_OMP_SECTION regions, a '0' case to signal that no more work remains,
4775
     and a default case to abort if something goes wrong.  */
4776
  len = EDGE_COUNT (l0_bb->succs);
4777
 
4778
  /* Use VEC_quick_push on label_vec throughout, since we know the size
4779
     in advance.  */
4780
  label_vec = VEC_alloc (tree, heap, len);
4781
 
4782
  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
4783
     GIMPLE_OMP_SECTIONS statement.  */
4784
  si = gsi_last_bb (entry_bb);
4785
  sections_stmt = gsi_stmt (si);
4786
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
4787
  vin = gimple_omp_sections_control (sections_stmt);
4788
  if (!is_combined_parallel (region))
4789
    {
4790
      /* If we are not inside a combined parallel+sections region,
4791
         call GOMP_sections_start.  */
4792
      t = build_int_cst (unsigned_type_node,
4793
                         exit_reachable ? len - 1 : len);
4794
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_START);
4795
      stmt = gimple_build_call (u, 1, t);
4796
    }
4797
  else
4798
    {
4799
      /* Otherwise, call GOMP_sections_next.  */
4800
      u = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4801
      stmt = gimple_build_call (u, 0);
4802
    }
4803
  gimple_call_set_lhs (stmt, vin);
4804
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4805
  gsi_remove (&si, true);
4806
 
4807
  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
4808
     L0_BB.  */
4809
  switch_si = gsi_last_bb (l0_bb);
4810
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
4811
  if (exit_reachable)
4812
    {
4813
      cont = last_stmt (l1_bb);
4814
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
4815
      vmain = gimple_omp_continue_control_use (cont);
4816
      vnext = gimple_omp_continue_control_def (cont);
4817
    }
4818
  else
4819
    {
4820
      vmain = vin;
4821
      vnext = NULL_TREE;
4822
    }
4823
 
4824
  i = 0;
4825
  if (exit_reachable)
4826
    {
4827
      t = build_case_label (build_int_cst (unsigned_type_node, 0), NULL, l2);
4828
      VEC_quick_push (tree, label_vec, t);
4829
      i++;
4830
    }
4831
 
4832
  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
4833
  for (inner = region->inner, casei = 1;
4834
       inner;
4835
       inner = inner->next, i++, casei++)
4836
    {
4837
      basic_block s_entry_bb, s_exit_bb;
4838
 
4839
      /* Skip optional reduction region.  */
4840
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
4841
        {
4842
          --i;
4843
          --casei;
4844
          continue;
4845
        }
4846
 
4847
      s_entry_bb = inner->entry;
4848
      s_exit_bb = inner->exit;
4849
 
4850
      t = gimple_block_label (s_entry_bb);
4851
      u = build_int_cst (unsigned_type_node, casei);
4852
      u = build_case_label (u, NULL, t);
4853
      VEC_quick_push (tree, label_vec, u);
4854
 
4855
      si = gsi_last_bb (s_entry_bb);
4856
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
4857
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
4858
      gsi_remove (&si, true);
4859
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;
4860
 
4861
      if (s_exit_bb == NULL)
4862
        continue;
4863
 
4864
      si = gsi_last_bb (s_exit_bb);
4865
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4866
      gsi_remove (&si, true);
4867
 
4868
      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
4869
    }
4870
 
4871
  /* Error handling code goes in DEFAULT_BB.  */
4872
  t = gimple_block_label (default_bb);
4873
  u = build_case_label (NULL, NULL, t);
4874
  make_edge (l0_bb, default_bb, 0);
4875
 
4876
  stmt = gimple_build_switch_vec (vmain, u, label_vec);
4877
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
4878
  gsi_remove (&switch_si, true);
4879
  VEC_free (tree, heap, label_vec);
4880
 
4881
  si = gsi_start_bb (default_bb);
4882
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_TRAP), 0);
4883
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);
4884
 
4885
  if (exit_reachable)
4886
    {
4887
      tree bfn_decl;
4888
 
4889
      /* Code to get the next section goes in L1_BB.  */
4890
      si = gsi_last_bb (l1_bb);
4891
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);
4892
 
4893
      bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_NEXT);
4894
      stmt = gimple_build_call (bfn_decl, 0);
4895
      gimple_call_set_lhs (stmt, vnext);
4896
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4897
      gsi_remove (&si, true);
4898
 
4899
      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;
4900
 
4901
      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
4902
      si = gsi_last_bb (l2_bb);
4903
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
4904
        t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END_NOWAIT);
4905
      else
4906
        t = builtin_decl_explicit (BUILT_IN_GOMP_SECTIONS_END);
4907
      stmt = gimple_build_call (t, 0);
4908
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
4909
      gsi_remove (&si, true);
4910
    }
4911
 
4912
  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
4913
}
4914
 
4915
 
4916
/* Expand code for an OpenMP single directive.  We've already expanded
4917
   much of the code; here we simply place the GOMP_barrier call.  */
4918
 
4919
static void
4920
expand_omp_single (struct omp_region *region)
4921
{
4922
  basic_block entry_bb, exit_bb;
4923
  gimple_stmt_iterator si;
4924
  bool need_barrier = false;
4925
 
4926
  entry_bb = region->entry;
4927
  exit_bb = region->exit;
4928
 
4929
  si = gsi_last_bb (entry_bb);
4930
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
4931
     be removed.  We need to ensure that the thread that entered the single
4932
     does not exit before the data is copied out by the other threads.  */
4933
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
4934
                       OMP_CLAUSE_COPYPRIVATE))
4935
    need_barrier = true;
4936
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
4937
  gsi_remove (&si, true);
4938
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4939
 
4940
  si = gsi_last_bb (exit_bb);
4941
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
4942
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
4943
                              false, GSI_SAME_STMT);
4944
  gsi_remove (&si, true);
4945
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4946
}
4947
 
4948
 
4949
/* Generic expansion for OpenMP synchronization directives: master,
4950
   ordered and critical.  All we need to do here is remove the entry
4951
   and exit markers for REGION.  */
4952
 
4953
static void
4954
expand_omp_synch (struct omp_region *region)
4955
{
4956
  basic_block entry_bb, exit_bb;
4957
  gimple_stmt_iterator si;
4958
 
4959
  entry_bb = region->entry;
4960
  exit_bb = region->exit;
4961
 
4962
  si = gsi_last_bb (entry_bb);
4963
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4964
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4965
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4966
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4967
  gsi_remove (&si, true);
4968
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4969
 
4970
  if (exit_bb)
4971
    {
4972
      si = gsi_last_bb (exit_bb);
4973
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4974
      gsi_remove (&si, true);
4975
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4976
    }
4977
}
4978
 
4979
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
4980
   operation as a normal volatile load.  */
4981
 
4982
static bool
4983
expand_omp_atomic_load (basic_block load_bb, tree addr,
4984
                        tree loaded_val, int index)
4985
{
4986
  enum built_in_function tmpbase;
4987
  gimple_stmt_iterator gsi;
4988
  basic_block store_bb;
4989
  location_t loc;
4990
  gimple stmt;
4991
  tree decl, call, type, itype;
4992
 
4993
  gsi = gsi_last_bb (load_bb);
4994
  stmt = gsi_stmt (gsi);
4995
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
4996
  loc = gimple_location (stmt);
4997
 
4998
  /* ??? If the target does not implement atomic_load_optab[mode], and mode
4999
     is smaller than word size, then expand_atomic_load assumes that the load
5000
     is atomic.  We could avoid the builtin entirely in this case.  */
5001
 
5002
  tmpbase = (enum built_in_function) (BUILT_IN_ATOMIC_LOAD_N + index + 1);
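  /* E.g. a 4-byte type has index 2, and BUILT_IN_ATOMIC_LOAD_N + 2 + 1 names
     __atomic_load_4; this relies on the BUILT_IN_ATOMIC_LOAD_{1,2,4,8,16}
     decls following BUILT_IN_ATOMIC_LOAD_N in order of increasing size.  */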
5003
  decl = builtin_decl_explicit (tmpbase);
5004
  if (decl == NULL_TREE)
5005
    return false;
5006
 
5007
  type = TREE_TYPE (loaded_val);
5008
  itype = TREE_TYPE (TREE_TYPE (decl));
5009
 
5010
  call = build_call_expr_loc (loc, decl, 2, addr,
5011
                              build_int_cst (NULL, MEMMODEL_RELAXED));
5012
  if (!useless_type_conversion_p (type, itype))
5013
    call = fold_build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5014
  call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5015
 
5016
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5017
  gsi_remove (&gsi, true);
5018
 
5019
  store_bb = single_succ (load_bb);
5020
  gsi = gsi_last_bb (store_bb);
5021
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5022
  gsi_remove (&gsi, true);
5023
 
5024
  if (gimple_in_ssa_p (cfun))
5025
    update_ssa (TODO_update_ssa_no_phi);
5026
 
5027
  return true;
5028
}
5029
 
5030
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
5031
   operation as a normal volatile store.  */
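 
/* For instance, "#pragma omp atomic write  x = expr;" on a 4-byte X can be
   implemented as __atomic_store_4, while a capture form that still needs the
   previous value is turned into an __atomic_exchange_N call instead (see the
   EXCHANGE handling below).  */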
5032
 
5033
static bool
5034
expand_omp_atomic_store (basic_block load_bb, tree addr,
5035
                         tree loaded_val, tree stored_val, int index)
5036
{
5037
  enum built_in_function tmpbase;
5038
  gimple_stmt_iterator gsi;
5039
  basic_block store_bb = single_succ (load_bb);
5040
  location_t loc;
5041
  gimple stmt;
5042
  tree decl, call, type, itype;
5043
  enum machine_mode imode;
5044
  bool exchange;
5045
 
5046
  gsi = gsi_last_bb (load_bb);
5047
  stmt = gsi_stmt (gsi);
5048
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD);
5049
 
5050
  /* If the load value is needed, then this isn't a store but an exchange.  */
5051
  exchange = gimple_omp_atomic_need_value_p (stmt);
5052
 
5053
  gsi = gsi_last_bb (store_bb);
5054
  stmt = gsi_stmt (gsi);
5055
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE);
5056
  loc = gimple_location (stmt);
5057
 
5058
  /* ??? If the target does not implement atomic_store_optab[mode], and mode
5059
     is smaller than word size, then expand_atomic_store assumes that the store
5060
     is atomic.  We could avoid the builtin entirely in this case.  */
5061
 
5062
  tmpbase = (exchange ? BUILT_IN_ATOMIC_EXCHANGE_N : BUILT_IN_ATOMIC_STORE_N);
5063
  tmpbase = (enum built_in_function) ((int) tmpbase + index + 1);
5064
  decl = builtin_decl_explicit (tmpbase);
5065
  if (decl == NULL_TREE)
5066
    return false;
5067
 
5068
  type = TREE_TYPE (stored_val);
5069
 
5070
  /* Dig out the type of the function's second argument.  */
5071
  itype = TREE_TYPE (decl);
5072
  itype = TYPE_ARG_TYPES (itype);
5073
  itype = TREE_CHAIN (itype);
5074
  itype = TREE_VALUE (itype);
5075
  imode = TYPE_MODE (itype);
5076
 
5077
  if (exchange && !can_atomic_exchange_p (imode, true))
5078
    return false;
5079
 
5080
  if (!useless_type_conversion_p (itype, type))
5081
    stored_val = fold_build1_loc (loc, VIEW_CONVERT_EXPR, itype, stored_val);
5082
  call = build_call_expr_loc (loc, decl, 3, addr, stored_val,
5083
                              build_int_cst (NULL, MEMMODEL_RELAXED));
5084
  if (exchange)
5085
    {
5086
      if (!useless_type_conversion_p (type, itype))
5087
        call = build1_loc (loc, VIEW_CONVERT_EXPR, type, call);
5088
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, loaded_val, call);
5089
    }
5090
 
5091
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5092
  gsi_remove (&gsi, true);
5093
 
5094
  /* Remove the GIMPLE_OMP_ATOMIC_LOAD that we verified above.  */
5095
  gsi = gsi_last_bb (load_bb);
5096
  gsi_remove (&gsi, true);
5097
 
5098
  if (gimple_in_ssa_p (cfun))
5099
    update_ssa (TODO_update_ssa_no_phi);
5100
 
5101
  return true;
5102
}
5103
 
5104
/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
5105
   operation as a __atomic_fetch_op builtin.  INDEX is log2 of the
5106
   size of the data type, and thus usable to find the index of the builtin
5107
   decl.  Returns false if the expression is not of the proper form.  */
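 
/* For example, "#pragma omp atomic  x += y;" on a 4-byte int X matches the
   PLUS_EXPR case below and, when neither the old nor the new value is
   needed, becomes roughly __atomic_fetch_add_4 (&x, y, MEMMODEL_RELAXED).  */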
5108
 
5109
static bool
5110
expand_omp_atomic_fetch_op (basic_block load_bb,
5111
                            tree addr, tree loaded_val,
5112
                            tree stored_val, int index)
5113
{
5114
  enum built_in_function oldbase, newbase, tmpbase;
5115
  tree decl, itype, call;
5116
  tree lhs, rhs;
5117
  basic_block store_bb = single_succ (load_bb);
5118
  gimple_stmt_iterator gsi;
5119
  gimple stmt;
5120
  location_t loc;
5121
  enum tree_code code;
5122
  bool need_old, need_new;
5123
  enum machine_mode imode;
5124
 
5125
  /* We expect to find the following sequences:
5126
 
5127
   load_bb:
5128
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)
5129
 
5130
   store_bb:
5131
       val = tmp OP something; (or: something OP tmp)
5132
       GIMPLE_OMP_ATOMIC_STORE (val)
5133
 
5134
  ???FIXME: Allow a more flexible sequence.
5135
  Perhaps use data flow to pick the statements.
5136
 
5137
  */
5138
 
5139
  gsi = gsi_after_labels (store_bb);
5140
  stmt = gsi_stmt (gsi);
5141
  loc = gimple_location (stmt);
5142
  if (!is_gimple_assign (stmt))
5143
    return false;
5144
  gsi_next (&gsi);
5145
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
5146
    return false;
5147
  need_new = gimple_omp_atomic_need_value_p (gsi_stmt (gsi));
5148
  need_old = gimple_omp_atomic_need_value_p (last_stmt (load_bb));
5149
  gcc_checking_assert (!need_old || !need_new);
5150
 
5151
  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
5152
    return false;
5153
 
5154
  /* Check for one of the supported fetch-op operations.  */
5155
  code = gimple_assign_rhs_code (stmt);
5156
  switch (code)
5157
    {
5158
    case PLUS_EXPR:
5159
    case POINTER_PLUS_EXPR:
5160
      oldbase = BUILT_IN_ATOMIC_FETCH_ADD_N;
5161
      newbase = BUILT_IN_ATOMIC_ADD_FETCH_N;
5162
      break;
5163
    case MINUS_EXPR:
5164
      oldbase = BUILT_IN_ATOMIC_FETCH_SUB_N;
5165
      newbase = BUILT_IN_ATOMIC_SUB_FETCH_N;
5166
      break;
5167
    case BIT_AND_EXPR:
5168
      oldbase = BUILT_IN_ATOMIC_FETCH_AND_N;
5169
      newbase = BUILT_IN_ATOMIC_AND_FETCH_N;
5170
      break;
5171
    case BIT_IOR_EXPR:
5172
      oldbase = BUILT_IN_ATOMIC_FETCH_OR_N;
5173
      newbase = BUILT_IN_ATOMIC_OR_FETCH_N;
5174
      break;
5175
    case BIT_XOR_EXPR:
5176
      oldbase = BUILT_IN_ATOMIC_FETCH_XOR_N;
5177
      newbase = BUILT_IN_ATOMIC_XOR_FETCH_N;
5178
      break;
5179
    default:
5180
      return false;
5181
    }
5182
 
5183
  /* Make sure the expression is of the proper form.  */
5184
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
5185
    rhs = gimple_assign_rhs2 (stmt);
5186
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
5187
           && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
5188
    rhs = gimple_assign_rhs1 (stmt);
5189
  else
5190
    return false;
5191
 
5192
  tmpbase = ((enum built_in_function)
5193
             ((need_new ? newbase : oldbase) + index + 1));
5194
  decl = builtin_decl_explicit (tmpbase);
5195
  if (decl == NULL_TREE)
5196
    return false;
5197
  itype = TREE_TYPE (TREE_TYPE (decl));
5198
  imode = TYPE_MODE (itype);
5199
 
5200
  /* We could test all of the various optabs involved, but the fact of the
5201
     matter is that (with the exception of i486 vs i586 and xadd) all targets
5202
     that support any atomic operation optab also implement compare-and-swap.
5203
     Let optabs.c take care of expanding any compare-and-swap loop.  */
5204
  if (!can_compare_and_swap_p (imode, true))
5205
    return false;
5206
 
5207
  gsi = gsi_last_bb (load_bb);
5208
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
5209
 
5210
  /* OpenMP does not imply any barrier-like semantics on its atomic ops.
5211
     It only requires that the operation happen atomically.  Thus we can
5212
     use the RELAXED memory model.  */
5213
  call = build_call_expr_loc (loc, decl, 3, addr,
5214
                              fold_convert_loc (loc, itype, rhs),
5215
                              build_int_cst (NULL, MEMMODEL_RELAXED));
5216
 
5217
  if (need_old || need_new)
5218
    {
5219
      lhs = need_old ? loaded_val : stored_val;
5220
      call = fold_convert_loc (loc, TREE_TYPE (lhs), call);
5221
      call = build2_loc (loc, MODIFY_EXPR, void_type_node, lhs, call);
5222
    }
5223
  else
5224
    call = fold_convert_loc (loc, void_type_node, call);
5225
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
5226
  gsi_remove (&gsi, true);
5227
 
5228
  gsi = gsi_last_bb (store_bb);
5229
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
5230
  gsi_remove (&gsi, true);
5231
  gsi = gsi_last_bb (store_bb);
5232
  gsi_remove (&gsi, true);
5233
 
5234
  if (gimple_in_ssa_p (cfun))
5235
    update_ssa (TODO_update_ssa_no_phi);
5236
 
5237
  return true;
5238
}
5239
 
5240
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:
5241
 
5242
      oldval = *addr;
5243
      repeat:
5244
        newval = rhs;    // with oldval replacing *addr in rhs
5245
        oldval = __sync_val_compare_and_swap (addr, oldval, newval);
5246
        if (oldval != newval)
5247
          goto repeat;
5248
 
5249
   INDEX is log2 of the size of the data type, and thus usable to find the
5250
   index of the builtin decl.  */
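 
/* A typical case that ends up here is "#pragma omp atomic  d *= 2.0;" for a
   double D: there is no fetch-op builtin for multiplication, so the value is
   VIEW_CONVERT'ed to an 8-byte integer and __sync_val_compare_and_swap_8 is
   retried until the swap succeeds.  */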
5251
 
5252
static bool
5253
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
5254
                            tree addr, tree loaded_val, tree stored_val,
5255
                            int index)
5256
{
5257
  tree loadedi, storedi, initial, new_storedi, old_vali;
5258
  tree type, itype, cmpxchg, iaddr;
5259
  gimple_stmt_iterator si;
5260
  basic_block loop_header = single_succ (load_bb);
5261
  gimple phi, stmt;
5262
  edge e;
5263
  enum built_in_function fncode;
5264
 
5265
  /* ??? We need a non-pointer interface to __atomic_compare_exchange in
5266
     order to use the RELAXED memory model effectively.  */
5267
  fncode = (enum built_in_function)((int)BUILT_IN_SYNC_VAL_COMPARE_AND_SWAP_N
5268
                                    + index + 1);
5269
  cmpxchg = builtin_decl_explicit (fncode);
5270
  if (cmpxchg == NULL_TREE)
5271
    return false;
5272
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5273
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));
5274
 
5275
  if (!can_compare_and_swap_p (TYPE_MODE (itype), true))
5276
    return false;
5277
 
5278
  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
5279
  si = gsi_last_bb (load_bb);
5280
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5281
 
5282
  /* For floating-point values, we'll need to view-convert them to integers
5283
     so that we can perform the atomic compare and swap.  Simplify the
5284
     following code by always setting up the "i"ntegral variables.  */
5285
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
5286
    {
5287
      tree iaddr_val;
5288
 
5289
      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
5290
                                                           true), NULL);
5291
      iaddr_val
5292
        = force_gimple_operand_gsi (&si,
5293
                                    fold_convert (TREE_TYPE (iaddr), addr),
5294
                                    false, NULL_TREE, true, GSI_SAME_STMT);
5295
      stmt = gimple_build_assign (iaddr, iaddr_val);
5296
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5297
      loadedi = create_tmp_var (itype, NULL);
5298
      if (gimple_in_ssa_p (cfun))
5299
        {
5300
          add_referenced_var (iaddr);
5301
          add_referenced_var (loadedi);
5302
          loadedi = make_ssa_name (loadedi, NULL);
5303
        }
5304
    }
5305
  else
5306
    {
5307
      iaddr = addr;
5308
      loadedi = loaded_val;
5309
    }
5310
 
5311
  initial
5312
    = force_gimple_operand_gsi (&si,
5313
                                build2 (MEM_REF, TREE_TYPE (TREE_TYPE (iaddr)),
5314
                                        iaddr,
5315
                                        build_int_cst (TREE_TYPE (iaddr), 0)),
5316
                                true, NULL_TREE, true, GSI_SAME_STMT);
5317
 
5318
  /* Move the value to the LOADEDI temporary.  */
5319
  if (gimple_in_ssa_p (cfun))
5320
    {
5321
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
5322
      phi = create_phi_node (loadedi, loop_header);
5323
      SSA_NAME_DEF_STMT (loadedi) = phi;
5324
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
5325
               initial);
5326
    }
5327
  else
5328
    gsi_insert_before (&si,
5329
                       gimple_build_assign (loadedi, initial),
5330
                       GSI_SAME_STMT);
5331
  if (loadedi != loaded_val)
5332
    {
5333
      gimple_stmt_iterator gsi2;
5334
      tree x;
5335
 
5336
      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
5337
      gsi2 = gsi_start_bb (loop_header);
5338
      if (gimple_in_ssa_p (cfun))
5339
        {
5340
          gimple stmt;
5341
          x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5342
                                        true, GSI_SAME_STMT);
5343
          stmt = gimple_build_assign (loaded_val, x);
5344
          gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
5345
        }
5346
      else
5347
        {
5348
          x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
5349
          force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
5350
                                    true, GSI_SAME_STMT);
5351
        }
5352
    }
5353
  gsi_remove (&si, true);
5354
 
5355
  si = gsi_last_bb (store_bb);
5356
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5357
 
5358
  if (iaddr == addr)
5359
    storedi = stored_val;
5360
  else
5361
    storedi =
5362
      force_gimple_operand_gsi (&si,
5363
                                build1 (VIEW_CONVERT_EXPR, itype,
5364
                                        stored_val), true, NULL_TREE, true,
5365
                                GSI_SAME_STMT);
5366
 
5367
  /* Build the compare&swap statement.  */
5368
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
5369
  new_storedi = force_gimple_operand_gsi (&si,
5370
                                          fold_convert (TREE_TYPE (loadedi),
5371
                                                        new_storedi),
5372
                                          true, NULL_TREE,
5373
                                          true, GSI_SAME_STMT);
5374
 
5375
  if (gimple_in_ssa_p (cfun))
5376
    old_vali = loadedi;
5377
  else
5378
    {
5379
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
5380
      if (gimple_in_ssa_p (cfun))
5381
        add_referenced_var (old_vali);
5382
      stmt = gimple_build_assign (old_vali, loadedi);
5383
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5384
 
5385
      stmt = gimple_build_assign (loadedi, new_storedi);
5386
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5387
    }
5388
 
5389
  /* Note that we always perform the comparison as an integer, even for
5390
     floating point.  This allows the atomic operation to properly
5391
     succeed even with NaNs and -0.0.  */
5392
  stmt = gimple_build_cond_empty
5393
           (build2 (NE_EXPR, boolean_type_node,
5394
                    new_storedi, old_vali));
5395
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5396
 
5397
  /* Update cfg.  */
5398
  e = single_succ_edge (store_bb);
5399
  e->flags &= ~EDGE_FALLTHRU;
5400
  e->flags |= EDGE_FALSE_VALUE;
5401
 
5402
  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);
5403
 
5404
  /* Copy the new value to loadedi (we already did that before the condition
5405
     if we are not in SSA).  */
5406
  if (gimple_in_ssa_p (cfun))
5407
    {
5408
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
5409
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
5410
    }
5411
 
5412
  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
5413
  gsi_remove (&si, true);
5414
 
5415
  if (gimple_in_ssa_p (cfun))
5416
    update_ssa (TODO_update_ssa_no_phi);
5417
 
5418
  return true;
5419
}
5420
 
5421
/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:
5422
 
5423
                                  GOMP_atomic_start ();
5424
                                  *addr = rhs;
5425
                                  GOMP_atomic_end ();
5426
 
5427
   The result is not globally atomic, but works so long as all parallel
5428
   references are within #pragma omp atomic directives.  According to
5429
   responses received from omp@openmp.org, this appears to be within spec.
5430
   That makes sense, since that's how several other compilers handle
5431
   this situation as well.
5432
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
5433
   expanding.  STORED_VAL is the operand of the matching
5434
   GIMPLE_OMP_ATOMIC_STORE.
5435
 
5436
   We replace
5437
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
5438
   loaded_val = *addr;
5439
 
5440
   and replace
5441
   GIMPLE_OMP_ATOMIC_STORE (stored_val)  with
5442
   *addr = stored_val;
5443
*/
5444
 
5445
static bool
5446
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
5447
                         tree addr, tree loaded_val, tree stored_val)
5448
{
5449
  gimple_stmt_iterator si;
5450
  gimple stmt;
5451
  tree t;
5452
 
5453
  si = gsi_last_bb (load_bb);
5454
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);
5455
 
5456
  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
5457
  t = build_call_expr (t, 0);
5458
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5459
 
5460
  stmt = gimple_build_assign (loaded_val, build_simple_mem_ref (addr));
5461
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5462
  gsi_remove (&si, true);
5463
 
5464
  si = gsi_last_bb (store_bb);
5465
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);
5466
 
5467
  stmt = gimple_build_assign (build_simple_mem_ref (unshare_expr (addr)),
5468
                              stored_val);
5469
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
5470
 
5471
  t = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
5472
  t = build_call_expr (t, 0);
5473
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
5474
  gsi_remove (&si, true);
5475
 
5476
  if (gimple_in_ssa_p (cfun))
5477
    update_ssa (TODO_update_ssa_no_phi);
5478
  return true;
5479
}
5480
 
5481
/* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
5482
   using expand_omp_atomic_fetch_op.  If that fails, we try to
5483
   call expand_omp_atomic_pipeline, and if that fails too, the
5484
   ultimate fallback is wrapping the operation in a mutex
5485
   (expand_omp_atomic_mutex).  REGION is the atomic region built
5486
   by build_omp_regions_1().  */
5487
 
5488
static void
5489
expand_omp_atomic (struct omp_region *region)
5490
{
5491
  basic_block load_bb = region->entry, store_bb = region->exit;
5492
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
5493
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
5494
  tree addr = gimple_omp_atomic_load_rhs (load);
5495
  tree stored_val = gimple_omp_atomic_store_val (store);
5496
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
5497
  HOST_WIDE_INT index;
5498
 
5499
  /* Make sure the type is one of the supported sizes.  */
5500
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
5501
  index = exact_log2 (index);
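  /* E.g. a 4-byte int yields index 2; the supported sizes are 1, 2, 4, 8
     and 16 bytes, i.e. index 0 through 4.  */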
5502
  if (index >= 0 && index <= 4)
5503
    {
5504
      unsigned int align = TYPE_ALIGN_UNIT (type);
5505
 
5506
      /* __sync builtins require strict data alignment.  */
5507
      /* ??? Assume BIGGEST_ALIGNMENT *is* aligned.  */
5508
      if (exact_log2 (align) >= index
5509
          || align * BITS_PER_UNIT >= BIGGEST_ALIGNMENT)
5510
        {
5511
          /* Atomic load.  */
5512
          if (loaded_val == stored_val
5513
              && (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5514
                  || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5515
              && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5516
              && expand_omp_atomic_load (load_bb, addr, loaded_val, index))
5517
            return;
5518
 
5519
          /* Atomic store.  */
5520
          if ((GET_MODE_CLASS (TYPE_MODE (type)) == MODE_INT
5521
               || GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT)
5522
              && GET_MODE_BITSIZE (TYPE_MODE (type)) <= BITS_PER_WORD
5523
              && store_bb == single_succ (load_bb)
5524
              && first_stmt (store_bb) == store
5525
              && expand_omp_atomic_store (load_bb, addr, loaded_val,
5526
                                          stored_val, index))
5527
            return;
5528
 
5529
          /* When possible, use specialized atomic update functions.  */
5530
          if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
5531
              && store_bb == single_succ (load_bb)
5532
              && expand_omp_atomic_fetch_op (load_bb, addr,
5533
                                             loaded_val, stored_val, index))
5534
            return;
5535
 
5536
          /* If we don't have specialized __sync builtins, try to implement
5537
             it as a compare-and-swap loop.  */
5538
          if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
5539
                                          loaded_val, stored_val, index))
5540
            return;
5541
        }
5542
    }
5543
 
5544
  /* The ultimate fallback is wrapping the operation in a mutex.  */
5545
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
5546
}
5547
 
5548
 
5549
/* Expand the parallel region tree rooted at REGION.  Expansion
5550
   proceeds in depth-first order.  Innermost regions are expanded
5551
   first.  This way, parallel regions that require a new function to
5552
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
5553
   internal dependencies in their body.  */
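 
/* For instance, a "#pragma omp parallel" whose body contains a
   "#pragma omp for" is represented as a GIMPLE_OMP_PARALLEL region with a
   GIMPLE_OMP_FOR region as its only child; the loop is expanded first and
   only then is the parallel body outlined into its own function.  */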
5554
 
5555
static void
5556
expand_omp (struct omp_region *region)
5557
{
5558
  while (region)
5559
    {
5560
      location_t saved_location;
5561
 
5562
      /* First, determine whether this is a combined parallel+workshare
5563
         region.  */
5564
      if (region->type == GIMPLE_OMP_PARALLEL)
5565
        determine_parallel_type (region);
5566
 
5567
      if (region->inner)
5568
        expand_omp (region->inner);
5569
 
5570
      saved_location = input_location;
5571
      if (gimple_has_location (last_stmt (region->entry)))
5572
        input_location = gimple_location (last_stmt (region->entry));
5573
 
5574
      switch (region->type)
5575
        {
5576
        case GIMPLE_OMP_PARALLEL:
5577
        case GIMPLE_OMP_TASK:
5578
          expand_omp_taskreg (region);
5579
          break;
5580
 
5581
        case GIMPLE_OMP_FOR:
5582
          expand_omp_for (region);
5583
          break;
5584
 
5585
        case GIMPLE_OMP_SECTIONS:
5586
          expand_omp_sections (region);
5587
          break;
5588
 
5589
        case GIMPLE_OMP_SECTION:
5590
          /* Individual omp sections are handled together with their
5591
             parent GIMPLE_OMP_SECTIONS region.  */
5592
          break;
5593
 
5594
        case GIMPLE_OMP_SINGLE:
5595
          expand_omp_single (region);
5596
          break;
5597
 
5598
        case GIMPLE_OMP_MASTER:
5599
        case GIMPLE_OMP_ORDERED:
5600
        case GIMPLE_OMP_CRITICAL:
5601
          expand_omp_synch (region);
5602
          break;
5603
 
5604
        case GIMPLE_OMP_ATOMIC_LOAD:
5605
          expand_omp_atomic (region);
5606
          break;
5607
 
5608
        default:
5609
          gcc_unreachable ();
5610
        }
5611
 
5612
      input_location = saved_location;
5613
      region = region->next;
5614
    }
5615
}
5616
 
5617
 
5618
/* Helper for build_omp_regions.  Scan the dominator tree starting at
5619
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
5620
   true, the function ends once a single tree is built (otherwise, the whole
5621
   forest of OMP constructs may be built).  */
5622
 
5623
static void
5624
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
5625
                     bool single_tree)
5626
{
5627
  gimple_stmt_iterator gsi;
5628
  gimple stmt;
5629
  basic_block son;
5630
 
5631
  gsi = gsi_last_bb (bb);
5632
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
5633
    {
5634
      struct omp_region *region;
5635
      enum gimple_code code;
5636
 
5637
      stmt = gsi_stmt (gsi);
5638
      code = gimple_code (stmt);
5639
      if (code == GIMPLE_OMP_RETURN)
5640
        {
5641
          /* STMT is the return point out of region PARENT.  Mark it
5642
             as the exit point and make PARENT the immediately
5643
             enclosing region.  */
5644
          gcc_assert (parent);
5645
          region = parent;
5646
          region->exit = bb;
5647
          parent = parent->outer;
5648
        }
5649
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
5650
        {
5651
          /* GIMPLE_OMP_ATOMIC_STORE is analogous to
5652
             GIMPLE_OMP_RETURN, but matches with
5653
             GIMPLE_OMP_ATOMIC_LOAD.  */
5654
          gcc_assert (parent);
5655
          gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
5656
          region = parent;
5657
          region->exit = bb;
5658
          parent = parent->outer;
5659
        }
5660
 
5661
      else if (code == GIMPLE_OMP_CONTINUE)
5662
        {
5663
          gcc_assert (parent);
5664
          parent->cont = bb;
5665
        }
5666
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
5667
        {
5668
          /* GIMPLE_OMP_SECTIONS_SWITCH is part of
5669
             GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
5670
          ;
5671
        }
5672
      else
5673
        {
5674
          /* Otherwise, this directive becomes the parent for a new
5675
             region.  */
5676
          region = new_omp_region (bb, code, parent);
5677
          parent = region;
5678
        }
5679
    }
5680
 
5681
  if (single_tree && !parent)
5682
    return;
5683
 
5684
  for (son = first_dom_son (CDI_DOMINATORS, bb);
5685
       son;
5686
       son = next_dom_son (CDI_DOMINATORS, son))
5687
    build_omp_regions_1 (son, parent, single_tree);
5688
}
5689
 
5690
/* Builds the tree of OMP regions rooted at ROOT, storing it to
5691
   root_omp_region.  */
5692
 
5693
static void
5694
build_omp_regions_root (basic_block root)
5695
{
5696
  gcc_assert (root_omp_region == NULL);
5697
  build_omp_regions_1 (root, NULL, true);
5698
  gcc_assert (root_omp_region != NULL);
5699
}
5700
 
5701
/* Expand the OMP construct (and its subconstructs) starting at block HEAD.  */
5702
 
5703
void
5704
omp_expand_local (basic_block head)
5705
{
5706
  build_omp_regions_root (head);
5707
  if (dump_file && (dump_flags & TDF_DETAILS))
5708
    {
5709
      fprintf (dump_file, "\nOMP region tree\n\n");
5710
      dump_omp_region (dump_file, root_omp_region, 0);
5711
      fprintf (dump_file, "\n");
5712
    }
5713
 
5714
  remove_exit_barriers (root_omp_region);
5715
  expand_omp (root_omp_region);
5716
 
5717
  free_omp_regions ();
5718
}
5719
 
5720
/* Scan the CFG and build a tree of OMP regions, storing the result in
5721
   root_omp_region.  */
5722
 
5723
static void
5724
build_omp_regions (void)
5725
{
5726
  gcc_assert (root_omp_region == NULL);
5727
  calculate_dominance_info (CDI_DOMINATORS);
5728
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
5729
}
5730
 
5731
/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */
5732
 
5733
static unsigned int
5734
execute_expand_omp (void)
5735
{
5736
  build_omp_regions ();
5737
 
5738
  if (!root_omp_region)
5739
    return 0;
5740
 
5741
  if (dump_file)
5742
    {
5743
      fprintf (dump_file, "\nOMP region tree\n\n");
5744
      dump_omp_region (dump_file, root_omp_region, 0);
5745
      fprintf (dump_file, "\n");
5746
    }
5747
 
5748
  remove_exit_barriers (root_omp_region);
5749
 
5750
  expand_omp (root_omp_region);
5751
 
5752
  cleanup_tree_cfg ();
5753
 
5754
  free_omp_regions ();
5755
 
5756
  return 0;
5757
}
5758
 
5759
/* OMP expansion -- the default pass, run before creation of SSA form.  */
5760
 
5761
static bool
5762
gate_expand_omp (void)
5763
{
5764
  return (flag_openmp != 0 && !seen_error ());
5765
}
5766
 
5767
struct gimple_opt_pass pass_expand_omp =
5768
{
5769
 {
5770
  GIMPLE_PASS,
5771
  "ompexp",                             /* name */
5772
  gate_expand_omp,                      /* gate */
5773
  execute_expand_omp,                   /* execute */
5774
  NULL,                                 /* sub */
5775
  NULL,                                 /* next */
5776
  0,                                     /* static_pass_number */
5777
  TV_NONE,                              /* tv_id */
5778
  PROP_gimple_any,                      /* properties_required */
5779
  0,                                     /* properties_provided */
5780
  0,                                     /* properties_destroyed */
5781
  0,                                     /* todo_flags_start */
5782
  0                                      /* todo_flags_finish */
5783
 }
5784
};
5785
 
5786
/* Routines to lower OpenMP directives into OMP-GIMPLE.  */
5787
 
5788
/* Lower the OpenMP sections directive in the current statement in GSI_P.
5789
   CTX is the enclosing OMP context for the current statement.  */
5790
 
5791
static void
5792
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
5793
{
5794
  tree block, control;
5795
  gimple_stmt_iterator tgsi;
5796
  unsigned i, len;
5797
  gimple stmt, new_stmt, bind, t;
5798
  gimple_seq ilist, dlist, olist, new_body, body;
5799
  struct gimplify_ctx gctx;
5800
 
5801
  stmt = gsi_stmt (*gsi_p);
5802
 
5803
  push_gimplify_context (&gctx);
5804
 
5805
  dlist = NULL;
5806
  ilist = NULL;
5807
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
5808
                           &ilist, &dlist, ctx);
5809
 
5810
  tgsi = gsi_start (gimple_omp_body (stmt));
5811
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
5812
    continue;
5813
 
5814
  tgsi = gsi_start (gimple_omp_body (stmt));
5815
  body = NULL;
5816
  for (i = 0; i < len; i++, gsi_next (&tgsi))
5817
    {
5818
      omp_context *sctx;
5819
      gimple sec_start;
5820
 
5821
      sec_start = gsi_stmt (tgsi);
5822
      sctx = maybe_lookup_ctx (sec_start);
5823
      gcc_assert (sctx);
5824
 
5825
      gimple_seq_add_stmt (&body, sec_start);
5826
 
5827
      lower_omp (gimple_omp_body (sec_start), sctx);
5828
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
5829
      gimple_omp_set_body (sec_start, NULL);
5830
 
5831
      if (i == len - 1)
5832
        {
5833
          gimple_seq l = NULL;
5834
          lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
5835
                                     &l, ctx);
5836
          gimple_seq_add_seq (&body, l);
5837
          gimple_omp_section_set_last (sec_start);
5838
        }
5839
 
5840
      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
5841
    }
5842
 
5843
  block = make_node (BLOCK);
5844
  bind = gimple_build_bind (NULL, body, block);
5845
 
5846
  olist = NULL;
5847
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);
5848
 
5849
  block = make_node (BLOCK);
5850
  new_stmt = gimple_build_bind (NULL, NULL, block);
5851
 
5852
  pop_gimplify_context (new_stmt);
5853
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
5854
  BLOCK_VARS (block) = gimple_bind_vars (bind);
5855
  if (BLOCK_VARS (block))
5856
    TREE_USED (block) = 1;
5857
 
5858
  new_body = NULL;
5859
  gimple_seq_add_seq (&new_body, ilist);
5860
  gimple_seq_add_stmt (&new_body, stmt);
5861
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
5862
  gimple_seq_add_stmt (&new_body, bind);
5863
 
5864
  control = create_tmp_var (unsigned_type_node, ".section");
5865
  t = gimple_build_omp_continue (control, control);
5866
  gimple_omp_sections_set_control (stmt, control);
5867
  gimple_seq_add_stmt (&new_body, t);
5868
 
5869
  gimple_seq_add_seq (&new_body, olist);
5870
  gimple_seq_add_seq (&new_body, dlist);
5871
 
5872
  new_body = maybe_catch_exception (new_body);
5873
 
5874
  t = gimple_build_omp_return
5875
        (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
5876
                            OMP_CLAUSE_NOWAIT));
5877
  gimple_seq_add_stmt (&new_body, t);
5878
 
5879
  gimple_bind_set_body (new_stmt, new_body);
5880
  gimple_omp_set_body (stmt, NULL);
5881
 
5882
  gsi_replace (gsi_p, new_stmt, true);
5883
}
5884
 
5885
 
5886
/* A subroutine of lower_omp_single.  Expand the simple form of
5887
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:
5888
 
5889
        if (GOMP_single_start ())
5890
          BODY;
5891
        [ GOMP_barrier (); ]    -> unless 'nowait' is present.
5892
 
5893
  FIXME.  It may be better to delay expanding the logic of this until
5894
  pass_expand_omp.  The expanded logic may make the job more difficult
5895
  for a synchronization analysis pass.  */
5896
 
5897
static void
5898
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
5899
{
5900
  location_t loc = gimple_location (single_stmt);
5901
  tree tlabel = create_artificial_label (loc);
5902
  tree flabel = create_artificial_label (loc);
5903
  gimple call, cond;
5904
  tree lhs, decl;
5905
 
5906
  decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
5907
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
5908
  call = gimple_build_call (decl, 0);
5909
  gimple_call_set_lhs (call, lhs);
5910
  gimple_seq_add_stmt (pre_p, call);
5911
 
5912
  cond = gimple_build_cond (EQ_EXPR, lhs,
5913
                            fold_convert_loc (loc, TREE_TYPE (lhs),
5914
                                              boolean_true_node),
5915
                            tlabel, flabel);
5916
  gimple_seq_add_stmt (pre_p, cond);
5917
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
5918
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5919
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
5920
}
5921
 
5922
 
5923
/* A subroutine of lower_omp_single.  Expand the simple form of
5924
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:
5925
 
5926
        #pragma omp single copyprivate (a, b, c)
5927
 
5928
   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:
5929
 
5930
      {
5931
        if ((copyout_p = GOMP_single_copy_start ()) == NULL)
5932
          {
5933
            BODY;
5934
            copyout.a = a;
5935
            copyout.b = b;
5936
            copyout.c = c;
5937
            GOMP_single_copy_end (&copyout);
5938
          }
5939
        else
5940
          {
5941
            a = copyout_p->a;
5942
            b = copyout_p->b;
5943
            c = copyout_p->c;
5944
          }
5945
        GOMP_barrier ();
5946
      }
5947
 
5948
  FIXME.  It may be better to delay expanding the logic of this until
5949
  pass_expand_omp.  The expanded logic may make the job more difficult
5950
  for a synchronization analysis pass.  */
5951
 
5952
static void
5953
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
5954
{
5955
  tree ptr_type, t, l0, l1, l2, bfn_decl;
5956
  gimple_seq copyin_seq;
5957
  location_t loc = gimple_location (single_stmt);
5958
 
5959
  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");
5960
 
5961
  ptr_type = build_pointer_type (ctx->record_type);
5962
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");
5963
 
5964
  l0 = create_artificial_label (loc);
5965
  l1 = create_artificial_label (loc);
5966
  l2 = create_artificial_label (loc);
5967
 
5968
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
5969
  t = build_call_expr_loc (loc, bfn_decl, 0);
5970
  t = fold_convert_loc (loc, ptr_type, t);
5971
  gimplify_assign (ctx->receiver_decl, t, pre_p);
5972
 
5973
  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
5974
              build_int_cst (ptr_type, 0));
5975
  t = build3 (COND_EXPR, void_type_node, t,
5976
              build_and_jump (&l0), build_and_jump (&l1));
5977
  gimplify_and_add (t, pre_p);
5978
 
5979
  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));
5980
 
5981
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
5982
 
5983
  copyin_seq = NULL;
5984
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
5985
                              &copyin_seq, ctx);
5986
 
5987
  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
5988
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
5989
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
5990
  gimplify_and_add (t, pre_p);
5991
 
5992
  t = build_and_jump (&l2);
5993
  gimplify_and_add (t, pre_p);
5994
 
5995
  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));
5996
 
5997
  gimple_seq_add_seq (pre_p, copyin_seq);
5998
 
5999
  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
6000
}
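
/* Illustration only: with

        #pragma omp single copyprivate (a)
          a = produce ();                     <-- hypothetical user code

   the thread for which GOMP_single_copy_start returns NULL runs the body,
   stores its 'a' into the .omp_copy_o record and publishes it through
   GOMP_single_copy_end; every other thread reads 'a' back through the
   pointer returned by GOMP_single_copy_start, exactly as in the expansion
   sketch above.  */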
6001
 
6002
 
6003
/* Expand code for an OpenMP single directive.  */
6004
 
6005
static void
6006
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6007
{
6008
  tree block;
6009
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
6010
  gimple_seq bind_body, dlist;
6011
  struct gimplify_ctx gctx;
6012
 
6013
  push_gimplify_context (&gctx);
6014
 
6015
  bind_body = NULL;
6016
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
6017
                           &bind_body, &dlist, ctx);
6018
  lower_omp (gimple_omp_body (single_stmt), ctx);
6019
 
6020
  gimple_seq_add_stmt (&bind_body, single_stmt);
6021
 
6022
  if (ctx->record_type)
6023
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
6024
  else
6025
    lower_omp_single_simple (single_stmt, &bind_body);
6026
 
6027
  gimple_omp_set_body (single_stmt, NULL);
6028
 
6029
  gimple_seq_add_seq (&bind_body, dlist);
6030
 
6031
  bind_body = maybe_catch_exception (bind_body);
6032
 
6033
  t = gimple_build_omp_return
6034
        (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
6035
                            OMP_CLAUSE_NOWAIT));
6036
  gimple_seq_add_stmt (&bind_body, t);
6037
 
6038
  block = make_node (BLOCK);
6039
  bind = gimple_build_bind (NULL, bind_body, block);
6040
 
6041
  pop_gimplify_context (bind);
6042
 
6043
  gimple_bind_append_vars (bind, ctx->block_vars);
6044
  BLOCK_VARS (block) = ctx->block_vars;
6045
  gsi_replace (gsi_p, bind, true);
6046
  if (BLOCK_VARS (block))
6047
    TREE_USED (block) = 1;
6048
}
6049
 
6050
 
6051
/* Expand code for an OpenMP master directive.  */
6052
 
6053
static void
6054
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6055
{
6056
  tree block, lab = NULL, x, bfn_decl;
6057
  gimple stmt = gsi_stmt (*gsi_p), bind;
6058
  location_t loc = gimple_location (stmt);
6059
  gimple_seq tseq;
6060
  struct gimplify_ctx gctx;
6061
 
6062
  push_gimplify_context (&gctx);
6063
 
6064
  block = make_node (BLOCK);
6065
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6066
                                 block);
6067
 
6068
  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
6069
  x = build_call_expr_loc (loc, bfn_decl, 0);
6070
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
6071
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
6072
  tseq = NULL;
6073
  gimplify_and_add (x, &tseq);
6074
  gimple_bind_add_seq (bind, tseq);
6075
 
6076
  lower_omp (gimple_omp_body (stmt), ctx);
6077
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6078
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6079
  gimple_omp_set_body (stmt, NULL);
6080
 
6081
  gimple_bind_add_stmt (bind, gimple_build_label (lab));
6082
 
6083
  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6084
 
6085
  pop_gimplify_context (bind);
6086
 
6087
  gimple_bind_append_vars (bind, ctx->block_vars);
6088
  BLOCK_VARS (block) = ctx->block_vars;
6089
  gsi_replace (gsi_p, bind, true);
6090
}
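
/* Illustration only: the lowering above turns

        #pragma omp master
          body ();

   into the equivalent of

        if (omp_get_thread_num () == 0)
          body ();

   i.e. a BUILT_IN_OMP_GET_THREAD_NUM call compared against zero, with a
   jump over the body for every non-master thread.  */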
6091
 
6092
 
6093
/* Expand code for an OpenMP ordered directive.  */
6094
 
6095
static void
6096
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6097
{
6098
  tree block;
6099
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
6100
  struct gimplify_ctx gctx;
6101
 
6102
  push_gimplify_context (&gctx);
6103
 
6104
  block = make_node (BLOCK);
6105
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
6106
                                   block);
6107
 
6108
  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
6109
                         0);
6110
  gimple_bind_add_stmt (bind, x);
6111
 
6112
  lower_omp (gimple_omp_body (stmt), ctx);
6113
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6114
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6115
  gimple_omp_set_body (stmt, NULL);
6116
 
6117
  x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END), 0);
6118
  gimple_bind_add_stmt (bind, x);
6119
 
6120
  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6121
 
6122
  pop_gimplify_context (bind);
6123
 
6124
  gimple_bind_append_vars (bind, ctx->block_vars);
6125
  BLOCK_VARS (block) = gimple_bind_vars (bind);
6126
  gsi_replace (gsi_p, bind, true);
6127
}
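
/* Illustration only: the body of

        #pragma omp ordered
          body ();

   is simply bracketed by the two runtime calls emitted above:

        GOMP_ordered_start ();
        body ();
        GOMP_ordered_end ();  */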
6128
 
6129
 
6130
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
6131
   substitution of a couple of function calls.  But in the NAMED case,
6132
   it requires that languages coordinate a symbol name.  It is therefore
6133
   best put here in common code.  */
6134
 
6135
static GTY((param1_is (tree), param2_is (tree)))
6136
  splay_tree critical_name_mutexes;
6137
 
6138
static void
6139
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6140
{
6141
  tree block;
6142
  tree name, lock, unlock;
6143
  gimple stmt = gsi_stmt (*gsi_p), bind;
6144
  location_t loc = gimple_location (stmt);
6145
  gimple_seq tbody;
6146
  struct gimplify_ctx gctx;
6147
 
6148
  name = gimple_omp_critical_name (stmt);
6149
  if (name)
6150
    {
6151
      tree decl;
6152
      splay_tree_node n;
6153
 
6154
      if (!critical_name_mutexes)
6155
        critical_name_mutexes
6156
          = splay_tree_new_ggc (splay_tree_compare_pointers,
6157
                                ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_s,
6158
                                ggc_alloc_splay_tree_tree_node_tree_node_splay_tree_node_s);
6159
 
6160
      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
6161
      if (n == NULL)
6162
        {
6163
          char *new_str;
6164
 
6165
          decl = create_tmp_var_raw (ptr_type_node, NULL);
6166
 
6167
          new_str = ACONCAT ((".gomp_critical_user_",
6168
                              IDENTIFIER_POINTER (name), NULL));
6169
          DECL_NAME (decl) = get_identifier (new_str);
6170
          TREE_PUBLIC (decl) = 1;
6171
          TREE_STATIC (decl) = 1;
6172
          DECL_COMMON (decl) = 1;
6173
          DECL_ARTIFICIAL (decl) = 1;
6174
          DECL_IGNORED_P (decl) = 1;
6175
          varpool_finalize_decl (decl);
6176
 
6177
          splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
6178
                             (splay_tree_value) decl);
6179
        }
6180
      else
6181
        decl = (tree) n->value;
6182
 
6183
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
6184
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));
6185
 
6186
      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
6187
      unlock = build_call_expr_loc (loc, unlock, 1,
6188
                                build_fold_addr_expr_loc (loc, decl));
6189
    }
6190
  else
6191
    {
6192
      lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
6193
      lock = build_call_expr_loc (loc, lock, 0);
6194
 
6195
      unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
6196
      unlock = build_call_expr_loc (loc, unlock, 0);
6197
    }
6198
 
6199
  push_gimplify_context (&gctx);
6200
 
6201
  block = make_node (BLOCK);
6202
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);
6203
 
6204
  tbody = gimple_bind_body (bind);
6205
  gimplify_and_add (lock, &tbody);
6206
  gimple_bind_set_body (bind, tbody);
6207
 
6208
  lower_omp (gimple_omp_body (stmt), ctx);
6209
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
6210
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
6211
  gimple_omp_set_body (stmt, NULL);
6212
 
6213
  tbody = gimple_bind_body (bind);
6214
  gimplify_and_add (unlock, &tbody);
6215
  gimple_bind_set_body (bind, tbody);
6216
 
6217
  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
6218
 
6219
  pop_gimplify_context (bind);
6220
  gimple_bind_append_vars (bind, ctx->block_vars);
6221
  BLOCK_VARS (block) = gimple_bind_vars (bind);
6222
  gsi_replace (gsi_p, bind, true);
6223
}
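
/* Illustration only: a named critical section such as

        #pragma omp critical (io)
          body ();

   uses a common symbol built above, named ".gomp_critical_user_io", and
   becomes approximately

        GOMP_critical_name_start (&.gomp_critical_user_io);
        body ();
        GOMP_critical_name_end (&.gomp_critical_user_io);

   whereas the unnamed form calls GOMP_critical_start/GOMP_critical_end
   with no argument.  */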
6224
 
6225
 
6226
/* A subroutine of lower_omp_for.  Generate code to emit the predicate
6227
   for a lastprivate clause.  Given a loop control predicate of (V
6228
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
6229
   is appended to *DLIST, iterator initialization is appended to
6230
   *BODY_P.  */
6231
 
6232
static void
6233
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
6234
                           gimple_seq *dlist, struct omp_context *ctx)
6235
{
6236
  tree clauses, cond, vinit;
6237
  enum tree_code cond_code;
6238
  gimple_seq stmts;
6239
 
6240
  cond_code = fd->loop.cond_code;
6241
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
6242
 
6243
  /* When possible, use a strict equality expression.  This can let VRP
6244
     type optimizations deduce the value and remove a copy.  */
6245
  if (host_integerp (fd->loop.step, 0))
6246
    {
6247
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
6248
      if (step == 1 || step == -1)
6249
        cond_code = EQ_EXPR;
6250
    }
6251
 
6252
  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);
6253
 
6254
  clauses = gimple_omp_for_clauses (fd->for_stmt);
6255
  stmts = NULL;
6256
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
6257
  if (!gimple_seq_empty_p (stmts))
6258
    {
6259
      gimple_seq_add_seq (&stmts, *dlist);
6260
      *dlist = stmts;
6261
 
6262
      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
6263
      vinit = fd->loop.n1;
6264
      if (cond_code == EQ_EXPR
6265
          && host_integerp (fd->loop.n2, 0)
6266
          && ! integer_zerop (fd->loop.n2))
6267
        vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
6268
 
6269
      /* Initialize the iterator variable, so that threads that don't execute
6270
         any iterations don't execute the lastprivate clauses by accident.  */
6271
      gimplify_assign (fd->loop.v, vinit, body_p);
6272
    }
6273
}
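
/* Illustration only: for a loop such as

        #pragma omp for lastprivate (x)
        for (i = 0; i < n; i++)
          x = compute (i);                    <-- hypothetical user code

   the control predicate is (i < n), so the lastprivate copy-out appended
   to *DLIST is gated on the negation, roughly

        if (i >= n)        i == n is used when the step is known to be +-1
          <copy the private x back to the original x>;

   while *BODY_P receives an initialization of the iterator (here i = 0),
   so a thread that executes no iterations cannot satisfy the gate with a
   stale value.  */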
6274
 
6275
 
6276
/* Lower code for an OpenMP loop directive.  */
6277
 
6278
static void
6279
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6280
{
6281
  tree *rhs_p, block;
6282
  struct omp_for_data fd;
6283
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
6284
  gimple_seq omp_for_body, body, dlist;
6285
  size_t i;
6286
  struct gimplify_ctx gctx;
6287
 
6288
  push_gimplify_context (&gctx);
6289
 
6290
  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
6291
  lower_omp (gimple_omp_body (stmt), ctx);
6292
 
6293
  block = make_node (BLOCK);
6294
  new_stmt = gimple_build_bind (NULL, NULL, block);
6295
 
6296
  /* Move declaration of temporaries in the loop body before we make
6297
     it go away.  */
6298
  omp_for_body = gimple_omp_body (stmt);
6299
  if (!gimple_seq_empty_p (omp_for_body)
6300
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
6301
    {
6302
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
6303
      gimple_bind_append_vars (new_stmt, vars);
6304
    }
6305
 
6306
  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
6307
  dlist = NULL;
6308
  body = NULL;
6309
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
6310
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));
6311
 
6312
  /* Lower the header expressions.  At this point, we can assume that
6313
     the header is of the form:
6314
 
6315
        #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
6316
 
6317
     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
6318
     using the .omp_data_s mapping, if needed.  */
6319
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
6320
    {
6321
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
6322
      if (!is_gimple_min_invariant (*rhs_p))
6323
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6324
 
6325
      rhs_p = gimple_omp_for_final_ptr (stmt, i);
6326
      if (!is_gimple_min_invariant (*rhs_p))
6327
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6328
 
6329
      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
6330
      if (!is_gimple_min_invariant (*rhs_p))
6331
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
6332
    }
6333
 
6334
  /* Once lowered, extract the bounds and clauses.  */
6335
  extract_omp_for_data (stmt, &fd, NULL);
6336
 
6337
  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);
6338
 
6339
  gimple_seq_add_stmt (&body, stmt);
6340
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));
6341
 
6342
  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
6343
                                                         fd.loop.v));
6344
 
6345
  /* After the loop, add exit clauses.  */
6346
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
6347
  gimple_seq_add_seq (&body, dlist);
6348
 
6349
  body = maybe_catch_exception (body);
6350
 
6351
  /* Region exit marker goes at the end of the loop body.  */
6352
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));
6353
 
6354
  pop_gimplify_context (new_stmt);
6355
 
6356
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
6357
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
6358
  if (BLOCK_VARS (block))
6359
    TREE_USED (block) = 1;
6360
 
6361
  gimple_bind_set_body (new_stmt, body);
6362
  gimple_omp_set_body (stmt, NULL);
6363
  gimple_omp_for_set_pre_body (stmt, NULL);
6364
  gsi_replace (gsi_p, new_stmt, true);
6365
}
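
/* Illustration of the header lowering above (hypothetical names): in

        #pragma omp for
        for (i = p->lo; i < p->hi; i += p->step)

   none of the three header operands is a GIMPLE minimal invariant, so
   each is forced into a formal temporary in BODY, leaving a header of
   the canonical form

        #pragma omp for (i = D.1; i < D.2; i = i + D.3)

   before extract_omp_for_data reads the bounds back out.  */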
6366
 
6367
/* Callback for walk_stmts.  Check if the current statement only contains
6368
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */
6369
 
6370
static tree
6371
check_combined_parallel (gimple_stmt_iterator *gsi_p,
6372
                         bool *handled_ops_p,
6373
                         struct walk_stmt_info *wi)
6374
{
6375
  int *info = (int *) wi->info;
6376
  gimple stmt = gsi_stmt (*gsi_p);
6377
 
6378
  *handled_ops_p = true;
6379
  switch (gimple_code (stmt))
6380
    {
6381
    WALK_SUBSTMTS;
6382
 
6383
    case GIMPLE_OMP_FOR:
6384
    case GIMPLE_OMP_SECTIONS:
6385
      *info = *info == 0 ? 1 : -1;
6386
      break;
6387
    default:
6388
      *info = -1;
6389
      break;
6390
    }
6391
  return NULL;
6392
}
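
/* Used by lower_omp_taskreg below: for a parallel whose body is exactly
   one workshare, e.g.

        #pragma omp parallel
        #pragma omp for
        for (...) ...

   the walk leaves *info == 1 and the parallel is then marked combined;
   any other statement, or a second workshare, forces *info to -1 and the
   combined form is not used.  */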
6393
 
6394
struct omp_taskcopy_context
6395
{
6396
  /* This field must be at the beginning, as we do "inheritance": Some
6397
     callback functions for tree-inline.c (e.g., omp_copy_decl)
6398
     receive a copy_body_data pointer that is up-casted to an
6399
     omp_context pointer.  */
6400
  copy_body_data cb;
6401
  omp_context *ctx;
6402
};
6403
 
6404
static tree
6405
task_copyfn_copy_decl (tree var, copy_body_data *cb)
6406
{
6407
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;
6408
 
6409
  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
6410
    return create_tmp_var (TREE_TYPE (var), NULL);
6411
 
6412
  return var;
6413
}
6414
 
6415
static tree
6416
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
6417
{
6418
  tree name, new_fields = NULL, type, f;
6419
 
6420
  type = lang_hooks.types.make_type (RECORD_TYPE);
6421
  name = DECL_NAME (TYPE_NAME (orig_type));
6422
  name = build_decl (gimple_location (tcctx->ctx->stmt),
6423
                     TYPE_DECL, name, type);
6424
  TYPE_NAME (type) = name;
6425
 
6426
  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
6427
    {
6428
      tree new_f = copy_node (f);
6429
      DECL_CONTEXT (new_f) = type;
6430
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
6431
      TREE_CHAIN (new_f) = new_fields;
6432
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6433
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
6434
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
6435
                 &tcctx->cb, NULL);
6436
      new_fields = new_f;
6437
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
6438
    }
6439
  TYPE_FIELDS (type) = nreverse (new_fields);
6440
  layout_type (type);
6441
  return type;
6442
}
6443
 
6444
/* Create task copyfn.  */
6445
 
6446
static void
6447
create_task_copyfn (gimple task_stmt, omp_context *ctx)
6448
{
6449
  struct function *child_cfun;
6450
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
6451
  tree record_type, srecord_type, bind, list;
6452
  bool record_needs_remap = false, srecord_needs_remap = false;
6453
  splay_tree_node n;
6454
  struct omp_taskcopy_context tcctx;
6455
  struct gimplify_ctx gctx;
6456
  location_t loc = gimple_location (task_stmt);
6457
 
6458
  child_fn = gimple_omp_task_copy_fn (task_stmt);
6459
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
6460
  gcc_assert (child_cfun->cfg == NULL);
6461
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
6462
 
6463
  /* Reset DECL_CONTEXT on function arguments.  */
6464
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
6465
    DECL_CONTEXT (t) = child_fn;
6466
 
6467
  /* Populate the function.  */
6468
  push_gimplify_context (&gctx);
6469
  current_function_decl = child_fn;
6470
 
6471
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
6472
  TREE_SIDE_EFFECTS (bind) = 1;
6473
  list = NULL;
6474
  DECL_SAVED_TREE (child_fn) = bind;
6475
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
6476
 
6477
  /* Remap src and dst argument types if needed.  */
6478
  record_type = ctx->record_type;
6479
  srecord_type = ctx->srecord_type;
6480
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
6481
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6482
      {
6483
        record_needs_remap = true;
6484
        break;
6485
      }
6486
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
6487
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
6488
      {
6489
        srecord_needs_remap = true;
6490
        break;
6491
      }
6492
 
6493
  if (record_needs_remap || srecord_needs_remap)
6494
    {
6495
      memset (&tcctx, '\0', sizeof (tcctx));
6496
      tcctx.cb.src_fn = ctx->cb.src_fn;
6497
      tcctx.cb.dst_fn = child_fn;
6498
      tcctx.cb.src_node = cgraph_get_node (tcctx.cb.src_fn);
6499
      gcc_checking_assert (tcctx.cb.src_node);
6500
      tcctx.cb.dst_node = tcctx.cb.src_node;
6501
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
6502
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
6503
      tcctx.cb.eh_lp_nr = 0;
6504
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
6505
      tcctx.cb.decl_map = pointer_map_create ();
6506
      tcctx.ctx = ctx;
6507
 
6508
      if (record_needs_remap)
6509
        record_type = task_copyfn_remap_type (&tcctx, record_type);
6510
      if (srecord_needs_remap)
6511
        srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
6512
    }
6513
  else
6514
    tcctx.cb.decl_map = NULL;
6515
 
6516
  push_cfun (child_cfun);
6517
 
6518
  arg = DECL_ARGUMENTS (child_fn);
6519
  TREE_TYPE (arg) = build_pointer_type (record_type);
6520
  sarg = DECL_CHAIN (arg);
6521
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);
6522
 
6523
  /* First pass: initialize temporaries used in record_type and srecord_type
6524
     sizes and field offsets.  */
6525
  if (tcctx.cb.decl_map)
6526
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6527
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6528
        {
6529
          tree *p;
6530
 
6531
          decl = OMP_CLAUSE_DECL (c);
6532
          p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
6533
          if (p == NULL)
6534
            continue;
6535
          n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6536
          sf = (tree) n->value;
6537
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6538
          src = build_simple_mem_ref_loc (loc, sarg);
6539
          src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6540
          t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
6541
          append_to_statement_list (t, &list);
6542
        }
6543
 
6544
  /* Second pass: copy shared var pointers and copy construct non-VLA
6545
     firstprivate vars.  */
6546
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6547
    switch (OMP_CLAUSE_CODE (c))
6548
      {
6549
      case OMP_CLAUSE_SHARED:
6550
        decl = OMP_CLAUSE_DECL (c);
6551
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6552
        if (n == NULL)
6553
          break;
6554
        f = (tree) n->value;
6555
        if (tcctx.cb.decl_map)
6556
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6557
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6558
        sf = (tree) n->value;
6559
        if (tcctx.cb.decl_map)
6560
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6561
        src = build_simple_mem_ref_loc (loc, sarg);
6562
        src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6563
        dst = build_simple_mem_ref_loc (loc, arg);
6564
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6565
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6566
        append_to_statement_list (t, &list);
6567
        break;
6568
      case OMP_CLAUSE_FIRSTPRIVATE:
6569
        decl = OMP_CLAUSE_DECL (c);
6570
        if (is_variable_sized (decl))
6571
          break;
6572
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6573
        if (n == NULL)
6574
          break;
6575
        f = (tree) n->value;
6576
        if (tcctx.cb.decl_map)
6577
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6578
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6579
        if (n != NULL)
6580
          {
6581
            sf = (tree) n->value;
6582
            if (tcctx.cb.decl_map)
6583
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6584
            src = build_simple_mem_ref_loc (loc, sarg);
6585
            src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6586
            if (use_pointer_for_field (decl, NULL) || is_reference (decl))
6587
              src = build_simple_mem_ref_loc (loc, src);
6588
          }
6589
        else
6590
          src = decl;
6591
        dst = build_simple_mem_ref_loc (loc, arg);
6592
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6593
        t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6594
        append_to_statement_list (t, &list);
6595
        break;
6596
      case OMP_CLAUSE_PRIVATE:
6597
        if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
6598
          break;
6599
        decl = OMP_CLAUSE_DECL (c);
6600
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6601
        f = (tree) n->value;
6602
        if (tcctx.cb.decl_map)
6603
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6604
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
6605
        if (n != NULL)
6606
          {
6607
            sf = (tree) n->value;
6608
            if (tcctx.cb.decl_map)
6609
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6610
            src = build_simple_mem_ref_loc (loc, sarg);
6611
            src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6612
            if (use_pointer_for_field (decl, NULL))
6613
              src = build_simple_mem_ref_loc (loc, src);
6614
          }
6615
        else
6616
          src = decl;
6617
        dst = build_simple_mem_ref_loc (loc, arg);
6618
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6619
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
6620
        append_to_statement_list (t, &list);
6621
        break;
6622
      default:
6623
        break;
6624
      }
6625
 
6626
  /* Last pass: handle VLA firstprivates.  */
6627
  if (tcctx.cb.decl_map)
6628
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
6629
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
6630
        {
6631
          tree ind, ptr, df;
6632
 
6633
          decl = OMP_CLAUSE_DECL (c);
6634
          if (!is_variable_sized (decl))
6635
            continue;
6636
          n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
6637
          if (n == NULL)
6638
            continue;
6639
          f = (tree) n->value;
6640
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
6641
          gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
6642
          ind = DECL_VALUE_EXPR (decl);
6643
          gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
6644
          gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
6645
          n = splay_tree_lookup (ctx->sfield_map,
6646
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
6647
          sf = (tree) n->value;
6648
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
6649
          src = build_simple_mem_ref_loc (loc, sarg);
6650
          src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
6651
          src = build_simple_mem_ref_loc (loc, src);
6652
          dst = build_simple_mem_ref_loc (loc, arg);
6653
          dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
6654
          t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
6655
          append_to_statement_list (t, &list);
6656
          n = splay_tree_lookup (ctx->field_map,
6657
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
6658
          df = (tree) n->value;
6659
          df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
6660
          ptr = build_simple_mem_ref_loc (loc, arg);
6661
          ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
6662
          t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
6663
                      build_fold_addr_expr_loc (loc, dst));
6664
          append_to_statement_list (t, &list);
6665
        }
6666
 
6667
  t = build1 (RETURN_EXPR, void_type_node, NULL);
6668
  append_to_statement_list (t, &list);
6669
 
6670
  if (tcctx.cb.decl_map)
6671
    pointer_map_destroy (tcctx.cb.decl_map);
6672
  pop_gimplify_context (NULL);
6673
  BIND_EXPR_BODY (bind) = list;
6674
  pop_cfun ();
6675
  current_function_decl = ctx->cb.src_fn;
6676
}
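
/* Summary of the function built above (illustration): it receives the
   destination record passed in by the runtime and the sender record
   filled in by the encountering thread, and emits, in order:

     1. temporaries for the sizes/offsets of variably modified fields,
     2. pointer copies for 'shared' vars and copy construction of
        non-VLA 'firstprivate' vars, e.g.  dst->x = src->x;
     3. copy construction of VLA firstprivates, followed by storing the
        address of each new copy into its companion pointer field.  */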
6677
 
6678
/* Lower the OpenMP parallel or task directive in the current statement
6679
   in GSI_P.  CTX holds context information for the directive.  */
6680
 
6681
static void
6682
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6683
{
6684
  tree clauses;
6685
  tree child_fn, t;
6686
  gimple stmt = gsi_stmt (*gsi_p);
6687
  gimple par_bind, bind;
6688
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
6689
  struct gimplify_ctx gctx;
6690
  location_t loc = gimple_location (stmt);
6691
 
6692
  clauses = gimple_omp_taskreg_clauses (stmt);
6693
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
6694
  par_body = gimple_bind_body (par_bind);
6695
  child_fn = ctx->cb.dst_fn;
6696
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
6697
      && !gimple_omp_parallel_combined_p (stmt))
6698
    {
6699
      struct walk_stmt_info wi;
6700
      int ws_num = 0;
6701
 
6702
      memset (&wi, 0, sizeof (wi));
6703
      wi.info = &ws_num;
6704
      wi.val_only = true;
6705
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
6706
      if (ws_num == 1)
6707
        gimple_omp_parallel_set_combined_p (stmt, true);
6708
    }
6709
  if (ctx->srecord_type)
6710
    create_task_copyfn (stmt, ctx);
6711
 
6712
  push_gimplify_context (&gctx);
6713
 
6714
  par_olist = NULL;
6715
  par_ilist = NULL;
6716
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
6717
  lower_omp (par_body, ctx);
6718
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
6719
    lower_reduction_clauses (clauses, &par_olist, ctx);
6720
 
6721
  /* Declare all the variables created by mapping and the variables
6722
     declared in the scope of the parallel body.  */
6723
  record_vars_into (ctx->block_vars, child_fn);
6724
  record_vars_into (gimple_bind_vars (par_bind), child_fn);
6725
 
6726
  if (ctx->record_type)
6727
    {
6728
      ctx->sender_decl
6729
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
6730
                          : ctx->record_type, ".omp_data_o");
6731
      DECL_NAMELESS (ctx->sender_decl) = 1;
6732
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
6733
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
6734
    }
6735
 
6736
  olist = NULL;
6737
  ilist = NULL;
6738
  lower_send_clauses (clauses, &ilist, &olist, ctx);
6739
  lower_send_shared_vars (&ilist, &olist, ctx);
6740
 
6741
  /* Once all the expansions are done, sequence all the different
6742
     fragments inside gimple_omp_body.  */
6743
 
6744
  new_body = NULL;
6745
 
6746
  if (ctx->record_type)
6747
    {
6748
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
6749
      /* fixup_child_record_type might have changed receiver_decl's type.  */
6750
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
6751
      gimple_seq_add_stmt (&new_body,
6752
                           gimple_build_assign (ctx->receiver_decl, t));
6753
    }
6754
 
6755
  gimple_seq_add_seq (&new_body, par_ilist);
6756
  gimple_seq_add_seq (&new_body, par_body);
6757
  gimple_seq_add_seq (&new_body, par_olist);
6758
  new_body = maybe_catch_exception (new_body);
6759
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
6760
  gimple_omp_set_body (stmt, new_body);
6761
 
6762
  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
6763
  gimple_bind_add_stmt (bind, stmt);
6764
  if (ilist || olist)
6765
    {
6766
      gimple_seq_add_stmt (&ilist, bind);
6767
      gimple_seq_add_seq (&ilist, olist);
6768
      bind = gimple_build_bind (NULL, ilist, NULL);
6769
    }
6770
 
6771
  gsi_replace (gsi_p, bind, true);
6772
 
6773
  pop_gimplify_context (NULL);
6774
}
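
/* Net effect for a typical directive (illustration): for

        #pragma omp parallel shared (a) firstprivate (b)
          use (a, b);                         <-- hypothetical user code

   the send code emitted from the clauses fills the .omp_data_o record
   created above before the directive, and the lowered body begins by
   assigning the address of .omp_data_o to the receiver pointer, followed
   by the firstprivate copy-in, the original body, any reduction and
   copy-out code, and a closing OMP_RETURN.  */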
6775
 
6776
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
6777
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
6778
   of OpenMP context, but with task_shared_vars set.  */
6779
 
6780
static tree
6781
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
6782
                        void *data)
6783
{
6784
  tree t = *tp;
6785
 
6786
  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
6787
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
6788
    return t;
6789
 
6790
  if (task_shared_vars
6791
      && DECL_P (t)
6792
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
6793
    return t;
6794
 
6795
  /* If a global variable has been privatized, TREE_CONSTANT on
6796
     ADDR_EXPR might be wrong.  */
6797
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
6798
    recompute_tree_invariant_for_addr_expr (t);
6799
 
6800
  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
6801
  return NULL_TREE;
6802
}
6803
 
6804
static void
6805
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
6806
{
6807
  gimple stmt = gsi_stmt (*gsi_p);
6808
  struct walk_stmt_info wi;
6809
 
6810
  if (gimple_has_location (stmt))
6811
    input_location = gimple_location (stmt);
6812
 
6813
  if (task_shared_vars)
6814
    memset (&wi, '\0', sizeof (wi));
6815
 
6816
  /* If we have issued syntax errors, avoid doing any heavy lifting.
6817
     Just replace the OpenMP directives with a NOP to avoid
6818
     confusing RTL expansion.  */
6819
  if (seen_error () && is_gimple_omp (stmt))
6820
    {
6821
      gsi_replace (gsi_p, gimple_build_nop (), true);
6822
      return;
6823
    }
6824
 
6825
  switch (gimple_code (stmt))
6826
    {
6827
    case GIMPLE_COND:
6828
      if ((ctx || task_shared_vars)
6829
          && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
6830
                         ctx ? NULL : &wi, NULL)
6831
              || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
6832
                            ctx ? NULL : &wi, NULL)))
6833
        gimple_regimplify_operands (stmt, gsi_p);
6834
      break;
6835
    case GIMPLE_CATCH:
6836
      lower_omp (gimple_catch_handler (stmt), ctx);
6837
      break;
6838
    case GIMPLE_EH_FILTER:
6839
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
6840
      break;
6841
    case GIMPLE_TRY:
6842
      lower_omp (gimple_try_eval (stmt), ctx);
6843
      lower_omp (gimple_try_cleanup (stmt), ctx);
6844
      break;
6845
    case GIMPLE_BIND:
6846
      lower_omp (gimple_bind_body (stmt), ctx);
6847
      break;
6848
    case GIMPLE_OMP_PARALLEL:
6849
    case GIMPLE_OMP_TASK:
6850
      ctx = maybe_lookup_ctx (stmt);
6851
      lower_omp_taskreg (gsi_p, ctx);
6852
      break;
6853
    case GIMPLE_OMP_FOR:
6854
      ctx = maybe_lookup_ctx (stmt);
6855
      gcc_assert (ctx);
6856
      lower_omp_for (gsi_p, ctx);
6857
      break;
6858
    case GIMPLE_OMP_SECTIONS:
6859
      ctx = maybe_lookup_ctx (stmt);
6860
      gcc_assert (ctx);
6861
      lower_omp_sections (gsi_p, ctx);
6862
      break;
6863
    case GIMPLE_OMP_SINGLE:
6864
      ctx = maybe_lookup_ctx (stmt);
6865
      gcc_assert (ctx);
6866
      lower_omp_single (gsi_p, ctx);
6867
      break;
6868
    case GIMPLE_OMP_MASTER:
6869
      ctx = maybe_lookup_ctx (stmt);
6870
      gcc_assert (ctx);
6871
      lower_omp_master (gsi_p, ctx);
6872
      break;
6873
    case GIMPLE_OMP_ORDERED:
6874
      ctx = maybe_lookup_ctx (stmt);
6875
      gcc_assert (ctx);
6876
      lower_omp_ordered (gsi_p, ctx);
6877
      break;
6878
    case GIMPLE_OMP_CRITICAL:
6879
      ctx = maybe_lookup_ctx (stmt);
6880
      gcc_assert (ctx);
6881
      lower_omp_critical (gsi_p, ctx);
6882
      break;
6883
    case GIMPLE_OMP_ATOMIC_LOAD:
6884
      if ((ctx || task_shared_vars)
6885
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
6886
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
6887
        gimple_regimplify_operands (stmt, gsi_p);
6888
      break;
6889
    default:
6890
      if ((ctx || task_shared_vars)
6891
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
6892
                             ctx ? NULL : &wi))
6893
        gimple_regimplify_operands (stmt, gsi_p);
6894
      break;
6895
    }
6896
}
6897
 
6898
static void
6899
lower_omp (gimple_seq body, omp_context *ctx)
6900
{
6901
  location_t saved_location = input_location;
6902
  gimple_stmt_iterator gsi = gsi_start (body);
6903
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
6904
    lower_omp_1 (&gsi, ctx);
6905
  input_location = saved_location;
6906
}
6907
 
6908
/* Main entry point.  */
6909
 
6910
static unsigned int
6911
execute_lower_omp (void)
6912
{
6913
  gimple_seq body;
6914
 
6915
  /* This pass always runs, to provide PROP_gimple_lomp.
6916
     But there is nothing to do unless -fopenmp is given.  */
6917
  if (flag_openmp == 0)
6918
    return 0;
6919
 
6920
  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
6921
                                 delete_omp_context);
6922
 
6923
  body = gimple_body (current_function_decl);
6924
  scan_omp (body, NULL);
6925
  gcc_assert (taskreg_nesting_level == 0);
6926
 
6927
  if (all_contexts->root)
6928
    {
6929
      struct gimplify_ctx gctx;
6930
 
6931
      if (task_shared_vars)
6932
        push_gimplify_context (&gctx);
6933
      lower_omp (body, NULL);
6934
      if (task_shared_vars)
6935
        pop_gimplify_context (NULL);
6936
    }
6937
 
6938
  if (all_contexts)
6939
    {
6940
      splay_tree_delete (all_contexts);
6941
      all_contexts = NULL;
6942
    }
6943
  BITMAP_FREE (task_shared_vars);
6944
  return 0;
6945
}
6946
 
6947
struct gimple_opt_pass pass_lower_omp =
6948
{
6949
 {
6950
  GIMPLE_PASS,
6951
  "omplower",                           /* name */
6952
  NULL,                                 /* gate */
6953
  execute_lower_omp,                    /* execute */
6954
  NULL,                                 /* sub */
6955
  NULL,                                 /* next */
6956
  0,                                     /* static_pass_number */
6957
  TV_NONE,                              /* tv_id */
6958
  PROP_gimple_any,                      /* properties_required */
6959
  PROP_gimple_lomp,                     /* properties_provided */
6960
  0,                                     /* properties_destroyed */
6961
  0,                                     /* todo_flags_start */
6962
  0                                      /* todo_flags_finish */
6963
 }
6964
};
6965
 
6966
/* The following is a utility to diagnose OpenMP structured block violations.
6967
   It is not part of the "omplower" pass, as that's invoked too late.  It
6968
   should be invoked by the respective front ends after gimplification.  */
6969
 
6970
static splay_tree all_labels;
6971
 
6972
/* Check for mismatched contexts and generate an error if needed.  Return
6973
   true if an error is detected.  */
6974
 
6975
static bool
6976
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
6977
               gimple branch_ctx, gimple label_ctx)
6978
{
6979
  if (label_ctx == branch_ctx)
6980
    return false;
6981
 
6982
 
6983
  /*
6984
     Previously we kept track of the label's entire context in diagnose_sb_[12]
6985
     so we could traverse it and issue a correct "exit" or "enter" error
6986
     message upon a structured block violation.
6987
 
6988
     We built the context by building a list with tree_cons'ing, but there is
6989
     no easy counterpart in gimple tuples.  It seems like far too much work
6990
     for issuing exit/enter error messages.  If someone really misses the
6991
     distinct error message... patches welcome.
6992
   */
6993
 
6994
#if 0
6995
  /* Try to avoid confusing the user by producing an error message
6996
     with correct "exit" or "enter" verbiage.  We prefer "exit"
6997
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
6998
  if (branch_ctx == NULL)
6999
    exit_p = false;
7000
  else
7001
    {
7002
      while (label_ctx)
7003
        {
7004
          if (TREE_VALUE (label_ctx) == branch_ctx)
7005
            {
7006
              exit_p = false;
7007
              break;
7008
            }
7009
          label_ctx = TREE_CHAIN (label_ctx);
7010
        }
7011
    }
7012
 
7013
  if (exit_p)
7014
    error ("invalid exit from OpenMP structured block");
7015
  else
7016
    error ("invalid entry to OpenMP structured block");
7017
#endif
7018
 
7019
  /* If it's obvious we have an invalid entry, be specific about the error.  */
7020
  if (branch_ctx == NULL)
7021
    error ("invalid entry to OpenMP structured block");
7022
  else
7023
    /* Otherwise, be vague and lazy, but efficient.  */
7024
    error ("invalid branch to/from an OpenMP structured block");
7025
 
7026
  gsi_replace (gsi_p, gimple_build_nop (), false);
7027
  return true;
7028
}
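
/* Example of what this diagnoses (illustration only):

        #pragma omp parallel
          {
            goto out;
          }
        out:;

   Here the goto's context is the parallel region but the label was
   recorded with a NULL context, so the "invalid branch to/from" error is
   issued; a jump from outside an OpenMP construct to a label recorded
   inside one (branch_ctx == NULL) produces the "invalid entry" form
   instead.  */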
7029
 
7030
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
7031
   where each label is found.  */
7032
 
7033
static tree
7034
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7035
               struct walk_stmt_info *wi)
7036
{
7037
  gimple context = (gimple) wi->info;
7038
  gimple inner_context;
7039
  gimple stmt = gsi_stmt (*gsi_p);
7040
 
7041
  *handled_ops_p = true;
7042
 
7043
  switch (gimple_code (stmt))
7044
    {
7045
    WALK_SUBSTMTS;
7046
 
7047
    case GIMPLE_OMP_PARALLEL:
7048
    case GIMPLE_OMP_TASK:
7049
    case GIMPLE_OMP_SECTIONS:
7050
    case GIMPLE_OMP_SINGLE:
7051
    case GIMPLE_OMP_SECTION:
7052
    case GIMPLE_OMP_MASTER:
7053
    case GIMPLE_OMP_ORDERED:
7054
    case GIMPLE_OMP_CRITICAL:
7055
      /* The minimal context here is just the current OMP construct.  */
7056
      inner_context = stmt;
7057
      wi->info = inner_context;
7058
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7059
      wi->info = context;
7060
      break;
7061
 
7062
    case GIMPLE_OMP_FOR:
7063
      inner_context = stmt;
7064
      wi->info = inner_context;
7065
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7066
         walk them.  */
7067
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7068
                       diagnose_sb_1, NULL, wi);
7069
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
7070
      wi->info = context;
7071
      break;
7072
 
7073
    case GIMPLE_LABEL:
7074
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
7075
                         (splay_tree_value) context);
7076
      break;
7077
 
7078
    default:
7079
      break;
7080
    }
7081
 
7082
  return NULL_TREE;
7083
}
7084
 
7085
/* Pass 2: Check each branch and see if its context differs from that of
7086
   the destination label's context.  */
7087
 
7088
static tree
7089
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7090
               struct walk_stmt_info *wi)
7091
{
7092
  gimple context = (gimple) wi->info;
7093
  splay_tree_node n;
7094
  gimple stmt = gsi_stmt (*gsi_p);
7095
 
7096
  *handled_ops_p = true;
7097
 
7098
  switch (gimple_code (stmt))
7099
    {
7100
    WALK_SUBSTMTS;
7101
 
7102
    case GIMPLE_OMP_PARALLEL:
7103
    case GIMPLE_OMP_TASK:
7104
    case GIMPLE_OMP_SECTIONS:
7105
    case GIMPLE_OMP_SINGLE:
7106
    case GIMPLE_OMP_SECTION:
7107
    case GIMPLE_OMP_MASTER:
7108
    case GIMPLE_OMP_ORDERED:
7109
    case GIMPLE_OMP_CRITICAL:
7110
      wi->info = stmt;
7111
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7112
      wi->info = context;
7113
      break;
7114
 
7115
    case GIMPLE_OMP_FOR:
7116
      wi->info = stmt;
7117
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
7118
         walk them.  */
7119
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
7120
                       diagnose_sb_2, NULL, wi);
7121
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
7122
      wi->info = context;
7123
      break;
7124
 
7125
    case GIMPLE_COND:
7126
        {
7127
          tree lab = gimple_cond_true_label (stmt);
7128
          if (lab)
7129
            {
7130
              n = splay_tree_lookup (all_labels,
7131
                                     (splay_tree_key) lab);
7132
              diagnose_sb_0 (gsi_p, context,
7133
                             n ? (gimple) n->value : NULL);
7134
            }
7135
          lab = gimple_cond_false_label (stmt);
7136
          if (lab)
7137
            {
7138
              n = splay_tree_lookup (all_labels,
7139
                                     (splay_tree_key) lab);
7140
              diagnose_sb_0 (gsi_p, context,
7141
                             n ? (gimple) n->value : NULL);
7142
            }
7143
        }
7144
      break;
7145
 
7146
    case GIMPLE_GOTO:
7147
      {
7148
        tree lab = gimple_goto_dest (stmt);
7149
        if (TREE_CODE (lab) != LABEL_DECL)
7150
          break;
7151
 
7152
        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7153
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
7154
      }
7155
      break;
7156
 
7157
    case GIMPLE_SWITCH:
7158
      {
7159
        unsigned int i;
7160
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
7161
          {
7162
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
7163
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
7164
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
7165
              break;
7166
          }
7167
      }
7168
      break;
7169
 
7170
    case GIMPLE_RETURN:
7171
      diagnose_sb_0 (gsi_p, context, NULL);
7172
      break;
7173
 
7174
    default:
7175
      break;
7176
    }
7177
 
7178
  return NULL_TREE;
7179
}
7180
 
7181
static unsigned int
7182
diagnose_omp_structured_block_errors (void)
7183
{
7184
  struct walk_stmt_info wi;
7185
  gimple_seq body = gimple_body (current_function_decl);
7186
 
7187
  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);
7188
 
7189
  memset (&wi, 0, sizeof (wi));
7190
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);
7191
 
7192
  memset (&wi, 0, sizeof (wi));
7193
  wi.want_locations = true;
7194
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);
7195
 
7196
  splay_tree_delete (all_labels);
7197
  all_labels = NULL;
7198
 
7199
  return 0;
7200
}
7201
 
7202
static bool
7203
gate_diagnose_omp_blocks (void)
7204
{
7205
  return flag_openmp != 0;
7206
}
7207
 
7208
struct gimple_opt_pass pass_diagnose_omp_blocks =
7209
{
7210
  {
7211
    GIMPLE_PASS,
7212
    "*diagnose_omp_blocks",             /* name */
7213
    gate_diagnose_omp_blocks,           /* gate */
7214
    diagnose_omp_structured_block_errors,       /* execute */
7215
    NULL,                               /* sub */
7216
    NULL,                               /* next */
7217
    0,                                   /* static_pass_number */
7218
    TV_NONE,                            /* tv_id */
7219
    PROP_gimple_any,                    /* properties_required */
7220
    0,                                   /* properties_provided */
7221
    0,                                   /* properties_destroyed */
7222
    0,                                   /* todo_flags_start */
7223
    0,                                   /* todo_flags_finish */
7224
  }
7225
};
7226
 
7227
#include "gt-omp-low.h"
