/* Lowering pass for OpenMP directives.  Converts OpenMP directives
   into explicit calls to the runtime library (libgomp) and data
   marshalling to implement data sharing and copying clauses.
   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "gimple.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "diagnostic.h"
#include "tree-flow.h"
#include "timevar.h"
#include "flags.h"
#include "function.h"
#include "expr.h"
#include "toplev.h"
#include "tree-pass.h"
#include "ggc.h"
#include "except.h"
#include "splay-tree.h"
#include "optabs.h"
#include "cfgloop.h"


/* Lowering of OpenMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for parallel regions which are then moved to a new
   function, to be invoked by the thread library.  */
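
/* For illustration only (this pass does not emit exactly this text), a
   simple construct such as

       #pragma omp parallel shared(a)
         body;

   is conceptually turned into

       .omp_data_o.a = a;
       __builtin_GOMP_parallel_start (foo.omp_fn.0, &.omp_data_o, 0);
       foo.omp_fn.0 (&.omp_data_o);
       __builtin_GOMP_parallel_end ();

   where foo.omp_fn.0 receives a pointer to the .omp_data_s record and
   accesses 'a' through it.  The names above are illustrative; the
   actual temporaries and child function names are generated by the
   routines below.  */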

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

typedef struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* What to do with variables with implicitly determined sharing
     attributes.  */
  enum omp_clause_default_kind default_kind;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;
} omp_context;


struct omp_for_data_loop
{
  tree v, n1, n2, step;
  enum tree_code cond_code;
};

/* A structure describing the main elements of a parallel loop.  */

struct omp_for_data
{
  struct omp_for_data_loop loop;
  tree chunk_size;
  gimple for_stmt;
  tree pre, iter_type;
  int collapse;
  bool have_nowait, have_ordered;
  enum omp_clause_schedule_kind sched_kind;
  struct omp_for_data_loop *loops;
};


static splay_tree all_contexts;
static int taskreg_nesting_level;
struct omp_region *root_omp_region;
static bitmap task_shared_vars;

static void scan_omp (gimple_seq, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Find an OpenMP clause of type KIND within CLAUSES.  */

tree
find_omp_clause (tree clauses, enum omp_clause_code kind)
{
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == kind)
      return clauses;

  return NULL_TREE;
}
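
/* For example, later in this file the schedule clause of a worksharing
   loop is fetched with

     find_omp_clause (gimple_omp_for_clauses (ws_stmt), OMP_CLAUSE_SCHEDULE);

   which yields NULL_TREE when no such clause is present.  */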

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL
         || gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}


/* Extract the header elements of parallel loop FOR_STMT and store
   them into *FD.  */

static void
extract_omp_for_data (gimple for_stmt, struct omp_for_data *fd,
                      struct omp_for_data_loop *loops)
{
  tree t, var, *collapse_iter, *collapse_count;
  tree count = NULL_TREE, iter_type = long_integer_type_node;
  struct omp_for_data_loop *loop;
  int i;
  struct omp_for_data_loop dummy_loop;
  location_t loc = gimple_location (for_stmt);

  fd->for_stmt = for_stmt;
  fd->pre = NULL;
  fd->collapse = gimple_omp_for_collapse (for_stmt);
  if (fd->collapse > 1)
    fd->loops = loops;
  else
    fd->loops = &fd->loop;

  fd->have_nowait = fd->have_ordered = false;
  fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
  fd->chunk_size = NULL_TREE;
  collapse_iter = NULL;
  collapse_count = NULL;

  for (t = gimple_omp_for_clauses (for_stmt); t ; t = OMP_CLAUSE_CHAIN (t))
    switch (OMP_CLAUSE_CODE (t))
      {
      case OMP_CLAUSE_NOWAIT:
        fd->have_nowait = true;
        break;
      case OMP_CLAUSE_ORDERED:
        fd->have_ordered = true;
        break;
      case OMP_CLAUSE_SCHEDULE:
        fd->sched_kind = OMP_CLAUSE_SCHEDULE_KIND (t);
        fd->chunk_size = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (t);
        break;
      case OMP_CLAUSE_COLLAPSE:
        if (fd->collapse > 1)
          {
            collapse_iter = &OMP_CLAUSE_COLLAPSE_ITERVAR (t);
            collapse_count = &OMP_CLAUSE_COLLAPSE_COUNT (t);
          }
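        /* FALLTHRU */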
      default:
        break;
      }

  /* FIXME: for now map schedule(auto) to schedule(static).
     There should be analysis to determine whether all iterations
     are approximately the same amount of work (then schedule(static)
     is best) or if it varies (then schedule(dynamic,N) is better).  */
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_AUTO)
    {
      fd->sched_kind = OMP_CLAUSE_SCHEDULE_STATIC;
      gcc_assert (fd->chunk_size == NULL);
    }
  gcc_assert (fd->collapse == 1 || collapse_iter != NULL);
  if (fd->sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
    gcc_assert (fd->chunk_size == NULL);
  else if (fd->chunk_size == NULL)
    {
      /* We only need to compute a default chunk size for ordered
         static loops and dynamic loops.  */
      if (fd->sched_kind != OMP_CLAUSE_SCHEDULE_STATIC
          || fd->have_ordered
          || fd->collapse > 1)
        fd->chunk_size = (fd->sched_kind == OMP_CLAUSE_SCHEDULE_STATIC)
                         ? integer_zero_node : integer_one_node;
    }

  for (i = 0; i < fd->collapse; i++)
    {
      if (fd->collapse == 1)
        loop = &fd->loop;
      else if (loops != NULL)
        loop = loops + i;
      else
        loop = &dummy_loop;

      loop->v = gimple_omp_for_index (for_stmt, i);
      gcc_assert (SSA_VAR_P (loop->v));
      gcc_assert (TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
                  || TREE_CODE (TREE_TYPE (loop->v)) == POINTER_TYPE);
      var = TREE_CODE (loop->v) == SSA_NAME ? SSA_NAME_VAR (loop->v) : loop->v;
      loop->n1 = gimple_omp_for_initial (for_stmt, i);

      loop->cond_code = gimple_omp_for_cond (for_stmt, i);
      loop->n2 = gimple_omp_for_final (for_stmt, i);
      switch (loop->cond_code)
        {
        case LT_EXPR:
        case GT_EXPR:
          break;
        case LE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2_loc (loc,
                                    POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                    loop->n2, size_one_node);
          else
            loop->n2 = fold_build2_loc (loc,
                                    PLUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                    build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = LT_EXPR;
          break;
        case GE_EXPR:
          if (POINTER_TYPE_P (TREE_TYPE (loop->n2)))
            loop->n2 = fold_build2_loc (loc,
                                    POINTER_PLUS_EXPR, TREE_TYPE (loop->n2),
                                    loop->n2, size_int (-1));
          else
            loop->n2 = fold_build2_loc (loc,
                                    MINUS_EXPR, TREE_TYPE (loop->n2), loop->n2,
                                    build_int_cst (TREE_TYPE (loop->n2), 1));
          loop->cond_code = GT_EXPR;
          break;
        default:
          gcc_unreachable ();
        }

      t = gimple_omp_for_incr (for_stmt, i);
      gcc_assert (TREE_OPERAND (t, 0) == var);
      switch (TREE_CODE (t))
        {
        case PLUS_EXPR:
        case POINTER_PLUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          break;
        case MINUS_EXPR:
          loop->step = TREE_OPERAND (t, 1);
          loop->step = fold_build1_loc (loc,
                                    NEGATE_EXPR, TREE_TYPE (loop->step),
                                    loop->step);
          break;
        default:
          gcc_unreachable ();
        }

      if (iter_type != long_long_unsigned_type_node)
        {
          if (POINTER_TYPE_P (TREE_TYPE (loop->v)))
            iter_type = long_long_unsigned_type_node;
          else if (TYPE_UNSIGNED (TREE_TYPE (loop->v))
                   && TYPE_PRECISION (TREE_TYPE (loop->v))
                      >= TYPE_PRECISION (iter_type))
            {
              tree n;

              if (loop->cond_code == LT_EXPR)
                n = fold_build2_loc (loc,
                                 PLUS_EXPR, TREE_TYPE (loop->v),
                                 loop->n2, loop->step);
              else
                n = loop->n1;
              if (TREE_CODE (n) != INTEGER_CST
                  || tree_int_cst_lt (TYPE_MAX_VALUE (iter_type), n))
                iter_type = long_long_unsigned_type_node;
            }
          else if (TYPE_PRECISION (TREE_TYPE (loop->v))
                   > TYPE_PRECISION (iter_type))
            {
              tree n1, n2;

              if (loop->cond_code == LT_EXPR)
                {
                  n1 = loop->n1;
                  n2 = fold_build2_loc (loc,
                                    PLUS_EXPR, TREE_TYPE (loop->v),
                                    loop->n2, loop->step);
                }
              else
                {
                  n1 = fold_build2_loc (loc,
                                    MINUS_EXPR, TREE_TYPE (loop->v),
                                    loop->n2, loop->step);
                  n2 = loop->n1;
                }
              if (TREE_CODE (n1) != INTEGER_CST
                  || TREE_CODE (n2) != INTEGER_CST
                  || !tree_int_cst_lt (TYPE_MIN_VALUE (iter_type), n1)
                  || !tree_int_cst_lt (n2, TYPE_MAX_VALUE (iter_type)))
                iter_type = long_long_unsigned_type_node;
            }
        }

      if (collapse_count && *collapse_count == NULL)
        {
          if ((i == 0 || count != NULL_TREE)
              && TREE_CODE (TREE_TYPE (loop->v)) == INTEGER_TYPE
              && TREE_CONSTANT (loop->n1)
              && TREE_CONSTANT (loop->n2)
              && TREE_CODE (loop->step) == INTEGER_CST)
            {
              tree itype = TREE_TYPE (loop->v);

              if (POINTER_TYPE_P (itype))
                itype
                  = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
              t = build_int_cst (itype, (loop->cond_code == LT_EXPR ? -1 : 1));
              t = fold_build2_loc (loc,
                               PLUS_EXPR, itype,
                               fold_convert_loc (loc, itype, loop->step), t);
              t = fold_build2_loc (loc, PLUS_EXPR, itype, t,
                               fold_convert_loc (loc, itype, loop->n2));
              t = fold_build2_loc (loc, MINUS_EXPR, itype, t,
                               fold_convert_loc (loc, itype, loop->n1));
              if (TYPE_UNSIGNED (itype) && loop->cond_code == GT_EXPR)
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype,
                                 fold_build1_loc (loc, NEGATE_EXPR, itype, t),
                                 fold_build1_loc (loc, NEGATE_EXPR, itype,
                                              fold_convert_loc (loc, itype,
                                                                loop->step)));
              else
                t = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, t,
                                 fold_convert_loc (loc, itype, loop->step));
              t = fold_convert_loc (loc, long_long_unsigned_type_node, t);
              if (count != NULL_TREE)
                count = fold_build2_loc (loc,
                                     MULT_EXPR, long_long_unsigned_type_node,
                                     count, t);
              else
                count = t;
              if (TREE_CODE (count) != INTEGER_CST)
                count = NULL_TREE;
            }
          else
            count = NULL_TREE;
        }
    }
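
  /* The per-loop count folded above is
     (n2 - n1 + step + (cond_code == LT_EXPR ? -1 : 1)) / step, computed
     in the iterator type; e.g. for (i = 0; i < 10; i += 3) this gives
     (10 - 0 + 3 - 1) / 3 = 4 iterations.  COUNT is the product of these
     over all collapsed loops.  */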

  if (count)
    {
      if (!tree_int_cst_lt (count, TYPE_MAX_VALUE (long_integer_type_node)))
        iter_type = long_long_unsigned_type_node;
      else
        iter_type = long_integer_type_node;
    }
  else if (collapse_iter && *collapse_iter != NULL)
    iter_type = TREE_TYPE (*collapse_iter);
  fd->iter_type = iter_type;
  if (collapse_iter && *collapse_iter == NULL)
    *collapse_iter = create_tmp_var (iter_type, ".iter");
  if (collapse_count && *collapse_count == NULL)
    {
      if (count)
        *collapse_count = fold_convert_loc (loc, iter_type, count);
      else
        *collapse_count = create_tmp_var (iter_type, ".count");
    }

  if (fd->collapse > 1)
    {
      fd->loop.v = *collapse_iter;
      fd->loop.n1 = build_int_cst (TREE_TYPE (fd->loop.v), 0);
      fd->loop.n2 = *collapse_count;
      fd->loop.step = build_int_cst (TREE_TYPE (fd->loop.v), 1);
      fd->loop.cond_code = LT_EXPR;
    }
}


/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

        #pragma omp parallel for schedule (guided, i * 4)
        for (j ...)

   is lowered into:

        # BLOCK 2 (PAR_ENTRY_BB)
        .omp_data_o.i = i;
        #pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

        # BLOCK 3 (WS_ENTRY_BB)
        .omp_data_i = &.omp_data_o;
        D.1667 = .omp_data_i->i;
        D.1598 = D.1667 * 4;
        #pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  extract_omp_for_data (ws_stmt, &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}


/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static tree
get_ws_args_for (gimple ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_FOR)
    {
      struct omp_for_data fd;
      tree ws_args;

      extract_omp_for_data (ws_stmt, &fd, NULL);

      ws_args = NULL_TREE;
      if (fd.chunk_size)
        {
          t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
          ws_args = tree_cons (NULL, t, ws_args);
        }

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n2);
      ws_args = tree_cons (NULL, t, ws_args);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.n1);
      ws_args = tree_cons (NULL, t, ws_args);
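      /* Since tree_cons prepends, WS_ARGS is now ordered n1, n2, step
         and, if present, chunk_size, which lines up with the
         start/end/incr[/chunk] arguments of libgomp's
         GOMP_parallel_loop_*_start entry points.  */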

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
         GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
         the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      t = tree_cons (NULL, t, NULL);
      return t;
    }

  gcc_unreachable ();
}


/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
          && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
          || (last_and_only_stmt (ws_entry_bb)
              && last_and_only_stmt (par_exit_bb))))
    {
      gimple ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
        {
          /* If this is a combined parallel loop, we need to determine
             whether or not to use the combined library calls.  There
             are two cases where we do not apply the transformation:
             static loops and any kind of ordered loop.  In the first
             case, we already open code the loop so there is no need
             to do anything else.  In the latter case, the combined
             parallel loop call would still need extra synchronization
             to implement ordered semantics, so there would not be any
             gain in using the combined call.  */
          tree clauses = gimple_omp_for_clauses (ws_stmt);
          tree c = find_omp_clause (clauses, OMP_CLAUSE_SCHEDULE);
          if (c == NULL
              || OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_STATIC
              || find_omp_clause (clauses, OMP_CLAUSE_ORDERED))
            {
              region->is_combined_parallel = false;
              region->inner->is_combined_parallel = false;
              return;
            }
        }

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (ws_stmt);
    }
}


/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Return true if DECL is a reference type.  */

static inline bool
is_reference (tree decl)
{
  return lang_hooks.decls.omp_privatize_by_reference (decl);
}

/* Look up variables in the decl or field splay trees.  The "maybe" form
   allows the variable not to have been entered; otherwise we assert
   that it has been.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n;
  n = (tree *) pointer_map_contains (ctx->cb.decl_map, var);
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
                         ? ctx->sfield_map : ctx->field_map,
                         (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return n ? (tree) n->value : NULL_TREE;
}

/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      /* ??? Trivially accessible from anywhere.  But why would we even
         be passing an address in this case?  Should we simply assert
         this to be false, or should we have a cleanup pass that removes
         these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
        return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
         without analyzing the expression whether or not its location
         is accessible to anyone else.  In the case of nested parallel
         regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
        return true;

      /* Do not use copy-in/copy-out for variables that have their
         address taken.  */
      if (TREE_ADDRESSABLE (decl))
        return true;

      /* Disallow copy-in/out in nested parallel if
         decl is shared in outer parallel, otherwise
         each thread could store the shared variable
         in its own copy-in location, making the
         variable no longer really shared.  */
      if (!TREE_READONLY (decl) && shared_ctx->is_nested)
        {
          omp_context *up;

          for (up = shared_ctx->outer; up; up = up->outer)
            if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
              break;

          if (up)
            {
              tree c;

              for (c = gimple_omp_taskreg_clauses (up->stmt);
                   c; c = OMP_CLAUSE_CHAIN (c))
                if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
                    && OMP_CLAUSE_DECL (c) == decl)
                  break;

              if (c)
                return true;
            }
        }

      /* For tasks avoid using copy-in/out, unless they are readonly
         (in which case just copy-in is used).  As tasks can be
         deferred or executed in a different thread, the task hasn't
         necessarily terminated when GOMP_task returns.  */
      if (!TREE_READONLY (decl) && is_task_ctx (shared_ctx))
        {
          tree outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
          if (is_gimple_reg (outer))
            {
              /* Taking address of OUTER in lower_send_shared_vars
                 might need regimplification of everything that uses the
                 variable.  */
              if (!task_shared_vars)
                task_shared_vars = BITMAP_ALLOC (NULL);
              bitmap_set_bit (task_shared_vars, DECL_UID (outer));
              TREE_ADDRESSABLE (outer) = 1;
            }
          return true;
        }
    }

  return false;
}
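
/* For instance (illustrative, not emitted by this function): a shared
   'int x' whose address is never taken can use copy-in/copy-out and is
   stored by value in the communication record, whereas an array
   'int a[10]' is an aggregate and is always passed by pointer.  */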

/* Create a new VAR_DECL and copy information from VAR to it.  */

tree
copy_var_decl (tree var, tree name, tree type)
{
  tree copy = build_decl (DECL_SOURCE_LOCATION (var), VAR_DECL, name, type);

  TREE_ADDRESSABLE (copy) = TREE_ADDRESSABLE (var);
  TREE_THIS_VOLATILE (copy) = TREE_THIS_VOLATILE (var);
  DECL_GIMPLE_REG_P (copy) = DECL_GIMPLE_REG_P (var);
  DECL_ARTIFICIAL (copy) = DECL_ARTIFICIAL (var);
  DECL_IGNORED_P (copy) = DECL_IGNORED_P (var);
  DECL_CONTEXT (copy) = DECL_CONTEXT (var);
  TREE_USED (copy) = 1;
  DECL_SEEN_IN_BIND_EXPR_P (copy) = 1;

  return copy;
}

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  TREE_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_fold_indirect_ref (ctx->receiver_decl);
  x = build3 (COMPONENT_REF, TREE_TYPE (field), x, field, NULL);
  if (by_ref)
    x = build_fold_indirect_ref (x);

  return x;
}
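
/* E.g., with the .omp_data_i receiver shown in the example near the
   top of the file, the reference built here is .omp_data_i->var, or
   *(.omp_data_i->var) when the field holds the variable's address
   (BY_REF).  */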

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx);
      x = build_fold_indirect_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if (ctx->outer)
    x = lookup_decl (var, ctx->outer);
  else if (is_reference (var))
    /* This can happen with orphaned constructs.  If var is a reference,
       it is possible it is shared and as such valid.  */
    x = var;
  else
    gcc_unreachable ();

  if (is_reference (var))
    x = build_fold_indirect_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  tree field = lookup_sfield (var, ctx);
  return build3 (COMPONENT_REF, TREE_TYPE (field),
                 ctx->sender_decl, field, NULL);
}

/* Add a new field for VAR inside the structure CTX->RECORD_TYPE and/or
   CTX->SRECORD_TYPE, as selected by MASK: bit 0 installs a receiver-side
   field (recorded in CTX->FIELD_MAP), bit 1 a sender-side field
   (recorded in CTX->SFIELD_MAP).  */

static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;

  gcc_assert ((mask & 1) == 0
              || !splay_tree_lookup (ctx->field_map, (splay_tree_key) var));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
              || !splay_tree_lookup (ctx->sfield_map, (splay_tree_key) var));

  type = TREE_TYPE (var);
  if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
                      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      DECL_ALIGN (field) = DECL_ALIGN (var);
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    DECL_ALIGN (field) = TYPE_ALIGN (type);

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
        {
          sfield = build_decl (DECL_SOURCE_LOCATION (var),
                               FIELD_DECL, DECL_NAME (var), type);
          DECL_ABSTRACT_ORIGIN (sfield) = var;
          DECL_ALIGN (sfield) = DECL_ALIGN (field);
          DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
          TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
          insert_field_into_struct (ctx->srecord_type, sfield);
        }
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
        {
          tree t;

          ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
          ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
          for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
            {
              sfield = build_decl (DECL_SOURCE_LOCATION (var),
                                   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
              DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
              insert_field_into_struct (ctx->srecord_type, sfield);
              splay_tree_insert (ctx->sfield_map,
                                 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
                                 (splay_tree_value) sfield);
            }
        }
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
                                : ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, (splay_tree_key) var,
                       (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, (splay_tree_key) var,
                       (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
        size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
        return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
        return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}


/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
           gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
               region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
             region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}


/* Create a new parallel region starting at BB inside region PARENT.  */

struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
                struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
         regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
         regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
free_omp_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}


/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
                     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node (current_function_decl);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = pointer_map_create ();

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gimple task_stmt)
{
  struct function *child_cfun;
  tree child_fn, old_fn;
  gimple_seq seq, new_seq;
  gimple bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  /* Inform the callgraph about the new function.  */
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties
    = cfun->curr_properties;

  old_fn = current_function_decl;
  push_cfun (child_cfun);
  current_function_decl = child_fn;
  bind = gimplify_body (&DECL_SAVED_TREE (child_fn), child_fn, false);
  seq = gimple_seq_alloc ();
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = gimple_seq_alloc ();
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  current_function_decl = old_fn;

  cgraph_add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  pointer_map_destroy (ctx->cb.decl_map);

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = TREE_CHAIN (t))
        DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (ctx->stmt);

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
                         TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = TREE_CHAIN (f))
        {
          tree new_f = copy_node (f);
          DECL_CONTEXT (new_f) = type;
          TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
          TREE_CHAIN (new_f) = new_fields;
          walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
          walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                     &ctx->cb, NULL);
          new_fields = new_f;

          /* Arrange to be able to look up the receiver field
             given the sender field.  */
          splay_tree_insert (ctx->field_map, (splay_tree_key) f,
                             (splay_tree_value) new_f);
        }
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  TREE_TYPE (ctx->receiver_decl) = build_pointer_type (type);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  */
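
/* For instance, given

       #pragma omp parallel shared(a) private(b)

   'a' normally gets both a field in the communication record (so the
   sender and receiver sides can exchange it) and a remapped local in
   the child, while 'b' only gets a private local replacement and no
   field.  This is a sketch of the common case; the switch below
   handles the many special cases.  */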

static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          decl = OMP_CLAUSE_DECL (c);
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            goto do_private;
          else if (!is_variable_sized (decl))
            install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_SHARED:
          gcc_assert (is_taskreg_ctx (ctx));
          decl = OMP_CLAUSE_DECL (c);
          gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
                      || !is_variable_sized (decl));
          /* Global variables don't need to be copied,
             the receiver side will use them directly.  */
          if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            break;
          by_ref = use_pointer_for_field (decl, ctx);
          if (! TREE_READONLY (decl)
              || TREE_ADDRESSABLE (decl)
              || by_ref
              || is_reference (decl))
            {
              install_var_field (decl, by_ref, 3, ctx);
              install_var_local (decl, ctx);
              break;
            }
          /* We don't need to copy const scalar vars back.  */
          OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
          goto do_private;

        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
        do_private:
          if (is_variable_sized (decl))
            {
              if (is_task_ctx (ctx))
                install_var_field (decl, false, 1, ctx);
              break;
            }
          else if (is_taskreg_ctx (ctx))
            {
              bool global
                = is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
              by_ref = use_pointer_for_field (decl, NULL);

              if (is_task_ctx (ctx)
                  && (global || by_ref || is_reference (decl)))
                {
                  install_var_field (decl, false, 1, ctx);
                  if (!global)
                    install_var_field (decl, by_ref, 2, ctx);
                }
              else if (!global)
                install_var_field (decl, by_ref, 3, ctx);
            }
          install_var_local (decl, ctx);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
          decl = OMP_CLAUSE_DECL (c);
          by_ref = use_pointer_for_field (decl, NULL);
          install_var_field (decl, by_ref, 3, ctx);
          break;

        case OMP_CLAUSE_DEFAULT:
          ctx->default_kind = OMP_CLAUSE_DEFAULT_KIND (c);
          break;

        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
          if (ctx->outer)
            scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
          break;

        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_LASTPRIVATE:
          /* Let the corresponding firstprivate clause create
             the variable.  */
          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            scan_array_reductions = true;
          if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
            break;
          /* FALLTHRU */

        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          decl = OMP_CLAUSE_DECL (c);
          if (is_variable_sized (decl))
            install_var_local (decl, ctx);
          fixup_remapped_decl (decl, ctx,
                               OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
                               && OMP_CLAUSE_PRIVATE_DEBUG (c));
          if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
              && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            scan_array_reductions = true;
          break;

        case OMP_CLAUSE_SHARED:
          decl = OMP_CLAUSE_DECL (c);
          if (! is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
            fixup_remapped_decl (decl, ctx, false);
          break;

        case OMP_CLAUSE_COPYPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_DEFAULT:
        case OMP_CLAUSE_IF:
        case OMP_CLAUSE_NUM_THREADS:
        case OMP_CLAUSE_SCHEDULE:
        case OMP_CLAUSE_NOWAIT:
        case OMP_CLAUSE_ORDERED:
        case OMP_CLAUSE_COLLAPSE:
        case OMP_CLAUSE_UNTIED:
          break;

        default:
          gcc_unreachable ();
        }
    }

  if (scan_array_reductions)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
          && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
          scan_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
        }
      else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
               && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
        scan_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
}

/* Create a new name for the omp child function.  Returns an identifier.  */

static GTY(()) unsigned int tmp_ompfn_id_num;

static tree
create_omp_child_function_name (bool task_copy)
{
  tree name = DECL_ASSEMBLER_NAME (current_function_decl);
  size_t len = IDENTIFIER_LENGTH (name);
  char *tmp_name, *prefix;
  const char *suffix;

  suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
  prefix = XALLOCAVEC (char, len + strlen (suffix) + 1);
  memcpy (prefix, IDENTIFIER_POINTER (name), len);
  strcpy (prefix + len, suffix);
#ifndef NO_DOT_IN_LABEL
  prefix[len] = '.';
#elif !defined NO_DOLLAR_IN_LABEL
  prefix[len] = '$';
#endif
  ASM_FORMAT_PRIVATE_NAME (tmp_name, prefix, tmp_ompfn_id_num++);
  return get_identifier (tmp_name);
}
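
/* For a function 'bar' this typically produces identifiers like
   'bar.omp_fn.0', matching the child function name used in the combined
   parallel+workshare example earlier in this file ('$' or '_' replaces
   the dot where the assembler does not allow it).  The exact numbering
   comes from ASM_FORMAT_PRIVATE_NAME and the counter above.  */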

/* Build a decl for the omp child function.  It will not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
                                     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt),
                     FUNCTION_DECL, name, type);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  t = build_decl (DECL_SOURCE_LOCATION (decl),
                  PARM_DECL, get_identifier (".omp_data_i"), ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
                      PARM_DECL, get_identifier (".omp_data_o"),
                      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      TREE_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  pop_cfun ();
}
1622
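
/* Schematically (editor's illustration), the decl built above
   corresponds to a child function of the form

       static void foo._omp_fn.0 (void *.omp_data_i);

   or, when TASK_COPY is set, to a task copy function

       static void foo._omp_cpyfn.1 (void *.omp_data_o, void *.omp_data_i);

   whose first argument is the destination record (.omp_data_o is
   chained in front of .omp_data_i above).  */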
 

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gimple stmt = gsi_stmt (*gsi);

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && find_omp_clause (gimple_omp_parallel_clauses (stmt),
                          OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
}
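
/* For example (illustration only): given

       #pragma omp parallel shared(a) firstprivate(b)
       { ... }

   scan_sharing_clauses fills ctx->record_type with one field per
   communicated variable -- roughly a pointer field for a shared
   variable passed by reference and a value field for a firstprivate
   scalar -- and the child function receives a pointer to an instance
   of that .omp_data_s record.  */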
 
/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);

  /* Ignore task directives with empty bodies.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt)))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  ctx = new_omp_context (stmt, outer_ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->default_kind = OMP_CLAUSE_DEFAULT_SHARED;
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
                         TYPE_DECL, name, ctx->srecord_type);
      TYPE_NAME (ctx->srecord_type) = name;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
  else
    {
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
        if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
            || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
          {
            *q = *p;
            *p = TREE_CHAIN (*p);
            TREE_CHAIN (*q) = NULL_TREE;
            q = &TREE_CHAIN (*q);
          }
        else
          p = &TREE_CHAIN (*p);
      *p = vla_fields;
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
        layout_type (ctx->srecord_type);
      t = fold_convert_loc (loc, long_integer_type_node,
                            TYPE_SIZE_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node,
                         TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
 

/* Scan an OpenMP loop directive.  */

static void
scan_omp_for (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;

  ctx = new_omp_context (stmt, outer_ctx);

  scan_sharing_clauses (gimple_omp_for_clauses (stmt), ctx);

  scan_omp (gimple_omp_for_pre_body (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body (stmt), ctx);
}
 
/* Scan an OpenMP sections directive.  */

static void
scan_omp_sections (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);
}
 
/* Scan an OpenMP single directive.  */

static void
scan_omp_single (gimple stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
                     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
 

/* Check OpenMP nesting restrictions.  */
static void
check_omp_nesting_restrictions (gimple stmt, omp_context *ctx)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_CALL:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_ORDERED:
          case GIMPLE_OMP_MASTER:
          case GIMPLE_OMP_TASK:
            if (is_gimple_call (stmt))
              {
                warning (0, "barrier region may not be closely nested inside "
                            "of work-sharing, critical, ordered, master or "
                            "explicit task region");
                return;
              }
            warning (0, "work-sharing region may not be closely nested inside "
                        "of work-sharing, critical, ordered, master or explicit "
                        "task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_FOR:
          case GIMPLE_OMP_SECTIONS:
          case GIMPLE_OMP_SINGLE:
          case GIMPLE_OMP_TASK:
            warning (0, "master region may not be closely nested inside "
                        "of work-sharing or explicit task region");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_ORDERED:
      for (; ctx != NULL; ctx = ctx->outer)
        switch (gimple_code (ctx->stmt))
          {
          case GIMPLE_OMP_CRITICAL:
          case GIMPLE_OMP_TASK:
            warning (0, "ordered region may not be closely nested inside "
                        "of critical or explicit task region");
            return;
          case GIMPLE_OMP_FOR:
            if (find_omp_clause (gimple_omp_for_clauses (ctx->stmt),
                                 OMP_CLAUSE_ORDERED) == NULL)
              warning (0, "ordered region must be closely nested inside "
                          "a loop region with an ordered clause");
            return;
          case GIMPLE_OMP_PARALLEL:
            return;
          default:
            break;
          }
      break;
    case GIMPLE_OMP_CRITICAL:
      for (; ctx != NULL; ctx = ctx->outer)
        if (gimple_code (ctx->stmt) == GIMPLE_OMP_CRITICAL
            && (gimple_omp_critical_name (stmt)
                == gimple_omp_critical_name (ctx->stmt)))
          {
            warning (0, "critical region may not be nested inside a critical "
                        "region with the same name");
            return;
          }
      break;
    default:
      break;
    }
}
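
/* For example (illustration only), the first case above diagnoses

       #pragma omp for
       for (i = 0; i < n; i++)
         {
           #pragma omp for          /* warning: work-sharing region may
                                       not be closely nested ...  */
           for (j = 0; j < n; j++)
             ...
         }

   because the inner loop is a work-sharing region closely nested in
   another work-sharing region, with no intervening parallel.  */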
 

/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OpenMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
        *tp = remap_decl (t, &ctx->cb);
      break;

    default:
      if (ctx && TYPE_P (t))
        *tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
        {
          *walk_subtrees = 1;
          if (ctx)
            TREE_TYPE (t) = remap_type (TREE_TYPE (t), &ctx->cb);
        }
      break;
    }

  return NULL_TREE;
}
 

/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OpenMP directives in
   the current statement in GSI.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                 struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the OpenMP nesting restrictions.  */
  if (ctx != NULL)
    {
      if (is_gimple_omp (stmt))
        check_omp_nesting_restrictions (stmt, ctx);
      else if (is_gimple_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
              && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
            check_omp_nesting_restrictions (stmt, ctx);
        }
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      scan_omp_for (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (stmt, ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (stmt, ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
        tree var;

        *handled_ops_p = false;
        if (ctx)
          for (var = gimple_bind_vars (stmt); var ; var = TREE_CHAIN (var))
            insert_decl_map (&ctx->cb, var, var);
      }
      break;

    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
 

/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OpenMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq (body, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}
 
/* Re-gimplification and code generation routines.  */

/* Build a call to GOMP_barrier.  */

static tree
build_omp_barrier (void)
{
  return build_call_expr (built_in_decls[BUILT_IN_GOMP_BARRIER], 0);
}

/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}
 

/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context.  Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

        iD.1562 = 0;
        #omp parallel shared(iD.1562)           -> outer parallel
          iD.1562 = iD.1562 + 1;

          #omp parallel shared (iD.1562)        -> inner parallel
             iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

        outer parallel          .omp_data_s.1.i -> iD.1562
        inner parallel          .omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it.  So, in
   this case we would have:

        iD.1562 = 0;
        .omp_data_o.1.i = iD.1562;
        #omp parallel shared(iD.1562)           -> outer parallel
          .omp_data_i.1 = &.omp_data_o.1
          .omp_data_i.1->i = .omp_data_i.1->i + 1;

          .omp_data_o.2.i = iD.1562;            -> **
          #omp parallel shared(iD.1562)         -> inner parallel
            .omp_data_i.2 = &.omp_data_o.2
            .omp_data_i.2->i = .omp_data_i.2->i - 1;


    ** This is a problem.  The symbol iD.1562 cannot be referenced
       inside the body of the outer parallel region.  But since we are
       emitting this copy operation while expanding the inner parallel
       directive, we need to access the CTX structure of the outer
       parallel directive to get the correct mapping:

          .omp_data_o.2.i = .omp_data_i.1->i

    Since there may be other workshare or parallel directives enclosing
    the parallel directive, it may be necessary to walk up the context
    parent chain.  This is not a problem in general because nested
    parallelism happens only rarely.  */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}
 

/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}
 

/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  switch (OMP_CLAUSE_REDUCTION_CODE (clause))
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return fold_convert_loc (loc, type, integer_zero_node);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max, min;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            {
              real_inf (&max);
              real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
            }
          else
            real_maxval (&min, 1, TYPE_MODE (type));
          return build_real (type, min);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MIN_VALUE (type);
        }

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
        {
          REAL_VALUE_TYPE max;
          if (HONOR_INFINITIES (TYPE_MODE (type)))
            real_inf (&max);
          else
            real_maxval (&max, 0, TYPE_MODE (type));
          return build_real (type, max);
        }
      else
        {
          gcc_assert (INTEGRAL_TYPE_P (type));
          return TYPE_MAX_VALUE (type);
        }

    default:
      gcc_unreachable ();
    }
}
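
/* For instance (illustration only): for "reduction(+:x)" the private
   copy starts at 0, for "reduction(*:x)" at 1, for "reduction(&:x)"
   at ~0, and for "reduction(max:x)" at the smallest value of the type
   (-inf for floats honoring infinities, TYPE_MIN_VALUE for
   integers).  */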
 
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables.  Initialization statements go in ILIST, while calls
   to destructors go in DLIST.  */

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
                         omp_context *ctx)
{
  gimple_stmt_iterator diter;
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  int pass;

  *dlist = gimple_seq_alloc ();
  diter = gsi_start (*dlist);
  copyin_seq = NULL;

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass.  This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations.  */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
        {
          enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
          tree var, new_var;
          bool by_ref;
          location_t clause_loc = OMP_CLAUSE_LOCATION (c);

          switch (c_kind)
            {
            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_PRIVATE_DEBUG (c))
                continue;
              break;
            case OMP_CLAUSE_SHARED:
              if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
                {
                  gcc_assert (is_global_var (OMP_CLAUSE_DECL (c)));
                  continue;
                }
            case OMP_CLAUSE_FIRSTPRIVATE:
            case OMP_CLAUSE_COPYIN:
            case OMP_CLAUSE_REDUCTION:
              break;
            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                {
                  lastprivate_firstprivate = true;
                  if (pass != 0)
                    continue;
                }
              break;
            default:
              continue;
            }

          new_var = var = OMP_CLAUSE_DECL (c);
          if (c_kind != OMP_CLAUSE_COPYIN)
            new_var = lookup_decl (var, ctx);

          if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
            {
              if (pass != 0)
                continue;
            }
          else if (is_variable_sized (var))
            {
              /* For variable sized types, we need to allocate the
                 actual storage here.  Call alloca and store the
                 result in the pointer decl that we created elsewhere.  */
              if (pass == 0)
                continue;

              if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
                {
                  gimple stmt;
                  tree tmp;

                  ptr = DECL_VALUE_EXPR (new_var);
                  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
                  ptr = TREE_OPERAND (ptr, 0);
                  gcc_assert (DECL_P (ptr));
                  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));

                  /* void *tmp = __builtin_alloca */
                  stmt
                    = gimple_build_call (built_in_decls[BUILT_IN_ALLOCA], 1, x);
                  tmp = create_tmp_var_raw (ptr_type_node, NULL);
                  gimple_add_tmp_var (tmp);
                  gimple_call_set_lhs (stmt, tmp);

                  gimple_seq_add_stmt (ilist, stmt);

                  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
                  gimplify_assign (ptr, x, ilist);
                }
            }
          else if (is_reference (var))
            {
              /* For references that are being privatized for Fortran,
                 allocate new backing storage for the new pointer
                 variable.  This allows us to avoid changing all the
                 code that expects a pointer to something that expects
                 a direct variable.  Note that this doesn't apply to
                 C++, since reference types are disallowed in data
                 sharing clauses there, except for NRV optimized
                 return values.  */
              if (pass == 0)
                continue;

              x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
              if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
                {
                  x = build_receiver_ref (var, false, ctx);
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else if (TREE_CONSTANT (x))
                {
                  const char *name = NULL;
                  if (DECL_NAME (var))
                    name = IDENTIFIER_POINTER (DECL_NAME (new_var));

                  x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
                                          name);
                  gimple_add_tmp_var (x);
                  TREE_ADDRESSABLE (x) = 1;
                  x = build_fold_addr_expr_loc (clause_loc, x);
                }
              else
                {
                  x = build_call_expr_loc (clause_loc,
                                           built_in_decls[BUILT_IN_ALLOCA],
                                           1, x);
                }

              x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
              gimplify_assign (new_var, x, ilist);

              new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
            }
          else if (c_kind == OMP_CLAUSE_REDUCTION
                   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
            {
              if (pass == 0)
                continue;
            }
          else if (pass != 0)
            continue;

          switch (OMP_CLAUSE_CODE (c))
            {
            case OMP_CLAUSE_SHARED:
              /* Shared global vars are just accessed directly.  */
              if (is_global_var (new_var))
                break;
              /* Set up the DECL_VALUE_EXPR for shared variables now.  This
                 needs to be delayed until after fixup_child_record_type so
                 that we get the correct type during the dereference.  */
              by_ref = use_pointer_for_field (var, ctx);
              x = build_receiver_ref (var, by_ref, ctx);
              SET_DECL_VALUE_EXPR (new_var, x);
              DECL_HAS_VALUE_EXPR_P (new_var) = 1;

              /* ??? If VAR is not passed by reference, and the variable
                 hasn't been initialized yet, then we'll get a warning for
                 the store into the omp_data_s structure.  Ideally, we'd be
                 able to notice this and not store anything at all, but
                 we're generating code too early.  Suppress the warning.  */
              if (!by_ref)
                TREE_NO_WARNING (var) = 1;
              break;

            case OMP_CLAUSE_LASTPRIVATE:
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                break;
              /* FALLTHRU */

            case OMP_CLAUSE_PRIVATE:
              if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
                x = build_outer_var_ref (var, ctx);
              else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
                {
                  if (is_task_ctx (ctx))
                    x = build_receiver_ref (var, false, ctx);
                  else
                    x = build_outer_var_ref (var, ctx);
                }
              else
                x = NULL;
              x = lang_hooks.decls.omp_clause_default_ctor (c, new_var, x);
              if (x)
                gimplify_and_add (x, ilist);
              /* FALLTHRU */

            do_dtor:
              x = lang_hooks.decls.omp_clause_dtor (c, new_var);
              if (x)
                {
                  gimple_seq tseq = NULL;

                  dtor = x;
                  gimplify_stmt (&dtor, &tseq);
                  gsi_insert_seq_before (&diter, tseq, GSI_SAME_STMT);
                }
              break;

            case OMP_CLAUSE_FIRSTPRIVATE:
              if (is_task_ctx (ctx))
                {
                  if (is_reference (var) || is_variable_sized (var))
                    goto do_dtor;
                  else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
                                                                          ctx))
                           || use_pointer_for_field (var, NULL))
                    {
                      x = build_receiver_ref (var, false, ctx);
                      SET_DECL_VALUE_EXPR (new_var, x);
                      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
                      goto do_dtor;
                    }
                }
              x = build_outer_var_ref (var, ctx);
              x = lang_hooks.decls.omp_clause_copy_ctor (c, new_var, x);
              gimplify_and_add (x, ilist);
              goto do_dtor;
              break;

            case OMP_CLAUSE_COPYIN:
              by_ref = use_pointer_for_field (var, NULL);
              x = build_receiver_ref (var, by_ref, ctx);
              x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
              append_to_statement_list (x, &copyin_seq);
              copyin_by_ref |= by_ref;
              break;

            case OMP_CLAUSE_REDUCTION:
              if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
                {
                  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
                  x = build_outer_var_ref (var, ctx);

                  if (is_reference (var))
                    x = build_fold_addr_expr_loc (clause_loc, x);
                  SET_DECL_VALUE_EXPR (placeholder, x);
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
                  lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
                  gimple_seq_add_seq (ilist,
                                      OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c));
                  OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
                  DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
                }
              else
                {
                  x = omp_reduction_init (c, TREE_TYPE (new_var));
                  gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
                  gimplify_assign (new_var, x, ilist);
                }
              break;

            default:
              gcc_unreachable ();
            }
        }
    }

  /* The copyin sequence is not to be executed by the main thread, since
     that would result in self-copies.  Perhaps not visible to scalars,
     but it certainly is to C++ operator=.  */
  if (copyin_seq)
    {
      x = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
      x = build2 (NE_EXPR, boolean_type_node, x,
                  build_int_cst (TREE_TYPE (x), 0));
      x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
      gimplify_and_add (x, ilist);
    }

  /* If any copyin variable is passed by reference, we must ensure the
     master thread doesn't modify it before it is copied over in all
     threads.  Similarly for variables in both firstprivate and
     lastprivate clauses we need to ensure the lastprivate copying
     happens after firstprivate copying in all threads.  */
  if (copyin_by_ref || lastprivate_firstprivate)
    gimplify_and_add (build_omp_barrier (), ilist);
}
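
/* As an example (illustration only, using the .priv notation from the
   comments above): for "firstprivate(b)" on a parallel, the receiver
   side initializes the private copy from the incoming record, roughly

       b.priv = .omp_data_i->b;

   while "copyin(t)" produces a copy guarded so that only the
   non-master threads execute it:

       if (omp_get_thread_num () != 0)
         t = *.omp_data_i->t;
*/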
 

/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *stmt_list,
                           omp_context *ctx)
{
  tree x, c, label = NULL;
  bool par_clauses = false;

  /* Early exit if there are no lastprivate clauses.  */
  clauses = find_omp_clause (clauses, OMP_CLAUSE_LASTPRIVATE);
  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
         with its parallel.  In that case, look for the clauses on the
         parallel statement itself.  */
      if (is_parallel_ctx (ctx))
        return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
        return;

      clauses = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                                 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
        return;
      par_clauses = true;
    }

  if (predicate)
    {
      gimple stmt;
      tree label_true, arm1, arm2;

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      arm1 = TREE_OPERAND (predicate, 0);
      arm2 = TREE_OPERAND (predicate, 1);
      gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
      gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
      stmt = gimple_build_cond (TREE_CODE (predicate), arm1, arm2,
                                label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE)
        {
          var = OMP_CLAUSE_DECL (c);
          new_var = lookup_decl (var, ctx);

          if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
            {
              lower_omp (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
              gimple_seq_add_seq (stmt_list,
                                  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
            }
          OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;

          x = build_outer_var_ref (var, ctx);
          if (is_reference (var))
            new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
          x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
          gimplify_and_add (x, stmt_list);
        }
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
        {
          /* If this was a workshare clause, see if it had been combined
             with its parallel.  In that case, continue looking for the
             clauses also on the parallel statement itself.  */
          if (is_parallel_ctx (ctx))
            break;

          ctx = ctx->outer;
          if (ctx == NULL || !is_parallel_ctx (ctx))
            break;

          c = find_omp_clause (gimple_omp_parallel_clauses (ctx->stmt),
                               OMP_CLAUSE_LASTPRIVATE);
          par_clauses = true;
        }
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
}
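
/* Illustration (not part of the original source): for

       #pragma omp for lastprivate(x)
       for (i = 0; i < n; i++) ...

   the copy-back "x = x.priv;" emitted above is guarded by PREDICATE,
   a comparison of the just-finished iteration against the last one,
   so only the thread that executed the final iteration updates the
   original x.  */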
 

/* Generate code to implement the REDUCTION clauses.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple stmt;
  tree x, c;
  int count = 0;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
        if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
          {
            /* Never use OMP_ATOMIC for array reductions.  */
            count = -1;
            break;
          }
        count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
        continue;

      var = OMP_CLAUSE_DECL (c);
      new_var = lookup_decl (var, ctx);
      if (is_reference (var))
        new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
         identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
        code = PLUS_EXPR;

      if (count == 1)
        {
          tree addr = build_fold_addr_expr_loc (clause_loc, ref);

          addr = save_expr (addr);
          ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
          x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
          x = build2 (OMP_ATOMIC, void_type_node, addr, x);
          gimplify_and_add (x, stmt_seqp);
          return;
        }

      if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
        {
          tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

          if (is_reference (var))
            ref = build_fold_addr_expr_loc (clause_loc, ref);
          SET_DECL_VALUE_EXPR (placeholder, ref);
          DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
          lower_omp (OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
          gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
          OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
          OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
        }
      else
        {
          x = build2 (code, TREE_TYPE (ref), ref, new_var);
          ref = build_outer_var_ref (var, ctx);
          gimplify_assign (ref, x, &sub_seq);
        }
    }

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_START], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ATOMIC_END], 0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
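
/* Schematically (illustration only): with a single "reduction(+:s)"
   clause the merge is a lone atomic update,

       #pragma omp atomic
       s = s + s.priv;

   whereas several scalar reduction clauses are merged under one lock:

       GOMP_atomic_start ();
       s = s + s.priv;
       p = p * p.priv;
       GOMP_atomic_end ();
*/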
 

/* Generate code to implement the COPYPRIVATE clauses.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
                           omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, new_var, ref, x;
      bool by_ref;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYPRIVATE)
        continue;

      var = OMP_CLAUSE_DECL (c);
      by_ref = use_pointer_for_field (var, NULL);

      ref = build_sender_ref (var, ctx);
      x = new_var = lookup_decl_in_outer_ctx (var, ctx);
      if (by_ref)
        {
          x = build_fold_addr_expr_loc (clause_loc, new_var);
          x = fold_convert_loc (clause_loc, TREE_TYPE (ref), x);
        }
      gimplify_assign (ref, x, slist);

      ref = build_receiver_ref (var, false, ctx);
      if (by_ref)
        {
          ref = fold_convert_loc (clause_loc,
                                  build_pointer_type (TREE_TYPE (new_var)),
                                  ref);
          ref = build_fold_indirect_ref_loc (clause_loc, ref);
        }
      if (is_reference (var))
        {
          ref = fold_convert_loc (clause_loc, TREE_TYPE (new_var), ref);
          ref = build_fold_indirect_ref_loc (clause_loc, ref);
          new_var = build_fold_indirect_ref_loc (clause_loc, new_var);
        }
      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, ref);
      gimplify_and_add (x, rlist);
    }
}
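
/* For illustration: with "#pragma omp single copyprivate(x)", the
   thread that executed the single region publishes x (or &x, when
   passed by reference) through the .omp_copy_s record on the sender
   side (SLIST), and every other thread copies the value back out on
   the receiver side (RLIST).  */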
 

/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  */

static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
                    omp_context *ctx)
{
  tree c;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
          if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
            break;
          continue;
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
        case OMP_CLAUSE_LASTPRIVATE:
        case OMP_CLAUSE_REDUCTION:
          break;
        default:
          continue;
        }

      val = OMP_CLAUSE_DECL (c);
      var = lookup_decl_in_outer_ctx (val, ctx);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
          && is_global_var (var))
        continue;
      if (is_variable_sized (val))
        continue;
      by_ref = use_pointer_for_field (val, NULL);

      switch (OMP_CLAUSE_CODE (c))
        {
        case OMP_CLAUSE_PRIVATE:
        case OMP_CLAUSE_FIRSTPRIVATE:
        case OMP_CLAUSE_COPYIN:
          do_in = true;
          break;

        case OMP_CLAUSE_LASTPRIVATE:
          if (by_ref || is_reference (val))
            {
              if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
                continue;
              do_in = true;
            }
          else
            {
              do_out = true;
              if (lang_hooks.decls.omp_private_outer_ref (val))
                do_in = true;
            }
          break;

        case OMP_CLAUSE_REDUCTION:
          do_in = true;
          do_out = !(by_ref || is_reference (val));
          break;

        default:
          gcc_unreachable ();
        }

      if (do_in)
        {
          ref = build_sender_ref (val, ctx);
          x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
          gimplify_assign (ref, x, ilist);
          if (is_task_ctx (ctx))
            DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
        }

      if (do_out)
        {
          ref = build_sender_ref (val, ctx);
          gimplify_assign (var, ref, olist);
        }
    }
}
 
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared.  */

static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, f, x, record_type;

  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
    {
      ovar = DECL_ABSTRACT_ORIGIN (f);
      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
        continue;

      /* If CTX is a nested parallel directive, find the immediately
         enclosing parallel or workshare construct that contains a
         mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      if (use_pointer_for_field (ovar, ctx))
        {
          x = build_sender_ref (ovar, ctx);
          var = build_fold_addr_expr (var);
          gimplify_assign (x, var, ilist);
        }
      else
        {
          x = build_sender_ref (ovar, ctx);
          gimplify_assign (x, var, ilist);

          if (!TREE_READONLY (var)
              /* We don't need to receive a new reference to a result
                 or parm decl.  In fact we may not store to it as we will
                 invalidate any pending RSO and generate wrong gimple
                 during inlining.  */
              && !((TREE_CODE (var) == RESULT_DECL
                    || TREE_CODE (var) == PARM_DECL)
                   && DECL_BY_REFERENCE (var)))
            {
              x = build_sender_ref (ovar, ctx);
              gimplify_assign (var, x, olist);
            }
        }
    }
}
 

/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gimple
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}
 

/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
                      gimple entry_stmt, tree ws_args)
{
  tree t, t1, t2, val, cond, c, clauses;
  gimple_stmt_iterator gsi;
  gimple stmt;
  int start_ix;
  location_t clause_loc;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel_start we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL_START;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
        {
        case GIMPLE_OMP_FOR:
          gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
          start_ix = BUILT_IN_GOMP_PARALLEL_LOOP_STATIC_START
                     + (region->inner->sched_kind
                        == OMP_CLAUSE_SCHEDULE_RUNTIME
                        ? 3 : region->inner->sched_kind);
          break;
        case GIMPLE_OMP_SECTIONS:
          start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS_START;
          break;
        default:
          gcc_unreachable ();
        }
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = find_omp_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      gimple_stmt_iterator gsi;

      cond = gimple_boolify (cond);

      if (integer_zerop (val))
        val = fold_build2_loc (clause_loc,
                               EQ_EXPR, unsigned_type_node, cond,
                               build_int_cst (TREE_TYPE (cond), 0));
      else
        {
          basic_block cond_bb, then_bb, else_bb;
          edge e, e_then, e_else;
          tree tmp_then, tmp_else, tmp_join, tmp_var;

          tmp_var = create_tmp_var (TREE_TYPE (val), NULL);
          if (gimple_in_ssa_p (cfun))
            {
              tmp_then = make_ssa_name (tmp_var, NULL);
              tmp_else = make_ssa_name (tmp_var, NULL);
              tmp_join = make_ssa_name (tmp_var, NULL);
            }
          else
            {
              tmp_then = tmp_var;
              tmp_else = tmp_var;
              tmp_join = tmp_var;
            }

          e = split_block (bb, NULL);
          cond_bb = e->src;
          bb = e->dest;
          remove_edge (e);

          then_bb = create_empty_bb (cond_bb);
          else_bb = create_empty_bb (then_bb);
          set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
          set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

          stmt = gimple_build_cond_empty (cond);
          gsi = gsi_start_bb (cond_bb);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (then_bb);
          stmt = gimple_build_assign (tmp_then, val);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          gsi = gsi_start_bb (else_bb);
          stmt = gimple_build_assign
                   (tmp_else, build_int_cst (unsigned_type_node, 1));
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

          make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
          make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
          e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
          e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

          if (gimple_in_ssa_p (cfun))
            {
              gimple phi = create_phi_node (tmp_join, bb);
              SSA_NAME_DEF_STMT (tmp_join) = phi;
              add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
              add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
            }

          val = tmp_join;
        }

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
                                      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  t2 = build_fold_addr_expr (gimple_omp_parallel_child_fn (entry_stmt));

  if (ws_args)
    {
      tree args = tree_cons (NULL, t2,
                             tree_cons (NULL, t1,
                                        tree_cons (NULL, val, ws_args)));
      t = build_function_call_expr (UNKNOWN_LOCATION,
                                    built_in_decls[start_ix], args);
    }
  else
    t = build_call_expr (built_in_decls[start_ix], 3, t2, t1, val);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t = null_pointer_node;
  else
    t = build_fold_addr_expr (t);
  t = build_call_expr_loc (gimple_location (entry_stmt),
                           gimple_omp_parallel_child_fn (entry_stmt), 1, t);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);

  t = build_call_expr_loc (gimple_location (entry_stmt),
                           built_in_decls[BUILT_IN_GOMP_PARALLEL_END], 0);
  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}
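
/* The emitted sequence is roughly (illustration only):

       .omp_data_o.1.a = ...;                        <- lower_send_*
       GOMP_parallel_start (foo._omp_fn.0, &.omp_data_o.1, nthreads);
       foo._omp_fn.0 (&.omp_data_o.1);               <- master joins in
       GOMP_parallel_end ();

   with an "if" clause folded into the NTHREADS argument: a false
   condition forces nthreads == 1, while 0 leaves the choice to the
   runtime.  */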
 

/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */

static void
expand_task_call (basic_block bb, gimple entry_stmt)
{
  tree t, t1, t2, t3, flags, cond, c, clauses;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  clauses = gimple_omp_task_clauses (entry_stmt);

  c = find_omp_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (c));
  else
    cond = boolean_true_node;

  c = find_omp_clause (clauses, OMP_CLAUSE_UNTIED);
  flags = build_int_cst (unsigned_type_node, (c ? 1 : 0));

  gsi = gsi_last_bb (bb);
  t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  t = build_call_expr (built_in_decls[BUILT_IN_GOMP_TASK], 7, t1, t2, t3,
                       gimple_omp_task_arg_size (entry_stmt),
                       gimple_omp_task_arg_align (entry_stmt), cond, flags);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                            false, GSI_CONTINUE_LINKING);
}
 
3115
 
3116
/* If exceptions are enabled, wrap the statements in BODY in a MUST_NOT_THROW
3117
   catch handler and return it.  This prevents programs from violating the
3118
   structured block semantics with throws.  */
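/* In outline, the result is (a sketch in GIMPLE-like notation):
     try { BODY } catch { <must-not-throw: cleanup action or __builtin_trap> }
   so an exception escaping BODY terminates the program instead of
   unwinding out of the structured block.  */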

static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  gimple g;
  tree decl;

  if (!flag_exceptions)
    return body;

  if (lang_protect_cleanup_actions)
    decl = lang_protect_cleanup_actions ();
  else
    decl = built_in_decls[BUILT_IN_TRAP];

  g = gimple_build_eh_must_not_throw (decl);
  g = gimple_build_try (body, gimple_seq_alloc_with_stmt (g),
                        GIMPLE_TRY_CATCH);

 return gimple_seq_alloc_with_stmt (g);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */
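/* For instance (a sketch), given the TREE_LIST (a, b, c) this sets
   TREE_CHAIN (a) = b and TREE_CHAIN (b) = c, and returns a.  */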

static tree
list2chain (tree list)
{
  tree t;

  for (t = list; t; t = TREE_CHAIN (t))
    {
      tree var = TREE_VALUE (t);
      if (TREE_CHAIN (t))
        TREE_CHAIN (var) = TREE_VALUE (TREE_CHAIN (t));
      else
        TREE_CHAIN (var) = NULL_TREE;
    }

  return list ? TREE_VALUE (list) : NULL_TREE;
}


/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */
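/* For example, in
     #pragma omp parallel
     {
       #pragma omp for
         ...
     }
   the barrier implied by the worksharing loop would be immediately
   followed by the barrier ending the parallel region, so the former
   can be dropped (subject to the task-related check below).  */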

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have a REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_bb (e->src);
      if (gsi_end_p (gsi))
        continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
          && !gimple_omp_return_nowait_p (stmt))
        {
          /* OpenMP 3.0 tasks unfortunately prevent this optimization
             in many cases.  If there could be tasks queued, the barrier
             might be needed to let the tasks run before some local
             variable of the parallel that the task uses as shared
             runs out of scope.  The task can be spawned either
             from within the current function (this would be easy to check)
             or from some function it calls and gets passed an address
             of such a variable.  */
          if (any_addressable_vars < 0)
            {
              gimple parallel_stmt = last_stmt (region->entry);
              tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
              tree local_decls = DECL_STRUCT_FUNCTION (child_fun)->local_decls;
              tree block;

              any_addressable_vars = 0;
              for (; local_decls; local_decls = TREE_CHAIN (local_decls))
                if (TREE_ADDRESSABLE (TREE_VALUE (local_decls)))
                  {
                    any_addressable_vars = 1;
                    break;
                  }
              for (block = gimple_block (stmt);
                   !any_addressable_vars
                   && block
                   && TREE_CODE (block) == BLOCK;
                   block = BLOCK_SUPERCONTEXT (block))
                {
                  for (local_decls = BLOCK_VARS (block);
                       local_decls;
                       local_decls = TREE_CHAIN (local_decls))
                    if (TREE_ADDRESSABLE (local_decls))
                      {
                        any_addressable_vars = 1;
                        break;
                      }
                  if (block == gimple_block (parallel_stmt))
                    break;
                }
            }
          if (!any_addressable_vars)
            gimple_omp_return_set_nowait (stmt);
        }
    }
}

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
        {
          region = region->next;
          remove_exit_barriers (region);
        }
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for a task body, except
   that in an untied task omp_get_thread_num () can change at any task
   scheduling point.  */
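/* For instance, repeated calls to omp_get_thread_num () in one parallel
   body become calls to the const __builtin_omp_get_thread_num (), which
   later passes can then CSE into a single evaluation.  */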

static void
optimize_omp_library_calls (gimple entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM]);
  tree num_thr_id
    = DECL_ASSEMBLER_NAME (built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS]);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
                      && find_omp_clause (gimple_omp_task_clauses (entry_stmt),
                                          OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        gimple call = gsi_stmt (gsi);
        tree decl;

        if (is_gimple_call (call)
            && (decl = gimple_call_fndecl (call))
            && DECL_EXTERNAL (decl)
            && TREE_PUBLIC (decl)
            && DECL_INITIAL (decl) == NULL)
          {
            tree built_in;

            if (DECL_NAME (decl) == thr_num_id)
              {
                /* In #pragma omp task untied omp_get_thread_num () can change
                   during the execution of the task region.  */
                if (untied_task)
                  continue;
                built_in = built_in_decls [BUILT_IN_OMP_GET_THREAD_NUM];
              }
            else if (DECL_NAME (decl) == num_thr_id)
              built_in = built_in_decls [BUILT_IN_OMP_GET_NUM_THREADS];
            else
              continue;

            if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
                || gimple_call_num_args (call) != 0)
              continue;

            if (flag_exceptions && !TREE_NOTHROW (decl))
              continue;

            if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
                || !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
                                        TREE_TYPE (TREE_TYPE (built_in))))
              continue;

            gimple_call_set_fndecl (call, built_in);
          }
      }
}

/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t, ws_args, *tp;
  tree save_current;
  gimple_stmt_iterator gsi;
  gimple entry_stmt, stmt;
  edge e;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  /* If this function has already been instrumented, make sure
     the child function isn't instrumented again.  */
  child_cfun->after_tree_profile = cfun->after_tree_profile;

  entry_bb = region->entry;
  exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL_TREE;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
         the region, in which case all we need to do is make the
         sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;
      gimple_stmt_iterator gsi;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
                  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
        {
          exit_succ_e = single_succ_edge (exit_bb);
          make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
        }
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      /* If the parallel region needs data sent from the parent
         function, then the very first statement (except possible
         tree profile counter updates) of the parallel body
         is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
         &.OMP_DATA_O is passed as an argument to the child function,
         we need to replace it with the argument as seen by the child
         function.

         In most cases, this will end up being the identity assignment
         .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
         a function call that has been inlined, the original PARM_DECL
         .OMP_DATA_I may have been converted into a different local
         variable, in which case we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
        {
          basic_block entry_succ_bb = single_succ (entry_bb);
          gimple_stmt_iterator gsi;
          tree arg, narg;
          gimple parcopy_stmt = NULL;

          for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
            {
              gimple stmt;

              gcc_assert (!gsi_end_p (gsi));
              stmt = gsi_stmt (gsi);
              if (gimple_code (stmt) != GIMPLE_ASSIGN)
                continue;

              if (gimple_num_ops (stmt) == 2)
                {
                  tree arg = gimple_assign_rhs1 (stmt);

                  /* We're ignoring the subcode because we're
                     effectively doing a STRIP_NOPS.  */

                  if (TREE_CODE (arg) == ADDR_EXPR
                      && TREE_OPERAND (arg, 0)
                        == gimple_omp_taskreg_data_arg (entry_stmt))
                    {
                      parcopy_stmt = stmt;
                      break;
                    }
                }
            }

          gcc_assert (parcopy_stmt != NULL);
          arg = DECL_ARGUMENTS (child_fn);

          if (!gimple_in_ssa_p (cfun))
            {
              if (gimple_assign_lhs (parcopy_stmt) == arg)
                gsi_remove (&gsi, true);
              else
                {
                  /* ?? Is setting the subcode really necessary ??  */
                  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
                  gimple_assign_set_rhs1 (parcopy_stmt, arg);
                }
            }
          else
            {
              /* If we are in ssa form, we must load the value from the default
                 definition of the argument.  That should not be defined now,
                 since the argument is not used uninitialized.  */
              gcc_assert (gimple_default_def (cfun, arg) == NULL);
              narg = make_ssa_name (arg, gimple_build_nop ());
              set_default_def (arg, narg);
              /* ?? Is setting the subcode really necessary ??  */
              gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (narg));
              gimple_assign_set_rhs1 (parcopy_stmt, narg);
              update_stmt (parcopy_stmt);
            }
        }

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = list2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in the parallel/task block
         rather than in the containing function's local_decls chain,
         which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = TREE_CHAIN (t))
        if (TREE_CODE (t) == VAR_DECL
            && TREE_STATIC (t)
            && !DECL_EXTERNAL (t))
          varpool_finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      gimple_set_body (child_fn, bb_seq (single_succ (entry_bb)));
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
        DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
         so that it can be moved to the child function.  */
      gsi = gsi_last_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
                           || gimple_code (stmt) == GIMPLE_OMP_TASK));
      gsi_remove (&gsi, true);
      e = split_block (entry_bb, stmt);
      entry_bb = e->dest;
      single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

      /* Convert GIMPLE_OMP_RETURN into a RETURN_EXPR.  */
      if (exit_bb)
        {
          gsi = gsi_last_bb (exit_bb);
          gcc_assert (!gsi_end_p (gsi)
                      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
          stmt = gimple_build_return (NULL);
          gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
          gsi_remove (&gsi, true);
        }

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
        {
          push_cfun (child_cfun);
          init_tree_ssa (child_cfun);
          init_ssa_operands ();
          cfun->gimple_df->in_ssa_p = true;
          pop_cfun ();
          block = NULL_TREE;
        }
      else
        block = gimple_block (entry_stmt);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
        single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      for (tp = &child_cfun->local_decls; *tp; )
        if (DECL_CONTEXT (TREE_VALUE (*tp)) != cfun->decl)
          tp = &TREE_CHAIN (*tp);
        else
          *tp = TREE_CHAIN (*tp);

      /* Inform the callgraph about the new function.  */
      DECL_STRUCT_FUNCTION (child_fn)->curr_properties
        = cfun->curr_properties;
      cgraph_add_new_function (child_fn, true);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
         fixed in a following pass.  */
      push_cfun (child_cfun);
      save_current = current_function_decl;
      current_function_decl = child_fn;
      if (optimize)
        optimize_omp_library_calls (entry_stmt);
      rebuild_cgraph_edges ();

      /* Some EH regions might become dead, see PR34608.  If
         pass_cleanup_cfg isn't the first pass to happen with the
         new child, these dead EH edges might cause problems.
         Clean them up now.  */
      if (flag_exceptions)
        {
          basic_block bb;
          bool changed = false;

          FOR_EACH_BB (bb)
            changed |= gimple_purge_dead_eh_edges (bb);
          if (changed)
            cleanup_tree_cfg ();
        }
      if (gimple_in_ssa_p (cfun))
        update_ssa (TODO_update_ssa);
      current_function_decl = save_current;
      pop_cfun ();
    }

  /* Emit a library call to launch the child threads.  */
  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb, entry_stmt, ws_args);
  else
    expand_task_call (new_bb, entry_stmt);
  update_ssa (TODO_update_ssa_only_virtuals);
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
        if (more) goto L0; else goto L3;
    L0:
        V = istart0;
        iend = iend0;
    L1:
        BODY;
        V += STEP;
        if (V cond iend) goto L1; else goto L2;
    L2:
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
        for (V2 = N21; V2 cond2 N22; V2 += STEP2)
          for (V3 = N31; V3 cond3 N32; V3 += STEP3)
            BODY;

    we generate pseudocode

        if (cond3 is <)
          adj = STEP3 - 1;
        else
          adj = STEP3 + 1;
        count3 = (adj + N32 - N31) / STEP3;
        if (cond2 is <)
          adj = STEP2 - 1;
        else
          adj = STEP2 + 1;
        count2 = (adj + N22 - N21) / STEP2;
        if (cond1 is <)
          adj = STEP1 - 1;
        else
          adj = STEP1 + 1;
        count1 = (adj + N12 - N11) / STEP1;
        count = count1 * count2 * count3;
        more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
        if (more) goto L0; else goto L3;
    L0:
        V = istart0;
        T = V;
        V3 = N31 + (T % count3) * STEP3;
        T = T / count3;
        V2 = N21 + (T % count2) * STEP2;
        T = T / count2;
        V1 = N11 + T * STEP1;
        iend = iend0;
    L1:
        BODY;
        V += 1;
        if (V < iend) goto L10; else goto L2;
    L10:
        V3 += STEP3;
        if (V3 cond3 N32) goto L1; else goto L11;
    L11:
        V3 = N31;
        V2 += STEP2;
        if (V2 cond2 N22) goto L1; else goto L12;
    L12:
        V2 = N21;
        V1 += STEP1;
        goto L1;
    L2:
        if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

      */
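/* For a schedule(dynamic) loop, for instance, the callers pass
   BUILT_IN_GOMP_LOOP_DYNAMIC_START and BUILT_IN_GOMP_LOOP_DYNAMIC_NEXT
   as START_FN and NEXT_FN; guided and runtime schedules use the
   corresponding GOMP_loop_* entry points.  */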

static void
expand_omp_for_generic (struct omp_region *region,
                        struct omp_for_data *fd,
                        enum built_in_function start_fn,
                        enum built_in_function next_fn)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gimple stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
              || !in_combined_parallel);

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (istart0);
      add_referenced_var (iend0);
    }

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type))
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
        {
          n1 = fd->loop.n1;
          n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
        }
      else
        {
          n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
          n2 = fd->loop.n1;
        }
      if (TREE_CODE (n1) != INTEGER_CST
          || TREE_CODE (n2) != INTEGER_CST
          || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
        bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
              || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb);
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->collapse > 1)
    {
      /* collapsed loops need work for expansion in SSA form.  */
      gcc_assert (!gimple_in_ssa_p (cfun));
      counts = (tree *) alloca (fd->collapse * sizeof (tree));
      for (i = 0; i < fd->collapse; i++)
        {
          tree itype = TREE_TYPE (fd->loops[i].v);

          if (POINTER_TYPE_P (itype))
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (itype), 0);
          t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
                                     ? -1 : 1));
          t = fold_build2 (PLUS_EXPR, itype,
                           fold_convert (itype, fd->loops[i].step), t);
          t = fold_build2 (PLUS_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].n2));
          t = fold_build2 (MINUS_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].n1));
          if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
            t = fold_build2 (TRUNC_DIV_EXPR, itype,
                             fold_build1 (NEGATE_EXPR, itype, t),
                             fold_build1 (NEGATE_EXPR, itype,
                                          fold_convert (itype,
                                                        fd->loops[i].step)));
          else
            t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
                             fold_convert (itype, fd->loops[i].step));
          t = fold_convert (type, t);
          if (TREE_CODE (t) == INTEGER_CST)
            counts[i] = t;
          else
            {
              counts[i] = create_tmp_var (type, ".count");
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            true, GSI_SAME_STMT);
              stmt = gimple_build_assign (counts[i], t);
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
            }
          if (SSA_VAR_P (fd->loop.n2))
            {
              if (i == 0)
                t = counts[0];
              else
                {
                  t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
                  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                                true, GSI_SAME_STMT);
                }
              stmt = gimple_build_assign (fd->loop.n2, t);
              gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
            }
        }
    }
  if (in_combined_parallel)
    {
      /* In a combined parallel loop, emit a call to
         GOMP_loop_foo_next.  */
      t = build_call_expr (built_in_decls[next_fn], 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
         GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      t2 = fold_convert (fd->iter_type, fd->loop.step);
      if (POINTER_TYPE_P (type)
          && TYPE_PRECISION (type) != TYPE_PRECISION (fd->iter_type))
        {
          /* Avoid casting pointers to integer of a different size.  */
          tree itype
            = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);
          t1 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n2));
          t0 = fold_convert (fd->iter_type, fold_convert (itype, fd->loop.n1));
        }
      else
        {
          t1 = fold_convert (fd->iter_type, fd->loop.n2);
          t0 = fold_convert (fd->iter_type, fd->loop.n1);
        }
      if (bias)
        {
          t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
          t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
        }
      if (fd->iter_type == long_integer_type_node)
        {
          if (fd->chunk_size)
            {
              t = fold_convert (fd->iter_type, fd->chunk_size);
              t = build_call_expr (built_in_decls[start_fn], 6,
                                   t0, t1, t2, t, t3, t4);
            }
          else
            t = build_call_expr (built_in_decls[start_fn], 5,
                                 t0, t1, t2, t3, t4);
        }
      else
        {
          tree t5;
          tree c_bool_type;

          /* The GOMP_loop_ull_*start functions have an additional boolean
             argument, true for < loops and false for > loops.
             In Fortran, the C bool type can be different from
             boolean_type_node.  */
          c_bool_type = TREE_TYPE (TREE_TYPE (built_in_decls[start_fn]));
          t5 = build_int_cst (c_bool_type,
                              fd->loop.cond_code == LT_EXPR ? 1 : 0);
          if (fd->chunk_size)
            {
              t = fold_convert (fd->iter_type, fd->chunk_size);
              t = build_call_expr (built_in_decls[start_fn], 7,
                                   t5, t0, t1, t2, t, t3, t4);
            }
          else
            t = build_call_expr (built_in_decls[start_fn], 6,
                                 t5, t0, t1, t2, t3, t4);
        }
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
                     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                true, GSI_SAME_STMT);
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
                                                      0), t);
  t = fold_convert (type, t);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (POINTER_TYPE_P (type))
    t = fold_convert (lang_hooks.types.type_for_size (TYPE_PRECISION (type),
                                                      0), t);
  t = fold_convert (type, t);
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                   false, GSI_CONTINUE_LINKING);
  if (fd->collapse > 1)
    {
      tree tem = create_tmp_var (type, ".tem");

      stmt = gimple_build_assign (tem, fd->loop.v);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      for (i = fd->collapse - 1; i >= 0; i--)
        {
          tree vtype = TREE_TYPE (fd->loops[i].v), itype;
          itype = vtype;
          if (POINTER_TYPE_P (vtype))
            itype = lang_hooks.types.type_for_size (TYPE_PRECISION (vtype), 0);
          t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
          t = fold_convert (itype, t);
          t = fold_build2 (MULT_EXPR, itype, t,
                           fold_convert (itype, fd->loops[i].step));
          if (POINTER_TYPE_P (vtype))
            t = fold_build2 (POINTER_PLUS_EXPR, vtype,
                             fd->loops[i].n1, fold_convert (sizetype, t));
          else
            t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
          t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                        false, GSI_CONTINUE_LINKING);
          stmt = gimple_build_assign (fd->loops[i].v, t);
          gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
          if (i != 0)
            {
              t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            false, GSI_CONTINUE_LINKING);
              stmt = gimple_build_assign (tem, t);
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
            }
        }
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
         loop goes in the CONT_BB.  */
      gsi = gsi_last_bb (cont_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (stmt);
      vback = gimple_omp_continue_control_def (stmt);

      if (POINTER_TYPE_P (type))
        t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
                         fold_convert (sizetype, fd->loop.step));
      else
        t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
      t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                    true, GSI_SAME_STMT);
      stmt = gimple_build_assign (vback, t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      t = build2 (fd->loop.cond_code, boolean_type_node, vback, iend);
      stmt = gimple_build_cond_empty (t);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1)
        {
          basic_block last_bb, bb;

          last_bb = cont_bb;
          for (i = fd->collapse - 1; i >= 0; i--)
            {
              tree vtype = TREE_TYPE (fd->loops[i].v);

              bb = create_empty_bb (last_bb);
              gsi = gsi_start_bb (bb);

              if (i < fd->collapse - 1)
                {
                  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
                  e->probability = REG_BR_PROB_BASE / 8;

                  t = fd->loops[i + 1].n1;
                  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                                false, GSI_CONTINUE_LINKING);
                  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
                }
              else
                collapse_bb = bb;

              set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

              if (POINTER_TYPE_P (vtype))
                t = fold_build2 (POINTER_PLUS_EXPR, vtype,
                                 fd->loops[i].v,
                                 fold_convert (sizetype, fd->loops[i].step));
              else
                t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v,
                                 fd->loops[i].step);
              t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                            false, GSI_CONTINUE_LINKING);
              stmt = gimple_build_assign (fd->loops[i].v, t);
              gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

              if (i > 0)
                {
                  t = fd->loops[i].n2;
                  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                                false, GSI_CONTINUE_LINKING);
                  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node,
                                   fd->loops[i].v, t);
                  stmt = gimple_build_cond_empty (t);
                  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
                  e = make_edge (bb, l1_bb, EDGE_TRUE_VALUE);
                  e->probability = REG_BR_PROB_BASE * 7 / 8;
                }
              else
                make_edge (bb, l1_bb, EDGE_FALLTHRU);
              last_bb = bb;
            }
        }

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (built_in_decls[next_fn], 2,
                           build_fold_addr_expr (istart0),
                           build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
        t = fold_build2 (NE_EXPR, boolean_type_node,
                         t, build_int_cst (TREE_TYPE (t), 0));
      stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END_NOWAIT];
  else
    t = built_in_decls[BUILT_IN_GOMP_LOOP_END];
  stmt = gimple_build_call (t, 0);
  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple phi = gsi_stmt (gsi);
          SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
                   PHI_ARG_DEF_FROM_EDGE (phi, e));
        }
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      if (fd->collapse > 1)
        {
          e = find_edge (cont_bb, l1_bb);
          remove_edge (e);
          e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
        }
      else
        {
          e = find_edge (cont_bb, l1_bb);
          e->flags = EDGE_TRUE_VALUE;
        }
      e->probability = REG_BR_PROB_BASE * 7 / 8;
      find_edge (cont_bb, l2_bb)->probability = REG_BR_PROB_BASE / 8;
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
                               recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
                               recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
                               recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
                               recompute_dominator (CDI_DOMINATORS, l1_bb));
    }
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        if (cond is <)
          adj = STEP - 1;
        else
          adj = STEP + 1;
        if ((__typeof (V)) -1 > 0 && cond is >)
          n = -(adj + N2 - N1) / -STEP;
        else
          n = (adj + N2 - N1) / STEP;
        q = n / nthreads;
        q += (q * nthreads != n);
        s0 = q * threadid;
        e0 = min(s0 + q, n);
        V = s0 * STEP + N1;
        if (s0 >= e0) goto L2; else goto L0;
    L0:
        e = e0 * STEP + N1;
    L1:
        BODY;
        V += STEP;
        if (V cond e) goto L1;
    L2:
*/
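/* A worked example of the partitioning above: with n = 10 and
   nthreads = 4, q = 10/4 = 2, and q*nthreads != n bumps q to 3, so
   thread 0 runs iterations [0,3), thread 1 [3,6), thread 2 [6,9) and
   thread 3 [9,10), the min() clamping the last range to n.  */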

static void
expand_omp_for_static_nochunk (struct omp_region *region,
                               struct omp_for_data *fd)
{
  tree n, q, s0, e0, e, t, nthreads, threadid;
  tree type, itype, vmain, vback;
  basic_block entry_bb, exit_bb, seq_start_bb, body_bb, cont_bb;
  basic_block fin_bb;
  gimple_stmt_iterator gsi;
  gimple stmt;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  exit_bb = region->exit;

  /* Iteration space partitioning goes in ENTRY_BB.  */
  gsi = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&gsi, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (TRUNC_DIV_EXPR, itype, n, nthreads);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, q, nthreads);
  t = fold_build2 (NE_EXPR, itype, t, n);
  t = fold_build2 (PLUS_EXPR, itype, q, t);
  q = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (MULT_EXPR, itype, q, threadid);
  s0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = fold_build2 (PLUS_EXPR, itype, s0, q);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE, true, GSI_SAME_STMT);

  t = build2 (GE_EXPR, boolean_type_node, s0, e0);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  gsi = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop replaces the
     GIMPLE_OMP_CONTINUE.  */
  gsi = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (gsi);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  vmain = gimple_omp_continue_control_use (stmt);
  vback = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, vmain,
                     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
  t = force_gimple_operand_gsi (&gsi, t, false, NULL_TREE,
                                true, GSI_SAME_STMT);
  stmt = gimple_build_assign (vback, t);
  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, vback, e);
  gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_CONTINUE statement.  */
  gsi_remove (&gsi, true);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  gsi = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    force_gimple_operand_gsi (&gsi, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  /* Connect all the blocks.  */
  find_edge (entry_bb, seq_start_bb)->flags = EDGE_FALSE_VALUE;
  find_edge (entry_bb, fin_bb)->flags = EDGE_TRUE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb, entry_bb);
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
                           recompute_dominator (CDI_DOMINATORS, body_bb));
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
}


/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and a specified chunk size.  Given
   parameters:

        for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

        if (cond is <)
          adj = STEP - 1;
        else
          adj = STEP + 1;
        if ((__typeof (V)) -1 > 0 && cond is >)
          n = -(adj + N2 - N1) / -STEP;
        else
          n = (adj + N2 - N1) / STEP;
        trip = 0;
        V = threadid * CHUNK * STEP + N1;  -- this extra definition of V is
                                              here so that V is defined
                                              if the loop is not entered
    L0:
        s0 = (trip * nthreads + threadid) * CHUNK;
        e0 = min(s0 + CHUNK, n);
        if (s0 < n) goto L1; else goto L4;
    L1:
        V = s0 * STEP + N1;
        e = e0 * STEP + N1;
    L2:
        BODY;
        V += STEP;
        if (V cond e) goto L2; else goto L3;
    L3:
        trip += 1;
        goto L0;
    L4:
*/
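/* A worked example of the chunk hand-out above: with n = 10,
   nthreads = 2 and CHUNK = 2, trip 0 gives thread 0 s0 = 0 and
   thread 1 s0 = 2; trip 1 gives s0 = 4 and 6; on trip 2 thread 0
   takes the final chunk [8,10) while thread 1 sees s0 = 10 >= n
   and stops.  */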

static void
expand_omp_for_static_chunk (struct omp_region *region, struct omp_for_data *fd)
{
  tree n, s0, e0, e, t;
  tree trip_var, trip_init, trip_main, trip_back, nthreads, threadid;
  tree type, itype, v_main, v_back, v_extra;
  basic_block entry_bb, exit_bb, body_bb, seq_start_bb, iter_part_bb;
  basic_block trip_update_bb, cont_bb, fin_bb;
  gimple_stmt_iterator si;
  gimple stmt;
  edge se;

  itype = type = TREE_TYPE (fd->loop.v);
  if (POINTER_TYPE_P (type))
    itype = lang_hooks.types.type_for_size (TYPE_PRECISION (type), 0);

  entry_bb = region->entry;
  se = split_block (entry_bb, last_stmt (entry_bb));
  entry_bb = se->src;
  iter_part_bb = se->dest;
  cont_bb = region->cont;
  gcc_assert (EDGE_COUNT (iter_part_bb->succs) == 2);
  gcc_assert (BRANCH_EDGE (iter_part_bb)->dest
              == FALLTHRU_EDGE (cont_bb)->dest);
  seq_start_bb = split_edge (FALLTHRU_EDGE (iter_part_bb));
  body_bb = single_succ (seq_start_bb);
  gcc_assert (BRANCH_EDGE (cont_bb)->dest == body_bb);
  gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
  fin_bb = FALLTHRU_EDGE (cont_bb)->dest;
  trip_update_bb = split_edge (FALLTHRU_EDGE (cont_bb));
  exit_bb = region->exit;

  /* Trip and adjustment setup goes in ENTRY_BB.  */
  si = gsi_last_bb (entry_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_FOR);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_NUM_THREADS], 0);
  t = fold_convert (itype, t);
  nthreads = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  t = build_call_expr (built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  t = fold_convert (itype, t);
  threadid = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                       true, GSI_SAME_STMT);

  fd->loop.n1
    = force_gimple_operand_gsi (&si, fold_convert (type, fd->loop.n1),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.n2
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.n2),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->loop.step
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->loop.step),
                                true, NULL_TREE, true, GSI_SAME_STMT);
  fd->chunk_size
    = force_gimple_operand_gsi (&si, fold_convert (itype, fd->chunk_size),
                                true, NULL_TREE, true, GSI_SAME_STMT);

  t = build_int_cst (itype, (fd->loop.cond_code == LT_EXPR ? -1 : 1));
  t = fold_build2 (PLUS_EXPR, itype, fd->loop.step, t);
  t = fold_build2 (PLUS_EXPR, itype, t, fd->loop.n2);
  t = fold_build2 (MINUS_EXPR, itype, t, fold_convert (itype, fd->loop.n1));
  if (TYPE_UNSIGNED (itype) && fd->loop.cond_code == GT_EXPR)
    t = fold_build2 (TRUNC_DIV_EXPR, itype,
                     fold_build1 (NEGATE_EXPR, itype, t),
                     fold_build1 (NEGATE_EXPR, itype, fd->loop.step));
  else
    t = fold_build2 (TRUNC_DIV_EXPR, itype, t, fd->loop.step);
  t = fold_convert (itype, t);
  n = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                true, GSI_SAME_STMT);

  trip_var = create_tmp_var (itype, ".trip");
  if (gimple_in_ssa_p (cfun))
    {
      add_referenced_var (trip_var);
      trip_init = make_ssa_name (trip_var, NULL);
      trip_main = make_ssa_name (trip_var, NULL);
      trip_back = make_ssa_name (trip_var, NULL);
    }
  else
    {
      trip_init = trip_var;
      trip_main = trip_var;
      trip_back = trip_var;
    }

  stmt = gimple_build_assign (trip_init, build_int_cst (itype, 0));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = fold_build2 (MULT_EXPR, itype, threadid, fd->chunk_size);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  v_extra = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                      true, GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR.  */
  gsi_remove (&si, true);

  /* Iteration space partitioning goes in ITER_PART_BB.  */
  si = gsi_last_bb (iter_part_bb);

  t = fold_build2 (MULT_EXPR, itype, trip_main, nthreads);
  t = fold_build2 (PLUS_EXPR, itype, t, threadid);
  t = fold_build2 (MULT_EXPR, itype, t, fd->chunk_size);
  s0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = fold_build2 (PLUS_EXPR, itype, s0, fd->chunk_size);
  t = fold_build2 (MIN_EXPR, itype, t, n);
  e0 = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                 false, GSI_CONTINUE_LINKING);

  t = build2 (LT_EXPR, boolean_type_node, s0, n);
  gsi_insert_after (&si, gimple_build_cond_empty (t), GSI_CONTINUE_LINKING);

  /* Setup code for sequential iteration goes in SEQ_START_BB.  */
  si = gsi_start_bb (seq_start_bb);

  t = fold_convert (itype, s0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  t = force_gimple_operand_gsi (&si, t, false, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);
  stmt = gimple_build_assign (fd->loop.v, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  t = fold_convert (itype, e0);
  t = fold_build2 (MULT_EXPR, itype, t, fd->loop.step);
  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, fd->loop.n1,
                     fold_convert (sizetype, t));
  else
    t = fold_build2 (PLUS_EXPR, type, t, fd->loop.n1);
  e = force_gimple_operand_gsi (&si, t, true, NULL_TREE,
                                false, GSI_CONTINUE_LINKING);

  /* The code controlling the sequential loop goes in CONT_BB,
     replacing the GIMPLE_OMP_CONTINUE.  */
  si = gsi_last_bb (cont_bb);
  stmt = gsi_stmt (si);
  gcc_assert (gimple_code (stmt) == GIMPLE_OMP_CONTINUE);
  v_main = gimple_omp_continue_control_use (stmt);
  v_back = gimple_omp_continue_control_def (stmt);

  if (POINTER_TYPE_P (type))
    t = fold_build2 (POINTER_PLUS_EXPR, type, v_main,
                     fold_convert (sizetype, fd->loop.step));
  else
    t = fold_build2 (PLUS_EXPR, type, v_main, fd->loop.step);
  stmt = gimple_build_assign (v_back, t);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = build2 (fd->loop.cond_code, boolean_type_node, v_back, e);
  gsi_insert_before (&si, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove GIMPLE_OMP_CONTINUE.  */
  gsi_remove (&si, true);

  /* Trip update code goes into TRIP_UPDATE_BB.  */
  si = gsi_start_bb (trip_update_bb);

  t = build_int_cst (itype, 1);
  t = build2 (PLUS_EXPR, itype, trip_main, t);
  stmt = gimple_build_assign (trip_back, t);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  /* Replace the GIMPLE_OMP_RETURN with a barrier, or nothing.  */
  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)))
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* Connect the new blocks.  */
  find_edge (iter_part_bb, seq_start_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (iter_part_bb, fin_bb)->flags = EDGE_FALSE_VALUE;

  find_edge (cont_bb, body_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (cont_bb, trip_update_bb)->flags = EDGE_FALSE_VALUE;

  redirect_edge_and_branch (single_succ_edge (trip_update_bb), iter_part_bb);

  if (gimple_in_ssa_p (cfun))
    {
      gimple_stmt_iterator psi;
      gimple phi;
      edge re, ene;
      edge_var_map_vector head;
      edge_var_map *vm;
      size_t i;

      /* When we redirect the edge from trip_update_bb to iter_part_bb, we
         remove arguments of the phi nodes in fin_bb.  We need to create
         appropriate phi nodes in iter_part_bb instead.  */
      se = single_pred_edge (fin_bb);
      re = single_succ_edge (trip_update_bb);
      head = redirect_edge_var_map_vector (re);
      ene = single_succ_edge (entry_bb);

      psi = gsi_start_phis (fin_bb);
      for (i = 0; !gsi_end_p (psi) && VEC_iterate (edge_var_map, head, i, vm);
           gsi_next (&psi), ++i)
        {
          gimple nphi;
          source_location locus;

          phi = gsi_stmt (psi);
          t = gimple_phi_result (phi);
          gcc_assert (t == redirect_edge_var_map_result (vm));
          nphi = create_phi_node (t, iter_part_bb);
          SSA_NAME_DEF_STMT (t) = nphi;

          t = PHI_ARG_DEF_FROM_EDGE (phi, se);
          locus = gimple_phi_arg_location_from_edge (phi, se);

4527
          /* A special case -- fd->loop.v is not yet computed in
4528
             iter_part_bb, we need to use v_extra instead.  */
4529
          if (t == fd->loop.v)
4530
            t = v_extra;
4531
          add_phi_arg (nphi, t, ene, locus);
4532
          locus = redirect_edge_var_map_location (vm);
4533
          add_phi_arg (nphi, redirect_edge_var_map_def (vm), re, locus);
4534
        }
4535
      gcc_assert (!gsi_end_p (psi) && i == VEC_length (edge_var_map, head));
4536
      redirect_edge_var_map_clear (re);
4537
      while (1)
4538
        {
4539
          psi = gsi_start_phis (fin_bb);
4540
          if (gsi_end_p (psi))
4541
            break;
4542
          remove_phi_node (&psi, false);
4543
        }
4544
 
4545
      /* Make phi node for trip.  */
4546
      phi = create_phi_node (trip_main, iter_part_bb);
4547
      SSA_NAME_DEF_STMT (trip_main) = phi;
4548
      add_phi_arg (phi, trip_back, single_succ_edge (trip_update_bb),
4549
                   UNKNOWN_LOCATION);
4550
      add_phi_arg (phi, trip_init, single_succ_edge (entry_bb),
4551
                   UNKNOWN_LOCATION);
4552
    }
4553
 
4554
  set_immediate_dominator (CDI_DOMINATORS, trip_update_bb, cont_bb);
4555
  set_immediate_dominator (CDI_DOMINATORS, iter_part_bb,
4556
                           recompute_dominator (CDI_DOMINATORS, iter_part_bb));
4557
  set_immediate_dominator (CDI_DOMINATORS, fin_bb,
4558
                           recompute_dominator (CDI_DOMINATORS, fin_bb));
4559
  set_immediate_dominator (CDI_DOMINATORS, seq_start_bb,
4560
                           recompute_dominator (CDI_DOMINATORS, seq_start_bb));
4561
  set_immediate_dominator (CDI_DOMINATORS, body_bb,
4562
                           recompute_dominator (CDI_DOMINATORS, body_bb));
4563
}
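
/* A worked example of the partitioning computed above: thread TID
   executes, on each trip through ITER_PART_BB, the half-open range
   [s0, e0) with

       s0 = (trip * nthreads + tid) * chunk_size
       e0 = min (s0 + chunk_size, n)

   so for n = 10, nthreads = 2, chunk_size = 3, thread 0 runs logical
   iterations [0,3) and [6,9) (trips 0 and 1) while thread 1 runs
   [3,6) and [9,10); a thread whose s0 reaches n falls into FIN_BB.  */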

/* Expand the OpenMP loop defined by REGION.  */

static void
expand_omp_for (struct omp_region *region)
{
  struct omp_for_data fd;
  struct omp_for_data_loop *loops;

  loops
    = (struct omp_for_data_loop *)
      alloca (gimple_omp_for_collapse (last_stmt (region->entry))
              * sizeof (struct omp_for_data_loop));
  extract_omp_for_data (last_stmt (region->entry), &fd, loops);
  region->sched_kind = fd.sched_kind;

  gcc_assert (EDGE_COUNT (region->entry->succs) == 2);
  BRANCH_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  FALLTHRU_EDGE (region->entry)->flags &= ~EDGE_ABNORMAL;
  if (region->cont)
    {
      gcc_assert (EDGE_COUNT (region->cont->succs) == 2);
      BRANCH_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
      FALLTHRU_EDGE (region->cont)->flags &= ~EDGE_ABNORMAL;
    }

  if (fd.sched_kind == OMP_CLAUSE_SCHEDULE_STATIC
      && !fd.have_ordered
      && fd.collapse == 1
      && region->cont != NULL)
    {
      if (fd.chunk_size == NULL)
        expand_omp_for_static_nochunk (region, &fd);
      else
        expand_omp_for_static_chunk (region, &fd);
    }
  else
    {
      int fn_index, start_ix, next_ix;

      gcc_assert (fd.sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
      fn_index = (fd.sched_kind == OMP_CLAUSE_SCHEDULE_RUNTIME)
                  ? 3 : fd.sched_kind;
      fn_index += fd.have_ordered * 4;
      start_ix = BUILT_IN_GOMP_LOOP_STATIC_START + fn_index;
      next_ix = BUILT_IN_GOMP_LOOP_STATIC_NEXT + fn_index;
      if (fd.iter_type == long_long_unsigned_type_node)
        {
          start_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_START
                      - BUILT_IN_GOMP_LOOP_STATIC_START;
          next_ix += BUILT_IN_GOMP_LOOP_ULL_STATIC_NEXT
                     - BUILT_IN_GOMP_LOOP_STATIC_NEXT;
        }
      expand_omp_for_generic (region, &fd, (enum built_in_function) start_ix,
                              (enum built_in_function) next_ix);
    }

  update_ssa (TODO_update_ssa_only_virtuals);
}
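
/* An illustrative example of the fn_index arithmetic above (a sketch;
   it relies on the GOMP_loop_* builtins being declared in schedule
   order, static/dynamic/guided/runtime then the ordered variants, as
   in omp-builtins.def): for

       #pragma omp for schedule(guided) ordered

   fd.sched_kind is OMP_CLAUSE_SCHEDULE_GUIDED (2) and fd.have_ordered
   is set, so fn_index = 2 + 4 = 6 and the loop is driven by
   GOMP_loop_ordered_guided_start / GOMP_loop_ordered_guided_next.  */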

/* Expand code for an OpenMP sections directive.  In pseudo code, we generate

        v = GOMP_sections_start (n);
    L0:
        switch (v)
          {
          case 0:
            goto L2;
          case 1:
            section 1;
            goto L1;
          case 2:
            ...
          case n:
            ...
          default:
            abort ();
          }
    L1:
        v = GOMP_sections_next ();
        goto L0;
    L2:
        reduction;

    If this is a combined parallel sections, replace the call to
    GOMP_sections_start with a call to GOMP_sections_next.  */

static void
expand_omp_sections (struct omp_region *region)
{
  tree t, u, vin = NULL, vmain, vnext, l2;
  VEC (tree,heap) *label_vec;
  unsigned len;
  basic_block entry_bb, l0_bb, l1_bb, l2_bb, default_bb;
  gimple_stmt_iterator si, switch_si;
  gimple sections_stmt, stmt, cont;
  edge_iterator ei;
  edge e;
  struct omp_region *inner;
  unsigned i, casei;
  bool exit_reachable = region->cont != NULL;

  gcc_assert (exit_reachable == (region->exit != NULL));
  entry_bb = region->entry;
  l0_bb = single_succ (entry_bb);
  l1_bb = region->cont;
  l2_bb = region->exit;
  if (exit_reachable)
    {
      if (single_pred_p (l2_bb) && single_pred (l2_bb) == l0_bb)
        l2 = gimple_block_label (l2_bb);
      else
        {
          /* This can happen if there are reductions.  */
          len = EDGE_COUNT (l0_bb->succs);
          gcc_assert (len > 0);
          e = EDGE_SUCC (l0_bb, len - 1);
          si = gsi_last_bb (e->dest);
          l2 = NULL_TREE;
          if (gsi_end_p (si)
              || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
            l2 = gimple_block_label (e->dest);
          else
            FOR_EACH_EDGE (e, ei, l0_bb->succs)
              {
                si = gsi_last_bb (e->dest);
                if (gsi_end_p (si)
                    || gimple_code (gsi_stmt (si)) != GIMPLE_OMP_SECTION)
                  {
                    l2 = gimple_block_label (e->dest);
                    break;
                  }
              }
        }
      default_bb = create_empty_bb (l1_bb->prev_bb);
    }
  else
    {
      default_bb = create_empty_bb (l0_bb);
      l2 = gimple_block_label (default_bb);
    }

  /* We will build a switch() with enough cases for all the
     GIMPLE_OMP_SECTION regions, a '0' case to handle the end of more work
     and a default case to abort if something goes wrong.  */
  len = EDGE_COUNT (l0_bb->succs);

  /* Use VEC_quick_push on label_vec throughout, since we know the size
     in advance.  */
  label_vec = VEC_alloc (tree, heap, len);

  /* The call to GOMP_sections_start goes in ENTRY_BB, replacing the
     GIMPLE_OMP_SECTIONS statement.  */
  si = gsi_last_bb (entry_bb);
  sections_stmt = gsi_stmt (si);
  gcc_assert (gimple_code (sections_stmt) == GIMPLE_OMP_SECTIONS);
  vin = gimple_omp_sections_control (sections_stmt);
  if (!is_combined_parallel (region))
    {
      /* If we are not inside a combined parallel+sections region,
         call GOMP_sections_start.  */
      t = build_int_cst (unsigned_type_node,
                         exit_reachable ? len - 1 : len);
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_START];
      stmt = gimple_build_call (u, 1, t);
    }
  else
    {
      /* Otherwise, call GOMP_sections_next.  */
      u = built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT];
      stmt = gimple_build_call (u, 0);
    }
  gimple_call_set_lhs (stmt, vin);
  gsi_insert_after (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  /* The switch() statement replacing GIMPLE_OMP_SECTIONS_SWITCH goes in
     L0_BB.  */
  switch_si = gsi_last_bb (l0_bb);
  gcc_assert (gimple_code (gsi_stmt (switch_si)) == GIMPLE_OMP_SECTIONS_SWITCH);
  if (exit_reachable)
    {
      cont = last_stmt (l1_bb);
      gcc_assert (gimple_code (cont) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont);
      vnext = gimple_omp_continue_control_def (cont);
    }
  else
    {
      vmain = vin;
      vnext = NULL_TREE;
    }

  i = 0;
  if (exit_reachable)
    {
      t = build3 (CASE_LABEL_EXPR, void_type_node,
                  build_int_cst (unsigned_type_node, 0), NULL, l2);
      VEC_quick_push (tree, label_vec, t);
      i++;
    }

  /* Convert each GIMPLE_OMP_SECTION into a CASE_LABEL_EXPR.  */
  for (inner = region->inner, casei = 1;
       inner;
       inner = inner->next, i++, casei++)
    {
      basic_block s_entry_bb, s_exit_bb;

      /* Skip optional reduction region.  */
      if (inner->type == GIMPLE_OMP_ATOMIC_LOAD)
        {
          --i;
          --casei;
          continue;
        }

      s_entry_bb = inner->entry;
      s_exit_bb = inner->exit;

      t = gimple_block_label (s_entry_bb);
      u = build_int_cst (unsigned_type_node, casei);
      u = build3 (CASE_LABEL_EXPR, void_type_node, u, NULL, t);
      VEC_quick_push (tree, label_vec, u);

      si = gsi_last_bb (s_entry_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SECTION);
      gcc_assert (i < len || gimple_omp_section_last_p (gsi_stmt (si)));
      gsi_remove (&si, true);
      single_succ_edge (s_entry_bb)->flags = EDGE_FALLTHRU;

      if (s_exit_bb == NULL)
        continue;

      si = gsi_last_bb (s_exit_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
      gsi_remove (&si, true);

      single_succ_edge (s_exit_bb)->flags = EDGE_FALLTHRU;
    }

  /* Error handling code goes in DEFAULT_BB.  */
  t = gimple_block_label (default_bb);
  u = build3 (CASE_LABEL_EXPR, void_type_node, NULL, NULL, t);
  make_edge (l0_bb, default_bb, 0);

  stmt = gimple_build_switch_vec (vmain, u, label_vec);
  gsi_insert_after (&switch_si, stmt, GSI_SAME_STMT);
  gsi_remove (&switch_si, true);
  VEC_free (tree, heap, label_vec);

  si = gsi_start_bb (default_bb);
  stmt = gimple_build_call (built_in_decls[BUILT_IN_TRAP], 0);
  gsi_insert_after (&si, stmt, GSI_CONTINUE_LINKING);

  if (exit_reachable)
    {
      /* Code to get the next section goes in L1_BB.  */
      si = gsi_last_bb (l1_bb);
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CONTINUE);

      stmt = gimple_build_call (built_in_decls[BUILT_IN_GOMP_SECTIONS_NEXT], 0);
      gimple_call_set_lhs (stmt, vnext);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);

      single_succ_edge (l1_bb)->flags = EDGE_FALLTHRU;

      /* Cleanup function replaces GIMPLE_OMP_RETURN in EXIT_BB.  */
      si = gsi_last_bb (l2_bb);
      if (gimple_omp_return_nowait_p (gsi_stmt (si)))
        t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END_NOWAIT];
      else
        t = built_in_decls[BUILT_IN_GOMP_SECTIONS_END];
      stmt = gimple_build_call (t, 0);
      gsi_insert_after (&si, stmt, GSI_SAME_STMT);
      gsi_remove (&si, true);
    }

  set_immediate_dominator (CDI_DOMINATORS, default_bb, l0_bb);
}
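
/* For illustration, a two-section input such as

       #pragma omp sections
       {
       #pragma omp section
         foo ();
       #pragma omp section
         bar ();
       }

   becomes a switch on v with case 1 calling foo, case 2 calling bar,
   case 0 jumping past the construct, and the default case trapping
   (BUILT_IN_TRAP) on an out-of-range section number.  */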

/* Expand code for an OpenMP single directive.  We've already expanded
   much of the code; here we simply place the GOMP_barrier call.  */

static void
expand_omp_single (struct omp_region *region)
{
  basic_block entry_bb, exit_bb;
  gimple_stmt_iterator si;
  bool need_barrier = false;

  entry_bb = region->entry;
  exit_bb = region->exit;

  si = gsi_last_bb (entry_bb);
  /* The terminal barrier at the end of a GOMP_single_copy sequence cannot
     be removed.  We need to ensure that the thread that entered the single
     does not exit before the data is copied out by the other threads.  */
  if (find_omp_clause (gimple_omp_single_clauses (gsi_stmt (si)),
                       OMP_CLAUSE_COPYPRIVATE))
    need_barrier = true;
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE);
  gsi_remove (&si, true);
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;

  si = gsi_last_bb (exit_bb);
  if (!gimple_omp_return_nowait_p (gsi_stmt (si)) || need_barrier)
    force_gimple_operand_gsi (&si, build_omp_barrier (), false, NULL_TREE,
                              false, GSI_SAME_STMT);
  gsi_remove (&si, true);
  single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
}
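
/* E.g. for

       #pragma omp single copyprivate (x) nowait

   the barrier is emitted despite the nowait clause: the thread that
   executed the single body must not leave before every other thread
   has copied x out of the broadcast buffer.  */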
4880
 
4881
 
4882
/* Generic expansion for OpenMP synchronization directives: master,
4883
   ordered and critical.  All we need to do here is remove the entry
4884
   and exit markers for REGION.  */
4885
 
4886
static void
4887
expand_omp_synch (struct omp_region *region)
4888
{
4889
  basic_block entry_bb, exit_bb;
4890
  gimple_stmt_iterator si;
4891
 
4892
  entry_bb = region->entry;
4893
  exit_bb = region->exit;
4894
 
4895
  si = gsi_last_bb (entry_bb);
4896
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_SINGLE
4897
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_MASTER
4898
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ORDERED
4899
              || gimple_code (gsi_stmt (si)) == GIMPLE_OMP_CRITICAL);
4900
  gsi_remove (&si, true);
4901
  single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
4902
 
4903
  if (exit_bb)
4904
    {
4905
      si = gsi_last_bb (exit_bb);
4906
      gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_RETURN);
4907
      gsi_remove (&si, true);
4908
      single_succ_edge (exit_bb)->flags = EDGE_FALLTHRU;
4909
    }
4910
}

/* A subroutine of expand_omp_atomic.  Attempt to implement the atomic
   operation as a __sync_fetch_and_op builtin.  INDEX is log2 of the
   size of the data type, and thus usable to find the index of the builtin
   decl.  Returns false if the expression is not of the proper form.  */

static bool
expand_omp_atomic_fetch_op (basic_block load_bb,
                            tree addr, tree loaded_val,
                            tree stored_val, int index)
{
  enum built_in_function base;
  tree decl, itype, call;
  enum insn_code *optab;
  tree rhs;
  basic_block store_bb = single_succ (load_bb);
  gimple_stmt_iterator gsi;
  gimple stmt;
  location_t loc;

  /* We expect to find the following sequences:

   load_bb:
       GIMPLE_OMP_ATOMIC_LOAD (tmp, mem)

   store_bb:
       val = tmp OP something; (or: something OP tmp)
       GIMPLE_OMP_ATOMIC_STORE (val)

   ??? FIXME: Allow a more flexible sequence.
   Perhaps use data flow to pick the statements.  */

  gsi = gsi_after_labels (store_bb);
  stmt = gsi_stmt (gsi);
  loc = gimple_location (stmt);
  if (!is_gimple_assign (stmt))
    return false;
  gsi_next (&gsi);
  if (gimple_code (gsi_stmt (gsi)) != GIMPLE_OMP_ATOMIC_STORE)
    return false;

  if (!operand_equal_p (gimple_assign_lhs (stmt), stored_val, 0))
    return false;

  /* Check for one of the supported fetch-op operations.  */
  switch (gimple_assign_rhs_code (stmt))
    {
    case PLUS_EXPR:
    case POINTER_PLUS_EXPR:
      base = BUILT_IN_FETCH_AND_ADD_N;
      optab = sync_add_optab;
      break;
    case MINUS_EXPR:
      base = BUILT_IN_FETCH_AND_SUB_N;
      optab = sync_add_optab;
      break;
    case BIT_AND_EXPR:
      base = BUILT_IN_FETCH_AND_AND_N;
      optab = sync_and_optab;
      break;
    case BIT_IOR_EXPR:
      base = BUILT_IN_FETCH_AND_OR_N;
      optab = sync_ior_optab;
      break;
    case BIT_XOR_EXPR:
      base = BUILT_IN_FETCH_AND_XOR_N;
      optab = sync_xor_optab;
      break;
    default:
      return false;
    }
  /* Make sure the expression is of the proper form.  */
  if (operand_equal_p (gimple_assign_rhs1 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs2 (stmt);
  else if (commutative_tree_code (gimple_assign_rhs_code (stmt))
           && operand_equal_p (gimple_assign_rhs2 (stmt), loaded_val, 0))
    rhs = gimple_assign_rhs1 (stmt);
  else
    return false;

  decl = built_in_decls[base + index + 1];
  itype = TREE_TYPE (TREE_TYPE (decl));

  if (optab[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  gsi = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_LOAD);
  call = build_call_expr_loc (loc,
                          decl, 2, addr,
                          fold_convert_loc (loc, itype, rhs));
  call = fold_convert_loc (loc, void_type_node, call);
  force_gimple_operand_gsi (&gsi, call, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  gsi = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_ATOMIC_STORE);
  gsi_remove (&gsi, true);
  gsi = gsi_last_bb (store_bb);
  gsi_remove (&gsi, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
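
/* For example, for a naturally aligned 4-byte int whose target has a
   fetch-and-add pattern, the statement

       #pragma omp atomic
       x += 5;

   matches the PLUS_EXPR case above (index == 2, so decl is the
   __sync_fetch_and_add_4 builtin) and is expanded to roughly

       __sync_fetch_and_add_4 (&x, 5);

   with no compare-and-swap loop at all.  */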

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

      oldval = *addr;
      repeat:
        newval = rhs;    // with oldval replacing *addr in rhs
        oldval = __sync_val_compare_and_swap (addr, oldval, newval);
        if (oldval != newval)
          goto repeat;

   INDEX is log2 of the size of the data type, and thus usable to find the
   index of the builtin decl.  */

static bool
expand_omp_atomic_pipeline (basic_block load_bb, basic_block store_bb,
                            tree addr, tree loaded_val, tree stored_val,
                            int index)
{
  tree loadedi, storedi, initial, new_storedi, old_vali;
  tree type, itype, cmpxchg, iaddr;
  gimple_stmt_iterator si;
  basic_block loop_header = single_succ (load_bb);
  gimple phi, stmt;
  edge e;

  cmpxchg = built_in_decls[BUILT_IN_VAL_COMPARE_AND_SWAP_N + index + 1];
  type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  itype = TREE_TYPE (TREE_TYPE (cmpxchg));

  if (sync_compare_and_swap[TYPE_MODE (itype)] == CODE_FOR_nothing)
    return false;

  /* Load the initial value, replacing the GIMPLE_OMP_ATOMIC_LOAD.  */
  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  /* For floating-point values, we'll need to view-convert them to integers
     so that we can perform the atomic compare and swap.  Simplify the
     following code by always setting up the "i"ntegral variables.  */
  if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
    {
      tree iaddr_val;

      iaddr = create_tmp_var (build_pointer_type_for_mode (itype, ptr_mode,
                                                           true), NULL);
      iaddr_val
        = force_gimple_operand_gsi (&si,
                                    fold_convert (TREE_TYPE (iaddr), addr),
                                    false, NULL_TREE, true, GSI_SAME_STMT);
      stmt = gimple_build_assign (iaddr, iaddr_val);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
      loadedi = create_tmp_var (itype, NULL);
      if (gimple_in_ssa_p (cfun))
        {
          add_referenced_var (iaddr);
          add_referenced_var (loadedi);
          loadedi = make_ssa_name (loadedi, NULL);
        }
    }
  else
    {
      iaddr = addr;
      loadedi = loaded_val;
    }

  initial = force_gimple_operand_gsi (&si, build_fold_indirect_ref (iaddr),
                                      true, NULL_TREE, true, GSI_SAME_STMT);

  /* Move the value to the LOADEDI temporary.  */
  if (gimple_in_ssa_p (cfun))
    {
      gcc_assert (gimple_seq_empty_p (phi_nodes (loop_header)));
      phi = create_phi_node (loadedi, loop_header);
      SSA_NAME_DEF_STMT (loadedi) = phi;
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (load_bb)),
               initial);
    }
  else
    gsi_insert_before (&si,
                       gimple_build_assign (loadedi, initial),
                       GSI_SAME_STMT);
  if (loadedi != loaded_val)
    {
      gimple_stmt_iterator gsi2;
      tree x;

      x = build1 (VIEW_CONVERT_EXPR, type, loadedi);
      gsi2 = gsi_start_bb (loop_header);
      if (gimple_in_ssa_p (cfun))
        {
          gimple stmt;
          x = force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
                                        true, GSI_SAME_STMT);
          stmt = gimple_build_assign (loaded_val, x);
          gsi_insert_before (&gsi2, stmt, GSI_SAME_STMT);
        }
      else
        {
          x = build2 (MODIFY_EXPR, TREE_TYPE (loaded_val), loaded_val, x);
          force_gimple_operand_gsi (&gsi2, x, true, NULL_TREE,
                                    true, GSI_SAME_STMT);
        }
    }
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  if (iaddr == addr)
    storedi = stored_val;
  else
    storedi =
      force_gimple_operand_gsi (&si,
                                build1 (VIEW_CONVERT_EXPR, itype,
                                        stored_val), true, NULL_TREE, true,
                                GSI_SAME_STMT);

  /* Build the compare&swap statement.  */
  new_storedi = build_call_expr (cmpxchg, 3, iaddr, loadedi, storedi);
  new_storedi = force_gimple_operand_gsi (&si,
                                          fold_convert (TREE_TYPE (loadedi),
                                                        new_storedi),
                                          true, NULL_TREE,
                                          true, GSI_SAME_STMT);

  if (gimple_in_ssa_p (cfun))
    old_vali = loadedi;
  else
    {
      old_vali = create_tmp_var (TREE_TYPE (loadedi), NULL);
      if (gimple_in_ssa_p (cfun))
        add_referenced_var (old_vali);
      stmt = gimple_build_assign (old_vali, loadedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);

      stmt = gimple_build_assign (loadedi, new_storedi);
      gsi_insert_before (&si, stmt, GSI_SAME_STMT);
    }

  /* Note that we always perform the comparison as an integer, even for
     floating point.  This allows the atomic operation to properly
     succeed even with NaNs and -0.0.  */
  stmt = gimple_build_cond_empty
           (build2 (NE_EXPR, boolean_type_node,
                    new_storedi, old_vali));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  /* Update cfg.  */
  e = single_succ_edge (store_bb);
  e->flags &= ~EDGE_FALLTHRU;
  e->flags |= EDGE_FALSE_VALUE;

  e = make_edge (store_bb, loop_header, EDGE_TRUE_VALUE);

  /* Copy the new value to loadedi (we already did that before the condition
     if we are not in SSA).  */
  if (gimple_in_ssa_p (cfun))
    {
      phi = gimple_seq_first_stmt (phi_nodes (loop_header));
      SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, e), new_storedi);
    }

  /* Remove GIMPLE_OMP_ATOMIC_STORE.  */
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);

  return true;
}
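
/* A sketch of the expansion for a float (assuming no fetch-and-op
   pattern applies): for

       #pragma omp atomic
       f *= 2.0f;

   the float is accessed through an integer alias of the same size,
   the new value is produced via VIEW_CONVERT_EXPRs, and the
   4-byte __sync_val_compare_and_swap retries until the stored bit
   pattern matches the loaded one.  Comparing the integer images, as
   noted above, is what keeps the loop from spinning forever on NaNs
   (which compare unequal to themselves) or on -0.0 == 0.0.  */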

/* A subroutine of expand_omp_atomic.  Implement the atomic operation as:

                                  GOMP_atomic_start ();
                                  *addr = rhs;
                                  GOMP_atomic_end ();

   The result is not globally atomic, but works so long as all parallel
   references are within #pragma omp atomic directives.  According to
   responses received from omp@openmp.org, this appears to be within spec,
   and it is how several other compilers handle the situation as well.
   LOADED_VAL and ADDR are the operands of GIMPLE_OMP_ATOMIC_LOAD we're
   expanding.  STORED_VAL is the operand of the matching
   GIMPLE_OMP_ATOMIC_STORE.

   We replace
   GIMPLE_OMP_ATOMIC_LOAD (loaded_val, addr) with
   loaded_val = *addr;

   and replace
   GIMPLE_OMP_ATOMIC_STORE (stored_val) with
   *addr = stored_val;  */

static bool
expand_omp_atomic_mutex (basic_block load_bb, basic_block store_bb,
                         tree addr, tree loaded_val, tree stored_val)
{
  gimple_stmt_iterator si;
  gimple stmt;
  tree t;

  si = gsi_last_bb (load_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_LOAD);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_START];
  t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);

  stmt = gimple_build_assign (loaded_val, build_fold_indirect_ref (addr));
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);
  gsi_remove (&si, true);

  si = gsi_last_bb (store_bb);
  gcc_assert (gimple_code (gsi_stmt (si)) == GIMPLE_OMP_ATOMIC_STORE);

  stmt = gimple_build_assign (build_fold_indirect_ref (unshare_expr (addr)),
                                stored_val);
  gsi_insert_before (&si, stmt, GSI_SAME_STMT);

  t = built_in_decls[BUILT_IN_GOMP_ATOMIC_END];
  t = build_function_call_expr (UNKNOWN_LOCATION, t, 0);
  force_gimple_operand_gsi (&si, t, true, NULL_TREE, true, GSI_SAME_STMT);
  gsi_remove (&si, true);

  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_no_phi);
  return true;
}

/* Expand a GIMPLE_OMP_ATOMIC statement.  We first try to expand it
   using expand_omp_atomic_fetch_op.  If that fails, we try
   expand_omp_atomic_pipeline, and if that fails too, the ultimate
   fallback is wrapping the operation in a mutex
   (expand_omp_atomic_mutex).  REGION is the atomic region built
   by build_omp_regions_1().  */

static void
expand_omp_atomic (struct omp_region *region)
{
  basic_block load_bb = region->entry, store_bb = region->exit;
  gimple load = last_stmt (load_bb), store = last_stmt (store_bb);
  tree loaded_val = gimple_omp_atomic_load_lhs (load);
  tree addr = gimple_omp_atomic_load_rhs (load);
  tree stored_val = gimple_omp_atomic_store_val (store);
  tree type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (addr)));
  HOST_WIDE_INT index;

  /* Make sure the type is one of the supported sizes.  */
  index = tree_low_cst (TYPE_SIZE_UNIT (type), 1);
  index = exact_log2 (index);
  if (index >= 0 && index <= 4)
    {
      unsigned int align = TYPE_ALIGN_UNIT (type);

      /* __sync builtins require strict data alignment.  */
      if (exact_log2 (align) >= index)
        {
          /* When possible, use specialized atomic update functions.  */
          if ((INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type))
              && store_bb == single_succ (load_bb))
            {
              if (expand_omp_atomic_fetch_op (load_bb, addr,
                                              loaded_val, stored_val, index))
                return;
            }

          /* If we don't have specialized __sync builtins, try to implement
             the operation as a compare and swap loop.  */
          if (expand_omp_atomic_pipeline (load_bb, store_bb, addr,
                                          loaded_val, stored_val, index))
            return;
        }
    }

  /* The ultimate fallback is wrapping the operation in a mutex.  */
  expand_omp_atomic_mutex (load_bb, store_bb, addr, loaded_val, stored_val);
}
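
/* Worked example of the dispatch above: a naturally aligned 4-byte
   int has index == 2 and exact_log2 (align) == 2, so the fetch-op
   and then the compare-and-swap expansions are attempted in turn.
   A type whose size is not a power of two (e.g., a 12-byte long
   double on some targets) yields index < 0 and falls straight
   through to the GOMP_atomic_start/end mutex fallback.  */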

/* Expand the parallel region tree rooted at REGION.  Expansion
   proceeds in depth-first order.  Innermost regions are expanded
   first.  This way, parallel regions that require a new function to
   be created (e.g., GIMPLE_OMP_PARALLEL) can be expanded without having any
   internal dependencies in their body.  */

static void
expand_omp (struct omp_region *region)
{
  while (region)
    {
      location_t saved_location;

      /* First, determine whether this is a combined parallel+workshare
         region.  */
      if (region->type == GIMPLE_OMP_PARALLEL)
        determine_parallel_type (region);

      if (region->inner)
        expand_omp (region->inner);

      saved_location = input_location;
      if (gimple_has_location (last_stmt (region->entry)))
        input_location = gimple_location (last_stmt (region->entry));

      switch (region->type)
        {
        case GIMPLE_OMP_PARALLEL:
        case GIMPLE_OMP_TASK:
          expand_omp_taskreg (region);
          break;

        case GIMPLE_OMP_FOR:
          expand_omp_for (region);
          break;

        case GIMPLE_OMP_SECTIONS:
          expand_omp_sections (region);
          break;

        case GIMPLE_OMP_SECTION:
          /* Individual omp sections are handled together with their
             parent GIMPLE_OMP_SECTIONS region.  */
          break;

        case GIMPLE_OMP_SINGLE:
          expand_omp_single (region);
          break;

        case GIMPLE_OMP_MASTER:
        case GIMPLE_OMP_ORDERED:
        case GIMPLE_OMP_CRITICAL:
          expand_omp_synch (region);
          break;

        case GIMPLE_OMP_ATOMIC_LOAD:
          expand_omp_atomic (region);
          break;

        default:
          gcc_unreachable ();
        }

      input_location = saved_location;
      region = region->next;
    }
}


/* Helper for build_omp_regions.  Scan the dominator tree starting at
   block BB.  PARENT is the region that contains BB.  If SINGLE_TREE is
   true, the function ends once a single tree is built (otherwise, a
   whole forest of OMP constructs may be built).  */

static void
build_omp_regions_1 (basic_block bb, struct omp_region *parent,
                     bool single_tree)
{
  gimple_stmt_iterator gsi;
  gimple stmt;
  basic_block son;

  gsi = gsi_last_bb (bb);
  if (!gsi_end_p (gsi) && is_gimple_omp (gsi_stmt (gsi)))
    {
      struct omp_region *region;
      enum gimple_code code;

      stmt = gsi_stmt (gsi);
      code = gimple_code (stmt);
      if (code == GIMPLE_OMP_RETURN)
        {
          /* STMT is the return point out of region PARENT.  Mark it
             as the exit point and make PARENT the immediately
             enclosing region.  */
          gcc_assert (parent);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == GIMPLE_OMP_ATOMIC_STORE)
        {
          /* GIMPLE_OMP_ATOMIC_STORE is analogous to
             GIMPLE_OMP_RETURN, but matches with
             GIMPLE_OMP_ATOMIC_LOAD.  */
          gcc_assert (parent);
          gcc_assert (parent->type == GIMPLE_OMP_ATOMIC_LOAD);
          region = parent;
          region->exit = bb;
          parent = parent->outer;
        }
      else if (code == GIMPLE_OMP_CONTINUE)
        {
          gcc_assert (parent);
          parent->cont = bb;
        }
      else if (code == GIMPLE_OMP_SECTIONS_SWITCH)
        {
          /* GIMPLE_OMP_SECTIONS_SWITCH is part of
             GIMPLE_OMP_SECTIONS, and we do nothing for it.  */
          ;
        }
      else
        {
          /* Otherwise, this directive becomes the parent for a new
             region.  */
          region = new_omp_region (bb, code, parent);
          parent = region;
        }
    }

  if (single_tree && !parent)
    return;

  for (son = first_dom_son (CDI_DOMINATORS, bb);
       son;
       son = next_dom_son (CDI_DOMINATORS, son))
    build_omp_regions_1 (son, parent, single_tree);
}
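
/* For illustration, a nest such as

       #pragma omp parallel
       #pragma omp for
       for (...) ...

   produces a GIMPLE_OMP_PARALLEL region whose inner chain holds a
   GIMPLE_OMP_FOR region; each GIMPLE_OMP_RETURN encountered during
   the dominator-tree walk closes the innermost open region and pops
   back to its outer parent.  */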

/* Build the tree of OMP regions rooted at ROOT, storing it in
   root_omp_region.  */

static void
build_omp_regions_root (basic_block root)
{
  gcc_assert (root_omp_region == NULL);
  build_omp_regions_1 (root, NULL, true);
  gcc_assert (root_omp_region != NULL);
}

/* Expand the OMP construct (and its subconstructs) starting in HEAD.  */

void
omp_expand_local (basic_block head)
{
  build_omp_regions_root (head);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);
  expand_omp (root_omp_region);

  free_omp_regions ();
}

/* Scan the CFG and build a tree of OMP regions, storing the root in
   root_omp_region.  */

static void
build_omp_regions (void)
{
  gcc_assert (root_omp_region == NULL);
  calculate_dominance_info (CDI_DOMINATORS);
  build_omp_regions_1 (ENTRY_BLOCK_PTR, NULL, false);
}

/* Main entry point for expanding OMP-GIMPLE into runtime calls.  */

static unsigned int
execute_expand_omp (void)
{
  build_omp_regions ();

  if (!root_omp_region)
    return 0;

  if (dump_file)
    {
      fprintf (dump_file, "\nOMP region tree\n\n");
      dump_omp_region (dump_file, root_omp_region, 0);
      fprintf (dump_file, "\n");
    }

  remove_exit_barriers (root_omp_region);

  expand_omp (root_omp_region);

  cleanup_tree_cfg ();

  free_omp_regions ();

  return 0;
}

/* OMP expansion -- the default pass, run before creation of SSA form.  */

static bool
gate_expand_omp (void)
{
  return (flag_openmp != 0 && errorcount == 0);
}

struct gimple_opt_pass pass_expand_omp =
{
 {
  GIMPLE_PASS,
  "ompexp",                             /* name */
  gate_expand_omp,                      /* gate */
  execute_expand_omp,                   /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};

/* Routines to lower OpenMP directives into OMP-GIMPLE.  */

/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */

static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  unsigned i, len;
  gimple stmt, new_stmt, bind, t;
  gimple_seq ilist, dlist, olist, new_body, body;
  struct gimplify_ctx gctx;

  stmt = gsi_stmt (*gsi_p);

  push_gimplify_context (&gctx);

  dlist = NULL;
  ilist = NULL;
  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
                           &ilist, &dlist, ctx);

  tgsi = gsi_start (gimple_omp_body (stmt));
  for (len = 0; !gsi_end_p (tgsi); len++, gsi_next (&tgsi))
    continue;

  tgsi = gsi_start (gimple_omp_body (stmt));
  body = NULL;
  for (i = 0; i < len; i++, gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      gimple_seq_add_stmt (&body, sec_start);

      lower_omp (gimple_omp_body (sec_start), sctx);
      gimple_seq_add_seq (&body, gimple_omp_body (sec_start));
      gimple_omp_set_body (sec_start, NULL);

      if (i == len - 1)
        {
          gimple_seq l = NULL;
          lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
                                     &l, ctx);
          gimple_seq_add_seq (&body, l);
          gimple_omp_section_set_last (sec_start);
        }

      gimple_seq_add_stmt (&body, gimple_build_omp_return (false));
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist, ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  control = create_tmp_var (unsigned_type_node, ".section");
  t = gimple_build_omp_continue (control, control);
  gimple_omp_sections_set_control (stmt, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_sections_clauses (stmt),
                            OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&new_body, t);

  gimple_bind_set_body (new_stmt, new_body);
  gimple_omp_set_body (stmt, NULL);

  gsi_replace (gsi_p, new_stmt, true);
}
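
/* After lowering, the construct has roughly the shape

       <ilist>                          (private/firstprivate setup)
       GIMPLE_OMP_SECTIONS <control>
       GIMPLE_OMP_SECTIONS_SWITCH
       bind { each section body, ending in GIMPLE_OMP_RETURN }
       GIMPLE_OMP_CONTINUE (control, control)
       <olist> <dlist>                  (reductions, destructors)
       GIMPLE_OMP_RETURN [nowait]

   which pass_expand_omp later turns into the switch shown in the
   comment before expand_omp_sections above.  */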

/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

        if (GOMP_single_start ())
          BODY;
        [ GOMP_barrier (); ]    -> unless 'nowait' is present.

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */

static void
lower_omp_single_simple (gimple single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree tlabel = create_artificial_label (loc);
  tree flabel = create_artificial_label (loc);
  gimple call, cond;
  tree lhs, decl;

  decl = built_in_decls[BUILT_IN_GOMP_SINGLE_START];
  lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (decl)), NULL);
  call = gimple_build_call (decl, 0);
  gimple_call_set_lhs (call, lhs);
  gimple_seq_add_stmt (pre_p, call);

  cond = gimple_build_cond (EQ_EXPR, lhs,
                            fold_convert_loc (loc, TREE_TYPE (lhs),
                                              boolean_true_node),
                            tlabel, flabel);
  gimple_seq_add_stmt (pre_p, cond);
  gimple_seq_add_stmt (pre_p, gimple_build_label (tlabel));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (flabel));
}

/* A subroutine of lower_omp_single.  Expand the copyprivate form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

        #pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

      {
        if ((copyout_p = GOMP_single_copy_start ()) == NULL)
          {
            BODY;
            copyout.a = a;
            copyout.b = b;
            copyout.c = c;
            GOMP_single_copy_end (&copyout);
          }
        else
          {
            a = copyout_p->a;
            b = copyout_p->b;
            c = copyout_p->c;
          }
        GOMP_barrier ();
      }

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  for a synchronization analysis pass.  */

static void
lower_omp_single_copy (gimple single_stmt, gimple_seq *pre_p, omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_START], 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
              build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
              build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
                              &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  t = build_call_expr_loc (loc, built_in_decls[BUILT_IN_GOMP_SINGLE_COPY_END],
                       1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}

/* Lower code for an OpenMP single directive.  */

static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple t, bind, single_stmt = gsi_stmt (*gsi_p);
  gimple_seq bind_body, dlist;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  bind_body = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
                           &bind_body, &dlist, ctx);
  lower_omp (gimple_omp_body (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  t = gimple_build_omp_return
        (!!find_omp_clause (gimple_omp_single_clauses (single_stmt),
                            OMP_CLAUSE_NOWAIT));
  gimple_seq_add_stmt (&bind_body, t);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, bind_body, block);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}

/* Lower code for an OpenMP master directive.  */

static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
                                 block);

  x = build_call_expr_loc (loc, built_in_decls[BUILT_IN_OMP_GET_THREAD_NUM], 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  gsi_replace (gsi_p, bind, true);
}
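
/* The lowering above amounts to

       if (omp_get_thread_num () != 0) goto lab;
       BODY;
   lab:
       GIMPLE_OMP_RETURN (nowait)

   i.e. only the master thread executes BODY, and no barrier is
   implied (the OMP_RETURN is built with nowait set).  */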

/* Lower code for an OpenMP ordered directive.  */

static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gimple stmt = gsi_stmt (*gsi_p), bind, x;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt),
                                   block);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_START], 0);
  gimple_bind_add_stmt (bind, x);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  x = gimple_build_call (built_in_decls[BUILT_IN_GOMP_ORDERED_END], 0);
  gimple_bind_add_stmt (bind, x);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}

/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
   substitution of a couple of function calls.  But the NAMED case
   requires that the languages coordinate on a symbol name.  It is
   therefore best put here in common code.  */

static GTY((param1_is (tree), param2_is (tree)))
  splay_tree critical_name_mutexes;

static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  tree name, lock, unlock;
  gimple stmt = gsi_stmt (*gsi_p), bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tbody;
  struct gimplify_ctx gctx;

  name = gimple_omp_critical_name (stmt);
  if (name)
    {
      tree decl;
      splay_tree_node n;

      if (!critical_name_mutexes)
        critical_name_mutexes
          = splay_tree_new_ggc (splay_tree_compare_pointers);

      n = splay_tree_lookup (critical_name_mutexes, (splay_tree_key) name);
      if (n == NULL)
        {
          char *new_str;

          decl = create_tmp_var_raw (ptr_type_node, NULL);

          new_str = ACONCAT ((".gomp_critical_user_",
                              IDENTIFIER_POINTER (name), NULL));
          DECL_NAME (decl) = get_identifier (new_str);
          TREE_PUBLIC (decl) = 1;
          TREE_STATIC (decl) = 1;
          DECL_COMMON (decl) = 1;
          DECL_ARTIFICIAL (decl) = 1;
          DECL_IGNORED_P (decl) = 1;
          varpool_finalize_decl (decl);

          splay_tree_insert (critical_name_mutexes, (splay_tree_key) name,
                             (splay_tree_value) decl);
        }
      else
        decl = (tree) n->value;

      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_START];
      lock = build_call_expr_loc (loc, lock, 1, build_fold_addr_expr_loc (loc, decl));

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_NAME_END];
      unlock = build_call_expr_loc (loc, unlock, 1,
                                build_fold_addr_expr_loc (loc, decl));
    }
  else
    {
      lock = built_in_decls[BUILT_IN_GOMP_CRITICAL_START];
      lock = build_call_expr_loc (loc, lock, 0);

      unlock = built_in_decls[BUILT_IN_GOMP_CRITICAL_END];
      unlock = build_call_expr_loc (loc, unlock, 0);
    }

  push_gimplify_context (&gctx);

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, gimple_seq_alloc_with_stmt (stmt), block);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (lock, &tbody);
  gimple_bind_set_body (bind, tbody);

  lower_omp (gimple_omp_body (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  tbody = gimple_bind_body (bind);
  gimplify_and_add (unlock, &tbody);
  gimple_bind_set_body (bind, tbody);

  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);
  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  gsi_replace (gsi_p, bind, true);
}
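
/* E.g. a named critical section

       #pragma omp critical (io)
       body;

   is bracketed by GOMP_critical_name_start/end calls on the mutex
   ".gomp_critical_user_io"; because the decl is TREE_PUBLIC and
   DECL_COMMON, every translation unit using the same name resolves
   to one shared lock.  */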

/* A subroutine of lower_omp_for.  Generate code to emit the predicate
   for a lastprivate clause.  Given a loop control predicate of (V
   cond N2), we gate the clause on (!(V cond N2)).  The lowered form
   is appended to *DLIST; iterator initialization is appended to
   *BODY_P.  */

static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
                           gimple_seq *dlist, struct omp_context *ctx)
{
  tree clauses, cond, vinit;
  enum tree_code cond_code;
  gimple_seq stmts;

  cond_code = fd->loop.cond_code;
  cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;

  /* When possible, use a strict equality expression.  This can let
     VRP-style optimizations deduce the value and remove a copy.  */
  if (host_integerp (fd->loop.step, 0))
    {
      HOST_WIDE_INT step = TREE_INT_CST_LOW (fd->loop.step);
      if (step == 1 || step == -1)
        cond_code = EQ_EXPR;
    }

  cond = build2 (cond_code, boolean_type_node, fd->loop.v, fd->loop.n2);

  clauses = gimple_omp_for_clauses (fd->for_stmt);
  stmts = NULL;
  lower_lastprivate_clauses (clauses, cond, &stmts, ctx);
  if (!gimple_seq_empty_p (stmts))
    {
      gimple_seq_add_seq (&stmts, *dlist);
      *dlist = stmts;

      /* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
      vinit = fd->loop.n1;
      if (cond_code == EQ_EXPR
          && host_integerp (fd->loop.n2, 0)
          && ! integer_zerop (fd->loop.n2))
        vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);

      /* Initialize the iterator variable, so that threads that don't execute
         any iterations don't execute the lastprivate clauses by accident.  */
      gimplify_assign (fd->loop.v, vinit, body_p);
    }
}
6020
 
6021
 
6022
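/* For illustration, given

       #pragma omp for lastprivate (x)
       for (i = 0; i < n; i++)
         x = a[i];

   the copy-out appended to *DLIST is gated on the negated loop
   predicate (strict equality when the step is known to be +-1),
   approximately

       if (i >= n)
         x = x_private;

   so only the thread whose iterator reached the global end performs
   the final assignment.  X_PRIVATE here is just an illustrative name
   for the thread's private copy.  */
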
/* Lower code for an OpenMP loop directive.  */

static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd;
  gimple stmt = gsi_stmt (*gsi_p), new_stmt;
  gimple_seq omp_for_body, body, dlist;
  size_t i;
  struct gimplify_ctx gctx;

  push_gimplify_context (&gctx);

  lower_omp (gimple_omp_for_pre_body (stmt), ctx);
  lower_omp (gimple_omp_body (stmt), ctx);

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);

  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      tree vars = gimple_bind_vars (gimple_seq_first_stmt (omp_for_body));
      gimple_bind_append_vars (new_stmt, vars);
    }

  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx);
  gimple_seq_add_seq (&body, gimple_omp_for_pre_body (stmt));

  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:

        #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)

     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);

      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
        *rhs_p = get_formal_tmp_var (*rhs_p, &body);
    }

  /* Once lowered, extract the bounds and clauses.  */
  extract_omp_for_data (stmt, &fd, NULL);

  lower_omp_for_lastprivate (&fd, &body, &dlist, ctx);

  gimple_seq_add_stmt (&body, stmt);
  gimple_seq_add_seq (&body, gimple_omp_body (stmt));

  gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
                                                         fd.loop.v));

  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, ctx);
  gimple_seq_add_seq (&body, dlist);

  body = maybe_catch_exception (body);

  /* Region exit marker goes at the end of the loop body.  */
  gimple_seq_add_stmt (&body, gimple_build_omp_return (fd.have_nowait));

  pop_gimplify_context (new_stmt);

  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  gimple_bind_set_body (new_stmt, body);
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
  gsi_replace (gsi_p, new_stmt, true);
}

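/* After this function, the statement at GSI_P has roughly the following
   shape (an illustrative sketch; the exact sequences come from the
   clause lowering helpers above):

       <bind>
         ... private/firstprivate setup (lower_rec_input_clauses) ...
         ... pre-body and VAL1/VAL2/VAL3 temporaries ...
         ... lastprivate iterator initialization, if needed ...
         #pragma omp for ...
           ... lowered loop body ...
         GIMPLE_OMP_CONTINUE (V, V)
         ... reduction merges, then DLIST (lastprivate/dtors) ...
         GIMPLE_OMP_RETURN (nowait)
       </bind>  */
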
/* Callback for walk_stmts.  Check if the current statement only contains
   GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS.  */

static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
                         bool *handled_ops_p,
                         struct walk_stmt_info *wi)
{
  int *info = (int *) wi->info;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      *info = *info == 0 ? 1 : -1;
      break;
    default:
      *info = -1;
      break;
    }
  return NULL;
}

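/* For example, in

       #pragma omp parallel
       #pragma omp for
       for (i = 0; i < n; i++)
         ...

   the walk sees exactly one worksharing construct and leaves *INFO at 1,
   which lets lower_omp_taskreg mark the parallel as combined; any other
   statement, or a second construct, forces *INFO to -1.  */
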
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  omp_context *ctx;
};

static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return create_tmp_var (TREE_TYPE (var), NULL);

  return var;
}

static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree name, new_fields = NULL, type, f;

  type = lang_hooks.types.make_type (RECORD_TYPE);
  name = DECL_NAME (TYPE_NAME (orig_type));
  name = build_decl (gimple_location (tcctx->ctx->stmt),
                     TYPE_DECL, name, type);
  TYPE_NAME (type) = name;

  for (f = TYPE_FIELDS (orig_type); f ; f = TREE_CHAIN (f))
    {
      tree new_f = copy_node (f);
      DECL_CONTEXT (new_f) = type;
      TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (new_f) = new_fields;
      walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
                 &tcctx->cb, NULL);
      new_fields = new_f;
      *pointer_map_insert (tcctx->cb.decl_map, f) = new_f;
    }
  TYPE_FIELDS (type) = nreverse (new_fields);
  layout_type (type);
  return type;
}

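/* The remapping above matters for variably modified types.  As an
   illustrative sketch:

       void f (int n)
       {
         int a[n];
       #pragma omp task firstprivate (a)
         ...
       }

   the field for A has type int[n], which refers to a size temporary of
   the parent function, so the copy function must get its own remapped
   copy of the record layout before it can be laid out independently.  */
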
/* Create task copyfn, the function invoked by GOMP_task to initialize
   the data block of a newly allocated task (firstprivate copies and
   shared-variable pointers).  */

static void
create_task_copyfn (gimple task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (task_stmt);

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  child_cfun->dont_save_pending_sizes_p = 1;
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();

  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = TREE_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;

  /* Populate the function.  */
  push_gimplify_context (&gctx);
  current_function_decl = child_fn;

  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);

  /* Remap src and dst argument types if needed.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        record_needs_remap = true;
        break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = TREE_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
        srecord_needs_remap = true;
        break;
      }

  if (record_needs_remap || srecord_needs_remap)
    {
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_node (tcctx.cb.src_fn);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = pointer_map_create ();
      tcctx.ctx = ctx;

      if (record_needs_remap)
        record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
        srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    tcctx.cb.decl_map = NULL;

  push_cfun (child_cfun);

  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = TREE_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);

  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree *p;

          decl = OMP_CLAUSE_DECL (c);
          p = (tree *) pointer_map_contains (tcctx.cb.decl_map, decl);
          if (p == NULL)
            continue;
          n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_fold_indirect_ref_loc (loc, sarg);
          src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
          t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
          append_to_statement_list (t, &list);
        }

  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
      case OMP_CLAUSE_SHARED:
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        sf = (tree) n->value;
        if (tcctx.cb.decl_map)
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
        src = build_fold_indirect_ref_loc (loc, sarg);
        src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
        dst = build_fold_indirect_ref_loc (loc, arg);
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_FIRSTPRIVATE:
        decl = OMP_CLAUSE_DECL (c);
        if (is_variable_sized (decl))
          break;
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        if (n == NULL)
          break;
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_fold_indirect_ref_loc (loc, sarg);
            src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
            if (use_pointer_for_field (decl, NULL) || is_reference (decl))
              src = build_fold_indirect_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_fold_indirect_ref_loc (loc, arg);
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
        t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
        append_to_statement_list (t, &list);
        break;
      case OMP_CLAUSE_PRIVATE:
        if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
          break;
        decl = OMP_CLAUSE_DECL (c);
        n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
        f = (tree) n->value;
        if (tcctx.cb.decl_map)
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
        n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
        if (n != NULL)
          {
            sf = (tree) n->value;
            if (tcctx.cb.decl_map)
              sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
            src = build_fold_indirect_ref_loc (loc, sarg);
            src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
            if (use_pointer_for_field (decl, NULL))
              src = build_fold_indirect_ref_loc (loc, src);
          }
        else
          src = decl;
        dst = build_fold_indirect_ref_loc (loc, arg);
        dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
        t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
        append_to_statement_list (t, &list);
        break;
      default:
        break;
      }

  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
        {
          tree ind, ptr, df;

          decl = OMP_CLAUSE_DECL (c);
          if (!is_variable_sized (decl))
            continue;
          n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
          if (n == NULL)
            continue;
          f = (tree) n->value;
          f = *(tree *) pointer_map_contains (tcctx.cb.decl_map, f);
          gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
          ind = DECL_VALUE_EXPR (decl);
          gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
          gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
          n = splay_tree_lookup (ctx->sfield_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          sf = (tree) n->value;
          sf = *(tree *) pointer_map_contains (tcctx.cb.decl_map, sf);
          src = build_fold_indirect_ref_loc (loc, sarg);
          src = build3 (COMPONENT_REF, TREE_TYPE (sf), src, sf, NULL);
          src = build_fold_indirect_ref_loc (loc, src);
          dst = build_fold_indirect_ref_loc (loc, arg);
          dst = build3 (COMPONENT_REF, TREE_TYPE (f), dst, f, NULL);
          t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
          append_to_statement_list (t, &list);
          n = splay_tree_lookup (ctx->field_map,
                                 (splay_tree_key) TREE_OPERAND (ind, 0));
          df = (tree) n->value;
          df = *(tree *) pointer_map_contains (tcctx.cb.decl_map, df);
          ptr = build_fold_indirect_ref_loc (loc, arg);
          ptr = build3 (COMPONENT_REF, TREE_TYPE (df), ptr, df, NULL);
          t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
                      build_fold_addr_expr_loc (loc, dst));
          append_to_statement_list (t, &list);
        }

  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);

  if (tcctx.cb.decl_map)
    pointer_map_destroy (tcctx.cb.decl_map);
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
  current_function_decl = ctx->cb.src_fn;
}

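/* An illustrative sketch of the generated function for

       #pragma omp task firstprivate (a, b)

   with hypothetical field and type names:

       void task_copyfn (struct .omp_data_t *dst, struct .omp_data_s *src)
       {
         dst->a = src->a;
         dst->b = src->b;
       }

   The passes above additionally copy shared-variable pointers, run
   copy constructors where the language requires them, and fix up VLA
   firstprivates whose size temporaries and pointer fields must be
   initialized first.  */
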
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  */

static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple stmt = gsi_stmt (*gsi_p);
  gimple par_bind, bind;
  gimple_seq par_body, olist, ilist, par_olist, par_ilist, new_body;
  struct gimplify_ctx gctx;
  location_t loc = gimple_location (stmt);

  clauses = gimple_omp_taskreg_clauses (stmt);
  par_bind = gimple_seq_first_stmt (gimple_omp_body (stmt));
  par_body = gimple_bind_body (par_bind);
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      struct walk_stmt_info wi;
      int ws_num = 0;

      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
        gimple_omp_parallel_set_combined_p (stmt, true);
    }
  if (ctx->srecord_type)
    create_task_copyfn (stmt, ctx);

  push_gimplify_context (&gctx);

  par_olist = NULL;
  par_ilist = NULL;
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx);
  lower_omp (par_body, ctx);
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL)
    lower_reduction_clauses (clauses, &par_olist, ctx);

  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);

  if (ctx->record_type)
    {
      ctx->sender_decl
        = create_tmp_var (ctx->srecord_type ? ctx->srecord_type
                          : ctx->record_type, ".omp_data_o");
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }

  olist = NULL;
  ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);

  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */

  new_body = NULL;

  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
                           gimple_build_assign (ctx->receiver_decl, t));
    }

  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
  gimple_omp_set_body (stmt, new_body);

  bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gimple_bind_add_stmt (bind, stmt);
  if (ilist || olist)
    {
      gimple_seq_add_stmt (&ilist, bind);
      gimple_seq_add_seq (&ilist, olist);
      bind = gimple_build_bind (NULL, ilist, NULL);
    }

  gsi_replace (gsi_p, bind, true);

  pop_gimplify_context (NULL);
}

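/* An illustrative sketch of the result for a parallel (a task is
   analogous; .omp_data_i is a hypothetical name for the receiver):

       .omp_data_o.x = x;                     <-- ILIST (sender side)
       #pragma omp parallel [child fn, .omp_data_o]
         {
           .omp_data_i = &.omp_data_o;        <-- receiver fixup above
           ... PAR_ILIST, lowered body, PAR_OLIST ...
           GIMPLE_OMP_RETURN
         }
       x = .omp_data_o.x;                     <-- OLIST (copy-back), if any  */
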
/* Callback for lower_omp_1.  Return non-NULL if *tp needs to be
   regimplified.  If DATA is non-NULL, lower_omp_1 is outside
   of OpenMP context, but with task_shared_vars set.  */

static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
                        void *data)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (TREE_CODE (t) == VAR_DECL && data == NULL && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (task_shared_vars
      && DECL_P (t)
      && bitmap_bit_p (task_shared_vars, DECL_UID (t)))
    return t;

  /* If a global variable has been privatized, TREE_CONSTANT on
     ADDR_EXPR might be wrong.  */
  if (data == NULL && TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

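/* As an illustrative example of why this walk is needed: once a
   variable X shared in a task has been given a DECL_VALUE_EXPR such
   as *.omp_data_i->x (a hypothetical mapping), any statement that
   still refers to X directly expands to a non-trivial expression and
   must be regimplified; similarly, &X may lose its TREE_CONSTANT
   invariance once X is no longer a plain global.  */
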
/* Lower the OpenMP construct, if any, in the statement at GSI_P.  CTX
   is the enclosing OMP context, or NULL outside any OpenMP region.  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OpenMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (errorcount && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      if ((ctx || task_shared_vars)
          && (walk_tree (gimple_cond_lhs_ptr (stmt), lower_omp_regimplify_p,
                         ctx ? NULL : &wi, NULL)
              || walk_tree (gimple_cond_rhs_ptr (stmt), lower_omp_regimplify_p,
                            ctx ? NULL : &wi, NULL)))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler (stmt), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval (stmt), ctx);
      lower_omp (gimple_try_cleanup (stmt), ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body (stmt), ctx);
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
          && walk_tree (gimple_omp_atomic_load_rhs_ptr (stmt),
                        lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    default:
      if ((ctx || task_shared_vars)
          && walk_gimple_op (stmt, lower_omp_regimplify_p,
                             ctx ? NULL : &wi))
        gimple_regimplify_operands (stmt, gsi_p);
      break;
    }
}

static void
lower_omp (gimple_seq body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  input_location = saved_location;
}

/* Main entry point.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But there is nothing to do unless -fopenmp is given.  */
  if (flag_openmp == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
                                 delete_omp_context);

  body = gimple_body (current_function_decl);
  scan_omp (body, NULL);
  gcc_assert (taskreg_nesting_level == 0);

  if (all_contexts->root)
    {
      struct gimplify_ctx gctx;

      if (task_shared_vars)
        push_gimplify_context (&gctx);
      lower_omp (body, NULL);
      if (task_shared_vars)
        pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  return 0;
}

struct gimple_opt_pass pass_lower_omp =
{
 {
  GIMPLE_PASS,
  "omplower",                           /* name */
  NULL,                                 /* gate */
  execute_lower_omp,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_NONE,                              /* tv_id */
  PROP_gimple_any,                      /* properties_required */
  PROP_gimple_lomp,                     /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};

/* The following is a utility to diagnose OpenMP structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
               gimple branch_ctx, gimple label_ctx)
{
  if (label_ctx == branch_ctx)
    return false;

  /* Previously we kept track of the label's entire context in
     diagnose_sb_[12] so we could traverse it and issue a correct "exit" or
     "enter" error message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.  */

#if 0
  /* Try to avoid confusing the user by producing an error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  bool exit_p = true;

  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
        {
          if (TREE_VALUE (label_ctx) == branch_ctx)
            {
              exit_p = false;
              break;
            }
          label_ctx = TREE_CHAIN (label_ctx);
        }
    }

  if (exit_p)
    error ("invalid exit from OpenMP structured block");
  else
    error ("invalid entry to OpenMP structured block");
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to OpenMP structured block");
  else
    /* Otherwise, be vague and lazy, but efficient.  */
    error ("invalid branch to/from an OpenMP structured block");

  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}

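/* Example of code the two messages above apply to (illustrative):

       goto l1;                  <-- "invalid entry": the branch is outside
     #pragma omp parallel            any OpenMP context, the label inside one
       {
       l1:;
       }

   while a goto out of the parallel body, where BRANCH_CTX is non-NULL,
   gets the vaguer "invalid branch to/from" message.  */
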
/* Pass 1: Create a minimal tree of OpenMP structured blocks, and record
   where each label is found.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  gimple inner_context;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      splay_tree_insert (all_labels, (splay_tree_key) gimple_label_label (stmt),
                         (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple context = (gimple) wi->info;
  splay_tree_node n;
  gimple stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
      wi->info = stmt;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
         walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
                       diagnose_sb_2, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
        tree lab = gimple_cond_true_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
        lab = gimple_cond_false_label (stmt);
        if (lab)
          {
            n = splay_tree_lookup (all_labels,
                                   (splay_tree_key) lab);
            diagnose_sb_0 (gsi_p, context,
                           n ? (gimple) n->value : NULL);
          }
      }
      break;

    case GIMPLE_GOTO:
      {
        tree lab = gimple_goto_dest (stmt);
        if (TREE_CODE (lab) != LABEL_DECL)
          break;

        n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
        diagnose_sb_0 (gsi_p, context, n ? (gimple) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
        unsigned int i;
        for (i = 0; i < gimple_switch_num_labels (stmt); ++i)
          {
            tree lab = CASE_LABEL (gimple_switch_label (stmt, i));
            n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
            if (n && diagnose_sb_0 (gsi_p, context, (gimple) n->value))
              break;
          }
      }
      break;

    case GIMPLE_RETURN:
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}

/* Driver for the two diagnostic walks above.  */

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq (body, diagnose_sb_2, NULL, &wi);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}

static bool
gate_diagnose_omp_blocks (void)
{
  return flag_openmp != 0;
}

struct gimple_opt_pass pass_diagnose_omp_blocks =
{
  {
    GIMPLE_PASS,
    "*diagnose_omp_blocks",             /* name */
    gate_diagnose_omp_blocks,           /* gate */
    diagnose_omp_structured_block_errors,       /* execute */
    NULL,                               /* sub */
    NULL,                               /* next */
    0,                                  /* static_pass_number */
    TV_NONE,                            /* tv_id */
    PROP_gimple_any,                    /* properties_required */
    0,                                  /* properties_provided */
    0,                                  /* properties_destroyed */
    0,                                  /* todo_flags_start */
    0,                                  /* todo_flags_finish */
  }
};

#include "gt-omp-low.h"
