/* Passes for transactional memory support.
   Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "target.h"
#include "langhooks.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"


#define PROB_VERY_UNLIKELY      (REG_BR_PROB_BASE / 2000 - 1)
#define PROB_ALWAYS             (REG_BR_PROB_BASE)

#define A_RUNINSTRUMENTEDCODE   0x0001
#define A_RUNUNINSTRUMENTEDCODE 0x0002
#define A_SAVELIVEVARIABLES     0x0004
#define A_RESTORELIVEVARIABLES  0x0008
#define A_ABORTTRANSACTION      0x0010

#define AR_USERABORT            0x0001
#define AR_USERRETRY            0x0002
#define AR_TMCONFLICT           0x0004
#define AR_EXCEPTIONBLOCKABORT  0x0008
#define AR_OUTERABORT           0x0010

#define MODE_SERIALIRREVOCABLE  0x0000


/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

        __transaction {
          local++;
          if (++global == 10)
            __tm_abort;
        }

  During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
  trivially replaced with a GIMPLE_TRANSACTION node.

  During pass_lower_tm, we examine the body of transactions looking
  for aborts.  Transactions that do not contain an abort may be
  merged into an outer transaction.  We also add a TRY-FINALLY node
  to arrange for the transaction to be committed on any exit.

  [??? Think about how this arrangement affects throw-with-commit
  and throw-with-abort operations.  In this case we want the TRY to
  handle gotos, but not to catch any exceptions because the transaction
  will already be closed.]

        GIMPLE_TRANSACTION [label=NULL] {
          try {
            local = local + 1;
            t0 = global;
            t1 = t0 + 1;
            global = t1;
            if (t1 == 10)
              __builtin___tm_abort ();
          } finally {
            __builtin___tm_commit ();
          }
        }

  During pass_lower_eh, we create EH regions for the transactions,
  intermixed with the regular EH stuff.  This gives us a nice persistent
  mapping (all the way through rtl) from transactional memory operation
  back to the transaction, which allows us to get the abnormal edges
  correct to model transaction aborts and restarts:

        GIMPLE_TRANSACTION [label=over]
        local = local + 1;
        t0 = global;
        t1 = t0 + 1;
        global = t1;
        if (t1 == 10)
          __builtin___tm_abort ();
        __builtin___tm_commit ();
        over:

  This is the end of all_lowering_passes, and so is what is present
  during the IPA passes, and through all of the optimization passes.

  During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
  functions and mark functions for cloning.

  At the end of gimple optimization, before exiting SSA form,
  pass_tm_edges replaces statements that perform transactional
  memory operations with the appropriate TM builtins, and swaps
  out function calls with their transactional clones.  At this
  point we introduce the abnormal transaction restart edges and
  complete lowering of the GIMPLE_TRANSACTION node.

        x = __builtin___tm_start (MAY_ABORT);
        eh_label:
        if (x & abort_transaction)
          goto over;
        local = local + 1;
        t0 = __builtin___tm_load (global);
        t1 = t0 + 1;
        __builtin___tm_store (&global, t1);
        if (t1 == 10)
          __builtin___tm_abort ();
        __builtin___tm_commit ();
        over:
*/
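
/* For illustration only, a minimal user-level sketch (assuming the
   -fgnu-tm language extensions; the keyword spellings here are the
   user-visible counterparts of the constructs above) that would
   exercise this whole pipeline:

       static int global;

       void
       f (int *local)
       {
         __transaction_atomic
           {
             ++*local;
             if (++global == 10)
               __transaction_cancel;
           }
       }

   The body becomes a TRANSACTION_EXPR in GENERIC, a GIMPLE_TRANSACTION
   after gimplification, and finally the builtin call sequence shown in
   the last step above.  */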


/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */

static tree
get_attrs_for (const_tree x)
{
  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;

    default:
      if (TYPE_P (x))
        return NULL;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
        return NULL;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
        return NULL;
      /* FALLTHRU */

    case FUNCTION_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
    }
}
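
/* For example (an illustrative sketch, not code from this file): given

       void f (void) __attribute__ ((transaction_safe));
       void (*fp) (void) = f;

   both F's FUNCTION_DECL and FP's POINTER_TYPE lead to the same
   FUNCTION_TYPE attribute list here, so the predicates below work
   uniformly on decls, function types, and pointers to functions.  */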

/* Return true if X has been marked TM_PURE.  */

bool
is_tm_pure (const_tree x)
{
  unsigned flags;

  switch (TREE_CODE (x))
    {
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
      break;

    default:
      if (TYPE_P (x))
        return false;
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
        return false;
      /* FALLTHRU */

    case POINTER_TYPE:
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
        return false;
      break;
    }

  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
}

/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is, by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}

/* Return true if X has been marked TM_SAFE.  */

bool
is_tm_safe (const_tree x)
{
  if (flag_tm)
    {
      tree attrs = get_attrs_for (x);
      if (attrs)
        {
          if (lookup_attribute ("transaction_safe", attrs))
            return true;
          if (lookup_attribute ("transaction_may_cancel_outer", attrs))
            return true;
        }
    }
  return false;
}

/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}

/* Return true if X has been marked TM_CALLABLE.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
        return true;
      if (lookup_attribute ("transaction_safe", attrs))
        return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
        return true;
    }
  return false;
}

/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */

bool
is_tm_may_cancel_outer (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
}

/* Return true for built-in functions that "end" a transaction.  */

bool
is_tm_ending_fndecl (tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
        return true;
      default:
        break;
      }

  return false;
}

/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
              || fcode == BUILT_IN_TM_LOAD_2
              || fcode == BUILT_IN_TM_LOAD_4
              || fcode == BUILT_IN_TM_LOAD_8
              || fcode == BUILT_IN_TM_LOAD_FLOAT
              || fcode == BUILT_IN_TM_LOAD_DOUBLE
              || fcode == BUILT_IN_TM_LOAD_LDOUBLE
              || fcode == BUILT_IN_TM_LOAD_M64
              || fcode == BUILT_IN_TM_LOAD_M128
              || fcode == BUILT_IN_TM_LOAD_M256);
    }
  return false;
}

/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}

/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc. optimized variants.  */

static bool
is_tm_simple_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
              || fcode == BUILT_IN_TM_STORE_2
              || fcode == BUILT_IN_TM_STORE_4
              || fcode == BUILT_IN_TM_STORE_8
              || fcode == BUILT_IN_TM_STORE_FLOAT
              || fcode == BUILT_IN_TM_STORE_DOUBLE
              || fcode == BUILT_IN_TM_STORE_LDOUBLE
              || fcode == BUILT_IN_TM_STORE_M64
              || fcode == BUILT_IN_TM_STORE_M128
              || fcode == BUILT_IN_TM_STORE_M256);
    }
  return false;
}

/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
          && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}

/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */

tree
build_tm_abort_call (location_t loc, bool is_outer)
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
                              build_int_cst (integer_type_node,
                                             AR_USERABORT
                                             | (is_outer ? AR_OUTERABORT : 0)));
}
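
/* A sketch of the GENERIC built above for a non-outer abort (the
   user-visible builtin spelling comes from gtm-builtins.def; shown
   here only for orientation):

       __builtin__ITM_abortTransaction (AR_USERABORT);

   An outer abort ORs in AR_OUTERABORT instead.  */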

/* Common gating function for several of the TM passes.  */

static bool
gate_tm (void)
{
  return flag_tm;
}

/* Map for arbitrary function replacement under TM, as created
   by the tm_wrap attribute.  */

static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
     htab_t tm_wrap_map;

void
record_tm_replacement (tree from, tree to)
{
  struct tree_map **slot, *h;

  /* Do not inline wrapper functions that will get replaced in the TM
     pass.

     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;

  if (tm_wrap_map == NULL)
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);

  h = ggc_alloc_tree_map ();
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->to = to;

  slot = (struct tree_map **)
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
  *slot = h;
}

/* Return a TM-aware replacement function for FNDECL.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
      if (h)
        return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we already have these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here.  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
        return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
        return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
        return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
        return NULL;
      }

  return NULL;
}
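
/* For example (a hedged sketch of the user-level usage; the attribute
   is spelled "transaction_wrap" in the front ends):

       void txn_foo (void);
       void foo (void) __attribute__ ((transaction_wrap (txn_foo)));

   registers the pair via record_tm_replacement (foo, txn_foo), after
   which the lookup above redirects in-transaction calls to foo over
   to txn_foo.  */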

/* When appropriate, record TM replacement for memory allocation functions.

   FROM is the FNDECL to wrap.  */
void
tm_malloc_replacement (tree from)
{
  const char *str;
  tree to;

  if (TREE_CODE (from) != FUNCTION_DECL)
    return;

  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     doing... */
  if (find_tm_replacement_function (from))
    return;

  str = IDENTIFIER_POINTER (DECL_NAME (from));

  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
    return;

  TREE_NOTHROW (to) = 0;

  record_tm_replacement (from, to);
}

/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */

/* Subroutine of diagnose_tm_blocks, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */

#define DIAG_TM_OUTER           1
#define DIAG_TM_SAFE            2
#define DIAG_TM_RELAXED         4

struct diagnose_tm
{
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  gimple stmt;
};

/* Tree callback function for diagnose_tm pass.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
                  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
  enum tree_code code = TREE_CODE (*tp);

  if ((code == VAR_DECL
       || code == RESULT_DECL
       || code == PARM_DECL)
      && d->block_flags & (DIAG_TM_SAFE | DIAG_TM_RELAXED)
      && TREE_THIS_VOLATILE (TREE_TYPE (*tp))
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
                "invalid volatile use of %qD inside transaction",
                *tp);
    }

  return NULL_TREE;
}

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
               struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;

  /* Save stmt for use in leaf analysis.  */
  d->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
        tree fn = gimple_call_fn (stmt);

        if ((d->summary_flags & DIAG_TM_OUTER) == 0
            && is_tm_may_cancel_outer (fn))
          error_at (gimple_location (stmt),
                    "%<transaction_may_cancel_outer%> function call not within"
                    " outer transaction or %<transaction_may_cancel_outer%>");

        if (d->summary_flags & DIAG_TM_SAFE)
          {
            bool is_safe, direct_call_p;
            tree replacement;

            if (TREE_CODE (fn) == ADDR_EXPR
                && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
              {
                direct_call_p = true;
                replacement = TREE_OPERAND (fn, 0);
                replacement = find_tm_replacement_function (replacement);
                if (replacement)
                  fn = replacement;
              }
            else
              {
                direct_call_p = false;
                replacement = NULL_TREE;
              }

            if (is_tm_safe_or_pure (fn))
              is_safe = true;
            else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
              {
                /* A function explicitly marked transaction_callable as
                   opposed to transaction_safe is being defined to be
                   unsafe as part of its ABI, regardless of its contents.  */
                is_safe = false;
              }
            else if (direct_call_p)
              {
                if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
                  is_safe = true;
                else if (replacement)
                  {
                    /* ??? At present we've been considering replacements
                       merely transaction_callable, and therefore might
                       enter irrevocable.  The tm_wrap attribute has not
                       yet made it into the new language spec.  */
                    is_safe = false;
                  }
                else
                  {
                    /* ??? Diagnostics for unmarked direct calls moved into
                       the IPA pass.  Section 3.2 of the spec details how
                       functions not marked should be considered "implicitly
                       safe" based on having examined the function body.  */
                    is_safe = true;
                  }
              }
            else
              {
                /* An unmarked indirect call.  Consider it unsafe even
                   though optimization may yet figure out how to inline.  */
                is_safe = false;
              }

            if (!is_safe)
              {
                if (TREE_CODE (fn) == ADDR_EXPR)
                  fn = TREE_OPERAND (fn, 0);
                if (d->block_flags & DIAG_TM_SAFE)
                  {
                    if (direct_call_p)
                      error_at (gimple_location (stmt),
                                "unsafe function call %qD within "
                                "atomic transaction", fn);
                    else
                      {
                        if (!DECL_P (fn) || DECL_NAME (fn))
                          error_at (gimple_location (stmt),
                                    "unsafe function call %qE within "
                                    "atomic transaction", fn);
                        else
                          error_at (gimple_location (stmt),
                                    "unsafe indirect function call within "
                                    "atomic transaction");
                      }
                  }
                else
                  {
                    if (direct_call_p)
                      error_at (gimple_location (stmt),
                                "unsafe function call %qD within "
                                "%<transaction_safe%> function", fn);
                    else
                      {
                        if (!DECL_P (fn) || DECL_NAME (fn))
                          error_at (gimple_location (stmt),
                                    "unsafe function call %qE within "
                                    "%<transaction_safe%> function", fn);
                        else
                          error_at (gimple_location (stmt),
                                    "unsafe indirect function call within "
                                    "%<transaction_safe%> function");
                      }
                  }
              }
          }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
         asm statements, and then add "transaction_safe" to it.
         Either that or get the language spec to resurrect __tm_waiver.  */
      if (d->block_flags & DIAG_TM_SAFE)
        error_at (gimple_location (stmt),
                  "asm not allowed in atomic transaction");
      else if (d->func_flags & DIAG_TM_SAFE)
        error_at (gimple_location (stmt),
                  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
        unsigned char inner_flags = DIAG_TM_SAFE;

        if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
          {
            if (d->block_flags & DIAG_TM_SAFE)
              error_at (gimple_location (stmt),
                        "relaxed transaction in atomic transaction");
            else if (d->func_flags & DIAG_TM_SAFE)
              error_at (gimple_location (stmt),
                        "relaxed transaction in %<transaction_safe%> function");
            inner_flags = DIAG_TM_RELAXED;
          }
        else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
          {
            if (d->block_flags)
              error_at (gimple_location (stmt),
                        "outer transaction in transaction");
            else if (d->func_flags & DIAG_TM_OUTER)
              error_at (gimple_location (stmt),
                        "outer transaction in "
                        "%<transaction_may_cancel_outer%> function");
            else if (d->func_flags & DIAG_TM_SAFE)
              error_at (gimple_location (stmt),
                        "outer transaction in %<transaction_safe%> function");
            inner_flags |= DIAG_TM_OUTER;
          }

        *handled_ops_p = true;
        if (gimple_transaction_body (stmt))
          {
            struct walk_stmt_info wi_inner;
            struct diagnose_tm d_inner;

            memset (&d_inner, 0, sizeof (d_inner));
            d_inner.func_flags = d->func_flags;
            d_inner.block_flags = d->block_flags | inner_flags;
            d_inner.summary_flags = d_inner.func_flags | d_inner.block_flags;

            memset (&wi_inner, 0, sizeof (wi_inner));
            wi_inner.info = &d_inner;

            walk_gimple_seq (gimple_transaction_body (stmt),
                             diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
          }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}

static unsigned int
diagnose_tm_blocks (void)
{
  struct walk_stmt_info wi;
  struct diagnose_tm d;

  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;

  memset (&wi, 0, sizeof (wi));
  wi.info = &d;

  walk_gimple_seq (gimple_body (current_function_decl),
                   diagnose_tm_1, diagnose_tm_1_op, &wi);

  return 0;
}

struct gimple_opt_pass pass_diagnose_tm_blocks =
{
  {
    GIMPLE_PASS,
    "*diagnose_tm_blocks",              /* name */
    gate_tm,                            /* gate */
    diagnose_tm_blocks,                 /* execute */
    NULL,                               /* sub */
    NULL,                               /* next */
    0,                                  /* static_pass_number */
    TV_TRANS_MEM,                       /* tv_id */
    PROP_gimple_any,                    /* properties_required */
    0,                                  /* properties_provided */
    0,                                  /* properties_destroyed */
    0,                                  /* todo_flags_start */
    0,                                  /* todo_flags_finish */
  }
};
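
/* Example of a diagnostic this pass produces (a sketch, assuming the
   -fgnu-tm extensions):

       void (*fp) (void);

       void
       h (void)
       {
         __transaction_atomic
           {
             fp ();   // error: unsafe function call 'fp' within
           }          // atomic transaction
       }

   The unmarked indirect call is considered unsafe per the logic in
   diagnose_tm_1 above.  */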

/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
         lala.x[i] = 123;
         ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
         tmp_lala1 = lala.x[i];
         lala.x[i] = tmp_lala1;
         lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);
   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree as possible, so that one call shadows all
   of the writes to a given location (thus reducing the total number
   of logging calls), but not so high as to be called on a path that
   does not perform a write.  */

/* One individual log entry.  We may have multiple statements for the
   same location if neither dominates the other (on different
   execution paths).  */
typedef struct tm_log_entry
{
  /* Address to save.  */
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  gimple_vec stmts;
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
  tree save_var;
} *tm_log_entry_t;

/* The actual log.  */
static htab_t tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static VEC(tree,heap) *tm_log_save_addresses;

/* Map for an SSA_NAME originally pointing to a non-aliased new piece
   of memory (malloc, alloca, etc.).  */
static htab_t tm_new_mem_hash;

enum thread_memory_type
  {
    mem_non_local = 0,
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };

typedef struct tm_new_mem_map
{
  /* SSA_NAME being dereferenced.  */
  tree val;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;

/* Htab support.  Return hash value for a `tm_log_entry'.  */
static hashval_t
tm_log_hash (const void *p)
{
  const struct tm_log_entry *log = (const struct tm_log_entry *) p;
  return iterative_hash_expr (log->addr, 0);
}

/* Htab support.  Return true if two log entries are the same.  */
static int
tm_log_eq (const void *p1, const void *p2)
{
  const struct tm_log_entry *log1 = (const struct tm_log_entry *) p1;
  const struct tm_log_entry *log2 = (const struct tm_log_entry *) p2;

  /* FIXME:

     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.

     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.

     See: g++.dg/tm/pr46653.C
  */

  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
    return true;

  return operand_equal_p (log1->addr, log2->addr, 0);
}

/* Htab support.  Free one tm_log_entry.  */
static void
tm_log_free (void *p)
{
  struct tm_log_entry *lp = (struct tm_log_entry *) p;
  VEC_free (gimple, heap, lp->stmts);
  free (lp);
}

/* Initialize logging data structures.  */
static void
tm_log_init (void)
{
  tm_log = htab_create (10, tm_log_hash, tm_log_eq, tm_log_free);
  tm_new_mem_hash = htab_create (5, struct_ptr_hash, struct_ptr_eq, free);
  tm_log_save_addresses = VEC_alloc (tree, heap, 5);
}

/* Free logging data structures.  */
static void
tm_log_delete (void)
{
  htab_delete (tm_log);
  htab_delete (tm_new_mem_hash);
  VEC_free (tree, heap, tm_log_save_addresses);
}

/* Return true if MEM is transaction-invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
      basic_block def_bb;

      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
        && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }

  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}

/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   tree.

   ENTRY_BLOCK is the entry block for the transaction, or NULL if not
   known.

   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.

   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.  */
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
  void **slot;
  struct tm_log_entry l, *lp;

  l.addr = addr;
  slot = htab_find_slot (tm_log, &l, INSERT);
  if (!*slot)
    {
      tree type = TREE_TYPE (addr);

      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      *slot = lp;

      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
          && transaction_invariant_address_p (lp->addr, entry_block)
          && TYPE_SIZE_UNIT (type) != NULL
          && host_integerp (TYPE_SIZE_UNIT (type), 1)
          && (tree_low_cst (TYPE_SIZE_UNIT (type), 1)
              < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
          /* We must be able to copy this type normally.  I.e., no
             special constructors and the like.  */
          && !TREE_ADDRESSABLE (type))
        {
          lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
          add_referenced_var (lp->save_var);
          lp->stmts = NULL;
          lp->entry_block = entry_block;
          /* Save addresses separately in dominator order so we don't
             get confused by overlapping addresses in the save/restore
             sequence.  */
          VEC_safe_push (tree, heap, tm_log_save_addresses, lp->addr);
        }
      else
        {
          /* Use the logging functions.  */
          lp->stmts = VEC_alloc (gimple, heap, 5);
          VEC_quick_push (gimple, lp->stmts, stmt);
          lp->save_var = NULL;
        }
    }
  else
    {
      size_t i;
      gimple oldstmt;

      lp = (struct tm_log_entry *) *slot;

      /* If we're generating a save/restore sequence, we don't care
         about statements.  */
      if (lp->save_var)
        return;

      for (i = 0; VEC_iterate (gimple, lp->stmts, i, oldstmt); ++i)
        {
          if (stmt == oldstmt)
            return;
          /* We already have a store to the same address, higher up the
             dominator tree.  Nothing to do.  */
          if (dominated_by_p (CDI_DOMINATORS,
                              gimple_bb (stmt), gimple_bb (oldstmt)))
            return;
          /* We should be processing blocks in dominator tree order.  */
          gcc_assert (!dominated_by_p (CDI_DOMINATORS,
                                       gimple_bb (oldstmt), gimple_bb (stmt)));
        }
      /* Store is on a different code path.  */
      VEC_safe_push (gimple, heap, lp->stmts, stmt);
    }
}

/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, inserting any new statements before GSI.  */

static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}
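
/* For instance (a sketch): if X is the component reference a.b[i],
   the call above emits something like

       addr_N = &a.b[i];

   before GSI and returns the new SSA name addr_N; for a
   TARGET_MEM_REF the address arithmetic is rebuilt explicitly via
   tree_mem_ref_addr instead.  */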

/* Instrument one address with the logging functions.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
static void
tm_log_emit_stmt (tree addr, gimple stmt)
{
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (host_integerp (size, 1))
    {
      unsigned int n = tree_low_cst (size, 1);
      switch (n)
        {
        case 1:
          code = BUILT_IN_TM_LOG_1;
          break;
        case 2:
          code = BUILT_IN_TM_LOG_2;
          break;
        case 4:
          code = BUILT_IN_TM_LOG_4;
          break;
        case 8:
          code = BUILT_IN_TM_LOG_8;
          break;
        default:
          code = BUILT_IN_TM_LOG;
          if (TREE_CODE (type) == VECTOR_TYPE)
            {
              if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
                code = BUILT_IN_TM_LOG_M64;
              else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
                code = BUILT_IN_TM_LOG_M128;
              else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
                code = BUILT_IN_TM_LOG_M256;
            }
          break;
        }
    }

  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr, size);
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
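
/* For instance (a sketch of the dispatch above): logging a 4-byte
   scalar selects BUILT_IN_TM_LOG_4, which the runtime exposes as
   something like

       _ITM_LU4 (&lala.x[i]);

   while a type with no size-specific variant falls back to the
   generic two-argument form

       _ITM_LB (&obj, sizeof (obj));

   via BUILT_IN_TM_LOG.  */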

/* Go through the log and instrument addresses that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   later.  */
static void
tm_log_emit (void)
{
  htab_iterator hi;
  struct tm_log_entry *lp;

  FOR_EACH_HTAB_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
    {
      size_t i;
      gimple stmt;

      if (dump_file)
        {
          fprintf (dump_file, "TM thread private mem logging: ");
          print_generic_expr (dump_file, lp->addr, 0);
          fprintf (dump_file, "\n");
        }

      if (lp->save_var)
        {
          if (dump_file)
            fprintf (dump_file, "DUMPING to variable\n");
          continue;
        }
      else
        {
          if (dump_file)
            fprintf (dump_file, "DUMPING with logging functions\n");
          for (i = 0; VEC_iterate (gimple, lp->stmts, i, stmt); ++i)
            tm_log_emit_stmt (lp->addr, stmt);
        }
    }
}

/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  struct tm_log_entry l, *lp;

  for (i = 0; i < VEC_length (tree, tm_log_save_addresses); ++i)
    {
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
        continue;

      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));

      /* Make sure we can create an SSA_NAME for this type.  For
         instance, aggregates aren't allowed, in which case the system
         will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
        {
          lp->save_var = make_ssa_name (lp->save_var, stmt);
          gimple_assign_set_lhs (stmt, lp->save_var);
        }

      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
}

/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
  int i;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple stmt;

  for (i = VEC_length (tree, tm_log_save_addresses) - 1; i >= 0; i--)
    {
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      gcc_assert (lp->save_var != NULL);

      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
        continue;

      /* Restores are in LIFO order from the saves in case we have
         overlaps.  */
      gsi = gsi_start_bb (bb);

      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
}

/* Emit the checks for performing either a save or a restore sequence.

   TRXN_PROP is either A_SAVELIVEVARIABLES or A_RESTORELIVEVARIABLES.

   The code sequence is inserted in new basic blocks created between
   BEFORE_BB and the destination of FALLTHRU_EDGE; *END_BB is set to
   the last of the new blocks.

   STATUS is the return value from _ITM_beginTransaction.
   ENTRY_BLOCK is the entry block for the transaction.
   EMITF is a callback to emit the actual save/restore code.

   The basic block containing the conditional checking for TRXN_PROP
   is returned.  */
static basic_block
tm_log_emit_save_or_restores (basic_block entry_block,
                              unsigned trxn_prop,
                              tree status,
                              void (*emitf)(basic_block, basic_block),
                              basic_block before_bb,
                              edge fallthru_edge,
                              basic_block *end_bb)
{
  basic_block cond_bb, code_bb;
  gimple cond_stmt, stmt;
  gimple_stmt_iterator gsi;
  tree t1, t2;
  int old_flags = fallthru_edge->flags;

  cond_bb = create_empty_bb (before_bb);
  code_bb = create_empty_bb (cond_bb);
  *end_bb = create_empty_bb (code_bb);
  redirect_edge_pred (fallthru_edge, *end_bb);
  fallthru_edge->flags = EDGE_FALLTHRU;
  make_edge (before_bb, cond_bb, old_flags);

  set_immediate_dominator (CDI_DOMINATORS, cond_bb, before_bb);
  set_immediate_dominator (CDI_DOMINATORS, code_bb, cond_bb);

  gsi = gsi_last_bb (cond_bb);

  /* t1 = status & A_{property}.  */
  t1 = make_rename_temp (TREE_TYPE (status), NULL);
  t2 = build_int_cst (TREE_TYPE (status), trxn_prop);
  stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

  /* if (t1).  */
  t2 = build_int_cst (TREE_TYPE (status), 0);
  cond_stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);

  emitf (entry_block, code_bb);

  make_edge (cond_bb, code_bb, EDGE_TRUE_VALUE);
  make_edge (cond_bb, *end_bb, EDGE_FALSE_VALUE);
  make_edge (code_bb, *end_bb, EDGE_FALLTHRU);

  return cond_bb;
}
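
/* Resulting CFG shape, roughly (a sketch for the save case):

       before_bb:
         status = _ITM_beginTransaction (...);
       cond_bb:
         t1 = status & A_SAVELIVEVARIABLES;
         if (t1 != 0) goto code_bb; else goto end_bb;
       code_bb:
         tm_save_1 = lala.x[i];    // emitted by EMITF
       end_bb:
         ...
*/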

static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
                               struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
                                  struct walk_stmt_info *);

/* Evaluate an address X being dereferenced and determine if it
   originally points to a non-aliased new chunk of memory (malloc,
   alloca, etc.).

   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.

   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
{
  gimple stmt = NULL;
  enum tree_code code;
  void **slot;
  tm_new_mem_map_t elt, *elt_p;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;

  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
         either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;

  /* Look in cache first.  */
  elt.val = x;
  slot = htab_find_slot (tm_new_mem_hash, &elt, INSERT);
  elt_p = (tm_new_mem_map_t *) *slot;
  if (elt_p)
    return elt_p->local_new_memory;

  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;

  /* Search DEF chain to find the original definition of this address.  */
  do
    {
      if (ptr_deref_may_alias_global_p (x))
        {
          /* Address escapes.  This is not thread-private.  */
          retval = mem_non_local;
          goto new_memory_ret;
        }

      stmt = SSA_NAME_DEF_STMT (x);

      /* If the malloc call is outside the transaction, this is
         thread-local.  */
      if (retval != mem_thread_local
          && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
        retval = mem_thread_local;

      if (is_gimple_assign (stmt))
        {
          code = gimple_assign_rhs_code (stmt);
          /* x = foo ==> foo */
          if (code == SSA_NAME)
            x = gimple_assign_rhs1 (stmt);
          /* x = foo + n ==> foo */
          else if (code == POINTER_PLUS_EXPR)
            x = gimple_assign_rhs1 (stmt);
          /* x = (cast*) foo ==> foo */
          else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
            x = gimple_assign_rhs1 (stmt);
          else
            {
              retval = mem_non_local;
              goto new_memory_ret;
            }
        }
      else
        {
          if (gimple_code (stmt) == GIMPLE_PHI)
            {
              unsigned int i;
              enum thread_memory_type mem;
              tree phi_result = gimple_phi_result (stmt);

              /* If any of the ancestors are non-local, we are sure to
                 be non-local.  Otherwise we can avoid doing anything
                 and inherit what has already been generated.  */
              retval = mem_max;
              for (i = 0; i < gimple_phi_num_args (stmt); ++i)
                {
                  tree op = PHI_ARG_DEF (stmt, i);

                  /* Exclude self-assignment.  */
                  if (phi_result == op)
                    continue;

                  mem = thread_private_new_memory (entry_block, op);
                  if (mem == mem_non_local)
                    {
                      retval = mem;
                      goto new_memory_ret;
                    }
                  retval = MIN (retval, mem);
                }
              goto new_memory_ret;
            }
          break;
        }
    }
  while (TREE_CODE (x) == SSA_NAME);

  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    ;
  else
    retval = mem_non_local;

 new_memory_ret:
  elt_p->local_new_memory = retval;
  return retval;
}
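
/* A minimal sketch of the distinction drawn above (assuming the
   -fgnu-tm extensions and malloc from <stdlib.h>):

       void
       g (void)
       {
         int *p = malloc (sizeof *p);       // thread-local
         __transaction_atomic
           {
             int *q = malloc (sizeof *q);   // transaction-local
             *p = 1;   // mem_thread_local: logged, no barrier
             *q = 2;   // mem_transaction_local: nothing needed
           }
       }
*/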

/* Determine whether X has to be instrumented using a read
   or write barrier.

   ENTRY_BLOCK is the entry block for the region where STMT resides.
   NULL if unknown.

   STMT is the statement in which X occurs.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
  tree orig = x;
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);

  switch (TREE_CODE (x))
    {
    case INDIRECT_REF:
    case MEM_REF:
      {
        enum thread_memory_type ret;

        ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
        if (ret == mem_non_local)
          return true;
        if (stmt && ret == mem_thread_local)
          /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
          tm_log_add (entry_block, orig, stmt);

        /* Transaction-locals require nothing at all.  For malloc, a
           transaction restart frees the memory and we reallocate.
           For alloca, the stack pointer gets reset by the retry and
           we reallocate.  */
        return false;
      }

    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
        return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
        return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */

    case PARM_DECL:
    case RESULT_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
        {
          /* ??? This value is a pointer, but aggregate_value_p has been
             jigged to return true which confuses needs_to_live_in_memory.
             This ought to be cleaned up generically.

             FIXME: Verify this still happens after the next mainline
             merge.  Testcase is g++.dg/tm/pr47554.C.
          */
          return false;
        }

      if (is_global_var (x))
        return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
             tm_log_add() call, however is_call_clobbered() depends on
             aliasing info which is not available during
             gimplification.  Since requires_barrier() gets called
             during lower_sequence_tm/gimplification, leave the call
             to needs_to_live_in_memory until we eliminate
             lower_sequence_tm altogether.  */
          needs_to_live_in_memory (x))
        return true;
      else
        {
          /* For local memory that doesn't escape (aka thread private
             memory), we can either save the value at the beginning of
             the transaction and restore on restart, or call a tm
             function to dynamically save and restore on restart
             (ITM_L*).  */
          if (stmt)
            tm_log_add (entry_block, orig, stmt);
          return false;
        }

    default:
      return false;
    }
}
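
/* Rough summary of the decision above (a sketch; the thread-private
   cases are additionally logged via tm_log_add when STMT is
   non-null):

       int global;                 // needs a barrier: true

       void
       k (void)
       {
         int local;
         __transaction_atomic
           {
             global = 1;           // true: instrumented TM store
             local = 2;            // false: thread-private, save/restore
           }
       }
*/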

/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */

static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
}

/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}

/* Lower a GIMPLE_TRANSACTION statement.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple g, stmt = gsi_stmt (*gsi);
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq (gimple_transaction_body (stmt),
                   lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction-related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      if (outer_state)
        *outer_state |= this_state;

      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
                             GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = gimple_seq_alloc ();

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
                             1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node, NULL);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
                             1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
                        gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);

  gimple_transaction_set_body (stmt, NULL);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  if ((this_state & (GTMA_HAVE_ABORT))
      || (gimple_transaction_subcode(stmt) & GTMA_IS_OUTER))
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label (stmt, label);
      gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
    }

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}
1631
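
/* A sketch of the -fexceptions lowering built above (illustrative
   only; without exceptions the finally block holds just the commit):

        GIMPLE_TRANSACTION [label=NULL] {
          try {
            ... body ...
          } finally {
            eh_else {
              __builtin___tm_commit ();
            } {
              ptr = __builtin_eh_pointer (0);
              __builtin___tm_commit_eh (ptr);
            }
          }
        }

   so the normal path commits plainly, while the exceptional path
   commits with the propagating exception in hand.  */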
 
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */

static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                   struct walk_stmt_info *wi)
{
  unsigned int *state = (unsigned int *) wi->info;
  gimple stmt = gsi_stmt (*gsi);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
        examine_assign_tm (state, gsi);
      break;

    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      break;

    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;

    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      break;

    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
    }

  return NULL_TREE;
}

/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */

static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                      struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);

  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
    }
  else
    *handled_ops_p = !gimple_has_substatements (stmt);

  return NULL_TREE;
}

/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (gimple_body (current_function_decl),
                   lower_sequence_no_tm, NULL, &wi);

  return 0;
}

struct gimple_opt_pass pass_lower_tm =
{
 {
  GIMPLE_PASS,
  "tmlower",                            /* name */
  gate_tm,                              /* gate */
  execute_lower_tm,                     /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_gimple_lcf,                      /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};
 
/* Collect region information for each transaction.  */

struct tm_region
{
  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.  */
  gimple transaction_stmt;

  /* The entry block to this region.  */
  basic_block entry_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive).  Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};
 
/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

static struct tm_region *all_tm_regions;
static bitmap_obstack tm_obstack;


/* A subroutine of tm_region_init.  Record the existence of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */

static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
{
  struct tm_region *region;

  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));

  if (outer)
    {
      region->next = outer->inner;
      outer->inner = region;
    }
  else
    {
      region->next = all_tm_regions;
      all_tm_regions = region;
    }
  region->inner = NULL;
  region->outer = outer;

  region->transaction_stmt = stmt;

  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;

  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

  return region;
}

/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;

  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
        {
          tree fn = gimple_call_fndecl (g);
          if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
            {
              if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
                   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
                  && region->exit_blocks)
                {
                  bitmap_set_bit (region->exit_blocks, bb->index);
                  region = region->outer;
                  break;
                }
              if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
                bitmap_set_bit (region->irr_blocks, bb->index);
            }
        }
    }
  return region;
}

/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  VEC(basic_block, heap) *queue = NULL;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  struct tm_region **region_worklist;

  all_tm_regions = region;
  bb = single_succ (ENTRY_BLOCK_PTR);

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
  region_worklist =
    (struct tm_region **) xcalloc (sizeof (struct tm_region *),
                                  n_basic_blocks + NUM_FIXED_BLOCKS + 2);

  VEC_safe_push (basic_block, heap, queue, bb);
  region_worklist[bb->index] = region;
  do
    {
      bb = VEC_pop (basic_block, queue);
      region = region_worklist[bb->index];
      region_worklist[bb->index] = NULL;

      /* Record exit and irrevocable blocks.  */
      region = tm_region_init_1 (region, bb);

      /* Check for the last statement in the block beginning a new region.  */
      g = last_stmt (bb);
      old_region = region;
      if (g && gimple_code (g) == GIMPLE_TRANSACTION)
        region = tm_region_init_0 (region, bb, g);

      /* Process subsequent blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            VEC_safe_push (basic_block, heap, queue, e->dest);

            /* If the current block started a new region, make sure that only
               the entry block of the new region is associated with this region.
               Other successors are still part of the old region.  */
            if (old_region != region && e->dest != region->entry_block)
              region_worklist[e->dest->index] = old_region;
            else
              region_worklist[e->dest->index] = region;
          }
    }
  while (!VEC_empty (basic_block, queue));
  VEC_free (basic_block, heap, queue);
  BITMAP_FREE (visited_blocks);
  free (region_worklist);
}
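
/* For example (illustrative only), assuming the inner transaction
   survived lowering (say, because it contains an abort),

        __transaction {        // R1
          __transaction {      // R2
            ... __tm_abort; ...
          }
        }
        __transaction { }      // R3

   produces a tree in which R1 and R3 are chained on ALL_TM_REGIONS
   via NEXT (most recently discovered first), R2 hangs off R1->inner,
   and R2->outer points back to R1.  */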
 
/* The "gate" function for all transactional memory expansion and optimization
   passes.  We collect region information for each top-level transaction, and
   if we don't find any, we skip all of the TM passes.  Each region will have
   all of the exit blocks recorded, and the originating statement.  */

static bool
gate_tm_init (void)
{
  if (!flag_tm)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_initialize (&tm_obstack);

  /* If the function is a TM_CLONE, then the entire function is the region.  */
  if (decl_is_tm_clone (current_function_decl))
    {
      struct tm_region *region = (struct tm_region *)
        obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
      memset (region, 0, sizeof (*region));
      region->entry_block = single_succ (ENTRY_BLOCK_PTR);
      /* For a clone, the entire function is the region.  But even if
         we don't need to record any exit blocks, we may need to
         record irrevocable blocks.  */
      region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

      tm_region_init (region);
    }
  else
    {
      tm_region_init (NULL);

      /* If we didn't find any regions, cleanup and skip the whole tree
         of tm-related optimizations.  */
      if (all_tm_regions == NULL)
        {
          bitmap_obstack_release (&tm_obstack);
          return false;
        }
    }

  return true;
}

struct gimple_opt_pass pass_tm_init =
{
 {
  GIMPLE_PASS,
  "*tminit",                            /* name */
  gate_tm_init,                         /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
 }
};
 
/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by REGION.  */

static inline void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
  if (region && region->transaction_stmt)
    {
      flags |= gimple_transaction_subcode (region->transaction_stmt);
      gimple_transaction_set_subcode (region->transaction_stmt, flags);
    }
}
 
/* Construct a memory load in a transactional context.  Return the
   gimple statement performing the load, or NULL if there is no
   TM_LOAD builtin of the appropriate size to do the load.

   LOC is the location to use for the new statement(s).  */

static gimple
build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree t, type = TREE_TYPE (rhs), decl;
  gimple gcall;

  if (type == float_type_node)
    code = BUILT_IN_TM_LOAD_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOAD_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOAD_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
           && host_integerp (TYPE_SIZE_UNIT (type), 1))
    {
      switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
        {
        case 1:
          code = BUILT_IN_TM_LOAD_1;
          break;
        case 2:
          code = BUILT_IN_TM_LOAD_2;
          break;
        case 4:
          code = BUILT_IN_TM_LOAD_4;
          break;
        case 8:
          code = BUILT_IN_TM_LOAD_8;
          break;
        }
    }

  if (code == END_BUILTINS)
    {
      decl = targetm.vectorize.builtin_tm_load (type);
      if (!decl)
        return NULL;
    }
  else
    decl = builtin_decl_explicit (code);

  t = gimplify_addr (gsi, rhs);
  gcall = gimple_build_call (decl, 1, t);
  gimple_set_location (gcall, loc);

  t = TREE_TYPE (TREE_TYPE (decl));
  if (useless_type_conversion_p (type, t))
    {
      gimple_call_set_lhs (gcall, lhs);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
    }
  else
    {
      gimple g;
      tree temp;

      temp = make_rename_temp (t, NULL);
      gimple_call_set_lhs (gcall, temp);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
      g = gimple_build_assign (lhs, t);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  return gcall;
}

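/* For instance (illustrative only), a 4-byte integer load

        x = y;

   is emitted roughly as

        x = __builtin__ITM_RU4 (&y);

   a float picks the _FLOAT variant, and a type with no sized builtin
   and no target-supplied vector variant makes this function return
   NULL so that the caller falls back to TM_MEMMOVE.  */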
 
/* Similarly for storing TYPE in a transactional context.  */

static gimple
build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree t, fn, type = TREE_TYPE (rhs), simple_type;
  gimple gcall;

  if (type == float_type_node)
    code = BUILT_IN_TM_STORE_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_STORE_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_STORE_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
           && host_integerp (TYPE_SIZE_UNIT (type), 1))
    {
      switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
        {
        case 1:
          code = BUILT_IN_TM_STORE_1;
          break;
        case 2:
          code = BUILT_IN_TM_STORE_2;
          break;
        case 4:
          code = BUILT_IN_TM_STORE_4;
          break;
        case 8:
          code = BUILT_IN_TM_STORE_8;
          break;
        }
    }

  if (code == END_BUILTINS)
    {
      fn = targetm.vectorize.builtin_tm_store (type);
      if (!fn)
        return NULL;
    }
  else
    fn = builtin_decl_explicit (code);

  simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fn))));

  if (TREE_CODE (rhs) == CONSTRUCTOR)
    {
      /* Handle the easy initialization to zero.  */
      if (CONSTRUCTOR_ELTS (rhs) == 0)
        rhs = build_int_cst (simple_type, 0);
      else
        {
          /* ...otherwise punt to the caller and probably use
             BUILT_IN_TM_MEMMOVE, because we can't wrap a
             VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
             valid gimple.  */
          return NULL;
        }
    }
  else if (!useless_type_conversion_p (simple_type, type))
    {
      gimple g;
      tree temp;

      temp = make_rename_temp (simple_type, NULL);
      t = fold_build1 (VIEW_CONVERT_EXPR, simple_type, rhs);
      g = gimple_build_assign (temp, t);
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);

      rhs = temp;
    }

  t = gimplify_addr (gsi, lhs);
  gcall = gimple_build_call (fn, 2, t, rhs);
  gimple_set_location (gcall, loc);
  gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

  return gcall;
}

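/* Likewise (illustrative only), a 4-byte store

        x = 42;

   becomes roughly

        __builtin__ITM_WU4 (&x, 42);

   an empty CONSTRUCTOR (zero initialization) is rewritten as a store
   of zero in SIMPLE_TYPE, and a non-empty CONSTRUCTOR returns NULL so
   the caller can fall back to TM_MEMMOVE, as noted above.  */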
 
/* Expand an assignment statement into transactional builtins.  */

static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  bool store_p = requires_barrier (region->entry_block, lhs, NULL);
  bool load_p = requires_barrier (region->entry_block, rhs, NULL);
  gimple gcall = NULL;

  if (!load_p && !store_p)
    {
      /* Add thread private addresses to log if applicable.  */
      requires_barrier (region->entry_block, lhs, stmt);
      gsi_next (gsi);
      return;
    }

  gsi_remove (gsi, true);

  if (load_p && !store_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      gcall = build_tm_load (loc, lhs, rhs, gsi);
    }
  else if (store_p && !load_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      gcall = build_tm_store (loc, lhs, rhs, gsi);
    }
  if (!gcall)
    {
      tree lhs_addr, rhs_addr, tmp;

      if (load_p)
        transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      if (store_p)
        transaction_subcode_ior (region, GTMA_HAVE_STORE);

      /* ??? Figure out if there's any possible overlap between the LHS
         and the RHS and if not, use MEMCPY.  */

      if (load_p && is_gimple_reg (lhs))
        {
          tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
          lhs_addr = build_fold_addr_expr (tmp);
        }
      else
        {
          tmp = NULL_TREE;
          lhs_addr = gimplify_addr (gsi, lhs);
        }
      rhs_addr = gimplify_addr (gsi, rhs);
      gcall = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
                                 3, lhs_addr, rhs_addr,
                                 TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
      gimple_set_location (gcall, loc);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      if (tmp)
        {
          gcall = gimple_build_assign (lhs, tmp);
          gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
        }
    }

  /* Now that we have the load/store in its instrumented form, add
     thread private addresses to the log if applicable.  */
  if (!store_p)
    requires_barrier (region->entry_block, lhs, gcall);

  /* add_stmt_to_tm_region  (region, gcall); */
}

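/* A sketch of the fallback path above (illustrative only): an
   aggregate copy with no sized builtin, such as

        dst = src;

   is replaced by roughly

        __builtin__ITM_memmoveRtWt (&dst, &src, sizeof (dst));

   and when DST is a gimple register, the builtin writes into a stack
   temporary whose value is then assigned to DST.  */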
 
/* Expand a call statement as appropriate for a transaction.  That is,
   either verify that the call does not affect the transaction, or
   redirect the call to a clone that handles transactions, or change
   the transaction state to IRREVOCABLE.  Return true if the call is
   one of the builtins that end a transaction.  */

static bool
expand_call_tm (struct tm_region *region,
                gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree lhs = gimple_call_lhs (stmt);
  tree fn_decl;
  struct cgraph_node *node;
  bool retval = false;

  fn_decl = gimple_call_fndecl (stmt);

  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
      || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
    transaction_subcode_ior (region, GTMA_HAVE_STORE);

  if (is_tm_pure_call (stmt))
    return false;

  if (fn_decl)
    retval = is_tm_ending_fndecl (fn_decl);
  if (!retval)
    {
      /* Assume all non-const/pure calls write to memory, except
         transaction ending builtins.  */
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  /* For indirect calls, we already generated a call into the runtime.  */
  if (!fn_decl)
    {
      tree fn = gimple_call_fn (stmt);

      /* We are guaranteed never to go irrevocable on a safe or pure
         call, and the pure call was handled above.  */
      if (is_tm_safe (fn))
        return false;
      else
        transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

      return false;
    }

  node = cgraph_get_node (fn_decl);
  /* All calls should have cgraph here.  */
  gcc_assert (node);
  if (node->local.tm_may_enter_irr)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);

  if (is_tm_abort (fn_decl))
    {
      transaction_subcode_ior (region, GTMA_HAVE_ABORT);
      return true;
    }

  /* Instrument the store if needed.

     If the assignment happens inside the function call (return slot
     optimization), there is no instrumentation to be done, since
     the callee should have done the right thing.  */
  if (lhs && requires_barrier (region->entry_block, lhs, stmt)
      && !gimple_call_return_slot_opt_p (stmt))
    {
      tree tmp = make_rename_temp (TREE_TYPE (lhs), NULL);
      location_t loc = gimple_location (stmt);
      edge fallthru_edge = NULL;

      /* Remember if the call was going to throw.  */
      if (stmt_can_throw_internal (stmt))
        {
          edge_iterator ei;
          edge e;
          basic_block bb = gimple_bb (stmt);

          FOR_EACH_EDGE (e, ei, bb->succs)
            if (e->flags & EDGE_FALLTHRU)
              {
                fallthru_edge = e;
                break;
              }
        }

      gimple_call_set_lhs (stmt, tmp);
      update_stmt (stmt);
      stmt = gimple_build_assign (lhs, tmp);
      gimple_set_location (stmt, loc);

      /* We cannot throw in the middle of a BB.  If the call was going
         to throw, place the instrumentation on the fallthru edge, so
         the call remains the last statement in the block.  */
      if (fallthru_edge)
        {
          gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
          gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
          expand_assign_tm (region, &fallthru_gsi);
          gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
          pending_edge_inserts_p = true;
        }
      else
        {
          gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
          expand_assign_tm (region, gsi);
        }

      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }

  return retval;
}

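/* For example (illustrative only), a value-returning call

        x = foo ();

   where X needs a barrier is rewritten as

        tmp = foo ();
        x = tmp;   // this store is then instrumented

   with the instrumented store placed on the fallthru edge when FOO
   can throw internally, so the call remains last in its block.  */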
 
/* Expand all statements in BB as appropriate for being inside
   a transaction.  */

static void
expand_block_tm (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
        {
        case GIMPLE_ASSIGN:
          /* Only memory reads/writes need to be instrumented.  */
          if (gimple_assign_single_p (stmt)
              && !gimple_clobber_p (stmt))
            {
              expand_assign_tm (region, &gsi);
              continue;
            }
          break;

        case GIMPLE_CALL:
          if (expand_call_tm (region, &gsi))
            return;
          break;

        case GIMPLE_ASM:
          gcc_unreachable ();

        default:
          break;
        }
      if (!gsi_end_p (gsi))
        gsi_next (&gsi);
    }
}

/* Return the list of basic-blocks in REGION.

   STOP_AT_IRREVOCABLE_P is true if the caller is not interested in
   blocks following a TM_IRREVOCABLE call.  */

static VEC (basic_block, heap) *
get_tm_region_blocks (basic_block entry_block,
                      bitmap exit_blocks,
                      bitmap irr_blocks,
                      bitmap all_region_blocks,
                      bool stop_at_irrevocable_p)
{
  VEC(basic_block, heap) *bbs = NULL;
  unsigned i;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);

  i = 0;
  VEC_safe_push (basic_block, heap, bbs, entry_block);
  bitmap_set_bit (visited_blocks, entry_block->index);

  do
    {
      basic_block bb = VEC_index (basic_block, bbs, i++);

      if (exit_blocks
          && bitmap_bit_p (exit_blocks, bb->index))
        continue;

      if (stop_at_irrevocable_p
          && irr_blocks
          && bitmap_bit_p (irr_blocks, bb->index))
        continue;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            VEC_safe_push (basic_block, heap, bbs, e->dest);
          }
    }
  while (i < VEC_length (basic_block, bbs));

  if (all_region_blocks)
    bitmap_ior_into (all_region_blocks, visited_blocks);

  BITMAP_FREE (visited_blocks);
  return bbs;
}
 
/* Set the IN_TRANSACTION for all gimple statements that appear in a
   transaction.  */

void
compute_transaction_bits (void)
{
  struct tm_region *region;
  VEC (basic_block, heap) *queue;
  unsigned int i;
  gimple_stmt_iterator gsi;
  basic_block bb;

  /* ?? Perhaps we need to abstract gate_tm_init further, because we
     certainly don't need it to calculate CDI_DOMINATOR info.  */
  gate_tm_init ();

  for (region = all_tm_regions; region; region = region->next)
    {
      queue = get_tm_region_blocks (region->entry_block,
                                    region->exit_blocks,
                                    region->irr_blocks,
                                    NULL,
                                    /*stop_at_irr_p=*/true);
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          {
            gimple stmt = gsi_stmt (gsi);
            gimple_set_in_transaction (stmt, true);
          }
      VEC_free (basic_block, heap, queue);
    }

  if (all_tm_regions)
    bitmap_obstack_release (&tm_obstack);
}

/* Entry point to the MARK phase of TM expansion.  Here we replace
   transactional memory statements with calls to builtins, and function
   calls with their transactional clones (if available).  But we don't
   yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges.  */

static unsigned int
execute_tm_mark (void)
{
  struct tm_region *region;
  basic_block bb;
  VEC (basic_block, heap) *queue;
  size_t i;

  queue = VEC_alloc (basic_block, heap, 10);
  pending_edge_inserts_p = false;

  for (region = all_tm_regions; region; region = region->next)
    {
      tm_log_init ();
      /* If we have a transaction...  */
      if (region->exit_blocks)
        {
          unsigned int subcode
            = gimple_transaction_subcode (region->transaction_stmt);

          /* Collect a new SUBCODE set, now that optimizations are done...  */
          if (subcode & GTMA_DOES_GO_IRREVOCABLE)
            subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
                        | GTMA_MAY_ENTER_IRREVOCABLE);
          else
            subcode &= GTMA_DECLARATION_MASK;
          gimple_transaction_set_subcode (region->transaction_stmt, subcode);
        }

      queue = get_tm_region_blocks (region->entry_block,
                                    region->exit_blocks,
                                    region->irr_blocks,
                                    NULL,
                                    /*stop_at_irr_p=*/true);
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
        expand_block_tm (region, bb);
      VEC_free (basic_block, heap, queue);

      tm_log_emit ();
    }

  if (pending_edge_inserts_p)
    gsi_commit_edge_inserts ();
  return 0;
}
 
struct gimple_opt_pass pass_tm_mark =
{
 {
  GIMPLE_PASS,
  "tmmark",                             /* name */
  NULL,                                 /* gate */
  execute_tm_mark,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_update_ssa
  | TODO_verify_ssa
  | TODO_dump_func,                     /* todo_flags_finish */
 }
};
 
/* Create an abnormal call edge from BB to the first block of the region
   represented by REGION.  Also record the edge in the TM_RESTART map.  */

static inline void
make_tm_edge (gimple stmt, basic_block bb, struct tm_region *region)
{
  void **slot;
  struct tm_restart_node *n, dummy;

  if (cfun->gimple_df->tm_restart == NULL)
    cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
                                                   struct_ptr_eq, ggc_free);

  dummy.stmt = stmt;
  dummy.label_or_list = gimple_block_label (region->entry_block);
  slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
  n = (struct tm_restart_node *) *slot;
  if (n == NULL)
    {
      n = ggc_alloc_tm_restart_node ();
      *n = dummy;
    }
  else
    {
      tree old = n->label_or_list;
      if (TREE_CODE (old) == LABEL_DECL)
        old = tree_cons (NULL, old, NULL);
      n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
    }

  make_edge (bb, region->entry_block, EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
}

 
/* Split block BB as necessary for every builtin function we added, and
   wire up the abnormal back edges implied by the transaction restart.  */

static void
expand_block_edges (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
      gimple stmt = gsi_stmt (gsi);

      /* ??? TM_COMMIT (and any other tm builtin function) in a nested
         transaction has an abnormal edge back to the outer-most transaction
         (there are no nested retries), while a TM_ABORT also has an abnormal
         backedge to the inner-most transaction.  We haven't actually saved
         the inner-most transaction here.  We should be able to get to it
         via the region_nr saved on STMT, and read the transaction_stmt from
         that, and find the first region block from there.  */
      /* ??? Shouldn't we split for any non-pure, non-irrevocable function?  */
      if (gimple_code (stmt) == GIMPLE_CALL
          && (gimple_call_flags (stmt) & ECF_TM_BUILTIN) != 0)
        {
          if (gsi_one_before_end_p (gsi))
            make_tm_edge (stmt, bb, region);
          else
            {
              edge e = split_block (bb, stmt);
              make_tm_edge (stmt, bb, region);
              bb = e->dest;
              gsi = gsi_start_bb (bb);
            }

          /* Delete any tail-call annotation that may have been added.
             The tail-call pass may have mis-identified the commit as being
             a candidate because we had not yet added this restart edge.  */
          gimple_call_set_tail (stmt, false);
        }

      gsi_next (&gsi);
    }
}
 
/* Expand the GIMPLE_TRANSACTION statement into the STM library call.  */

static void
expand_transaction (struct tm_region *region)
{
  tree status, tm_start;
  basic_block atomic_bb, slice_bb;
  gimple_stmt_iterator gsi;
  tree t1, t2;
  gimple g;
  int flags, subcode;

  tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  status = make_rename_temp (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");

  /* ??? There are plenty of bits here we're not computing.  */
  subcode = gimple_transaction_subcode (region->transaction_stmt);
  if (subcode & GTMA_DOES_GO_IRREVOCABLE)
    flags = PR_DOESGOIRREVOCABLE | PR_UNINSTRUMENTEDCODE;
  else
    flags = PR_INSTRUMENTEDCODE;
  if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
    flags |= PR_HASNOIRREVOCABLE;
  /* If the transaction does not have an abort in lexical scope and is not
     marked as an outer transaction, then it will never abort.  */
  if ((subcode & GTMA_HAVE_ABORT) == 0
      && (subcode & GTMA_IS_OUTER) == 0)
    flags |= PR_HASNOABORT;
  if ((subcode & GTMA_HAVE_STORE) == 0)
    flags |= PR_READONLY;
  t2 = build_int_cst (TREE_TYPE (status), flags);
  g = gimple_build_call (tm_start, 1, t2);
  gimple_call_set_lhs (g, status);
  gimple_set_location (g, gimple_location (region->transaction_stmt));

  atomic_bb = gimple_bb (region->transaction_stmt);

  if (!VEC_empty (tree, tm_log_save_addresses))
    tm_log_emit_saves (region->entry_block, atomic_bb);

  gsi = gsi_last_bb (atomic_bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  gsi_remove (&gsi, true);

  if (!VEC_empty (tree, tm_log_save_addresses))
    region->entry_block =
      tm_log_emit_save_or_restores (region->entry_block,
                                    A_RESTORELIVEVARIABLES,
                                    status,
                                    tm_log_emit_restores,
                                    atomic_bb,
                                    FALLTHRU_EDGE (atomic_bb),
                                    &slice_bb);
  else
    slice_bb = atomic_bb;

  /* If we have an ABORT statement, create a test following the start
     call to perform the abort.  */
  if (gimple_transaction_label (region->transaction_stmt))
    {
      edge e;
      basic_block test_bb;

      test_bb = create_empty_bb (slice_bb);
      if (VEC_empty (tree, tm_log_save_addresses))
        region->entry_block = test_bb;
      gsi = gsi_last_bb (test_bb);

      t1 = make_rename_temp (TREE_TYPE (status), NULL);
      t2 = build_int_cst (TREE_TYPE (status), A_ABORTTRANSACTION);
      g = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

      t2 = build_int_cst (TREE_TYPE (status), 0);
      g = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);

      e = FALLTHRU_EDGE (slice_bb);
      redirect_edge_pred (e, test_bb);
      e->flags = EDGE_FALSE_VALUE;
      e->probability = PROB_ALWAYS - PROB_VERY_UNLIKELY;

      e = BRANCH_EDGE (atomic_bb);
      redirect_edge_pred (e, test_bb);
      e->flags = EDGE_TRUE_VALUE;
      e->probability = PROB_VERY_UNLIKELY;

      e = make_edge (slice_bb, test_bb, EDGE_FALLTHRU);
    }

  /* If we have no abort, but we do have PHIs at the beginning of the atomic
     region, that means we have a loop at the beginning of the atomic region
     that shares the first block.  This can cause problems with the abnormal
     edges we're about to add for the transaction restart.  Solve this by
     adding a new empty block to receive the abnormal edges.  */
  else if (phi_nodes (region->entry_block))
    {
      edge e;
      basic_block empty_bb;

      region->entry_block = empty_bb = create_empty_bb (atomic_bb);

      e = FALLTHRU_EDGE (atomic_bb);
      redirect_edge_pred (e, empty_bb);

      e = make_edge (atomic_bb, empty_bb, EDGE_FALLTHRU);
    }

  /* The GIMPLE_TRANSACTION statement no longer exists.  */
  region->transaction_stmt = NULL;
}
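
/* Putting the pieces together, the code generated above looks roughly
   like this (an illustrative sketch, not a literal dump):

        tm_state = __builtin__ITM_beginTransaction (PR_INSTRUMENTEDCODE | ...);
        if (tm_state & A_RESTORELIVEVARIABLES)
          ... restore logged locals ...
        if (tm_state & A_ABORTTRANSACTION)
          goto over;                    // the "over" label
        ... instrumented body ...
        __builtin___tm_commit ();
      over:

   The begin call returns the set of A_* actions chosen by the
   runtime, and the abort test is marked PROB_VERY_UNLIKELY.  */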
 
static void expand_regions (struct tm_region *);

/* Helper function for expand_regions.  Expand REGION and recurse to
   the inner region.  */

static void
expand_regions_1 (struct tm_region *region)
{
  if (region->exit_blocks)
    {
      unsigned int i;
      basic_block bb;
      VEC (basic_block, heap) *queue;

      /* Collect the set of blocks in this region.  Do this before
         splitting edges, so that we don't have to play with the
         dominator tree in the middle.  */
      queue = get_tm_region_blocks (region->entry_block,
                                    region->exit_blocks,
                                    region->irr_blocks,
                                    NULL,
                                    /*stop_at_irr_p=*/false);
      expand_transaction (region);
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
        expand_block_edges (region, bb);
      VEC_free (basic_block, heap, queue);
    }
  if (region->inner)
    expand_regions (region->inner);
}

/* Expand regions starting at REGION.  */

static void
expand_regions (struct tm_region *region)
{
  while (region)
    {
      expand_regions_1 (region);
      region = region->next;
    }
}

/* Entry point to the final expansion of transactional nodes.  */

static unsigned int
execute_tm_edges (void)
{
  expand_regions (all_tm_regions);
  tm_log_delete ();

  /* We've got to release the dominance info now, to indicate that it
     must be rebuilt completely.  Otherwise we'll crash trying to update
     the SSA web in the TODO section following this pass.  */
  free_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_release (&tm_obstack);
  all_tm_regions = NULL;

  return 0;
}

struct gimple_opt_pass pass_tm_edges =
{
 {
  GIMPLE_PASS,
  "tmedge",                             /* name */
  NULL,                                 /* gate */
  execute_tm_edges,                     /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_update_ssa
  | TODO_verify_ssa
  | TODO_dump_func,                     /* todo_flags_finish */
 }
};
 
/* A unique TM memory operation.  */
typedef struct tm_memop
{
  /* Unique ID that all memory operations to the same location have.  */
  unsigned int value_id;
  /* Address of load/store.  */
  tree addr;
} *tm_memop_t;

/* Sets for solving data flow equations in the memory optimization pass.  */
struct tm_memopt_bitmaps
{
  /* Stores available to this BB upon entry.  Basically, stores that
     dominate this BB.  */
  bitmap store_avail_in;
  /* Stores available at the end of this BB.  */
  bitmap store_avail_out;
  bitmap store_antic_in;
  bitmap store_antic_out;
  /* Reads available to this BB upon entry.  Basically, reads that
     dominate this BB.  */
  bitmap read_avail_in;
  /* Reads available at the end of this BB.  */
  bitmap read_avail_out;
  /* Reads performed in this BB.  */
  bitmap read_local;
  /* Writes performed in this BB.  */
  bitmap store_local;

  /* Temporary storage for pass.  */
  /* Is the current BB in the worklist?  */
  bool avail_in_worklist_p;
  /* Have we visited this BB?  */
  bool visited_p;
};

static bitmap_obstack tm_memopt_obstack;

/* Unique counter for TM loads and stores.  Loads and stores of the
   same address get the same ID.  */
static unsigned int tm_memopt_value_id;
static htab_t tm_memopt_value_numbers;

#define STORE_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
#define STORE_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
#define STORE_ANTIC_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
#define STORE_ANTIC_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
#define READ_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
#define READ_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
#define READ_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
#define STORE_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
#define AVAIL_IN_WORKLIST_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
#define BB_VISITED_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
 
/* Htab support.  Return a hash value for a `tm_memop'.  */
static hashval_t
tm_memop_hash (const void *p)
{
  const struct tm_memop *mem = (const struct tm_memop *) p;
  tree addr = mem->addr;
  /* We drill down to the SSA_NAME/DECL for the hash, but equality is
     actually done with operand_equal_p (see tm_memop_eq).  */
  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
  return iterative_hash_expr (addr, 0);
}

/* Htab support.  Return true if two tm_memop's are the same.  */
static int
tm_memop_eq (const void *p1, const void *p2)
{
  const struct tm_memop *mem1 = (const struct tm_memop *) p1;
  const struct tm_memop *mem2 = (const struct tm_memop *) p2;

  return operand_equal_p (mem1->addr, mem2->addr, 0);
}

/* Given a TM load/store in STMT, return the value number for the address
   it accesses.  */

static unsigned int
tm_memopt_value_number (gimple stmt, enum insert_option op)
{
  struct tm_memop tmpmem, *mem;
  void **slot;

  gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
  tmpmem.addr = gimple_call_arg (stmt, 0);
  slot = htab_find_slot (tm_memopt_value_numbers, &tmpmem, op);
  if (*slot)
    mem = (struct tm_memop *) *slot;
  else if (op == INSERT)
    {
      mem = XNEW (struct tm_memop);
      *slot = mem;
      mem->value_id = tm_memopt_value_id++;
      mem->addr = tmpmem.addr;
    }
  else
    gcc_unreachable ();
  return mem->value_id;
}
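
/* For example (illustrative only):

        __builtin__ITM_WU4 (&x, 1);    // INSERT: value id 0
        ... __builtin__ITM_RU4 (&x);   // same address: value id 0
        ... __builtin__ITM_RU4 (&y);   // new address: value id 1

   All operations on the same address share one id, which is the bit
   position used in the {STORE,READ}_* bitmaps.  */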
 
/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL.  */

static void
tm_memopt_accumulate_memops (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      bitmap bits;
      unsigned int loc;

      if (is_tm_store (stmt))
        bits = STORE_LOCAL (bb);
      else if (is_tm_load (stmt))
        bits = READ_LOCAL (bb);
      else
        continue;

      loc = tm_memopt_value_number (stmt, INSERT);
      bitmap_set_bit (bits, loc);
      if (dump_file)
        {
          fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
                   is_tm_load (stmt) ? "LOAD" : "STORE", loc,
                   gimple_bb (stmt)->index);
          print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
          fprintf (dump_file, "\n");
        }
    }
}

/* Prettily dump one of the memopt sets.  BITS is the bitmap to dump.  */

static void
dump_tm_memopt_set (const char *set_name, bitmap bits)
{
  unsigned i;
  bitmap_iterator bi;
  const char *comma = "";

  fprintf (dump_file, "TM memopt: %s: [", set_name);
  EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
    {
      htab_iterator hi;
      struct tm_memop *mem;

      /* Yeah, yeah, yeah.  Whatever.  This is just for debugging.  */
      FOR_EACH_HTAB_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
        if (mem->value_id == i)
          break;
      gcc_assert (mem->value_id == i);
      fprintf (dump_file, "%s", comma);
      comma = ", ";
      print_generic_expr (dump_file, mem->addr, 0);
    }
  fprintf (dump_file, "]\n");
}

/* Prettily dump all of the memopt sets in BLOCKS.  */

static void
dump_tm_memopt_sets (VEC (basic_block, heap) *blocks)
{
  size_t i;
  basic_block bb;

  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    {
      fprintf (dump_file, "------------BB %d---------\n", bb->index);
      dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
      dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
      dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
      dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
      dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
      dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
    }
}
 
/* Compute {STORE,READ}_AVAIL_IN for the basic block BB.  */

static void
tm_memopt_compute_avin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the AVOUT of any predecessor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      /* Make sure we have already visited this BB, and that it is
         thus initialized.

         If e->src->aux is NULL, this predecessor is actually on an
         enclosing transaction.  We only care about the current
         transaction, so ignore it.  */
      if (e->src->aux && BB_VISITED_P (e->src))
        {
          bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
          bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
          break;
        }
    }

  for (; ix < EDGE_COUNT (bb->preds); ix++)
    {
      e = EDGE_PRED (bb, ix);
      if (e->src->aux && BB_VISITED_P (e->src))
        {
          bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
          bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
        }
    }

  BB_VISITED_P (bb) = true;
}

/* Compute the STORE_ANTIC_IN for the basic block BB.  */

static void
tm_memopt_compute_antin (basic_block bb)
{
  edge e;
  unsigned ix;

  /* Seed with the ANTIC_OUT of any successor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      /* Make sure we have already visited this BB, and that it is
         thus initialized.  */
      if (BB_VISITED_P (e->dest))
        {
          bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
          break;
        }
    }

  for (; ix < EDGE_COUNT (bb->succs); ix++)
    {
      e = EDGE_SUCC (bb, ix);
      if (BB_VISITED_P (e->dest))
        bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
    }

  BB_VISITED_P (bb) = true;
}
 
3076
/* Compute the AVAIL sets for every basic block in BLOCKS.
3077
 
3078
   We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:
3079
 
3080
     AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
3081
     AVAIL_IN[bb]  = intersect (AVAIL_OUT[predecessors])
3082
 
3083
   This is basically what we do in lcm's compute_available(), but here
3084
   we calculate two sets of sets (one for STOREs and one for READs),
3085
   and we work on a region instead of the entire CFG.
3086
 
3087
   REGION is the TM region.
3088
   BLOCKS are the basic blocks in the region.  */
 
static void
tm_memopt_compute_available (struct tm_region *region,
                             VEC (basic_block, heap) *blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen, i;
  edge_iterator ei;
  bool changed;
 
  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qlen = VEC_length (basic_block, blocks) - 1;
  qin = qout = worklist =
    XNEWVEC (basic_block, qlen);
 
  /* Put every block in the region on the worklist.  */
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    {
      /* Seed AVAIL_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
      bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));
 
      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert the entry block, since it has an AVIN of
         null, and an AVOUT that has already been seeded in.  */
      if (bb != region->entry_block)
        *qin++ = bb;
    }
 
  /* The entry block has been initialized with the local sets.  */
  BB_VISITED_P (region->entry_block) = true;
 
  qin = worklist;
  qend = &worklist[qlen];
 
  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;
 
      if (qout >= qend)
        qout = worklist;
 
      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_avin (bb);
 
      /* Note: We do not add the LOCAL sets here because we already
         seeded the AVAIL_OUT sets with them.  */
      changed  = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
      changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
      if (changed
          && (region->exit_blocks == NULL
              || !bitmap_bit_p (region->exit_blocks, bb->index)))
        /* If the out state of this block changed, then we need to add
           its successors to the worklist if they are not already in.  */
        FOR_EACH_EDGE (e, ei, bb->succs)
          if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
            {
              *qin++ = e->dest;
              AVAIL_IN_WORKLIST_P (e->dest) = true;
              qlen++;
 
              if (qin >= qend)
                qin = worklist;
            }
    }
 
  free (worklist);
 
  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
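/* Implementation note (an aside, not original to this pass's comments):
   the worklist above is a fixed-size circular buffer.  QIN and QOUT
   wrap back to WORKLIST when they run past QEND, which is safe because
   AVAIL_IN_WORKLIST_P guarantees a block is never enqueued twice, so
   at most QLEN entries are ever live at once.  For example, with
   qlen == 3 the slot sequence visited by either pointer is
   0, 1, 2, 0, 1, 2, ...  */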
 
/* Compute ANTIC sets for every basic block in BLOCKS.
 
   We compute STORE_ANTIC_OUT as follows:
 
        STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
        STORE_ANTIC_IN[bb]  = intersect(STORE_ANTIC_OUT[successors])
 
   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */
 
static void
tm_memopt_compute_antic (struct tm_region *region,
                         VEC (basic_block, heap) *blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen;
  int i;
  edge_iterator ei;
 
  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qin = qout = worklist =
    XNEWVEC (basic_block, VEC_length (basic_block, blocks));
 
  for (qlen = 0, i = VEC_length (basic_block, blocks) - 1; i >= 0; --i)
    {
      bb = VEC_index (basic_block, blocks, i);
 
      /* Seed ANTIC_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));
 
      /* Put every block in the region on the worklist.  */
      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert exit blocks, since their ANTIC_IN is NULL,
         and their ANTIC_OUT has already been seeded in.  */
      if (region->exit_blocks
          && !bitmap_bit_p (region->exit_blocks, bb->index))
        {
          qlen++;
          *qin++ = bb;
        }
    }
 
  /* The exit blocks have been initialized with the local sets.  */
  if (region->exit_blocks)
    {
      unsigned int i;
      bitmap_iterator bi;
      EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
        BB_VISITED_P (BASIC_BLOCK (i)) = true;
    }
 
  qin = worklist;
  qend = &worklist[qlen];
 
  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;
 
      if (qout >= qend)
        qout = worklist;
 
      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_antin (bb);
 
      /* Note: We do not add the LOCAL sets here because we already
         seeded the ANTIC_OUT sets with them.  */
      if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
          && bb != region->entry_block)
        /* If the out state of this block changed, then we need to add
           its predecessors to the worklist if they are not already in.  */
        FOR_EACH_EDGE (e, ei, bb->preds)
          if (!AVAIL_IN_WORKLIST_P (e->src))
            {
              *qin++ = e->src;
              AVAIL_IN_WORKLIST_P (e->src) = true;
              qlen++;
 
              if (qin >= qend)
                qin = worklist;
            }
    }
 
  free (worklist);
 
  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
 
/* Offsets of load variants from TM_LOAD.  For example,
   BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
   See gtm-builtins.def.  */
#define TRANSFORM_RAR 1
#define TRANSFORM_RAW 2
#define TRANSFORM_RFW 3
/* Offsets of store variants from TM_STORE.  */
#define TRANSFORM_WAR 1
#define TRANSFORM_WAW 2
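/* For instance, assuming the gtm-builtins.def ordering described
   above, a read-after-write downgrade of a 4-byte load amounts to
   BUILT_IN_TM_LOAD_4 + TRANSFORM_RAW, i.e. BUILT_IN_TM_LOAD_RAW_4;
   likewise BUILT_IN_TM_STORE_4 + TRANSFORM_WAW selects the
   write-after-write store variant.  (Illustrative only; the offsets
   are applied generically in tm_memopt_transform_stmt below.)  */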
 
/* Inform about a load/store optimization.  */
 
static void
dump_tm_memopt_transform (gimple stmt)
{
  if (dump_file)
    {
      fprintf (dump_file, "TM memopt: transforming: ");
      print_gimple_stmt (dump_file, stmt, 0, 0);
      fprintf (dump_file, "\n");
    }
}
 
/* Perform a read/write optimization.  Replaces the TM builtin in STMT
   by a builtin that is OFFSET entries down in the builtins table in
   gtm-builtins.def.  */
 
static void
tm_memopt_transform_stmt (unsigned int offset,
                          gimple stmt,
                          gimple_stmt_iterator *gsi)
{
  tree fn = gimple_call_fn (stmt);
  gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
  TREE_OPERAND (fn, 0)
    = builtin_decl_explicit ((enum built_in_function)
                             (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
                              + offset));
  gimple_call_set_fn (stmt, fn);
  gsi_replace (gsi, stmt, true);
  dump_tm_memopt_transform (stmt);
}
 
/* Perform the actual TM memory optimization transformations in the
   basic blocks in BLOCKS.  */
 
static void
tm_memopt_transform_blocks (VEC (basic_block, heap) *blocks)
{
  size_t i;
  basic_block bb;
  gimple_stmt_iterator gsi;
 
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          bitmap read_avail = READ_AVAIL_IN (bb);
          bitmap store_avail = STORE_AVAIL_IN (bb);
          bitmap store_antic = STORE_ANTIC_OUT (bb);
          unsigned int loc;
 
          if (is_tm_simple_load (stmt))
            {
              loc = tm_memopt_value_number (stmt, NO_INSERT);
              if (store_avail && bitmap_bit_p (store_avail, loc))
                tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
              else if (store_antic && bitmap_bit_p (store_antic, loc))
                {
                  tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
                  bitmap_set_bit (store_avail, loc);
                }
              else if (read_avail && bitmap_bit_p (read_avail, loc))
                tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
              else
                bitmap_set_bit (read_avail, loc);
            }
          else if (is_tm_simple_store (stmt))
            {
              loc = tm_memopt_value_number (stmt, NO_INSERT);
              if (store_avail && bitmap_bit_p (store_avail, loc))
                tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
              else
                {
                  if (read_avail && bitmap_bit_p (read_avail, loc))
                    tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
                  bitmap_set_bit (store_avail, loc);
                }
            }
        }
    }
}
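/* Two small worked examples (hypothetical GIMPLE, illustration only).

   (1) Loads only, no store anywhere on the path:
        t1 = TM_LOAD (&X);   <-- first access: stays a plain load,
                                 X enters READ_AVAIL
        t2 = TM_LOAD (&X);   <-- X in READ_AVAIL: becomes RAR

   (2) A store follows on every path, so X is in STORE_ANTIC_OUT:
        t1 = TM_LOAD (&X);   <-- becomes RFW (read-for-write) and
                                 seeds STORE_AVAIL with X
        TM_STORE (&X, t2);   <-- X in STORE_AVAIL: becomes WAW
        t3 = TM_LOAD (&X);   <-- X in STORE_AVAIL: becomes RAW  */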
 
/* Return a new set of bitmaps for a BB.  */
 
static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
{
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
}
 
/* Free sets computed for each BB.  */
 
static void
tm_memopt_free_sets (VEC (basic_block, heap) *blocks)
{
  size_t i;
  basic_block bb;
 
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    bb->aux = NULL;
}
 
/* Clear the visited bit for every basic block in BLOCKS.  */
 
static void
tm_memopt_clear_visited (VEC (basic_block, heap) *blocks)
{
  size_t i;
  basic_block bb;
 
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    BB_VISITED_P (bb) = false;
}
 
/* Replace TM load/stores with hints for the runtime.  We handle
   things like read-after-write, write-after-read, read-after-read,
   read-for-write, etc.  */
 
static unsigned int
execute_tm_memopt (void)
{
  struct tm_region *region;
  VEC (basic_block, heap) *bbs;
 
  tm_memopt_value_id = 0;
  tm_memopt_value_numbers = htab_create (10, tm_memop_hash, tm_memop_eq, free);
 
  for (region = all_tm_regions; region; region = region->next)
    {
      /* All the TM stores/loads in the current region.  */
      size_t i;
      basic_block bb;
 
      bitmap_obstack_initialize (&tm_memopt_obstack);
 
      /* Save all BBs for the current region.  */
      bbs = get_tm_region_blocks (region->entry_block,
                                  region->exit_blocks,
                                  region->irr_blocks,
                                  NULL,
                                  false);
 
      /* Collect all the memory operations.  */
      for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
        {
          bb->aux = tm_memopt_init_sets ();
          tm_memopt_accumulate_memops (bb);
        }
 
      /* Solve data flow equations and transform each block accordingly.  */
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_available (region, bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_antic (region, bbs);
      tm_memopt_transform_blocks (bbs);
 
      tm_memopt_free_sets (bbs);
      VEC_free (basic_block, heap, bbs);
      bitmap_obstack_release (&tm_memopt_obstack);
      htab_empty (tm_memopt_value_numbers);
    }
 
  htab_delete (tm_memopt_value_numbers);
  return 0;
}
 
static bool
gate_tm_memopt (void)
{
  return flag_tm && optimize > 0;
}
 
struct gimple_opt_pass pass_tm_memopt =
{
 {
  GIMPLE_PASS,
  "tmmemopt",                           /* name */
  gate_tm_memopt,                       /* gate */
  execute_tm_memopt,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
 }
};
 
 
/* Interprocedural analysis for the creation of transactional clones.
   The aim of this pass is to find which functions are referenced in
   a non-irrevocable transaction context, and for those over which
   we have control (or user directive), create a version of the
   function which uses only the transactional interface to reference
   protected memories.  This analysis proceeds in several steps:
 
     (1) Collect the set of all possible transactional clones:
 
        (a) For all local public functions marked tm_callable, push
            them onto the tm_callee queue.
 
        (b) For all local functions, scan for calls in transaction blocks.
            Push the caller and callee onto the tm_caller and tm_callee
            queues.  Count the number of callers for each callee.
 
        (c) For each local function on the callee list, assume we will
            create a transactional clone.  Push *all* calls onto the
            callee queues; count the number of clone callers separately
            from the number of original callers.
 
     (2) Propagate irrevocable status up the dominator tree:
 
        (a) Any external function on the callee list that is not marked
            tm_callable is irrevocable.  Push all callers of such onto
            a worklist.
 
        (b) For each function on the worklist, mark each block that
            contains an irrevocable call.  Use the AND operator to
            propagate that mark up the dominator tree.
 
        (c) If we reach the entry block for a possible transactional
            clone, then the transactional clone is irrevocable, and
            we should not create the clone after all.  Push all
            callers onto the worklist.
 
        (d) Place tm_irrevocable calls at the beginning of the relevant
            blocks.  Special case here is the entry block for the entire
            transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
            the library to begin the region in serial mode.  Decrement
            the call count for all callees in the irrevocable region.
 
     (3) Create the transactional clones:
 
        Any tm_callee that still has a non-zero call count is cloned.
*/
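/* A hypothetical end-to-end example of the steps above (names invented
   for illustration):

        void f (void) { ... }              // local, unmarked
        __transaction_atomic { f (); }     // somewhere in the TU

   Step (1b) queues f with one caller.  If f transitively calls an
   external function not marked tm_callable, step (2a) marks f
   irrevocable, step (2d) plants a tm_irrevocable call and decrements
   f's call count, and step (3) then skips cloning f.  Otherwise f
   keeps a non-zero count and step (3) emits its transactional clone
   (mangled with the _ZGTt prefix; see tm_mangle below).  */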
 
/* This structure is stored in the AUX field of each cgraph_node.  */
struct tm_ipa_cg_data
{
  /* The clone of the function that got created.  */
  struct cgraph_node *clone;
 
  /* The tm regions in the normal function.  */
  struct tm_region *all_tm_regions;
 
  /* The blocks of the normal/clone functions that contain irrevocable
     calls, or blocks that are post-dominated by irrevocable calls.  */
  bitmap irrevocable_blocks_normal;
  bitmap irrevocable_blocks_clone;
 
  /* The blocks of the normal function that are involved in transactions.  */
  bitmap transaction_blocks_normal;
 
  /* The number of callers to the transactional clone of this function
     from normal and transactional clones respectively.  */
  unsigned tm_callers_normal;
  unsigned tm_callers_clone;
 
  /* True if all calls to this function's transactional clone
     are irrevocable.  Also automatically true if the function
     has no transactional clone.  */
  bool is_irrevocable;
 
  /* Flags indicating the presence of this function in various queues.  */
  bool in_callee_queue;
  bool in_worklist;
 
  /* Flags indicating the kind of scan desired while in the worklist.  */
  bool want_irr_scan_normal;
};
 
typedef struct cgraph_node *cgraph_node_p;
 
DEF_VEC_P (cgraph_node_p);
DEF_VEC_ALLOC_P (cgraph_node_p, heap);
 
typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
 
/* Return the ipa data associated with NODE, allocating zeroed memory
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   and set *NODE accordingly.  */
 
static struct tm_ipa_cg_data *
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
  struct tm_ipa_cg_data *d;
 
  if (traverse_aliases && (*node)->alias)
    *node = cgraph_get_node ((*node)->thunk.alias);
 
  d = (struct tm_ipa_cg_data *) (*node)->aux;
 
  if (d == NULL)
    {
      d = (struct tm_ipa_cg_data *)
        obstack_alloc (&tm_obstack.obstack, sizeof (*d));
      (*node)->aux = (void *) d;
      memset (d, 0, sizeof (*d));
    }
 
  return d;
}
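/* Typical usage (a sketch; note the double indirection):

        struct cgraph_node *node = ...;
        struct tm_ipa_cg_data *d = get_cg_data (&node, true);

   On return NODE may have been rewritten to point at the alias
   target, so callers that pass true must keep using the updated
   NODE rather than the original pointer.  */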
 
/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
   it is already present.  */
 
static void
maybe_push_queue (struct cgraph_node *node,
                  cgraph_node_queue *queue_p, bool *in_queue_p)
{
  if (!*in_queue_p)
    {
      *in_queue_p = true;
      VEC_safe_push (cgraph_node_p, heap, *queue_p, node);
    }
}
 
/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
   Queue all callees within block BB.  */
 
static void
ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
                         basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl)
            {
              struct tm_ipa_cg_data *d;
              unsigned *pcallers;
              struct cgraph_node *node;
 
              if (is_tm_ending_fndecl (fndecl))
                continue;
              if (find_tm_replacement_function (fndecl))
                continue;
 
              node = cgraph_get_node (fndecl);
              gcc_assert (node != NULL);
              d = get_cg_data (&node, true);
 
              pcallers = (for_clone ? &d->tm_callers_clone
                          : &d->tm_callers_normal);
              *pcallers += 1;
 
              maybe_push_queue (node, callees_p, &d->in_callee_queue);
            }
        }
    }
}
 
/* Scan all calls in NODE that are within a transaction region,
   and push the resulting nodes into the callee queue.  */
 
static void
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
                               cgraph_node_queue *callees_p)
{
  struct tm_region *r;
 
  d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
  d->all_tm_regions = all_tm_regions;
 
  for (r = all_tm_regions; r; r = r->next)
    {
      VEC (basic_block, heap) *bbs;
      basic_block bb;
      unsigned i;
 
      bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
                                  d->transaction_blocks_normal, false);
 
      FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
        ipa_tm_scan_calls_block (callees_p, bb, false);
 
      VEC_free (basic_block, heap, bbs);
    }
}
 
/* Scan all calls in NODE as if this is the transactional clone,
   and push the destinations into the callee queue.  */
 
static void
ipa_tm_scan_calls_clone (struct cgraph_node *node,
                         cgraph_node_queue *callees_p)
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  basic_block bb;
 
  FOR_EACH_BB_FN (bb, fn)
    ipa_tm_scan_calls_block (callees_p, bb, true);
}
 
/* The function NODE has been detected to be irrevocable.  Push all
   of its callers onto WORKLIST for the purpose of re-scanning them.  */
 
static void
ipa_tm_note_irrevocable (struct cgraph_node *node,
                         cgraph_node_queue *worklist_p)
{
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct cgraph_edge *e;
 
  d->is_irrevocable = true;
 
  for (e = node->callers; e ; e = e->next_caller)
    {
      basic_block bb;
      struct cgraph_node *caller;
 
      /* Don't examine recursive calls.  */
      if (e->caller == node)
        continue;
      /* Even if we think we can go irrevocable, believe the user
         above all.  */
      if (is_tm_safe_or_pure (e->caller->decl))
        continue;
 
      caller = e->caller;
      d = get_cg_data (&caller, true);
 
      /* Check if the call statement is within a transaction region
         in the caller.  If so, schedule the caller for normal re-scan
         as well.  */
      bb = gimple_bb (e->call_stmt);
      gcc_assert (bb != NULL);
      if (d->transaction_blocks_normal
          && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
        d->want_irr_scan_normal = true;
 
      maybe_push_queue (caller, worklist_p, &d->in_worklist);
    }
}
 
/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
   within the block is irrevocable.  */
 
static bool
ipa_tm_scan_irr_block (basic_block bb)
{
  gimple_stmt_iterator gsi;
  tree fn;
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
        {
        case GIMPLE_CALL:
          if (is_tm_pure_call (stmt))
            break;
 
          fn = gimple_call_fn (stmt);
 
          /* Functions with the attribute are by definition irrevocable.  */
          if (is_tm_irrevocable (fn))
            return true;
 
          /* For direct function calls, go ahead and check for replacement
             functions, or transitive irrevocable functions.  For indirect
             functions, we'll ask the runtime.  */
          if (TREE_CODE (fn) == ADDR_EXPR)
            {
              struct tm_ipa_cg_data *d;
              struct cgraph_node *node;
 
              fn = TREE_OPERAND (fn, 0);
              if (is_tm_ending_fndecl (fn))
                break;
              if (find_tm_replacement_function (fn))
                break;
 
              node = cgraph_get_node (fn);
              d = get_cg_data (&node, true);
 
              /* Return true if irrevocable, but above all, believe
                 the user.  */
              if (d->is_irrevocable
                  && !is_tm_safe_or_pure (fn))
                return true;
            }
          break;
 
        case GIMPLE_ASM:
          /* ??? The Approved Method of indicating that an inline
             assembly statement is not relevant to the transaction
             is to wrap it in a __tm_waiver block.  This is not
             yet implemented, so we can't check for it.  */
          if (is_tm_safe (current_function_decl))
            {
              tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
              SET_EXPR_LOCATION (t, gimple_location (stmt));
              TREE_BLOCK (t) = gimple_block (stmt);
              error ("%Kasm not allowed in %<transaction_safe%> function", t);
            }
          return true;
 
        default:
          break;
        }
    }
 
  return false;
}
 
/* For each of the blocks seeded within PQUEUE, walk the CFG looking
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   scanning past OLD_IRR or EXIT_BLOCKS.  */
 
static bool
ipa_tm_scan_irr_blocks (VEC (basic_block, heap) **pqueue, bitmap new_irr,
                        bitmap old_irr, bitmap exit_blocks)
{
  bool any_new_irr = false;
  edge e;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
 
  do
    {
      basic_block bb = VEC_pop (basic_block, *pqueue);
 
      /* Don't re-scan blocks we know already are irrevocable.  */
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
        continue;
 
      if (ipa_tm_scan_irr_block (bb))
        {
          bitmap_set_bit (new_irr, bb->index);
          any_new_irr = true;
        }
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (!bitmap_bit_p (visited_blocks, e->dest->index))
              {
                bitmap_set_bit (visited_blocks, e->dest->index);
                VEC_safe_push (basic_block, heap, *pqueue, e->dest);
              }
        }
    }
  while (!VEC_empty (basic_block, *pqueue));
 
  BITMAP_FREE (visited_blocks);
 
  return any_new_irr;
}
 
/* Propagate the irrevocable property both up and down the dominator tree.
   ENTRY_BLOCK is the entry block of the region being scanned; EXIT_BLOCKS
   are the exit blocks of the TM region; OLD_IRR are the results of a
   previous scan of the dominator tree which has been fully propagated;
   NEW_IRR is the set of new blocks which are gaining the irrevocable
   property during the current scan.  */
 
static void
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
                      bitmap old_irr, bitmap exit_blocks)
{
  VEC (basic_block, heap) *bbs;
  bitmap all_region_blocks;
 
  /* If this block is in the old set, no need to rescan.  */
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
    return;
 
  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
                              all_region_blocks, false);
  do
    {
      basic_block bb = VEC_pop (basic_block, bbs);
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool all_son_irr = false;
      edge_iterator ei;
      edge e;
 
      /* Propagate up.  If my children are, I am too, but we must have
         at least one child that is.  */
      if (!this_irr)
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
              if (!bitmap_bit_p (new_irr, e->dest->index))
                {
                  all_son_irr = false;
                  break;
                }
              else
                all_son_irr = true;
            }
          if (all_son_irr)
            {
              /* Add block to new_irr if it hasn't already been processed.  */
              if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
                {
                  bitmap_set_bit (new_irr, bb->index);
                  this_irr = true;
                }
            }
        }
 
      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
        {
          basic_block son;
          for (son = first_dom_son (CDI_DOMINATORS, bb);
               son;
               son = next_dom_son (CDI_DOMINATORS, son))
            {
              /* Make sure block is actually in a TM region, and it
                 isn't already in old_irr.  */
              if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
                  && bitmap_bit_p (all_region_blocks, son->index))
                bitmap_set_bit (new_irr, son->index);
            }
        }
    }
  while (!VEC_empty (basic_block, bbs));
 
  BITMAP_FREE (all_region_blocks);
  VEC_free (basic_block, heap, bbs);
}
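/* Concretely (a hypothetical CFG, for illustration): if all of a
   block's successors are already in NEW_IRR, the "propagate up" step
   adds the block itself; and once a block is in NEW_IRR, the
   "propagate down" step adds every block it immediately dominates
   that lies within the region, since control reaching those blocks
   must have passed through the irrevocable block.  */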
 
/* Decrement the transactional-clone call counts for every callee
   reached from the call statements in BB.  FOR_CLONE selects which
   of the two counters is adjusted.  */
 
static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
        {
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl)
            {
              struct tm_ipa_cg_data *d;
              unsigned *pcallers;
              struct cgraph_node *tnode;
 
              if (is_tm_ending_fndecl (fndecl))
                continue;
              if (find_tm_replacement_function (fndecl))
                continue;
 
              tnode = cgraph_get_node (fndecl);
              d = get_cg_data (&tnode, true);
 
              pcallers = (for_clone ? &d->tm_callers_clone
                          : &d->tm_callers_normal);
 
              gcc_assert (*pcallers > 0);
              *pcallers -= 1;
            }
        }
    }
}
 
/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */
 
static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  VEC (basic_block, heap) *queue;
  bool ret = false;
 
  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;
 
  current_function_decl = node->decl;
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);
 
  d = get_cg_data (&node, true);
  queue = VEC_alloc (basic_block, heap, 10);
  new_irr = BITMAP_ALLOC (&tm_obstack);
 
  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
    {
      old_irr = d->irrevocable_blocks_clone;
      VEC_quick_push (basic_block, queue, single_succ (ENTRY_BLOCK_PTR));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
        {
          ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
                                old_irr, NULL);
          ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
        }
    }
  else
    {
      struct tm_region *region;
 
      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
        {
          VEC_quick_push (basic_block, queue, region->entry_block);
          if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
                                      region->exit_blocks))
            ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
                                  region->exit_blocks);
        }
    }
 
  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
    {
      bitmap_iterator bmi;
      unsigned i;
 
      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
        ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
 
      if (old_irr)
        {
          bitmap_ior_into (old_irr, new_irr);
          BITMAP_FREE (new_irr);
        }
      else if (for_clone)
        d->irrevocable_blocks_clone = new_irr;
      else
        d->irrevocable_blocks_normal = new_irr;
 
      if (dump_file && new_irr)
        {
          const char *dname;
          bitmap_iterator bmi;
          unsigned i;
 
          dname = lang_hooks.decl_printable_name (current_function_decl, 2);
          EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
            fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
        }
    }
  else
    BITMAP_FREE (new_irr);
 
  VEC_free (basic_block, heap, queue);
  pop_cfun ();
  current_function_decl = NULL;
 
  return ret;
}
 
/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */
 
static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  tree decl;
  unsigned flags;
 
  d = get_cg_data (&node, true);
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);
 
  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
    return false;
 
  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
    return false;
  if (is_tm_safe (decl))
    return false;
  if (is_tm_irrevocable (decl))
    return true;
  if (is_tm_callable (decl))
    return true;
  if (find_tm_replacement_function (decl))
    return true;
 
  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
  if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
    return true;
 
  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
    return true;
 
  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
    return true;
 
  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
    return true;
 
  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
 
  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
  return false;
}
 
/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined not to be safe.  */
 
static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
  struct cgraph_edge *e;
 
  for (e = node->callees; e ; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
        && e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
                "unsafe function call %qD within "
                "%<transaction_safe%> function", e->callee->decl);
}
 
/* Diagnose calls from atomic transactions to unmarked functions
   that are determined not to be safe.  */
 
static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
                             struct tm_region *all_tm_regions)
{
  struct tm_region *r;
 
  for (r = all_tm_regions; r ; r = r->next)
    if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
      {
        /* Atomic transactions can be nested inside relaxed.  */
        if (r->inner)
          ipa_tm_diagnose_transaction (node, r->inner);
      }
    else
      {
        VEC (basic_block, heap) *bbs;
        gimple_stmt_iterator gsi;
        basic_block bb;
        size_t i;
 
        bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
                                    r->irr_blocks, NULL, false);
 
        for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              gimple stmt = gsi_stmt (gsi);
              tree fndecl;
 
              if (gimple_code (stmt) == GIMPLE_ASM)
                {
                  error_at (gimple_location (stmt),
                            "asm not allowed in atomic transaction");
                  continue;
                }
 
              if (!is_gimple_call (stmt))
                continue;
              fndecl = gimple_call_fndecl (stmt);
 
              /* Indirect function calls have been diagnosed already.  */
              if (!fndecl)
                continue;
 
              /* Stop at the end of the transaction.  */
              if (is_tm_ending_fndecl (fndecl))
                {
                  if (bitmap_bit_p (r->exit_blocks, bb->index))
                    break;
                  continue;
                }
 
              /* Marked functions have been diagnosed already.  */
              if (is_tm_pure_call (stmt))
                continue;
              if (is_tm_callable (fndecl))
                continue;
 
              if (cgraph_local_info (fndecl)->tm_may_enter_irr)
                error_at (gimple_location (stmt),
                          "unsafe function call %qD within "
                          "atomic transaction", fndecl);
            }
 
        VEC_free (basic_block, heap, bbs);
      }
}
 
/* Return a transactional mangled version of the assembler name
   OLD_ASM_ID, as used for DECL_ASSEMBLER_NAME.  The result is an
   IDENTIFIER_NODE; the temporary buffer used to build it is freed
   here, so the caller need not free anything.  */
 
static tree
tm_mangle (tree old_asm_id)
{
  const char *old_asm_name;
  char *tm_name;
  void *alloc = NULL;
  struct demangle_component *dc;
  tree new_asm_id;
 
  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
 
  if (dc == NULL)
    {
      char length[8];
 
    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
  else
    {
      old_asm_name += 2;        /* Skip _Z */
 
      switch (dc->type)
        {
        case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
        case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
          /* Don't play silly games, you!  */
          goto do_unencoded;
 
        case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
          /* I'd really like to know if we can ever be passed one of
             these from the C++ front end.  The Logical Thing would
             seem that hidden-alias should be outer-most, so that we
             get hidden-alias of a transaction-clone and not vice-versa.  */
          old_asm_name += 2;
          break;
 
        default:
          break;
        }
 
      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
  free (alloc);
 
  new_asm_id = get_identifier (tm_name);
  free (tm_name);
 
  return new_asm_id;
}
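/* Two examples of the mapping (derived from the code above, not an
   exhaustive specification of the ABI):

        "foo"      (plain C identifier)  ->  "_ZGTt3foo"
        "_Z3barv"  (C++ mangled)         ->  "_ZGTt3barv"

   i.e. an already-mangled name keeps its encoding with the leading _Z
   replaced by the transaction-clone prefix _ZGTt, while an unencoded
   name gets the prefix plus an explicit length, mimicking C++
   name mangling.  */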
 
static inline void
ipa_tm_mark_needed_node (struct cgraph_node *node)
{
  cgraph_mark_needed_node (node);
  /* ??? function_and_variable_visibility will reset
     the needed bit, without actually checking.  */
  node->analyzed = 1;
}
 
/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
{
  struct cgraph_node *old_node;
  tree new_decl;
};
 
/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
  struct create_version_alias_info *info
    = (struct create_version_alias_info *)data;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;
 
  if (!node->same_body_alias)
    return false;
 
  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
                         TREE_CODE (old_decl), tm_name,
                         TREE_TYPE (old_decl));
 
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
 
  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;
 
  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
 
  new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->local.externally_visible = info->old_node->local.externally_visible;
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;
 
  record_tm_clone_pair (old_decl, new_decl);
 
  if (info->old_node->needed)
    ipa_tm_mark_needed_node (new_node);
  return false;
}
 
/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */
 
static void
ipa_tm_create_version (struct cgraph_node *old_node)
{
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;
 
  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);
 
  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;
 
  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
 
  new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
  new_node->local.externally_visible = old_node->local.externally_visible;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  get_cg_data (&old_node, true)->clone = new_node;
 
  if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
    {
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
        {
          DECL_EXTERNAL (new_decl) = 0;
          TREE_PUBLIC (new_decl) = 0;
          DECL_WEAK (new_decl) = 0;
        }
 
      tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
                                NULL, NULL);
    }
 
  record_tm_clone_pair (old_decl, new_decl);
 
  cgraph_call_function_insertion_hooks (new_node);
  if (old_node->needed)
    ipa_tm_mark_needed_node (new_node);
 
  /* Do the same thing, but for any aliases of the original node.  */
  {
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.new_decl = new_decl;
    cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
                                 &data, true);
  }
}
 
/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */
 
static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;
 
  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
 
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
                         1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
 
  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
 
  cgraph_create_edge (node,
                      cgraph_get_create_node
                        (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
                      g, 0,
                      compute_call_stmt_bb_frequency (node->decl,
                                                      gimple_bb (g)));
}
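/* After this transformation the start of BB looks roughly like the
   following (a sketch only; whether BUILT_IN_TM_IRREVOCABLE maps to
   libitm's _ITM_changeTransactionMode is an assumption here, see
   gtm-builtins.def for the authoritative mapping):

        __builtin__ITM_changeTransactionMode (0); // MODE_SERIALIRREVOCABLE

   with the remainder of the original block split off after the call.  */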
 
/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */
 
static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
                               struct tm_region *region,
                               gimple_stmt_iterator *gsi, gimple stmt)
{
  tree gettm_fn, ret, old_fn, callfn;
  gimple g, g2;
  bool safe;
 
  old_fn = gimple_call_fn (stmt);
 
  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);
 
      /* By transforming the call into a TM_GETTMCLONE, we are
         technically taking the address of the original function and
         its clone.  Explain this so inlining will know this function
         is needed.  */
      cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
      if (clone)
        cgraph_mark_address_taken_node (cgraph_get_node (clone));
    }
 
  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
                                    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node, NULL);
  add_referenced_var (ret);
 
  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
 
  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);
 
  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);
 
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
 
  cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
                      compute_call_stmt_bb_frequency (node->decl,
                                                      gimple_bb (g)));
 
  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
  add_referenced_var (callfn);
  g2 = gimple_build_assign (callfn,
                            fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);
 
  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);
 
  gimple_call_set_fn (stmt, callfn);
 
  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
  {
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
        && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
    {
      tree temp;
 
      temp = make_rename_temp (rettype, 0);
      gimple_call_set_lhs (stmt, temp);
 
      g2 = gimple_build_assign (lhs,
                                fold_build1 (VIEW_CONVERT_EXPR,
                                             TREE_TYPE (lhs), temp));
      gsi_insert_after (gsi, g2, GSI_SAME_STMT);
    }
  }
 
  update_stmt (stmt);
 
  return true;
}
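/* The net effect on an indirect call `x = fn (args)' inside a
   transaction is roughly (hypothetical GIMPLE, SSA names invented):

        ret_1 = TM_GETTMCLONE_IRR (fn);    <-- or _SAFE when fn's type
                                               is transaction_safe
        callfn_2 = (T) ret_1;              <-- cast back to fn's type
        x = callfn_2 (args);

   so the runtime picks the transactional clone (or arranges to go
   irrevocable) at the point of the call.  */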
 
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */
 
static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
                                 struct tm_region *region,
                                 gimple_stmt_iterator *gsi,
                                 bool *need_ssa_rename_p)
{
  gimple stmt = gsi_stmt (*gsi);
  struct cgraph_node *new_node;
  struct cgraph_edge *e = cgraph_edge (node, stmt);
  tree fndecl = gimple_call_fndecl (stmt);
 
  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
    {
      *need_ssa_rename_p |=
        ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
    }
 
  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;
 
  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
    }
 
  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
    {
      new_node = cgraph_get_create_node (fndecl);
 
      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
 
         We can't do this earlier in record_tm_replacement because
         cgraph_remove_unreachable_nodes is called before we inject
         references to the node.  Further, we can't do this in some
         nice central place in ipa_tm_execute because we don't have
         the exact list of wrapper functions that would be used.
         Marking more wrappers than necessary results in the creation
         of unnecessary cgraph_nodes, which can cause some of the
         other IPA passes to crash.
 
         We do need to mark these nodes so that we get the proper
         result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
         CALLEE as may_enter_irr?  Surely we should be marking the
         CALLER.  Also note that find_tm_replacement_function also
         contains mappings into the TM runtime, e.g. memcpy.  These
         we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
    }
  else
    {
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;
 
      d = get_cg_data (&tnode, true);
      new_node = d->clone;
 
      /* As we've already skipped pure calls and appropriate builtins,
         and we've already marked irrevocable blocks, if we can't come
         up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
        {
          *need_ssa_rename_p |=
            ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
          return;
        }
 
      fndecl = new_node->decl;
    }
 
  cgraph_redirect_edge_callee (e, new_node);
  gimple_call_set_fndecl (stmt, fndecl);
}
 
/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */
 
static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
                          basic_block bb, bitmap irr_blocks)
{
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;
 
  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
    }
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
 
      if (!is_gimple_call (stmt))
        continue;
      if (is_tm_pure_call (stmt))
        continue;
 
      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }
 
  return need_ssa_rename;
}
 
/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */
 
static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb, bitmap irr_blocks)
{
  bool need_ssa_rename = false;
  edge e;
  edge_iterator ei;
  VEC (basic_block, heap) *queue = NULL;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
 
  VEC_safe_push (basic_block, heap, queue, bb);
  do
    {
      bb = VEC_pop (basic_block, queue);
 
      need_ssa_rename |=
        ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
 
      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
        continue;
 
      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
        continue;
 
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            VEC_safe_push (basic_block, heap, queue, e->dest);
          }
    }
  while (!VEC_empty (basic_block, queue));
 
  VEC_free (basic_block, heap, queue);
  BITMAP_FREE (visited_blocks);
 
  return need_ssa_rename;
}

/* Transform the calls within the TM regions within NODE.  */

static void
ipa_tm_transform_transaction (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  bool need_ssa_rename = false;

  d = get_cg_data (&node, true);

  current_function_decl = node->decl;
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);

  for (region = d->all_tm_regions; region; region = region->next)
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
          && bitmap_bit_p (d->irrevocable_blocks_normal,
                           region->entry_block->index))
        {
          transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE);
          transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
          continue;
        }

      need_ssa_rename |=
        ipa_tm_transform_calls (node, region, region->entry_block,
                                d->irrevocable_blocks_normal);
    }

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
  current_function_decl = NULL;
}
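
/* When the entry block of a region is itself known irrevocable, there
   is no point instrumenting any of its blocks; instead the
   GTMA_DOES_GO_IRREVOCABLE and GTMA_MAY_ENTER_IRREVOCABLE subcode bits
   are set on the GIMPLE_TRANSACTION statement, so that the later
   expansion of the transaction can tell the runtime up front that the
   transaction goes irrevocable and omit the instrumented code path.  */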

/* Transform the calls within the transactional clone of NODE.  */

static void
ipa_tm_transform_clone (struct cgraph_node *node)
{
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;

  d = get_cg_data (&node, true);

  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !d->irrevocable_blocks_clone)
    return;

  current_function_decl = d->clone->decl;
  push_cfun (DECL_STRUCT_FUNCTION (current_function_decl));
  calculate_dominance_info (CDI_DOMINATORS);

  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
                            d->irrevocable_blocks_clone);

  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);

  pop_cfun ();
  current_function_decl = NULL;
}
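
/* Unlike ipa_tm_transform_transaction, the clone transform passes a
   NULL region and starts the walk at the single successor of the entry
   block: the entire body of a transactional clone executes in
   transactional context, so every reachable block must have its calls
   redirected (or be marked irrevocable).  */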

/* Main entry point for the transactional memory IPA pass.  */

static unsigned int
ipa_tm_execute (void)
{
  cgraph_node_queue tm_callees = NULL;
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = NULL;

  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  enum availability a;
  unsigned int i;

#ifdef ENABLE_CHECKING
  verify_cgraph ();
#endif

  bitmap_obstack_initialize (&tm_obstack);

  /* For all local functions marked tm_callable, queue them.  */
  for (node = cgraph_nodes; node; node = node->next)
    if (is_tm_callable (node->decl)
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
        d = get_cg_data (&node, true);
        maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }

  /* For all local reachable functions...  */
  for (node = cgraph_nodes; node; node = node->next)
    if (node->reachable && node->lowered
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
        /* ... marked tm_pure, record that fact for the runtime by
           indicating that the pure function is its own tm_callable.
           No need to do this if the function's address can't be taken.  */
        if (is_tm_pure (node->decl))
          {
            if (!node->local.local)
              record_tm_clone_pair (node->decl, node->decl);
            continue;
          }

        current_function_decl = node->decl;
        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
        calculate_dominance_info (CDI_DOMINATORS);

        tm_region_init (NULL);
        if (all_tm_regions)
          {
            d = get_cg_data (&node, true);

            /* Scan for calls that are in each transaction.  */
            ipa_tm_scan_calls_transaction (d, &tm_callees);

            /* Put it in the worklist so we can scan the function
               later (ipa_tm_scan_irr_function) and mark the
               irrevocable blocks.  */
            maybe_push_queue (node, &irr_worklist, &d->in_worklist);
            d->want_irr_scan_normal = true;
          }

        pop_cfun ();
        current_function_decl = NULL;
      }
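
  /* At this point tm_callees holds the functions explicitly marked
     tm_callable plus every function called directly from inside a
     transaction, and irr_worklist holds the functions whose bodies
     contain transactions and still need an irrevocable-block scan.  */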

  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     along the way.  */
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
      node = VEC_index (cgraph_node_p, tm_callees, i);
      a = cgraph_function_body_availability (node);
      d = get_cg_data (&node, true);

      /* Put it in the worklist so we can scan the function later
         (ipa_tm_scan_irr_function) and mark the irrevocable
         blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);

      /* Some callees cannot be arbitrarily cloned.  These will always be
         irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
               && !is_tm_safe_or_pure (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_OVERWRITABLE)
        {
          if (!tree_versionable_function_p (node->decl))
            ipa_tm_note_irrevocable (node, &irr_worklist);
          else if (!d->is_irrevocable)
            {
              /* If this is an alias, make sure its base is queued as well.
                 We need not scan the callees now, as the base will do.  */
              if (node->alias)
                {
                  node = cgraph_get_node (node->thunk.alias);
                  d = get_cg_data (&node, true);
                  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
                  continue;
                }

              /* Add all nodes called by this function into
                 tm_callees as well.  */
              ipa_tm_scan_calls_clone (node, &tm_callees);
            }
        }
    }
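
  /* The availability checks above are deliberately conservative: a
     callee whose body is not available (e.g. an extern function in
     another translation unit) cannot be scanned or cloned, so unless
     it was explicitly declared tm_safe or tm_pure it must be assumed
     to go irrevocable.  Likewise a body that cannot be versioned can
     never receive a transactional clone.  */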

  /* Iterate scans until no more work to be done.  Prefer not to use
     VEC_pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergence with a
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
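  /* For example, once the scan index exceeds 256 and reaches one
     eighth of the vector length, the already-processed prefix is
     removed with VEC_block_remove and the index restarts at zero;
     with a worklist that has grown to 4096 entries this drops the
     first 512 once i reaches 512.  */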
  for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
    {
      if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
        {
          VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
          i = 0;
        }

      node = VEC_index (cgraph_node_p, irr_worklist, i);
      d = get_cg_data (&node, true);
      d->in_worklist = false;

      if (d->want_irr_scan_normal)
        {
          d->want_irr_scan_normal = false;
          ipa_tm_scan_irr_function (node, false);
        }
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
        ipa_tm_note_irrevocable (node, &irr_worklist);
    }

  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
  VEC_truncate (cgraph_node_p, irr_worklist, 0);
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
      node = VEC_index (cgraph_node_p, tm_callees, i);
      if (ipa_tm_mayenterirr_function (node))
        {
          d = get_cg_data (&node, true);
          gcc_assert (d->in_worklist == false);
          maybe_push_queue (node, &irr_worklist, &d->in_worklist);
        }
    }
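
  /* The loop below computes a reverse transitive closure over the
     callgraph: whenever a function may enter irrevocable mode, every
     caller that is not itself tm_safe or tm_pure may enter it too.
     For example, if C may go irrevocable and B calls C while A calls
     B, then both B and A end up with tm_may_enter_irr set, unless one
     of them was declared tm_safe.  */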

  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
    {
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct ipa_ref *ref;
      unsigned j;

      if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
        {
          VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
          i = 0;
        }

      node = VEC_index (cgraph_node_p, irr_worklist, i);
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;

      /* Propagate back to normal callers.  */
      for (e = node->callers; e ; e = e->next_caller)
        {
          caller = e->caller;
          if (!is_tm_safe_or_pure (caller->decl)
              && !caller->local.tm_may_enter_irr)
            {
              d = get_cg_data (&caller, true);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }

      /* Propagate back to referring aliases as well.  */
      for (j = 0; ipa_ref_list_refering_iterate (&node->ref_list, j, ref); j++)
        {
          caller = ref->refering.cgraph_node;
          if (ref->use == IPA_REF_ALIAS
              && !caller->local.tm_may_enter_irr)
            {
              /* ?? Do not traverse aliases here.  */
              d = get_cg_data (&caller, false);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
        }
    }

  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
  for (node = cgraph_nodes; node; node = node->next)
    if (node->reachable && node->lowered
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
        d = get_cg_data (&node, true);
        if (is_tm_safe (node->decl))
          ipa_tm_diagnose_tm_safe (node);
        else if (d->all_tm_regions)
          ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }

  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
      bool doit = false;

      node = VEC_index (cgraph_node_p, tm_callees, i);
      if (node->same_body_alias)
        continue;

      a = cgraph_function_body_availability (node);
      d = get_cg_data (&node, true);

      if (a <= AVAIL_NOT_AVAILABLE)
        doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
        doit = true;
      else if (!d->is_irrevocable
               && d->tm_callers_normal + d->tm_callers_clone > 0)
        doit = true;

      if (doit)
        ipa_tm_create_version (node);
    }
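
  /* In short: a function without an available body is cloned only when
     the user marked it tm_callable; an available or overridable body is
     cloned either on the same user request or because at least one call
     from a transaction or from another clone was recorded for it.  The
     clone created by ipa_tm_create_version receives a mangled name with
     a _ZGTt prefix, e.g. foo becomes _ZGTt3foo.  */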

  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
      node = VEC_index (cgraph_node_p, tm_callees, i);
      if (node->analyzed)
        {
          d = get_cg_data (&node, true);
          if (d->clone)
            ipa_tm_transform_clone (node);
        }
    }
  for (node = cgraph_nodes; node; node = node->next)
    if (node->reachable && node->lowered
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
        d = get_cg_data (&node, true);
        if (d->all_tm_regions)
          ipa_tm_transform_transaction (node);
      }

  /* Free and clear all data structures.  */
  VEC_free (cgraph_node_p, heap, tm_callees);
  VEC_free (cgraph_node_p, heap, irr_worklist);
  bitmap_obstack_release (&tm_obstack);

  for (node = cgraph_nodes; node; node = node->next)
    node->aux = NULL;

#ifdef ENABLE_CHECKING
  verify_cgraph ();
#endif

  return 0;
}

struct simple_ipa_opt_pass pass_ipa_tm =
{
 {
  SIMPLE_IPA_PASS,
  "tmipa",                              /* name */
  gate_tm,                              /* gate */
  ipa_tm_execute,                       /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
 },
};
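
/* The pass runs once over the whole translation unit as a simple IPA
   pass.  It requires the CFG and SSA form (PROP_ssa | PROP_cfg) and is
   gated by gate_tm, i.e. it only runs when transactional memory
   support is enabled.  */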

#include "gt-trans-mem.h"
