OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [tags/] [gnu-dev/] [fsf-gcc-snapshot-1-mar-12/] [or1k-gcc/] [gcc/] [trans-mem.c] - Diff between revs 684 and 783

Go to most recent revision | Only display areas with differences | Details | Blame | View Log

Rev 684 Rev 783
/* Passes for transactional memory support.
   Copyright (C) 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
 
 
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tree.h"
#include "gimple.h"
#include "tree-flow.h"
#include "tree-pass.h"
#include "tree-inline.h"
#include "diagnostic-core.h"
#include "demangle.h"
#include "output.h"
#include "trans-mem.h"
#include "params.h"
#include "target.h"
#include "langhooks.h"
#include "tree-pretty-print.h"
#include "gimple-pretty-print.h"
 
 
 
 
/* Branch probabilities used when instrumenting transaction restarts.  */
#define PROB_VERY_UNLIKELY      (REG_BR_PROB_BASE / 2000 - 1)
#define PROB_ALWAYS             (REG_BR_PROB_BASE)

/* Action flags passed to/returned from the TM runtime (see libitm ABI).  */
#define A_RUNINSTRUMENTEDCODE   0x0001
#define A_RUNUNINSTRUMENTEDCODE 0x0002
#define A_SAVELIVEVARIABLES     0x0004
#define A_RESTORELIVEVARIABLES  0x0008
#define A_ABORTTRANSACTION      0x0010

/* Abort reason flags passed to __builtin___tm_abort.  */
#define AR_USERABORT            0x0001
#define AR_USERRETRY            0x0002
#define AR_TMCONFLICT           0x0004
#define AR_EXCEPTIONBLOCKABORT  0x0008
#define AR_OUTERABORT           0x0010

#define MODE_SERIALIRREVOCABLE  0x0000
 
 
 
 
/* The representation of a transaction changes several times during the
   lowering process.  In the beginning, in the front-end we have the
   GENERIC tree TRANSACTION_EXPR.  For example,

        __transaction {
          local++;
          if (++global == 10)
            __tm_abort;
        }

  During initial gimplification (gimplify.c) the TRANSACTION_EXPR node is
  trivially replaced with a GIMPLE_TRANSACTION node.

  During pass_lower_tm, we examine the body of transactions looking
  for aborts.  Transactions that do not contain an abort may be
  merged into an outer transaction.  We also add a TRY-FINALLY node
  to arrange for the transaction to be committed on any exit.

  [??? Think about how this arrangement affects throw-with-commit
  and throw-with-abort operations.  In this case we want the TRY to
  handle gotos, but not to catch any exceptions because the transaction
  will already be closed.]

        GIMPLE_TRANSACTION [label=NULL] {
          try {
            local = local + 1;
            t0 = global;
            t1 = t0 + 1;
            global = t1;
            if (t1 == 10)
              __builtin___tm_abort ();
          } finally {
            __builtin___tm_commit ();
          }
        }

  During pass_lower_eh, we create EH regions for the transactions,
  intermixed with the regular EH stuff.  This gives us a nice persistent
  mapping (all the way through rtl) from transactional memory operation
  back to the transaction, which allows us to get the abnormal edges
  correct to model transaction aborts and restarts:

        GIMPLE_TRANSACTION [label=over]
        local = local + 1;
        t0 = global;
        t1 = t0 + 1;
        global = t1;
        if (t1 == 10)
          __builtin___tm_abort ();
        __builtin___tm_commit ();
        over:

  This is the end of all_lowering_passes, and so is what is present
  during the IPA passes, and through all of the optimization passes.

  During pass_ipa_tm, we examine all GIMPLE_TRANSACTION blocks in all
  functions and mark functions for cloning.

  At the end of gimple optimization, before exiting SSA form,
  pass_tm_edges replaces statements that perform transactional
  memory operations with the appropriate TM builtins, and swap
  out function calls with their transactional clones.  At this
  point we introduce the abnormal transaction restart edges and
  complete lowering of the GIMPLE_TRANSACTION node.

        x = __builtin___tm_start (MAY_ABORT);
        eh_label:
        if (x & abort_transaction)
          goto over;
        local = local + 1;
        t0 = __builtin___tm_load (global);
        t1 = t0 + 1;
        __builtin___tm_store (&global, t1);
        if (t1 == 10)
          __builtin___tm_abort ();
        __builtin___tm_commit ();
        over:
*/
 
 


/* Return the attributes we want to examine for X, or NULL if it's not
/* Return the attributes we want to examine for X, or NULL if it's not
   something we examine.  We look at function types, but allow pointers
   something we examine.  We look at function types, but allow pointers
   to function types and function decls and peek through.  */
   to function types and function decls and peek through.  */
 
 
static tree
static tree
get_attrs_for (const_tree x)
get_attrs_for (const_tree x)
{
{
  switch (TREE_CODE (x))
  switch (TREE_CODE (x))
    {
    {
    case FUNCTION_DECL:
    case FUNCTION_DECL:
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      return TYPE_ATTRIBUTES (TREE_TYPE (x));
      break;
      break;
 
 
    default:
    default:
      if (TYPE_P (x))
      if (TYPE_P (x))
        return NULL;
        return NULL;
      x = TREE_TYPE (x);
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
      if (TREE_CODE (x) != POINTER_TYPE)
        return NULL;
        return NULL;
      /* FALLTHRU */
      /* FALLTHRU */
 
 
    case POINTER_TYPE:
    case POINTER_TYPE:
      x = TREE_TYPE (x);
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
        return NULL;
        return NULL;
      /* FALLTHRU */
      /* FALLTHRU */
 
 
    case FUNCTION_TYPE:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
    case METHOD_TYPE:
      return TYPE_ATTRIBUTES (x);
      return TYPE_ATTRIBUTES (x);
    }
    }
}
}
 
 
/* Return true if X has been marked TM_PURE.  */
/* Return true if X has been marked TM_PURE.  */
 
 
bool
bool
is_tm_pure (const_tree x)
is_tm_pure (const_tree x)
{
{
  unsigned flags;
  unsigned flags;
 
 
  switch (TREE_CODE (x))
  switch (TREE_CODE (x))
    {
    {
    case FUNCTION_DECL:
    case FUNCTION_DECL:
    case FUNCTION_TYPE:
    case FUNCTION_TYPE:
    case METHOD_TYPE:
    case METHOD_TYPE:
      break;
      break;
 
 
    default:
    default:
      if (TYPE_P (x))
      if (TYPE_P (x))
        return false;
        return false;
      x = TREE_TYPE (x);
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != POINTER_TYPE)
      if (TREE_CODE (x) != POINTER_TYPE)
        return false;
        return false;
      /* FALLTHRU */
      /* FALLTHRU */
 
 
    case POINTER_TYPE:
    case POINTER_TYPE:
      x = TREE_TYPE (x);
      x = TREE_TYPE (x);
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
      if (TREE_CODE (x) != FUNCTION_TYPE && TREE_CODE (x) != METHOD_TYPE)
        return false;
        return false;
      break;
      break;
    }
    }
 
 
  flags = flags_from_decl_or_type (x);
  flags = flags_from_decl_or_type (x);
  return (flags & ECF_TM_PURE) != 0;
  return (flags & ECF_TM_PURE) != 0;
}
}
 
 
/* Return true if X has been marked TM_IRREVOCABLE.  */

static bool
is_tm_irrevocable (tree x)
{
  tree attrs = get_attrs_for (x);

  if (attrs && lookup_attribute ("transaction_unsafe", attrs))
    return true;

  /* A call to the irrevocable builtin is by definition,
     irrevocable.  */
  if (TREE_CODE (x) == ADDR_EXPR)
    x = TREE_OPERAND (x, 0);
  if (TREE_CODE (x) == FUNCTION_DECL
      && DECL_BUILT_IN_CLASS (x) == BUILT_IN_NORMAL
      && DECL_FUNCTION_CODE (x) == BUILT_IN_TM_IRREVOCABLE)
    return true;

  return false;
}
 
 
/* Return true if X has been marked TM_SAFE.  */
/* Return true if X has been marked TM_SAFE.  */
 
 
bool
bool
is_tm_safe (const_tree x)
is_tm_safe (const_tree x)
{
{
  if (flag_tm)
  if (flag_tm)
    {
    {
      tree attrs = get_attrs_for (x);
      tree attrs = get_attrs_for (x);
      if (attrs)
      if (attrs)
        {
        {
          if (lookup_attribute ("transaction_safe", attrs))
          if (lookup_attribute ("transaction_safe", attrs))
            return true;
            return true;
          if (lookup_attribute ("transaction_may_cancel_outer", attrs))
          if (lookup_attribute ("transaction_may_cancel_outer", attrs))
            return true;
            return true;
        }
        }
    }
    }
  return false;
  return false;
}
}
 
 
/* Return true if CALL is const, or tm_pure.  */

static bool
is_tm_pure_call (gimple call)
{
  tree fn = gimple_call_fn (call);

  if (TREE_CODE (fn) == ADDR_EXPR)
    {
      /* Direct call: examine the function decl itself.  */
      fn = TREE_OPERAND (fn, 0);
      gcc_assert (TREE_CODE (fn) == FUNCTION_DECL);
    }
  else
    /* Indirect call: examine the function type.  */
    fn = TREE_TYPE (fn);

  return is_tm_pure (fn);
}
 
 
/* Return true if X has been marked TM_CALLABLE.  Note that safe and
   may_cancel_outer functions are callable too.  */

static bool
is_tm_callable (tree x)
{
  tree attrs = get_attrs_for (x);
  if (attrs)
    {
      if (lookup_attribute ("transaction_callable", attrs))
        return true;
      if (lookup_attribute ("transaction_safe", attrs))
        return true;
      if (lookup_attribute ("transaction_may_cancel_outer", attrs))
        return true;
    }
  return false;
}
 
 
/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */
/* Return true if X has been marked TRANSACTION_MAY_CANCEL_OUTER.  */
 
 
bool
bool
is_tm_may_cancel_outer (tree x)
is_tm_may_cancel_outer (tree x)
{
{
  tree attrs = get_attrs_for (x);
  tree attrs = get_attrs_for (x);
  if (attrs)
  if (attrs)
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
    return lookup_attribute ("transaction_may_cancel_outer", attrs) != NULL;
  return false;
  return false;
}
}
 
 
/* Return true for built in functions that "end" a transaction.   */
/* Return true for built in functions that "end" a transaction.   */
 
 
bool
bool
is_tm_ending_fndecl (tree fndecl)
is_tm_ending_fndecl (tree fndecl)
{
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      {
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_COMMIT_EH:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_ABORT:
      case BUILT_IN_TM_IRREVOCABLE:
      case BUILT_IN_TM_IRREVOCABLE:
        return true;
        return true;
      default:
      default:
        break;
        break;
      }
      }
 
 
  return false;
  return false;
}
}
 
 
/* Return true if STMT is a TM load.  */

static bool
is_tm_load (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && BUILTIN_TM_LOAD_P (DECL_FUNCTION_CODE (fndecl)));
}
 
 
/* Same as above, but for simple TM loads, that is, not the
/* Same as above, but for simple TM loads, that is, not the
   after-write, after-read, etc optimized variants.  */
   after-write, after-read, etc optimized variants.  */
 
 
static bool
static bool
is_tm_simple_load (gimple stmt)
is_tm_simple_load (gimple stmt)
{
{
  tree fndecl;
  tree fndecl;
 
 
  if (gimple_code (stmt) != GIMPLE_CALL)
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;
    return false;
 
 
  fndecl = gimple_call_fndecl (stmt);
  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_LOAD_1
      return (fcode == BUILT_IN_TM_LOAD_1
              || fcode == BUILT_IN_TM_LOAD_2
              || fcode == BUILT_IN_TM_LOAD_2
              || fcode == BUILT_IN_TM_LOAD_4
              || fcode == BUILT_IN_TM_LOAD_4
              || fcode == BUILT_IN_TM_LOAD_8
              || fcode == BUILT_IN_TM_LOAD_8
              || fcode == BUILT_IN_TM_LOAD_FLOAT
              || fcode == BUILT_IN_TM_LOAD_FLOAT
              || fcode == BUILT_IN_TM_LOAD_DOUBLE
              || fcode == BUILT_IN_TM_LOAD_DOUBLE
              || fcode == BUILT_IN_TM_LOAD_LDOUBLE
              || fcode == BUILT_IN_TM_LOAD_LDOUBLE
              || fcode == BUILT_IN_TM_LOAD_M64
              || fcode == BUILT_IN_TM_LOAD_M64
              || fcode == BUILT_IN_TM_LOAD_M128
              || fcode == BUILT_IN_TM_LOAD_M128
              || fcode == BUILT_IN_TM_LOAD_M256);
              || fcode == BUILT_IN_TM_LOAD_M256);
    }
    }
  return false;
  return false;
}
}
 
 
/* Return true if STMT is a TM store.  */

static bool
is_tm_store (gimple stmt)
{
  tree fndecl;

  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;

  fndecl = gimple_call_fndecl (stmt);
  return (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && BUILTIN_TM_STORE_P (DECL_FUNCTION_CODE (fndecl)));
}
 
 
/* Same as above, but for simple TM stores, that is, not the
/* Same as above, but for simple TM stores, that is, not the
   after-write, after-read, etc optimized variants.  */
   after-write, after-read, etc optimized variants.  */
 
 
static bool
static bool
is_tm_simple_store (gimple stmt)
is_tm_simple_store (gimple stmt)
{
{
  tree fndecl;
  tree fndecl;
 
 
  if (gimple_code (stmt) != GIMPLE_CALL)
  if (gimple_code (stmt) != GIMPLE_CALL)
    return false;
    return false;
 
 
  fndecl = gimple_call_fndecl (stmt);
  fndecl = gimple_call_fndecl (stmt);
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
  if (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    {
    {
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      enum built_in_function fcode = DECL_FUNCTION_CODE (fndecl);
      return (fcode == BUILT_IN_TM_STORE_1
      return (fcode == BUILT_IN_TM_STORE_1
              || fcode == BUILT_IN_TM_STORE_2
              || fcode == BUILT_IN_TM_STORE_2
              || fcode == BUILT_IN_TM_STORE_4
              || fcode == BUILT_IN_TM_STORE_4
              || fcode == BUILT_IN_TM_STORE_8
              || fcode == BUILT_IN_TM_STORE_8
              || fcode == BUILT_IN_TM_STORE_FLOAT
              || fcode == BUILT_IN_TM_STORE_FLOAT
              || fcode == BUILT_IN_TM_STORE_DOUBLE
              || fcode == BUILT_IN_TM_STORE_DOUBLE
              || fcode == BUILT_IN_TM_STORE_LDOUBLE
              || fcode == BUILT_IN_TM_STORE_LDOUBLE
              || fcode == BUILT_IN_TM_STORE_M64
              || fcode == BUILT_IN_TM_STORE_M64
              || fcode == BUILT_IN_TM_STORE_M128
              || fcode == BUILT_IN_TM_STORE_M128
              || fcode == BUILT_IN_TM_STORE_M256);
              || fcode == BUILT_IN_TM_STORE_M256);
    }
    }
  return false;
  return false;
}
}
 
 
/* Return true if FNDECL is BUILT_IN_TM_ABORT.  */

static bool
is_tm_abort (tree fndecl)
{
  return (fndecl
          && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
          && DECL_FUNCTION_CODE (fndecl) == BUILT_IN_TM_ABORT);
}
 
 
/* Build a GENERIC tree for a user abort.  This is called by front ends
/* Build a GENERIC tree for a user abort.  This is called by front ends
   while transforming the __tm_abort statement.  */
   while transforming the __tm_abort statement.  */
 
 
tree
tree
build_tm_abort_call (location_t loc, bool is_outer)
build_tm_abort_call (location_t loc, bool is_outer)
{
{
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
  return build_call_expr_loc (loc, builtin_decl_explicit (BUILT_IN_TM_ABORT), 1,
                              build_int_cst (integer_type_node,
                              build_int_cst (integer_type_node,
                                             AR_USERABORT
                                             AR_USERABORT
                                             | (is_outer ? AR_OUTERABORT : 0)));
                                             | (is_outer ? AR_OUTERABORT : 0)));
}
}
 
 
/* Common gateing function for several of the TM passes.  */
/* Common gateing function for several of the TM passes.  */
 
 
static bool
static bool
gate_tm (void)
gate_tm (void)
{
{
  return flag_tm;
  return flag_tm;
}
}


/* Map for aribtrary function replacement under TM, as created
/* Map for aribtrary function replacement under TM, as created
   by the tm_wrap attribute.  */
   by the tm_wrap attribute.  */
 
 
static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
static GTY((if_marked ("tree_map_marked_p"), param_is (struct tree_map)))
     htab_t tm_wrap_map;
     htab_t tm_wrap_map;
 
 
void
void
record_tm_replacement (tree from, tree to)
record_tm_replacement (tree from, tree to)
{
{
  struct tree_map **slot, *h;
  struct tree_map **slot, *h;
 
 
  /* Do not inline wrapper functions that will get replaced in the TM
  /* Do not inline wrapper functions that will get replaced in the TM
     pass.
     pass.
 
 
     Suppose you have foo() that will get replaced into tmfoo().  Make
     Suppose you have foo() that will get replaced into tmfoo().  Make
     sure the inliner doesn't try to outsmart us and inline foo()
     sure the inliner doesn't try to outsmart us and inline foo()
     before we get a chance to do the TM replacement.  */
     before we get a chance to do the TM replacement.  */
  DECL_UNINLINABLE (from) = 1;
  DECL_UNINLINABLE (from) = 1;
 
 
  if (tm_wrap_map == NULL)
  if (tm_wrap_map == NULL)
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);
    tm_wrap_map = htab_create_ggc (32, tree_map_hash, tree_map_eq, 0);
 
 
  h = ggc_alloc_tree_map ();
  h = ggc_alloc_tree_map ();
  h->hash = htab_hash_pointer (from);
  h->hash = htab_hash_pointer (from);
  h->base.from = from;
  h->base.from = from;
  h->to = to;
  h->to = to;
 
 
  slot = (struct tree_map **)
  slot = (struct tree_map **)
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
    htab_find_slot_with_hash (tm_wrap_map, h, h->hash, INSERT);
  *slot = h;
  *slot = h;
}
}
 
 
/* Return a TM-aware replacement function for FNDECL, or NULL if there
   is none.  User-recorded wrappers (tm_wrap) take precedence over the
   built-in memcpy/memmove/memset replacements.  */

static tree
find_tm_replacement_function (tree fndecl)
{
  if (tm_wrap_map)
    {
      struct tree_map *h, in;

      in.base.from = fndecl;
      in.hash = htab_hash_pointer (fndecl);
      h = (struct tree_map *) htab_find_with_hash (tm_wrap_map, &in, in.hash);
      if (h)
        return h->to;
    }

  /* ??? We may well want TM versions of most of the common <string.h>
     functions.  For now, we have only these defined.  */
  /* Adjust expand_call_tm() attributes as necessary for the cases
     handled here:  */
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
    switch (DECL_FUNCTION_CODE (fndecl))
      {
      case BUILT_IN_MEMCPY:
        return builtin_decl_explicit (BUILT_IN_TM_MEMCPY);
      case BUILT_IN_MEMMOVE:
        return builtin_decl_explicit (BUILT_IN_TM_MEMMOVE);
      case BUILT_IN_MEMSET:
        return builtin_decl_explicit (BUILT_IN_TM_MEMSET);
      default:
        return NULL;
      }

  return NULL;
}
 
 
/* When appropriate, record TM replacement for memory allocation functions.
/* When appropriate, record TM replacement for memory allocation functions.
 
 
   FROM is the FNDECL to wrap.  */
   FROM is the FNDECL to wrap.  */
void
void
tm_malloc_replacement (tree from)
tm_malloc_replacement (tree from)
{
{
  const char *str;
  const char *str;
  tree to;
  tree to;
 
 
  if (TREE_CODE (from) != FUNCTION_DECL)
  if (TREE_CODE (from) != FUNCTION_DECL)
    return;
    return;
 
 
  /* If we have a previous replacement, the user must be explicitly
  /* If we have a previous replacement, the user must be explicitly
     wrapping malloc/calloc/free.  They better know what they're
     wrapping malloc/calloc/free.  They better know what they're
     doing... */
     doing... */
  if (find_tm_replacement_function (from))
  if (find_tm_replacement_function (from))
    return;
    return;
 
 
  str = IDENTIFIER_POINTER (DECL_NAME (from));
  str = IDENTIFIER_POINTER (DECL_NAME (from));
 
 
  if (!strcmp (str, "malloc"))
  if (!strcmp (str, "malloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
    to = builtin_decl_explicit (BUILT_IN_TM_MALLOC);
  else if (!strcmp (str, "calloc"))
  else if (!strcmp (str, "calloc"))
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
    to = builtin_decl_explicit (BUILT_IN_TM_CALLOC);
  else if (!strcmp (str, "free"))
  else if (!strcmp (str, "free"))
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
    to = builtin_decl_explicit (BUILT_IN_TM_FREE);
  else
  else
    return;
    return;
 
 
  TREE_NOTHROW (to) = 0;
  TREE_NOTHROW (to) = 0;
 
 
  record_tm_replacement (from, to);
  record_tm_replacement (from, to);
}
}


/* Diagnostics for tm_safe functions/regions.  Called by the front end
/* Diagnostics for tm_safe functions/regions.  Called by the front end
   once we've lowered the function to high-gimple.  */
   once we've lowered the function to high-gimple.  */
 
 
/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
/* Subroutine of diagnose_tm_safe_errors, called through walk_gimple_seq.
   Process exactly one statement.  WI->INFO is set to non-null when in
   Process exactly one statement.  WI->INFO is set to non-null when in
   the context of a tm_safe function, and null for a __transaction block.  */
   the context of a tm_safe function, and null for a __transaction block.  */
 
 
#define DIAG_TM_OUTER           1
#define DIAG_TM_OUTER           1
#define DIAG_TM_SAFE            2
#define DIAG_TM_SAFE            2
#define DIAG_TM_RELAXED         4
#define DIAG_TM_RELAXED         4
 
 
struct diagnose_tm
struct diagnose_tm
{
{
  unsigned int summary_flags : 8;
  unsigned int summary_flags : 8;
  unsigned int block_flags : 8;
  unsigned int block_flags : 8;
  unsigned int func_flags : 8;
  unsigned int func_flags : 8;
  unsigned int saw_volatile : 1;
  unsigned int saw_volatile : 1;
  gimple stmt;
  gimple stmt;
};
};
 
 
/* Tree callback function for diagnose_tm pass.  Reports an error on the
   first volatile operand seen inside a safe or relaxed transaction.  */

static tree
diagnose_tm_1_op (tree *tp, int *walk_subtrees ATTRIBUTE_UNUSED,
                  void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  struct diagnose_tm *d = (struct diagnose_tm *) wi->info;
  enum tree_code code = TREE_CODE (*tp);

  if ((code == VAR_DECL
       || code == RESULT_DECL
       || code == PARM_DECL)
      && d->block_flags & (DIAG_TM_SAFE | DIAG_TM_RELAXED)
      && TREE_THIS_VOLATILE (TREE_TYPE (*tp))
      && !d->saw_volatile)
    {
      d->saw_volatile = 1;
      error_at (gimple_location (d->stmt),
                "invalid volatile use of %qD inside transaction",
                *tp);
    }

  return NULL_TREE;
}
 
 
/* Statement callback for the diagnose_tm pass.  Diagnose unsafe calls,
   asm statements, and ill-nested transactions, then recurse into the
   bodies of GIMPLE_TRANSACTION statements with updated flags.  */

static tree
diagnose_tm_1 (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                    struct walk_stmt_info *wi)
{
  gimple stmt = gsi_stmt (*gsi);
  struct diagnose_tm *diag = (struct diagnose_tm *) wi->info;

  /* Record the statement so diagnose_tm_1_op can point errors at it.  */
  diag->stmt = stmt;

  switch (gimple_code (stmt))
    {
    case GIMPLE_CALL:
      {
        tree fn = gimple_call_fn (stmt);

        /* transaction_may_cancel_outer is only meaningful inside an
           outer transaction or a like-attributed function.  */
        if ((diag->summary_flags & DIAG_TM_OUTER) == 0
            && is_tm_may_cancel_outer (fn))
          error_at (gimple_location (stmt),
                    "%<transaction_may_cancel_outer%> function call not within"
                    " outer transaction or %<transaction_may_cancel_outer%>");

        if (diag->summary_flags & DIAG_TM_SAFE)
          {
            bool is_safe, direct_call_p;
            tree replacement;

            if (TREE_CODE (fn) == ADDR_EXPR
                && TREE_CODE (TREE_OPERAND (fn, 0)) == FUNCTION_DECL)
              {
                /* Direct call: substitute a TM replacement function
                   if one is registered.  */
                direct_call_p = true;
                replacement = TREE_OPERAND (fn, 0);
                replacement = find_tm_replacement_function (replacement);
                if (replacement)
                  fn = replacement;
              }
            else
              {
                direct_call_p = false;
                replacement = NULL_TREE;
              }

            if (is_tm_safe_or_pure (fn))
              is_safe = true;
            else if (is_tm_callable (fn) || is_tm_irrevocable (fn))
              {
                /* A function explicitly marked transaction_callable as
                   opposed to transaction_safe is being defined to be
                   unsafe as part of its ABI, regardless of its contents.  */
                is_safe = false;
              }
            else if (direct_call_p)
              {
                if (flags_from_decl_or_type (fn) & ECF_TM_BUILTIN)
                  is_safe = true;
                else if (replacement)
                  {
                    /* ??? At present we've been considering replacements
                       merely transaction_callable, and therefore might
                       enter irrevocable.  The tm_wrap attribute has not
                       yet made it into the new language spec.  */
                    is_safe = false;
                  }
                else
                  {
                    /* ??? Diagnostics for unmarked direct calls moved into
                       the IPA pass.  Section 3.2 of the spec details how
                       functions not marked should be considered "implicitly
                       safe" based on having examined the function body.  */
                    is_safe = true;
                  }
              }
            else
              {
                /* An unmarked indirect call.  Consider it unsafe even
                   though optimization may yet figure out how to inline.  */
                is_safe = false;
              }

            if (!is_safe)
              {
                if (TREE_CODE (fn) == ADDR_EXPR)
                  fn = TREE_OPERAND (fn, 0);
                /* Word the diagnostic by whether the requirement comes
                   from an atomic transaction block or from the enclosing
                   transaction_safe function.  */
                if (diag->block_flags & DIAG_TM_SAFE)
                  {
                    if (direct_call_p)
                      error_at (gimple_location (stmt),
                                "unsafe function call %qD within "
                                "atomic transaction", fn);
                    else
                      {
                        if (!DECL_P (fn) || DECL_NAME (fn))
                          error_at (gimple_location (stmt),
                                    "unsafe function call %qE within "
                                    "atomic transaction", fn);
                        else
                          error_at (gimple_location (stmt),
                                    "unsafe indirect function call within "
                                    "atomic transaction");
                      }
                  }
                else
                  {
                    if (direct_call_p)
                      error_at (gimple_location (stmt),
                                "unsafe function call %qD within "
                                "%<transaction_safe%> function", fn);
                    else
                      {
                        if (!DECL_P (fn) || DECL_NAME (fn))
                          error_at (gimple_location (stmt),
                                    "unsafe function call %qE within "
                                    "%<transaction_safe%> function", fn);
                        else
                          error_at (gimple_location (stmt),
                                    "unsafe indirect function call within "
                                    "%<transaction_safe%> function");
                      }
                  }
              }
          }
      }
      break;

    case GIMPLE_ASM:
      /* ??? We ought to come up with a way to add attributes to
         asm statements, and then add "transaction_safe" to it.
         Either that or get the language spec to resurrect __tm_waiver.  */
      if (diag->block_flags & DIAG_TM_SAFE)
        error_at (gimple_location (stmt),
                  "asm not allowed in atomic transaction");
      else if (diag->func_flags & DIAG_TM_SAFE)
        error_at (gimple_location (stmt),
                  "asm not allowed in %<transaction_safe%> function");
      break;

    case GIMPLE_TRANSACTION:
      {
        unsigned char inner_flags = DIAG_TM_SAFE;

        if (gimple_transaction_subcode (stmt) & GTMA_IS_RELAXED)
          {
            /* A relaxed transaction may not appear in an atomic
               context, whether block- or function-induced.  */
            if (diag->block_flags & DIAG_TM_SAFE)
              error_at (gimple_location (stmt),
                        "relaxed transaction in atomic transaction");
            else if (diag->func_flags & DIAG_TM_SAFE)
              error_at (gimple_location (stmt),
                        "relaxed transaction in %<transaction_safe%> function");
            inner_flags = DIAG_TM_RELAXED;
          }
        else if (gimple_transaction_subcode (stmt) & GTMA_IS_OUTER)
          {
            /* Outer transactions must not nest inside any other
               transaction context.  */
            if (diag->block_flags)
              error_at (gimple_location (stmt),
                        "outer transaction in transaction");
            else if (diag->func_flags & DIAG_TM_OUTER)
              error_at (gimple_location (stmt),
                        "outer transaction in "
                        "%<transaction_may_cancel_outer%> function");
            else if (diag->func_flags & DIAG_TM_SAFE)
              error_at (gimple_location (stmt),
                        "outer transaction in %<transaction_safe%> function");
            inner_flags |= DIAG_TM_OUTER;
          }

        /* Walk the body ourselves with the inner flags folded in.  */
        *handled_ops_p = true;
        if (gimple_transaction_body (stmt))
          {
            struct walk_stmt_info wi_inner;
            struct diagnose_tm diag_inner;

            memset (&diag_inner, 0, sizeof (diag_inner));
            diag_inner.func_flags = diag->func_flags;
            diag_inner.block_flags = diag->block_flags | inner_flags;
            diag_inner.summary_flags
              = diag_inner.func_flags | diag_inner.block_flags;

            memset (&wi_inner, 0, sizeof (wi_inner));
            wi_inner.info = &diag_inner;

            walk_gimple_seq (gimple_transaction_body (stmt),
                             diagnose_tm_1, diagnose_tm_1_op, &wi_inner);
          }
      }
      break;

    default:
      break;
    }

  return NULL_TREE;
}
 
 
static unsigned int
static unsigned int
diagnose_tm_blocks (void)
diagnose_tm_blocks (void)
{
{
  struct walk_stmt_info wi;
  struct walk_stmt_info wi;
  struct diagnose_tm d;
  struct diagnose_tm d;
 
 
  memset (&d, 0, sizeof (d));
  memset (&d, 0, sizeof (d));
  if (is_tm_may_cancel_outer (current_function_decl))
  if (is_tm_may_cancel_outer (current_function_decl))
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
    d.func_flags = DIAG_TM_OUTER | DIAG_TM_SAFE;
  else if (is_tm_safe (current_function_decl))
  else if (is_tm_safe (current_function_decl))
    d.func_flags = DIAG_TM_SAFE;
    d.func_flags = DIAG_TM_SAFE;
  d.summary_flags = d.func_flags;
  d.summary_flags = d.func_flags;
 
 
  memset (&wi, 0, sizeof (wi));
  memset (&wi, 0, sizeof (wi));
  wi.info = &d;
  wi.info = &d;
 
 
  walk_gimple_seq (gimple_body (current_function_decl),
  walk_gimple_seq (gimple_body (current_function_decl),
                   diagnose_tm_1, diagnose_tm_1_op, &wi);
                   diagnose_tm_1, diagnose_tm_1_op, &wi);
 
 
  return 0;
  return 0;
}
}
 
 
struct gimple_opt_pass pass_diagnose_tm_blocks =
struct gimple_opt_pass pass_diagnose_tm_blocks =
{
{
  {
  {
    GIMPLE_PASS,
    GIMPLE_PASS,
    "*diagnose_tm_blocks",              /* name */
    "*diagnose_tm_blocks",              /* name */
    gate_tm,                            /* gate */
    gate_tm,                            /* gate */
    diagnose_tm_blocks,                 /* execute */
    diagnose_tm_blocks,                 /* execute */
    NULL,                               /* sub */
    NULL,                               /* sub */
    NULL,                               /* next */
    NULL,                               /* next */
    0,                                   /* static_pass_number */
    0,                                   /* static_pass_number */
    TV_TRANS_MEM,                       /* tv_id */
    TV_TRANS_MEM,                       /* tv_id */
    PROP_gimple_any,                    /* properties_required */
    PROP_gimple_any,                    /* properties_required */
    0,                                   /* properties_provided */
    0,                                   /* properties_provided */
    0,                                   /* properties_destroyed */
    0,                                   /* properties_destroyed */
    0,                                   /* todo_flags_start */
    0,                                   /* todo_flags_start */
    0,                                   /* todo_flags_finish */
    0,                                   /* todo_flags_finish */
  }
  }
};
};


/* Instead of instrumenting thread private memory, we save the
   addresses in a log which we later use to save/restore the addresses
   upon transaction start/restart.

   The log is keyed by address, where each element contains individual
   statements among different code paths that perform the store.

   This log is later used to generate either plain save/restore of the
   addresses upon transaction start/restart, or calls to the ITM_L*
   logging functions.

   So for something like:

       struct large { int x[1000]; };
       struct large lala = { 0 };
       __transaction {
         lala.x[i] = 123;
         ...
       }

   We can either save/restore:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       if (trxn & a_saveLiveVariables)
         tmp_lala1 = lala.x[i];
       else if (a & a_restoreLiveVariables)
         lala.x[i] = tmp_lala1;

   or use the logging functions:

       lala = { 0 };
       trxn = _ITM_startTransaction ();
       _ITM_LU4 (&lala.x[i]);

   Obviously, if we use _ITM_L* to log, we prefer to call _ITM_L* as
   far up the dominator tree to shadow all of the writes to a given
   location (thus reducing the total number of logging calls), but not
   so high as to be called on a path that does not perform a
   write.  */
 
 
/* One individual log entry.  We may have multiple statements for the
/* One individual log entry.  We may have multiple statements for the
   same location if neither dominate each other (on different
   same location if neither dominate each other (on different
   execution paths).  */
   execution paths).  */
typedef struct tm_log_entry
typedef struct tm_log_entry
{
{
  /* Address to save.  */
  /* Address to save.  */
  tree addr;
  tree addr;
  /* Entry block for the transaction this address occurs in.  */
  /* Entry block for the transaction this address occurs in.  */
  basic_block entry_block;
  basic_block entry_block;
  /* Dominating statements the store occurs in.  */
  /* Dominating statements the store occurs in.  */
  gimple_vec stmts;
  gimple_vec stmts;
  /* Initially, while we are building the log, we place a nonzero
  /* Initially, while we are building the log, we place a nonzero
     value here to mean that this address *will* be saved with a
     value here to mean that this address *will* be saved with a
     save/restore sequence.  Later, when generating the save sequence
     save/restore sequence.  Later, when generating the save sequence
     we place the SSA temp generated here.  */
     we place the SSA temp generated here.  */
  tree save_var;
  tree save_var;
} *tm_log_entry_t;
} *tm_log_entry_t;
 
 
/* The actual log.  */
static htab_t tm_log;

/* Addresses to log with a save/restore sequence.  These should be in
   dominator order.  */
static VEC(tree,heap) *tm_log_save_addresses;

/* Map for an SSA_NAME originally pointing to a non aliased new piece
   of memory (malloc, alloc, etc).  */
static htab_t tm_new_mem_hash;
 
 
/* Classification of a memory location's locality with respect to
   threads and transactions.  */
enum thread_memory_type
  {
    mem_non_local = 0,          /* Default: not known to be local.  */
    mem_thread_local,
    mem_transaction_local,
    mem_max
  };
 
 
typedef struct tm_new_mem_map
typedef struct tm_new_mem_map
{
{
  /* SSA_NAME being dereferenced.  */
  /* SSA_NAME being dereferenced.  */
  tree val;
  tree val;
  enum thread_memory_type local_new_memory;
  enum thread_memory_type local_new_memory;
} tm_new_mem_map_t;
} tm_new_mem_map_t;
 
 
/* Htab support.  Return hash value for a `tm_log_entry'.  */
/* Htab support.  Return hash value for a `tm_log_entry'.  */
static hashval_t
static hashval_t
tm_log_hash (const void *p)
tm_log_hash (const void *p)
{
{
  const struct tm_log_entry *log = (const struct tm_log_entry *) p;
  const struct tm_log_entry *log = (const struct tm_log_entry *) p;
  return iterative_hash_expr (log->addr, 0);
  return iterative_hash_expr (log->addr, 0);
}
}
 
 
/* Htab support.  Return true if two log entries are the same.  */
/* Htab support.  Return true if two log entries are the same.  */
static int
static int
tm_log_eq (const void *p1, const void *p2)
tm_log_eq (const void *p1, const void *p2)
{
{
  const struct tm_log_entry *log1 = (const struct tm_log_entry *) p1;
  const struct tm_log_entry *log1 = (const struct tm_log_entry *) p1;
  const struct tm_log_entry *log2 = (const struct tm_log_entry *) p2;
  const struct tm_log_entry *log2 = (const struct tm_log_entry *) p2;
 
 
  /* FIXME:
  /* FIXME:
 
 
     rth: I suggest that we get rid of the component refs etc.
     rth: I suggest that we get rid of the component refs etc.
     I.e. resolve the reference to base + offset.
     I.e. resolve the reference to base + offset.
 
 
     We may need to actually finish a merge with mainline for this,
     We may need to actually finish a merge with mainline for this,
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     since we'd like to be presented with Richi's MEM_REF_EXPRs more
     often than not.  But in the meantime your tm_log_entry could save
     often than not.  But in the meantime your tm_log_entry could save
     the results of get_inner_reference.
     the results of get_inner_reference.
 
 
     See: g++.dg/tm/pr46653.C
     See: g++.dg/tm/pr46653.C
  */
  */
 
 
  /* Special case plain equality because operand_equal_p() below will
  /* Special case plain equality because operand_equal_p() below will
     return FALSE if the addresses are equal but they have
     return FALSE if the addresses are equal but they have
     side-effects (e.g. a volatile address).  */
     side-effects (e.g. a volatile address).  */
  if (log1->addr == log2->addr)
  if (log1->addr == log2->addr)
    return true;
    return true;
 
 
  return operand_equal_p (log1->addr, log2->addr, 0);
  return operand_equal_p (log1->addr, log2->addr, 0);
}
}
 
 
/* Htab support.  Free one tm_log_entry.  */
/* Htab support.  Free one tm_log_entry.  */
static void
static void
tm_log_free (void *p)
tm_log_free (void *p)
{
{
  struct tm_log_entry *lp = (struct tm_log_entry *) p;
  struct tm_log_entry *lp = (struct tm_log_entry *) p;
  VEC_free (gimple, heap, lp->stmts);
  VEC_free (gimple, heap, lp->stmts);
  free (lp);
  free (lp);
}
}
 
 
/* Initialize logging data structures.  */
/* Initialize logging data structures.  */
static void
static void
tm_log_init (void)
tm_log_init (void)
{
{
  tm_log = htab_create (10, tm_log_hash, tm_log_eq, tm_log_free);
  tm_log = htab_create (10, tm_log_hash, tm_log_eq, tm_log_free);
  tm_new_mem_hash = htab_create (5, struct_ptr_hash, struct_ptr_eq, free);
  tm_new_mem_hash = htab_create (5, struct_ptr_hash, struct_ptr_eq, free);
  tm_log_save_addresses = VEC_alloc (tree, heap, 5);
  tm_log_save_addresses = VEC_alloc (tree, heap, 5);
}
}
 
 
/* Free logging data structures.  */
/* Free logging data structures.  */
static void
static void
tm_log_delete (void)
tm_log_delete (void)
{
{
  htab_delete (tm_log);
  htab_delete (tm_log);
  htab_delete (tm_new_mem_hash);
  htab_delete (tm_new_mem_hash);
  VEC_free (tree, heap, tm_log_save_addresses);
  VEC_free (tree, heap, tm_log_save_addresses);
}
}
 
 
/* Return true if MEM is a transaction invariant memory for the TM
/* Return true if MEM is a transaction invariant memory for the TM
   region starting at REGION_ENTRY_BLOCK.  */
   region starting at REGION_ENTRY_BLOCK.  */
static bool
static bool
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
transaction_invariant_address_p (const_tree mem, basic_block region_entry_block)
{
{
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
  if ((TREE_CODE (mem) == INDIRECT_REF || TREE_CODE (mem) == MEM_REF)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
      && TREE_CODE (TREE_OPERAND (mem, 0)) == SSA_NAME)
    {
    {
      basic_block def_bb;
      basic_block def_bb;
 
 
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      def_bb = gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (mem, 0)));
      return def_bb != region_entry_block
      return def_bb != region_entry_block
        && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
        && dominated_by_p (CDI_DOMINATORS, region_entry_block, def_bb);
    }
    }
 
 
  mem = strip_invariant_refs (mem);
  mem = strip_invariant_refs (mem);
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
  return mem && (CONSTANT_CLASS_P (mem) || decl_address_invariant_p (mem));
}
}
 
 
/* Given an address ADDR in STMT, find it in the memory log or add it,
/* Given an address ADDR in STMT, find it in the memory log or add it,
   making sure to keep only the addresses highest in the dominator
   making sure to keep only the addresses highest in the dominator
   tree.
   tree.
 
 
   ENTRY_BLOCK is the entry_block for the transaction.
   ENTRY_BLOCK is the entry_block for the transaction.
 
 
   If we find the address in the log, make sure it's either the same
   If we find the address in the log, make sure it's either the same
   address, or an equivalent one that dominates ADDR.
   address, or an equivalent one that dominates ADDR.
 
 
   If we find the address, but neither ADDR dominates the found
   If we find the address, but neither ADDR dominates the found
   address, nor the found one dominates ADDR, we're on different
   address, nor the found one dominates ADDR, we're on different
   execution paths.  Add it.
   execution paths.  Add it.
 
 
   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   If known, ENTRY_BLOCK is the entry block for the region, otherwise
   NULL.  */
   NULL.  */
static void
static void
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
tm_log_add (basic_block entry_block, tree addr, gimple stmt)
{
{
  void **slot;
  void **slot;
  struct tm_log_entry l, *lp;
  struct tm_log_entry l, *lp;
 
 
  l.addr = addr;
  l.addr = addr;
  slot = htab_find_slot (tm_log, &l, INSERT);
  slot = htab_find_slot (tm_log, &l, INSERT);
  if (!*slot)
  if (!*slot)
    {
    {
      tree type = TREE_TYPE (addr);
      tree type = TREE_TYPE (addr);
 
 
      lp = XNEW (struct tm_log_entry);
      lp = XNEW (struct tm_log_entry);
      lp->addr = addr;
      lp->addr = addr;
      *slot = lp;
      *slot = lp;
 
 
      /* Small invariant addresses can be handled as save/restores.  */
      /* Small invariant addresses can be handled as save/restores.  */
      if (entry_block
      if (entry_block
          && transaction_invariant_address_p (lp->addr, entry_block)
          && transaction_invariant_address_p (lp->addr, entry_block)
          && TYPE_SIZE_UNIT (type) != NULL
          && TYPE_SIZE_UNIT (type) != NULL
          && host_integerp (TYPE_SIZE_UNIT (type), 1)
          && host_integerp (TYPE_SIZE_UNIT (type), 1)
          && (tree_low_cst (TYPE_SIZE_UNIT (type), 1)
          && (tree_low_cst (TYPE_SIZE_UNIT (type), 1)
              < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
              < PARAM_VALUE (PARAM_TM_MAX_AGGREGATE_SIZE))
          /* We must be able to copy this type normally.  I.e., no
          /* We must be able to copy this type normally.  I.e., no
             special constructors and the like.  */
             special constructors and the like.  */
          && !TREE_ADDRESSABLE (type))
          && !TREE_ADDRESSABLE (type))
        {
        {
          lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
          lp->save_var = create_tmp_reg (TREE_TYPE (lp->addr), "tm_save");
          add_referenced_var (lp->save_var);
          add_referenced_var (lp->save_var);
          lp->stmts = NULL;
          lp->stmts = NULL;
          lp->entry_block = entry_block;
          lp->entry_block = entry_block;
          /* Save addresses separately in dominator order so we don't
          /* Save addresses separately in dominator order so we don't
             get confused by overlapping addresses in the save/restore
             get confused by overlapping addresses in the save/restore
             sequence.  */
             sequence.  */
          VEC_safe_push (tree, heap, tm_log_save_addresses, lp->addr);
          VEC_safe_push (tree, heap, tm_log_save_addresses, lp->addr);
        }
        }
      else
      else
        {
        {
          /* Use the logging functions.  */
          /* Use the logging functions.  */
          lp->stmts = VEC_alloc (gimple, heap, 5);
          lp->stmts = VEC_alloc (gimple, heap, 5);
          VEC_quick_push (gimple, lp->stmts, stmt);
          VEC_quick_push (gimple, lp->stmts, stmt);
          lp->save_var = NULL;
          lp->save_var = NULL;
        }
        }
    }
    }
  else
  else
    {
    {
      size_t i;
      size_t i;
      gimple oldstmt;
      gimple oldstmt;
 
 
      lp = (struct tm_log_entry *) *slot;
      lp = (struct tm_log_entry *) *slot;
 
 
      /* If we're generating a save/restore sequence, we don't care
      /* If we're generating a save/restore sequence, we don't care
         about statements.  */
         about statements.  */
      if (lp->save_var)
      if (lp->save_var)
        return;
        return;
 
 
      for (i = 0; VEC_iterate (gimple, lp->stmts, i, oldstmt); ++i)
      for (i = 0; VEC_iterate (gimple, lp->stmts, i, oldstmt); ++i)
        {
        {
          if (stmt == oldstmt)
          if (stmt == oldstmt)
            return;
            return;
          /* We already have a store to the same address, higher up the
          /* We already have a store to the same address, higher up the
             dominator tree.  Nothing to do.  */
             dominator tree.  Nothing to do.  */
          if (dominated_by_p (CDI_DOMINATORS,
          if (dominated_by_p (CDI_DOMINATORS,
                              gimple_bb (stmt), gimple_bb (oldstmt)))
                              gimple_bb (stmt), gimple_bb (oldstmt)))
            return;
            return;
          /* We should be processing blocks in dominator tree order.  */
          /* We should be processing blocks in dominator tree order.  */
          gcc_assert (!dominated_by_p (CDI_DOMINATORS,
          gcc_assert (!dominated_by_p (CDI_DOMINATORS,
                                       gimple_bb (oldstmt), gimple_bb (stmt)));
                                       gimple_bb (oldstmt), gimple_bb (stmt)));
        }
        }
      /* Store is on a different code path.  */
      /* Store is on a different code path.  */
      VEC_safe_push (gimple, heap, lp->stmts, stmt);
      VEC_safe_push (gimple, heap, lp->stmts, stmt);
    }
    }
}
}
 
 
/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
/* Gimplify the address of a TARGET_MEM_REF.  Return the SSA_NAME
   result, insert the new statements before GSI.  */
   result, insert the new statements before GSI.  */
 
 
static tree
static tree
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
gimplify_addr (gimple_stmt_iterator *gsi, tree x)
{
{
  if (TREE_CODE (x) == TARGET_MEM_REF)
  if (TREE_CODE (x) == TARGET_MEM_REF)
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
    x = tree_mem_ref_addr (build_pointer_type (TREE_TYPE (x)), x);
  else
  else
    x = build_fold_addr_expr (x);
    x = build_fold_addr_expr (x);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
  return force_gimple_operand_gsi (gsi, x, true, NULL, true, GSI_SAME_STMT);
}
}
 
 
/* Instrument one address with the logging functions.
/* Instrument one address with the logging functions.
   ADDR is the address to save.
   ADDR is the address to save.
   STMT is the statement before which to place it.  */
   STMT is the statement before which to place it.  */
static void
static void
tm_log_emit_stmt (tree addr, gimple stmt)
tm_log_emit_stmt (tree addr, gimple stmt)
{
{
  tree type = TREE_TYPE (addr);
  tree type = TREE_TYPE (addr);
  tree size = TYPE_SIZE_UNIT (type);
  tree size = TYPE_SIZE_UNIT (type);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
  gimple log;
  gimple log;
  enum built_in_function code = BUILT_IN_TM_LOG;
  enum built_in_function code = BUILT_IN_TM_LOG;
 
 
  if (type == float_type_node)
  if (type == float_type_node)
    code = BUILT_IN_TM_LOG_FLOAT;
    code = BUILT_IN_TM_LOG_FLOAT;
  else if (type == double_type_node)
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOG_DOUBLE;
    code = BUILT_IN_TM_LOG_DOUBLE;
  else if (type == long_double_type_node)
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOG_LDOUBLE;
    code = BUILT_IN_TM_LOG_LDOUBLE;
  else if (host_integerp (size, 1))
  else if (host_integerp (size, 1))
    {
    {
      unsigned int n = tree_low_cst (size, 1);
      unsigned int n = tree_low_cst (size, 1);
      switch (n)
      switch (n)
        {
        {
        case 1:
        case 1:
          code = BUILT_IN_TM_LOG_1;
          code = BUILT_IN_TM_LOG_1;
          break;
          break;
        case 2:
        case 2:
          code = BUILT_IN_TM_LOG_2;
          code = BUILT_IN_TM_LOG_2;
          break;
          break;
        case 4:
        case 4:
          code = BUILT_IN_TM_LOG_4;
          code = BUILT_IN_TM_LOG_4;
          break;
          break;
        case 8:
        case 8:
          code = BUILT_IN_TM_LOG_8;
          code = BUILT_IN_TM_LOG_8;
          break;
          break;
        default:
        default:
          code = BUILT_IN_TM_LOG;
          code = BUILT_IN_TM_LOG;
          if (TREE_CODE (type) == VECTOR_TYPE)
          if (TREE_CODE (type) == VECTOR_TYPE)
            {
            {
              if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
              if (n == 8 && builtin_decl_explicit (BUILT_IN_TM_LOG_M64))
                code = BUILT_IN_TM_LOG_M64;
                code = BUILT_IN_TM_LOG_M64;
              else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
              else if (n == 16 && builtin_decl_explicit (BUILT_IN_TM_LOG_M128))
                code = BUILT_IN_TM_LOG_M128;
                code = BUILT_IN_TM_LOG_M128;
              else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
              else if (n == 32 && builtin_decl_explicit (BUILT_IN_TM_LOG_M256))
                code = BUILT_IN_TM_LOG_M256;
                code = BUILT_IN_TM_LOG_M256;
            }
            }
          break;
          break;
        }
        }
    }
    }
 
 
  addr = gimplify_addr (&gsi, addr);
  addr = gimplify_addr (&gsi, addr);
  if (code == BUILT_IN_TM_LOG)
  if (code == BUILT_IN_TM_LOG)
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr,  size);
    log = gimple_build_call (builtin_decl_explicit (code), 2, addr,  size);
  else
  else
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
    log = gimple_build_call (builtin_decl_explicit (code), 1, addr);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
  gsi_insert_before (&gsi, log, GSI_SAME_STMT);
}
}
 
 
/* Go through the log and instrument address that must be instrumented
/* Go through the log and instrument address that must be instrumented
   with the logging functions.  Leave the save/restore addresses for
   with the logging functions.  Leave the save/restore addresses for
   later.  */
   later.  */
static void
static void
tm_log_emit (void)
tm_log_emit (void)
{
{
  htab_iterator hi;
  htab_iterator hi;
  struct tm_log_entry *lp;
  struct tm_log_entry *lp;
 
 
  FOR_EACH_HTAB_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
  FOR_EACH_HTAB_ELEMENT (tm_log, lp, tm_log_entry_t, hi)
    {
    {
      size_t i;
      size_t i;
      gimple stmt;
      gimple stmt;
 
 
      if (dump_file)
      if (dump_file)
        {
        {
          fprintf (dump_file, "TM thread private mem logging: ");
          fprintf (dump_file, "TM thread private mem logging: ");
          print_generic_expr (dump_file, lp->addr, 0);
          print_generic_expr (dump_file, lp->addr, 0);
          fprintf (dump_file, "\n");
          fprintf (dump_file, "\n");
        }
        }
 
 
      if (lp->save_var)
      if (lp->save_var)
        {
        {
          if (dump_file)
          if (dump_file)
            fprintf (dump_file, "DUMPING to variable\n");
            fprintf (dump_file, "DUMPING to variable\n");
          continue;
          continue;
        }
        }
      else
      else
        {
        {
          if (dump_file)
          if (dump_file)
            fprintf (dump_file, "DUMPING with logging functions\n");
            fprintf (dump_file, "DUMPING with logging functions\n");
          for (i = 0; VEC_iterate (gimple, lp->stmts, i, stmt); ++i)
          for (i = 0; VEC_iterate (gimple, lp->stmts, i, stmt); ++i)
            tm_log_emit_stmt (lp->addr, stmt);
            tm_log_emit_stmt (lp->addr, stmt);
        }
        }
    }
    }
}
}
 
 
/* Emit the save sequence for the corresponding addresses in the log.
/* Emit the save sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
   BB is the basic block to insert the code in.  */
static void
static void
tm_log_emit_saves (basic_block entry_block, basic_block bb)
tm_log_emit_saves (basic_block entry_block, basic_block bb)
{
{
  size_t i;
  size_t i;
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple stmt;
  gimple stmt;
  struct tm_log_entry l, *lp;
  struct tm_log_entry l, *lp;
 
 
  for (i = 0; i < VEC_length (tree, tm_log_save_addresses); ++i)
  for (i = 0; i < VEC_length (tree, tm_log_save_addresses); ++i)
    {
    {
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      gcc_assert (lp->save_var != NULL);
      gcc_assert (lp->save_var != NULL);
 
 
      /* We only care about variables in the current transaction.  */
      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
      if (lp->entry_block != entry_block)
        continue;
        continue;
 
 
      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
      stmt = gimple_build_assign (lp->save_var, unshare_expr (lp->addr));
 
 
      /* Make sure we can create an SSA_NAME for this type.  For
      /* Make sure we can create an SSA_NAME for this type.  For
         instance, aggregates aren't allowed, in which case the system
         instance, aggregates aren't allowed, in which case the system
         will create a VOP for us and everything will just work.  */
         will create a VOP for us and everything will just work.  */
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
      if (is_gimple_reg_type (TREE_TYPE (lp->save_var)))
        {
        {
          lp->save_var = make_ssa_name (lp->save_var, stmt);
          lp->save_var = make_ssa_name (lp->save_var, stmt);
          gimple_assign_set_lhs (stmt, lp->save_var);
          gimple_assign_set_lhs (stmt, lp->save_var);
        }
        }
 
 
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
    }
}
}
 
 
/* Emit the restore sequence for the corresponding addresses in the log.
/* Emit the restore sequence for the corresponding addresses in the log.
   ENTRY_BLOCK is the entry block for the transaction.
   ENTRY_BLOCK is the entry block for the transaction.
   BB is the basic block to insert the code in.  */
   BB is the basic block to insert the code in.  */
static void
static void
tm_log_emit_restores (basic_block entry_block, basic_block bb)
tm_log_emit_restores (basic_block entry_block, basic_block bb)
{
{
  int i;
  int i;
  struct tm_log_entry l, *lp;
  struct tm_log_entry l, *lp;
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
  gimple stmt;
  gimple stmt;
 
 
  for (i = VEC_length (tree, tm_log_save_addresses) - 1; i >= 0; i--)
  for (i = VEC_length (tree, tm_log_save_addresses) - 1; i >= 0; i--)
    {
    {
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      l.addr = VEC_index (tree, tm_log_save_addresses, i);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      lp = (struct tm_log_entry *) *htab_find_slot (tm_log, &l, NO_INSERT);
      gcc_assert (lp->save_var != NULL);
      gcc_assert (lp->save_var != NULL);
 
 
      /* We only care about variables in the current transaction.  */
      /* We only care about variables in the current transaction.  */
      if (lp->entry_block != entry_block)
      if (lp->entry_block != entry_block)
        continue;
        continue;
 
 
      /* Restores are in LIFO order from the saves in case we have
      /* Restores are in LIFO order from the saves in case we have
         overlaps.  */
         overlaps.  */
      gsi = gsi_start_bb (bb);
      gsi = gsi_start_bb (bb);
 
 
      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      stmt = gimple_build_assign (unshare_expr (lp->addr), lp->save_var);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
    }
    }
}
}
 
 
/* Emit the checks for performing either a save or a restore sequence.
/* Emit the checks for performing either a save or a restore sequence.
 
 
   TRXN_PROP is either A_SAVELIVEVARIABLES or A_RESTORELIVEVARIABLES.
   TRXN_PROP is either A_SAVELIVEVARIABLES or A_RESTORELIVEVARIABLES.
 
 
   The code sequence is inserted in a new basic block created in
   The code sequence is inserted in a new basic block created in
   END_BB which is inserted between BEFORE_BB and the destination of
   END_BB which is inserted between BEFORE_BB and the destination of
   FALLTHRU_EDGE.
   FALLTHRU_EDGE.
 
 
   STATUS is the return value from _ITM_beginTransaction.
   STATUS is the return value from _ITM_beginTransaction.
   ENTRY_BLOCK is the entry block for the transaction.
   ENTRY_BLOCK is the entry block for the transaction.
   EMITF is a callback to emit the actual save/restore code.
   EMITF is a callback to emit the actual save/restore code.
 
 
   The basic block containing the conditional checking for TRXN_PROP
   The basic block containing the conditional checking for TRXN_PROP
   is returned.  */
   is returned.  */
static basic_block
static basic_block
tm_log_emit_save_or_restores (basic_block entry_block,
tm_log_emit_save_or_restores (basic_block entry_block,
                              unsigned trxn_prop,
                              unsigned trxn_prop,
                              tree status,
                              tree status,
                              void (*emitf)(basic_block, basic_block),
                              void (*emitf)(basic_block, basic_block),
                              basic_block before_bb,
                              basic_block before_bb,
                              edge fallthru_edge,
                              edge fallthru_edge,
                              basic_block *end_bb)
                              basic_block *end_bb)
{
{
  basic_block cond_bb, code_bb;
  basic_block cond_bb, code_bb;
  gimple cond_stmt, stmt;
  gimple cond_stmt, stmt;
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
  tree t1, t2;
  tree t1, t2;
  int old_flags = fallthru_edge->flags;
  int old_flags = fallthru_edge->flags;
 
 
  cond_bb = create_empty_bb (before_bb);
  cond_bb = create_empty_bb (before_bb);
  code_bb = create_empty_bb (cond_bb);
  code_bb = create_empty_bb (cond_bb);
  *end_bb = create_empty_bb (code_bb);
  *end_bb = create_empty_bb (code_bb);
  redirect_edge_pred (fallthru_edge, *end_bb);
  redirect_edge_pred (fallthru_edge, *end_bb);
  fallthru_edge->flags = EDGE_FALLTHRU;
  fallthru_edge->flags = EDGE_FALLTHRU;
  make_edge (before_bb, cond_bb, old_flags);
  make_edge (before_bb, cond_bb, old_flags);
 
 
  set_immediate_dominator (CDI_DOMINATORS, cond_bb, before_bb);
  set_immediate_dominator (CDI_DOMINATORS, cond_bb, before_bb);
  set_immediate_dominator (CDI_DOMINATORS, code_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, code_bb, cond_bb);
 
 
  gsi = gsi_last_bb (cond_bb);
  gsi = gsi_last_bb (cond_bb);
 
 
  /* t1 = status & A_{property}.  */
  /* t1 = status & A_{property}.  */
  t1 = make_rename_temp (TREE_TYPE (status), NULL);
  t1 = make_rename_temp (TREE_TYPE (status), NULL);
  t2 = build_int_cst (TREE_TYPE (status), trxn_prop);
  t2 = build_int_cst (TREE_TYPE (status), trxn_prop);
  stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
  stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
 
 
  /* if (t1).  */
  /* if (t1).  */
  t2 = build_int_cst (TREE_TYPE (status), 0);
  t2 = build_int_cst (TREE_TYPE (status), 0);
  cond_stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
  cond_stmt = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
  gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
 
 
  emitf (entry_block, code_bb);
  emitf (entry_block, code_bb);
 
 
  make_edge (cond_bb, code_bb, EDGE_TRUE_VALUE);
  make_edge (cond_bb, code_bb, EDGE_TRUE_VALUE);
  make_edge (cond_bb, *end_bb, EDGE_FALSE_VALUE);
  make_edge (cond_bb, *end_bb, EDGE_FALSE_VALUE);
  make_edge (code_bb, *end_bb, EDGE_FALLTHRU);
  make_edge (code_bb, *end_bb, EDGE_FALLTHRU);
 
 
  return cond_bb;
  return cond_bb;
}
}


/* Forward declarations for the sequence-lowering walkers defined
   later in this file.  */
static tree lower_sequence_tm (gimple_stmt_iterator *, bool *,
                               struct walk_stmt_info *);
static tree lower_sequence_no_tm (gimple_stmt_iterator *, bool *,
                                  struct walk_stmt_info *);
 
 
/* Evaluate an address X being dereferenced and determine if it
/* Evaluate an address X being dereferenced and determine if it
   originally points to a non aliased new chunk of memory (malloc,
   originally points to a non aliased new chunk of memory (malloc,
   alloca, etc).
   alloca, etc).
 
 
   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_THREAD_LOCAL if it points to a thread-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_TRANSACTION_LOCAL if it points to a transaction-local address.
   Return MEM_NON_LOCAL otherwise.
   Return MEM_NON_LOCAL otherwise.
 
 
   ENTRY_BLOCK is the entry block to the transaction containing the
   ENTRY_BLOCK is the entry block to the transaction containing the
   dereference of X.  */
   dereference of X.  */
static enum thread_memory_type
static enum thread_memory_type
thread_private_new_memory (basic_block entry_block, tree x)
thread_private_new_memory (basic_block entry_block, tree x)
{
{
  gimple stmt = NULL;
  gimple stmt = NULL;
  enum tree_code code;
  enum tree_code code;
  void **slot;
  void **slot;
  tm_new_mem_map_t elt, *elt_p;
  tm_new_mem_map_t elt, *elt_p;
  tree val = x;
  tree val = x;
  enum thread_memory_type retval = mem_transaction_local;
  enum thread_memory_type retval = mem_transaction_local;
 
 
  if (!entry_block
  if (!entry_block
      || TREE_CODE (x) != SSA_NAME
      || TREE_CODE (x) != SSA_NAME
      /* Possible uninitialized use, or a function argument.  In
      /* Possible uninitialized use, or a function argument.  In
         either case, we don't care.  */
         either case, we don't care.  */
      || SSA_NAME_IS_DEFAULT_DEF (x))
      || SSA_NAME_IS_DEFAULT_DEF (x))
    return mem_non_local;
    return mem_non_local;
 
 
  /* Look in cache first.  */
  /* Look in cache first.  */
  elt.val = x;
  elt.val = x;
  slot = htab_find_slot (tm_new_mem_hash, &elt, INSERT);
  slot = htab_find_slot (tm_new_mem_hash, &elt, INSERT);
  elt_p = (tm_new_mem_map_t *) *slot;
  elt_p = (tm_new_mem_map_t *) *slot;
  if (elt_p)
  if (elt_p)
    return elt_p->local_new_memory;
    return elt_p->local_new_memory;
 
 
  /* Optimistically assume the memory is transaction local during
  /* Optimistically assume the memory is transaction local during
     processing.  This catches recursion into this variable.  */
     processing.  This catches recursion into this variable.  */
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  *slot = elt_p = XNEW (tm_new_mem_map_t);
  elt_p->val = val;
  elt_p->val = val;
  elt_p->local_new_memory = mem_transaction_local;
  elt_p->local_new_memory = mem_transaction_local;
 
 
  /* Search DEF chain to find the original definition of this address.  */
  /* Search DEF chain to find the original definition of this address.  */
  do
  do
    {
    {
      if (ptr_deref_may_alias_global_p (x))
      if (ptr_deref_may_alias_global_p (x))
        {
        {
          /* Address escapes.  This is not thread-private.  */
          /* Address escapes.  This is not thread-private.  */
          retval = mem_non_local;
          retval = mem_non_local;
          goto new_memory_ret;
          goto new_memory_ret;
        }
        }
 
 
      stmt = SSA_NAME_DEF_STMT (x);
      stmt = SSA_NAME_DEF_STMT (x);
 
 
      /* If the malloc call is outside the transaction, this is
      /* If the malloc call is outside the transaction, this is
         thread-local.  */
         thread-local.  */
      if (retval != mem_thread_local
      if (retval != mem_thread_local
          && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
          && !dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt), entry_block))
        retval = mem_thread_local;
        retval = mem_thread_local;
 
 
      if (is_gimple_assign (stmt))
      if (is_gimple_assign (stmt))
        {
        {
          code = gimple_assign_rhs_code (stmt);
          code = gimple_assign_rhs_code (stmt);
          /* x = foo ==> foo */
          /* x = foo ==> foo */
          if (code == SSA_NAME)
          if (code == SSA_NAME)
            x = gimple_assign_rhs1 (stmt);
            x = gimple_assign_rhs1 (stmt);
          /* x = foo + n ==> foo */
          /* x = foo + n ==> foo */
          else if (code == POINTER_PLUS_EXPR)
          else if (code == POINTER_PLUS_EXPR)
            x = gimple_assign_rhs1 (stmt);
            x = gimple_assign_rhs1 (stmt);
          /* x = (cast*) foo ==> foo */
          /* x = (cast*) foo ==> foo */
          else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
          else if (code == VIEW_CONVERT_EXPR || code == NOP_EXPR)
            x = gimple_assign_rhs1 (stmt);
            x = gimple_assign_rhs1 (stmt);
          else
          else
            {
            {
              retval = mem_non_local;
              retval = mem_non_local;
              goto new_memory_ret;
              goto new_memory_ret;
            }
            }
        }
        }
      else
      else
        {
        {
          if (gimple_code (stmt) == GIMPLE_PHI)
          if (gimple_code (stmt) == GIMPLE_PHI)
            {
            {
              unsigned int i;
              unsigned int i;
              enum thread_memory_type mem;
              enum thread_memory_type mem;
              tree phi_result = gimple_phi_result (stmt);
              tree phi_result = gimple_phi_result (stmt);
 
 
              /* If any of the ancestors are non-local, we are sure to
              /* If any of the ancestors are non-local, we are sure to
                 be non-local.  Otherwise we can avoid doing anything
                 be non-local.  Otherwise we can avoid doing anything
                 and inherit what has already been generated.  */
                 and inherit what has already been generated.  */
              retval = mem_max;
              retval = mem_max;
              for (i = 0; i < gimple_phi_num_args (stmt); ++i)
              for (i = 0; i < gimple_phi_num_args (stmt); ++i)
                {
                {
                  tree op = PHI_ARG_DEF (stmt, i);
                  tree op = PHI_ARG_DEF (stmt, i);
 
 
                  /* Exclude self-assignment.  */
                  /* Exclude self-assignment.  */
                  if (phi_result == op)
                  if (phi_result == op)
                    continue;
                    continue;
 
 
                  mem = thread_private_new_memory (entry_block, op);
                  mem = thread_private_new_memory (entry_block, op);
                  if (mem == mem_non_local)
                  if (mem == mem_non_local)
                    {
                    {
                      retval = mem;
                      retval = mem;
                      goto new_memory_ret;
                      goto new_memory_ret;
                    }
                    }
                  retval = MIN (retval, mem);
                  retval = MIN (retval, mem);
                }
                }
              goto new_memory_ret;
              goto new_memory_ret;
            }
            }
          break;
          break;
        }
        }
    }
    }
  while (TREE_CODE (x) == SSA_NAME);
  while (TREE_CODE (x) == SSA_NAME);
 
 
  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
  if (stmt && is_gimple_call (stmt) && gimple_call_flags (stmt) & ECF_MALLOC)
    /* Thread-local or transaction-local.  */
    /* Thread-local or transaction-local.  */
    ;
    ;
  else
  else
    retval = mem_non_local;
    retval = mem_non_local;
 
 
 new_memory_ret:
 new_memory_ret:
  elt_p->local_new_memory = retval;
  elt_p->local_new_memory = retval;
  return retval;
  return retval;
}
}
 
 
/* Determine whether X has to be instrumented using a read
/* Determine whether X has to be instrumented using a read
   or write barrier.
   or write barrier.
 
 
   ENTRY_BLOCK is the entry block for the region where stmt resides
   ENTRY_BLOCK is the entry block for the region where stmt resides
   in.  NULL if unknown.
   in.  NULL if unknown.
 
 
   STMT is the statement in which X occurs in.  It is used for thread
   STMT is the statement in which X occurs in.  It is used for thread
   private memory instrumentation.  If no TPM instrumentation is
   private memory instrumentation.  If no TPM instrumentation is
   desired, STMT should be null.  */
   desired, STMT should be null.  */
static bool
static bool
requires_barrier (basic_block entry_block, tree x, gimple stmt)
requires_barrier (basic_block entry_block, tree x, gimple stmt)
{
{
  tree orig = x;
  tree orig = x;
  while (handled_component_p (x))
  while (handled_component_p (x))
    x = TREE_OPERAND (x, 0);
    x = TREE_OPERAND (x, 0);
 
 
  switch (TREE_CODE (x))
  switch (TREE_CODE (x))
    {
    {
    case INDIRECT_REF:
    case INDIRECT_REF:
    case MEM_REF:
    case MEM_REF:
      {
      {
        enum thread_memory_type ret;
        enum thread_memory_type ret;
 
 
        ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
        ret = thread_private_new_memory (entry_block, TREE_OPERAND (x, 0));
        if (ret == mem_non_local)
        if (ret == mem_non_local)
          return true;
          return true;
        if (stmt && ret == mem_thread_local)
        if (stmt && ret == mem_thread_local)
          /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
          /* ?? Should we pass `orig', or the INDIRECT_REF X.  ?? */
          tm_log_add (entry_block, orig, stmt);
          tm_log_add (entry_block, orig, stmt);
 
 
        /* Transaction-locals require nothing at all.  For malloc, a
        /* Transaction-locals require nothing at all.  For malloc, a
           transaction restart frees the memory and we reallocate.
           transaction restart frees the memory and we reallocate.
           For alloca, the stack pointer gets reset by the retry and
           For alloca, the stack pointer gets reset by the retry and
           we reallocate.  */
           we reallocate.  */
        return false;
        return false;
      }
      }
 
 
    case TARGET_MEM_REF:
    case TARGET_MEM_REF:
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
      if (TREE_CODE (TMR_BASE (x)) != ADDR_EXPR)
        return true;
        return true;
      x = TREE_OPERAND (TMR_BASE (x), 0);
      x = TREE_OPERAND (TMR_BASE (x), 0);
      if (TREE_CODE (x) == PARM_DECL)
      if (TREE_CODE (x) == PARM_DECL)
        return false;
        return false;
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      gcc_assert (TREE_CODE (x) == VAR_DECL);
      /* FALLTHRU */
      /* FALLTHRU */
 
 
    case PARM_DECL:
    case PARM_DECL:
    case RESULT_DECL:
    case RESULT_DECL:
    case VAR_DECL:
    case VAR_DECL:
      if (DECL_BY_REFERENCE (x))
      if (DECL_BY_REFERENCE (x))
        {
        {
          /* ??? This value is a pointer, but aggregate_value_p has been
          /* ??? This value is a pointer, but aggregate_value_p has been
             jigged to return true which confuses needs_to_live_in_memory.
             jigged to return true which confuses needs_to_live_in_memory.
             This ought to be cleaned up generically.
             This ought to be cleaned up generically.
 
 
             FIXME: Verify this still happens after the next mainline
             FIXME: Verify this still happens after the next mainline
             merge.  Testcase ie g++.dg/tm/pr47554.C.
             merge.  Testcase ie g++.dg/tm/pr47554.C.
          */
          */
          return false;
          return false;
        }
        }
 
 
      if (is_global_var (x))
      if (is_global_var (x))
        return !TREE_READONLY (x);
        return !TREE_READONLY (x);
      if (/* FIXME: This condition should actually go below in the
      if (/* FIXME: This condition should actually go below in the
             tm_log_add() call, however is_call_clobbered() depends on
             tm_log_add() call, however is_call_clobbered() depends on
             aliasing info which is not available during
             aliasing info which is not available during
             gimplification.  Since requires_barrier() gets called
             gimplification.  Since requires_barrier() gets called
             during lower_sequence_tm/gimplification, leave the call
             during lower_sequence_tm/gimplification, leave the call
             to needs_to_live_in_memory until we eliminate
             to needs_to_live_in_memory until we eliminate
             lower_sequence_tm altogether.  */
             lower_sequence_tm altogether.  */
          needs_to_live_in_memory (x))
          needs_to_live_in_memory (x))
        return true;
        return true;
      else
      else
        {
        {
          /* For local memory that doesn't escape (aka thread private
          /* For local memory that doesn't escape (aka thread private
             memory), we can either save the value at the beginning of
             memory), we can either save the value at the beginning of
             the transaction and restore on restart, or call a tm
             the transaction and restore on restart, or call a tm
             function to dynamically save and restore on restart
             function to dynamically save and restore on restart
             (ITM_L*).  */
             (ITM_L*).  */
          if (stmt)
          if (stmt)
            tm_log_add (entry_block, orig, stmt);
            tm_log_add (entry_block, orig, stmt);
          return false;
          return false;
        }
        }
 
 
    default:
    default:
      return false;
      return false;
    }
    }
}
}
 
 
/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
/* Mark the GIMPLE_ASSIGN statement as appropriate for being inside
   a transaction region.  */
   a transaction region.  */
 
 
static void
static void
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
examine_assign_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
{
  gimple stmt = gsi_stmt (*gsi);
  gimple stmt = gsi_stmt (*gsi);
 
 
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_rhs1 (stmt), NULL))
    *state |= GTMA_HAVE_LOAD;
    *state |= GTMA_HAVE_LOAD;
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
  if (requires_barrier (/*entry_block=*/NULL, gimple_assign_lhs (stmt), NULL))
    *state |= GTMA_HAVE_STORE;
    *state |= GTMA_HAVE_STORE;
}
}
 
 
/* Mark a GIMPLE_CALL as appropriate for being inside a transaction.
   Pure TM calls need no marking; transaction aborts set
   GTMA_HAVE_ABORT; any other call conservatively sets both
   GTMA_HAVE_LOAD and GTMA_HAVE_STORE in *STATE.  */

static void
examine_call_tm (unsigned *state, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  tree fn;

  if (is_tm_pure_call (stmt))
    return;

  /* Check if this call is a transaction abort.  */
  fn = gimple_call_fndecl (stmt);
  if (is_tm_abort (fn))
    *state |= GTMA_HAVE_ABORT;

  /* Note that something may happen.  */
  *state |= GTMA_HAVE_LOAD | GTMA_HAVE_STORE;
}
 
 
/* Lower a GIMPLE_TRANSACTION statement.  GSI points at the statement;
   WI->info, when non-NULL, points at the subcode accumulator of the
   enclosing (outer) transaction.  */

static void
lower_transaction (gimple_stmt_iterator *gsi, struct walk_stmt_info *wi)
{
  gimple g, stmt = gsi_stmt (*gsi);
  /* Subcode accumulator of the enclosing transaction, or NULL when this
     transaction is not nested inside another one.  */
  unsigned int *outer_state = (unsigned int *) wi->info;
  unsigned int this_state = 0;
  struct walk_stmt_info this_wi;

  /* First, lower the body.  The scanning that we do inside gives
     us some idea of what we're dealing with.  */
  memset (&this_wi, 0, sizeof (this_wi));
  this_wi.info = (void *) &this_state;
  walk_gimple_seq (gimple_transaction_body (stmt),
                   lower_sequence_tm, NULL, &this_wi);

  /* If there was absolutely nothing transaction related inside the
     transaction, we may elide it.  Likewise if this is a nested
     transaction and does not contain an abort.  */
  if (this_state == 0
      || (!(this_state & GTMA_HAVE_ABORT) && outer_state != NULL))
    {
      /* Propagate what we found to the enclosing transaction.  */
      if (outer_state)
        *outer_state |= this_state;

      /* Splice the body out in front of the GIMPLE_TRANSACTION
         statement and delete the statement itself.  */
      gsi_insert_seq_before (gsi, gimple_transaction_body (stmt),
                             GSI_SAME_STMT);
      gimple_transaction_set_body (stmt, NULL);

      gsi_remove (gsi, true);
      wi->removed_stmt = true;
      return;
    }

  /* Wrap the body of the transaction in a try-finally node so that
     the commit call is always properly called.  */
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT), 0);
  if (flag_exceptions)
    {
      /* With exceptions enabled, build a GIMPLE_EH_ELSE so the normal
         path uses the plain commit while the exceptional path commits
         with the exception pointer (BUILT_IN_EH_POINTER result).  */
      tree ptr;
      gimple_seq n_seq, e_seq;

      n_seq = gimple_seq_alloc_with_stmt (g);
      e_seq = gimple_seq_alloc ();

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_EH_POINTER),
                             1, integer_zero_node);
      ptr = create_tmp_var (ptr_type_node, NULL);
      gimple_call_set_lhs (g, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_COMMIT_EH),
                             1, ptr);
      gimple_seq_add_stmt (&e_seq, g);

      g = gimple_build_eh_else (n_seq, e_seq);
    }

  g = gimple_build_try (gimple_transaction_body (stmt),
                        gimple_seq_alloc_with_stmt (g), GIMPLE_TRY_FINALLY);
  gsi_insert_after (gsi, g, GSI_CONTINUE_LINKING);

  /* The GIMPLE_TRANSACTION statement stays in the IL; its body has
     been moved to the try-finally inserted after it.  */
  gimple_transaction_set_body (stmt, NULL);

  /* If the transaction calls abort or if this is an outer transaction,
     add an "over" label afterwards.  */
  if ((this_state & (GTMA_HAVE_ABORT))
      || (gimple_transaction_subcode(stmt) & GTMA_IS_OUTER))
    {
      tree label = create_artificial_label (UNKNOWN_LOCATION);
      gimple_transaction_set_label (stmt, label);
      gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
    }

  /* Record the set of operations found for use later.  */
  this_state |= gimple_transaction_subcode (stmt) & GTMA_DECLARATION_MASK;
  gimple_transaction_set_subcode (stmt, this_state);
}
 
 
/* Iterate through the statements in the sequence, lowering them all
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being in a transaction.  */
   as appropriate for being in a transaction.  */
 
 
static tree
static tree
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
lower_sequence_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                   struct walk_stmt_info *wi)
                   struct walk_stmt_info *wi)
{
{
  unsigned int *state = (unsigned int *) wi->info;
  unsigned int *state = (unsigned int *) wi->info;
  gimple stmt = gsi_stmt (*gsi);
  gimple stmt = gsi_stmt (*gsi);
 
 
  *handled_ops_p = true;
  *handled_ops_p = true;
  switch (gimple_code (stmt))
  switch (gimple_code (stmt))
    {
    {
    case GIMPLE_ASSIGN:
    case GIMPLE_ASSIGN:
      /* Only memory reads/writes need to be instrumented.  */
      /* Only memory reads/writes need to be instrumented.  */
      if (gimple_assign_single_p (stmt))
      if (gimple_assign_single_p (stmt))
        examine_assign_tm (state, gsi);
        examine_assign_tm (state, gsi);
      break;
      break;
 
 
    case GIMPLE_CALL:
    case GIMPLE_CALL:
      examine_call_tm (state, gsi);
      examine_call_tm (state, gsi);
      break;
      break;
 
 
    case GIMPLE_ASM:
    case GIMPLE_ASM:
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      *state |= GTMA_MAY_ENTER_IRREVOCABLE;
      break;
      break;
 
 
    case GIMPLE_TRANSACTION:
    case GIMPLE_TRANSACTION:
      lower_transaction (gsi, wi);
      lower_transaction (gsi, wi);
      break;
      break;
 
 
    default:
    default:
      *handled_ops_p = !gimple_has_substatements (stmt);
      *handled_ops_p = !gimple_has_substatements (stmt);
      break;
      break;
    }
    }
 
 
  return NULL_TREE;
  return NULL_TREE;
}
}
 
 
/* Iterate through the statements in the sequence, lowering them all
/* Iterate through the statements in the sequence, lowering them all
   as appropriate for being outside of a transaction.  */
   as appropriate for being outside of a transaction.  */
 
 
static tree
static tree
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
lower_sequence_no_tm (gimple_stmt_iterator *gsi, bool *handled_ops_p,
                      struct walk_stmt_info * wi)
                      struct walk_stmt_info * wi)
{
{
  gimple stmt = gsi_stmt (*gsi);
  gimple stmt = gsi_stmt (*gsi);
 
 
  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
  if (gimple_code (stmt) == GIMPLE_TRANSACTION)
    {
    {
      *handled_ops_p = true;
      *handled_ops_p = true;
      lower_transaction (gsi, wi);
      lower_transaction (gsi, wi);
    }
    }
  else
  else
    *handled_ops_p = !gimple_has_substatements (stmt);
    *handled_ops_p = !gimple_has_substatements (stmt);
 
 
  return NULL_TREE;
  return NULL_TREE;
}
}
 
 
/* Main entry point for flattening GIMPLE_TRANSACTION constructs.  After
   this, GIMPLE_TRANSACTION nodes still exist, but the nested body has
   been moved out, and all the data required for constructing a proper
   CFG has been recorded.  Returns 0 (no additional TODO flags).  */

static unsigned int
execute_lower_tm (void)
{
  struct walk_stmt_info wi;

  /* Transactional clones aren't created until a later pass.  */
  gcc_assert (!decl_is_tm_clone (current_function_decl));

  /* wi.info is left NULL: at the top level there is no enclosing
     transaction to accumulate subcode flags into.  */
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (gimple_body (current_function_decl),
                   lower_sequence_no_tm, NULL, &wi);

  return 0;
}
 
 
/* Pass descriptor for TM lowering; gated on gate_tm (i.e. -fgnu-tm),
   and requires lowered control flow (PROP_gimple_lcf).  */

struct gimple_opt_pass pass_lower_tm =
{
 {
  GIMPLE_PASS,
  "tmlower",                            /* name */
  gate_tm,                              /* gate */
  execute_lower_tm,                     /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_gimple_lcf,                      /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  TODO_dump_func                        /* todo_flags_finish */
 }
};


/* Collect region information for each transaction.  */

struct tm_region
{
  /* Link to the next unnested transaction.  */
  struct tm_region *next;

  /* Link to the next inner transaction.  */
  struct tm_region *inner;

  /* Link to the next outer transaction.  */
  struct tm_region *outer;

  /* The GIMPLE_TRANSACTION statement beginning this transaction.  */
  gimple transaction_stmt;

  /* The entry block to this region.  */
  basic_block entry_block;

  /* The set of all blocks that end the region; NULL if only EXIT_BLOCK.
     These blocks are still a part of the region (i.e., the border is
     inclusive). Note that this set is only complete for paths in the CFG
     starting at ENTRY_BLOCK, and that there is no exit block recorded for
     the edge to the "over" label.  */
  bitmap exit_blocks;

  /* The set of all blocks that have a TM_IRREVOCABLE call.  */
  bitmap irr_blocks;
};
 
 
/* True if there are pending edge statements to be committed for the
   current function being scanned in the tmmark pass.  */
bool pending_edge_inserts_p;

/* Head of the list of outermost transaction regions in the current
   function; nested regions hang off each element's INNER link.  */
static struct tm_region *all_tm_regions;

/* Obstack backing the exit_blocks/irr_blocks bitmaps of the
   tm_region structures.  */
static bitmap_obstack tm_obstack;
 
 
 
 
/* A subroutine of tm_region_init.  Record the existance of the
/* A subroutine of tm_region_init.  Record the existance of the
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */
   GIMPLE_TRANSACTION statement in a tree of tm_region elements.  */
 
 
static struct tm_region *
static struct tm_region *
tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
tm_region_init_0 (struct tm_region *outer, basic_block bb, gimple stmt)
{
{
  struct tm_region *region;
  struct tm_region *region;
 
 
  region = (struct tm_region *)
  region = (struct tm_region *)
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
    obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
 
 
  if (outer)
  if (outer)
    {
    {
      region->next = outer->inner;
      region->next = outer->inner;
      outer->inner = region;
      outer->inner = region;
    }
    }
  else
  else
    {
    {
      region->next = all_tm_regions;
      region->next = all_tm_regions;
      all_tm_regions = region;
      all_tm_regions = region;
    }
    }
  region->inner = NULL;
  region->inner = NULL;
  region->outer = outer;
  region->outer = outer;
 
 
  region->transaction_stmt = stmt;
  region->transaction_stmt = stmt;
 
 
  /* There are either one or two edges out of the block containing
  /* There are either one or two edges out of the block containing
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     the GIMPLE_TRANSACTION, one to the actual region and one to the
     "over" label if the region contains an abort.  The former will
     "over" label if the region contains an abort.  The former will
     always be the one marked FALLTHRU.  */
     always be the one marked FALLTHRU.  */
  region->entry_block = FALLTHRU_EDGE (bb)->dest;
  region->entry_block = FALLTHRU_EDGE (bb)->dest;
 
 
  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->exit_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
  region->irr_blocks = BITMAP_ALLOC (&tm_obstack);
 
 
  return region;
  return region;
}
}
 
 
/* A subroutine of tm_region_init.  Record all the exit and
   irrevocable blocks in BB into the region's exit_blocks and
   irr_blocks bitmaps.  Returns the new region being scanned.  */

static struct tm_region *
tm_region_init_1 (struct tm_region *region, basic_block bb)
{
  gimple_stmt_iterator gsi;
  gimple g;

  /* Nothing to do outside any region, or when the region records
     neither exit nor irrevocable blocks.  */
  if (!region
      || (!region->irr_blocks && !region->exit_blocks))
    return region;

  /* Check to see if this is the end of a region by seeing if it
     contains a call to __builtin_tm_commit{,_eh}.  Note that the
     outermost region for DECL_IS_TM_CLONE need not collect this.
     The block's statements are scanned from last to first.  */
  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      g = gsi_stmt (gsi);
      if (gimple_code (g) == GIMPLE_CALL)
        {
          tree fn = gimple_call_fndecl (g);
          if (fn && DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL)
            {
              if ((DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT
                   || DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_COMMIT_EH)
                  && region->exit_blocks)
                {
                  /* A commit ends the current region in this block;
                     subsequent blocks belong to the enclosing region.  */
                  bitmap_set_bit (region->exit_blocks, bb->index);
                  region = region->outer;
                  break;
                }
              if (DECL_FUNCTION_CODE (fn) == BUILT_IN_TM_IRREVOCABLE)
                bitmap_set_bit (region->irr_blocks, bb->index);
            }
        }
    }
  return region;
}
 
 
/* Collect all of the transaction regions within the current function
   and record them in ALL_TM_REGIONS.  The REGION parameter may specify
   an "outermost" region for use by tm clones.  */

static void
tm_region_init (struct tm_region *region)
{
  gimple g;
  edge_iterator ei;
  edge e;
  basic_block bb;
  VEC(basic_block, heap) *queue = NULL;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  struct tm_region *old_region;
  /* Maps a basic block index to the region the block belongs to,
     valid while the block sits on the worklist QUEUE.  */
  struct tm_region **region_worklist;

  all_tm_regions = region;
  bb = single_succ (ENTRY_BLOCK_PTR);

  /* We could store this information in bb->aux, but we may get called
     through get_all_tm_blocks() from another pass that may be already
     using bb->aux.  */
  region_worklist =
    (struct tm_region **) xcalloc (sizeof (struct tm_region *),
                                  n_basic_blocks + NUM_FIXED_BLOCKS + 2);

  /* Breadth-first walk of the CFG starting at the entry block.  */
  VEC_safe_push (basic_block, heap, queue, bb);
  region_worklist[bb->index] = region;
  do
    {
      bb = VEC_pop (basic_block, queue);
      region = region_worklist[bb->index];
      region_worklist[bb->index] = NULL;

      /* Record exit and irrevocable blocks.  */
      region = tm_region_init_1 (region, bb);

      /* Check for the last statement in the block beginning a new region.  */
      g = last_stmt (bb);
      old_region = region;
      if (g && gimple_code (g) == GIMPLE_TRANSACTION)
        region = tm_region_init_0 (region, bb, g);

      /* Process subsequent blocks.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            VEC_safe_push (basic_block, heap, queue, e->dest);

            /* If the current block started a new region, make sure that only
               the entry block of the new region is associated with this region.
               Other successors are still part of the old region.  */
            if (old_region != region && e->dest != region->entry_block)
              region_worklist[e->dest->index] = old_region;
            else
              region_worklist[e->dest->index] = region;
          }
    }
  while (!VEC_empty (basic_block, queue));
  VEC_free (basic_block, heap, queue);
  BITMAP_FREE (visited_blocks);
  free (region_worklist);
}
 
 
/* The "gate" function for all transactional memory expansion and optimization
   passes.  We collect region information for each top-level transaction, and
   if we don't find any, we skip all of the TM passes.  Each region will have
   all of the exit blocks recorded, and the originating statement.  */

static bool
gate_tm_init (void)
{
  if (!flag_tm)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_initialize (&tm_obstack);

  /* If the function is a TM_CLONE, then the entire function is the region.  */
  if (decl_is_tm_clone (current_function_decl))
    {
      struct tm_region *region = (struct tm_region *)
        obstack_alloc (&tm_obstack.obstack, sizeof (struct tm_region));
      /* The memset leaves exit_blocks NULL, meaning the region ends
         only at EXIT_BLOCK (see struct tm_region).  */
      memset (region, 0, sizeof (*region));
      region->entry_block = single_succ (ENTRY_BLOCK_PTR);
      /* For a clone, the entire function is the region.  But even if
         we don't need to record any exit blocks, we may need to
         record irrevocable blocks.  */
      region->irr_blocks = BITMAP_ALLOC (&tm_obstack);

      tm_region_init (region);
    }
  else
    {
      tm_region_init (NULL);

      /* If we didn't find any regions, cleanup and skip the whole tree
         of tm-related optimizations.  */
      if (all_tm_regions == NULL)
        {
          bitmap_obstack_release (&tm_obstack);
          return false;
        }
    }

  return true;
}
 
 
/* Pass descriptor for TM region collection; gated on gate_tm_init so
   all subordinate TM passes are skipped when no regions exist.  It has
   no execute function — the gate does the work.  */

struct gimple_opt_pass pass_tm_init =
{
 {
  GIMPLE_PASS,
  "*tminit",                            /* name */
  gate_tm_init,                         /* gate */
  NULL,                                 /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                    /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                    /* properties_provided */
  0,                                    /* properties_destroyed */
  0,                                    /* todo_flags_start */
  0,                                    /* todo_flags_finish */
 }
};


/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
/* Add FLAGS to the GIMPLE_TRANSACTION subcode for the transaction region
   represented by STATE.  */
   represented by STATE.  */
 
 
static inline void
static inline void
transaction_subcode_ior (struct tm_region *region, unsigned flags)
transaction_subcode_ior (struct tm_region *region, unsigned flags)
{
{
  if (region && region->transaction_stmt)
  if (region && region->transaction_stmt)
    {
    {
      flags |= gimple_transaction_subcode (region->transaction_stmt);
      flags |= gimple_transaction_subcode (region->transaction_stmt);
      gimple_transaction_set_subcode (region->transaction_stmt, flags);
      gimple_transaction_set_subcode (region->transaction_stmt, flags);
    }
    }
}
}
 
 
/* Construct a memory load in a transactional context.  Return the
   gimple statement performing the load, or NULL if there is no
   TM_LOAD builtin of the appropriate size to do the load.

   The new statements implementing LHS = TM_LOAD (&RHS) are inserted
   before GSI.  LOC is the location to use for the new statement(s).  */

static gimple
build_tm_load (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree t, type = TREE_TYPE (rhs), decl;
  gimple gcall;

  /* Select the TM_LOAD builtin matching the loaded type: the special
     floating-point variants first, then by byte size for 1/2/4/8.  */
  if (type == float_type_node)
    code = BUILT_IN_TM_LOAD_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_LOAD_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_LOAD_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
           && host_integerp (TYPE_SIZE_UNIT (type), 1))
    {
      switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
        {
        case 1:
          code = BUILT_IN_TM_LOAD_1;
          break;
        case 2:
          code = BUILT_IN_TM_LOAD_2;
          break;
        case 4:
          code = BUILT_IN_TM_LOAD_4;
          break;
        case 8:
          code = BUILT_IN_TM_LOAD_8;
          break;
        }
    }

  if (code == END_BUILTINS)
    {
      /* No generic builtin fits; let the target provide one via its
         vectorizer hook, else give up and return NULL.  */
      decl = targetm.vectorize.builtin_tm_load (type);
      if (!decl)
        return NULL;
    }
  else
    decl = builtin_decl_explicit (code);

  /* Build the call DECL (&RHS) at LOC.  */
  t = gimplify_addr (gsi, rhs);
  gcall = gimple_build_call (decl, 1, t);
  gimple_set_location (gcall, loc);

  /* The builtin returns its own declared type; if that is compatible
     with the type of LHS, assign directly, otherwise funnel the result
     through a temporary and a VIEW_CONVERT_EXPR.  */
  t = TREE_TYPE (TREE_TYPE (decl));
  if (useless_type_conversion_p (type, t))
    {
      gimple_call_set_lhs (gcall, lhs);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);
    }
  else
    {
      gimple g;
      tree temp;

      temp = make_rename_temp (t, NULL);
      gimple_call_set_lhs (gcall, temp);
      gsi_insert_before (gsi, gcall, GSI_SAME_STMT);

      t = fold_build1 (VIEW_CONVERT_EXPR, type, temp);
      g = gimple_build_assign (lhs, t);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
    }

  return gcall;
}
 
 
 
 
/* Similarly for storing TYPE in a transactional context.  */

static gimple
build_tm_store (location_t loc, tree lhs, tree rhs, gimple_stmt_iterator *gsi)
{
  enum built_in_function code = END_BUILTINS;
  tree type = TREE_TYPE (rhs);
  tree fndecl, simple_type, addr;
  gimple call;

  /* Map the stored type onto one of the typed TM store builtins.  */
  if (type == float_type_node)
    code = BUILT_IN_TM_STORE_FLOAT;
  else if (type == double_type_node)
    code = BUILT_IN_TM_STORE_DOUBLE;
  else if (type == long_double_type_node)
    code = BUILT_IN_TM_STORE_LDOUBLE;
  else if (TYPE_SIZE_UNIT (type) != NULL
	   && host_integerp (TYPE_SIZE_UNIT (type), 1))
    switch (tree_low_cst (TYPE_SIZE_UNIT (type), 1))
      {
      case 1: code = BUILT_IN_TM_STORE_1; break;
      case 2: code = BUILT_IN_TM_STORE_2; break;
      case 4: code = BUILT_IN_TM_STORE_4; break;
      case 8: code = BUILT_IN_TM_STORE_8; break;
      }

  if (code == END_BUILTINS)
    {
      /* No sized builtin matched; perhaps the target provides a
	 vector store variant.  If not, punt to the caller.  */
      fndecl = targetm.vectorize.builtin_tm_store (type);
      if (!fndecl)
	return NULL;
    }
  else
    fndecl = builtin_decl_explicit (code);

  /* Type of the value argument the builtin expects (its second
     parameter).  */
  simple_type = TREE_VALUE (TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (fndecl))));

  if (TREE_CODE (rhs) == CONSTRUCTOR)
    {
      /* Handle the easy initialization to zero.  */
      if (CONSTRUCTOR_ELTS (rhs) == 0)
	rhs = build_int_cst (simple_type, 0);
      else
	/* ...otherwise punt to the caller and probably use
	   BUILT_IN_TM_MEMMOVE, because we can't wrap a
	   VIEW_CONVERT_EXPR around a CONSTRUCTOR (below) and produce
	   valid gimple.  */
	return NULL;
    }
  else if (!useless_type_conversion_p (simple_type, type))
    {
      /* View-convert the value into the type the builtin expects,
	 bounced through a temporary.  */
      gimple g;
      tree temp = make_rename_temp (simple_type, NULL);

      g = gimple_build_assign (temp,
			       fold_build1 (VIEW_CONVERT_EXPR,
					    simple_type, rhs));
      gimple_set_location (g, loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);

      rhs = temp;
    }

  /* Emit BUILTIN (&LHS, RHS) before the original statement.  */
  addr = gimplify_addr (gsi, lhs);
  call = gimple_build_call (fndecl, 2, addr, rhs);
  gimple_set_location (call, loc);
  gsi_insert_before (gsi, call, GSI_SAME_STMT);

  return call;
}
 
 
 
 
/* Expand an assignment statement into transactional builtins.  */

static void
expand_assign_tm (struct tm_region *region, gimple_stmt_iterator *gsi)
{
  gimple stmt = gsi_stmt (*gsi);
  location_t loc = gimple_location (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs = gimple_assign_rhs1 (stmt);
  bool store_p = requires_barrier (region->entry_block, lhs, NULL);
  bool load_p = requires_barrier (region->entry_block, rhs, NULL);
  gimple call = NULL;

  if (!load_p && !store_p)
    {
      /* No barrier needed; just add thread private addresses to the
	 log if applicable, and advance past the statement.  */
      requires_barrier (region->entry_block, lhs, stmt);
      gsi_next (gsi);
      return;
    }

  /* The original assignment is replaced by builtin calls below.  */
  gsi_remove (gsi, true);

  if (load_p && !store_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      call = build_tm_load (loc, lhs, rhs, gsi);
    }
  else if (store_p && !load_p)
    {
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      call = build_tm_store (loc, lhs, rhs, gsi);
    }

  /* Either both sides require a barrier, or no sized builtin applied;
     fall back to the TM memmove builtin.  */
  if (!call)
    {
      tree lhs_addr, rhs_addr, tmp;

      if (load_p)
	transaction_subcode_ior (region, GTMA_HAVE_LOAD);
      if (store_p)
	transaction_subcode_ior (region, GTMA_HAVE_STORE);

      /* ??? Figure out if there's any possible overlap between the LHS
	 and the RHS and if not, use MEMCPY.  */

      if (load_p && is_gimple_reg (lhs))
	{
	  /* We cannot take the address of a register; bounce the
	     result through an addressable temporary.  */
	  tmp = create_tmp_var (TREE_TYPE (lhs), NULL);
	  lhs_addr = build_fold_addr_expr (tmp);
	}
      else
	{
	  tmp = NULL_TREE;
	  lhs_addr = gimplify_addr (gsi, lhs);
	}
      rhs_addr = gimplify_addr (gsi, rhs);
      call = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_MEMMOVE),
				3, lhs_addr, rhs_addr,
				TYPE_SIZE_UNIT (TREE_TYPE (lhs)));
      gimple_set_location (call, loc);
      gsi_insert_before (gsi, call, GSI_SAME_STMT);

      if (tmp)
	{
	  /* Copy from the temporary back into the real LHS.  */
	  call = gimple_build_assign (lhs, tmp);
	  gsi_insert_before (gsi, call, GSI_SAME_STMT);
	}
    }

  /* Now that we have the load/store in its instrumented form, add
     thread private addresses to the log if applicable.  */
  if (!store_p)
    requires_barrier (region->entry_block, lhs, call);

  /* add_stmt_to_tm_region  (region, call); */
}
 
 
 
 
/* Expand a call statement as appropriate for a transaction.  That is,
/* Expand a call statement as appropriate for a transaction.  That is,
   either verify that the call does not affect the transaction, or
   either verify that the call does not affect the transaction, or
   redirect the call to a clone that handles transactions, or change
   redirect the call to a clone that handles transactions, or change
   the transaction state to IRREVOCABLE.  Return true if the call is
   the transaction state to IRREVOCABLE.  Return true if the call is
   one of the builtins that end a transaction.  */
   one of the builtins that end a transaction.  */
 
 
static bool
static bool
expand_call_tm (struct tm_region *region,
expand_call_tm (struct tm_region *region,
                gimple_stmt_iterator *gsi)
                gimple_stmt_iterator *gsi)
{
{
  gimple stmt = gsi_stmt (*gsi);
  gimple stmt = gsi_stmt (*gsi);
  tree lhs = gimple_call_lhs (stmt);
  tree lhs = gimple_call_lhs (stmt);
  tree fn_decl;
  tree fn_decl;
  struct cgraph_node *node;
  struct cgraph_node *node;
  bool retval = false;
  bool retval = false;
 
 
  fn_decl = gimple_call_fndecl (stmt);
  fn_decl = gimple_call_fndecl (stmt);
 
 
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMCPY)
      || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
      || fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMMOVE))
    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
    transaction_subcode_ior (region, GTMA_HAVE_STORE | GTMA_HAVE_LOAD);
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
  if (fn_decl == builtin_decl_explicit (BUILT_IN_TM_MEMSET))
    transaction_subcode_ior (region, GTMA_HAVE_STORE);
    transaction_subcode_ior (region, GTMA_HAVE_STORE);
 
 
  if (is_tm_pure_call (stmt))
  if (is_tm_pure_call (stmt))
    return false;
    return false;
 
 
  if (fn_decl)
  if (fn_decl)
    retval = is_tm_ending_fndecl (fn_decl);
    retval = is_tm_ending_fndecl (fn_decl);
  if (!retval)
  if (!retval)
    {
    {
      /* Assume all non-const/pure calls write to memory, except
      /* Assume all non-const/pure calls write to memory, except
         transaction ending builtins.  */
         transaction ending builtins.  */
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }
    }
 
 
  /* For indirect calls, we already generated a call into the runtime.  */
  /* For indirect calls, we already generated a call into the runtime.  */
  if (!fn_decl)
  if (!fn_decl)
    {
    {
      tree fn = gimple_call_fn (stmt);
      tree fn = gimple_call_fn (stmt);
 
 
      /* We are guaranteed never to go irrevocable on a safe or pure
      /* We are guaranteed never to go irrevocable on a safe or pure
         call, and the pure call was handled above.  */
         call, and the pure call was handled above.  */
      if (is_tm_safe (fn))
      if (is_tm_safe (fn))
        return false;
        return false;
      else
      else
        transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
        transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
 
 
      return false;
      return false;
    }
    }
 
 
  node = cgraph_get_node (fn_decl);
  node = cgraph_get_node (fn_decl);
  /* All calls should have cgraph here. */
  /* All calls should have cgraph here. */
  gcc_assert (node);
  gcc_assert (node);
  if (node->local.tm_may_enter_irr)
  if (node->local.tm_may_enter_irr)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
 
 
  if (is_tm_abort (fn_decl))
  if (is_tm_abort (fn_decl))
    {
    {
      transaction_subcode_ior (region, GTMA_HAVE_ABORT);
      transaction_subcode_ior (region, GTMA_HAVE_ABORT);
      return true;
      return true;
    }
    }
 
 
  /* Instrument the store if needed.
  /* Instrument the store if needed.
 
 
     If the assignment happens inside the function call (return slot
     If the assignment happens inside the function call (return slot
     optimization), there is no instrumentation to be done, since
     optimization), there is no instrumentation to be done, since
     the callee should have done the right thing.  */
     the callee should have done the right thing.  */
  if (lhs && requires_barrier (region->entry_block, lhs, stmt)
  if (lhs && requires_barrier (region->entry_block, lhs, stmt)
      && !gimple_call_return_slot_opt_p (stmt))
      && !gimple_call_return_slot_opt_p (stmt))
    {
    {
      tree tmp = make_rename_temp (TREE_TYPE (lhs), NULL);
      tree tmp = make_rename_temp (TREE_TYPE (lhs), NULL);
      location_t loc = gimple_location (stmt);
      location_t loc = gimple_location (stmt);
      edge fallthru_edge = NULL;
      edge fallthru_edge = NULL;
 
 
      /* Remember if the call was going to throw.  */
      /* Remember if the call was going to throw.  */
      if (stmt_can_throw_internal (stmt))
      if (stmt_can_throw_internal (stmt))
        {
        {
          edge_iterator ei;
          edge_iterator ei;
          edge e;
          edge e;
          basic_block bb = gimple_bb (stmt);
          basic_block bb = gimple_bb (stmt);
 
 
          FOR_EACH_EDGE (e, ei, bb->succs)
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (e->flags & EDGE_FALLTHRU)
            if (e->flags & EDGE_FALLTHRU)
              {
              {
                fallthru_edge = e;
                fallthru_edge = e;
                break;
                break;
              }
              }
        }
        }
 
 
      gimple_call_set_lhs (stmt, tmp);
      gimple_call_set_lhs (stmt, tmp);
      update_stmt (stmt);
      update_stmt (stmt);
      stmt = gimple_build_assign (lhs, tmp);
      stmt = gimple_build_assign (lhs, tmp);
      gimple_set_location (stmt, loc);
      gimple_set_location (stmt, loc);
 
 
      /* We cannot throw in the middle of a BB.  If the call was going
      /* We cannot throw in the middle of a BB.  If the call was going
         to throw, place the instrumentation on the fallthru edge, so
         to throw, place the instrumentation on the fallthru edge, so
         the call remains the last statement in the block.  */
         the call remains the last statement in the block.  */
      if (fallthru_edge)
      if (fallthru_edge)
        {
        {
          gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
          gimple_seq fallthru_seq = gimple_seq_alloc_with_stmt (stmt);
          gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
          gimple_stmt_iterator fallthru_gsi = gsi_start (fallthru_seq);
          expand_assign_tm (region, &fallthru_gsi);
          expand_assign_tm (region, &fallthru_gsi);
          gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
          gsi_insert_seq_on_edge (fallthru_edge, fallthru_seq);
          pending_edge_inserts_p = true;
          pending_edge_inserts_p = true;
        }
        }
      else
      else
        {
        {
          gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
          gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
          expand_assign_tm (region, gsi);
          expand_assign_tm (region, gsi);
        }
        }
 
 
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
      transaction_subcode_ior (region, GTMA_HAVE_STORE);
    }
    }
 
 
  return retval;
  return retval;
}
}
 
 
 
 
/* Expand all statements in BB as appropriate for being inside
/* Expand all statements in BB as appropriate for being inside
   a transaction.  */
   a transaction.  */
 
 
static void
static void
expand_block_tm (struct tm_region *region, basic_block bb)
expand_block_tm (struct tm_region *region, basic_block bb)
{
{
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
 
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
    {
      gimple stmt = gsi_stmt (gsi);
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
      switch (gimple_code (stmt))
        {
        {
        case GIMPLE_ASSIGN:
        case GIMPLE_ASSIGN:
          /* Only memory reads/writes need to be instrumented.  */
          /* Only memory reads/writes need to be instrumented.  */
          if (gimple_assign_single_p (stmt)
          if (gimple_assign_single_p (stmt)
              && !gimple_clobber_p (stmt))
              && !gimple_clobber_p (stmt))
            {
            {
              expand_assign_tm (region, &gsi);
              expand_assign_tm (region, &gsi);
              continue;
              continue;
            }
            }
          break;
          break;
 
 
        case GIMPLE_CALL:
        case GIMPLE_CALL:
          if (expand_call_tm (region, &gsi))
          if (expand_call_tm (region, &gsi))
            return;
            return;
          break;
          break;
 
 
        case GIMPLE_ASM:
        case GIMPLE_ASM:
          gcc_unreachable ();
          gcc_unreachable ();
 
 
        default:
        default:
          break;
          break;
        }
        }
      if (!gsi_end_p (gsi))
      if (!gsi_end_p (gsi))
        gsi_next (&gsi);
        gsi_next (&gsi);
    }
    }
}
}
 
 
/* Return the list of basic-blocks in REGION.
/* Return the list of basic-blocks in REGION.
 
 
   STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
   STOP_AT_IRREVOCABLE_P is true if caller is uninterested in blocks
   following a TM_IRREVOCABLE call.  */
   following a TM_IRREVOCABLE call.  */
 
 
static VEC (basic_block, heap) *
static VEC (basic_block, heap) *
get_tm_region_blocks (basic_block entry_block,
get_tm_region_blocks (basic_block entry_block,
                      bitmap exit_blocks,
                      bitmap exit_blocks,
                      bitmap irr_blocks,
                      bitmap irr_blocks,
                      bitmap all_region_blocks,
                      bitmap all_region_blocks,
                      bool stop_at_irrevocable_p)
                      bool stop_at_irrevocable_p)
{
{
  VEC(basic_block, heap) *bbs = NULL;
  VEC(basic_block, heap) *bbs = NULL;
  unsigned i;
  unsigned i;
  edge e;
  edge e;
  edge_iterator ei;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
 
 
  i = 0;
  i = 0;
  VEC_safe_push (basic_block, heap, bbs, entry_block);
  VEC_safe_push (basic_block, heap, bbs, entry_block);
  bitmap_set_bit (visited_blocks, entry_block->index);
  bitmap_set_bit (visited_blocks, entry_block->index);
 
 
  do
  do
    {
    {
      basic_block bb = VEC_index (basic_block, bbs, i++);
      basic_block bb = VEC_index (basic_block, bbs, i++);
 
 
      if (exit_blocks &&
      if (exit_blocks &&
          bitmap_bit_p (exit_blocks, bb->index))
          bitmap_bit_p (exit_blocks, bb->index))
        continue;
        continue;
 
 
      if (stop_at_irrevocable_p
      if (stop_at_irrevocable_p
          && irr_blocks
          && irr_blocks
          && bitmap_bit_p (irr_blocks, bb->index))
          && bitmap_bit_p (irr_blocks, bb->index))
        continue;
        continue;
 
 
      FOR_EACH_EDGE (e, ei, bb->succs)
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            bitmap_set_bit (visited_blocks, e->dest->index);
            VEC_safe_push (basic_block, heap, bbs, e->dest);
            VEC_safe_push (basic_block, heap, bbs, e->dest);
          }
          }
    }
    }
  while (i < VEC_length (basic_block, bbs));
  while (i < VEC_length (basic_block, bbs));
 
 
  if (all_region_blocks)
  if (all_region_blocks)
    bitmap_ior_into (all_region_blocks, visited_blocks);
    bitmap_ior_into (all_region_blocks, visited_blocks);
 
 
  BITMAP_FREE (visited_blocks);
  BITMAP_FREE (visited_blocks);
  return bbs;
  return bbs;
}
}
 
 
/* Set the IN_TRANSACTION for all gimple statements that appear in a
/* Set the IN_TRANSACTION for all gimple statements that appear in a
   transaction.  */
   transaction.  */
 
 
void
void
compute_transaction_bits (void)
compute_transaction_bits (void)
{
{
  struct tm_region *region;
  struct tm_region *region;
  VEC (basic_block, heap) *queue;
  VEC (basic_block, heap) *queue;
  unsigned int i;
  unsigned int i;
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
  basic_block bb;
  basic_block bb;
 
 
  /* ?? Perhaps we need to abstract gate_tm_init further, because we
  /* ?? Perhaps we need to abstract gate_tm_init further, because we
     certainly don't need it to calculate CDI_DOMINATOR info.  */
     certainly don't need it to calculate CDI_DOMINATOR info.  */
  gate_tm_init ();
  gate_tm_init ();
 
 
  for (region = all_tm_regions; region; region = region->next)
  for (region = all_tm_regions; region; region = region->next)
    {
    {
      queue = get_tm_region_blocks (region->entry_block,
      queue = get_tm_region_blocks (region->entry_block,
                                    region->exit_blocks,
                                    region->exit_blocks,
                                    region->irr_blocks,
                                    region->irr_blocks,
                                    NULL,
                                    NULL,
                                    /*stop_at_irr_p=*/true);
                                    /*stop_at_irr_p=*/true);
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          {
          {
            gimple stmt = gsi_stmt (gsi);
            gimple stmt = gsi_stmt (gsi);
            gimple_set_in_transaction (stmt, true);
            gimple_set_in_transaction (stmt, true);
          }
          }
      VEC_free (basic_block, heap, queue);
      VEC_free (basic_block, heap, queue);
    }
    }
 
 
  if (all_tm_regions)
  if (all_tm_regions)
    bitmap_obstack_release (&tm_obstack);
    bitmap_obstack_release (&tm_obstack);
}
}
 
 
/* Entry point to the MARK phase of TM expansion.  Here we replace
/* Entry point to the MARK phase of TM expansion.  Here we replace
   transactional memory statements with calls to builtins, and function
   transactional memory statements with calls to builtins, and function
   calls with their transactional clones (if available).  But we don't
   calls with their transactional clones (if available).  But we don't
   yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges.  */
   yet lower GIMPLE_TRANSACTION or add the transaction restart back-edges.  */
 
 
static unsigned int
static unsigned int
execute_tm_mark (void)
execute_tm_mark (void)
{
{
  struct tm_region *region;
  struct tm_region *region;
  basic_block bb;
  basic_block bb;
  VEC (basic_block, heap) *queue;
  VEC (basic_block, heap) *queue;
  size_t i;
  size_t i;
 
 
  queue = VEC_alloc (basic_block, heap, 10);
  queue = VEC_alloc (basic_block, heap, 10);
  pending_edge_inserts_p = false;
  pending_edge_inserts_p = false;
 
 
  for (region = all_tm_regions; region ; region = region->next)
  for (region = all_tm_regions; region ; region = region->next)
    {
    {
      tm_log_init ();
      tm_log_init ();
      /* If we have a transaction...  */
      /* If we have a transaction...  */
      if (region->exit_blocks)
      if (region->exit_blocks)
        {
        {
          unsigned int subcode
          unsigned int subcode
            = gimple_transaction_subcode (region->transaction_stmt);
            = gimple_transaction_subcode (region->transaction_stmt);
 
 
          /* Collect a new SUBCODE set, now that optimizations are done...  */
          /* Collect a new SUBCODE set, now that optimizations are done...  */
          if (subcode & GTMA_DOES_GO_IRREVOCABLE)
          if (subcode & GTMA_DOES_GO_IRREVOCABLE)
            subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
            subcode &= (GTMA_DECLARATION_MASK | GTMA_DOES_GO_IRREVOCABLE
                        | GTMA_MAY_ENTER_IRREVOCABLE);
                        | GTMA_MAY_ENTER_IRREVOCABLE);
          else
          else
            subcode &= GTMA_DECLARATION_MASK;
            subcode &= GTMA_DECLARATION_MASK;
          gimple_transaction_set_subcode (region->transaction_stmt, subcode);
          gimple_transaction_set_subcode (region->transaction_stmt, subcode);
        }
        }
 
 
      queue = get_tm_region_blocks (region->entry_block,
      queue = get_tm_region_blocks (region->entry_block,
                                    region->exit_blocks,
                                    region->exit_blocks,
                                    region->irr_blocks,
                                    region->irr_blocks,
                                    NULL,
                                    NULL,
                                    /*stop_at_irr_p=*/true);
                                    /*stop_at_irr_p=*/true);
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
        expand_block_tm (region, bb);
        expand_block_tm (region, bb);
      VEC_free (basic_block, heap, queue);
      VEC_free (basic_block, heap, queue);
 
 
      tm_log_emit ();
      tm_log_emit ();
    }
    }
 
 
  if (pending_edge_inserts_p)
  if (pending_edge_inserts_p)
    gsi_commit_edge_inserts ();
    gsi_commit_edge_inserts ();
  return 0;
  return 0;
}
}
 
 
struct gimple_opt_pass pass_tm_mark =
struct gimple_opt_pass pass_tm_mark =
{
{
 {
 {
  GIMPLE_PASS,
  GIMPLE_PASS,
  "tmmark",                             /* name */
  "tmmark",                             /* name */
  NULL,                                 /* gate */
  NULL,                                 /* gate */
  execute_tm_mark,                      /* execute */
  execute_tm_mark,                      /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  NULL,                                 /* next */
  0,                                     /* static_pass_number */
  0,                                     /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                     /* properties_provided */
  0,                                     /* properties_provided */
  0,                                     /* properties_destroyed */
  0,                                     /* properties_destroyed */
  0,                                     /* todo_flags_start */
  0,                                     /* todo_flags_start */
  TODO_update_ssa
  TODO_update_ssa
  | TODO_verify_ssa
  | TODO_verify_ssa
  | TODO_dump_func,                     /* todo_flags_finish */
  | TODO_dump_func,                     /* todo_flags_finish */
 }
 }
};
};


/* Create an abnormal call edge from BB to the first block of the region
/* Create an abnormal call edge from BB to the first block of the region
   represented by STATE.  Also record the edge in the TM_RESTART map.  */
   represented by STATE.  Also record the edge in the TM_RESTART map.  */
 
 
static inline void
static inline void
make_tm_edge (gimple stmt, basic_block bb, struct tm_region *region)
make_tm_edge (gimple stmt, basic_block bb, struct tm_region *region)
{
{
  void **slot;
  void **slot;
  struct tm_restart_node *n, dummy;
  struct tm_restart_node *n, dummy;
 
 
  if (cfun->gimple_df->tm_restart == NULL)
  if (cfun->gimple_df->tm_restart == NULL)
    cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
    cfun->gimple_df->tm_restart = htab_create_ggc (31, struct_ptr_hash,
                                                   struct_ptr_eq, ggc_free);
                                                   struct_ptr_eq, ggc_free);
 
 
  dummy.stmt = stmt;
  dummy.stmt = stmt;
  dummy.label_or_list = gimple_block_label (region->entry_block);
  dummy.label_or_list = gimple_block_label (region->entry_block);
  slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
  slot = htab_find_slot (cfun->gimple_df->tm_restart, &dummy, INSERT);
  n = (struct tm_restart_node *) *slot;
  n = (struct tm_restart_node *) *slot;
  if (n == NULL)
  if (n == NULL)
    {
    {
      n = ggc_alloc_tm_restart_node ();
      n = ggc_alloc_tm_restart_node ();
      *n = dummy;
      *n = dummy;
    }
    }
  else
  else
    {
    {
      tree old = n->label_or_list;
      tree old = n->label_or_list;
      if (TREE_CODE (old) == LABEL_DECL)
      if (TREE_CODE (old) == LABEL_DECL)
        old = tree_cons (NULL, old, NULL);
        old = tree_cons (NULL, old, NULL);
      n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
      n->label_or_list = tree_cons (NULL, dummy.label_or_list, old);
    }
    }
 
 
  make_edge (bb, region->entry_block, EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
  make_edge (bb, region->entry_block, EDGE_ABNORMAL | EDGE_ABNORMAL_CALL);
}
}
 
 
 
 
/* Split block BB as necessary for every builtin function we added, and
/* Split block BB as necessary for every builtin function we added, and
   wire up the abnormal back edges implied by the transaction restart.  */
   wire up the abnormal back edges implied by the transaction restart.  */
 
 
static void
static void
expand_block_edges (struct tm_region *region, basic_block bb)
expand_block_edges (struct tm_region *region, basic_block bb)
{
{
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
 
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
    {
    {
      gimple stmt = gsi_stmt (gsi);
      gimple stmt = gsi_stmt (gsi);
 
 
      /* ??? TM_COMMIT (and any other tm builtin function) in a nested
      /* ??? TM_COMMIT (and any other tm builtin function) in a nested
         transaction has an abnormal edge back to the outer-most transaction
         transaction has an abnormal edge back to the outer-most transaction
         (there are no nested retries), while a TM_ABORT also has an abnormal
         (there are no nested retries), while a TM_ABORT also has an abnormal
         backedge to the inner-most transaction.  We haven't actually saved
         backedge to the inner-most transaction.  We haven't actually saved
         the inner-most transaction here.  We should be able to get to it
         the inner-most transaction here.  We should be able to get to it
         via the region_nr saved on STMT, and read the transaction_stmt from
         via the region_nr saved on STMT, and read the transaction_stmt from
         that, and find the first region block from there.  */
         that, and find the first region block from there.  */
      /* ??? Shouldn't we split for any non-pure, non-irrevocable function?  */
      /* ??? Shouldn't we split for any non-pure, non-irrevocable function?  */
      if (gimple_code (stmt) == GIMPLE_CALL
      if (gimple_code (stmt) == GIMPLE_CALL
          && (gimple_call_flags (stmt) & ECF_TM_BUILTIN) != 0)
          && (gimple_call_flags (stmt) & ECF_TM_BUILTIN) != 0)
        {
        {
          if (gsi_one_before_end_p (gsi))
          if (gsi_one_before_end_p (gsi))
            make_tm_edge (stmt, bb, region);
            make_tm_edge (stmt, bb, region);
          else
          else
            {
            {
              edge e = split_block (bb, stmt);
              edge e = split_block (bb, stmt);
              make_tm_edge (stmt, bb, region);
              make_tm_edge (stmt, bb, region);
              bb = e->dest;
              bb = e->dest;
              gsi = gsi_start_bb (bb);
              gsi = gsi_start_bb (bb);
            }
            }
 
 
          /* Delete any tail-call annotation that may have been added.
          /* Delete any tail-call annotation that may have been added.
             The tail-call pass may have mis-identified the commit as being
             The tail-call pass may have mis-identified the commit as being
             a candidate because we had not yet added this restart edge.  */
             a candidate because we had not yet added this restart edge.  */
          gimple_call_set_tail (stmt, false);
          gimple_call_set_tail (stmt, false);
        }
        }
 
 
      gsi_next (&gsi);
      gsi_next (&gsi);
    }
    }
}
}
 
 
/* Expand the GIMPLE_TRANSACTION statement into the STM library call.  */
/* Expand the GIMPLE_TRANSACTION statement into the STM library call.  */
 
 
static void
static void
expand_transaction (struct tm_region *region)
expand_transaction (struct tm_region *region)
{
{
  tree status, tm_start;
  tree status, tm_start;
  basic_block atomic_bb, slice_bb;
  basic_block atomic_bb, slice_bb;
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
  tree t1, t2;
  tree t1, t2;
  gimple g;
  gimple g;
  int flags, subcode;
  int flags, subcode;
 
 
  tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  tm_start = builtin_decl_explicit (BUILT_IN_TM_START);
  status = make_rename_temp (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
  status = make_rename_temp (TREE_TYPE (TREE_TYPE (tm_start)), "tm_state");
 
 
  /* ??? There are plenty of bits here we're not computing.  */
  /* ??? There are plenty of bits here we're not computing.  */
  subcode = gimple_transaction_subcode (region->transaction_stmt);
  subcode = gimple_transaction_subcode (region->transaction_stmt);
  if (subcode & GTMA_DOES_GO_IRREVOCABLE)
  if (subcode & GTMA_DOES_GO_IRREVOCABLE)
    flags = PR_DOESGOIRREVOCABLE | PR_UNINSTRUMENTEDCODE;
    flags = PR_DOESGOIRREVOCABLE | PR_UNINSTRUMENTEDCODE;
  else
  else
    flags = PR_INSTRUMENTEDCODE;
    flags = PR_INSTRUMENTEDCODE;
  if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
  if ((subcode & GTMA_MAY_ENTER_IRREVOCABLE) == 0)
    flags |= PR_HASNOIRREVOCABLE;
    flags |= PR_HASNOIRREVOCABLE;
  /* If the transaction does not have an abort in lexical scope and is not
  /* If the transaction does not have an abort in lexical scope and is not
     marked as an outer transaction, then it will never abort.  */
     marked as an outer transaction, then it will never abort.  */
  if ((subcode & GTMA_HAVE_ABORT) == 0
  if ((subcode & GTMA_HAVE_ABORT) == 0
      && (subcode & GTMA_IS_OUTER) == 0)
      && (subcode & GTMA_IS_OUTER) == 0)
    flags |= PR_HASNOABORT;
    flags |= PR_HASNOABORT;
  if ((subcode & GTMA_HAVE_STORE) == 0)
  if ((subcode & GTMA_HAVE_STORE) == 0)
    flags |= PR_READONLY;
    flags |= PR_READONLY;
  t2 = build_int_cst (TREE_TYPE (status), flags);
  t2 = build_int_cst (TREE_TYPE (status), flags);
  g = gimple_build_call (tm_start, 1, t2);
  g = gimple_build_call (tm_start, 1, t2);
  gimple_call_set_lhs (g, status);
  gimple_call_set_lhs (g, status);
  gimple_set_location (g, gimple_location (region->transaction_stmt));
  gimple_set_location (g, gimple_location (region->transaction_stmt));
 
 
  atomic_bb = gimple_bb (region->transaction_stmt);
  atomic_bb = gimple_bb (region->transaction_stmt);
 
 
  if (!VEC_empty (tree, tm_log_save_addresses))
  if (!VEC_empty (tree, tm_log_save_addresses))
    tm_log_emit_saves (region->entry_block, atomic_bb);
    tm_log_emit_saves (region->entry_block, atomic_bb);
 
 
  gsi = gsi_last_bb (atomic_bb);
  gsi = gsi_last_bb (atomic_bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  gsi_remove (&gsi, true);
  gsi_remove (&gsi, true);
 
 
  if (!VEC_empty (tree, tm_log_save_addresses))
  if (!VEC_empty (tree, tm_log_save_addresses))
    region->entry_block =
    region->entry_block =
      tm_log_emit_save_or_restores (region->entry_block,
      tm_log_emit_save_or_restores (region->entry_block,
                                    A_RESTORELIVEVARIABLES,
                                    A_RESTORELIVEVARIABLES,
                                    status,
                                    status,
                                    tm_log_emit_restores,
                                    tm_log_emit_restores,
                                    atomic_bb,
                                    atomic_bb,
                                    FALLTHRU_EDGE (atomic_bb),
                                    FALLTHRU_EDGE (atomic_bb),
                                    &slice_bb);
                                    &slice_bb);
  else
  else
    slice_bb = atomic_bb;
    slice_bb = atomic_bb;
 
 
  /* If we have an ABORT statement, create a test following the start
  /* If we have an ABORT statement, create a test following the start
     call to perform the abort.  */
     call to perform the abort.  */
  if (gimple_transaction_label (region->transaction_stmt))
  if (gimple_transaction_label (region->transaction_stmt))
    {
    {
      edge e;
      edge e;
      basic_block test_bb;
      basic_block test_bb;
 
 
      test_bb = create_empty_bb (slice_bb);
      test_bb = create_empty_bb (slice_bb);
      if (VEC_empty (tree, tm_log_save_addresses))
      if (VEC_empty (tree, tm_log_save_addresses))
        region->entry_block = test_bb;
        region->entry_block = test_bb;
      gsi = gsi_last_bb (test_bb);
      gsi = gsi_last_bb (test_bb);
 
 
      t1 = make_rename_temp (TREE_TYPE (status), NULL);
      t1 = make_rename_temp (TREE_TYPE (status), NULL);
      t2 = build_int_cst (TREE_TYPE (status), A_ABORTTRANSACTION);
      t2 = build_int_cst (TREE_TYPE (status), A_ABORTTRANSACTION);
      g = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
      g = gimple_build_assign_with_ops (BIT_AND_EXPR, t1, status, t2);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
 
 
      t2 = build_int_cst (TREE_TYPE (status), 0);
      t2 = build_int_cst (TREE_TYPE (status), 0);
      g = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      g = gimple_build_cond (NE_EXPR, t1, t2, NULL, NULL);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
      gsi_insert_after (&gsi, g, GSI_CONTINUE_LINKING);
 
 
      e = FALLTHRU_EDGE (slice_bb);
      e = FALLTHRU_EDGE (slice_bb);
      redirect_edge_pred (e, test_bb);
      redirect_edge_pred (e, test_bb);
      e->flags = EDGE_FALSE_VALUE;
      e->flags = EDGE_FALSE_VALUE;
      e->probability = PROB_ALWAYS - PROB_VERY_UNLIKELY;
      e->probability = PROB_ALWAYS - PROB_VERY_UNLIKELY;
 
 
      e = BRANCH_EDGE (atomic_bb);
      e = BRANCH_EDGE (atomic_bb);
      redirect_edge_pred (e, test_bb);
      redirect_edge_pred (e, test_bb);
      e->flags = EDGE_TRUE_VALUE;
      e->flags = EDGE_TRUE_VALUE;
      e->probability = PROB_VERY_UNLIKELY;
      e->probability = PROB_VERY_UNLIKELY;
 
 
      e = make_edge (slice_bb, test_bb, EDGE_FALLTHRU);
      e = make_edge (slice_bb, test_bb, EDGE_FALLTHRU);
    }
    }
 
 
  /* If we've no abort, but we do have PHIs at the beginning of the atomic
  /* If we've no abort, but we do have PHIs at the beginning of the atomic
     region, that means we've a loop at the beginning of the atomic region
     region, that means we've a loop at the beginning of the atomic region
     that shares the first block.  This can cause problems with the abnormal
     that shares the first block.  This can cause problems with the abnormal
     edges we're about to add for the transaction restart.  Solve this by
     edges we're about to add for the transaction restart.  Solve this by
     adding a new empty block to receive the abnormal edges.  */
     adding a new empty block to receive the abnormal edges.  */
  else if (phi_nodes (region->entry_block))
  else if (phi_nodes (region->entry_block))
    {
    {
      edge e;
      edge e;
      basic_block empty_bb;
      basic_block empty_bb;
 
 
      region->entry_block = empty_bb = create_empty_bb (atomic_bb);
      region->entry_block = empty_bb = create_empty_bb (atomic_bb);
 
 
      e = FALLTHRU_EDGE (atomic_bb);
      e = FALLTHRU_EDGE (atomic_bb);
      redirect_edge_pred (e, empty_bb);
      redirect_edge_pred (e, empty_bb);
 
 
      e = make_edge (atomic_bb, empty_bb, EDGE_FALLTHRU);
      e = make_edge (atomic_bb, empty_bb, EDGE_FALLTHRU);
    }
    }
 
 
  /* The GIMPLE_TRANSACTION statement no longer exists.  */
  /* The GIMPLE_TRANSACTION statement no longer exists.  */
  region->transaction_stmt = NULL;
  region->transaction_stmt = NULL;
}
}
 
 
static void expand_regions (struct tm_region *);
static void expand_regions (struct tm_region *);
 
 
/* Helper function for expand_regions.  Expand REGION and recurse to
/* Helper function for expand_regions.  Expand REGION and recurse to
   the inner region.  */
   the inner region.  */
 
 
static void
static void
expand_regions_1 (struct tm_region *region)
expand_regions_1 (struct tm_region *region)
{
{
  if (region->exit_blocks)
  if (region->exit_blocks)
    {
    {
      unsigned int i;
      unsigned int i;
      basic_block bb;
      basic_block bb;
      VEC (basic_block, heap) *queue;
      VEC (basic_block, heap) *queue;
 
 
      /* Collect the set of blocks in this region.  Do this before
      /* Collect the set of blocks in this region.  Do this before
         splitting edges, so that we don't have to play with the
         splitting edges, so that we don't have to play with the
         dominator tree in the middle.  */
         dominator tree in the middle.  */
      queue = get_tm_region_blocks (region->entry_block,
      queue = get_tm_region_blocks (region->entry_block,
                                    region->exit_blocks,
                                    region->exit_blocks,
                                    region->irr_blocks,
                                    region->irr_blocks,
                                    NULL,
                                    NULL,
                                    /*stop_at_irr_p=*/false);
                                    /*stop_at_irr_p=*/false);
      expand_transaction (region);
      expand_transaction (region);
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
      for (i = 0; VEC_iterate (basic_block, queue, i, bb); ++i)
        expand_block_edges (region, bb);
        expand_block_edges (region, bb);
      VEC_free (basic_block, heap, queue);
      VEC_free (basic_block, heap, queue);
    }
    }
  if (region->inner)
  if (region->inner)
    expand_regions (region->inner);
    expand_regions (region->inner);
}
}
 
 
/* Expand regions starting at REGION.  */
/* Expand regions starting at REGION.  */
 
 
static void
static void
expand_regions (struct tm_region *region)
expand_regions (struct tm_region *region)
{
{
  while (region)
  while (region)
    {
    {
      expand_regions_1 (region);
      expand_regions_1 (region);
      region = region->next;
      region = region->next;
    }
    }
}
}
 
 
/* Entry point to the final expansion of transactional nodes. */
/* Entry point to the final expansion of transactional nodes. */
 
 
static unsigned int
static unsigned int
execute_tm_edges (void)
execute_tm_edges (void)
{
{
  expand_regions (all_tm_regions);
  expand_regions (all_tm_regions);
  tm_log_delete ();
  tm_log_delete ();
 
 
  /* We've got to release the dominance info now, to indicate that it
  /* We've got to release the dominance info now, to indicate that it
     must be rebuilt completely.  Otherwise we'll crash trying to update
     must be rebuilt completely.  Otherwise we'll crash trying to update
     the SSA web in the TODO section following this pass.  */
     the SSA web in the TODO section following this pass.  */
  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_DOMINATORS);
  bitmap_obstack_release (&tm_obstack);
  bitmap_obstack_release (&tm_obstack);
  all_tm_regions = NULL;
  all_tm_regions = NULL;
 
 
  return 0;
  return 0;
}
}
 
 
struct gimple_opt_pass pass_tm_edges =
struct gimple_opt_pass pass_tm_edges =
{
{
 {
 {
  GIMPLE_PASS,
  GIMPLE_PASS,
  "tmedge",                             /* name */
  "tmedge",                             /* name */
  NULL,                                 /* gate */
  NULL,                                 /* gate */
  execute_tm_edges,                     /* execute */
  execute_tm_edges,                     /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  NULL,                                 /* next */
  0,                                     /* static_pass_number */
  0,                                     /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                     /* properties_provided */
  0,                                     /* properties_provided */
  0,                                     /* properties_destroyed */
  0,                                     /* properties_destroyed */
  0,                                     /* todo_flags_start */
  0,                                     /* todo_flags_start */
  TODO_update_ssa
  TODO_update_ssa
  | TODO_verify_ssa
  | TODO_verify_ssa
  | TODO_dump_func,                     /* todo_flags_finish */
  | TODO_dump_func,                     /* todo_flags_finish */
 }
 }
};
};


/* A unique TM memory operation.  */
/* A unique TM memory operation.  */
typedef struct tm_memop
typedef struct tm_memop
{
{
  /* Unique ID that all memory operations to the same location have.  */
  /* Unique ID that all memory operations to the same location have.  */
  unsigned int value_id;
  unsigned int value_id;
  /* Address of load/store.  */
  /* Address of load/store.  */
  tree addr;
  tree addr;
} *tm_memop_t;
} *tm_memop_t;
 
 
/* Sets for solving data flow equations in the memory optimization pass.  */
/* Sets for solving data flow equations in the memory optimization pass.  */
struct tm_memopt_bitmaps
struct tm_memopt_bitmaps
{
{
  /* Stores available to this BB upon entry.  Basically, stores that
  /* Stores available to this BB upon entry.  Basically, stores that
     dominate this BB.  */
     dominate this BB.  */
  bitmap store_avail_in;
  bitmap store_avail_in;
  /* Stores available at the end of this BB.  */
  /* Stores available at the end of this BB.  */
  bitmap store_avail_out;
  bitmap store_avail_out;
  bitmap store_antic_in;
  bitmap store_antic_in;
  bitmap store_antic_out;
  bitmap store_antic_out;
  /* Reads available to this BB upon entry.  Basically, reads that
  /* Reads available to this BB upon entry.  Basically, reads that
     dominate this BB.  */
     dominate this BB.  */
  bitmap read_avail_in;
  bitmap read_avail_in;
  /* Reads available at the end of this BB.  */
  /* Reads available at the end of this BB.  */
  bitmap read_avail_out;
  bitmap read_avail_out;
  /* Reads performed in this BB.  */
  /* Reads performed in this BB.  */
  bitmap read_local;
  bitmap read_local;
  /* Writes performed in this BB.  */
  /* Writes performed in this BB.  */
  bitmap store_local;
  bitmap store_local;
 
 
  /* Temporary storage for pass.  */
  /* Temporary storage for pass.  */
  /* Is the current BB in the worklist?  */
  /* Is the current BB in the worklist?  */
  bool avail_in_worklist_p;
  bool avail_in_worklist_p;
  /* Have we visited this BB?  */
  /* Have we visited this BB?  */
  bool visited_p;
  bool visited_p;
};
};
 
 
static bitmap_obstack tm_memopt_obstack;
static bitmap_obstack tm_memopt_obstack;
 
 
/* Unique counter for TM loads and stores. Loads and stores of the
/* Unique counter for TM loads and stores. Loads and stores of the
   same address get the same ID.  */
   same address get the same ID.  */
static unsigned int tm_memopt_value_id;
static unsigned int tm_memopt_value_id;
static htab_t tm_memopt_value_numbers;
static htab_t tm_memopt_value_numbers;
 
 
#define STORE_AVAIL_IN(BB) \
#define STORE_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_in
#define STORE_AVAIL_OUT(BB) \
#define STORE_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_avail_out
#define STORE_ANTIC_IN(BB) \
#define STORE_ANTIC_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_in
#define STORE_ANTIC_OUT(BB) \
#define STORE_ANTIC_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_antic_out
#define READ_AVAIL_IN(BB) \
#define READ_AVAIL_IN(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_in
#define READ_AVAIL_OUT(BB) \
#define READ_AVAIL_OUT(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_avail_out
#define READ_LOCAL(BB) \
#define READ_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->read_local
#define STORE_LOCAL(BB) \
#define STORE_LOCAL(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->store_local
#define AVAIL_IN_WORKLIST_P(BB) \
#define AVAIL_IN_WORKLIST_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->avail_in_worklist_p
#define BB_VISITED_P(BB) \
#define BB_VISITED_P(BB) \
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
  ((struct tm_memopt_bitmaps *) ((BB)->aux))->visited_p
 
 
/* Htab support.  Return a hash value for a `tm_memop'.  */
/* Htab support.  Return a hash value for a `tm_memop'.  */
static hashval_t
static hashval_t
tm_memop_hash (const void *p)
tm_memop_hash (const void *p)
{
{
  const struct tm_memop *mem = (const struct tm_memop *) p;
  const struct tm_memop *mem = (const struct tm_memop *) p;
  tree addr = mem->addr;
  tree addr = mem->addr;
  /* We drill down to the SSA_NAME/DECL for the hash, but equality is
  /* We drill down to the SSA_NAME/DECL for the hash, but equality is
     actually done with operand_equal_p (see tm_memop_eq).  */
     actually done with operand_equal_p (see tm_memop_eq).  */
  if (TREE_CODE (addr) == ADDR_EXPR)
  if (TREE_CODE (addr) == ADDR_EXPR)
    addr = TREE_OPERAND (addr, 0);
    addr = TREE_OPERAND (addr, 0);
  return iterative_hash_expr (addr, 0);
  return iterative_hash_expr (addr, 0);
}
}
 
 
/* Htab support.  Return true if two tm_memop's are the same.  */
/* Htab support.  Return true if two tm_memop's are the same.  */
static int
static int
tm_memop_eq (const void *p1, const void *p2)
tm_memop_eq (const void *p1, const void *p2)
{
{
  const struct tm_memop *mem1 = (const struct tm_memop *) p1;
  const struct tm_memop *mem1 = (const struct tm_memop *) p1;
  const struct tm_memop *mem2 = (const struct tm_memop *) p2;
  const struct tm_memop *mem2 = (const struct tm_memop *) p2;
 
 
  return operand_equal_p (mem1->addr, mem2->addr, 0);
  return operand_equal_p (mem1->addr, mem2->addr, 0);
}
}
 
 
/* Given a TM load/store in STMT, return the value number for the address
/* Given a TM load/store in STMT, return the value number for the address
   it accesses.  */
   it accesses.  */
 
 
static unsigned int
static unsigned int
tm_memopt_value_number (gimple stmt, enum insert_option op)
tm_memopt_value_number (gimple stmt, enum insert_option op)
{
{
  struct tm_memop tmpmem, *mem;
  struct tm_memop tmpmem, *mem;
  void **slot;
  void **slot;
 
 
  gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
  gcc_assert (is_tm_load (stmt) || is_tm_store (stmt));
  tmpmem.addr = gimple_call_arg (stmt, 0);
  tmpmem.addr = gimple_call_arg (stmt, 0);
  slot = htab_find_slot (tm_memopt_value_numbers, &tmpmem, op);
  slot = htab_find_slot (tm_memopt_value_numbers, &tmpmem, op);
  if (*slot)
  if (*slot)
    mem = (struct tm_memop *) *slot;
    mem = (struct tm_memop *) *slot;
  else if (op == INSERT)
  else if (op == INSERT)
    {
    {
      mem = XNEW (struct tm_memop);
      mem = XNEW (struct tm_memop);
      *slot = mem;
      *slot = mem;
      mem->value_id = tm_memopt_value_id++;
      mem->value_id = tm_memopt_value_id++;
      mem->addr = tmpmem.addr;
      mem->addr = tmpmem.addr;
    }
    }
  else
  else
    gcc_unreachable ();
    gcc_unreachable ();
  return mem->value_id;
  return mem->value_id;
}
}
 
 
/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL.  */
/* Accumulate TM memory operations in BB into STORE_LOCAL and READ_LOCAL.  */
 
 
static void
static void
tm_memopt_accumulate_memops (basic_block bb)
tm_memopt_accumulate_memops (basic_block bb)
{
{
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
 
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
    {
      gimple stmt = gsi_stmt (gsi);
      gimple stmt = gsi_stmt (gsi);
      bitmap bits;
      bitmap bits;
      unsigned int loc;
      unsigned int loc;
 
 
      if (is_tm_store (stmt))
      if (is_tm_store (stmt))
        bits = STORE_LOCAL (bb);
        bits = STORE_LOCAL (bb);
      else if (is_tm_load (stmt))
      else if (is_tm_load (stmt))
        bits = READ_LOCAL (bb);
        bits = READ_LOCAL (bb);
      else
      else
        continue;
        continue;
 
 
      loc = tm_memopt_value_number (stmt, INSERT);
      loc = tm_memopt_value_number (stmt, INSERT);
      bitmap_set_bit (bits, loc);
      bitmap_set_bit (bits, loc);
      if (dump_file)
      if (dump_file)
        {
        {
          fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
          fprintf (dump_file, "TM memopt (%s): value num=%d, BB=%d, addr=",
                   is_tm_load (stmt) ? "LOAD" : "STORE", loc,
                   is_tm_load (stmt) ? "LOAD" : "STORE", loc,
                   gimple_bb (stmt)->index);
                   gimple_bb (stmt)->index);
          print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
          print_generic_expr (dump_file, gimple_call_arg (stmt, 0), 0);
          fprintf (dump_file, "\n");
          fprintf (dump_file, "\n");
        }
        }
    }
    }
}
}
 
 
/* Prettily dump one of the memopt sets.  BITS is the bitmap to dump.  */
/* Prettily dump one of the memopt sets.  BITS is the bitmap to dump.  */
 
 
static void
static void
dump_tm_memopt_set (const char *set_name, bitmap bits)
dump_tm_memopt_set (const char *set_name, bitmap bits)
{
{
  unsigned i;
  unsigned i;
  bitmap_iterator bi;
  bitmap_iterator bi;
  const char *comma = "";
  const char *comma = "";
 
 
  fprintf (dump_file, "TM memopt: %s: [", set_name);
  fprintf (dump_file, "TM memopt: %s: [", set_name);
  EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
  EXECUTE_IF_SET_IN_BITMAP (bits, 0, i, bi)
    {
    {
      htab_iterator hi;
      htab_iterator hi;
      struct tm_memop *mem;
      struct tm_memop *mem;
 
 
      /* Yeah, yeah, yeah.  Whatever.  This is just for debugging.  */
      /* Yeah, yeah, yeah.  Whatever.  This is just for debugging.  */
      FOR_EACH_HTAB_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
      FOR_EACH_HTAB_ELEMENT (tm_memopt_value_numbers, mem, tm_memop_t, hi)
        if (mem->value_id == i)
        if (mem->value_id == i)
          break;
          break;
      gcc_assert (mem->value_id == i);
      gcc_assert (mem->value_id == i);
      fprintf (dump_file, "%s", comma);
      fprintf (dump_file, "%s", comma);
      comma = ", ";
      comma = ", ";
      print_generic_expr (dump_file, mem->addr, 0);
      print_generic_expr (dump_file, mem->addr, 0);
    }
    }
  fprintf (dump_file, "]\n");
  fprintf (dump_file, "]\n");
}
}
 
 
/* Prettily dump all of the memopt sets in BLOCKS.  */
/* Prettily dump all of the memopt sets in BLOCKS.  */
 
 
static void
static void
dump_tm_memopt_sets (VEC (basic_block, heap) *blocks)
dump_tm_memopt_sets (VEC (basic_block, heap) *blocks)
{
{
  size_t i;
  size_t i;
  basic_block bb;
  basic_block bb;
 
 
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    {
    {
      fprintf (dump_file, "------------BB %d---------\n", bb->index);
      fprintf (dump_file, "------------BB %d---------\n", bb->index);
      dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
      dump_tm_memopt_set ("STORE_LOCAL", STORE_LOCAL (bb));
      dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
      dump_tm_memopt_set ("READ_LOCAL", READ_LOCAL (bb));
      dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
      dump_tm_memopt_set ("STORE_AVAIL_IN", STORE_AVAIL_IN (bb));
      dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
      dump_tm_memopt_set ("STORE_AVAIL_OUT", STORE_AVAIL_OUT (bb));
      dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
      dump_tm_memopt_set ("READ_AVAIL_IN", READ_AVAIL_IN (bb));
      dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
      dump_tm_memopt_set ("READ_AVAIL_OUT", READ_AVAIL_OUT (bb));
    }
    }
}
}
 
 
/* Compute {STORE,READ}_AVAIL_IN for the basic block BB.  */
/* Compute {STORE,READ}_AVAIL_IN for the basic block BB.  */
 
 
static void
static void
tm_memopt_compute_avin (basic_block bb)
tm_memopt_compute_avin (basic_block bb)
{
{
  edge e;
  edge e;
  unsigned ix;
  unsigned ix;
 
 
  /* Seed with the AVOUT of any predecessor.  */
  /* Seed with the AVOUT of any predecessor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
  for (ix = 0; ix < EDGE_COUNT (bb->preds); ix++)
    {
    {
      e = EDGE_PRED (bb, ix);
      e = EDGE_PRED (bb, ix);
      /* Make sure we have already visited this BB, and is thus
      /* Make sure we have already visited this BB, and is thus
         initialized.
         initialized.
 
 
          If e->src->aux is NULL, this predecessor is actually on an
          If e->src->aux is NULL, this predecessor is actually on an
          enclosing transaction.  We only care about the current
          enclosing transaction.  We only care about the current
          transaction, so ignore it.  */
          transaction, so ignore it.  */
      if (e->src->aux && BB_VISITED_P (e->src))
      if (e->src->aux && BB_VISITED_P (e->src))
        {
        {
          bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
          bitmap_copy (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
          bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
          bitmap_copy (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
          break;
          break;
        }
        }
    }
    }
 
 
  for (; ix < EDGE_COUNT (bb->preds); ix++)
  for (; ix < EDGE_COUNT (bb->preds); ix++)
    {
    {
      e = EDGE_PRED (bb, ix);
      e = EDGE_PRED (bb, ix);
      if (e->src->aux && BB_VISITED_P (e->src))
      if (e->src->aux && BB_VISITED_P (e->src))
        {
        {
          bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
          bitmap_and_into (STORE_AVAIL_IN (bb), STORE_AVAIL_OUT (e->src));
          bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
          bitmap_and_into (READ_AVAIL_IN (bb), READ_AVAIL_OUT (e->src));
        }
        }
    }
    }
 
 
  BB_VISITED_P (bb) = true;
  BB_VISITED_P (bb) = true;
}
}
 
 
/* Compute the STORE_ANTIC_IN for the basic block BB.  */
/* Compute the STORE_ANTIC_IN for the basic block BB.  */
 
 
static void
static void
tm_memopt_compute_antin (basic_block bb)
tm_memopt_compute_antin (basic_block bb)
{
{
  edge e;
  edge e;
  unsigned ix;
  unsigned ix;
 
 
  /* Seed with the ANTIC_OUT of any successor.  */
  /* Seed with the ANTIC_OUT of any successor.  */
  for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
  for (ix = 0; ix < EDGE_COUNT (bb->succs); ix++)
    {
    {
      e = EDGE_SUCC (bb, ix);
      e = EDGE_SUCC (bb, ix);
      /* Make sure we have already visited this BB, and is thus
      /* Make sure we have already visited this BB, and is thus
         initialized.  */
         initialized.  */
      if (BB_VISITED_P (e->dest))
      if (BB_VISITED_P (e->dest))
        {
        {
          bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
          bitmap_copy (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
          break;
          break;
        }
        }
    }
    }
 
 
  for (; ix < EDGE_COUNT (bb->succs); ix++)
  for (; ix < EDGE_COUNT (bb->succs); ix++)
    {
    {
      e = EDGE_SUCC (bb, ix);
      e = EDGE_SUCC (bb, ix);
      if (BB_VISITED_P  (e->dest))
      if (BB_VISITED_P  (e->dest))
        bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
        bitmap_and_into (STORE_ANTIC_IN (bb), STORE_ANTIC_OUT (e->dest));
    }
    }
 
 
  BB_VISITED_P (bb) = true;
  BB_VISITED_P (bb) = true;
}
}
 
 
/* Compute the AVAIL sets for every basic block in BLOCKS.

   We compute {STORE,READ}_AVAIL_{OUT,IN} as follows:

     AVAIL_OUT[bb] = union (AVAIL_IN[bb], LOCAL[bb])
     AVAIL_IN[bb]  = intersect (AVAIL_OUT[predecessors])

   This is basically what we do in lcm's compute_available(), but here
   we calculate two sets of sets (one for STOREs and one for READs),
   and we work on a region instead of the entire CFG.

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */

static void
tm_memopt_compute_available (struct tm_region *region,
                             VEC (basic_block, heap) *blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen, i;
  edge_iterator ei;
  bool changed;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.
     The "- 1" accounts for the entry block, which is never queued
     (see below).  */
  qlen = VEC_length (basic_block, blocks) - 1;
  qin = qout = worklist =
    XNEWVEC (basic_block, qlen);

  /* Put every block in the region on the worklist.  */
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    {
      /* Seed AVAIL_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_LOCAL (bb));
      bitmap_ior_into (READ_AVAIL_OUT (bb), READ_LOCAL (bb));

      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert the entry block, since it has an AVIN of
         null, and an AVOUT that has already been seeded in.  */
      if (bb != region->entry_block)
        *qin++ = bb;
    }

  /* The entry block has been initialized with the local sets.  */
  BB_VISITED_P (region->entry_block) = true;

  /* The worklist is a circular buffer: QIN is where new entries are
     written, QOUT where they are consumed, and both wrap at QEND.  */
  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
        qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_avin (bb);

      /* Note: We do not add the LOCAL sets here because we already
         seeded the AVAIL_OUT sets with them.  */
      changed  = bitmap_ior_into (STORE_AVAIL_OUT (bb), STORE_AVAIL_IN (bb));
      changed |= bitmap_ior_into (READ_AVAIL_OUT (bb), READ_AVAIL_IN (bb));
      if (changed
          && (region->exit_blocks == NULL
              || !bitmap_bit_p (region->exit_blocks, bb->index)))
        /* If the out state of this block changed, then we need to add
           its successors to the worklist if they are not already in.
           Exit blocks of the region are not propagated past.  */
        FOR_EACH_EDGE (e, ei, bb->succs)
          if (!AVAIL_IN_WORKLIST_P (e->dest) && e->dest != EXIT_BLOCK_PTR)
            {
              *qin++ = e->dest;
              AVAIL_IN_WORKLIST_P (e->dest) = true;
              qlen++;

              if (qin >= qend)
                qin = worklist;
            }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
 
 
/* Compute ANTIC sets for every basic block in BLOCKS.

   We compute STORE_ANTIC_OUT as follows:

        STORE_ANTIC_OUT[bb] = union(STORE_ANTIC_IN[bb], STORE_LOCAL[bb])
        STORE_ANTIC_IN[bb]  = intersect(STORE_ANTIC_OUT[successors])

   This is a backward problem, so information flows from successors
   to predecessors.

   REGION is the TM region.
   BLOCKS are the basic blocks in the region.  */

static void
tm_memopt_compute_antic (struct tm_region *region,
                         VEC (basic_block, heap) *blocks)
{
  edge e;
  basic_block *worklist, *qin, *qout, *qend, bb;
  unsigned int qlen;
  int i;
  edge_iterator ei;

  /* Allocate a worklist array/queue.  Entries are only added to the
     list if they were not already on the list.  So the size is
     bounded by the number of basic blocks in the region.  */
  qin = qout = worklist =
    XNEWVEC (basic_block, VEC_length (basic_block, blocks));

  /* Walk BLOCKS in reverse index order, since this is a backward
     dataflow problem.  */
  for (qlen = 0, i = VEC_length (basic_block, blocks) - 1; i >= 0; --i)
    {
      bb = VEC_index (basic_block, blocks, i);

      /* Seed ANTIC_OUT with the LOCAL set.  */
      bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_LOCAL (bb));

      /* Put every block in the region on the worklist.  Note that the
         AVAIL_IN_WORKLIST_P flag is reused here for the ANTIC solve.  */
      AVAIL_IN_WORKLIST_P (bb) = true;
      /* No need to insert exit blocks, since their ANTIC_IN is NULL,
         and their ANTIC_OUT has already been seeded in.  */
      if (region->exit_blocks
          && !bitmap_bit_p (region->exit_blocks, bb->index))
        {
          qlen++;
          *qin++ = bb;
        }
    }

  /* The exit blocks have been initialized with the local sets.  */
  if (region->exit_blocks)
    {
      unsigned int i;
      bitmap_iterator bi;
      EXECUTE_IF_SET_IN_BITMAP (region->exit_blocks, 0, i, bi)
        BB_VISITED_P (BASIC_BLOCK (i)) = true;
    }

  /* The worklist is a circular buffer: QIN writes, QOUT reads, both
     wrapping at QEND.  */
  qin = worklist;
  qend = &worklist[qlen];

  /* Iterate until the worklist is empty.  */
  while (qlen)
    {
      /* Take the first entry off the worklist.  */
      bb = *qout++;
      qlen--;

      if (qout >= qend)
        qout = worklist;

      /* This block can be added to the worklist again if necessary.  */
      AVAIL_IN_WORKLIST_P (bb) = false;
      tm_memopt_compute_antin (bb);

      /* Note: We do not add the LOCAL sets here because we already
         seeded the ANTIC_OUT sets with them.  */
      if (bitmap_ior_into (STORE_ANTIC_OUT (bb), STORE_ANTIC_IN (bb))
          && bb != region->entry_block)
        /* If the out state of this block changed, then we need to add
           its predecessors to the worklist if they are not already in.  */
        FOR_EACH_EDGE (e, ei, bb->preds)
          if (!AVAIL_IN_WORKLIST_P (e->src))
            {
              *qin++ = e->src;
              AVAIL_IN_WORKLIST_P (e->src) = true;
              qlen++;

              if (qin >= qend)
                qin = worklist;
            }
    }

  free (worklist);

  if (dump_file)
    dump_tm_memopt_sets (blocks);
}
 
 
/* Offsets of load variants from TM_LOAD.  For example,
   BUILT_IN_TM_LOAD_RAR* is an offset of 1 from BUILT_IN_TM_LOAD*.
   See gtm-builtins.def.  These offsets are added to the builtin's
   function code by tm_memopt_transform_stmt.  */
#define TRANSFORM_RAR 1
#define TRANSFORM_RAW 2
#define TRANSFORM_RFW 3
/* Offsets of store variants from TM_STORE.  */
#define TRANSFORM_WAR 1
#define TRANSFORM_WAW 2
 
 
/* If dumping is enabled, note in the dump file that STMT was
   rewritten by the TM memory optimizer.  */

static void
dump_tm_memopt_transform (gimple stmt)
{
  if (dump_file == NULL)
    return;

  fprintf (dump_file, "TM memopt: transforming: ");
  print_gimple_stmt (dump_file, stmt, 0, 0);
  fprintf (dump_file, "\n");
}
 
 
/* Perform a read/write optimization.  Replaces the TM builtin in STMT
/* Perform a read/write optimization.  Replaces the TM builtin in STMT
   by a builtin that is OFFSET entries down in the builtins table in
   by a builtin that is OFFSET entries down in the builtins table in
   gtm-builtins.def.  */
   gtm-builtins.def.  */
 
 
static void
static void
tm_memopt_transform_stmt (unsigned int offset,
tm_memopt_transform_stmt (unsigned int offset,
                          gimple stmt,
                          gimple stmt,
                          gimple_stmt_iterator *gsi)
                          gimple_stmt_iterator *gsi)
{
{
  tree fn = gimple_call_fn (stmt);
  tree fn = gimple_call_fn (stmt);
  gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
  gcc_assert (TREE_CODE (fn) == ADDR_EXPR);
  TREE_OPERAND (fn, 0)
  TREE_OPERAND (fn, 0)
    = builtin_decl_explicit ((enum built_in_function)
    = builtin_decl_explicit ((enum built_in_function)
                             (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
                             (DECL_FUNCTION_CODE (TREE_OPERAND (fn, 0))
                              + offset));
                              + offset));
  gimple_call_set_fn (stmt, fn);
  gimple_call_set_fn (stmt, fn);
  gsi_replace (gsi, stmt, true);
  gsi_replace (gsi, stmt, true);
  dump_tm_memopt_transform (stmt);
  dump_tm_memopt_transform (stmt);
}
}
 
 
/* Perform the actual TM memory optimization transformations in the
/* Perform the actual TM memory optimization transformations in the
   basic blocks in BLOCKS.  */
   basic blocks in BLOCKS.  */
 
 
static void
static void
tm_memopt_transform_blocks (VEC (basic_block, heap) *blocks)
tm_memopt_transform_blocks (VEC (basic_block, heap) *blocks)
{
{
  size_t i;
  size_t i;
  basic_block bb;
  basic_block bb;
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
 
 
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
  for (i = 0; VEC_iterate (basic_block, blocks, i, bb); ++i)
    {
    {
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
        {
          gimple stmt = gsi_stmt (gsi);
          gimple stmt = gsi_stmt (gsi);
          bitmap read_avail = READ_AVAIL_IN (bb);
          bitmap read_avail = READ_AVAIL_IN (bb);
          bitmap store_avail = STORE_AVAIL_IN (bb);
          bitmap store_avail = STORE_AVAIL_IN (bb);
          bitmap store_antic = STORE_ANTIC_OUT (bb);
          bitmap store_antic = STORE_ANTIC_OUT (bb);
          unsigned int loc;
          unsigned int loc;
 
 
          if (is_tm_simple_load (stmt))
          if (is_tm_simple_load (stmt))
            {
            {
              loc = tm_memopt_value_number (stmt, NO_INSERT);
              loc = tm_memopt_value_number (stmt, NO_INSERT);
              if (store_avail && bitmap_bit_p (store_avail, loc))
              if (store_avail && bitmap_bit_p (store_avail, loc))
                tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
                tm_memopt_transform_stmt (TRANSFORM_RAW, stmt, &gsi);
              else if (store_antic && bitmap_bit_p (store_antic, loc))
              else if (store_antic && bitmap_bit_p (store_antic, loc))
                {
                {
                  tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
                  tm_memopt_transform_stmt (TRANSFORM_RFW, stmt, &gsi);
                  bitmap_set_bit (store_avail, loc);
                  bitmap_set_bit (store_avail, loc);
                }
                }
              else if (read_avail && bitmap_bit_p (read_avail, loc))
              else if (read_avail && bitmap_bit_p (read_avail, loc))
                tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
                tm_memopt_transform_stmt (TRANSFORM_RAR, stmt, &gsi);
              else
              else
                bitmap_set_bit (read_avail, loc);
                bitmap_set_bit (read_avail, loc);
            }
            }
          else if (is_tm_simple_store (stmt))
          else if (is_tm_simple_store (stmt))
            {
            {
              loc = tm_memopt_value_number (stmt, NO_INSERT);
              loc = tm_memopt_value_number (stmt, NO_INSERT);
              if (store_avail && bitmap_bit_p (store_avail, loc))
              if (store_avail && bitmap_bit_p (store_avail, loc))
                tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
                tm_memopt_transform_stmt (TRANSFORM_WAW, stmt, &gsi);
              else
              else
                {
                {
                  if (read_avail && bitmap_bit_p (read_avail, loc))
                  if (read_avail && bitmap_bit_p (read_avail, loc))
                    tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
                    tm_memopt_transform_stmt (TRANSFORM_WAR, stmt, &gsi);
                  bitmap_set_bit (store_avail, loc);
                  bitmap_set_bit (store_avail, loc);
                }
                }
            }
            }
        }
        }
    }
    }
}
}
 
 
/* Return a new set of bitmaps for a BB.  */
/* Return a new set of bitmaps for a BB.  */
 
 
static struct tm_memopt_bitmaps *
static struct tm_memopt_bitmaps *
tm_memopt_init_sets (void)
tm_memopt_init_sets (void)
{
{
  struct tm_memopt_bitmaps *b
  struct tm_memopt_bitmaps *b
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
    = XOBNEW (&tm_memopt_obstack.obstack, struct tm_memopt_bitmaps);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_antic_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_in = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_avail_out = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->read_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  b->store_local = BITMAP_ALLOC (&tm_memopt_obstack);
  return b;
  return b;
}
}
 
 
/* Detach the dataflow sets from every basic block in BLOCKS by
   clearing its AUX field.  The bitmaps themselves live on the TM
   memopt obstack and are reclaimed when the obstack is released.  */

static void
tm_memopt_free_sets (VEC (basic_block, heap) *blocks)
{
  basic_block cur;
  size_t ix;

  for (ix = 0; VEC_iterate (basic_block, blocks, ix, cur); ix++)
    cur->aux = NULL;
}
 
 
/* Reset the visited flag on every basic block in BLOCKS, preparing
   for a fresh dataflow solve.  */

static void
tm_memopt_clear_visited (VEC (basic_block, heap) *blocks)
{
  basic_block cur;
  size_t ix;

  for (ix = 0; VEC_iterate (basic_block, blocks, ix, cur); ix++)
    BB_VISITED_P (cur) = false;
}
 
 
/* Replace TM load/stores with hints for the runtime.  We handle
/* Replace TM load/stores with hints for the runtime.  We handle
   things like read-after-write, write-after-read, read-after-read,
   things like read-after-write, write-after-read, read-after-read,
   read-for-write, etc.  */
   read-for-write, etc.  */
 
 
static unsigned int
static unsigned int
execute_tm_memopt (void)
execute_tm_memopt (void)
{
{
  struct tm_region *region;
  struct tm_region *region;
  VEC (basic_block, heap) *bbs;
  VEC (basic_block, heap) *bbs;
 
 
  tm_memopt_value_id = 0;
  tm_memopt_value_id = 0;
  tm_memopt_value_numbers = htab_create (10, tm_memop_hash, tm_memop_eq, free);
  tm_memopt_value_numbers = htab_create (10, tm_memop_hash, tm_memop_eq, free);
 
 
  for (region = all_tm_regions; region; region = region->next)
  for (region = all_tm_regions; region; region = region->next)
    {
    {
      /* All the TM stores/loads in the current region.  */
      /* All the TM stores/loads in the current region.  */
      size_t i;
      size_t i;
      basic_block bb;
      basic_block bb;
 
 
      bitmap_obstack_initialize (&tm_memopt_obstack);
      bitmap_obstack_initialize (&tm_memopt_obstack);
 
 
      /* Save all BBs for the current region.  */
      /* Save all BBs for the current region.  */
      bbs = get_tm_region_blocks (region->entry_block,
      bbs = get_tm_region_blocks (region->entry_block,
                                  region->exit_blocks,
                                  region->exit_blocks,
                                  region->irr_blocks,
                                  region->irr_blocks,
                                  NULL,
                                  NULL,
                                  false);
                                  false);
 
 
      /* Collect all the memory operations.  */
      /* Collect all the memory operations.  */
      for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
      for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
        {
        {
          bb->aux = tm_memopt_init_sets ();
          bb->aux = tm_memopt_init_sets ();
          tm_memopt_accumulate_memops (bb);
          tm_memopt_accumulate_memops (bb);
        }
        }
 
 
      /* Solve data flow equations and transform each block accordingly.  */
      /* Solve data flow equations and transform each block accordingly.  */
      tm_memopt_clear_visited (bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_available (region, bbs);
      tm_memopt_compute_available (region, bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_clear_visited (bbs);
      tm_memopt_compute_antic (region, bbs);
      tm_memopt_compute_antic (region, bbs);
      tm_memopt_transform_blocks (bbs);
      tm_memopt_transform_blocks (bbs);
 
 
      tm_memopt_free_sets (bbs);
      tm_memopt_free_sets (bbs);
      VEC_free (basic_block, heap, bbs);
      VEC_free (basic_block, heap, bbs);
      bitmap_obstack_release (&tm_memopt_obstack);
      bitmap_obstack_release (&tm_memopt_obstack);
      htab_empty (tm_memopt_value_numbers);
      htab_empty (tm_memopt_value_numbers);
    }
    }
 
 
  htab_delete (tm_memopt_value_numbers);
  htab_delete (tm_memopt_value_numbers);
  return 0;
  return 0;
}
}
 
 
static bool
static bool
gate_tm_memopt (void)
gate_tm_memopt (void)
{
{
  return flag_tm && optimize > 0;
  return flag_tm && optimize > 0;
}
}
 
 
/* Pass descriptor for the TM memory optimization pass; gated by
   gate_tm_memopt and driven by execute_tm_memopt above.  */
struct gimple_opt_pass pass_tm_memopt =
{
 {
  GIMPLE_PASS,
  "tmmemopt",                           /* name */
  gate_tm_memopt,                       /* gate */
  execute_tm_memopt,                    /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  0,                                     /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                     /* properties_provided */
  0,                                     /* properties_destroyed */
  0,                                     /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
 }
};
 
 


/* Interprocedural analysis for the creation of transactional clones.
   The aim of this pass is to find which functions are referenced in
   The aim of this pass is to find which functions are referenced in
   a non-irrevocable transaction context, and for those over which
   a non-irrevocable transaction context, and for those over which
   we have control (or user directive), create a version of the
   we have control (or user directive), create a version of the
   function which uses only the transactional interface to reference
   function which uses only the transactional interface to reference
   protected memories.  This analysis proceeds in several steps:
   protected memories.  This analysis proceeds in several steps:
 
 
     (1) Collect the set of all possible transactional clones:
     (1) Collect the set of all possible transactional clones:
 
 
        (a) For all local public functions marked tm_callable, push
        (a) For all local public functions marked tm_callable, push
            it onto the tm_callee queue.
            it onto the tm_callee queue.
 
 
        (b) For all local functions, scan for calls in transaction blocks.
        (b) For all local functions, scan for calls in transaction blocks.
            Push the caller and callee onto the tm_caller and tm_callee
            Push the caller and callee onto the tm_caller and tm_callee
            queues.  Count the number of callers for each callee.
            queues.  Count the number of callers for each callee.
 
 
        (c) For each local function on the callee list, assume we will
        (c) For each local function on the callee list, assume we will
            create a transactional clone.  Push *all* calls onto the
            create a transactional clone.  Push *all* calls onto the
            callee queues; count the number of clone callers separately
            callee queues; count the number of clone callers separately
            to the number of original callers.
            to the number of original callers.
 
 
     (2) Propagate irrevocable status up the dominator tree:
     (2) Propagate irrevocable status up the dominator tree:
 
 
        (a) Any external function on the callee list that is not marked
        (a) Any external function on the callee list that is not marked
            tm_callable is irrevocable.  Push all callers of such onto
            tm_callable is irrevocable.  Push all callers of such onto
            a worklist.
            a worklist.
 
 
        (b) For each function on the worklist, mark each block that
        (b) For each function on the worklist, mark each block that
            contains an irrevocable call.  Use the AND operator to
            contains an irrevocable call.  Use the AND operator to
            propagate that mark up the dominator tree.
            propagate that mark up the dominator tree.
 
 
        (c) If we reach the entry block for a possible transactional
        (c) If we reach the entry block for a possible transactional
            clone, then the transactional clone is irrevocable, and
            clone, then the transactional clone is irrevocable, and
            we should not create the clone after all.  Push all
            we should not create the clone after all.  Push all
            callers onto the worklist.
            callers onto the worklist.
 
 
        (d) Place tm_irrevocable calls at the beginning of the relevant
        (d) Place tm_irrevocable calls at the beginning of the relevant
            blocks.  Special case here is the entry block for the entire
            blocks.  Special case here is the entry block for the entire
            transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
            transaction region; there we mark it GTMA_DOES_GO_IRREVOCABLE for
            the library to begin the region in serial mode.  Decrement
            the library to begin the region in serial mode.  Decrement
            the call count for all callees in the irrevocable region.
            the call count for all callees in the irrevocable region.
 
 
     (3) Create the transactional clones:
     (3) Create the transactional clones:
 
 
        Any tm_callee that still has a non-zero call count is cloned.
        Any tm_callee that still has a non-zero call count is cloned.
*/
*/
 
 
/* This structure is stored in the AUX field of each cgraph_node.
   It is lazily allocated (zeroed) on tm_obstack by get_cg_data.  */
struct tm_ipa_cg_data
{
  /* The clone of the function that got created.  */
  struct cgraph_node *clone;

  /* The tm regions in the normal function.  */
  struct tm_region *all_tm_regions;

  /* The blocks of the normal/clone functions that contain irrevocable
     calls, or blocks that are post-dominated by irrevocable calls.  */
  bitmap irrevocable_blocks_normal;
  bitmap irrevocable_blocks_clone;

  /* The blocks of the normal function that are involved in transactions.  */
  bitmap transaction_blocks_normal;

  /* The number of callers to the transactional clone of this function
     from normal and transactional clones respectively.  */
  unsigned tm_callers_normal;
  unsigned tm_callers_clone;

  /* True if all calls to this function's transactional clone
     are irrevocable.  Also automatically true if the function
     has no transactional clone.  */
  bool is_irrevocable;

  /* Flags indicating the presence of this function in various queues,
     used by maybe_push_queue to avoid duplicate entries.  */
  bool in_callee_queue;
  bool in_worklist;

  /* Flags indicating the kind of scan desired while in the worklist.  */
  bool want_irr_scan_normal;
};
 
 
/* A pointer to a cgraph_node, for use as a VEC element type.  */
typedef struct cgraph_node *cgraph_node_p;

DEF_VEC_P (cgraph_node_p);
DEF_VEC_ALLOC_P (cgraph_node_p, heap);

/* A queue (heap-allocated vector) of cgraph nodes, used as the
   worklist/callee queues of the IPA TM analysis below.  */
typedef VEC (cgraph_node_p, heap) *cgraph_node_queue;
 
 
/* Return the ipa data associated with NODE, allocating zeroed memory
/* Return the ipa data associated with NODE, allocating zeroed memory
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   if necessary.  TRAVERSE_ALIASES is true if we must traverse aliases
   and set *NODE accordingly.  */
   and set *NODE accordingly.  */
 
 
static struct tm_ipa_cg_data *
static struct tm_ipa_cg_data *
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
get_cg_data (struct cgraph_node **node, bool traverse_aliases)
{
{
  struct tm_ipa_cg_data *d;
  struct tm_ipa_cg_data *d;
 
 
  if (traverse_aliases && (*node)->alias)
  if (traverse_aliases && (*node)->alias)
    *node = cgraph_get_node ((*node)->thunk.alias);
    *node = cgraph_get_node ((*node)->thunk.alias);
 
 
  d = (struct tm_ipa_cg_data *) (*node)->aux;
  d = (struct tm_ipa_cg_data *) (*node)->aux;
 
 
  if (d == NULL)
  if (d == NULL)
    {
    {
      d = (struct tm_ipa_cg_data *)
      d = (struct tm_ipa_cg_data *)
        obstack_alloc (&tm_obstack.obstack, sizeof (*d));
        obstack_alloc (&tm_obstack.obstack, sizeof (*d));
      (*node)->aux = (void *) d;
      (*node)->aux = (void *) d;
      memset (d, 0, sizeof (*d));
      memset (d, 0, sizeof (*d));
    }
    }
 
 
  return d;
  return d;
}
}
 
 
/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
/* Add NODE to the end of QUEUE, unless IN_QUEUE_P indicates that
   it is already present.  */
   it is already present.  */
 
 
static void
static void
maybe_push_queue (struct cgraph_node *node,
maybe_push_queue (struct cgraph_node *node,
                  cgraph_node_queue *queue_p, bool *in_queue_p)
                  cgraph_node_queue *queue_p, bool *in_queue_p)
{
{
  if (!*in_queue_p)
  if (!*in_queue_p)
    {
    {
      *in_queue_p = true;
      *in_queue_p = true;
      VEC_safe_push (cgraph_node_p, heap, *queue_p, node);
      VEC_safe_push (cgraph_node_p, heap, *queue_p, node);
    }
    }
}
}
 
 
/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
/* A subroutine of ipa_tm_scan_calls_transaction and ipa_tm_scan_calls_clone.
   Queue all callees within block BB.  */
   Queue all callees within block BB.  */
 
 
static void
static void
ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
ipa_tm_scan_calls_block (cgraph_node_queue *callees_p,
                         basic_block bb, bool for_clone)
                         basic_block bb, bool for_clone)
{
{
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
 
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
    {
      gimple stmt = gsi_stmt (gsi);
      gimple stmt = gsi_stmt (gsi);
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
      if (is_gimple_call (stmt) && !is_tm_pure_call (stmt))
        {
        {
          tree fndecl = gimple_call_fndecl (stmt);
          tree fndecl = gimple_call_fndecl (stmt);
          if (fndecl)
          if (fndecl)
            {
            {
              struct tm_ipa_cg_data *d;
              struct tm_ipa_cg_data *d;
              unsigned *pcallers;
              unsigned *pcallers;
              struct cgraph_node *node;
              struct cgraph_node *node;
 
 
              if (is_tm_ending_fndecl (fndecl))
              if (is_tm_ending_fndecl (fndecl))
                continue;
                continue;
              if (find_tm_replacement_function (fndecl))
              if (find_tm_replacement_function (fndecl))
                continue;
                continue;
 
 
              node = cgraph_get_node (fndecl);
              node = cgraph_get_node (fndecl);
              gcc_assert (node != NULL);
              gcc_assert (node != NULL);
              d = get_cg_data (&node, true);
              d = get_cg_data (&node, true);
 
 
              pcallers = (for_clone ? &d->tm_callers_clone
              pcallers = (for_clone ? &d->tm_callers_clone
                          : &d->tm_callers_normal);
                          : &d->tm_callers_normal);
              *pcallers += 1;
              *pcallers += 1;
 
 
              maybe_push_queue (node, callees_p, &d->in_callee_queue);
              maybe_push_queue (node, callees_p, &d->in_callee_queue);
            }
            }
        }
        }
    }
    }
}
}
 
 
/* Scan all calls in NODE that are within a transaction region,
/* Scan all calls in NODE that are within a transaction region,
   and push the resulting nodes into the callee queue.  */
   and push the resulting nodes into the callee queue.  */
 
 
static void
static void
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
ipa_tm_scan_calls_transaction (struct tm_ipa_cg_data *d,
                               cgraph_node_queue *callees_p)
                               cgraph_node_queue *callees_p)
{
{
  struct tm_region *r;
  struct tm_region *r;
 
 
  d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
  d->transaction_blocks_normal = BITMAP_ALLOC (&tm_obstack);
  d->all_tm_regions = all_tm_regions;
  d->all_tm_regions = all_tm_regions;
 
 
  for (r = all_tm_regions; r; r = r->next)
  for (r = all_tm_regions; r; r = r->next)
    {
    {
      VEC (basic_block, heap) *bbs;
      VEC (basic_block, heap) *bbs;
      basic_block bb;
      basic_block bb;
      unsigned i;
      unsigned i;
 
 
      bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
      bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks, NULL,
                                  d->transaction_blocks_normal, false);
                                  d->transaction_blocks_normal, false);
 
 
      FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
      FOR_EACH_VEC_ELT (basic_block, bbs, i, bb)
        ipa_tm_scan_calls_block (callees_p, bb, false);
        ipa_tm_scan_calls_block (callees_p, bb, false);
 
 
      VEC_free (basic_block, heap, bbs);
      VEC_free (basic_block, heap, bbs);
    }
    }
}
}
 
 
/* Scan all calls in NODE as if this is the transactional clone,
/* Scan all calls in NODE as if this is the transactional clone,
   and push the destinations into the callee queue.  */
   and push the destinations into the callee queue.  */
 
 
static void
static void
ipa_tm_scan_calls_clone (struct cgraph_node *node,
ipa_tm_scan_calls_clone (struct cgraph_node *node,
                         cgraph_node_queue *callees_p)
                         cgraph_node_queue *callees_p)
{
{
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  struct function *fn = DECL_STRUCT_FUNCTION (node->decl);
  basic_block bb;
  basic_block bb;
 
 
  FOR_EACH_BB_FN (bb, fn)
  FOR_EACH_BB_FN (bb, fn)
    ipa_tm_scan_calls_block (callees_p, bb, true);
    ipa_tm_scan_calls_block (callees_p, bb, true);
}
}
 
 
/* The function NODE has been detected to be irrevocable.  Push all
/* The function NODE has been detected to be irrevocable.  Push all
   of its callers onto WORKLIST for the purpose of re-scanning them.  */
   of its callers onto WORKLIST for the purpose of re-scanning them.  */
 
 
static void
static void
ipa_tm_note_irrevocable (struct cgraph_node *node,
ipa_tm_note_irrevocable (struct cgraph_node *node,
                         cgraph_node_queue *worklist_p)
                         cgraph_node_queue *worklist_p)
{
{
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct tm_ipa_cg_data *d = get_cg_data (&node, true);
  struct cgraph_edge *e;
  struct cgraph_edge *e;
 
 
  d->is_irrevocable = true;
  d->is_irrevocable = true;
 
 
  for (e = node->callers; e ; e = e->next_caller)
  for (e = node->callers; e ; e = e->next_caller)
    {
    {
      basic_block bb;
      basic_block bb;
      struct cgraph_node *caller;
      struct cgraph_node *caller;
 
 
      /* Don't examine recursive calls.  */
      /* Don't examine recursive calls.  */
      if (e->caller == node)
      if (e->caller == node)
        continue;
        continue;
      /* Even if we think we can go irrevocable, believe the user
      /* Even if we think we can go irrevocable, believe the user
         above all.  */
         above all.  */
      if (is_tm_safe_or_pure (e->caller->decl))
      if (is_tm_safe_or_pure (e->caller->decl))
        continue;
        continue;
 
 
      caller = e->caller;
      caller = e->caller;
      d = get_cg_data (&caller, true);
      d = get_cg_data (&caller, true);
 
 
      /* Check if the callee is in a transactional region.  If so,
      /* Check if the callee is in a transactional region.  If so,
         schedule the function for normal re-scan as well.  */
         schedule the function for normal re-scan as well.  */
      bb = gimple_bb (e->call_stmt);
      bb = gimple_bb (e->call_stmt);
      gcc_assert (bb != NULL);
      gcc_assert (bb != NULL);
      if (d->transaction_blocks_normal
      if (d->transaction_blocks_normal
          && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
          && bitmap_bit_p (d->transaction_blocks_normal, bb->index))
        d->want_irr_scan_normal = true;
        d->want_irr_scan_normal = true;
 
 
      maybe_push_queue (caller, worklist_p, &d->in_worklist);
      maybe_push_queue (caller, worklist_p, &d->in_worklist);
    }
    }
}
}
 
 
/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
/* A subroutine of ipa_tm_scan_irr_blocks; return true iff any statement
   within the block is irrevocable.  */
   within the block is irrevocable.  */
 
 
static bool
static bool
ipa_tm_scan_irr_block (basic_block bb)
ipa_tm_scan_irr_block (basic_block bb)
{
{
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
  tree fn;
  tree fn;
 
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
    {
      gimple stmt = gsi_stmt (gsi);
      gimple stmt = gsi_stmt (gsi);
      switch (gimple_code (stmt))
      switch (gimple_code (stmt))
        {
        {
        case GIMPLE_CALL:
        case GIMPLE_CALL:
          if (is_tm_pure_call (stmt))
          if (is_tm_pure_call (stmt))
            break;
            break;
 
 
          fn = gimple_call_fn (stmt);
          fn = gimple_call_fn (stmt);
 
 
          /* Functions with the attribute are by definition irrevocable.  */
          /* Functions with the attribute are by definition irrevocable.  */
          if (is_tm_irrevocable (fn))
          if (is_tm_irrevocable (fn))
            return true;
            return true;
 
 
          /* For direct function calls, go ahead and check for replacement
          /* For direct function calls, go ahead and check for replacement
             functions, or transitive irrevocable functions.  For indirect
             functions, or transitive irrevocable functions.  For indirect
             functions, we'll ask the runtime.  */
             functions, we'll ask the runtime.  */
          if (TREE_CODE (fn) == ADDR_EXPR)
          if (TREE_CODE (fn) == ADDR_EXPR)
            {
            {
              struct tm_ipa_cg_data *d;
              struct tm_ipa_cg_data *d;
              struct cgraph_node *node;
              struct cgraph_node *node;
 
 
              fn = TREE_OPERAND (fn, 0);
              fn = TREE_OPERAND (fn, 0);
              if (is_tm_ending_fndecl (fn))
              if (is_tm_ending_fndecl (fn))
                break;
                break;
              if (find_tm_replacement_function (fn))
              if (find_tm_replacement_function (fn))
                break;
                break;
 
 
              node = cgraph_get_node(fn);
              node = cgraph_get_node(fn);
              d = get_cg_data (&node, true);
              d = get_cg_data (&node, true);
 
 
              /* Return true if irrevocable, but above all, believe
              /* Return true if irrevocable, but above all, believe
                 the user.  */
                 the user.  */
              if (d->is_irrevocable
              if (d->is_irrevocable
                  && !is_tm_safe_or_pure (fn))
                  && !is_tm_safe_or_pure (fn))
                return true;
                return true;
            }
            }
          break;
          break;
 
 
        case GIMPLE_ASM:
        case GIMPLE_ASM:
          /* ??? The Approved Method of indicating that an inline
          /* ??? The Approved Method of indicating that an inline
             assembly statement is not relevant to the transaction
             assembly statement is not relevant to the transaction
             is to wrap it in a __tm_waiver block.  This is not
             is to wrap it in a __tm_waiver block.  This is not
             yet implemented, so we can't check for it.  */
             yet implemented, so we can't check for it.  */
          if (is_tm_safe (current_function_decl))
          if (is_tm_safe (current_function_decl))
            {
            {
              tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
              tree t = build1 (NOP_EXPR, void_type_node, size_zero_node);
              SET_EXPR_LOCATION (t, gimple_location (stmt));
              SET_EXPR_LOCATION (t, gimple_location (stmt));
              TREE_BLOCK (t) = gimple_block (stmt);
              TREE_BLOCK (t) = gimple_block (stmt);
              error ("%Kasm not allowed in %<transaction_safe%> function", t);
              error ("%Kasm not allowed in %<transaction_safe%> function", t);
            }
            }
          return true;
          return true;
 
 
        default:
        default:
          break;
          break;
        }
        }
    }
    }
 
 
  return false;
  return false;
}
}
 
 
/* For each of the blocks seeded witin PQUEUE, walk the CFG looking
/* For each of the blocks seeded witin PQUEUE, walk the CFG looking
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   for new irrevocable blocks, marking them in NEW_IRR.  Don't bother
   scanning past OLD_IRR or EXIT_BLOCKS.  */
   scanning past OLD_IRR or EXIT_BLOCKS.  */
 
 
static bool
static bool
ipa_tm_scan_irr_blocks (VEC (basic_block, heap) **pqueue, bitmap new_irr,
ipa_tm_scan_irr_blocks (VEC (basic_block, heap) **pqueue, bitmap new_irr,
                        bitmap old_irr, bitmap exit_blocks)
                        bitmap old_irr, bitmap exit_blocks)
{
{
  bool any_new_irr = false;
  bool any_new_irr = false;
  edge e;
  edge e;
  edge_iterator ei;
  edge_iterator ei;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
 
 
  do
  do
    {
    {
      basic_block bb = VEC_pop (basic_block, *pqueue);
      basic_block bb = VEC_pop (basic_block, *pqueue);
 
 
      /* Don't re-scan blocks we know already are irrevocable.  */
      /* Don't re-scan blocks we know already are irrevocable.  */
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
      if (old_irr && bitmap_bit_p (old_irr, bb->index))
        continue;
        continue;
 
 
      if (ipa_tm_scan_irr_block (bb))
      if (ipa_tm_scan_irr_block (bb))
        {
        {
          bitmap_set_bit (new_irr, bb->index);
          bitmap_set_bit (new_irr, bb->index);
          any_new_irr = true;
          any_new_irr = true;
        }
        }
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
      else if (exit_blocks == NULL || !bitmap_bit_p (exit_blocks, bb->index))
        {
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (!bitmap_bit_p (visited_blocks, e->dest->index))
            if (!bitmap_bit_p (visited_blocks, e->dest->index))
              {
              {
                bitmap_set_bit (visited_blocks, e->dest->index);
                bitmap_set_bit (visited_blocks, e->dest->index);
                VEC_safe_push (basic_block, heap, *pqueue, e->dest);
                VEC_safe_push (basic_block, heap, *pqueue, e->dest);
              }
              }
        }
        }
    }
    }
  while (!VEC_empty (basic_block, *pqueue));
  while (!VEC_empty (basic_block, *pqueue));
 
 
  BITMAP_FREE (visited_blocks);
  BITMAP_FREE (visited_blocks);
 
 
  return any_new_irr;
  return any_new_irr;
}
}
 
 
/* Propagate the irrevocable property both up and down the dominator tree.
/* Propagate the irrevocable property both up and down the dominator tree.
   BB is the current block being scanned; EXIT_BLOCKS are the edges of the
   BB is the current block being scanned; EXIT_BLOCKS are the edges of the
   TM regions; OLD_IRR are the results of a previous scan of the dominator
   TM regions; OLD_IRR are the results of a previous scan of the dominator
   tree which has been fully propagated; NEW_IRR is the set of new blocks
   tree which has been fully propagated; NEW_IRR is the set of new blocks
   which are gaining the irrevocable property during the current scan.  */
   which are gaining the irrevocable property during the current scan.  */
 
 
static void
static void
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
ipa_tm_propagate_irr (basic_block entry_block, bitmap new_irr,
                      bitmap old_irr, bitmap exit_blocks)
                      bitmap old_irr, bitmap exit_blocks)
{
{
  VEC (basic_block, heap) *bbs;
  VEC (basic_block, heap) *bbs;
  bitmap all_region_blocks;
  bitmap all_region_blocks;
 
 
  /* If this block is in the old set, no need to rescan.  */
  /* If this block is in the old set, no need to rescan.  */
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
  if (old_irr && bitmap_bit_p (old_irr, entry_block->index))
    return;
    return;
 
 
  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  all_region_blocks = BITMAP_ALLOC (&tm_obstack);
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
  bbs = get_tm_region_blocks (entry_block, exit_blocks, NULL,
                              all_region_blocks, false);
                              all_region_blocks, false);
  do
  do
    {
    {
      basic_block bb = VEC_pop (basic_block, bbs);
      basic_block bb = VEC_pop (basic_block, bbs);
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool this_irr = bitmap_bit_p (new_irr, bb->index);
      bool all_son_irr = false;
      bool all_son_irr = false;
      edge_iterator ei;
      edge_iterator ei;
      edge e;
      edge e;
 
 
      /* Propagate up.  If my children are, I am too, but we must have
      /* Propagate up.  If my children are, I am too, but we must have
         at least one child that is.  */
         at least one child that is.  */
      if (!this_irr)
      if (!this_irr)
        {
        {
          FOR_EACH_EDGE (e, ei, bb->succs)
          FOR_EACH_EDGE (e, ei, bb->succs)
            {
            {
              if (!bitmap_bit_p (new_irr, e->dest->index))
              if (!bitmap_bit_p (new_irr, e->dest->index))
                {
                {
                  all_son_irr = false;
                  all_son_irr = false;
                  break;
                  break;
                }
                }
              else
              else
                all_son_irr = true;
                all_son_irr = true;
            }
            }
          if (all_son_irr)
          if (all_son_irr)
            {
            {
              /* Add block to new_irr if it hasn't already been processed. */
              /* Add block to new_irr if it hasn't already been processed. */
              if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
              if (!old_irr || !bitmap_bit_p (old_irr, bb->index))
                {
                {
                  bitmap_set_bit (new_irr, bb->index);
                  bitmap_set_bit (new_irr, bb->index);
                  this_irr = true;
                  this_irr = true;
                }
                }
            }
            }
        }
        }
 
 
      /* Propagate down to everyone we immediately dominate.  */
      /* Propagate down to everyone we immediately dominate.  */
      if (this_irr)
      if (this_irr)
        {
        {
          basic_block son;
          basic_block son;
          for (son = first_dom_son (CDI_DOMINATORS, bb);
          for (son = first_dom_son (CDI_DOMINATORS, bb);
               son;
               son;
               son = next_dom_son (CDI_DOMINATORS, son))
               son = next_dom_son (CDI_DOMINATORS, son))
            {
            {
              /* Make sure block is actually in a TM region, and it
              /* Make sure block is actually in a TM region, and it
                 isn't already in old_irr.  */
                 isn't already in old_irr.  */
              if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
              if ((!old_irr || !bitmap_bit_p (old_irr, son->index))
                  && bitmap_bit_p (all_region_blocks, son->index))
                  && bitmap_bit_p (all_region_blocks, son->index))
                bitmap_set_bit (new_irr, son->index);
                bitmap_set_bit (new_irr, son->index);
            }
            }
        }
        }
    }
    }
  while (!VEC_empty (basic_block, bbs));
  while (!VEC_empty (basic_block, bbs));
 
 
  BITMAP_FREE (all_region_blocks);
  BITMAP_FREE (all_region_blocks);
  VEC_free (basic_block, heap, bbs);
  VEC_free (basic_block, heap, bbs);
}
}
 
 
/* Reverse the bookkeeping of ipa_tm_scan_calls_block for block BB:
   for each direct, non-tm-pure call, decrement the recorded count of
   transactional callers of the callee.  FOR_CLONE selects which of
   the two counters is adjusted.  */

static void
ipa_tm_decrement_clone_counts (basic_block bb, bool for_clone)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple stmt = gsi_stmt (gsi);
      tree fndecl;
      struct cgraph_node *tnode;
      struct tm_ipa_cg_data *d;
      unsigned *pcallers;

      if (!is_gimple_call (stmt) || is_tm_pure_call (stmt))
        continue;
      fndecl = gimple_call_fndecl (stmt);
      if (!fndecl)
        continue;

      /* These were never counted in the first place; skip them here
         too so the counters stay balanced.  */
      if (is_tm_ending_fndecl (fndecl))
        continue;
      if (find_tm_replacement_function (fndecl))
        continue;

      tnode = cgraph_get_node (fndecl);
      d = get_cg_data (&tnode, true);

      pcallers = for_clone ? &d->tm_callers_clone : &d->tm_callers_normal;

      gcc_assert (*pcallers > 0);
      *pcallers -= 1;
    }
}
 
 
/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
/* (Re-)Scan the transaction blocks in NODE for calls to irrevocable functions,
   as well as other irrevocable actions such as inline assembly.  Mark all
   as well as other irrevocable actions such as inline assembly.  Mark all
   such blocks as irrevocable and decrement the number of calls to
   such blocks as irrevocable and decrement the number of calls to
   transactional clones.  Return true if, for the transactional clone, the
   transactional clones.  Return true if, for the transactional clone, the
   entire function is irrevocable.  */
   entire function is irrevocable.  */
 
 
static bool
static bool
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
ipa_tm_scan_irr_function (struct cgraph_node *node, bool for_clone)
{
{
  struct tm_ipa_cg_data *d;
  struct tm_ipa_cg_data *d;
  bitmap new_irr, old_irr;
  bitmap new_irr, old_irr;
  VEC (basic_block, heap) *queue;
  VEC (basic_block, heap) *queue;
  bool ret = false;
  bool ret = false;
 
 
  /* Builtin operators (operator new, and such).  */
  /* Builtin operators (operator new, and such).  */
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
  if (DECL_STRUCT_FUNCTION (node->decl) == NULL
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
      || DECL_STRUCT_FUNCTION (node->decl)->cfg == NULL)
    return false;
    return false;
 
 
  current_function_decl = node->decl;
  current_function_decl = node->decl;
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_DOMINATORS);
 
 
  d = get_cg_data (&node, true);
  d = get_cg_data (&node, true);
  queue = VEC_alloc (basic_block, heap, 10);
  queue = VEC_alloc (basic_block, heap, 10);
  new_irr = BITMAP_ALLOC (&tm_obstack);
  new_irr = BITMAP_ALLOC (&tm_obstack);
 
 
  /* Scan each tm region, propagating irrevocable status through the tree.  */
  /* Scan each tm region, propagating irrevocable status through the tree.  */
  if (for_clone)
  if (for_clone)
    {
    {
      old_irr = d->irrevocable_blocks_clone;
      old_irr = d->irrevocable_blocks_clone;
      VEC_quick_push (basic_block, queue, single_succ (ENTRY_BLOCK_PTR));
      VEC_quick_push (basic_block, queue, single_succ (ENTRY_BLOCK_PTR));
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
      if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr, NULL))
        {
        {
          ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
          ipa_tm_propagate_irr (single_succ (ENTRY_BLOCK_PTR), new_irr,
                                old_irr, NULL);
                                old_irr, NULL);
          ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
          ret = bitmap_bit_p (new_irr, single_succ (ENTRY_BLOCK_PTR)->index);
        }
        }
    }
    }
  else
  else
    {
    {
      struct tm_region *region;
      struct tm_region *region;
 
 
      old_irr = d->irrevocable_blocks_normal;
      old_irr = d->irrevocable_blocks_normal;
      for (region = d->all_tm_regions; region; region = region->next)
      for (region = d->all_tm_regions; region; region = region->next)
        {
        {
          VEC_quick_push (basic_block, queue, region->entry_block);
          VEC_quick_push (basic_block, queue, region->entry_block);
          if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
          if (ipa_tm_scan_irr_blocks (&queue, new_irr, old_irr,
                                      region->exit_blocks))
                                      region->exit_blocks))
            ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
            ipa_tm_propagate_irr (region->entry_block, new_irr, old_irr,
                                  region->exit_blocks);
                                  region->exit_blocks);
        }
        }
    }
    }
 
 
  /* If we found any new irrevocable blocks, reduce the call count for
  /* If we found any new irrevocable blocks, reduce the call count for
     transactional clones within the irrevocable blocks.  Save the new
     transactional clones within the irrevocable blocks.  Save the new
     set of irrevocable blocks for next time.  */
     set of irrevocable blocks for next time.  */
  if (!bitmap_empty_p (new_irr))
  if (!bitmap_empty_p (new_irr))
    {
    {
      bitmap_iterator bmi;
      bitmap_iterator bmi;
      unsigned i;
      unsigned i;
 
 
      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
      EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
        ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
        ipa_tm_decrement_clone_counts (BASIC_BLOCK (i), for_clone);
 
 
      if (old_irr)
      if (old_irr)
        {
        {
          bitmap_ior_into (old_irr, new_irr);
          bitmap_ior_into (old_irr, new_irr);
          BITMAP_FREE (new_irr);
          BITMAP_FREE (new_irr);
        }
        }
      else if (for_clone)
      else if (for_clone)
        d->irrevocable_blocks_clone = new_irr;
        d->irrevocable_blocks_clone = new_irr;
      else
      else
        d->irrevocable_blocks_normal = new_irr;
        d->irrevocable_blocks_normal = new_irr;
 
 
      if (dump_file && new_irr)
      if (dump_file && new_irr)
        {
        {
          const char *dname;
          const char *dname;
          bitmap_iterator bmi;
          bitmap_iterator bmi;
          unsigned i;
          unsigned i;
 
 
          dname = lang_hooks.decl_printable_name (current_function_decl, 2);
          dname = lang_hooks.decl_printable_name (current_function_decl, 2);
          EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
          EXECUTE_IF_SET_IN_BITMAP (new_irr, 0, i, bmi)
            fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
            fprintf (dump_file, "%s: bb %d goes irrevocable\n", dname, i);
        }
        }
    }
    }
  else
  else
    BITMAP_FREE (new_irr);
    BITMAP_FREE (new_irr);
 
 
  VEC_free (basic_block, heap, queue);
  VEC_free (basic_block, heap, queue);
  pop_cfun ();
  pop_cfun ();
  current_function_decl = NULL;
  current_function_decl = NULL;
 
 
  return ret;
  return ret;
}
}
 
 
/* Return true if, for the transactional clone of NODE, any call
/* Return true if, for the transactional clone of NODE, any call
   may enter irrevocable mode.  */
   may enter irrevocable mode.  */
 
 
static bool
static bool
ipa_tm_mayenterirr_function (struct cgraph_node *node)
ipa_tm_mayenterirr_function (struct cgraph_node *node)
{
{
  struct tm_ipa_cg_data *d;
  struct tm_ipa_cg_data *d;
  tree decl;
  tree decl;
  unsigned flags;
  unsigned flags;
 
 
  d = get_cg_data (&node, true);
  d = get_cg_data (&node, true);
  decl = node->decl;
  decl = node->decl;
  flags = flags_from_decl_or_type (decl);
  flags = flags_from_decl_or_type (decl);
 
 
  /* Handle some TM builtins.  Ordinarily these aren't actually generated
  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
     user makes it easier to build unit tests.  */
  if (flags & ECF_TM_BUILTIN)
  if (flags & ECF_TM_BUILTIN)
    return false;
    return false;
 
 
  /* Filter out all functions that are marked.  */
  /* Filter out all functions that are marked.  */
  if (flags & ECF_TM_PURE)
  if (flags & ECF_TM_PURE)
    return false;
    return false;
  if (is_tm_safe (decl))
  if (is_tm_safe (decl))
    return false;
    return false;
  if (is_tm_irrevocable (decl))
  if (is_tm_irrevocable (decl))
    return true;
    return true;
  if (is_tm_callable (decl))
  if (is_tm_callable (decl))
    return true;
    return true;
  if (find_tm_replacement_function (decl))
  if (find_tm_replacement_function (decl))
    return true;
    return true;
 
 
  /* If we aren't seeing the final version of the function we don't
  /* If we aren't seeing the final version of the function we don't
     know what it will contain at runtime.  */
     know what it will contain at runtime.  */
  if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
  if (cgraph_function_body_availability (node) < AVAIL_AVAILABLE)
    return true;
    return true;
 
 
  /* If the function must go irrevocable, then of course true.  */
  /* If the function must go irrevocable, then of course true.  */
  if (d->is_irrevocable)
  if (d->is_irrevocable)
    return true;
    return true;
 
 
  /* If there are any blocks marked irrevocable, then the function
  /* If there are any blocks marked irrevocable, then the function
     as a whole may enter irrevocable.  */
     as a whole may enter irrevocable.  */
  if (d->irrevocable_blocks_clone)
  if (d->irrevocable_blocks_clone)
    return true;
    return true;
 
 
  /* We may have previously marked this function as tm_may_enter_irr;
  /* We may have previously marked this function as tm_may_enter_irr;
     see pass_diagnose_tm_blocks.  */
     see pass_diagnose_tm_blocks.  */
  if (node->local.tm_may_enter_irr)
  if (node->local.tm_may_enter_irr)
    return true;
    return true;
 
 
  /* Recurse on the main body for aliases.  In general, this will
  /* Recurse on the main body for aliases.  In general, this will
     result in one of the bits above being set so that we will not
     result in one of the bits above being set so that we will not
     have to recurse next time.  */
     have to recurse next time.  */
  if (node->alias)
  if (node->alias)
    return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
    return ipa_tm_mayenterirr_function (cgraph_get_node (node->thunk.alias));
 
 
  /* What remains is unmarked local functions without items that force
  /* What remains is unmarked local functions without items that force
     the function to go irrevocable.  */
     the function to go irrevocable.  */
  return false;
  return false;
}
}
 
 
/* Diagnose calls from transaction_safe functions to unmarked
/* Diagnose calls from transaction_safe functions to unmarked
   functions that are determined to not be safe.  */
   functions that are determined to not be safe.  */
 
 
static void
static void
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
ipa_tm_diagnose_tm_safe (struct cgraph_node *node)
{
{
  struct cgraph_edge *e;
  struct cgraph_edge *e;
 
 
  for (e = node->callees; e ; e = e->next_callee)
  for (e = node->callees; e ; e = e->next_callee)
    if (!is_tm_callable (e->callee->decl)
    if (!is_tm_callable (e->callee->decl)
        && e->callee->local.tm_may_enter_irr)
        && e->callee->local.tm_may_enter_irr)
      error_at (gimple_location (e->call_stmt),
      error_at (gimple_location (e->call_stmt),
                "unsafe function call %qD within "
                "unsafe function call %qD within "
                "%<transaction_safe%> function", e->callee->decl);
                "%<transaction_safe%> function", e->callee->decl);
}
}
 
 
/* Diagnose call from atomic transactions to unmarked functions
/* Diagnose call from atomic transactions to unmarked functions
   that are determined to not be safe.  */
   that are determined to not be safe.  */
 
 
static void
static void
ipa_tm_diagnose_transaction (struct cgraph_node *node,
ipa_tm_diagnose_transaction (struct cgraph_node *node,
                           struct tm_region *all_tm_regions)
                           struct tm_region *all_tm_regions)
{
{
  struct tm_region *r;
  struct tm_region *r;
 
 
  for (r = all_tm_regions; r ; r = r->next)
  for (r = all_tm_regions; r ; r = r->next)
    if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
    if (gimple_transaction_subcode (r->transaction_stmt) & GTMA_IS_RELAXED)
      {
      {
        /* Atomic transactions can be nested inside relaxed.  */
        /* Atomic transactions can be nested inside relaxed.  */
        if (r->inner)
        if (r->inner)
          ipa_tm_diagnose_transaction (node, r->inner);
          ipa_tm_diagnose_transaction (node, r->inner);
      }
      }
    else
    else
      {
      {
        VEC (basic_block, heap) *bbs;
        VEC (basic_block, heap) *bbs;
        gimple_stmt_iterator gsi;
        gimple_stmt_iterator gsi;
        basic_block bb;
        basic_block bb;
        size_t i;
        size_t i;
 
 
        bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
        bbs = get_tm_region_blocks (r->entry_block, r->exit_blocks,
                                    r->irr_blocks, NULL, false);
                                    r->irr_blocks, NULL, false);
 
 
        for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
        for (i = 0; VEC_iterate (basic_block, bbs, i, bb); ++i)
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
            {
              gimple stmt = gsi_stmt (gsi);
              gimple stmt = gsi_stmt (gsi);
              tree fndecl;
              tree fndecl;
 
 
              if (gimple_code (stmt) == GIMPLE_ASM)
              if (gimple_code (stmt) == GIMPLE_ASM)
                {
                {
                  error_at (gimple_location (stmt),
                  error_at (gimple_location (stmt),
                            "asm not allowed in atomic transaction");
                            "asm not allowed in atomic transaction");
                  continue;
                  continue;
                }
                }
 
 
              if (!is_gimple_call (stmt))
              if (!is_gimple_call (stmt))
                continue;
                continue;
              fndecl = gimple_call_fndecl (stmt);
              fndecl = gimple_call_fndecl (stmt);
 
 
              /* Indirect function calls have been diagnosed already.  */
              /* Indirect function calls have been diagnosed already.  */
              if (!fndecl)
              if (!fndecl)
                continue;
                continue;
 
 
              /* Stop at the end of the transaction.  */
              /* Stop at the end of the transaction.  */
              if (is_tm_ending_fndecl (fndecl))
              if (is_tm_ending_fndecl (fndecl))
                {
                {
                  if (bitmap_bit_p (r->exit_blocks, bb->index))
                  if (bitmap_bit_p (r->exit_blocks, bb->index))
                    break;
                    break;
                  continue;
                  continue;
                }
                }
 
 
              /* Marked functions have been diagnosed already.  */
              /* Marked functions have been diagnosed already.  */
              if (is_tm_pure_call (stmt))
              if (is_tm_pure_call (stmt))
                continue;
                continue;
              if (is_tm_callable (fndecl))
              if (is_tm_callable (fndecl))
                continue;
                continue;
 
 
              if (cgraph_local_info (fndecl)->tm_may_enter_irr)
              if (cgraph_local_info (fndecl)->tm_may_enter_irr)
                error_at (gimple_location (stmt),
                error_at (gimple_location (stmt),
                          "unsafe function call %qD within "
                          "unsafe function call %qD within "
                          "atomic transaction", fndecl);
                          "atomic transaction", fndecl);
            }
            }
 
 
        VEC_free (basic_block, heap, bbs);
        VEC_free (basic_block, heap, bbs);
      }
      }
}
}
 
 
/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME in
/* Return a transactional mangled name for the DECL_ASSEMBLER_NAME in
   OLD_DECL.  The returned value is a freshly malloced pointer that
   OLD_DECL.  The returned value is a freshly malloced pointer that
   should be freed by the caller.  */
   should be freed by the caller.  */
 
 
static tree
static tree
tm_mangle (tree old_asm_id)
tm_mangle (tree old_asm_id)
{
{
  const char *old_asm_name;
  const char *old_asm_name;
  char *tm_name;
  char *tm_name;
  void *alloc = NULL;
  void *alloc = NULL;
  struct demangle_component *dc;
  struct demangle_component *dc;
  tree new_asm_id;
  tree new_asm_id;
 
 
  /* Determine if the symbol is already a valid C++ mangled name.  Do this
  /* Determine if the symbol is already a valid C++ mangled name.  Do this
     even for C, which might be interfacing with C++ code via appropriately
     even for C, which might be interfacing with C++ code via appropriately
     ugly identifiers.  */
     ugly identifiers.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  /* ??? We could probably do just as well checking for "_Z" and be done.  */
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  old_asm_name = IDENTIFIER_POINTER (old_asm_id);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
  dc = cplus_demangle_v3_components (old_asm_name, DMGL_NO_OPTS, &alloc);
 
 
  if (dc == NULL)
  if (dc == NULL)
    {
    {
      char length[8];
      char length[8];
 
 
    do_unencoded:
    do_unencoded:
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      sprintf (length, "%u", IDENTIFIER_LENGTH (old_asm_id));
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
      tm_name = concat ("_ZGTt", length, old_asm_name, NULL);
    }
    }
  else
  else
    {
    {
      old_asm_name += 2;        /* Skip _Z */
      old_asm_name += 2;        /* Skip _Z */
 
 
      switch (dc->type)
      switch (dc->type)
        {
        {
        case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
        case DEMANGLE_COMPONENT_TRANSACTION_CLONE:
        case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
        case DEMANGLE_COMPONENT_NONTRANSACTION_CLONE:
          /* Don't play silly games, you!  */
          /* Don't play silly games, you!  */
          goto do_unencoded;
          goto do_unencoded;
 
 
        case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
        case DEMANGLE_COMPONENT_HIDDEN_ALIAS:
          /* I'd really like to know if we can ever be passed one of
          /* I'd really like to know if we can ever be passed one of
             these from the C++ front end.  The Logical Thing would
             these from the C++ front end.  The Logical Thing would
             seem that hidden-alias should be outer-most, so that we
             seem that hidden-alias should be outer-most, so that we
             get hidden-alias of a transaction-clone and not vice-versa.  */
             get hidden-alias of a transaction-clone and not vice-versa.  */
          old_asm_name += 2;
          old_asm_name += 2;
          break;
          break;
 
 
        default:
        default:
          break;
          break;
        }
        }
 
 
      tm_name = concat ("_ZGTt", old_asm_name, NULL);
      tm_name = concat ("_ZGTt", old_asm_name, NULL);
    }
    }
  free (alloc);
  free (alloc);
 
 
  new_asm_id = get_identifier (tm_name);
  new_asm_id = get_identifier (tm_name);
  free (tm_name);
  free (tm_name);
 
 
  return new_asm_id;
  return new_asm_id;
}
}
 
 
static inline void
static inline void
ipa_tm_mark_needed_node (struct cgraph_node *node)
ipa_tm_mark_needed_node (struct cgraph_node *node)
{
{
  cgraph_mark_needed_node (node);
  cgraph_mark_needed_node (node);
  /* ??? function_and_variable_visibility will reset
  /* ??? function_and_variable_visibility will reset
     the needed bit, without actually checking.  */
     the needed bit, without actually checking.  */
  node->analyzed = 1;
  node->analyzed = 1;
}
}
 
 
/* Callback data for ipa_tm_create_version_alias.  */
/* Callback data for ipa_tm_create_version_alias.  */
struct create_version_alias_info
struct create_version_alias_info
{
{
  struct cgraph_node *old_node;
  struct cgraph_node *old_node;
  tree new_decl;
  tree new_decl;
};
};
 
 
/* A subroutine of ipa_tm_create_version, called via
/* A subroutine of ipa_tm_create_version, called via
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   cgraph_for_node_and_aliases.  Create new tm clones for each of
   the existing aliases.  */
   the existing aliases.  */
static bool
static bool
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
ipa_tm_create_version_alias (struct cgraph_node *node, void *data)
{
{
  struct create_version_alias_info *info
  struct create_version_alias_info *info
    = (struct create_version_alias_info *)data;
    = (struct create_version_alias_info *)data;
  tree old_decl, new_decl, tm_name;
  tree old_decl, new_decl, tm_name;
  struct cgraph_node *new_node;
  struct cgraph_node *new_node;
 
 
  if (!node->same_body_alias)
  if (!node->same_body_alias)
    return false;
    return false;
 
 
  old_decl = node->decl;
  old_decl = node->decl;
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
  new_decl = build_decl (DECL_SOURCE_LOCATION (old_decl),
                         TREE_CODE (old_decl), tm_name,
                         TREE_CODE (old_decl), tm_name,
                         TREE_TYPE (old_decl));
                         TREE_TYPE (old_decl));
 
 
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  SET_DECL_RTL (new_decl, NULL);
 
 
  /* Based loosely on C++'s make_alias_for().  */
  /* Based loosely on C++'s make_alias_for().  */
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  TREE_PUBLIC (new_decl) = TREE_PUBLIC (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_CONTEXT (new_decl) = DECL_CONTEXT (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  DECL_LANG_SPECIFIC (new_decl) = DECL_LANG_SPECIFIC (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  TREE_READONLY (new_decl) = TREE_READONLY (old_decl);
  DECL_EXTERNAL (new_decl) = 0;
  DECL_EXTERNAL (new_decl) = 0;
  DECL_ARTIFICIAL (new_decl) = 1;
  DECL_ARTIFICIAL (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_ADDRESSABLE (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_USED (new_decl) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;
 
 
  /* Perform the same remapping to the comdat group.  */
  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
  if (DECL_ONE_ONLY (new_decl))
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
 
 
  new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
  new_node = cgraph_same_body_alias (NULL, new_decl, info->new_decl);
  new_node->tm_clone = true;
  new_node->tm_clone = true;
  new_node->local.externally_visible = info->old_node->local.externally_visible;
  new_node->local.externally_visible = info->old_node->local.externally_visible;
  /* ?? Do not traverse aliases here.  */
  /* ?? Do not traverse aliases here.  */
  get_cg_data (&node, false)->clone = new_node;
  get_cg_data (&node, false)->clone = new_node;
 
 
  record_tm_clone_pair (old_decl, new_decl);
  record_tm_clone_pair (old_decl, new_decl);
 
 
  if (info->old_node->needed)
  if (info->old_node->needed)
    ipa_tm_mark_needed_node (new_node);
    ipa_tm_mark_needed_node (new_node);
  return false;
  return false;
}
}
 
 
/* Create a copy of the function (possibly declaration only) of OLD_NODE,
/* Create a copy of the function (possibly declaration only) of OLD_NODE,
   appropriate for the transactional clone.  */
   appropriate for the transactional clone.  */
 
 
static void
static void
ipa_tm_create_version (struct cgraph_node *old_node)
ipa_tm_create_version (struct cgraph_node *old_node)
{
{
  tree new_decl, old_decl, tm_name;
  tree new_decl, old_decl, tm_name;
  struct cgraph_node *new_node;
  struct cgraph_node *new_node;
 
 
  old_decl = old_node->decl;
  old_decl = old_node->decl;
  new_decl = copy_node (old_decl);
  new_decl = copy_node (old_decl);
 
 
  /* DECL_ASSEMBLER_NAME needs to be set before we call
  /* DECL_ASSEMBLER_NAME needs to be set before we call
     cgraph_copy_node_for_versioning below, because cgraph_node will
     cgraph_copy_node_for_versioning below, because cgraph_node will
     fill the assembler_name_hash.  */
     fill the assembler_name_hash.  */
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  tm_name = tm_mangle (DECL_ASSEMBLER_NAME (old_decl));
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_ASSEMBLER_NAME (new_decl, tm_name);
  SET_DECL_RTL (new_decl, NULL);
  SET_DECL_RTL (new_decl, NULL);
  TREE_SYMBOL_REFERENCED (tm_name) = 1;
  TREE_SYMBOL_REFERENCED (tm_name) = 1;
 
 
  /* Perform the same remapping to the comdat group.  */
  /* Perform the same remapping to the comdat group.  */
  if (DECL_ONE_ONLY (new_decl))
  if (DECL_ONE_ONLY (new_decl))
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
    DECL_COMDAT_GROUP (new_decl) = tm_mangle (DECL_COMDAT_GROUP (old_decl));
 
 
  new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
  new_node = cgraph_copy_node_for_versioning (old_node, new_decl, NULL, NULL);
  new_node->local.externally_visible = old_node->local.externally_visible;
  new_node->local.externally_visible = old_node->local.externally_visible;
  new_node->lowered = true;
  new_node->lowered = true;
  new_node->tm_clone = 1;
  new_node->tm_clone = 1;
  get_cg_data (&old_node, true)->clone = new_node;
  get_cg_data (&old_node, true)->clone = new_node;
 
 
  if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
  if (cgraph_function_body_availability (old_node) >= AVAIL_OVERWRITABLE)
    {
    {
      /* Remap extern inline to static inline.  */
      /* Remap extern inline to static inline.  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      /* ??? Is it worth trying to use make_decl_one_only?  */
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
      if (DECL_DECLARED_INLINE_P (new_decl) && DECL_EXTERNAL (new_decl))
        {
        {
          DECL_EXTERNAL (new_decl) = 0;
          DECL_EXTERNAL (new_decl) = 0;
          TREE_PUBLIC (new_decl) = 0;
          TREE_PUBLIC (new_decl) = 0;
          DECL_WEAK (new_decl) = 0;
          DECL_WEAK (new_decl) = 0;
        }
        }
 
 
      tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
      tree_function_versioning (old_decl, new_decl, NULL, false, NULL, false,
                                NULL, NULL);
                                NULL, NULL);
    }
    }
 
 
  record_tm_clone_pair (old_decl, new_decl);
  record_tm_clone_pair (old_decl, new_decl);
 
 
  cgraph_call_function_insertion_hooks (new_node);
  cgraph_call_function_insertion_hooks (new_node);
  if (old_node->needed)
  if (old_node->needed)
    ipa_tm_mark_needed_node (new_node);
    ipa_tm_mark_needed_node (new_node);
 
 
  /* Do the same thing, but for any aliases of the original node.  */
  /* Do the same thing, but for any aliases of the original node.  */
  {
  {
    struct create_version_alias_info data;
    struct create_version_alias_info data;
    data.old_node = old_node;
    data.old_node = old_node;
    data.new_decl = new_decl;
    data.new_decl = new_decl;
    cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
    cgraph_for_node_and_aliases (old_node, ipa_tm_create_version_alias,
                                 &data, true);
                                 &data, true);
  }
  }
}
}
 
 
/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */
/* Construct a call to TM_IRREVOCABLE and insert it at the beginning of BB.  */
 
 
static void
static void
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
ipa_tm_insert_irr_call (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb)
                        basic_block bb)
{
{
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
  gimple g;
  gimple g;
 
 
  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
  transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
 
 
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
  g = gimple_build_call (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE),
                         1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
                         1, build_int_cst (NULL_TREE, MODE_SERIALIRREVOCABLE));
 
 
  split_block_after_labels (bb);
  split_block_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi = gsi_after_labels (bb);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
 
 
  cgraph_create_edge (node,
  cgraph_create_edge (node,
               cgraph_get_create_node
               cgraph_get_create_node
                  (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
                  (builtin_decl_explicit (BUILT_IN_TM_IRREVOCABLE)),
                      g, 0,
                      g, 0,
                      compute_call_stmt_bb_frequency (node->decl,
                      compute_call_stmt_bb_frequency (node->decl,
                                                      gimple_bb (g)));
                                                      gimple_bb (g)));
}
}
 
 
/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */
/* Construct a call to TM_GETTMCLONE and insert it before GSI.  */
 
 
static bool
static bool
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
ipa_tm_insert_gettmclone_call (struct cgraph_node *node,
                               struct tm_region *region,
                               struct tm_region *region,
                               gimple_stmt_iterator *gsi, gimple stmt)
                               gimple_stmt_iterator *gsi, gimple stmt)
{
{
  tree gettm_fn, ret, old_fn, callfn;
  tree gettm_fn, ret, old_fn, callfn;
  gimple g, g2;
  gimple g, g2;
  bool safe;
  bool safe;
 
 
  old_fn = gimple_call_fn (stmt);
  old_fn = gimple_call_fn (stmt);
 
 
  if (TREE_CODE (old_fn) == ADDR_EXPR)
  if (TREE_CODE (old_fn) == ADDR_EXPR)
    {
    {
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree fndecl = TREE_OPERAND (old_fn, 0);
      tree clone = get_tm_clone_pair (fndecl);
      tree clone = get_tm_clone_pair (fndecl);
 
 
      /* By transforming the call into a TM_GETTMCLONE, we are
      /* By transforming the call into a TM_GETTMCLONE, we are
         technically taking the address of the original function and
         technically taking the address of the original function and
         its clone.  Explain this so inlining will know this function
         its clone.  Explain this so inlining will know this function
         is needed.  */
         is needed.  */
      cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
      cgraph_mark_address_taken_node (cgraph_get_node (fndecl));
      if (clone)
      if (clone)
        cgraph_mark_address_taken_node (cgraph_get_node (clone));
        cgraph_mark_address_taken_node (cgraph_get_node (clone));
    }
    }
 
 
  safe = is_tm_safe (TREE_TYPE (old_fn));
  safe = is_tm_safe (TREE_TYPE (old_fn));
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
  gettm_fn = builtin_decl_explicit (safe ? BUILT_IN_TM_GETTMCLONE_SAFE
                                    : BUILT_IN_TM_GETTMCLONE_IRR);
                                    : BUILT_IN_TM_GETTMCLONE_IRR);
  ret = create_tmp_var (ptr_type_node, NULL);
  ret = create_tmp_var (ptr_type_node, NULL);
  add_referenced_var (ret);
  add_referenced_var (ret);
 
 
  if (!safe)
  if (!safe)
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
    transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
 
 
  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  /* Discard OBJ_TYPE_REF, since we weren't able to fold it.  */
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
  if (TREE_CODE (old_fn) == OBJ_TYPE_REF)
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);
    old_fn = OBJ_TYPE_REF_EXPR (old_fn);
 
 
  g = gimple_build_call (gettm_fn, 1, old_fn);
  g = gimple_build_call (gettm_fn, 1, old_fn);
  ret = make_ssa_name (ret, g);
  ret = make_ssa_name (ret, g);
  gimple_call_set_lhs (g, ret);
  gimple_call_set_lhs (g, ret);
 
 
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
 
 
  cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
  cgraph_create_edge (node, cgraph_get_create_node (gettm_fn), g, 0,
                      compute_call_stmt_bb_frequency (node->decl,
                      compute_call_stmt_bb_frequency (node->decl,
                                                      gimple_bb(g)));
                                                      gimple_bb(g)));
 
 
  /* Cast return value from tm_gettmclone* into appropriate function
  /* Cast return value from tm_gettmclone* into appropriate function
     pointer.  */
     pointer.  */
  callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
  callfn = create_tmp_var (TREE_TYPE (old_fn), NULL);
  add_referenced_var (callfn);
  add_referenced_var (callfn);
  g2 = gimple_build_assign (callfn,
  g2 = gimple_build_assign (callfn,
                            fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
                            fold_build1 (NOP_EXPR, TREE_TYPE (callfn), ret));
  callfn = make_ssa_name (callfn, g2);
  callfn = make_ssa_name (callfn, g2);
  gimple_assign_set_lhs (g2, callfn);
  gimple_assign_set_lhs (g2, callfn);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);
  gsi_insert_before (gsi, g2, GSI_SAME_STMT);
 
 
  /* ??? This is a hack to preserve the NOTHROW bit on the call,
  /* ??? This is a hack to preserve the NOTHROW bit on the call,
     which we would have derived from the decl.  Failure to save
     which we would have derived from the decl.  Failure to save
     this bit means we might have to split the basic block.  */
     this bit means we might have to split the basic block.  */
  if (gimple_call_nothrow_p (stmt))
  if (gimple_call_nothrow_p (stmt))
    gimple_call_set_nothrow (stmt, true);
    gimple_call_set_nothrow (stmt, true);
 
 
  gimple_call_set_fn (stmt, callfn);
  gimple_call_set_fn (stmt, callfn);
 
 
  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
  /* Discarding OBJ_TYPE_REF above may produce incompatible LHS and RHS
     for a call statement.  Fix it.  */
     for a call statement.  Fix it.  */
  {
  {
    tree lhs = gimple_call_lhs (stmt);
    tree lhs = gimple_call_lhs (stmt);
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    tree rettype = TREE_TYPE (gimple_call_fntype (stmt));
    if (lhs
    if (lhs
        && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
        && !useless_type_conversion_p (TREE_TYPE (lhs), rettype))
    {
    {
      tree temp;
      tree temp;
 
 
      temp = make_rename_temp (rettype, 0);
      temp = make_rename_temp (rettype, 0);
      gimple_call_set_lhs (stmt, temp);
      gimple_call_set_lhs (stmt, temp);
 
 
      g2 = gimple_build_assign (lhs,
      g2 = gimple_build_assign (lhs,
                                fold_build1 (VIEW_CONVERT_EXPR,
                                fold_build1 (VIEW_CONVERT_EXPR,
                                             TREE_TYPE (lhs), temp));
                                             TREE_TYPE (lhs), temp));
      gsi_insert_after (gsi, g2, GSI_SAME_STMT);
      gsi_insert_after (gsi, g2, GSI_SAME_STMT);
    }
    }
  }
  }
 
 
  update_stmt (stmt);
  update_stmt (stmt);
 
 
  return true;
  return true;
}
}
 
 
/* Helper function for ipa_tm_transform_calls*.  Given a call
/* Helper function for ipa_tm_transform_calls*.  Given a call
   statement in GSI which resides inside transaction REGION, redirect
   statement in GSI which resides inside transaction REGION, redirect
   the call to either its wrapper function, or its clone.  */
   the call to either its wrapper function, or its clone.  */
 
 
static void
static void
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
ipa_tm_transform_calls_redirect (struct cgraph_node *node,
                                 struct tm_region *region,
                                 struct tm_region *region,
                                 gimple_stmt_iterator *gsi,
                                 gimple_stmt_iterator *gsi,
                                 bool *need_ssa_rename_p)
                                 bool *need_ssa_rename_p)
{
{
  gimple stmt = gsi_stmt (*gsi);
  gimple stmt = gsi_stmt (*gsi);
  struct cgraph_node *new_node;
  struct cgraph_node *new_node;
  struct cgraph_edge *e = cgraph_edge (node, stmt);
  struct cgraph_edge *e = cgraph_edge (node, stmt);
  tree fndecl = gimple_call_fndecl (stmt);
  tree fndecl = gimple_call_fndecl (stmt);
 
 
  /* For indirect calls, pass the address through the runtime.  */
  /* For indirect calls, pass the address through the runtime.  */
  if (fndecl == NULL)
  if (fndecl == NULL)
    {
    {
      *need_ssa_rename_p |=
      *need_ssa_rename_p |=
        ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
        ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
      return;
      return;
    }
    }
 
 
  /* Handle some TM builtins.  Ordinarily these aren't actually generated
  /* Handle some TM builtins.  Ordinarily these aren't actually generated
     at this point, but handling these functions when written in by the
     at this point, but handling these functions when written in by the
     user makes it easier to build unit tests.  */
     user makes it easier to build unit tests.  */
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
  if (flags_from_decl_or_type (fndecl) & ECF_TM_BUILTIN)
    return;
    return;
 
 
  /* Fixup recursive calls inside clones.  */
  /* Fixup recursive calls inside clones.  */
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
  /* ??? Why did cgraph_copy_node_for_versioning update the call edges
     for recursion but not update the call statements themselves?  */
     for recursion but not update the call statements themselves?  */
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
  if (e->caller == e->callee && decl_is_tm_clone (current_function_decl))
    {
    {
      gimple_call_set_fndecl (stmt, current_function_decl);
      gimple_call_set_fndecl (stmt, current_function_decl);
      return;
      return;
    }
    }
 
 
  /* If there is a replacement, use it.  */
  /* If there is a replacement, use it.  */
  fndecl = find_tm_replacement_function (fndecl);
  fndecl = find_tm_replacement_function (fndecl);
  if (fndecl)
  if (fndecl)
    {
    {
      new_node = cgraph_get_create_node (fndecl);
      new_node = cgraph_get_create_node (fndecl);
 
 
      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
      /* ??? Mark all transaction_wrap functions tm_may_enter_irr.
 
 
         We can't do this earlier in record_tm_replacement because
         We can't do this earlier in record_tm_replacement because
         cgraph_remove_unreachable_nodes is called before we inject
         cgraph_remove_unreachable_nodes is called before we inject
         references to the node.  Further, we can't do this in some
         references to the node.  Further, we can't do this in some
         nice central place in ipa_tm_execute because we don't have
         nice central place in ipa_tm_execute because we don't have
         the exact list of wrapper functions that would be used.
         the exact list of wrapper functions that would be used.
         Marking more wrappers than necessary results in the creation
         Marking more wrappers than necessary results in the creation
         of unnecessary cgraph_nodes, which can cause some of the
         of unnecessary cgraph_nodes, which can cause some of the
         other IPA passes to crash.
         other IPA passes to crash.
 
 
         We do need to mark these nodes so that we get the proper
         We do need to mark these nodes so that we get the proper
         result in expand_call_tm.  */
         result in expand_call_tm.  */
      /* ??? This seems broken.  How is it that we're marking the
      /* ??? This seems broken.  How is it that we're marking the
         CALLEE as may_enter_irr?  Surely we should be marking the
         CALLEE as may_enter_irr?  Surely we should be marking the
         CALLER.  Also note that find_tm_replacement_function also
         CALLER.  Also note that find_tm_replacement_function also
         contains mappings into the TM runtime, e.g. memcpy.  These
         contains mappings into the TM runtime, e.g. memcpy.  These
         we know won't go irrevocable.  */
         we know won't go irrevocable.  */
      new_node->local.tm_may_enter_irr = 1;
      new_node->local.tm_may_enter_irr = 1;
    }
    }
  else
  else
    {
    {
      struct tm_ipa_cg_data *d;
      struct tm_ipa_cg_data *d;
      struct cgraph_node *tnode = e->callee;
      struct cgraph_node *tnode = e->callee;
 
 
      d = get_cg_data (&tnode, true);
      d = get_cg_data (&tnode, true);
      new_node = d->clone;
      new_node = d->clone;
 
 
      /* As we've already skipped pure calls and appropriate builtins,
      /* As we've already skipped pure calls and appropriate builtins,
         and we've already marked irrevocable blocks, if we can't come
         and we've already marked irrevocable blocks, if we can't come
         up with a static replacement, then ask the runtime.  */
         up with a static replacement, then ask the runtime.  */
      if (new_node == NULL)
      if (new_node == NULL)
        {
        {
          *need_ssa_rename_p |=
          *need_ssa_rename_p |=
            ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
            ipa_tm_insert_gettmclone_call (node, region, gsi, stmt);
          return;
          return;
        }
        }
 
 
      fndecl = new_node->decl;
      fndecl = new_node->decl;
    }
    }
 
 
  cgraph_redirect_edge_callee (e, new_node);
  cgraph_redirect_edge_callee (e, new_node);
  gimple_call_set_fndecl (stmt, fndecl);
  gimple_call_set_fndecl (stmt, fndecl);
}
}
 
 
/* Helper function for ipa_tm_transform_calls.  For a given BB,
/* Helper function for ipa_tm_transform_calls.  For a given BB,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   install calls to tm_irrevocable when IRR_BLOCKS are reached,
   redirect other calls to the generated transactional clone.  */
   redirect other calls to the generated transactional clone.  */
 
 
static bool
static bool
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
ipa_tm_transform_calls_1 (struct cgraph_node *node, struct tm_region *region,
                          basic_block bb, bitmap irr_blocks)
                          basic_block bb, bitmap irr_blocks)
{
{
  gimple_stmt_iterator gsi;
  gimple_stmt_iterator gsi;
  bool need_ssa_rename = false;
  bool need_ssa_rename = false;
 
 
  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
  if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
    {
    {
      ipa_tm_insert_irr_call (node, region, bb);
      ipa_tm_insert_irr_call (node, region, bb);
      return true;
      return true;
    }
    }
 
 
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
    {
      gimple stmt = gsi_stmt (gsi);
      gimple stmt = gsi_stmt (gsi);
 
 
      if (!is_gimple_call (stmt))
      if (!is_gimple_call (stmt))
        continue;
        continue;
      if (is_tm_pure_call (stmt))
      if (is_tm_pure_call (stmt))
        continue;
        continue;
 
 
      /* Redirect edges to the appropriate replacement or clone.  */
      /* Redirect edges to the appropriate replacement or clone.  */
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
      ipa_tm_transform_calls_redirect (node, region, &gsi, &need_ssa_rename);
    }
    }
 
 
  return need_ssa_rename;
  return need_ssa_rename;
}
}
 
 
/* Walk the CFG for REGION, beginning at BB.  Install calls to
/* Walk the CFG for REGION, beginning at BB.  Install calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   tm_irrevocable when IRR_BLOCKS are reached, redirect other calls to
   the generated transactional clone.  */
   the generated transactional clone.  */
 
 
static bool
static bool
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
ipa_tm_transform_calls (struct cgraph_node *node, struct tm_region *region,
                        basic_block bb, bitmap irr_blocks)
                        basic_block bb, bitmap irr_blocks)
{
{
  bool need_ssa_rename = false;
  bool need_ssa_rename = false;
  edge e;
  edge e;
  edge_iterator ei;
  edge_iterator ei;
  VEC(basic_block, heap) *queue = NULL;
  VEC(basic_block, heap) *queue = NULL;
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
  bitmap visited_blocks = BITMAP_ALLOC (NULL);
 
 
  VEC_safe_push (basic_block, heap, queue, bb);
  VEC_safe_push (basic_block, heap, queue, bb);
  do
  do
    {
    {
      bb = VEC_pop (basic_block, queue);
      bb = VEC_pop (basic_block, queue);
 
 
      need_ssa_rename |=
      need_ssa_rename |=
        ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
        ipa_tm_transform_calls_1 (node, region, bb, irr_blocks);
 
 
      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
      if (irr_blocks && bitmap_bit_p (irr_blocks, bb->index))
        continue;
        continue;
 
 
      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
      if (region && bitmap_bit_p (region->exit_blocks, bb->index))
        continue;
        continue;
 
 
      FOR_EACH_EDGE (e, ei, bb->succs)
      FOR_EACH_EDGE (e, ei, bb->succs)
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
        if (!bitmap_bit_p (visited_blocks, e->dest->index))
          {
          {
            bitmap_set_bit (visited_blocks, e->dest->index);
            bitmap_set_bit (visited_blocks, e->dest->index);
            VEC_safe_push (basic_block, heap, queue, e->dest);
            VEC_safe_push (basic_block, heap, queue, e->dest);
          }
          }
    }
    }
  while (!VEC_empty (basic_block, queue));
  while (!VEC_empty (basic_block, queue));
 
 
  VEC_free (basic_block, heap, queue);
  VEC_free (basic_block, heap, queue);
  BITMAP_FREE (visited_blocks);
  BITMAP_FREE (visited_blocks);
 
 
  return need_ssa_rename;
  return need_ssa_rename;
}
}
 
 
/* Transform the calls within the TM regions within NODE.  */
/* Transform the calls within the TM regions within NODE.  */
 
 
static void
static void
ipa_tm_transform_transaction (struct cgraph_node *node)
ipa_tm_transform_transaction (struct cgraph_node *node)
{
{
  struct tm_ipa_cg_data *d;
  struct tm_ipa_cg_data *d;
  struct tm_region *region;
  struct tm_region *region;
  bool need_ssa_rename = false;
  bool need_ssa_rename = false;
 
 
  d = get_cg_data (&node, true);
  d = get_cg_data (&node, true);
 
 
  current_function_decl = node->decl;
  current_function_decl = node->decl;
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_DOMINATORS);
 
 
  for (region = d->all_tm_regions; region; region = region->next)
  for (region = d->all_tm_regions; region; region = region->next)
    {
    {
      /* If we're sure to go irrevocable, don't transform anything.  */
      /* If we're sure to go irrevocable, don't transform anything.  */
      if (d->irrevocable_blocks_normal
      if (d->irrevocable_blocks_normal
          && bitmap_bit_p (d->irrevocable_blocks_normal,
          && bitmap_bit_p (d->irrevocable_blocks_normal,
                           region->entry_block->index))
                           region->entry_block->index))
        {
        {
          transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE);
          transaction_subcode_ior (region, GTMA_DOES_GO_IRREVOCABLE);
          transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
          transaction_subcode_ior (region, GTMA_MAY_ENTER_IRREVOCABLE);
          continue;
          continue;
        }
        }
 
 
      need_ssa_rename |=
      need_ssa_rename |=
        ipa_tm_transform_calls (node, region, region->entry_block,
        ipa_tm_transform_calls (node, region, region->entry_block,
                                d->irrevocable_blocks_normal);
                                d->irrevocable_blocks_normal);
    }
    }
 
 
  if (need_ssa_rename)
  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);
    update_ssa (TODO_update_ssa_only_virtuals);
 
 
  pop_cfun ();
  pop_cfun ();
  current_function_decl = NULL;
  current_function_decl = NULL;
}
}
 
 
/* Transform the calls within the transactional clone of NODE.  */
/* Transform the calls within the transactional clone of NODE.  */
 
 
static void
static void
ipa_tm_transform_clone (struct cgraph_node *node)
ipa_tm_transform_clone (struct cgraph_node *node)
{
{
  struct tm_ipa_cg_data *d;
  struct tm_ipa_cg_data *d;
  bool need_ssa_rename;
  bool need_ssa_rename;
 
 
  d = get_cg_data (&node, true);
  d = get_cg_data (&node, true);
 
 
  /* If this function makes no calls and has no irrevocable blocks,
  /* If this function makes no calls and has no irrevocable blocks,
     then there's nothing to do.  */
     then there's nothing to do.  */
  /* ??? Remove non-aborting top-level transactions.  */
  /* ??? Remove non-aborting top-level transactions.  */
  if (!node->callees && !d->irrevocable_blocks_clone)
  if (!node->callees && !d->irrevocable_blocks_clone)
    return;
    return;
 
 
  current_function_decl = d->clone->decl;
  current_function_decl = d->clone->decl;
  push_cfun (DECL_STRUCT_FUNCTION (current_function_decl));
  push_cfun (DECL_STRUCT_FUNCTION (current_function_decl));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_DOMINATORS);
 
 
  need_ssa_rename =
  need_ssa_rename =
    ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
    ipa_tm_transform_calls (d->clone, NULL, single_succ (ENTRY_BLOCK_PTR),
                            d->irrevocable_blocks_clone);
                            d->irrevocable_blocks_clone);
 
 
  if (need_ssa_rename)
  if (need_ssa_rename)
    update_ssa (TODO_update_ssa_only_virtuals);
    update_ssa (TODO_update_ssa_only_virtuals);
 
 
  pop_cfun ();
  pop_cfun ();
  current_function_decl = NULL;
  current_function_decl = NULL;
}
}
 
 
/* Main entry point for the transactional memory IPA pass.  */
/* Main entry point for the transactional memory IPA pass.  */
 
 
static unsigned int
static unsigned int
ipa_tm_execute (void)
ipa_tm_execute (void)
{
{
  cgraph_node_queue tm_callees = NULL;
  cgraph_node_queue tm_callees = NULL;
  /* List of functions that will go irrevocable.  */
  /* List of functions that will go irrevocable.  */
  cgraph_node_queue irr_worklist = NULL;
  cgraph_node_queue irr_worklist = NULL;
 
 
  struct cgraph_node *node;
  struct cgraph_node *node;
  struct tm_ipa_cg_data *d;
  struct tm_ipa_cg_data *d;
  enum availability a;
  enum availability a;
  unsigned int i;
  unsigned int i;
 
 
#ifdef ENABLE_CHECKING
#ifdef ENABLE_CHECKING
  verify_cgraph ();
  verify_cgraph ();
#endif
#endif
 
 
  bitmap_obstack_initialize (&tm_obstack);
  bitmap_obstack_initialize (&tm_obstack);
 
 
  /* For all local functions marked tm_callable, queue them.  */
  /* For all local functions marked tm_callable, queue them.  */
  for (node = cgraph_nodes; node; node = node->next)
  for (node = cgraph_nodes; node; node = node->next)
    if (is_tm_callable (node->decl)
    if (is_tm_callable (node->decl)
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
      {
        d = get_cg_data (&node, true);
        d = get_cg_data (&node, true);
        maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
        maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
      }
      }
 
 
  /* For all local reachable functions...  */
  /* For all local reachable functions...  */
  for (node = cgraph_nodes; node; node = node->next)
  for (node = cgraph_nodes; node; node = node->next)
    if (node->reachable && node->lowered
    if (node->reachable && node->lowered
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
      {
        /* ... marked tm_pure, record that fact for the runtime by
        /* ... marked tm_pure, record that fact for the runtime by
           indicating that the pure function is its own tm_callable.
           indicating that the pure function is its own tm_callable.
           No need to do this if the function's address can't be taken.  */
           No need to do this if the function's address can't be taken.  */
        if (is_tm_pure (node->decl))
        if (is_tm_pure (node->decl))
          {
          {
            if (!node->local.local)
            if (!node->local.local)
              record_tm_clone_pair (node->decl, node->decl);
              record_tm_clone_pair (node->decl, node->decl);
            continue;
            continue;
          }
          }
 
 
        current_function_decl = node->decl;
        current_function_decl = node->decl;
        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
        push_cfun (DECL_STRUCT_FUNCTION (node->decl));
        calculate_dominance_info (CDI_DOMINATORS);
        calculate_dominance_info (CDI_DOMINATORS);
 
 
        tm_region_init (NULL);
        tm_region_init (NULL);
        if (all_tm_regions)
        if (all_tm_regions)
          {
          {
            d = get_cg_data (&node, true);
            d = get_cg_data (&node, true);
 
 
            /* Scan for calls that are in each transaction.  */
            /* Scan for calls that are in each transaction.  */
            ipa_tm_scan_calls_transaction (d, &tm_callees);
            ipa_tm_scan_calls_transaction (d, &tm_callees);
 
 
            /* Put it in the worklist so we can scan the function
            /* Put it in the worklist so we can scan the function
               later (ipa_tm_scan_irr_function) and mark the
               later (ipa_tm_scan_irr_function) and mark the
               irrevocable blocks.  */
               irrevocable blocks.  */
            maybe_push_queue (node, &irr_worklist, &d->in_worklist);
            maybe_push_queue (node, &irr_worklist, &d->in_worklist);
            d->want_irr_scan_normal = true;
            d->want_irr_scan_normal = true;
          }
          }
 
 
        pop_cfun ();
        pop_cfun ();
        current_function_decl = NULL;
        current_function_decl = NULL;
      }
      }
 
 
  /* For every local function on the callee list, scan as if we will be
  /* For every local function on the callee list, scan as if we will be
     creating a transactional clone, queueing all new functions we find
     creating a transactional clone, queueing all new functions we find
     along the way.  */
     along the way.  */
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
    {
      node = VEC_index (cgraph_node_p, tm_callees, i);
      node = VEC_index (cgraph_node_p, tm_callees, i);
      a = cgraph_function_body_availability (node);
      a = cgraph_function_body_availability (node);
      d = get_cg_data (&node, true);
      d = get_cg_data (&node, true);
 
 
      /* Put it in the worklist so we can scan the function later
      /* Put it in the worklist so we can scan the function later
         (ipa_tm_scan_irr_function) and mark the irrevocable
         (ipa_tm_scan_irr_function) and mark the irrevocable
         blocks.  */
         blocks.  */
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);
      maybe_push_queue (node, &irr_worklist, &d->in_worklist);
 
 
      /* Some callees cannot be arbitrarily cloned.  These will always be
      /* Some callees cannot be arbitrarily cloned.  These will always be
         irrevocable.  Mark these now, so that we need not scan them.  */
         irrevocable.  Mark these now, so that we need not scan them.  */
      if (is_tm_irrevocable (node->decl))
      if (is_tm_irrevocable (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a <= AVAIL_NOT_AVAILABLE
      else if (a <= AVAIL_NOT_AVAILABLE
               && !is_tm_safe_or_pure (node->decl))
               && !is_tm_safe_or_pure (node->decl))
        ipa_tm_note_irrevocable (node, &irr_worklist);
        ipa_tm_note_irrevocable (node, &irr_worklist);
      else if (a >= AVAIL_OVERWRITABLE)
      else if (a >= AVAIL_OVERWRITABLE)
        {
        {
          if (!tree_versionable_function_p (node->decl))
          if (!tree_versionable_function_p (node->decl))
            ipa_tm_note_irrevocable (node, &irr_worklist);
            ipa_tm_note_irrevocable (node, &irr_worklist);
          else if (!d->is_irrevocable)
          else if (!d->is_irrevocable)
            {
            {
              /* If this is an alias, make sure its base is queued as well.
              /* If this is an alias, make sure its base is queued as well.
                 we need not scan the callees now, as the base will do.  */
                 we need not scan the callees now, as the base will do.  */
              if (node->alias)
              if (node->alias)
                {
                {
                  node = cgraph_get_node (node->thunk.alias);
                  node = cgraph_get_node (node->thunk.alias);
                  d = get_cg_data (&node, true);
                  d = get_cg_data (&node, true);
                  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
                  maybe_push_queue (node, &tm_callees, &d->in_callee_queue);
                  continue;
                  continue;
                }
                }
 
 
              /* Add all nodes called by this function into
              /* Add all nodes called by this function into
                 tm_callees as well.  */
                 tm_callees as well.  */
              ipa_tm_scan_calls_clone (node, &tm_callees);
              ipa_tm_scan_calls_clone (node, &tm_callees);
            }
            }
        }
        }
    }
    }
 
 
  /* Iterate scans until no more work to be done.  Prefer not to use
  /* Iterate scans until no more work to be done.  Prefer not to use
     VEC_pop because the worklist tends to follow a breadth-first
     VEC_pop because the worklist tends to follow a breadth-first
     search of the callgraph, which should allow convergance with a
     search of the callgraph, which should allow convergance with a
     minimum number of scans.  But we also don't want the worklist
     minimum number of scans.  But we also don't want the worklist
     array to grow without bound, so we shift the array up periodically.  */
     array to grow without bound, so we shift the array up periodically.  */
  for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
  for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
    {
    {
      if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
      if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
        {
        {
          VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
          VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
          i = 0;
          i = 0;
        }
        }
 
 
      node = VEC_index (cgraph_node_p, irr_worklist, i);
      node = VEC_index (cgraph_node_p, irr_worklist, i);
      d = get_cg_data (&node, true);
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      d->in_worklist = false;
 
 
      if (d->want_irr_scan_normal)
      if (d->want_irr_scan_normal)
        {
        {
          d->want_irr_scan_normal = false;
          d->want_irr_scan_normal = false;
          ipa_tm_scan_irr_function (node, false);
          ipa_tm_scan_irr_function (node, false);
        }
        }
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
      if (d->in_callee_queue && ipa_tm_scan_irr_function (node, true))
        ipa_tm_note_irrevocable (node, &irr_worklist);
        ipa_tm_note_irrevocable (node, &irr_worklist);
    }
    }
 
 
  /* For every function on the callee list, collect the tm_may_enter_irr
  /* For every function on the callee list, collect the tm_may_enter_irr
     bit on the node.  */
     bit on the node.  */
  VEC_truncate (cgraph_node_p, irr_worklist, 0);
  VEC_truncate (cgraph_node_p, irr_worklist, 0);
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
    {
      node = VEC_index (cgraph_node_p, tm_callees, i);
      node = VEC_index (cgraph_node_p, tm_callees, i);
      if (ipa_tm_mayenterirr_function (node))
      if (ipa_tm_mayenterirr_function (node))
        {
        {
          d = get_cg_data (&node, true);
          d = get_cg_data (&node, true);
          gcc_assert (d->in_worklist == false);
          gcc_assert (d->in_worklist == false);
          maybe_push_queue (node, &irr_worklist, &d->in_worklist);
          maybe_push_queue (node, &irr_worklist, &d->in_worklist);
        }
        }
    }
    }
 
 
  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  /* Propagate the tm_may_enter_irr bit to callers until stable.  */
  for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
  for (i = 0; i < VEC_length (cgraph_node_p, irr_worklist); ++i)
    {
    {
      struct cgraph_node *caller;
      struct cgraph_node *caller;
      struct cgraph_edge *e;
      struct cgraph_edge *e;
      struct ipa_ref *ref;
      struct ipa_ref *ref;
      unsigned j;
      unsigned j;
 
 
      if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
      if (i > 256 && i == VEC_length (cgraph_node_p, irr_worklist) / 8)
        {
        {
          VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
          VEC_block_remove (cgraph_node_p, irr_worklist, 0, i);
          i = 0;
          i = 0;
        }
        }
 
 
      node = VEC_index (cgraph_node_p, irr_worklist, i);
      node = VEC_index (cgraph_node_p, irr_worklist, i);
      d = get_cg_data (&node, true);
      d = get_cg_data (&node, true);
      d->in_worklist = false;
      d->in_worklist = false;
      node->local.tm_may_enter_irr = true;
      node->local.tm_may_enter_irr = true;
 
 
      /* Propagate back to normal callers.  */
      /* Propagate back to normal callers.  */
      for (e = node->callers; e ; e = e->next_caller)
      for (e = node->callers; e ; e = e->next_caller)
        {
        {
          caller = e->caller;
          caller = e->caller;
          if (!is_tm_safe_or_pure (caller->decl)
          if (!is_tm_safe_or_pure (caller->decl)
              && !caller->local.tm_may_enter_irr)
              && !caller->local.tm_may_enter_irr)
            {
            {
              d = get_cg_data (&caller, true);
              d = get_cg_data (&caller, true);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
            }
        }
        }
 
 
      /* Propagate back to referring aliases as well.  */
      /* Propagate back to referring aliases as well.  */
      for (j = 0; ipa_ref_list_refering_iterate (&node->ref_list, j, ref); j++)
      for (j = 0; ipa_ref_list_refering_iterate (&node->ref_list, j, ref); j++)
        {
        {
          caller = ref->refering.cgraph_node;
          caller = ref->refering.cgraph_node;
          if (ref->use == IPA_REF_ALIAS
          if (ref->use == IPA_REF_ALIAS
              && !caller->local.tm_may_enter_irr)
              && !caller->local.tm_may_enter_irr)
            {
            {
              /* ?? Do not traverse aliases here.  */
              /* ?? Do not traverse aliases here.  */
              d = get_cg_data (&caller, false);
              d = get_cg_data (&caller, false);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
              maybe_push_queue (caller, &irr_worklist, &d->in_worklist);
            }
            }
        }
        }
    }
    }
 
 
  /* Now validate all tm_safe functions, and all atomic regions in
  /* Now validate all tm_safe functions, and all atomic regions in
     other functions.  */
     other functions.  */
  for (node = cgraph_nodes; node; node = node->next)
  for (node = cgraph_nodes; node; node = node->next)
    if (node->reachable && node->lowered
    if (node->reachable && node->lowered
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
      {
        d = get_cg_data (&node, true);
        d = get_cg_data (&node, true);
        if (is_tm_safe (node->decl))
        if (is_tm_safe (node->decl))
          ipa_tm_diagnose_tm_safe (node);
          ipa_tm_diagnose_tm_safe (node);
        else if (d->all_tm_regions)
        else if (d->all_tm_regions)
          ipa_tm_diagnose_transaction (node, d->all_tm_regions);
          ipa_tm_diagnose_transaction (node, d->all_tm_regions);
      }
      }
 
 
  /* Create clones.  Do those that are not irrevocable and have a
  /* Create clones.  Do those that are not irrevocable and have a
     positive call count.  Do those publicly visible functions that
     positive call count.  Do those publicly visible functions that
     the user directed us to clone.  */
     the user directed us to clone.  */
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
    {
      bool doit = false;
      bool doit = false;
 
 
      node = VEC_index (cgraph_node_p, tm_callees, i);
      node = VEC_index (cgraph_node_p, tm_callees, i);
      if (node->same_body_alias)
      if (node->same_body_alias)
        continue;
        continue;
 
 
      a = cgraph_function_body_availability (node);
      a = cgraph_function_body_availability (node);
      d = get_cg_data (&node, true);
      d = get_cg_data (&node, true);
 
 
      if (a <= AVAIL_NOT_AVAILABLE)
      if (a <= AVAIL_NOT_AVAILABLE)
        doit = is_tm_callable (node->decl);
        doit = is_tm_callable (node->decl);
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
      else if (a <= AVAIL_AVAILABLE && is_tm_callable (node->decl))
        doit = true;
        doit = true;
      else if (!d->is_irrevocable
      else if (!d->is_irrevocable
               && d->tm_callers_normal + d->tm_callers_clone > 0)
               && d->tm_callers_normal + d->tm_callers_clone > 0)
        doit = true;
        doit = true;
 
 
      if (doit)
      if (doit)
        ipa_tm_create_version (node);
        ipa_tm_create_version (node);
    }
    }
 
 
  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  /* Redirect calls to the new clones, and insert irrevocable marks.  */
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
  for (i = 0; i < VEC_length (cgraph_node_p, tm_callees); ++i)
    {
    {
      node = VEC_index (cgraph_node_p, tm_callees, i);
      node = VEC_index (cgraph_node_p, tm_callees, i);
      if (node->analyzed)
      if (node->analyzed)
        {
        {
          d = get_cg_data (&node, true);
          d = get_cg_data (&node, true);
          if (d->clone)
          if (d->clone)
            ipa_tm_transform_clone (node);
            ipa_tm_transform_clone (node);
        }
        }
    }
    }
  for (node = cgraph_nodes; node; node = node->next)
  for (node = cgraph_nodes; node; node = node->next)
    if (node->reachable && node->lowered
    if (node->reachable && node->lowered
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
        && cgraph_function_body_availability (node) >= AVAIL_OVERWRITABLE)
      {
      {
        d = get_cg_data (&node, true);
        d = get_cg_data (&node, true);
        if (d->all_tm_regions)
        if (d->all_tm_regions)
          ipa_tm_transform_transaction (node);
          ipa_tm_transform_transaction (node);
      }
      }
 
 
  /* Free and clear all data structures.  */
  /* Free and clear all data structures.  */
  VEC_free (cgraph_node_p, heap, tm_callees);
  VEC_free (cgraph_node_p, heap, tm_callees);
  VEC_free (cgraph_node_p, heap, irr_worklist);
  VEC_free (cgraph_node_p, heap, irr_worklist);
  bitmap_obstack_release (&tm_obstack);
  bitmap_obstack_release (&tm_obstack);
 
 
  for (node = cgraph_nodes; node; node = node->next)
  for (node = cgraph_nodes; node; node = node->next)
    node->aux = NULL;
    node->aux = NULL;
 
 
#ifdef ENABLE_CHECKING
#ifdef ENABLE_CHECKING
  verify_cgraph ();
  verify_cgraph ();
#endif
#endif
 
 
  return 0;
  return 0;
}
}
 
 
struct simple_ipa_opt_pass pass_ipa_tm =
struct simple_ipa_opt_pass pass_ipa_tm =
{
{
 {
 {
  SIMPLE_IPA_PASS,
  SIMPLE_IPA_PASS,
  "tmipa",                              /* name */
  "tmipa",                              /* name */
  gate_tm,                              /* gate */
  gate_tm,                              /* gate */
  ipa_tm_execute,                       /* execute */
  ipa_tm_execute,                       /* execute */
  NULL,                                 /* sub */
  NULL,                                 /* sub */
  NULL,                                 /* next */
  NULL,                                 /* next */
  0,                                     /* static_pass_number */
  0,                                     /* static_pass_number */
  TV_TRANS_MEM,                         /* tv_id */
  TV_TRANS_MEM,                         /* tv_id */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  PROP_ssa | PROP_cfg,                  /* properties_required */
  0,                                     /* properties_provided */
  0,                                     /* properties_provided */
  0,                                     /* properties_destroyed */
  0,                                     /* properties_destroyed */
  0,                                     /* todo_flags_start */
  0,                                     /* todo_flags_start */
  TODO_dump_func,                       /* todo_flags_finish */
  TODO_dump_func,                       /* todo_flags_finish */
 },
 },
};
};
 
 
#include "gt-trans-mem.h"
#include "gt-trans-mem.h"
 
 

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.