OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

openrisc/trunk/gnu-src/gdb-6.8/bfd/elf32-spu.c - Diff between revs 157 and 225


Rev 157 Rev 225
Line 1... Line 1...
/* SPU specific support for 32-bit ELF
/* SPU specific support for 32-bit ELF
 
 
   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
   Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
 
 
   This file is part of BFD, the Binary File Descriptor library.
   This file is part of BFD, the Binary File Descriptor library.
 
 
   This program is free software; you can redistribute it and/or modify
   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   it under the terms of the GNU General Public License as published by
Line 17... Line 17...
   You should have received a copy of the GNU General Public License along
   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
 
 
#include "sysdep.h"
#include "sysdep.h"
 
#include "libiberty.h"
#include "bfd.h"
#include "bfd.h"
#include "bfdlink.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf/spu.h"
Line 85... Line 86...
         bfd_elf_generic_reloc, "SPU_PPU32",
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
         FALSE, 0, -1, FALSE),
 
  HOWTO (R_SPU_ADD_PIC,      0, 0, 0, FALSE,  0, complain_overflow_dont,
 
         bfd_elf_generic_reloc, "SPU_ADD_PIC",
 
         FALSE, 0, 0x00000000, FALSE),
};
};
 
 
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
 
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
  { NULL, 0, 0, 0, 0 }
};
};
 
 
static enum elf_spu_reloc_type
static enum elf_spu_reloc_type
Line 131... Line 136...
      return R_SPU_REL32;
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
      return R_SPU_PPU64;
 
    case BFD_RELOC_SPU_ADD_PIC:
 
      return R_SPU_ADD_PIC;
    }
    }
}
}
 
 
static void
static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
Line 237... Line 244...
    }
    }
 
 
  return _bfd_elf_new_section_hook (abfd, sec);
  return _bfd_elf_new_section_hook (abfd, sec);
}
}
 
 
 
/* Set up overlay info for executables.  */
 
 
 
static bfd_boolean
 
spu_elf_object_p (bfd *abfd)
 
{
 
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
 
    {
 
      unsigned int i, num_ovl, num_buf;
 
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
 
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
 
      Elf_Internal_Phdr *last_phdr = NULL;
 
 
 
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
 
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
 
          {
 
            unsigned int j;
 
 
 
            ++num_ovl;
 
            if (last_phdr == NULL
 
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
 
              ++num_buf;
 
            last_phdr = phdr;
 
            for (j = 1; j < elf_numsections (abfd); j++)
 
              {
 
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];
 
 
 
                if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
 
                  {
 
                    asection *sec = shdr->bfd_section;
 
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
 
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
 
                  }
 
              }
 
          }
 
    }
 
  return TRUE;
 
}
 
 
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */
   strip --strip-unneeded will not remove them.  */
 
 
static void
static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
Line 255... Line 300...
 
 
struct spu_link_hash_table
struct spu_link_hash_table
{
{
  struct elf_link_hash_table elf;
  struct elf_link_hash_table elf;
 
 
 
  struct spu_elf_params *params;
 
 
  /* Shortcuts to overlay sections.  */
  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *ovtab;
 
  asection *init;
  asection *toe;
  asection *toe;
  asection **ovl_sec;
  asection **ovl_sec;
 
 
  /* Count of stubs in each overlay section.  */
  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;
  unsigned int *stub_count;
 
 
  /* The stub section for each overlay section.  */
  /* The stub section for each overlay section.  */
  asection **stub_sec;
  asection **stub_sec;
 
 
  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_entry[2];
  struct elf_link_hash_entry *ovly_return;
 
  unsigned long ovly_load_r_symndx;
 
 
 
  /* Number of overlay buffers.  */
  /* Number of overlay buffers.  */
  unsigned int num_buf;
  unsigned int num_buf;
 
 
  /* Total number of overlays.  */
  /* Total number of overlays.  */
  unsigned int num_overlays;
  unsigned int num_overlays;
 
 
  /* Set if we should emit symbols for stubs.  */
  /* For soft icache.  */
  unsigned int emit_stub_syms:1;
  unsigned int line_size_log2;
 
  unsigned int num_lines_log2;
 
  unsigned int fromelem_size_log2;
 
 
  /* Set if we want stubs on calls out of overlay regions to
  /* How much memory we have.  */
     non-overlay regions.  */
  unsigned int local_store;
  unsigned int non_overlay_stubs : 1;
 
 
 
  /* Set on error.  */
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int stub_err : 1;
  unsigned int non_ovly_stub;
 
 
  /* Set if stack size analysis should be done.  */
  /* Pointer to the fixup section */
  unsigned int stack_analysis : 1;
  asection *sfixup;
 
 
  /* Set if __stack_* syms will be emitted.  */
  /* Set on error.  */
  unsigned int emit_stack_syms : 1;
  unsigned int stub_err : 1;
};
};
 
 
/* Hijack the generic got fields for overlay stub accounting.  */
/* Hijack the generic got fields for overlay stub accounting.  */
 
 
struct got_entry
struct got_entry
{
{
  struct got_entry *next;
  struct got_entry *next;
  unsigned int ovl;
  unsigned int ovl;
 
  union {
 
    bfd_vma addend;
 
    bfd_vma br_addr;
 
  };
  bfd_vma stub_addr;
  bfd_vma stub_addr;
};
};
 
 
#define spu_hash_table(p) \
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
  ((struct spu_link_hash_table *) ((p)->hash))
 
 
 
struct call_info
 
{
 
  struct function_info *fun;
 
  struct call_info *next;
 
  unsigned int count;
 
  unsigned int max_depth;
 
  unsigned int is_tail : 1;
 
  unsigned int is_pasted : 1;
 
  unsigned int broken_cycle : 1;
 
  unsigned int priority : 13;
 
};
 
 
 
struct function_info
 
{
 
  /* List of functions called.  Also branches to hot/cold part of
 
     function.  */
 
  struct call_info *call_list;
 
  /* For hot/cold part of function, point to owner.  */
 
  struct function_info *start;
 
  /* Symbol at start of function.  */
 
  union {
 
    Elf_Internal_Sym *sym;
 
    struct elf_link_hash_entry *h;
 
  } u;
 
  /* Function section.  */
 
  asection *sec;
 
  asection *rodata;
 
  /* Where last called from, and number of sections called from.  */
 
  asection *last_caller;
 
  unsigned int call_count;
 
  /* Address range of (this part of) function.  */
 
  bfd_vma lo, hi;
 
  /* Offset where we found a store of lr, or -1 if none found.  */
 
  bfd_vma lr_store;
 
  /* Offset where we found the stack adjustment insn.  */
 
  bfd_vma sp_adjust;
 
  /* Stack usage.  */
 
  int stack;
 
  /* Distance from root of call tree.  Tail and hot/cold branches
 
     count as one deeper.  We aren't counting stack frames here.  */
 
  unsigned int depth;
 
  /* Set if global symbol.  */
 
  unsigned int global : 1;
 
  /* Set if known to be start of function (as distinct from a hunk
 
     in hot/cold section).  */
 
  unsigned int is_func : 1;
 
  /* Set if not a root node.  */
 
  unsigned int non_root : 1;
 
  /* Flags used during call tree traversal.  It's cheaper to replicate
 
     the visit flags than have one which needs clearing after a traversal.  */
 
  unsigned int visit1 : 1;
 
  unsigned int visit2 : 1;
 
  unsigned int marking : 1;
 
  unsigned int visit3 : 1;
 
  unsigned int visit4 : 1;
 
  unsigned int visit5 : 1;
 
  unsigned int visit6 : 1;
 
  unsigned int visit7 : 1;
 
};
 
 
 
struct spu_elf_stack_info
 
{
 
  int num_fun;
 
  int max_fun;
 
  /* Variable size array describing functions, one per contiguous
 
     address range belonging to a function.  */
 
  struct function_info fun[1];
 
};
 
 
 
static struct function_info *find_function (asection *, bfd_vma,
 
                                            struct bfd_link_info *);
 
 
/* Create a spu ELF linker hash table.  */
/* Create a spu ELF linker hash table.  */
 
 
static struct bfd_link_hash_table *
static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
spu_elf_link_hash_table_create (bfd *abfd)
{
{
Line 334... Line 457...
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
  return &htab->elf.root;
}
}
 
 
 
void
 
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
 
{
 
  bfd_vma max_branch_log2;
 
 
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  htab->params = params;
 
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
 
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
 
 
 
  /* For the software i-cache, we provide a "from" list whose size
 
     is a power-of-two number of quadwords, big enough to hold one
 
     byte per outgoing branch.  Compute this number here.  */
 
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
 
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
 
}
 
 
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
 
 
static bfd_boolean
static bfd_boolean
Line 382... Line 522...
 
 
      if (locsyms == NULL)
      if (locsyms == NULL)
        {
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
          if (locsyms == NULL)
            {
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
              size_t symcount = symtab_hdr->sh_info;
                                            symtab_hdr->sh_info,
 
                                            0, NULL, NULL, NULL);
              /* If we are reading symbols into the contents, then
 
                 read the global syms too.  This is done to cache
 
                 syms for later stack analysis.  */
 
              if ((unsigned char **) locsymsp == &symtab_hdr->contents)
 
                symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
 
              locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
 
                                              NULL, NULL, NULL);
 
            }
 
          if (locsyms == NULL)
          if (locsyms == NULL)
            return FALSE;
            return FALSE;
          *locsymsp = locsyms;
          *locsymsp = locsyms;
        }
        }
      sym = locsyms + r_symndx;
      sym = locsyms + r_symndx;
Line 406... Line 538...
 
 
      if (symp != NULL)
      if (symp != NULL)
        *symp = sym;
        *symp = sym;
 
 
      if (symsecp != NULL)
      if (symsecp != NULL)
        {
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
          asection *symsec = NULL;
 
          if ((sym->st_shndx != SHN_UNDEF
 
               && sym->st_shndx < SHN_LORESERVE)
 
              || sym->st_shndx > SHN_HIRESERVE)
 
            symsec = bfd_section_from_elf_index (ibfd, sym->st_shndx);
 
          *symsecp = symsec;
 
        }
 
    }
    }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
/* Create the note section if not already present.  This is done early so
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */
   that the linker maps the sections to the right place in the output.  */
 
 
bfd_boolean
bfd_boolean
spu_elf_create_sections (bfd *output_bfd,
spu_elf_create_sections (struct bfd_link_info *info)
                         struct bfd_link_info *info,
 
                         int stack_analysis,
 
                         int emit_stack_syms)
 
{
{
  bfd *ibfd;
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  bfd *ibfd;
  /* Stash some options away where we can get at them later.  */
 
  htab->stack_analysis = stack_analysis;
 
  htab->emit_stack_syms = emit_stack_syms;
 
 
 
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;
      break;
 
 
Line 455... Line 573...
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;
        return FALSE;
 
 
      name_len = strlen (bfd_get_filename (output_bfd)) + 1;
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;
      size += (name_len + 3) & -4;
 
 
      if (!bfd_set_section_size (ibfd, s, size))
      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;
        return FALSE;
Line 471... Line 589...
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (output_bfd), name_len);
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
      s->contents = data;
    }
    }
 
 
 
  if (htab->params->emit_fixups)
 
    {
 
      asection *s;
 
      flagword flags;
 
      ibfd = info->input_bfds;
 
      flags = SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
 
              | SEC_IN_MEMORY;
 
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
 
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
 
        return FALSE;
 
      htab->sfixup = s;
 
    }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
/* qsort predicate to sort sections by vma.  */
/* qsort predicate to sort sections by vma.  */
 
 
Line 493... Line 624...
    return delta < 0 ? -1 : 1;
    return delta < 0 ? -1 : 1;
 
 
  return (*s1)->index - (*s2)->index;
  return (*s1)->index - (*s2)->index;
}
}
 
 
/* Identify overlays in the output bfd, and number them.  */
/* Identify overlays in the output bfd, and number them.
 
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */
 
 
bfd_boolean
int
spu_elf_find_overlays (bfd *output_bfd, struct bfd_link_info *info)
spu_elf_find_overlays (struct bfd_link_info *info)
{
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  asection *s;
  bfd_vma ovl_end;
  bfd_vma ovl_end;
 
  static const char *const entry_names[2][2] = {
 
    { "__ovly_load", "__icache_br_handler" },
 
    { "__ovly_return", "__icache_call_handler" }
 
  };
 
 
  if (output_bfd->section_count < 2)
  if (info->output_bfd->section_count < 2)
    return FALSE;
    return 1;
 
 
  alloc_sec = bfd_malloc (output_bfd->section_count * sizeof (*alloc_sec));
  alloc_sec
 
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
  if (alloc_sec == NULL)
    return FALSE;
    return 0;
 
 
  /* Pick out all the alloced sections.  */
  /* Pick out all the alloced sections.  */
  for (n = 0, s = output_bfd->sections; s != NULL; s = s->next)
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
        && s->size != 0)
      alloc_sec[n++] = s;
      alloc_sec[n++] = s;
 
 
  if (n == 0)
  if (n == 0)
    {
    {
      free (alloc_sec);
      free (alloc_sec);
      return FALSE;
      return 1;
    }
    }
 
 
  /* Sort them by vma.  */
  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
 
 
 
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
 
  if (htab->params->ovly_flavour == ovly_soft_icache)
 
    {
 
      unsigned int prev_buf = 0, set_id = 0;
 
 
 
      /* Look for an overlapping vma to find the first overlay section.  */
 
      bfd_vma vma_start = 0;
 
 
 
      for (i = 1; i < n; i++)
 
        {
 
          s = alloc_sec[i];
 
          if (s->vma < ovl_end)
 
            {
 
              asection *s0 = alloc_sec[i - 1];
 
              vma_start = s0->vma;
 
              ovl_end = (s0->vma
 
                         + ((bfd_vma) 1
 
                            << (htab->num_lines_log2 + htab->line_size_log2)));
 
              --i;
 
              break;
 
            }
 
          else
 
            ovl_end = s->vma + s->size;
 
        }
 
 
 
      /* Now find any sections within the cache area.  */
 
      for (ovl_index = 0, num_buf = 0; i < n; i++)
 
        {
 
          s = alloc_sec[i];
 
          if (s->vma >= ovl_end)
 
            break;
 
 
 
          /* A section in an overlay area called .ovl.init is not
 
             an overlay, in the sense that it might be loaded in
 
             by the overlay manager, but rather the initial
 
             section contents for the overlay buffer.  */
 
          if (strncmp (s->name, ".ovl.init", 9) != 0)
 
            {
 
              num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
 
              set_id = (num_buf == prev_buf)? set_id + 1 : 0;
 
              prev_buf = num_buf;
 
 
 
              if ((s->vma - vma_start) & (htab->params->line_size - 1))
 
                {
 
                  info->callbacks->einfo (_("%X%P: overlay section %A "
 
                                            "does not start on a cache line.\n"),
 
                                          s);
 
                  bfd_set_error (bfd_error_bad_value);
 
                  return 0;
 
                }
 
              else if (s->size > htab->params->line_size)
 
                {
 
                  info->callbacks->einfo (_("%X%P: overlay section %A "
 
                                            "is larger than a cache line.\n"),
 
                                          s);
 
                  bfd_set_error (bfd_error_bad_value);
 
                  return 0;
 
                }
 
 
 
              alloc_sec[ovl_index++] = s;
 
              spu_elf_section_data (s)->u.o.ovl_index
 
                = (set_id << htab->num_lines_log2) + num_buf;
 
              spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
 
            }
 
        }
 
 
 
      /* Ensure there are no more overlay sections.  */
 
      for ( ; i < n; i++)
 
        {
 
          s = alloc_sec[i];
 
          if (s->vma < ovl_end)
 
            {
 
              info->callbacks->einfo (_("%X%P: overlay section %A "
 
                                        "is not in cache area.\n"),
 
                                      alloc_sec[i-1]);
 
              bfd_set_error (bfd_error_bad_value);
 
              return 0;
 
            }
 
          else
 
            ovl_end = s->vma + s->size;
 
        }
 
    }
 
  else
 
    {
  /* Look for overlapping vmas.  Any with overlap must be overlays.
  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions.  */
     Count them.  Also count the number of overlay regions.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
 
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
    {
    {
      s = alloc_sec[i];
      s = alloc_sec[i];
      if (s->vma < ovl_end)
      if (s->vma < ovl_end)
        {
        {
          asection *s0 = alloc_sec[i - 1];
          asection *s0 = alloc_sec[i - 1];
 
 
          if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
          if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
            {
            {
 
                  ++num_buf;
 
                  if (strncmp (s0->name, ".ovl.init", 9) != 0)
 
                    {
              alloc_sec[ovl_index] = s0;
              alloc_sec[ovl_index] = s0;
              spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
              spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
              spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
                      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
 
                    }
 
                  else
 
                    ovl_end = s->vma + s->size;
            }
            }
 
              if (strncmp (s->name, ".ovl.init", 9) != 0)
 
                {
          alloc_sec[ovl_index] = s;
          alloc_sec[ovl_index] = s;
          spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
          spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
          spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
          spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
          if (s0->vma != s->vma)
          if (s0->vma != s->vma)
            {
            {
              info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
                      info->callbacks->einfo (_("%X%P: overlay sections %A "
                                        "do not start at the same address.\n"),
                                                "and %A do not start at the "
 
                                                "same address.\n"),
                                      s0, s);
                                      s0, s);
              return FALSE;
                      bfd_set_error (bfd_error_bad_value);
 
                      return 0;
            }
            }
          if (ovl_end < s->vma + s->size)
          if (ovl_end < s->vma + s->size)
            ovl_end = s->vma + s->size;
            ovl_end = s->vma + s->size;
        }
        }
 
            }
      else
      else
        ovl_end = s->vma + s->size;
        ovl_end = s->vma + s->size;
    }
    }
 
    }
 
 
  htab->num_overlays = ovl_index;
  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  htab->ovl_sec = alloc_sec;
  return ovl_index != 0;
 
 
  if (ovl_index == 0)
 
    return 1;
 
 
 
  for (i = 0; i < 2; i++)
 
    {
 
      const char *name;
 
      struct elf_link_hash_entry *h;
 
 
 
      name = entry_names[i][htab->params->ovly_flavour];
 
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
 
      if (h == NULL)
 
        return 0;
 
 
 
      if (h->root.type == bfd_link_hash_new)
 
        {
 
          h->root.type = bfd_link_hash_undefined;
 
          h->ref_regular = 1;
 
          h->ref_regular_nonweak = 1;
 
          h->non_elf = 0;
 
        }
 
      htab->ovly_entry[i] = h;
}
}
 
 
/* Support two sizes of overlay stubs, a slower more compact stub of two
  return 2;
   instructions, and a faster stub of four instructions.  */
}
#ifndef OVL_STUB_SIZE
 
/* Default to faster.  */
/* Non-zero to use bra in overlay stubs rather than br.  */
#define OVL_STUB_SIZE 16
#define BRA_STUBS 0
/* #define OVL_STUB_SIZE 8 */
 
#endif
#define BRA     0x30000000
#define BRSL    0x33000000
#define BRASL   0x31000000
#define BR      0x32000000
#define BR      0x32000000
 
#define BRSL    0x33000000
#define NOP     0x40200000
#define NOP     0x40200000
#define LNOP    0x00200000
#define LNOP    0x00200000
#define ILA     0x42000000
#define ILA     0x42000000
 
 
/* Return true for all relative and absolute branch instructions.
/* Return true for all relative and absolute branch instructions.
Line 621... Line 875...
is_hint (const unsigned char *insn)
is_hint (const unsigned char *insn)
{
{
  return (insn[0] & 0xfc) == 0x10;
  return (insn[0] & 0xfc) == 0x10;
}
}
 
 
/* Return TRUE if this reloc symbol should possibly go via an overlay stub.  */
/* True if INPUT_SECTION might need overlay stubs.  */
 
 
static bfd_boolean
static bfd_boolean
needs_ovl_stub (const char *sym_name,
maybe_needs_stubs (asection *input_section)
                asection *sym_sec,
 
                asection *input_section,
 
                struct spu_link_hash_table *htab,
 
                bfd_boolean is_branch)
 
{
{
  if (htab->num_overlays == 0)
  /* No stubs for debug sections and suchlike.  */
 
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;
    return FALSE;
 
 
  if (sym_sec == NULL
  /* No stubs for link-once sections that will be discarded.  */
      || sym_sec->output_section == NULL
  if (input_section->output_section == bfd_abs_section_ptr)
      || spu_elf_section_data (sym_sec->output_section) == NULL)
 
    return FALSE;
    return FALSE;
 
 
  /* setjmp always goes via an overlay stub, because then the return
  /* Don't create stubs for .eh_frame references.  */
     and hence the longjmp goes via __ovly_return.  That magically
  if (strcmp (input_section->name, ".eh_frame") == 0)
     makes setjmp/longjmp between overlays work.  */
 
  if (strncmp (sym_name, "setjmp", 6) == 0
 
      && (sym_name[6] == '\0' || sym_name[6] == '@'))
 
    return TRUE;
 
 
 
  /* Usually, symbols in non-overlay sections don't need stubs.  */
 
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
 
      && !htab->non_overlay_stubs)
 
    return FALSE;
    return FALSE;
 
 
  /* A reference from some other section to a symbol in an overlay
 
     section needs a stub.  */
 
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
 
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
 
    return TRUE;
    return TRUE;
 
 
  /* If this insn isn't a branch then we are possibly taking the
 
     address of a function and passing it out somehow.  */
 
  return !is_branch;
 
}
}
 
 
enum _insn_type { non_branch, branch, call };
enum _stub_type
 
{
 
  no_stub,
 
  call_ovl_stub,
 
  br000_ovl_stub,
 
  br001_ovl_stub,
 
  br010_ovl_stub,
 
  br011_ovl_stub,
 
  br100_ovl_stub,
 
  br101_ovl_stub,
 
  br110_ovl_stub,
 
  br111_ovl_stub,
 
  nonovl_stub,
 
  stub_error
 
};
 
 
static bfd_boolean
/* Return non-zero if this reloc symbol should go via an overlay stub.
count_stub (struct spu_link_hash_table *htab,
   Return 2 if the stub must be in non-overlay area.  */
            bfd *ibfd,
 
            asection *isec,
static enum _stub_type
            enum _insn_type insn_type,
needs_ovl_stub (struct elf_link_hash_entry *h,
            struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
            const Elf_Internal_Rela *irela)
                asection *sym_sec,
 
                asection *input_section,
 
                Elf_Internal_Rela *irela,
 
                bfd_byte *contents,
 
                struct bfd_link_info *info)
{
{
  unsigned int ovl = 0;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct got_entry *g, **head;
  enum elf_spu_reloc_type r_type;
 
  unsigned int sym_type;
 
  bfd_boolean branch, hint, call;
 
  enum _stub_type ret = no_stub;
 
  bfd_byte insn[4];
 
 
  /* If this instruction is a branch or call, we need a stub
  if (sym_sec == NULL
     for it.  One stub per function per overlay.
      || sym_sec->output_section == bfd_abs_section_ptr
     If it isn't a branch, then we are taking the address of
      || spu_elf_section_data (sym_sec->output_section) == NULL)
     this function so need a stub in the non-overlay area
    return ret;
     for it.  One stub per function.  */
 
  if (insn_type != non_branch)
 
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
 
 
 
  if (h != NULL)
  if (h != NULL)
    head = &h->got.glist;
 
  else
 
    {
    {
      if (elf_local_got_ents (ibfd) == NULL)
      /* Ensure no stubs for user supplied overlay manager syms.  */
        {
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
        return ret;
                               * sizeof (*elf_local_got_ents (ibfd)));
 
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
      /* setjmp always goes via an overlay stub, because then the return
          if (elf_local_got_ents (ibfd) == NULL)
         and hence the longjmp goes via __ovly_return.  That magically
            return FALSE;
         makes setjmp/longjmp between overlays work.  */
        }
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
 
        ret = call_ovl_stub;
    }
    }
 
 
  /* If we have a stub in the non-overlay area then there's no need
  if (h != NULL)
     for one in overlays.  */
    sym_type = h->type;
  g = *head;
  else
  if (g != NULL && g->ovl == 0)
    sym_type = ELF_ST_TYPE (sym->st_info);
    return TRUE;
 
 
 
  if (ovl == 0)
  r_type = ELF32_R_TYPE (irela->r_info);
 
  branch = FALSE;
 
  hint = FALSE;
 
  call = FALSE;
 
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
 
    {
 
      if (contents == NULL)
 
        {
 
          contents = insn;
 
          if (!bfd_get_section_contents (input_section->owner,
 
                                         input_section,
 
                                         contents,
 
                                         irela->r_offset, 4))
 
            return stub_error;
 
        }
 
      else
 
        contents += irela->r_offset;
 
 
 
      branch = is_branch (contents);
 
      hint = is_hint (contents);
 
      if (branch || hint)
 
        {
 
          call = (contents[0] & 0xfd) == 0x31;
 
          if (call
 
              && sym_type != STT_FUNC
 
              && contents != insn)
 
            {
 
              /* It's common for people to write assembly and forget
 
                 to give function symbols the right type.  Handle
 
                 calls to such symbols, but warn so that (hopefully)
 
                 people will fix their code.  We need the symbol
 
                 type to be correct to distinguish function pointer
 
                 initialisation from other pointer initialisations.  */
 
              const char *sym_name;
 
 
 
              if (h != NULL)
 
                sym_name = h->root.root.string;
 
              else
 
                {
 
                  Elf_Internal_Shdr *symtab_hdr;
 
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
 
                  sym_name = bfd_elf_sym_name (input_section->owner,
 
                                               symtab_hdr,
 
                                               sym,
 
                                               sym_sec);
 
                }
 
              (*_bfd_error_handler) (_("warning: call to non-function"
 
                                       " symbol %s defined in %B"),
 
                                     sym_sec->owner, sym_name);
 
 
 
            }
 
        }
 
    }
 
 
 
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
 
      || (sym_type != STT_FUNC
 
          && !(branch || hint)
 
          && (sym_sec->flags & SEC_CODE) == 0))
 
    return no_stub;
 
 
 
  /* Usually, symbols in non-overlay sections don't need stubs.  */
 
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
 
      && !htab->params->non_overlay_stubs)
 
    return ret;
 
 
 
  /* A reference from some other section to a symbol in an overlay
 
     section needs a stub.  */
 
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
 
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
 
    {
 
      unsigned int lrlive = 0;
 
      if (branch)
 
        lrlive = (contents[1] & 0x70) >> 4;
 
 
 
      if (!lrlive && (call || sym_type == STT_FUNC))
 
        ret = call_ovl_stub;
 
      else
 
        ret = br000_ovl_stub + lrlive;
 
    }
 
 
 
  /* If this insn isn't a branch then we are possibly taking the
 
     address of a function and passing it out somehow.  Soft-icache code
 
     always generates inline code to do indirect branches.  */
 
  if (!(branch || hint)
 
      && sym_type == STT_FUNC
 
      && htab->params->ovly_flavour != ovly_soft_icache)
 
    ret = nonovl_stub;
 
 
 
  return ret;
 
}
 
 
 
static bfd_boolean
 
count_stub (struct spu_link_hash_table *htab,
 
            bfd *ibfd,
 
            asection *isec,
 
            enum _stub_type stub_type,
 
            struct elf_link_hash_entry *h,
 
            const Elf_Internal_Rela *irela)
 
{
 
  unsigned int ovl = 0;
 
  struct got_entry *g, **head;
 
  bfd_vma addend;
 
 
 
  /* If this instruction is a branch or call, we need a stub
 
     for it.  One stub per function per overlay.
 
     If it isn't a branch, then we are taking the address of
 
     this function so need a stub in the non-overlay area
 
     for it.  One stub per function.  */
 
  if (stub_type != nonovl_stub)
 
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
 
 
 
  if (h != NULL)
 
    head = &h->got.glist;
 
  else
 
    {
 
      if (elf_local_got_ents (ibfd) == NULL)
 
        {
 
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
 
                               * sizeof (*elf_local_got_ents (ibfd)));
 
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
 
          if (elf_local_got_ents (ibfd) == NULL)
 
            return FALSE;
 
        }
 
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
 
    }
 
 
 
  if (htab->params->ovly_flavour == ovly_soft_icache)
 
    {
 
      htab->stub_count[ovl] += 1;
 
      return TRUE;
 
    }
 
 
 
  addend = 0;
 
  if (irela != NULL)
 
    addend = irela->r_addend;
 
 
 
  if (ovl == 0)
 
    {
 
      struct got_entry *gnext;
 
 
 
      for (g = *head; g != NULL; g = g->next)
 
        if (g->addend == addend && g->ovl == 0)
 
          break;
 
 
 
      if (g == NULL)
    {
    {
      struct got_entry *gnext;
 
 
 
      /* Need a new non-overlay area stub.  Zap other stubs.  */
      /* Need a new non-overlay area stub.  Zap other stubs.  */
      for (; g != NULL; g = gnext)
          for (g = *head; g != NULL; g = gnext)
        {
        {
          htab->stub_count[g->ovl] -= 1;
 
          gnext = g->next;
          gnext = g->next;
 
              if (g->addend == addend)
 
                {
 
                  htab->stub_count[g->ovl] -= 1;
          free (g);
          free (g);
        }
        }
    }
    }
 
        }
 
    }
  else
  else
    {
    {
      for (; g != NULL; g = g->next)
      for (g = *head; g != NULL; g = g->next)
        if (g->ovl == ovl)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
          break;
    }
    }
 
 
  if (g == NULL)
  if (g == NULL)
    {
    {
      g = bfd_malloc (sizeof *g);
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
      if (g == NULL)
        return FALSE;
        return FALSE;
      g->ovl = ovl;
      g->ovl = ovl;
 
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      g->next = *head;
      *head = g;
      *head = g;
 
 
      htab->stub_count[ovl] += 1;
      htab->stub_count[ovl] += 1;
    }
    }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
 
/* Support two sizes of overlay stubs, a slower more compact stub of two
 
   instructions, and a faster stub of four instructions.
 
   Soft-icache stubs are four or eight words.  */
 
 
 
static unsigned int
 
ovl_stub_size (struct spu_elf_params *params)
 
{
 
  return 16 << params->ovly_flavour >> params->compact_stub;
 
}
 
 
 
static unsigned int
 
ovl_stub_size_log2 (struct spu_elf_params *params)
 
{
 
  return 4 + params->ovly_flavour - params->compact_stub;
 
}
 
 
/* Two instruction overlay stubs look like:
/* Two instruction overlay stubs look like:
 
 
   brsl $75,__ovly_load
   brsl $75,__ovly_load
   .word target_ovl_and_address
   .word target_ovl_and_address
 
 
Line 751... Line 1165...
   Four instruction overlay stubs look like:
   Four instruction overlay stubs look like:
 
 
   ila $78,ovl_number
   ila $78,ovl_number
   lnop
   lnop
   ila $79,target_address
   ila $79,target_address
   br __ovly_load  */
   br __ovly_load
 
 
 
   Software icache stubs are:
 
 
 
   .word target_index
 
   .word target_ia;
 
   .word lrlive_branchlocalstoreaddr;
 
   brasl $75,__icache_br_handler
 
   .quad xor_pattern
 
*/
 
 
static bfd_boolean
static bfd_boolean
build_stub (struct spu_link_hash_table *htab,
build_stub (struct bfd_link_info *info,
            bfd *ibfd,
            bfd *ibfd,
            asection *isec,
            asection *isec,
            enum _insn_type insn_type,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            bfd_vma dest,
            asection *dest_sec)
            asection *dest_sec)
{
{
  unsigned int ovl;
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  struct got_entry *g, **head;
  asection *sec;
  asection *sec;
  bfd_vma val, from, to;
  bfd_vma addend, from, to, br_dest, patt;
 
  unsigned int lrlive;
 
 
  ovl = 0;
  ovl = 0;
  if (insn_type != non_branch)
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
 
 
  if (h != NULL)
  if (h != NULL)
    head = &h->got.glist;
    head = &h->got.glist;
  else
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
 
 
  g = *head;
  addend = 0;
  if (g != NULL && g->ovl == 0 && ovl != 0)
  if (irela != NULL)
    return TRUE;
    addend = irela->r_addend;
 
 
  for (; g != NULL; g = g->next)
  if (htab->params->ovly_flavour == ovly_soft_icache)
    if (g->ovl == ovl)
    {
 
      g = bfd_malloc (sizeof *g);
 
      if (g == NULL)
 
        return FALSE;
 
      g->ovl = ovl;
 
      g->br_addr = 0;
 
      if (irela != NULL)
 
        g->br_addr = (irela->r_offset
 
                      + isec->output_offset
 
                      + isec->output_section->vma);
 
      g->next = *head;
 
      *head = g;
 
    }
 
  else
 
    {
 
      for (g = *head; g != NULL; g = g->next)
 
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      break;
      break;
  if (g == NULL)
  if (g == NULL)
    abort ();
    abort ();
 
 
 
      if (g->ovl == 0 && ovl != 0)
 
        return TRUE;
 
 
  if (g->stub_addr != (bfd_vma) -1)
  if (g->stub_addr != (bfd_vma) -1)
    return TRUE;
    return TRUE;
 
    }
 
 
  sec = htab->stub_sec[ovl];
  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  g->stub_addr = from;
  to = (htab->ovly_load->root.u.def.value
  to = (htab->ovly_entry[0]->root.u.def.value
        + htab->ovly_load->root.u.def.section->output_offset
        + htab->ovly_entry[0]->root.u.def.section->output_offset
        + htab->ovly_load->root.u.def.section->output_section->vma);
        + htab->ovly_entry[0]->root.u.def.section->output_section->vma);
  val = to - from;
 
  if (OVL_STUB_SIZE == 16)
  if (((dest | to | from) & 3) != 0)
    val -= 12;
 
  if (((dest | to | from) & 3) != 0
 
      || val + 0x20000 >= 0x40000)
 
    {
    {
      htab->stub_err = 1;
      htab->stub_err = 1;
      return FALSE;
      return FALSE;
    }
    }
  ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
 
 
  if (OVL_STUB_SIZE == 16)
  if (htab->params->ovly_flavour == ovly_normal
 
      && !htab->params->compact_stub)
    {
    {
      bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
                  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
      if (!BRA_STUBS)
 
        bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
 
                    sec->contents + sec->size + 12);
 
      else
 
        bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
                  sec->contents + sec->size + 12);
                  sec->contents + sec->size + 12);
    }
    }
  else if (OVL_STUB_SIZE == 8)
  else if (htab->params->ovly_flavour == ovly_normal
 
           && htab->params->compact_stub)
    {
    {
      bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
      if (!BRA_STUBS)
 
        bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
 
                    sec->contents + sec->size);
 
      else
 
        bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size);
                  sec->contents + sec->size);
 
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
 
                  sec->contents + sec->size + 4);
 
    }
 
  else if (htab->params->ovly_flavour == ovly_soft_icache
 
           && htab->params->compact_stub)
 
    {
 
      lrlive = 0;
 
      if (stub_type == nonovl_stub)
 
        ;
 
      else if (stub_type == call_ovl_stub)
 
        /* A brsl makes lr live and *(*sp+16) is live.
 
           Tail calls have the same liveness.  */
 
        lrlive = 5;
 
      else if (!htab->params->lrlive_analysis)
 
        /* Assume stack frame and lr save.  */
 
        lrlive = 1;
 
      else if (irela != NULL)
 
        {
 
          /* Analyse branch instructions.  */
 
          struct function_info *caller;
 
          bfd_vma off;
 
 
 
          caller = find_function (isec, irela->r_offset, info);
 
          if (caller->start == NULL)
 
            off = irela->r_offset;
 
          else
 
            {
 
              struct function_info *found = NULL;
 
 
 
              /* Find the earliest piece of this function that
 
                 has frame adjusting instructions.  We might
 
                 see dynamic frame adjustment (eg. for alloca)
 
                 in some later piece, but functions using
 
                 alloca always set up a frame earlier.  Frame
 
                 setup instructions are always in one piece.  */
 
              if (caller->lr_store != (bfd_vma) -1
 
                  || caller->sp_adjust != (bfd_vma) -1)
 
                found = caller;
 
              while (caller->start != NULL)
 
                {
 
                  caller = caller->start;
 
                  if (caller->lr_store != (bfd_vma) -1
 
                      || caller->sp_adjust != (bfd_vma) -1)
 
                    found = caller;
 
                }
 
              if (found != NULL)
 
                caller = found;
 
              off = (bfd_vma) -1;
 
            }
 
 
 
          if (off > caller->sp_adjust)
 
            {
 
              if (off > caller->lr_store)
 
                /* Only *(*sp+16) is live.  */
 
                lrlive = 1;
 
              else
 
                /* If no lr save, then we must be in a
 
                   leaf function with a frame.
 
                   lr is still live.  */
 
                lrlive = 4;
 
            }
 
          else if (off > caller->lr_store)
 
            {
 
              /* Between lr save and stack adjust.  */
 
              lrlive = 3;
 
              /* This should never happen since prologues won't
 
                 be split here.  */
 
              BFD_ASSERT (0);
 
            }
 
          else
 
            /* On entry to function.  */
 
            lrlive = 5;
 
 
 
          if (stub_type != br000_ovl_stub
 
              && lrlive != stub_type - br000_ovl_stub)
 
            info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
 
                                      "from analysis (%u)\n"),
 
                                    isec, irela->r_offset, lrlive,
 
                                    stub_type - br000_ovl_stub);
 
        }
 
 
 
      /* If given lrlive info via .brinfo, use it.  */
 
      if (stub_type > br000_ovl_stub)
 
        lrlive = stub_type - br000_ovl_stub;
 
 
      val = (dest & 0x3ffff) | (ovl << 14);
      if (ovl == 0)
      bfd_put_32 (sec->owner, val,
        to = (htab->ovly_entry[1]->root.u.def.value
 
              + htab->ovly_entry[1]->root.u.def.section->output_offset
 
              + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
 
 
 
      /* The branch that uses this stub goes to stub_addr + 4.  We'll
 
         set up an xor pattern that can be used by the icache manager
 
         to modify this branch to go directly to its destination.  */
 
      g->stub_addr += 4;
 
      br_dest = g->stub_addr;
 
      if (irela == NULL)
 
        {
 
          /* Except in the case of _SPUEAR_ stubs, the branch in
 
             question is the one in the stub itself.  */
 
          BFD_ASSERT (stub_type == nonovl_stub);
 
          g->br_addr = g->stub_addr;
 
          br_dest = to;
 
        }
 
 
 
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
 
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
 
                  sec->contents + sec->size);
 
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size + 4);
                  sec->contents + sec->size + 4);
 
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
 
                  sec->contents + sec->size + 8);
 
      patt = dest ^ br_dest;
 
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
 
        patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
 
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
 
                  sec->contents + sec->size + 12);
 
 
 
      if (ovl == 0)
 
        /* Extra space for linked list entries.  */
 
        sec->size += 16;
    }
    }
  else
  else
    abort ();
    abort ();
  sec->size += OVL_STUB_SIZE;
 
 
 
  if (htab->emit_stub_syms)
  sec->size += ovl_stub_size (htab->params);
 
 
 
  if (htab->params->emit_stub_syms)
    {
    {
      size_t len;
      size_t len;
      char *name;
      char *name;
      int add;
      int add;
 
 
Line 870... Line 1439...
        return FALSE;
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
      if (h->root.type == bfd_link_hash_new)
        {
        {
          h->root.type = bfd_link_hash_defined;
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->root.u.def.section = sec;
          h->root.u.def.value = sec->size - OVL_STUB_SIZE;
          h->size = ovl_stub_size (htab->params);
          h->size = OVL_STUB_SIZE;
          h->root.u.def.value = sec->size - h->size;
          h->type = STT_FUNC;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->forced_local = 1;
Line 892... Line 1461...
static bfd_boolean
static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
     invoked by the PPU.  */
 
  struct bfd_link_info *info = inf;
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  asection *sym_sec;
 
 
  if ((h->root.type == bfd_link_hash_defined
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
 
      && (sym_sec = h->root.u.def.section) != NULL
 
      && sym_sec->output_section != bfd_abs_section_ptr
 
      && spu_elf_section_data (sym_sec->output_section) != NULL
 
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
 
          || htab->params->non_overlay_stubs))
    {
    {
      struct spu_link_hash_table *htab = inf;
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
 
 
      count_stub (htab, NULL, NULL, non_branch, h, NULL);
 
    }
    }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
static bfd_boolean
static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
     invoked by the PPU.  */
 
  struct bfd_link_info *info = inf;
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  asection *sym_sec;
 
 
  if ((h->root.type == bfd_link_hash_defined
  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
 
      && (sym_sec = h->root.u.def.section) != NULL
 
      && sym_sec->output_section != bfd_abs_section_ptr
 
      && spu_elf_section_data (sym_sec->output_section) != NULL
 
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
 
          || htab->params->non_overlay_stubs))
    {
    {
      struct spu_link_hash_table *htab = inf;
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
 
                         h->root.u.def.value, sym_sec);
      build_stub (htab, NULL, NULL, non_branch, h, NULL,
 
                  h->root.u.def.value, h->root.u.def.section);
 
    }
    }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
/* Size or build stubs.  */
/* Size or build stubs.  */
 
 
static bfd_boolean
static bfd_boolean
process_stubs (bfd *output_bfd,
process_stubs (struct bfd_link_info *info, bfd_boolean build)
               struct bfd_link_info *info,
 
               bfd_boolean build)
 
{
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  bfd *ibfd;
 
 
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
    {
      extern const bfd_target bfd_elf32_spu_vec;
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;
      Elf_Internal_Sym *local_syms = NULL;
      void *psyms;
 
 
 
      if (ibfd->xvec != &bfd_elf32_spu_vec)
      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;
        continue;
 
 
      /* We'll need the symbol table in a second.  */
      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
      if (symtab_hdr->sh_info == 0)
        continue;
        continue;
 
 
      /* Arrange to read and keep global syms for later stack analysis.  */
 
      psyms = &local_syms;
 
      if (htab->stack_analysis)
 
        psyms = &symtab_hdr->contents;
 
 
 
      /* Walk over each section attached to the input bfd.  */
      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
 
 
          /* If there aren't any relocs, then there's nothing more to do.  */
          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
          if ((isec->flags & SEC_RELOC) == 0
              || (isec->flags & SEC_ALLOC) == 0
 
              || (isec->flags & SEC_LOAD) == 0
 
              || isec->reloc_count == 0)
              || isec->reloc_count == 0)
            continue;
            continue;
 
 
          /* If this section is a link-once section that will be
          if (!maybe_needs_stubs (isec))
             discarded, then don't create any stubs.  */
 
          if (isec->output_section == NULL
 
              || isec->output_section->owner != output_bfd)
 
            continue;
            continue;
 
 
          /* Get the relocs.  */
          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
                                                       info->keep_memory);
Line 989... Line 1559...
              enum elf_spu_reloc_type r_type;
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              unsigned int r_indx;
              asection *sym_sec;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              struct elf_link_hash_entry *h;
              const char *sym_name;
              enum _stub_type stub_type;
              unsigned int sym_type;
 
              enum _insn_type insn_type;
 
 
 
              r_type = ELF32_R_TYPE (irela->r_info);
              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);
 
 
              if (r_type >= R_SPU_max)
              if (r_type >= R_SPU_max)
Line 1011... Line 1579...
                    free (local_syms);
                    free (local_syms);
                  return FALSE;
                  return FALSE;
                }
                }
 
 
              /* Determine the reloc target section.  */
              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;
                goto error_ret_free_internal;
 
 
              if (sym_sec == NULL
              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                  || sym_sec->output_section == NULL
                                          NULL, info);
                  || sym_sec->output_section->owner != output_bfd)
              if (stub_type == no_stub)
                continue;
 
 
 
              /* Ensure no stubs for user supplied overlay manager syms.  */
 
              if (h != NULL
 
                  && (strcmp (h->root.root.string, "__ovly_load") == 0
 
                      || strcmp (h->root.root.string, "__ovly_return") == 0))
 
                continue;
                continue;
 
              else if (stub_type == stub_error)
              insn_type = non_branch;
 
              if (r_type == R_SPU_REL16
 
                  || r_type == R_SPU_ADDR16)
 
                {
 
                  unsigned char insn[4];
 
 
 
                  if (!bfd_get_section_contents (ibfd, isec, insn,
 
                                                 irela->r_offset, 4))
 
                    goto error_ret_free_internal;
                    goto error_ret_free_internal;
 
 
                  if (is_branch (insn) || is_hint (insn))
 
                    {
 
                      insn_type = branch;
 
                      if ((insn[0] & 0xfd) == 0x31)
 
                        insn_type = call;
 
                    }
 
                }
 
 
 
              /* We are only interested in function symbols.  */
 
              if (h != NULL)
 
                {
 
                  sym_type = h->type;
 
                  sym_name = h->root.root.string;
 
                }
 
              else
 
                {
 
                  sym_type = ELF_ST_TYPE (sym->st_info);
 
                  sym_name = bfd_elf_sym_name (sym_sec->owner,
 
                                               symtab_hdr,
 
                                               sym,
 
                                               sym_sec);
 
                }
 
 
 
              if (sym_type != STT_FUNC)
 
                {
 
                  /* It's common for people to write assembly and forget
 
                     to give function symbols the right type.  Handle
 
                     calls to such symbols, but warn so that (hopefully)
 
                     people will fix their code.  We need the symbol
 
                     type to be correct to distinguish function pointer
 
                     initialisation from other pointer initialisation.  */
 
                  if (insn_type == call)
 
                    (*_bfd_error_handler) (_("warning: call to non-function"
 
                                             " symbol %s defined in %B"),
 
                                           sym_sec->owner, sym_name);
 
                  else if (insn_type == non_branch)
 
                    continue;
 
                }
 
 
 
              if (!needs_ovl_stub (sym_name, sym_sec, isec, htab,
 
                                   insn_type != non_branch))
 
                continue;
 
 
 
              if (htab->stub_count == NULL)
              if (htab->stub_count == NULL)
                {
                {
                  bfd_size_type amt;
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  htab->stub_count = bfd_zmalloc (amt);
Line 1089... Line 1600...
                    goto error_ret_free_internal;
                    goto error_ret_free_internal;
                }
                }
 
 
              if (!build)
              if (!build)
                {
                {
                  if (!count_stub (htab, ibfd, isec, insn_type, h, irela))
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                    goto error_ret_free_internal;
                }
                }
              else
              else
                {
                {
                  bfd_vma dest;
                  bfd_vma dest;
 
 
                  if (h != NULL)
                  if (h != NULL)
                    dest = h->root.u.def.value;
                    dest = h->root.u.def.value;
                  else
                  else
                    dest = sym->st_value;
                    dest = sym->st_value;
                  if (!build_stub (htab, ibfd, isec, insn_type, h, irela,
                  dest += irela->r_addend;
 
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                    goto error_ret_free_internal;
                }
                }
            }
            }
 
 
Line 1124... Line 1636...
    }
    }
 
 
  return TRUE;
  return TRUE;
}
}
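/* Editor's note (not part of the diff): in the new revision process_stubs
   is driven twice over the same reloc walk -- once from spu_elf_size_stubs
   with BUILD == FALSE, where count_stub only tallies how many stubs each
   overlay needs, and once from spu_elf_build_stubs with BUILD == TRUE,
   where build_stub emits the stub code into the (by then allocated)
   .stub section contents.  */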
 
 
/* Allocate space for overlay call and return stubs.  */
/* Allocate space for overlay call and return stubs.
 
   Return 0 on error, 1 if no overlays, 2 otherwise.  */
 
 
int
int
spu_elf_size_stubs (bfd *output_bfd,
spu_elf_size_stubs (struct bfd_link_info *info)
                    struct bfd_link_info *info,
 
                    void (*place_spu_section) (asection *, asection *,
 
                                               const char *),
 
                    int non_overlay_stubs)
 
{
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd *ibfd;
  bfd_size_type amt;
  bfd_size_type amt;
  flagword flags;
  flagword flags;
  unsigned int i;
  unsigned int i;
  asection *stub;
  asection *stub;
 
 
  htab->non_overlay_stubs = non_overlay_stubs;
  if (!process_stubs (info, FALSE))
  if (!process_stubs (output_bfd, info, FALSE))
 
    return 0;
    return 0;
 
 
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, htab);
  htab = spu_hash_table (info);
 
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
  if (htab->stub_err)
    return 0;
    return 0;
 
 
  if (htab->stub_count == NULL)
 
    return 1;
 
 
 
  ibfd = info->input_bfds;
  ibfd = info->input_bfds;
 
  if (htab->stub_count != NULL)
 
    {
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
  if (htab->stub_sec == NULL)
    return 0;
    return 0;
 
 
  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  htab->stub_sec[0] = stub;
  if (stub == NULL
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
          || !bfd_set_section_alignment (ibfd, stub,
 
                                         ovl_stub_size_log2 (htab->params)))
    return 0;
    return 0;
  stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
  (*place_spu_section) (stub, NULL, ".text");
      if (htab->params->ovly_flavour == ovly_soft_icache)
 
        /* Extra space for linked list entries.  */
 
        stub->size += htab->stub_count[0] * 16;
 
 
  for (i = 0; i < htab->num_overlays; ++i)
  for (i = 0; i < htab->num_overlays; ++i)
    {
    {
      asection *osec = htab->ovl_sec[i];
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
      if (stub == NULL
          || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
              || !bfd_set_section_alignment (ibfd, stub,
 
                                             ovl_stub_size_log2 (htab->params)))
        return 0;
        return 0;
      stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
      (*place_spu_section) (stub, osec, NULL);
 
    }
    }
 
    }
 
 
 
  if (htab->params->ovly_flavour == ovly_soft_icache)
 
    {
 
      /* Space for icache manager tables.
 
         a) Tag array, one quadword per cache line.
 
         b) Rewrite "to" list, one quadword per cache line.
 
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
 
            a power-of-two number of full quadwords) per cache line.  */
 
 
 
      flags = SEC_ALLOC;
 
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
 
      if (htab->ovtab == NULL
 
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
 
        return 0;
 
 
 
      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
 
                          << htab->num_lines_log2;
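      /* Editor's worked example (sample values, not from the source): with
         32 cache lines (num_lines_log2 == 5) and 32 bytes of "from" list
         per line (fromelem_size_log2 == 1, i.e. two quadwords), the tables
         need (16 + 16 + 32) << 5 == 2048 bytes of .ovtab.  */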
 
 
 
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
 
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
 
      if (htab->init == NULL
 
          || !bfd_set_section_alignment (ibfd, htab->init, 4))
 
        return 0;
 
 
 
      htab->init->size = 16;
 
    }
 
  else if (htab->stub_count == NULL)
 
    return 1;
 
  else
 
    {
 /* htab->ovtab consists of two arrays.
 /* htab->ovtab consists of two arrays.
    .   struct {
    .   struct {
    .     u32 vma;
    .     u32 vma;
    .     u32 size;
    .     u32 size;
    .     u32 file_off;
    .     u32 file_off;
Line 1193... Line 1734...
    .   struct {
    .   struct {
    .     u32 mapped;
    .     u32 mapped;
    .   } _ovly_buf_table[];
    .   } _ovly_buf_table[];
    .  */
    .  */
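  /* Editor's sketch (illustrative only, not part of the diff): a C-level
     view of the run-time tables documented above.  The name of the fourth
     _ovly_table word is an assumption; the code below fills it from
     ovl_buf at offset 12.  Entry 0 describes the non-overlay area, and the
     low bit of its size word is set (p[7] = 1 below) to mark it present.  */
  struct ovly_table_entry
  {
    unsigned int vma;       /* load address of the overlay              */
    unsigned int size;      /* size rounded up to 16; low bit: present  */
    unsigned int file_off;  /* filled in later by
                               spu_elf_modify_program_headers           */
    unsigned int buf;       /* overlay buffer number (assumed name)     */
  };
  struct ovly_buf_table_entry
  {
    unsigned int mapped;    /* run-time slot where the overlay manager
                               presumably records which overlay is
                               currently resident in this buffer        */
  };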
 
 
  flags = (SEC_ALLOC | SEC_LOAD
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
 
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;
    return 0;
 
 
  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*place_spu_section) (htab->ovtab, NULL, ".data");
    }
 
 
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
    return 0;
  htab->toe->size = 16;
  htab->toe->size = 16;
  (*place_spu_section) (htab->toe, NULL, ".toe");
 
 
 
  return 2;
  return 2;
}
}
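/* Editor's sketch (hypothetical caller, not taken from ld's SPU emulation):
   one way the 0/1/2 result of spu_elf_size_stubs might be consumed.  einfo
   is ld's error reporter; everything else here is illustrative, and in the
   real emulation the placement step only happens once the overlay manager
   object has been loaded (see the comment on spu_elf_place_overlay_data
   below).  */
#if 0
  switch (spu_elf_size_stubs (&link_info))
    {
    case 0:
      einfo ("%X%P: can not size overlay stubs: %E\n");
      break;
    case 1:
      /* No overlays; nothing further to lay out.  */
      break;
    case 2:
      /* Stub and table sections exist and still need placing.  */
      spu_elf_place_overlay_data (&link_info);
      break;
    }
#endif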
 
 
/* Functions to handle embedded spu_ovl.o object.  */
/* Called from ld to place overlay manager data sections.  This is done
 
   after the overlay manager itself is loaded, mainly so that the
 
   linker's htab->init section is placed after any other .ovl.init
 
   sections.  */
 
 
static void *
void
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
spu_elf_place_overlay_data (struct bfd_link_info *info)
{
{
  return stream;
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  unsigned int i;
 
 
 
  if (htab->stub_sec != NULL)
 
    {
 
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
 
 
 
      for (i = 0; i < htab->num_overlays; ++i)
 
        {
 
          asection *osec = htab->ovl_sec[i];
 
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
 
          (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
 
        }
 
    }
 
 
 
  if (htab->params->ovly_flavour == ovly_soft_icache)
 
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
 
 
 
  if (htab->ovtab != NULL)
 
    {
 
      const char *ovout = ".data";
 
      if (htab->params->ovly_flavour == ovly_soft_icache)
 
        ovout = ".bss";
 
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
 
    }
 
 
 
  if (htab->toe != NULL)
 
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
 
}
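/* Editor's note (summary of the call sites above, nothing new): the
   params->place_spu_section callback is handed either a sibling overlay
   section or an output-section name, never both:
       (stub_sec[0],   NULL, ".text")
       (stub_sec[ovl], osec, NULL)
       (init,          NULL, ".ovl.init")
       (ovtab,         NULL, ".data" or ".bss")
       (toe,           NULL, ".toe")  */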
 
 
 
/* Functions to handle embedded spu_ovl.o object.  */
 
 
 
static void *
 
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
 
{
 
  return stream;
}
}
 
 
static file_ptr
static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
               void *stream,
               void *stream,
Line 1259... Line 1836...
                              NULL,
                              NULL,
                              NULL);
                              NULL);
  return *ovl_bfd != NULL;
  return *ovl_bfd != NULL;
}
}
 
 
 
static unsigned int
 
overlay_index (asection *sec)
 
{
 
  if (sec == NULL
 
      || sec->output_section == bfd_abs_section_ptr)
 
    return 0;
 
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
 
}
 
 
/* Define an STT_OBJECT symbol.  */
/* Define an STT_OBJECT symbol.  */
 
 
static struct elf_link_hash_entry *
static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
{
Line 1281... Line 1867...
      h->ref_regular = 1;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
      h->non_elf = 0;
    }
    }
  else
  else if (h->root.u.def.section->owner != NULL)
    {
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
                             h->root.u.def.section->owner,
                             h->root.u.def.section->owner,
                             h->root.root.string);
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
      return NULL;
    }
    }
 
  else
 
    {
 
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
 
                             h->root.root.string);
 
      bfd_set_error (bfd_error_bad_value);
 
      return NULL;
 
    }
 
 
  return h;
  return h;
}
}
 
 
/* Fill in all stubs and the overlay tables.  */
/* Fill in all stubs and the overlay tables.  */
 
 
bfd_boolean
static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
spu_elf_build_stubs (struct bfd_link_info *info)
{
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  bfd_byte *p;
  asection *s;
  asection *s;
  bfd *obfd;
  bfd *obfd;
  unsigned int i;
  unsigned int i;
 
 
  htab->emit_stub_syms = emit_syms;
  if (htab->num_overlays != 0)
  if (htab->stub_count == NULL)
    {
    return TRUE;
      for (i = 0; i < 2; i++)
 
        {
 
          h = htab->ovly_entry[i];
 
          if (h != NULL
 
              && (h->root.type == bfd_link_hash_defined
 
                  || h->root.type == bfd_link_hash_defweak)
 
              && h->def_regular)
 
            {
 
              s = h->root.u.def.section->output_section;
 
              if (spu_elf_section_data (s)->u.o.ovl_index)
 
                {
 
                  (*_bfd_error_handler) (_("%s in overlay section"),
 
                                         h->root.root.string);
 
                  bfd_set_error (bfd_error_bad_value);
 
                  return FALSE;
 
                }
 
            }
 
        }
 
    }
 
 
 
  if (htab->stub_sec != NULL)
 
    {
  for (i = 0; i <= htab->num_overlays; i++)
  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
    if (htab->stub_sec[i]->size != 0)
      {
      {
        htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
        htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                  htab->stub_sec[i]->size);
                                                  htab->stub_sec[i]->size);
Line 1320... Line 1933...
          return FALSE;
          return FALSE;
        htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
        htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
        htab->stub_sec[i]->size = 0;
        htab->stub_sec[i]->size = 0;
      }
      }
 
 
  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
      /* Fill in all the stubs.  */
  htab->ovly_load = h;
      process_stubs (info, TRUE);
  BFD_ASSERT (h != NULL
      if (!htab->stub_err)
              && (h->root.type == bfd_link_hash_defined
        elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
                  || h->root.type == bfd_link_hash_defweak)
 
              && h->def_regular);
 
 
 
  s = h->root.u.def.section->output_section;
      if (htab->stub_err)
  if (spu_elf_section_data (s)->u.o.ovl_index)
 
    {
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
          (*_bfd_error_handler) (_("overlay stub relocation overflow"));
                             h->root.u.def.section->owner);
 
      bfd_set_error (bfd_error_bad_value);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
      return FALSE;
    }
    }
 
 
  h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
 
  htab->ovly_return = h;
 
 
 
  /* Write out all the stubs.  */
 
  obfd = htab->ovtab->output_section->owner;
 
  process_stubs (obfd, info, TRUE);
 
 
 
  elf_link_hash_traverse (&htab->elf, build_spuear_stubs, htab);
 
  if (htab->stub_err)
 
    return FALSE;
 
 
 
  for (i = 0; i <= htab->num_overlays; i++)
  for (i = 0; i <= htab->num_overlays; i++)
    {
    {
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
        {
        {
          (*_bfd_error_handler)  (_("stubs don't match calculated size"));
          (*_bfd_error_handler)  (_("stubs don't match calculated size"));
          bfd_set_error (bfd_error_bad_value);
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
          return FALSE;
        }
        }
      htab->stub_sec[i]->rawsize = 0;
      htab->stub_sec[i]->rawsize = 0;
    }
    }
 
 
  if (htab->stub_err)
 
    {
 
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
 
      bfd_set_error (bfd_error_bad_value);
 
      return FALSE;
 
    }
    }
 
 
 
  if (htab->ovtab == NULL || htab->ovtab->size == 0)
 
    return TRUE;
 
 
  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
  if (htab->ovtab->contents == NULL)
    return FALSE;
    return FALSE;
 
 
  /* Write out _ovly_table.  */
 
  p = htab->ovtab->contents;
  p = htab->ovtab->contents;
 
  if (htab->params->ovly_flavour == ovly_soft_icache)
 
    {
 
      bfd_vma off;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_tag_array");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = 0;
 
      h->size = 16 << htab->num_lines_log2;
 
      off = h->size;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = 16 << htab->num_lines_log2;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = off;
 
      h->size = 16 << htab->num_lines_log2;
 
      off += h->size;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = 16 << htab->num_lines_log2;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = off;
 
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
 
      off += h->size;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
 
                                   + htab->num_lines_log2);
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = htab->fromelem_size_log2;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_base");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = htab->ovl_sec[0]->vma;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
      h->size = htab->num_buf << htab->line_size_log2;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_linesize");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = 1 << htab->line_size_log2;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = htab->line_size_log2;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = -htab->line_size_log2;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_cachesize");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
 
      h->root.u.def.section = bfd_abs_section_ptr;
 
 
 
      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
 
      if (h == NULL)
 
        return FALSE;
 
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
 
      h->root.u.def.section = bfd_abs_section_ptr;
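      /* Editor's worked example (sample values, not from the source): with
         line_size_log2 == 10 and num_lines_log2 == 5 the symbols above come
         out as __icache_linesize == 1024, __icache_cachesize == 32768 (a
         32 KiB software icache), and the *_neg_log2_* values are -10 and
         -15 respectively.  */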
 
 
 
      if (htab->init != NULL && htab->init->size != 0)
 
        {
 
          htab->init->contents = bfd_zalloc (htab->init->owner,
 
                                             htab->init->size);
 
          if (htab->init->contents == NULL)
 
            return FALSE;
 
 
 
          h = define_ovtab_symbol (htab, "__icache_fileoff");
 
          if (h == NULL)
 
            return FALSE;
 
          h->root.u.def.value = 0;
 
          h->root.u.def.section = htab->init;
 
          h->size = 8;
 
        }
 
    }
 
  else
 
    {
 
      /* Write out _ovly_table.  */
  /* set low bit of .size to mark non-overlay area as present.  */
  /* set low bit of .size to mark non-overlay area as present.  */
  p[7] = 1;
  p[7] = 1;
 
      obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
  for (s = obfd->sections; s != NULL; s = s->next)
    {
    {
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
 
 
      if (ovl_index != 0)
      if (ovl_index != 0)
        {
        {
          unsigned long off = ovl_index * 16;
          unsigned long off = ovl_index * 16;
          unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
          unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
 
 
          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
              bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
 
                          p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */
          /* file_off written later in spu_elf_modify_program_headers.  */
          bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
          bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
        }
        }
    }
    }
 
 
Line 1412... Line 2119...
  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
  if (h == NULL)
    return FALSE;
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  h->size = 0;
  h->size = 0;
 
    }
 
 
  h = define_ovtab_symbol (htab, "_EAR_");
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
  if (h == NULL)
    return FALSE;
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.section = htab->toe;
Line 1423... Line 2131...
  h->size = 16;
  h->size = 16;
 
 
  return TRUE;
  return TRUE;
}
}
 
 
 
/* Check that all loadable section VMAs lie in the range
 
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
 
 
 
asection *
 
spu_elf_check_vma (struct bfd_link_info *info)
 
{
 
  struct elf_segment_map *m;
 
  unsigned int i;
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  bfd *abfd = info->output_bfd;
 
  bfd_vma hi = htab->params->local_store_hi;
 
  bfd_vma lo = htab->params->local_store_lo;
 
 
 
  htab->local_store = hi + 1 - lo;
 
 
 
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
 
    if (m->p_type == PT_LOAD)
 
      for (i = 0; i < m->count; i++)
 
        if (m->sections[i]->size != 0
 
            && (m->sections[i]->vma < lo
 
                || m->sections[i]->vma > hi
 
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
 
          return m->sections[i];
 
 
 
  return NULL;
 
}
 
 
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */
   Search for stack adjusting insns, and return the sp delta.
 
   If a store of lr is found save the instruction offset to *LR_STORE.
 
   If a stack adjusting instruction is found, save that offset to
 
   *SP_ADJUST.  */
 
 
static int
static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
find_function_stack_adjust (asection *sec,
 
                            bfd_vma offset,
 
                            bfd_vma *lr_store,
 
                            bfd_vma *sp_adjust)
{
{
  int unrecog;
 
  int reg[128];
  int reg[128];
 
 
  memset (reg, 0, sizeof (reg));
  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
    {
      unsigned char buf[4];
      unsigned char buf[4];
      int rt, ra;
      int rt, ra;
      int imm;
      int imm;
 
 
      /* Assume no relocs on stack adjusting insns.  */
      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
        break;
        break;
 
 
 
      rt = buf[3] & 0x7f;
 
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
 
 
      if (buf[0] == 0x24 /* stqd */)
      if (buf[0] == 0x24 /* stqd */)
 
        {
 
          if (rt == 0 /* lr */ && ra == 1 /* sp */)
 
            *lr_store = offset;
        continue;
        continue;
 
        }
 
 
      rt = buf[3] & 0x7f;
 
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
 
      /* Partly decoded immediate field.  */
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
 
 
      if (buf[0] == 0x1c /* ai */)
      if (buf[0] == 0x1c /* ai */)
        {
        {
Line 1459... Line 2204...
          imm = (imm ^ 0x200) - 0x200;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;
          reg[rt] = reg[ra] + imm;
 
 
          if (rt == 1 /* sp */)
          if (rt == 1 /* sp */)
            {
            {
              if (imm > 0)
              if (reg[rt] > 0)
                break;
                break;
 
              *sp_adjust = offset;
              return reg[rt];
              return reg[rt];
            }
            }
        }
        }
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
        {
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
 
 
          reg[rt] = reg[ra] + reg[rb];
          reg[rt] = reg[ra] + reg[rb];
          if (rt == 1)
          if (rt == 1)
 
            {
 
              if (reg[rt] > 0)
 
                break;
 
              *sp_adjust = offset;
 
              return reg[rt];
 
            }
 
        }
 
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
 
        {
 
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
 
 
 
          reg[rt] = reg[rb] - reg[ra];
 
          if (rt == 1)
 
            {
 
              if (reg[rt] > 0)
 
                break;
 
              *sp_adjust = offset;
            return reg[rt];
            return reg[rt];
        }
        }
 
        }
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
        {
        {
          if (buf[0] >= 0x42 /* ila */)
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;
            imm |= (buf[0] & 1) << 17;
          else
          else
Line 1483... Line 2247...
              imm &= 0xffff;
              imm &= 0xffff;
 
 
              if (buf[0] == 0x40 /* il */)
              if (buf[0] == 0x40 /* il */)
                {
                {
                  if ((buf[1] & 0x80) == 0)
                  if ((buf[1] & 0x80) == 0)
                    goto unknown_insn;
                    continue;
                  imm = (imm ^ 0x8000) - 0x8000;
                  imm = (imm ^ 0x8000) - 0x8000;
                }
                }
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
                imm <<= 16;
                imm <<= 16;
            }
            }
Line 1504... Line 2268...
          imm >>= 7;
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
          reg[rt] = reg[ra] | imm;
          continue;
          continue;
        }
        }
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
               || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
        {
 
          reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
 
                     | ((imm & 0x4000) ? 0x00ff0000 : 0)
 
                     | ((imm & 0x2000) ? 0x0000ff00 : 0)
 
                     | ((imm & 0x1000) ? 0x000000ff : 0));
 
          continue;
 
        }
 
      else if (buf[0] == 0x16 /* andbi */)
 
        {
 
          imm >>= 7;
 
          imm &= 0xff;
 
          imm |= imm << 8;
 
          imm |= imm << 16;
 
          reg[rt] = reg[ra] & imm;
 
          continue;
 
        }
 
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
        {
        {
          /* Used in pic reg load.  Say rt is trashed.  */
          /* Used in pic reg load.  Say rt is trashed.  Won't be used
 
             in stack adjust, but we need to continue past this branch.  */
          reg[rt] = 0;
          reg[rt] = 0;
          continue;
          continue;
        }
        }
      else if (is_branch (buf) || is_indirect_branch (buf))
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
        /* If we hit a branch then we must be out of the prologue.  */
        break;
        break;
    unknown_insn:
 
      ++unrecog;
 
    }
    }
 
 
  return 0;
  return 0;
}
}
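/* Editor's worked example (illustrative, not part of the source): a typical
   SPU prologue such as

       stqd  $lr,16($sp)     encoded as 0x24004080
       ai    $sp,$sp,-80     encoded as 0x1cec0081

   is handled by the scan above as follows.  For the first word buf[0] is
   0x24 with rt == 0 (lr) and ra == 1 (sp), so *LR_STORE records its offset.
   For the second word buf[0] is 0x1c, rt == ra == 1, and the 10-bit
   immediate decodes as (0xec << 2) | (0x00 >> 6) == 944, which sign-extends
   to -80; reg[1] becomes -80, *SP_ADJUST records the offset, and the
   function returns -80 (so the caller stores a stack size of 80).  */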
 
 
Line 1551... Line 2330...
    return delta < 0 ? -1 : 1;
    return delta < 0 ? -1 : 1;
 
 
  return *s1 < *s2 ? -1 : 1;
  return *s1 < *s2 ? -1 : 1;
}
}
 
 
struct call_info
 
{
 
  struct function_info *fun;
 
  struct call_info *next;
 
  int is_tail;
 
};
 
 
 
struct function_info
 
{
 
  /* List of functions called.  Also branches to hot/cold part of
 
     function.  */
 
  struct call_info *call_list;
 
  /* For hot/cold part of function, point to owner.  */
 
  struct function_info *start;
 
  /* Symbol at start of function.  */
 
  union {
 
    Elf_Internal_Sym *sym;
 
    struct elf_link_hash_entry *h;
 
  } u;
 
  /* Function section.  */
 
  asection *sec;
 
  /* Address range of (this part of) function.  */
 
  bfd_vma lo, hi;
 
  /* Stack usage.  */
 
  int stack;
 
  /* Set if global symbol.  */
 
  unsigned int global : 1;
 
  /* Set if known to be start of function (as distinct from a hunk
 
     in hot/cold section).  */
 
  unsigned int is_func : 1;
 
  /* Flags used during call tree traversal.  */
 
  unsigned int visit1 : 1;
 
  unsigned int non_root : 1;
 
  unsigned int visit2 : 1;
 
  unsigned int marking : 1;
 
  unsigned int visit3 : 1;
 
};
 
 
 
struct spu_elf_stack_info
 
{
 
  int num_fun;
 
  int max_fun;
 
  /* Variable size array describing functions, one per contiguous
 
     address range belonging to a function.  */
 
  struct function_info fun[1];
 
};
 
 
 
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
   entries for section SEC.  */
   entries for section SEC.  */
 
 
static struct spu_elf_stack_info *
static struct spu_elf_stack_info *
alloc_stack_info (asection *sec, int max_fun)
alloc_stack_info (asection *sec, int max_fun)
Line 1674... Line 2406...
      /* Ignore a zero-size symbol inside an existing function.  */
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
      else if (sinfo->fun[i].hi > off && size == 0)
        return &sinfo->fun[i];
        return &sinfo->fun[i];
    }
    }
 
 
  if (++i < sinfo->num_fun)
  if (sinfo->num_fun >= sinfo->max_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
 
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
 
  else if (i >= sinfo->max_fun)
 
    {
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;
      bfd_size_type old = amt;
 
 
      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
Line 1691... Line 2420...
      if (sinfo == NULL)
      if (sinfo == NULL)
        return NULL;
        return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
      sec_data->u.i.stack_info = sinfo;
    }
    }
 
 
 
  if (++i < sinfo->num_fun)
 
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
 
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  sinfo->fun[i].sec = sec;
  if (global)
  if (global)
    sinfo->fun[i].u.h = sym_h;
    sinfo->fun[i].u.h = sym_h;
  else
  else
    sinfo->fun[i].u.sym = sym_h;
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].hi = off + size;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->fun[i].lr_store = -1;
 
  sinfo->fun[i].sp_adjust = -1;
 
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
 
                                                     &sinfo->fun[i].lr_store,
 
                                                     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
  return &sinfo->fun[i];
}
}
 
 
/* Return the name of FUN.  */
/* Return the name of FUN.  */
Line 1843... Line 2580...
      else
      else
        return &sinfo->fun[mid];
        return &sinfo->fun[mid];
    }
    }
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
                          sec, offset);
                          sec, offset);
 
  bfd_set_error (bfd_error_bad_value);
  return NULL;
  return NULL;
}
}
 
 
/* Add CALLEE to CALLER call list if not already present.  */
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
 
   if CALLEE was new.  If this function returns FALSE, CALLEE should
 
   be freed.  */
 
 
static bfd_boolean
static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
insert_callee (struct function_info *caller, struct call_info *callee)
{
{
  struct call_info *p;
  struct call_info **pp, *p;
  for (p = caller->call_list; p != NULL; p = p->next)
 
 
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
    if (p->fun == callee->fun)
      {
      {
        /* Tail calls use less stack than normal calls.  Retain entry
        /* Tail calls use less stack than normal calls.  Retain entry
           for normal call over one for tail call.  */
           for normal call over one for tail call.  */
        if (p->is_tail > callee->is_tail)
        p->is_tail &= callee->is_tail;
          p->is_tail = callee->is_tail;
        if (!p->is_tail)
 
          {
 
            p->fun->start = NULL;
 
            p->fun->is_func = TRUE;
 
          }
 
        p->count += callee->count;
 
        /* Reorder list so most recent call is first.  */
 
        *pp = p->next;
 
        p->next = caller->call_list;
 
        caller->call_list = p;
        return FALSE;
        return FALSE;
      }
      }
  callee->next = caller->call_list;
  callee->next = caller->call_list;
  caller->call_list = callee;
  caller->call_list = callee;
  return TRUE;
  return TRUE;
}
}
 
 
/* Rummage through the relocs for SEC, looking for function calls.
/* Copy CALL and insert the copy into CALLER.  */
 
 
 
static bfd_boolean
 
copy_callee (struct function_info *caller, const struct call_info *call)
 
{
 
  struct call_info *callee;
 
  callee = bfd_malloc (sizeof (*callee));
 
  if (callee == NULL)
 
    return FALSE;
 
  *callee = *call;
 
  if (!insert_callee (caller, callee))
 
    free (callee);
 
  return TRUE;
 
}
 
 
 
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
 
   overlay stub sections.  */
 
 
 
static bfd_boolean
 
interesting_section (asection *s)
 
{
 
  return (s->output_section != bfd_abs_section_ptr
 
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
 
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
 
          && s->size != 0);
 
}
 
 
 
/* Rummage through the relocs for SEC, looking for function calls.
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
   mark destination symbols on calls as being functions.  Also
   mark destination symbols on calls as being functions.  Also
   look at branches, which may be tail calls or go to hot/cold
   look at branches, which may be tail calls or go to hot/cold
   section part of same function.  */
   section part of same function.  */
 
 
Line 1878... Line 2655...
mark_functions_via_relocs (asection *sec,
mark_functions_via_relocs (asection *sec,
                           struct bfd_link_info *info,
                           struct bfd_link_info *info,
                           int call_tree)
                           int call_tree)
{
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Sym *syms;
 
  void *psyms;
  void *psyms;
 
  unsigned int priority = 0;
  static bfd_boolean warned;
  static bfd_boolean warned;
 
 
 
  if (!interesting_section (sec)
 
      || sec->reloc_count == 0)
 
    return TRUE;
 
 
  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
                                               info->keep_memory);
                                               info->keep_memory);
  if (internal_relocs == NULL)
  if (internal_relocs == NULL)
    return FALSE;
    return FALSE;
 
 
  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  psyms = &symtab_hdr->contents;
  syms = *(Elf_Internal_Sym **) psyms;
 
  irela = internal_relocs;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
  for (; irela < irelaend; irela++)
    {
    {
      enum elf_spu_reloc_type r_type;
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      unsigned int r_indx;
      asection *sym_sec;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_vma val;
      unsigned char insn[4];
      bfd_boolean nonbranch, is_call;
      bfd_boolean is_call;
 
      struct function_info *caller;
      struct function_info *caller;
      struct call_info *callee;
      struct call_info *callee;
 
 
      r_type = ELF32_R_TYPE (irela->r_info);
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
          && r_type != R_SPU_ADDR16)
 
        continue;
 
 
 
      r_indx = ELF32_R_SYM (irela->r_info);
      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
        return FALSE;
        return FALSE;
 
 
      if (sym_sec == NULL
      if (sym_sec == NULL
          || sym_sec->output_section == NULL
          || sym_sec->output_section == bfd_abs_section_ptr)
          || sym_sec->output_section->owner != sec->output_section->owner)
 
        continue;
        continue;
 
 
 
      is_call = FALSE;
 
      if (!nonbranch)
 
        {
 
          unsigned char insn[4];
 
 
      if (!bfd_get_section_contents (sec->owner, sec, insn,
      if (!bfd_get_section_contents (sec->owner, sec, insn,
                                     irela->r_offset, 4))
                                     irela->r_offset, 4))
        return FALSE;
        return FALSE;
      if (!is_branch (insn))
          if (is_branch (insn))
        continue;
            {
 
              is_call = (insn[0] & 0xfd) == 0x31;
 
              priority = insn[1] & 0x0f;
 
              priority <<= 8;
 
              priority |= insn[2];
 
              priority <<= 8;
 
              priority |= insn[3];
 
              priority >>= 7;
      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
          != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
          != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
        {
        {
          if (!call_tree)
                  if (!warned)
            warned = TRUE;
                    info->callbacks->einfo
          if (!call_tree || !warned)
                      (_("%B(%A+0x%v): call to non-code section"
            info->callbacks->einfo (_("%B(%A+0x%v): call to non-code section"
                         " %B(%A), analysis incomplete\n"),
                                      " %B(%A), stack analysis incomplete\n"),
 
                                    sec->owner, sec, irela->r_offset,
                                    sec->owner, sec, irela->r_offset,
                                    sym_sec->owner, sym_sec);
                                    sym_sec->owner, sym_sec);
 
                  warned = TRUE;
 
                  continue;
 
                }
 
            }
 
          else
 
            {
 
              nonbranch = TRUE;
 
              if (is_hint (insn))
          continue;
          continue;
        }
        }
 
        }
 
 
      is_call = (insn[0] & 0xfd) == 0x31;
      if (nonbranch)
 
        {
 
          /* For --auto-overlay, count possible stubs we need for
 
             function pointer references.  */
 
          unsigned int sym_type;
 
          if (h)
 
            sym_type = h->type;
 
          else
 
            sym_type = ELF_ST_TYPE (sym->st_info);
 
          if (sym_type == STT_FUNC)
 
            {
 
              if (call_tree && spu_hash_table (info)->params->auto_overlay)
 
                spu_hash_table (info)->non_ovly_stub += 1;
 
              /* If the symbol type is STT_FUNC then this must be a
 
                 function pointer initialisation.  */
 
              continue;
 
            }
 
          /* Ignore data references.  */
 
          if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
 
              != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
 
            continue;
 
          /* Otherwise we probably have a jump table reloc for
 
             a switch statement or some other reference to a
 
             code label.  */
 
        }
 
 
      if (h)
      if (h)
        val = h->root.u.def.value;
        val = h->root.u.def.value;
      else
      else
        val = sym->st_value;
        val = sym->st_value;
Line 1984... Line 2803...
 
 
      callee->fun = find_function (sym_sec, val, info);
      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
      if (callee->fun == NULL)
        return FALSE;
        return FALSE;
      callee->is_tail = !is_call;
      callee->is_tail = !is_call;
 
      callee->is_pasted = FALSE;
 
      callee->broken_cycle = FALSE;
 
      callee->priority = priority;
 
      callee->count = nonbranch? 0 : 1;
 
      if (callee->fun->last_caller != sec)
 
        {
 
          callee->fun->last_caller = sec;
 
          callee->fun->call_count += 1;
 
        }
      if (!insert_callee (caller, callee))
      if (!insert_callee (caller, callee))
        free (callee);
        free (callee);
      else if (!is_call
      else if (!is_call
               && !callee->fun->is_func
               && !callee->fun->is_func
               && callee->fun->stack == 0)
               && callee->fun->stack == 0)
Line 1995... Line 2823...
          /* This is either a tail call or a branch from one part of
          /* This is either a tail call or a branch from one part of
             the function to another, ie. hot/cold section.  If the
             the function to another, ie. hot/cold section.  If the
             destination has been called by some other function then
             destination has been called by some other function then
             it is a separate function.  We also assume that functions
             it is a separate function.  We also assume that functions
             are not split across input files.  */
             are not split across input files.  */
          if (callee->fun->start != NULL
          if (sec->owner != sym_sec->owner)
              || sec->owner != sym_sec->owner)
 
            {
            {
              callee->fun->start = NULL;
              callee->fun->start = NULL;
              callee->fun->is_func = TRUE;
              callee->fun->is_func = TRUE;
            }
            }
 
          else if (callee->fun->start == NULL)
 
            {
 
              struct function_info *caller_start = caller;
 
              while (caller_start->start)
 
                caller_start = caller_start->start;
 
 
 
              if (caller_start != callee->fun)
 
                callee->fun->start = caller_start;
 
            }
          else
          else
            callee->fun->start = caller;
            {
 
              struct function_info *callee_start;
 
              struct function_info *caller_start;
 
              callee_start = callee->fun;
 
              while (callee_start->start)
 
                callee_start = callee_start->start;
 
              caller_start = caller;
 
              while (caller_start->start)
 
                caller_start = caller_start->start;
 
              if (caller_start != callee_start)
 
                {
 
                  callee->fun->start = NULL;
 
                  callee->fun->is_func = TRUE;
 
                }
 
            }
        }
        }
    }
    }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
/* Handle something like .init or .fini, which has a piece of a function.
/* Handle something like .init or .fini, which has a piece of a function.
   These sections are pasted together to form a single function.  */
   These sections are pasted together to form a single function.  */
 
 
static bfd_boolean
static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
pasted_function (asection *sec)
{
{
  struct bfd_link_order *l;
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  Elf_Internal_Sym *fake;
Line 2040... Line 2890...
    {
    {
      if (l->u.indirect.section == sec)
      if (l->u.indirect.section == sec)
        {
        {
          if (fun_start != NULL)
          if (fun_start != NULL)
            {
            {
              if (fun_start->start)
              struct call_info *callee = bfd_malloc (sizeof *callee);
                fun_start = fun_start->start;
              if (callee == NULL)
 
                return FALSE;
 
 
              fun->start = fun_start;
              fun->start = fun_start;
            }
              callee->fun = fun;
 
              callee->is_tail = TRUE;
 
              callee->is_pasted = TRUE;
 
              callee->broken_cycle = FALSE;
 
              callee->priority = 0;
 
              callee->count = 1;
 
              if (!insert_callee (fun_start, callee))
 
                free (callee);
          return TRUE;
          return TRUE;
        }
        }
 
          break;
 
        }
      if (l->type == bfd_indirect_link_order
      if (l->type == bfd_indirect_link_order
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL
          && sinfo->num_fun != 0)
          && sinfo->num_fun != 0)
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }
    }
 
 
  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  /* Don't return an error if we did not find a function preceding this
  return FALSE;
     section.  The section may have incorrect flags.  */
}
  return TRUE;
 
 
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
 
   overlay stub sections.  */
 
 
 
static bfd_boolean
 
interesting_section (asection *s, bfd *obfd)
 
{
 
  return (s->output_section != NULL
 
          && s->output_section->owner == obfd
 
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
 
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
 
          && s->size != 0);
 
}
}
 
 
/* Map address ranges in code sections to functions.  */
/* Map address ranges in code sections to functions.  */
 
 
static bfd_boolean
static bfd_boolean
discover_functions (bfd *output_bfd, struct bfd_link_info *info)
discover_functions (struct bfd_link_info *info)
{
{
  bfd *ibfd;
  bfd *ibfd;
  int bfd_idx;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  asection ***sec_arr;
Line 2092... Line 2941...
    return FALSE;
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
  if (sec_arr == NULL)
    return FALSE;
    return FALSE;
 
 
 
 
  for (ibfd = info->input_bfds, bfd_idx = 0;
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
       ibfd = ibfd->link_next, bfd_idx++)
    {
    {
      extern const bfd_target bfd_elf32_spu_vec;
      extern const bfd_target bfd_elf32_spu_vec;
Line 2111... Line 2959...
 
 
      /* Read all the symbols.  */
      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
      if (symcount == 0)
 
        {
 
          if (!gaps)
 
            for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
 
              if (interesting_section (sec))
 
                {
 
                  gaps = TRUE;
 
                  break;
 
                }
        continue;
        continue;
 
        }
 
 
      syms = (Elf_Internal_Sym *) symtab_hdr->contents;
      if (symtab_hdr->contents != NULL)
      if (syms == NULL)
 
        {
        {
 
          /* Don't use cached symbols since the generic ELF linker
 
             code only reads local symbols, and we need globals too.  */
 
          free (symtab_hdr->contents);
 
          symtab_hdr->contents = NULL;
 
        }
          syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
          syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
                                       NULL, NULL, NULL);
                                       NULL, NULL, NULL);
          symtab_hdr->contents = (void *) syms;
          symtab_hdr->contents = (void *) syms;
          if (syms == NULL)
          if (syms == NULL)
            return FALSE;
            return FALSE;
        }
 
 
 
      /* Select defined function symbols that are going to be output.  */
      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
      if (psyms == NULL)
        return FALSE;
        return FALSE;
Line 2139... Line 2999...
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
          {
          {
            asection *s;
            asection *s;
 
 
            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
            if (s != NULL && interesting_section (s, output_bfd))
            if (s != NULL && interesting_section (s))
              *psy++ = sy;
              *psy++ = sy;
          }
          }
      symcount = psy - psyms;
      symcount = psy - psyms;
      *psy = NULL;
      *psy = NULL;
 
 
Line 2181... Line 3041...
                return FALSE;
                return FALSE;
            }
            }
        }
        }
 
 
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
        if (interesting_section (sec, output_bfd))
        if (interesting_section (sec))
          gaps |= check_function_ranges (sec, info);
          gaps |= check_function_ranges (sec, info);
    }
    }
 
 
  if (gaps)
  if (gaps)
    {
    {
Line 2199... Line 3059...
 
 
          if (psym_arr[bfd_idx] == NULL)
          if (psym_arr[bfd_idx] == NULL)
            continue;
            continue;
 
 
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
            if (interesting_section (sec, output_bfd)
 
                && sec->reloc_count != 0)
 
              {
 
                if (!mark_functions_via_relocs (sec, info, FALSE))
                if (!mark_functions_via_relocs (sec, info, FALSE))
                  return FALSE;
                  return FALSE;
              }
              }
        }
 
 
 
      for (ibfd = info->input_bfds, bfd_idx = 0;
      for (ibfd = info->input_bfds, bfd_idx = 0;
           ibfd != NULL;
           ibfd != NULL;
           ibfd = ibfd->link_next, bfd_idx++)
           ibfd = ibfd->link_next, bfd_idx++)
        {
        {
Line 2226... Line 3082...
          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;
 
 
          gaps = FALSE;
          gaps = FALSE;
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
            if (interesting_section (sec, output_bfd))
            if (interesting_section (sec))
              gaps |= check_function_ranges (sec, info);
              gaps |= check_function_ranges (sec, info);
          if (!gaps)
          if (!gaps)
            continue;
            continue;
 
 
          /* Finally, install all globals.  */
          /* Finally, install all globals.  */
Line 2246... Line 3102...
                {
                {
                  if (!maybe_insert_function (s, sy, FALSE, FALSE))
                  if (!maybe_insert_function (s, sy, FALSE, FALSE))
                    return FALSE;
                    return FALSE;
                }
                }
            }
            }
 
        }
 
 
 
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
 
        {
 
          extern const bfd_target bfd_elf32_spu_vec;
 
          asection *sec;
 
 
 
          if (ibfd->xvec != &bfd_elf32_spu_vec)
 
            continue;
 
 
          /* Some of the symbols we've installed as marking the
          /* Some of the symbols we've installed as marking the
             beginning of functions may have a size of zero.  Extend
             beginning of functions may have a size of zero.  Extend
             the range of such functions to the beginning of the
             the range of such functions to the beginning of the
             next symbol of interest.  */
             next symbol of interest.  */
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
            if (interesting_section (sec, output_bfd))
            if (interesting_section (sec))
              {
              {
                struct _spu_elf_section_data *sec_data;
                struct _spu_elf_section_data *sec_data;
                struct spu_elf_stack_info *sinfo;
                struct spu_elf_stack_info *sinfo;
 
 
                sec_data = spu_elf_section_data (sec);
                sec_data = spu_elf_section_data (sec);
                sinfo = sec_data->u.i.stack_info;
                sinfo = sec_data->u.i.stack_info;
                if (sinfo != NULL)
                if (sinfo != NULL && sinfo->num_fun != 0)
                  {
                  {
                    int fun_idx;
                    int fun_idx;
                    bfd_vma hi = sec->size;
                    bfd_vma hi = sec->size;
 
 
                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
                      {
                      {
                        sinfo->fun[fun_idx].hi = hi;
                        sinfo->fun[fun_idx].hi = hi;
                        hi = sinfo->fun[fun_idx].lo;
                        hi = sinfo->fun[fun_idx].lo;
                      }
                      }
 
 
 
                    sinfo->fun[0].lo = 0;
                  }
                  }
                /* No symbols in this section.  Must be .init or .fini
                /* No symbols in this section.  Must be .init or .fini
                   or something similar.  */
                   or something similar.  */
                else if (!pasted_function (sec, info))
                else if (!pasted_function (sec))
                  return FALSE;
                  return FALSE;
              }
              }
        }
        }
    }
    }
 
 
Line 2295... Line 3162...
  free (sec_arr);
  free (sec_arr);
 
 
  return TRUE;
  return TRUE;
}
}
 
 
 
/* Iterate over all function_info we have collected, calling DOIT on
 
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
 
   if ROOT_ONLY.  */
 
 
 
static bfd_boolean
 
for_each_node (bfd_boolean (*doit) (struct function_info *,
 
                                    struct bfd_link_info *,
 
                                    void *),
 
               struct bfd_link_info *info,
 
               void *param,
 
               int root_only)
 
{
 
  bfd *ibfd;
 
 
 
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
 
    {
 
      extern const bfd_target bfd_elf32_spu_vec;
 
      asection *sec;
 
 
 
      if (ibfd->xvec != &bfd_elf32_spu_vec)
 
        continue;
 
 
 
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
 
        {
 
          struct _spu_elf_section_data *sec_data;
 
          struct spu_elf_stack_info *sinfo;
 
 
 
          if ((sec_data = spu_elf_section_data (sec)) != NULL
 
              && (sinfo = sec_data->u.i.stack_info) != NULL)
 
            {
 
              int i;
 
              for (i = 0; i < sinfo->num_fun; ++i)
 
                if (!root_only || !sinfo->fun[i].non_root)
 
                  if (!doit (&sinfo->fun[i], info, param))
 
                    return FALSE;
 
            }
 
        }
 
    }
 
  return TRUE;
 
}
 
 
 
/* Transfer call info attached to struct function_info entries for
 
   all of a given function's sections to the first entry.  */
 
 
 
static bfd_boolean
 
transfer_calls (struct function_info *fun,
 
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
 
                void *param ATTRIBUTE_UNUSED)
 
{
 
  struct function_info *start = fun->start;
 
 
 
  if (start != NULL)
 
    {
 
      struct call_info *call, *call_next;
 
 
 
      while (start->start != NULL)
 
        start = start->start;
 
      for (call = fun->call_list; call != NULL; call = call_next)
 
        {
 
          call_next = call->next;
 
          if (!insert_callee (start, call))
 
            free (call);
 
        }
 
      fun->call_list = NULL;
 
    }
 
  return TRUE;
 
}
 
 
/* Mark nodes in the call graph that are called by some other node.  */
/* Mark nodes in the call graph that are called by some other node.  */
 
 
static void
static bfd_boolean
mark_non_root (struct function_info *fun)
mark_non_root (struct function_info *fun,
 
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
 
               void *param ATTRIBUTE_UNUSED)
{
{
  struct call_info *call;
  struct call_info *call;
 
 
 
  if (fun->visit1)
 
    return TRUE;
  fun->visit1 = TRUE;
  fun->visit1 = TRUE;
  for (call = fun->call_list; call; call = call->next)
  for (call = fun->call_list; call; call = call->next)
    {
    {
      call->fun->non_root = TRUE;
      call->fun->non_root = TRUE;
      if (!call->fun->visit1)
      mark_non_root (call->fun, 0, 0);
        mark_non_root (call->fun);
 
    }
    }
 
  return TRUE;
}
}
 
 
/* Remove cycles from the call graph.  */
/* Remove cycles from the call graph.  Set depth of nodes.  */
 
 
static void
static bfd_boolean
call_graph_traverse (struct function_info *fun, struct bfd_link_info *info)
remove_cycles (struct function_info *fun,
 
               struct bfd_link_info *info,
 
               void *param)
{
{
  struct call_info **callp, *call;
  struct call_info **callp, *call;
 
  unsigned int depth = *(unsigned int *) param;
 
  unsigned int max_depth = depth;
 
 
 
  fun->depth = depth;
  fun->visit2 = TRUE;
  fun->visit2 = TRUE;
  fun->marking = TRUE;
  fun->marking = TRUE;
 
 
  callp = &fun->call_list;
  callp = &fun->call_list;
  while ((call = *callp) != NULL)
  while ((call = *callp) != NULL)
    {
    {
 
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
      if (!call->fun->visit2)
        call_graph_traverse (call->fun, info);
        {
 
          if (!remove_cycles (call->fun, info, &call->max_depth))
 
            return FALSE;
 
          if (max_depth < call->max_depth)
 
            max_depth = call->max_depth;
 
        }
      else if (call->fun->marking)
      else if (call->fun->marking)
        {
        {
 
          struct spu_link_hash_table *htab = spu_hash_table (info);
 
 
 
          if (!htab->params->auto_overlay
 
              && htab->params->stack_analysis)
 
            {
          const char *f1 = func_name (fun);
          const char *f1 = func_name (fun);
          const char *f2 = func_name (call->fun);
          const char *f2 = func_name (call->fun);
 
 
          info->callbacks->info (_("Stack analysis will ignore the call "
          info->callbacks->info (_("Stack analysis will ignore the call "
                                   "from %s to %s\n"),
                                   "from %s to %s\n"),
                                 f1, f2);
                                 f1, f2);
          *callp = call->next;
            }
          continue;
 
 
          call->broken_cycle = TRUE;
        }
        }
      callp = &call->next;
      callp = &call->next;
    }
    }
  fun->marking = FALSE;
  fun->marking = FALSE;
 
  *(unsigned int *) param = max_depth;
 
  return TRUE;
 
}
 
 
 
/* Check that we actually visited all nodes in remove_cycles.  If we
 
   didn't, then there is some cycle in the call graph not attached to
 
   any root node.  Arbitrarily choose a node in the cycle as a new
 
   root and break the cycle.  */
 
 
 
static bfd_boolean
 
mark_detached_root (struct function_info *fun,
 
                    struct bfd_link_info *info,
 
                    void *param)
 
{
 
  if (fun->visit2)
 
    return TRUE;
 
  fun->non_root = FALSE;
 
  *(unsigned int *) param = 0;
 
  return remove_cycles (fun, info, param);
}
}
 
 
/* Populate call_list for each function.  */
/* Populate call_list for each function.  */
 
 
static bfd_boolean
static bfd_boolean
build_call_tree (bfd *output_bfd, struct bfd_link_info *info)
build_call_tree (struct bfd_link_info *info)
{
{
  bfd *ibfd;
  bfd *ibfd;
 
  unsigned int depth;
 
 
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
    {
      extern const bfd_target bfd_elf32_spu_vec;
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;
      asection *sec;
 
 
      if (ibfd->xvec != &bfd_elf32_spu_vec)
      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;
        continue;
 
 
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        {
 
          if (!interesting_section (sec, output_bfd)
 
              || sec->reloc_count == 0)
 
            continue;
 
 
 
          if (!mark_functions_via_relocs (sec, info, TRUE))
          if (!mark_functions_via_relocs (sec, info, TRUE))
            return FALSE;
            return FALSE;
        }
        }
 
 
      /* Transfer call info from hot/cold section part of function
      /* Transfer call info from hot/cold section part of function
         to main entry.  */
         to main entry.  */
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
  if (!spu_hash_table (info)->params->auto_overlay
 
      && !for_each_node (transfer_calls, info, 0, FALSE))
 
    return FALSE;
 
 
 
  /* Find the call graph root(s).  */
 
  if (!for_each_node (mark_non_root, info, 0, FALSE))
 
    return FALSE;
 
 
 
  /* Remove cycles from the call graph.  We start from the root node(s)
 
     so that we break cycles in a reasonable place.  */
 
  depth = 0;
 
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
 
    return FALSE;
 
 
 
  return for_each_node (mark_detached_root, info, &depth, FALSE);
 
}
 
 
 
/* qsort predicate to sort calls by priority, max_depth then count.  */
 
 
 
static int
 
sort_calls (const void *a, const void *b)
        {
        {
          struct _spu_elf_section_data *sec_data;
  struct call_info *const *c1 = a;
          struct spu_elf_stack_info *sinfo;
  struct call_info *const *c2 = b;
 
  int delta;
 
 
          if ((sec_data = spu_elf_section_data (sec)) != NULL
  delta = (*c2)->priority - (*c1)->priority;
              && (sinfo = sec_data->u.i.stack_info) != NULL)
  if (delta != 0)
 
    return delta;
 
 
 
  delta = (*c2)->max_depth - (*c1)->max_depth;
 
  if (delta != 0)
 
    return delta;
 
 
 
  delta = (*c2)->count - (*c1)->count;
 
  if (delta != 0)
 
    return delta;
 
 
 
  return (char *) c1 - (char *) c2;
 
}
 
 
 
struct _mos_param {
 
  unsigned int max_overlay_size;
 
};
 
 
 
/* Set linker_mark and gc_mark on any sections that we will put in
 
   overlays.  These flags are used by the generic ELF linker, but we
 
   won't be continuing on to bfd_elf_final_link so it is OK to use
 
   them.  linker_mark is clear before we get here.  Set segment_mark
 
   on sections that are part of a pasted function (excluding the last
 
   section).
 
 
 
   Set up function rodata section if --overlay-rodata.  We don't
 
   currently include merged string constant rodata sections since
 
 
 
   Sort the call graph so that the deepest nodes will be visited
 
   first.  */
 
 
 
static bfd_boolean
 
mark_overlay_section (struct function_info *fun,
 
                      struct bfd_link_info *info,
 
                      void *param)
            {
            {
              int i;
  struct call_info *call;
              for (i = 0; i < sinfo->num_fun; ++i)
  unsigned int count;
 
  struct _mos_param *mos_param = param;
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
 
 
  if (fun->visit4)
 
    return TRUE;
 
 
 
  fun->visit4 = TRUE;
 
  if (!fun->sec->linker_mark
 
      && (htab->params->ovly_flavour != ovly_soft_icache
 
          || htab->params->non_ia_text
 
          || strncmp (fun->sec->name, ".text.ia.", 9) == 0
 
          || strcmp (fun->sec->name, ".init") == 0
 
          || strcmp (fun->sec->name, ".fini") == 0))
 
    {
 
      unsigned int size;
 
 
 
      fun->sec->linker_mark = 1;
 
      fun->sec->gc_mark = 1;
 
      fun->sec->segment_mark = 0;
 
      /* Ensure SEC_CODE is set on this text section (it ought to
 
         be!), and SEC_CODE is clear on rodata sections.  We use
 
         this flag to differentiate the two overlay section types.  */
 
      fun->sec->flags |= SEC_CODE;
 
 
 
      size = fun->sec->size;
 
      if (htab->params->auto_overlay & OVERLAY_RODATA)
 
        {
 
          char *name = NULL;
 
 
 
          /* Find the rodata section corresponding to this function's
 
             text section.  */
 
          if (strcmp (fun->sec->name, ".text") == 0)
 
            {
 
              name = bfd_malloc (sizeof (".rodata"));
 
              if (name == NULL)
 
                return FALSE;
 
              memcpy (name, ".rodata", sizeof (".rodata"));
 
            }
 
          else if (strncmp (fun->sec->name, ".text.", 6) == 0)
                {
                {
                  if (sinfo->fun[i].start != NULL)
              size_t len = strlen (fun->sec->name);
 
              name = bfd_malloc (len + 3);
 
              if (name == NULL)
 
                return FALSE;
 
              memcpy (name, ".rodata", sizeof (".rodata"));
 
              memcpy (name + 7, fun->sec->name + 5, len - 4);
 
            }
 
          else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
                    {
                    {
                      struct call_info *call = sinfo->fun[i].call_list;
              size_t len = strlen (fun->sec->name) + 1;
 
              name = bfd_malloc (len);
 
              if (name == NULL)
 
                return FALSE;
 
              memcpy (name, fun->sec->name, len);
 
              name[14] = 'r';
 
            }
 
 
                      while (call != NULL)
          if (name != NULL)
                        {
                        {
                          struct call_info *call_next = call->next;
              asection *rodata = NULL;
                          if (!insert_callee (sinfo->fun[i].start, call))
              asection *group_sec = elf_section_data (fun->sec)->next_in_group;
                            free (call);
              if (group_sec == NULL)
                          call = call_next;
                rodata = bfd_get_section_by_name (fun->sec->owner, name);
 
              else
 
                while (group_sec != NULL && group_sec != fun->sec)
 
                  {
 
                    if (strcmp (group_sec->name, name) == 0)
 
                      {
 
                        rodata = group_sec;
 
                        break;
 
                      }
 
                    group_sec = elf_section_data (group_sec)->next_in_group;
 
                  }
 
              fun->rodata = rodata;
 
              if (fun->rodata)
 
                {
 
                  size += fun->rodata->size;
 
                  if (htab->params->line_size != 0
 
                      && size > htab->params->line_size)
 
                    {
 
                      size -= fun->rodata->size;
 
                      fun->rodata = NULL;
                        }
                        }
                      sinfo->fun[i].call_list = NULL;
                  else
                      sinfo->fun[i].non_root = TRUE;
                    {
 
                      fun->rodata->linker_mark = 1;
 
                      fun->rodata->gc_mark = 1;
 
                      fun->rodata->flags &= ~SEC_CODE;
                    }
                    }
                }
                }
 
              free (name);
            }
            }
        }
        }
 
      if (mos_param->max_overlay_size < size)
 
        mos_param->max_overlay_size = size;
    }
    }
 
 
  /* Find the call graph root(s).  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    count += 1;
    {
 
      extern const bfd_target bfd_elf32_spu_vec;
 
      asection *sec;
 
 
 
      if (ibfd->xvec != &bfd_elf32_spu_vec)
  if (count > 1)
        continue;
    {
 
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
 
      if (calls == NULL)
 
        return FALSE;
 
 
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
 
        calls[count++] = call;
 
 
 
      qsort (calls, count, sizeof (*calls), sort_calls);
 
 
 
      fun->call_list = NULL;
 
      while (count != 0)
 
        {
 
          --count;
 
          calls[count]->next = fun->call_list;
 
          fun->call_list = calls[count];
 
        }
 
      free (calls);
 
    }
 
 
 
  for (call = fun->call_list; call != NULL; call = call->next)
 
    {
 
      if (call->is_pasted)
        {
        {
 
          /* There can only be one is_pasted call per function_info.  */
 
          BFD_ASSERT (!fun->sec->segment_mark);
 
          fun->sec->segment_mark = 1;
 
        }
 
      if (!call->broken_cycle
 
          && !mark_overlay_section (call->fun, info, param))
 
        return FALSE;
 
    }
 
 
 
  /* Don't put entry code into an overlay.  The overlay manager needs
 
     a stack!  Also, don't mark .ovl.init as an overlay.  */
 
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
 
      == info->output_bfd->start_address
 
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
 
    {
 
      fun->sec->linker_mark = 0;
 
      if (fun->rodata != NULL)
 
        fun->rodata->linker_mark = 0;
 
    }
 
  return TRUE;
 
}
 
 
 
/* If non-zero then unmark functions called from those within sections
 
   that we need to unmark.  Unfortunately this isn't reliable since the
 
   call graph cannot know the destination of function pointer calls.  */
 
#define RECURSE_UNMARK 0
 
 
 
struct _uos_param {
 
  asection *exclude_input_section;
 
  asection *exclude_output_section;
 
  unsigned long clearing;
 
};
 
 
 
/* Undo some of mark_overlay_section's work.  */
 
 
 
static bfd_boolean
 
unmark_overlay_section (struct function_info *fun,
 
                        struct bfd_link_info *info,
 
                        void *param)
 
{
 
  struct call_info *call;
 
  struct _uos_param *uos_param = param;
 
  unsigned int excluded = 0;
 
 
 
  if (fun->visit5)
 
    return TRUE;
 
 
 
  fun->visit5 = TRUE;
 
 
 
  excluded = 0;
 
  if (fun->sec == uos_param->exclude_input_section
 
      || fun->sec->output_section == uos_param->exclude_output_section)
 
    excluded = 1;
 
 
 
  if (RECURSE_UNMARK)
 
    uos_param->clearing += excluded;
 
 
 
  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
 
    {
 
      fun->sec->linker_mark = 0;
 
      if (fun->rodata)
 
        fun->rodata->linker_mark = 0;
 
    }
 
 
 
  for (call = fun->call_list; call != NULL; call = call->next)
 
    if (!call->broken_cycle
 
        && !unmark_overlay_section (call->fun, info, param))
 
      return FALSE;
 
 
 
  if (RECURSE_UNMARK)
 
    uos_param->clearing -= excluded;
 
  return TRUE;
 
}
 
 
 
struct _cl_param {
 
  unsigned int lib_size;
 
  asection **lib_sections;
 
};
 
 
 
/* Add sections we have marked as belonging to overlays to an array
 
   for consideration as non-overlay sections.  The array consists of
 
   pairs of sections, (text,rodata), for functions in the call graph.  */
 
 
 
static bfd_boolean
 
collect_lib_sections (struct function_info *fun,
 
                      struct bfd_link_info *info,
 
                      void *param)
 
{
 
  struct _cl_param *lib_param = param;
 
  struct call_info *call;
 
  unsigned int size;
 
 
 
  if (fun->visit6)
 
    return TRUE;
 
 
 
  fun->visit6 = TRUE;
 
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
 
    return TRUE;
 
 
 
  size = fun->sec->size;
 
  if (fun->rodata)
 
    size += fun->rodata->size;
 
 
 
  if (size <= lib_param->lib_size)
 
    {
 
      *lib_param->lib_sections++ = fun->sec;
 
      fun->sec->gc_mark = 0;
 
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
 
        {
 
          *lib_param->lib_sections++ = fun->rodata;
 
          fun->rodata->gc_mark = 0;
 
        }
 
      else
 
        *lib_param->lib_sections++ = NULL;
 
    }
 
 
 
  for (call = fun->call_list; call != NULL; call = call->next)
 
    if (!call->broken_cycle)
 
      collect_lib_sections (call->fun, info, param);
 
 
 
  return TRUE;
 
}
 
 
 
/* qsort predicate to sort sections by call count.  */
 
 
 
static int
 
sort_lib (const void *a, const void *b)
 
{
 
  asection *const *s1 = a;
 
  asection *const *s2 = b;
          struct _spu_elf_section_data *sec_data;
          struct _spu_elf_section_data *sec_data;
          struct spu_elf_stack_info *sinfo;
          struct spu_elf_stack_info *sinfo;
 
  int delta;
 
 
          if ((sec_data = spu_elf_section_data (sec)) != NULL
  delta = 0;
 
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
              && (sinfo = sec_data->u.i.stack_info) != NULL)
              && (sinfo = sec_data->u.i.stack_info) != NULL)
            {
            {
              int i;
              int i;
              for (i = 0; i < sinfo->num_fun; ++i)
              for (i = 0; i < sinfo->num_fun; ++i)
                if (!sinfo->fun[i].visit1)
        delta -= sinfo->fun[i].call_count;
                  mark_non_root (&sinfo->fun[i]);
 
            }
            }
 
 
 
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
 
      && (sinfo = sec_data->u.i.stack_info) != NULL)
 
    {
 
      int i;
 
      for (i = 0; i < sinfo->num_fun; ++i)
 
        delta += sinfo->fun[i].call_count;
        }
        }
 
 
 
  if (delta != 0)
 
    return delta;
 
 
 
  return s1 - s2;
    }
    }
 
 
  /* Remove cycles from the call graph.  We start from the root node(s)
/* Remove some sections from those marked to be in overlays.  Choose
     so that we break cycles in a reasonable place.  */
   those that are called from many places, likely library functions.  */
 
 
 
static unsigned int
 
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
 
{
 
  bfd *ibfd;
 
  asection **lib_sections;
 
  unsigned int i, lib_count;
 
  struct _cl_param collect_lib_param;
 
  struct function_info dummy_caller;
 
  struct spu_link_hash_table *htab;
 
 
 
  memset (&dummy_caller, 0, sizeof (dummy_caller));
 
  lib_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
    {
      extern const bfd_target bfd_elf32_spu_vec;
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;
      asection *sec;
 
 
      if (ibfd->xvec != &bfd_elf32_spu_vec)
      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;
        continue;
 
 
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
 
        if (sec->linker_mark
 
            && sec->size < lib_size
 
            && (sec->flags & SEC_CODE) != 0)
 
          lib_count += 1;
 
    }
 
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
 
  if (lib_sections == NULL)
 
    return (unsigned int) -1;
 
  collect_lib_param.lib_size = lib_size;
 
  collect_lib_param.lib_sections = lib_sections;
 
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
 
                      TRUE))
 
    return (unsigned int) -1;
 
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
 
 
 
  /* Sort sections so that those with the most calls are first.  */
 
  if (lib_count > 1)
 
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
 
 
 
  htab = spu_hash_table (info);
 
  for (i = 0; i < lib_count; i++)
        {
        {
 
      unsigned int tmp, stub_size;
 
      asection *sec;
          struct _spu_elf_section_data *sec_data;
          struct _spu_elf_section_data *sec_data;
          struct spu_elf_stack_info *sinfo;
          struct spu_elf_stack_info *sinfo;
 
 
 
      sec = lib_sections[2 * i];
 
      /* If this section is OK, its size must be less than lib_size.  */
 
      tmp = sec->size;
 
      /* If it has a rodata section, then add that too.  */
 
      if (lib_sections[2 * i + 1])
 
        tmp += lib_sections[2 * i + 1]->size;
 
      /* Add any new overlay call stubs needed by the section.  */
 
      stub_size = 0;
 
      if (tmp < lib_size
 
          && (sec_data = spu_elf_section_data (sec)) != NULL
 
          && (sinfo = sec_data->u.i.stack_info) != NULL)
 
        {
 
          int k;
 
          struct call_info *call;
 
 
 
          for (k = 0; k < sinfo->num_fun; ++k)
 
            for (call = sinfo->fun[k].call_list; call; call = call->next)
 
              if (call->fun->sec->linker_mark)
 
                {
 
                  struct call_info *p;
 
                  for (p = dummy_caller.call_list; p; p = p->next)
 
                    if (p->fun == call->fun)
 
                      break;
 
                  if (!p)
 
                    stub_size += ovl_stub_size (htab->params);
 
                }
 
        }
 
      if (tmp + stub_size < lib_size)
 
        {
 
          struct call_info **pp, *p;
 
 
 
          /* This section fits.  Mark it as non-overlay.  */
 
          lib_sections[2 * i]->linker_mark = 0;
 
          if (lib_sections[2 * i + 1])
 
            lib_sections[2 * i + 1]->linker_mark = 0;
 
          lib_size -= tmp + stub_size;
 
          /* Call stubs to the section we just added are no longer
 
             needed.  */
 
          pp = &dummy_caller.call_list;
 
          while ((p = *pp) != NULL)
 
            if (!p->fun->sec->linker_mark)
 
              {
 
                lib_size += ovl_stub_size (htab->params);
 
                *pp = p->next;
 
                free (p);
 
              }
 
            else
 
              pp = &p->next;
 
          /* Add new call stubs to dummy_caller.  */
          if ((sec_data = spu_elf_section_data (sec)) != NULL
          if ((sec_data = spu_elf_section_data (sec)) != NULL
              && (sinfo = sec_data->u.i.stack_info) != NULL)
              && (sinfo = sec_data->u.i.stack_info) != NULL)
            {
            {
 
              int k;
 
              struct call_info *call;
 
 
 
              for (k = 0; k < sinfo->num_fun; ++k)
 
                for (call = sinfo->fun[k].call_list;
 
                     call;
 
                     call = call->next)
 
                  if (call->fun->sec->linker_mark)
 
                    {
 
                      struct call_info *callee;
 
                      callee = bfd_malloc (sizeof (*callee));
 
                      if (callee == NULL)
 
                        return (unsigned int) -1;
 
                      *callee = *call;
 
                      if (!insert_callee (&dummy_caller, callee))
 
                        free (callee);
 
                    }
 
            }
 
        }
 
    }
 
  while (dummy_caller.call_list != NULL)
 
    {
 
      struct call_info *call = dummy_caller.call_list;
 
      dummy_caller.call_list = call->next;
 
      free (call);
 
    }
 
  for (i = 0; i < 2 * lib_count; i++)
 
    if (lib_sections[i])
 
      lib_sections[i]->gc_mark = 1;
 
  free (lib_sections);
 
  return lib_size;
 
}
 
 
 
/* Build an array of overlay sections.  The deepest node's section is
 
   added first, then its parent node's section, then everything called
 
   from the parent section.  The idea is to group sections to
 
   minimise calls between different overlays.  */
 
 
 
static bfd_boolean
 
collect_overlays (struct function_info *fun,
 
                  struct bfd_link_info *info,
 
                  void *param)
 
{
 
  struct call_info *call;
 
  bfd_boolean added_fun;
 
  asection ***ovly_sections = param;
 
 
 
  if (fun->visit7)
 
    return TRUE;
 
 
 
  fun->visit7 = TRUE;
 
  for (call = fun->call_list; call != NULL; call = call->next)
 
    if (!call->is_pasted && !call->broken_cycle)
 
      {
 
        if (!collect_overlays (call->fun, info, ovly_sections))
 
          return FALSE;
 
        break;
 
      }
 
 
 
  added_fun = FALSE;
 
  if (fun->sec->linker_mark && fun->sec->gc_mark)
 
    {
 
      fun->sec->gc_mark = 0;
 
      *(*ovly_sections)++ = fun->sec;
 
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
 
        {
 
          fun->rodata->gc_mark = 0;
 
          *(*ovly_sections)++ = fun->rodata;
 
        }
 
      else
 
        *(*ovly_sections)++ = NULL;
 
      added_fun = TRUE;
 
 
 
      /* Pasted sections must stay with the first section.  We don't
 
         put pasted sections in the array, just the first section.
 
         Mark subsequent sections as already considered.  */
 
      if (fun->sec->segment_mark)
 
        {
 
          struct function_info *call_fun = fun;
 
          do
 
            {
 
              for (call = call_fun->call_list; call != NULL; call = call->next)
 
                if (call->is_pasted)
 
                  {
 
                    call_fun = call->fun;
 
                    call_fun->sec->gc_mark = 0;
 
                    if (call_fun->rodata)
 
                      call_fun->rodata->gc_mark = 0;
 
                    break;
 
                  }
 
              if (call == NULL)
 
                abort ();
 
            }
 
          while (call_fun->sec->segment_mark);
 
        }
 
    }
 
 
 
  for (call = fun->call_list; call != NULL; call = call->next)
 
    if (!call->broken_cycle
 
        && !collect_overlays (call->fun, info, ovly_sections))
 
      return FALSE;
 
 
 
  if (added_fun)
 
    {
 
      struct _spu_elf_section_data *sec_data;
 
      struct spu_elf_stack_info *sinfo;
 
 
 
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
 
          && (sinfo = sec_data->u.i.stack_info) != NULL)
 
        {
              int i;
              int i;
              for (i = 0; i < sinfo->num_fun; ++i)
              for (i = 0; i < sinfo->num_fun; ++i)
                if (!sinfo->fun[i].non_root)
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
                  call_graph_traverse (&sinfo->fun[i], info);
              return FALSE;
            }
 
        }
        }
    }
    }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
 
struct _sum_stack_param {
 
  size_t cum_stack;
 
  size_t overall_stack;
 
  bfd_boolean emit_stack_syms;
 
};
 
 
/* Descend the call graph for FUN, accumulating total stack required.  */
/* Descend the call graph for FUN, accumulating total stack required.  */
 
 
static bfd_vma
static bfd_boolean
sum_stack (struct function_info *fun,
sum_stack (struct function_info *fun,
           struct bfd_link_info *info,
           struct bfd_link_info *info,
           int emit_stack_syms)
           void *param)
{
{
  struct call_info *call;
  struct call_info *call;
  struct function_info *max = NULL;
  struct function_info *max;
  bfd_vma max_stack = fun->stack;
  size_t stack, cum_stack;
  bfd_vma stack;
 
  const char *f1;
  const char *f1;
 
  bfd_boolean has_call;
 
  struct _sum_stack_param *sum_stack_param = param;
 
  struct spu_link_hash_table *htab;
 
 
 
  cum_stack = fun->stack;
 
  sum_stack_param->cum_stack = cum_stack;
  if (fun->visit3)
  if (fun->visit3)
    return max_stack;
    return TRUE;
 
 
 
  has_call = FALSE;
 
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
  for (call = fun->call_list; call; call = call->next)
    {
    {
      stack = sum_stack (call->fun, info, emit_stack_syms);
      if (call->broken_cycle)
 
        continue;
 
      if (!call->is_pasted)
 
        has_call = TRUE;
 
      if (!sum_stack (call->fun, info, sum_stack_param))
 
        return FALSE;
 
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
      /* Include caller stack for normal calls, don't do so for
         tail calls.  fun->stack here is local stack usage for
         tail calls.  fun->stack here is local stack usage for
         this function.  */
         this function.  */
      if (!call->is_tail)
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
        stack += fun->stack;
        stack += fun->stack;
      if (max_stack < stack)
      if (cum_stack < stack)
 
        {
 
          cum_stack = stack;
 
          max = call->fun;
 
        }
 
    }
 
 
 
  sum_stack_param->cum_stack = cum_stack;
 
  stack = fun->stack;
 
  /* Now fun->stack holds cumulative stack.  */
 
  fun->stack = cum_stack;
 
  fun->visit3 = TRUE;
 
 
 
  if (!fun->non_root
 
      && sum_stack_param->overall_stack < cum_stack)
 
    sum_stack_param->overall_stack = cum_stack;
 
 
 
  htab = spu_hash_table (info);
 
  if (htab->params->auto_overlay)
 
    return TRUE;
 
 
 
  f1 = func_name (fun);
 
  if (htab->params->stack_analysis)
 
    {
 
      if (!fun->non_root)
 
        info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
 
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
 
                              f1, (bfd_vma) stack, (bfd_vma) cum_stack);
 
 
 
      if (has_call)
 
        {
 
          info->callbacks->minfo (_("  calls:\n"));
 
          for (call = fun->call_list; call; call = call->next)
 
            if (!call->is_pasted && !call->broken_cycle)
 
              {
 
                const char *f2 = func_name (call->fun);
 
                const char *ann1 = call->fun == max ? "*" : " ";
 
                const char *ann2 = call->is_tail ? "t" : " ";
 
 
 
                info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
 
              }
 
        }
 
    }
 
 
 
  if (sum_stack_param->emit_stack_syms)
 
    {
 
      char *name = bfd_malloc (18 + strlen (f1));
 
      struct elf_link_hash_entry *h;
 
 
 
      if (name == NULL)
 
        return FALSE;
 
 
 
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
 
        sprintf (name, "__stack_%s", f1);
 
      else
 
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
 
 
 
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
 
      free (name);
 
      if (h != NULL
 
          && (h->root.type == bfd_link_hash_new
 
              || h->root.type == bfd_link_hash_undefined
 
              || h->root.type == bfd_link_hash_undefweak))
 
        {
 
          h->root.type = bfd_link_hash_defined;
 
          h->root.u.def.section = bfd_abs_section_ptr;
 
          h->root.u.def.value = cum_stack;
 
          h->size = 0;
 
          h->type = 0;
 
          h->ref_regular = 1;
 
          h->def_regular = 1;
 
          h->ref_regular_nonweak = 1;
 
          h->forced_local = 1;
 
          h->non_elf = 0;
 
        }
 
    }
 
 
 
  return TRUE;
 
}
 
 
 
/* SEC is part of a pasted function.  Return the call_info for the
 
   next section of this function.  */
 
 
 
static struct call_info *
 
find_pasted_call (asection *sec)
 
{
 
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
 
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
 
  struct call_info *call;
 
  int k;
 
 
 
  for (k = 0; k < sinfo->num_fun; ++k)
 
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
 
      if (call->is_pasted)
 
        return call;
 
  abort ();
 
  return 0;
 
}
 
 
 
/* qsort predicate to sort bfds by file name.  */
 
 
 
static int
 
sort_bfds (const void *a, const void *b)
 
{
 
  bfd *const *abfd1 = a;
 
  bfd *const *abfd2 = b;
 
 
 
  return strcmp ((*abfd1)->filename, (*abfd2)->filename);
 
}
 
 
 
static unsigned int
 
print_one_overlay_section (FILE *script,
 
                           unsigned int base,
 
                           unsigned int count,
 
                           unsigned int ovlynum,
 
                           unsigned int *ovly_map,
 
                           asection **ovly_sections,
 
                           struct bfd_link_info *info)
 
{
 
  unsigned int j;
 
 
 
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
 
    {
 
      asection *sec = ovly_sections[2 * j];
 
 
 
      if (fprintf (script, "   %s%c%s (%s)\n",
 
                   (sec->owner->my_archive != NULL
 
                    ? sec->owner->my_archive->filename : ""),
 
                   info->path_separator,
 
                   sec->owner->filename,
 
                   sec->name) <= 0)
 
        return -1;
 
      if (sec->segment_mark)
 
        {
 
          struct call_info *call = find_pasted_call (sec);
 
          while (call != NULL)
 
            {
 
              struct function_info *call_fun = call->fun;
 
              sec = call_fun->sec;
 
              if (fprintf (script, "   %s%c%s (%s)\n",
 
                           (sec->owner->my_archive != NULL
 
                            ? sec->owner->my_archive->filename : ""),
 
                           info->path_separator,
 
                           sec->owner->filename,
 
                           sec->name) <= 0)
 
                return -1;
 
              for (call = call_fun->call_list; call; call = call->next)
 
                if (call->is_pasted)
 
                  break;
 
            }
 
        }
 
    }
 
 
 
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
 
    {
 
      asection *sec = ovly_sections[2 * j + 1];
 
      if (sec != NULL
 
          && fprintf (script, "   %s%c%s (%s)\n",
 
                      (sec->owner->my_archive != NULL
 
                       ? sec->owner->my_archive->filename : ""),
 
                      info->path_separator,
 
                      sec->owner->filename,
 
                      sec->name) <= 0)
 
        return -1;
 
 
 
      sec = ovly_sections[2 * j];
 
      if (sec->segment_mark)
 
        {
 
          struct call_info *call = find_pasted_call (sec);
 
          while (call != NULL)
 
            {
 
              struct function_info *call_fun = call->fun;
 
              sec = call_fun->rodata;
 
              if (sec != NULL
 
                  && fprintf (script, "   %s%c%s (%s)\n",
 
                              (sec->owner->my_archive != NULL
 
                               ? sec->owner->my_archive->filename : ""),
 
                              info->path_separator,
 
                              sec->owner->filename,
 
                              sec->name) <= 0)
 
                return -1;
 
              for (call = call_fun->call_list; call; call = call->next)
 
                if (call->is_pasted)
 
                  break;
 
            }
 
        }
 
    }
 
 
 
  return j;
 
}
 
 
 
/* Handle --auto-overlay.  */
 
 
 
static void
 
spu_elf_auto_overlay (struct bfd_link_info *info)
 
{
 
  bfd *ibfd;
 
  bfd **bfd_arr;
 
  struct elf_segment_map *m;
 
  unsigned int fixed_size, lo, hi;
 
  unsigned int reserved;
 
  struct spu_link_hash_table *htab;
 
  unsigned int base, i, count, bfd_count;
 
  unsigned int region, ovlynum;
 
  asection **ovly_sections, **ovly_p;
 
  unsigned int *ovly_map;
 
  FILE *script;
 
  unsigned int total_overlay_size, overlay_size;
 
  const char *ovly_mgr_entry;
 
  struct elf_link_hash_entry *h;
 
  struct _mos_param mos_param;
 
  struct _uos_param uos_param;
 
  struct function_info dummy_caller;
 
 
 
  /* Find the extents of our loadable image.  */
 
  lo = (unsigned int) -1;
 
  hi = 0;
 
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
 
    if (m->p_type == PT_LOAD)
 
      for (i = 0; i < m->count; i++)
 
        if (m->sections[i]->size != 0)
 
          {
 
            if (m->sections[i]->vma < lo)
 
              lo = m->sections[i]->vma;
 
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
 
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
 
          }
 
  fixed_size = hi + 1 - lo;
 
 
 
  if (!discover_functions (info))
 
    goto err_exit;
 
 
 
  if (!build_call_tree (info))
 
    goto err_exit;
 
 
 
  htab = spu_hash_table (info);
 
  reserved = htab->params->auto_overlay_reserved;
 
  if (reserved == 0)
 
    {
 
      struct _sum_stack_param sum_stack_param;
 
 
 
      sum_stack_param.emit_stack_syms = 0;
 
      sum_stack_param.overall_stack = 0;
 
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
 
        goto err_exit;
 
      reserved = (sum_stack_param.overall_stack
 
                  + htab->params->extra_stack_space);
 
    }
 
 
 
  /* No need for overlays if everything already fits.  */
 
  if (fixed_size + reserved <= htab->local_store
 
      && htab->params->ovly_flavour != ovly_soft_icache)
 
    {
 
      htab->params->auto_overlay = 0;
 
      return;
 
    }
 
 
 
  uos_param.exclude_input_section = 0;
 
  uos_param.exclude_output_section
 
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");
 
 
 
  ovly_mgr_entry = "__ovly_load";
 
  if (htab->params->ovly_flavour == ovly_soft_icache)
 
    ovly_mgr_entry = "__icache_br_handler";
 
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
 
                            FALSE, FALSE, FALSE);
 
  if (h != NULL
 
      && (h->root.type == bfd_link_hash_defined
 
          || h->root.type == bfd_link_hash_defweak)
 
      && h->def_regular)
 
    {
 
      /* We have a user supplied overlay manager.  */
 
      uos_param.exclude_input_section = h->root.u.def.section;
 
    }
 
  else
 
    {
 
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
 
         builtin version to .text, and will adjust .text size.  */
 
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
 
    }
 
 
 
  /* Mark overlay sections, and find max overlay section size.  */
 
  mos_param.max_overlay_size = 0;
 
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
 
    goto err_exit;
 
 
 
  /* We can't put the overlay manager or interrupt routines in
 
     overlays.  */
 
  uos_param.clearing = 0;
 
  if ((uos_param.exclude_input_section
 
       || uos_param.exclude_output_section)
 
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
 
    goto err_exit;
 
 
 
  bfd_count = 0;
 
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
 
    ++bfd_count;
 
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
 
  if (bfd_arr == NULL)
 
    goto err_exit;
 
 
 
  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
 
  count = 0;
 
  bfd_count = 0;
 
  total_overlay_size = 0;
 
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
 
    {
 
      extern const bfd_target bfd_elf32_spu_vec;
 
      asection *sec;
 
      unsigned int old_count;
 
 
 
      if (ibfd->xvec != &bfd_elf32_spu_vec)
 
        continue;
 
 
 
      old_count = count;
 
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
 
        if (sec->linker_mark)
 
          {
 
            if ((sec->flags & SEC_CODE) != 0)
 
              count += 1;
 
            fixed_size -= sec->size;
 
            total_overlay_size += sec->size;
 
          }
 
        else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
 
                 && sec->output_section->owner == info->output_bfd
 
                 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
 
          fixed_size -= sec->size;
 
      if (count != old_count)
 
        bfd_arr[bfd_count++] = ibfd;
 
    }
 
 
 
  /* Since the overlay link script selects sections by file name and
 
     section name, ensure that file names are unique.  */
 
  if (bfd_count > 1)
 
    {
 
      bfd_boolean ok = TRUE;
 
 
 
      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
 
      for (i = 1; i < bfd_count; ++i)
 
        if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
 
          {
 
            if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
 
              {
 
                if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
 
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
 
                                          bfd_arr[i]->filename,
 
                                          bfd_arr[i]->my_archive->filename);
 
                else
 
                  info->callbacks->einfo (_("%s duplicated\n"),
 
                                          bfd_arr[i]->filename);
 
                ok = FALSE;
 
              }
 
          }
 
      if (!ok)
 
        {
 
          info->callbacks->einfo (_("sorry, no support for duplicate "
 
                                    "object files in auto-overlay script\n"));
 
          bfd_set_error (bfd_error_bad_value);
 
          goto err_exit;
 
        }
 
    }
 
  free (bfd_arr);
 
 
 
  fixed_size += reserved;
 
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
 
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
 
    {
 
      if (htab->params->ovly_flavour == ovly_soft_icache)
 
        {
 
          /* Stubs in the non-icache area are bigger.  */
 
          fixed_size += htab->non_ovly_stub * 16;
 
          /* Space for icache manager tables.
 
             a) Tag array, one quadword per cache line.
 
             - word 0: ia address of present line, init to zero.  */
 
          fixed_size += 16 << htab->num_lines_log2;
 
          /* b) Rewrite "to" list, one quadword per cache line.  */
 
          fixed_size += 16 << htab->num_lines_log2;
 
          /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
 
                to a power-of-two number of full quadwords) per cache line.  */
 
          fixed_size += 16 << (htab->fromelem_size_log2
 
                               + htab->num_lines_log2);
 
          /* d) Pointer to __ea backing store (toe), 1 quadword.  */
 
          fixed_size += 16;
 
        }
 
      else
 
        {
 
          /* Guess number of overlays.  Assuming overlay buffer is on
 
             average only half full should be conservative.  */
 
          ovlynum = (total_overlay_size * 2 * htab->params->num_lines
 
                     / (htab->local_store - fixed_size));
 
          /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
 
          fixed_size += ovlynum * 16 + 16 + 4 + 16;
 
        }
 
    }
 
 
 
  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
 
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
 
                              "size of 0x%v exceeds local store\n"),
 
                            (bfd_vma) fixed_size,
 
                            (bfd_vma) mos_param.max_overlay_size);
 
 
 
  /* Now see if we should put some functions in the non-overlay area.  */
 
  else if (fixed_size < htab->params->auto_overlay_fixed)
 
    {
 
      unsigned int max_fixed, lib_size;
 
 
 
      max_fixed = htab->local_store - mos_param.max_overlay_size;
 
      if (max_fixed > htab->params->auto_overlay_fixed)
 
        max_fixed = htab->params->auto_overlay_fixed;
 
      lib_size = max_fixed - fixed_size;
 
      lib_size = auto_ovl_lib_functions (info, lib_size);
 
      if (lib_size == (unsigned int) -1)
 
        goto err_exit;
 
      fixed_size = max_fixed - lib_size;
 
    }
 
 
 
  /* Build an array of sections, suitably sorted to place into
 
     overlays.  */
 
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
 
  if (ovly_sections == NULL)
 
    goto err_exit;
 
  ovly_p = ovly_sections;
 
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
 
    goto err_exit;
 
  count = (size_t) (ovly_p - ovly_sections) / 2;
 
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
 
  if (ovly_map == NULL)
 
    goto err_exit;
 
 
 
  memset (&dummy_caller, 0, sizeof (dummy_caller));
 
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
 
  if (htab->params->line_size != 0)
 
    overlay_size = htab->params->line_size;
 
  base = 0;
 
  ovlynum = 0;
 
  while (base < count)
 
    {
 
      unsigned int size = 0, rosize = 0, roalign = 0;
 
 
 
      for (i = base; i < count; i++)
 
        {
 
          asection *sec, *rosec;
 
          unsigned int tmp, rotmp;
 
          unsigned int num_stubs;
 
          struct call_info *call, *pasty;
 
          struct _spu_elf_section_data *sec_data;
 
          struct spu_elf_stack_info *sinfo;
 
          int k;
 
 
 
          /* See whether we can add this section to the current
 
             overlay without overflowing our overlay buffer.  */
 
          sec = ovly_sections[2 * i];
 
          tmp = align_power (size, sec->alignment_power) + sec->size;
 
          rotmp = rosize;
 
          rosec = ovly_sections[2 * i + 1];
 
          if (rosec != NULL)
 
            {
 
              rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
 
              if (roalign < rosec->alignment_power)
 
                roalign = rosec->alignment_power;
 
            }
 
          if (align_power (tmp, roalign) + rotmp > overlay_size)
 
            break;
 
          if (sec->segment_mark)
 
            {
 
              /* Pasted sections must stay together, so add their
 
                 sizes too.  */
 
              struct call_info *pasty = find_pasted_call (sec);
 
              while (pasty != NULL)
 
                {
 
                  struct function_info *call_fun = pasty->fun;
 
                  tmp = (align_power (tmp, call_fun->sec->alignment_power)
 
                         + call_fun->sec->size);
 
                  if (call_fun->rodata)
 
                    {
 
                      rotmp = (align_power (rotmp,
 
                                            call_fun->rodata->alignment_power)
 
                               + call_fun->rodata->size);
 
                      if (roalign < rosec->alignment_power)
 
                        roalign = rosec->alignment_power;
 
                    }
 
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
 
                    if (pasty->is_pasted)
 
                      break;
 
                }
 
            }
 
          if (align_power (tmp, roalign) + rotmp > overlay_size)
 
            break;
 
 
 
          /* If we add this section, we might need new overlay call
 
              stubs.  Add any overlay section calls to dummy_caller.  */
 
          pasty = NULL;
 
          sec_data = spu_elf_section_data (sec);
 
          sinfo = sec_data->u.i.stack_info;
 
          for (k = 0; k < sinfo->num_fun; ++k)
 
            for (call = sinfo->fun[k].call_list; call; call = call->next)
 
              if (call->is_pasted)
 
                {
 
                  BFD_ASSERT (pasty == NULL);
 
                  pasty = call;
 
                }
 
              else if (call->fun->sec->linker_mark)
 
                {
 
                  if (!copy_callee (&dummy_caller, call))
 
                    goto err_exit;
 
                }
 
          while (pasty != NULL)
 
            {
 
              struct function_info *call_fun = pasty->fun;
 
              pasty = NULL;
 
              for (call = call_fun->call_list; call; call = call->next)
 
                if (call->is_pasted)
 
                  {
 
                    BFD_ASSERT (pasty == NULL);
 
                    pasty = call;
 
                  }
 
                else if (!copy_callee (&dummy_caller, call))
 
                  goto err_exit;
 
            }
 
 
 
          /* Calculate call stub size.  */
 
          num_stubs = 0;
 
          for (call = dummy_caller.call_list; call; call = call->next)
 
            {
 
              unsigned int k;
 
              unsigned int stub_delta = 1;
 
 
 
              if (htab->params->ovly_flavour == ovly_soft_icache)
 
                stub_delta = call->count;
 
              num_stubs += stub_delta;
 
 
 
              /* If the call is within this overlay, we won't need a
 
                 stub.  */
 
              for (k = base; k < i + 1; k++)
 
                if (call->fun->sec == ovly_sections[2 * k])
 
                  {
 
                    num_stubs -= stub_delta;
 
                    break;
 
                  }
 
            }
 
          if (htab->params->ovly_flavour == ovly_soft_icache
 
              && num_stubs > htab->params->max_branch)
 
            break;
 
          if (align_power (tmp, roalign) + rotmp
 
              + num_stubs * ovl_stub_size (htab->params) > overlay_size)
 
            break;
 
          size = tmp;
 
          rosize = rotmp;
 
        }
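      /* Illustrative sizing example (hypothetical numbers, not taken
         from this file): with overlay_size 0x4000, 0x3f00 bytes already
         packed and a candidate section of 0x200 bytes aligned to 16,
         tmp becomes align_power (0x3f00, 4) + 0x200 = 0x4100, which
         exceeds 0x4000, so the loop stops and the candidate section
         starts the next overlay instead.  */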
 
 
 
      if (i == base)
 
        {
 
          info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
 
                                  ovly_sections[2 * i]->owner,
 
                                  ovly_sections[2 * i],
 
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
 
          bfd_set_error (bfd_error_bad_value);
 
          goto err_exit;
 
        }
 
 
 
      while (dummy_caller.call_list != NULL)
        {
        {
          max_stack = stack;
          struct call_info *call = dummy_caller.call_list;
          max = call->fun;
          dummy_caller.call_list = call->next;
 
          free (call);
        }
        }
 
 
 
      ++ovlynum;
 
      while (base < i)
 
        ovly_map[base++] = ovlynum;
    }
    }
 
 
  f1 = func_name (fun);
  script = htab->params->spu_elf_open_overlay_script ();
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
 
                          f1, (bfd_vma) fun->stack, max_stack);
 
 
 
  if (fun->call_list)
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
    {
      info->callbacks->minfo (_("  calls:\n"));
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
      for (call = fun->call_list; call; call = call->next)
        goto file_err;
 
 
 
      if (fprintf (script,
 
                   " . = ALIGN (%u);\n"
 
                   " .ovl.init : { *(.ovl.init) }\n"
 
                   " . = ABSOLUTE (ADDR (.ovl.init));\n",
 
                   htab->params->line_size) <= 0)
 
        goto file_err;
 
 
 
      base = 0;
 
      ovlynum = 1;
 
      while (base < count)
        {
        {
          const char *f2 = func_name (call->fun);
          unsigned int indx = ovlynum - 1;
          const char *ann1 = call->fun == max ? "*" : " ";
          unsigned int vma, lma;
          const char *ann2 = call->is_tail ? "t" : " ";
 
 
 
          info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
          vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
        }
          lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
 
 
 
          if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
 
                               ": AT (LOADADDR (.ovl.init) + %u) {\n",
 
                       ovlynum, vma, lma) <= 0)
 
            goto file_err;
 
 
 
          base = print_one_overlay_section (script, base, count, ovlynum,
 
                                            ovly_map, ovly_sections, info);
 
          if (base == (unsigned) -1)
 
            goto file_err;
 
 
 
          if (fprintf (script, "  }\n") <= 0)
 
            goto file_err;
 
 
 
          ovlynum++;
    }
    }
 
 
  /* Now fun->stack holds cumulative stack.  */
      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
  fun->stack = max_stack;
                   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
  fun->visit3 = TRUE;
        goto file_err;
 
 
  if (emit_stack_syms)
      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
 
        goto file_err;
 
    }
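  /* For illustration only, with hypothetical parameters line_size 1024
     and num_lines 32, the script written above comes out roughly as

        SECTIONS
        {
         . = ALIGN (1024);
         .ovl.init : { *(.ovl.init) }
         . = ABSOLUTE (ADDR (.ovl.init));
         .ovly1 ABSOLUTE (ADDR (.ovl.init)) + 0 : AT (LOADADDR (.ovl.init) + 262144) { ... }
         .ovly2 ABSOLUTE (ADDR (.ovl.init)) + 1024 : AT (LOADADDR (.ovl.init) + 263168) { ... }
         . = ABSOLUTE (ADDR (.ovl.init)) + 32768;
        }
        INSERT AFTER .toe;

     so every soft-icache line gets a fixed 1024-byte slot of local
     store, while successive groups of 32 overlays are given load
     addresses 256 KiB apart.  */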
 
  else
    {
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
      char *name = bfd_malloc (18 + strlen (f1));
        goto file_err;
      struct elf_link_hash_entry *h;
 
 
 
      if (name != NULL)
      if (fprintf (script,
 
                   " . = ALIGN (16);\n"
 
                   " .ovl.init : { *(.ovl.init) }\n"
 
                   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
 
        goto file_err;
 
 
 
      for (region = 1; region <= htab->params->num_lines; region++)
 
        {
 
          ovlynum = region;
 
          base = 0;
 
          while (base < count && ovly_map[base] < ovlynum)
 
            base++;
 
 
 
          if (base == count)
 
            break;
 
 
 
          if (region == 1)
        {
        {
          if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
              /* We need to set lma since we are overlaying .ovl.init.  */
            sprintf (name, "__stack_%s", f1);
              if (fprintf (script,
 
                           " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
 
                goto file_err;
 
            }
          else
          else
            sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
            {
 
              if (fprintf (script, " OVERLAY :\n {\n") <= 0)
 
                goto file_err;
 
            }
 
 
          h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
          while (base < count)
          free (name);
 
          if (h != NULL
 
              && (h->root.type == bfd_link_hash_new
 
                  || h->root.type == bfd_link_hash_undefined
 
                  || h->root.type == bfd_link_hash_undefweak))
 
            {
            {
              h->root.type = bfd_link_hash_defined;
              if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
              h->root.u.def.section = bfd_abs_section_ptr;
                goto file_err;
              h->root.u.def.value = max_stack;
 
              h->size = 0;
              base = print_one_overlay_section (script, base, count, ovlynum,
              h->type = 0;
                                                ovly_map, ovly_sections, info);
              h->ref_regular = 1;
              if (base == (unsigned) -1)
              h->def_regular = 1;
                goto file_err;
              h->ref_regular_nonweak = 1;
 
              h->forced_local = 1;
              if (fprintf (script, "  }\n") <= 0)
              h->non_elf = 0;
                goto file_err;
 
 
 
              ovlynum += htab->params->num_lines;
 
              while (base < count && ovly_map[base] < ovlynum)
 
                base++;
            }
            }
 
 
 
          if (fprintf (script, " }\n") <= 0)
 
            goto file_err;
        }
        }
 
 
 
      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
 
        goto file_err;
    }
    }
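  /* For illustration only, assuming (hypothetically) two overlay
     regions and four overlays, the script written above looks roughly
     like

        SECTIONS
        {
         . = ALIGN (16);
         .ovl.init : { *(.ovl.init) }
         . = ABSOLUTE (ADDR (.ovl.init));
         OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))
         {
          .ovly1 { ... }
          .ovly3 { ... }
         }
         OVERLAY :
         {
          .ovly2 { ... }
          .ovly4 { ... }
         }
        }
        INSERT BEFORE .text;

     i.e. overlays assigned to the same region share a single OVERLAY
     buffer of local store.  */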
 
 
  return max_stack;
  free (ovly_map);
 
  free (ovly_sections);
 
 
 
  if (fclose (script) != 0)
 
    goto file_err;
 
 
 
  if (htab->params->auto_overlay & AUTO_RELINK)
 
    (*htab->params->spu_elf_relink) ();
 
 
 
  xexit (0);
 
 
 
 file_err:
 
  bfd_set_error (bfd_error_system_call);
 
 err_exit:
 
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
 
  xexit (1);
}
}
 
 
/* Provide an estimate of total stack required.  */
/* Provide an estimate of total stack required.  */
 
 
static bfd_boolean
static bfd_boolean
spu_elf_stack_analysis (bfd *output_bfd,
spu_elf_stack_analysis (struct bfd_link_info *info)
                        struct bfd_link_info *info,
 
                        int emit_stack_syms)
 
{
{
  bfd *ibfd;
  struct spu_link_hash_table *htab;
  bfd_vma max_stack = 0;
  struct _sum_stack_param sum_stack_param;
 
 
  if (!discover_functions (output_bfd, info))
  if (!discover_functions (info))
    return FALSE;
    return FALSE;
 
 
  if (!build_call_tree (output_bfd, info))
  if (!build_call_tree (info))
    return FALSE;
    return FALSE;
 
 
 
  htab = spu_hash_table (info);
 
  if (htab->params->stack_analysis)
 
    {
  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions.  "
  info->callbacks->minfo (_("\nStack size for functions.  "
                            "Annotations: '*' max stack, 't' tail call\n"));
                            "Annotations: '*' max stack, 't' tail call\n"));
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
 
    {
 
      extern const bfd_target bfd_elf32_spu_vec;
 
      asection *sec;
 
 
 
      if (ibfd->xvec != &bfd_elf32_spu_vec)
 
        continue;
 
 
 
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
 
        {
 
          struct _spu_elf_section_data *sec_data;
 
          struct spu_elf_stack_info *sinfo;
 
 
 
          if ((sec_data = spu_elf_section_data (sec)) != NULL
 
              && (sinfo = sec_data->u.i.stack_info) != NULL)
 
            {
 
              int i;
 
              for (i = 0; i < sinfo->num_fun; ++i)
 
                {
 
                  if (!sinfo->fun[i].non_root)
 
                    {
 
                      bfd_vma stack;
 
                      const char *f1;
 
 
 
                      stack = sum_stack (&sinfo->fun[i], info,
 
                                         emit_stack_syms);
 
                      f1 = func_name (&sinfo->fun[i]);
 
                      info->callbacks->info (_("  %s: 0x%v\n"),
 
                                              f1, stack);
 
                      if (max_stack < stack)
 
                        max_stack = stack;
 
                    }
 
                }
 
            }
 
        }
 
    }
    }
 
 
  info->callbacks->info (_("Maximum stack required is 0x%v\n"), max_stack);
  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
 
  sum_stack_param.overall_stack = 0;
 
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
 
    return FALSE;
 
 
 
  if (htab->params->stack_analysis)
 
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
 
                           (bfd_vma) sum_stack_param.overall_stack);
  return TRUE;
  return TRUE;
}
}
 
 
/* Perform a final link.  */
/* Perform a final link.  */
 
 
static bfd_boolean
static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
 
  if (htab->stack_analysis
  if (htab->params->auto_overlay)
      && !spu_elf_stack_analysis (output_bfd, info, htab->emit_stack_syms))
    spu_elf_auto_overlay (info);
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
 
 
  if ((htab->params->stack_analysis
 
       || (htab->params->ovly_flavour == ovly_soft_icache
 
           && htab->params->lrlive_analysis))
 
      && !spu_elf_stack_analysis (info))
 
    info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
 
 
 
  if (!spu_elf_build_stubs (info))
 
    info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
 
 
  return bfd_elf_final_link (output_bfd, info);
  return bfd_elf_final_link (output_bfd, info);
}
}
 
 
/* Called when not normally emitting relocs, i.e. !info->relocatable
/* Called when not normally emitting relocs, i.e. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */
   that need to be emitted.  */
 
 
static unsigned int
static unsigned int
spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
{
{
 
  Elf_Internal_Rela *relocs;
  unsigned int count = 0;
  unsigned int count = 0;
 
 
 
  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
 
                                      info->keep_memory);
 
  if (relocs != NULL)
 
    {
 
      Elf_Internal_Rela *rel;
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;
 
 
  for (; relocs < relend; relocs++)
      for (rel = relocs; rel < relend; rel++)
    {
    {
      int r_type = ELF32_R_TYPE (relocs->r_info);
          int r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
        ++count;
        ++count;
    }
    }
 
 
 
      if (elf_section_data (sec)->relocs != relocs)
 
        free (relocs);
 
    }
 
 
  return count;
  return count;
}
}
 
 
 
/* Functions for adding fixup records to .fixup */
 
 
 
#define FIXUP_RECORD_SIZE 4
 
 
 
#define FIXUP_PUT(output_bfd,htab,index,addr) \
 
          bfd_put_32 (output_bfd, addr, \
 
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
 
#define FIXUP_GET(output_bfd,htab,index) \
 
          bfd_get_32 (output_bfd, \
 
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
 
 
 
/* Store OFFSET in .fixup.  This assumes it will be called with an
 
   increasing OFFSET.  When this OFFSET falls in the same quadword as
 
   the last base offset, it just sets a bit in that record, otherwise it
 
   adds a new fixup record.  */
 
static void
 
spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
 
                    bfd_vma offset)
 
{
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  asection *sfixup = htab->sfixup;
 
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
 
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
 
  if (sfixup->reloc_count == 0)
 
    {
 
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
 
      sfixup->reloc_count++;
 
    }
 
  else
 
    {
 
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
 
      if (qaddr != (base & ~(bfd_vma) 15))
 
        {
 
          if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
 
            (*_bfd_error_handler) (_("fatal error while creating .fixup"));
 
          FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
 
          sfixup->reloc_count++;
 
        }
 
      else
 
        FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
 
    }
 
}
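
/* A minimal standalone sketch of the .fixup record encoding implemented
   above (illustrative only; "encode_fixup" is a hypothetical name, not
   part of BFD).  Each 32-bit record keeps the 16-byte-aligned quadword
   address in its upper 28 bits and a 4-bit mask, one bit per word of
   that quadword, marking which words hold an R_SPU_ADDR32 value.  */
#if 0
static unsigned int
encode_fixup (unsigned int offset)
{
  unsigned int qaddr = offset & ~15u;             /* quadword base address */
  unsigned int bit = 8u >> ((offset & 15) >> 2);  /* word 0..3 -> bit 8,4,2,1 */
  return qaddr | bit;
}
#endif
/* For example, an address word at offset 0x108 encodes as 0x102 (quadword
   0x100, third word); a second word at 0x10c in the same quadword merely
   ORs bit 1 into the existing record, giving 0x103.  */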
 
 
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
 
 
static int
static int
spu_elf_relocate_section (bfd *output_bfd,
spu_elf_relocate_section (bfd *output_bfd,
                          struct bfd_link_info *info,
                          struct bfd_link_info *info,
Line 2652... Line 4793...
{
{
  Elf_Internal_Shdr *symtab_hdr;
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  struct spu_link_hash_table *htab;
 
  asection *ea;
  int ret = TRUE;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean emit_these_relocs = FALSE;
 
  bfd_boolean is_ea_sym;
 
  bfd_boolean stubs;
 
  unsigned int iovl = 0;
 
 
  htab = spu_hash_table (info);
  htab = spu_hash_table (info);
 
  stubs = (htab->stub_sec != NULL
 
           && maybe_needs_stubs (input_section));
 
  iovl = overlay_index (input_section);
 
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
 
 
  rel = relocs;
  rel = relocs;
  relend = relocs + input_section->reloc_count;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
  for (; rel < relend; rel++)
    {
    {
      int r_type;
      int r_type;
      reloc_howto_type *howto;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *sym;
      asection *sec;
      asection *sec;
      struct elf_link_hash_entry *h;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      bfd_boolean warned;
      bfd_boolean branch;
      enum _stub_type stub_type;
 
 
      r_symndx = ELF32_R_SYM (rel->r_info);
      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
 
        {
 
          emit_these_relocs = TRUE;
 
          continue;
 
        }
 
 
 
      howto = elf_howto_table + r_type;
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      unresolved_reloc = FALSE;
      warned = FALSE;
      warned = FALSE;
      h = NULL;
      h = NULL;
      sym = NULL;
      sym = NULL;
Line 2700... Line 4843...
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
        }
        }
      else
      else
        {
        {
          RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
          if (sym_hashes == NULL)
                                   r_symndx, symtab_hdr, sym_hashes,
            return FALSE;
                                   h, sec, relocation,
 
                                   unresolved_reloc, warned);
          h = sym_hashes[r_symndx - symtab_hdr->sh_info];
 
 
 
          while (h->root.type == bfd_link_hash_indirect
 
                 || h->root.type == bfd_link_hash_warning)
 
            h = (struct elf_link_hash_entry *) h->root.u.i.link;
 
 
 
          relocation = 0;
 
          if (h->root.type == bfd_link_hash_defined
 
              || h->root.type == bfd_link_hash_defweak)
 
            {
 
              sec = h->root.u.def.section;
 
              if (sec == NULL
 
                  || sec->output_section == NULL)
 
                /* Set a flag that will be cleared later if we find a
 
                   relocation value for this symbol.  output_section
 
                   is typically NULL for symbols satisfied by a shared
 
                   library.  */
 
                unresolved_reloc = TRUE;
 
              else
 
                relocation = (h->root.u.def.value
 
                              + sec->output_section->vma
 
                              + sec->output_offset);
 
            }
 
          else if (h->root.type == bfd_link_hash_undefweak)
 
            ;
 
          else if (info->unresolved_syms_in_objects == RM_IGNORE
 
                   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
 
            ;
 
          else if (!info->relocatable
 
                   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
 
            {
 
              bfd_boolean err;
 
              err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
 
                     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
 
              if (!info->callbacks->undefined_symbol (info,
 
                                                      h->root.root.string,
 
                                                      input_bfd,
 
                                                      input_section,
 
                                                      rel->r_offset, err))
 
                return FALSE;
 
              warned = TRUE;
 
            }
          sym_name = h->root.root.string;
          sym_name = h->root.root.string;
        }
        }
 
 
      if (sec != NULL && elf_discarded_section (sec))
      if (sec != NULL && elf_discarded_section (sec))
        {
        {
Line 2721... Line 4905...
        }
        }
 
 
      if (info->relocatable)
      if (info->relocatable)
        continue;
        continue;
 
 
      if (unresolved_reloc)
      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
 
      if (r_type == R_SPU_ADD_PIC
 
          && h != NULL
 
          && !(h->def_regular || ELF_COMMON_DEF_P (h)))
        {
        {
          (*_bfd_error_handler)
          bfd_byte *loc = contents + rel->r_offset;
            (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
          loc[0] = 0x1c;
             input_bfd,
          loc[1] = 0x00;
             bfd_get_section_name (input_bfd, input_section),
          loc[2] &= 0x3f;
             (long) rel->r_offset,
 
             howto->name,
 
             sym_name);
 
          ret = FALSE;
 
        }
        }
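      /* A note on the byte patch above: in the SPU RI10 instruction
         format the first byte is the 8-bit major opcode, the next ten
         bits the signed immediate, then RA and RT.  Writing 0x1c to
         loc[0] selects the "ai" opcode, and clearing loc[1] plus the
         top two bits of loc[2] zeroes the immediate while leaving RA
         and RT untouched, which is what turns "a rt,ra,rb" into
         "ai rt,ra,0".  */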
 
 
 
      is_ea_sym = (ea != NULL
 
                   && sec != NULL
 
                   && sec->output_section == ea);
 
 
      /* If this symbol is in an overlay area, we may need to relocate
      /* If this symbol is in an overlay area, we may need to relocate
         to the overlay stub.  */
         to the overlay stub.  */
      addend = rel->r_addend;
      addend = rel->r_addend;
      branch = (is_branch (contents + rel->r_offset)
      if (stubs
                || is_hint (contents + rel->r_offset));
          && !is_ea_sym
      if (htab->stub_sec != NULL
          && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
          && needs_ovl_stub (sym_name, sec, input_section, htab, branch)
                                          contents, info)) != no_stub)
          && (h == NULL
 
              || (h != htab->ovly_load && h != htab->ovly_return)))
 
        {
        {
          unsigned int ovl = 0;
          unsigned int ovl = 0;
          struct got_entry *g, **head;
          struct got_entry *g, **head;
 
 
          if (branch)
          if (stub_type != nonovl_stub)
            ovl = (spu_elf_section_data (input_section->output_section)
            ovl = iovl;
                   ->u.o.ovl_index);
 
 
 
          if (h != NULL)
          if (h != NULL)
            head = &h->got.glist;
            head = &h->got.glist;
          else
          else
            head = elf_local_got_ents (input_bfd) + r_symndx;
            head = elf_local_got_ents (input_bfd) + r_symndx;
 
 
          for (g = *head; g != NULL; g = g->next)
          for (g = *head; g != NULL; g = g->next)
            if (g->ovl == ovl || g->ovl == 0)
            if (htab->params->ovly_flavour == ovly_soft_icache
 
                ? (g->ovl == ovl
 
                   && g->br_addr == (rel->r_offset
 
                                     + input_section->output_offset
 
                                     + input_section->output_section->vma))
 
                : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
              break;
              break;
          if (g == NULL)
          if (g == NULL)
            abort ();
            abort ();
 
 
          relocation = g->stub_addr;
          relocation = g->stub_addr;
          addend = 0;
          addend = 0;
        }
        }
 
      else
 
        {
 
          /* For soft icache, encode the overlay index into addresses.  */
 
          if (htab->params->ovly_flavour == ovly_soft_icache
 
              && (r_type == R_SPU_ADDR16_HI
 
                  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
 
              && !is_ea_sym)
 
            {
 
              unsigned int ovl = overlay_index (sec);
 
              if (ovl != 0)
 
                {
 
                  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
 
                  relocation += set_id << 18;
 
                }
 
            }
 
        }
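      /* For example, with a hypothetical 32-line software cache
         (num_lines_log2 == 5), overlays 1..32 get set_id 1 (0x40000
         added to the address) and overlay 33 gets set_id 2 (0x80000
         added), so the soft-icache runtime can recover the owning
         group of lines from the upper address bits.  */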
 
 
 
      if (htab->params->emit_fixups && !info->relocatable
 
          && (input_section->flags & SEC_ALLOC) != 0
 
          && r_type == R_SPU_ADDR32)
 
        {
 
          bfd_vma offset;
 
          offset = rel->r_offset + input_section->output_section->vma
 
                   + input_section->output_offset;
 
          spu_elf_emit_fixup (output_bfd, info, offset);
 
        }
 
 
 
      if (unresolved_reloc)
 
        ;
 
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
 
        {
 
          if (is_ea_sym)
 
            {
 
              /* ._ea is a special section that isn't allocated in SPU
 
                 memory, but rather occupies space in PPU memory as
 
                 part of an embedded ELF image.  If this reloc is
 
                 against a symbol defined in ._ea, then transform the
 
                 reloc into an equivalent one without a symbol
 
                 relative to the start of the ELF image.  */
 
              rel->r_addend += (relocation
 
                                - ea->vma
 
                                + elf_section_data (ea)->this_hdr.sh_offset);
 
              rel->r_info = ELF32_R_INFO (0, r_type);
 
            }
 
          emit_these_relocs = TRUE;
 
          continue;
 
        }
 
      else if (is_ea_sym)
 
        unresolved_reloc = TRUE;
 
 
 
      if (unresolved_reloc)
 
        {
 
          (*_bfd_error_handler)
 
            (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
 
             input_bfd,
 
             bfd_get_section_name (input_bfd, input_section),
 
             (long) rel->r_offset,
 
             howto->name,
 
             sym_name);
 
          ret = FALSE;
 
        }
 
 
      r = _bfd_final_link_relocate (howto,
      r = _bfd_final_link_relocate (howto,
                                    input_bfd,
                                    input_bfd,
                                    input_section,
                                    input_section,
                                    contents,
                                    contents,
Line 2820... Line 5070...
        }
        }
    }
    }
 
 
  if (ret
  if (ret
      && emit_these_relocs
      && emit_these_relocs
      && !info->relocatable
 
      && !info->emitrelocations)
      && !info->emitrelocations)
    {
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;
      Elf_Internal_Shdr *rel_hdr;
 
 
Line 2848... Line 5097...
  return ret;
  return ret;
}
}
 
 
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
 
 
static bfd_boolean
static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
spu_elf_output_symbol_hook (struct bfd_link_info *info,
                            const char *sym_name ATTRIBUTE_UNUSED,
                            const char *sym_name ATTRIBUTE_UNUSED,
                            Elf_Internal_Sym *sym,
                            Elf_Internal_Sym *sym,
                            asection *sym_sec ATTRIBUTE_UNUSED,
                            asection *sym_sec ATTRIBUTE_UNUSED,
                            struct elf_link_hash_entry *h)
                            struct elf_link_hash_entry *h)
Line 2865... Line 5114...
      && (h->root.type == bfd_link_hash_defined
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
    {
      struct got_entry *g = h->got.glist;
      struct got_entry *g;
 
 
      if (g != NULL && g->ovl == 0)
      for (g = h->got.glist; g != NULL; g = g->next)
 
        if (htab->params->ovly_flavour == ovly_soft_icache
 
            ? g->br_addr == g->stub_addr
 
            : g->addend == 0 && g->ovl == 0)
        {
        {
          sym->st_shndx = (_bfd_elf_section_from_bfd_section
          sym->st_shndx = (_bfd_elf_section_from_bfd_section
                           (htab->stub_sec[0]->output_section->owner,
                           (htab->stub_sec[0]->output_section->owner,
                            htab->stub_sec[0]->output_section));
                            htab->stub_sec[0]->output_section));
          sym->st_value = g->stub_addr;
          sym->st_value = g->stub_addr;
 
            break;
        }
        }
    }
    }
 
 
  return TRUE;
  return 1;
}
}
 
 
static int spu_plugin = 0;
static int spu_plugin = 0;
 
 
void
void
Line 2907... Line 5160...
   segments for overlays.  */
   segments for overlays.  */
 
 
static int
static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  int extra = 0;
  int extra = htab->num_overlays;
 
  asection *sec;
  asection *sec;
 
 
 
  if (info != NULL)
 
    {
 
      struct spu_link_hash_table *htab = spu_hash_table (info);
 
      extra = htab->num_overlays;
 
    }
 
 
  if (extra)
  if (extra)
    ++extra;
    ++extra;
 
 
  sec = bfd_get_section_by_name (abfd, ".toe");
  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
Line 2928... Line 5186...
 
 
static bfd_boolean
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
{
  asection *toe, *s;
  asection *toe, *s;
  struct elf_segment_map *m;
  struct elf_segment_map *m, *m_overlay;
 
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;
  unsigned int i;
 
 
  if (info == NULL)
  if (info == NULL)
    return TRUE;
    return TRUE;
 
 
Line 2975... Line 5234...
                m->next = m2;
                m->next = m2;
              }
              }
            break;
            break;
          }
          }
 
 
  return TRUE;
 
}
 
 
 
/* Check that all loadable section VMAs lie in the range
  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
   LO .. HI inclusive.  */
     PT_LOAD segments.  This can cause the .ovl.init section to be
 
     overwritten with the contents of some overlay segment.  To work
 
     around this issue, we ensure that all PF_OVERLAY segments are
 
     sorted first amongst the program headers; this ensures that even
 
     with a broken loader, the .ovl.init section (which is not marked
 
     as PF_OVERLAY) will be placed into SPU local store on startup.  */
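  /* Concretely (for illustration): after the reordering below, the
     PF_OVERLAY PT_LOAD segments for .ovly1, .ovly2, ... come first in
     the program headers and the ordinary PT_LOAD covering .ovl.init
     follows, so a loader that maps every PT_LOAD in order writes the
     non-overlay image last and .ovl.init survives in local store.  */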
 
 
 
  /* Move all overlay segments onto a separate list.  */
 
  p = &elf_tdata (abfd)->segment_map;
 
  p_overlay = &m_overlay;
 
  while (*p != NULL)
 
    {
 
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
 
          && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
 
        {
 
          struct elf_segment_map *m = *p;
 
          *p = m->next;
 
          *p_overlay = m;
 
          p_overlay = &m->next;
 
          continue;
 
        }
 
 
asection *
      p = &((*p)->next);
spu_elf_check_vma (bfd *abfd, bfd_vma lo, bfd_vma hi)
    }
{
 
  struct elf_segment_map *m;
 
  unsigned int i;
 
 
 
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
  /* Re-insert overlay segments at the head of the segment map.  */
    if (m->p_type == PT_LOAD)
  *p_overlay = elf_tdata (abfd)->segment_map;
      for (i = 0; i < m->count; i++)
  elf_tdata (abfd)->segment_map = m_overlay;
        if (m->sections[i]->size != 0
 
            && (m->sections[i]->vma < lo
 
                || m->sections[i]->vma > hi
 
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
 
          return m->sections[i];
 
 
 
  return NULL;
  return TRUE;
}
}
 
 
/* Tweak the section type of .note.spu_name.  */
/* Tweak the section type of .note.spu_name.  */
 
 
static bfd_boolean
static bfd_boolean
Line 3043... Line 5312...
            && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
            && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
          {
          {
            /* Mark this as an overlay header.  */
            /* Mark this as an overlay header.  */
            phdr[i].p_flags |= PF_OVERLAY;
            phdr[i].p_flags |= PF_OVERLAY;
 
 
            if (htab->ovtab != NULL && htab->ovtab->size != 0)
            if (htab->ovtab != NULL && htab->ovtab->size != 0
 
                && htab->params->ovly_flavour != ovly_soft_icache)
              {
              {
                bfd_byte *p = htab->ovtab->contents;
                bfd_byte *p = htab->ovtab->contents;
                unsigned int off = o * 16 + 8;
                unsigned int off = o * 16 + 8;
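                /* The overlay table entries written out elsewhere in
                   this file are 16 bytes each: vma, size, file offset
                   and buffer number, so o * 16 + 8 addresses the
                   file-offset word of overlay O.  */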
 
 
                /* Write file_off into _ovly_table.  */
                /* Write file_off into _ovly_table.  */
                bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
                bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
              }
              }
          }
          }
 
      /* Soft-icache has its file offset put in .ovl.init.  */
 
      if (htab->init != NULL && htab->init->size != 0)
 
        {
 
          bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
 
 
 
          bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
 
        }
    }
    }
 
 
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     linker scripts, but don't create overlapping segments if
Line 3098... Line 5375...
      }
      }
 
 
  return TRUE;
  return TRUE;
}
}
 
 
 
bfd_boolean
 
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
 
{
 
  struct spu_link_hash_table *htab = spu_hash_table (info);
 
  if (htab->params->emit_fixups)
 
    {
 
      asection *sfixup = htab->sfixup;
 
      int fixup_count = 0;
 
      bfd *ibfd;
 
      size_t size;
 
 
 
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
 
        {
 
          asection *isec;
 
 
 
          if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
 
            continue;
 
 
 
          /* Walk over each section attached to the input bfd.  */
 
          for (isec = ibfd->sections; isec != NULL; isec = isec->next)
 
            {
 
              Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
 
              bfd_vma base_end;
 
 
 
              /* If there aren't any relocs, then there's nothing more
 
                 to do.  */
 
              if ((isec->flags & SEC_RELOC) == 0
 
                  || isec->reloc_count == 0)
 
                continue;
 
 
 
              /* Get the relocs.  */
 
              internal_relocs =
 
                _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
 
                                           info->keep_memory);
 
              if (internal_relocs == NULL)
 
                return FALSE;
 
 
 
              /* 1 quadword can contain up to 4 R_SPU_ADDR32
 
                 relocations.  They are stored in a single word by
 
                 saving the upper 28 bits of the address and setting the
 
                 lower 4 bits to a bit mask of the words that have the
 
                 relocation.  BASE_END keeps track of the next quadword. */
 
              irela = internal_relocs;
 
              irelaend = irela + isec->reloc_count;
 
              base_end = 0;
 
              for (; irela < irelaend; irela++)
 
                if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
 
                    && irela->r_offset >= base_end)
 
                  {
 
                    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
 
                    fixup_count++;
 
                  }
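              /* E.g. R_SPU_ADDR32 relocs at offsets 0x100, 0x104 and
                 0x108 share one quadword, so only the first bumps
                 fixup_count; a further reloc at 0x110 starts a new
                 quadword and costs a second record (assuming relocs
                 ordered by increasing offset, as this pass relies on).  */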
 
            }
 
        }
 
 
 
      /* We always have a NULL fixup as a sentinel.  */
 
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
 
      if (!bfd_set_section_size (output_bfd, sfixup, size))
 
        return FALSE;
 
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
 
      if (sfixup->contents == NULL)
 
        return FALSE;
 
    }
 
  return TRUE;
 
}
 
 
#define TARGET_BIG_SYM          bfd_elf32_spu_vec
#define TARGET_BIG_SYM          bfd_elf32_spu_vec
#define TARGET_BIG_NAME         "elf32-spu"
#define TARGET_BIG_NAME         "elf32-spu"
#define ELF_ARCH                bfd_arch_spu
#define ELF_ARCH                bfd_arch_spu
#define ELF_MACHINE_CODE        EM_SPU
#define ELF_MACHINE_CODE        EM_SPU
/* This matches the alignment needed for DMA.  */
/* This matches the alignment needed for DMA.  */
Line 3114... Line 5457...
#define elf_info_to_howto                       spu_elf_info_to_howto
#define elf_info_to_howto                       spu_elf_info_to_howto
#define elf_backend_count_relocs                spu_elf_count_relocs
#define elf_backend_count_relocs                spu_elf_count_relocs
#define elf_backend_relocate_section            spu_elf_relocate_section
#define elf_backend_relocate_section            spu_elf_relocate_section
#define elf_backend_symbol_processing           spu_elf_backend_symbol_processing
#define elf_backend_symbol_processing           spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook     spu_elf_output_symbol_hook
#define elf_backend_link_output_symbol_hook     spu_elf_output_symbol_hook
 
#define elf_backend_object_p                    spu_elf_object_p
#define bfd_elf32_new_section_hook              spu_elf_new_section_hook
#define bfd_elf32_new_section_hook              spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create    spu_elf_link_hash_table_create
#define bfd_elf32_bfd_link_hash_table_create    spu_elf_link_hash_table_create
 
 
#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
#define elf_backend_modify_segment_map          spu_elf_modify_segment_map
#define elf_backend_modify_segment_map          spu_elf_modify_segment_map
