OpenCores
URL https://opencores.org/ocsvn/scarts/scarts/trunk

Subversion Repositories scarts

[/] [scarts/] [trunk/] [toolchain/] [scarts-binutils/] [binutils-2.19.1/] [bfd/] [elf32-spu.c] - Blame information for rev 6

Details | Compare with Previous | View Log

Line No. Rev Author Line
1 6 jlechner
/* SPU specific support for 32-bit ELF
2
 
3
   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.
4
 
5
   This file is part of BFD, the Binary File Descriptor library.
6
 
7
   This program is free software; you can redistribute it and/or modify
8
   it under the terms of the GNU General Public License as published by
9
   the Free Software Foundation; either version 3 of the License, or
10
   (at your option) any later version.
11
 
12
   This program is distributed in the hope that it will be useful,
13
   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
   GNU General Public License for more details.
16
 
17
   You should have received a copy of the GNU General Public License along
18
   with this program; if not, write to the Free Software Foundation, Inc.,
19
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
20
 
21
#include "sysdep.h"
22
#include "libiberty.h"
23
#include "bfd.h"
24
#include "bfdlink.h"
25
#include "libbfd.h"
26
#include "elf-bfd.h"
27
#include "elf/spu.h"
28
#include "elf32-spu.h"
29
 
30
/* We use RELA style relocs.  Don't define USE_REL.  */
31
 
32
static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33
                                           void *, asection *,
34
                                           bfd *, char **);
35
 
36
/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.

   HOWTO argument order is: type, rightshift, size, bitsize,
   pc_relative, bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  The dst_mask
   values select the operand field of the SPU instruction encoding.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative forms have split operand fields, so they
     need a special function rather than bfd_elf_generic_reloc.  */
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,          "SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
};
92
 
93
/* Sections whose ELF type and flags are fixed by name on SPU.  The
   second field is the length of the name prefix matched by the generic
   ELF code; the terminating all-zero entry ends the table.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
98
 
99
static enum elf_spu_reloc_type
100
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
101
{
102
  switch (code)
103
    {
104
    default:
105
      return R_SPU_NONE;
106
    case BFD_RELOC_SPU_IMM10W:
107
      return R_SPU_ADDR10;
108
    case BFD_RELOC_SPU_IMM16W:
109
      return R_SPU_ADDR16;
110
    case BFD_RELOC_SPU_LO16:
111
      return R_SPU_ADDR16_LO;
112
    case BFD_RELOC_SPU_HI16:
113
      return R_SPU_ADDR16_HI;
114
    case BFD_RELOC_SPU_IMM18:
115
      return R_SPU_ADDR18;
116
    case BFD_RELOC_SPU_PCREL16:
117
      return R_SPU_REL16;
118
    case BFD_RELOC_SPU_IMM7:
119
      return R_SPU_ADDR7;
120
    case BFD_RELOC_SPU_IMM8:
121
      return R_SPU_NONE;
122
    case BFD_RELOC_SPU_PCREL9a:
123
      return R_SPU_REL9;
124
    case BFD_RELOC_SPU_PCREL9b:
125
      return R_SPU_REL9I;
126
    case BFD_RELOC_SPU_IMM10:
127
      return R_SPU_ADDR10I;
128
    case BFD_RELOC_SPU_IMM16:
129
      return R_SPU_ADDR16I;
130
    case BFD_RELOC_32:
131
      return R_SPU_ADDR32;
132
    case BFD_RELOC_32_PCREL:
133
      return R_SPU_REL32;
134
    case BFD_RELOC_SPU_PPU32:
135
      return R_SPU_PPU32;
136
    case BFD_RELOC_SPU_PPU64:
137
      return R_SPU_PPU64;
138
    }
139
}
140
 
141
static void
142
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
143
                       arelent *cache_ptr,
144
                       Elf_Internal_Rela *dst)
145
{
146
  enum elf_spu_reloc_type r_type;
147
 
148
  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
149
  BFD_ASSERT (r_type < R_SPU_max);
150
  cache_ptr->howto = &elf_howto_table[(int) r_type];
151
}
152
 
153
static reloc_howto_type *
154
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
155
                           bfd_reloc_code_real_type code)
156
{
157
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
158
 
159
  if (r_type == R_SPU_NONE)
160
    return NULL;
161
 
162
  return elf_howto_table + r_type;
163
}
164
 
165
static reloc_howto_type *
166
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
167
                           const char *r_name)
168
{
169
  unsigned int i;
170
 
171
  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
172
    if (elf_howto_table[i].name != NULL
173
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
174
      return &elf_howto_table[i];
175
 
176
  return NULL;
177
}
178
 
179
/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  These have split operand
   fields (see the dst_mask values in elf_howto_table), so the generic
   reloc machinery cannot place the value; this special function does
   the insertion itself.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  /* Convert the target-byte address into an octet offset into DATA.  */
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  Common symbols contribute no value of their
     own; only their (output) section base counts.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.
     NOTE(review): only the input section's output address is subtracted
     here, not reloc_entry->address as the generic pcrel path would do
     for a pcrel_offset howto — confirm this is the intended bias for
     this code path.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* VAL becomes a word (4-byte) offset; it must fit a signed 9-bit
     field.  The unsigned add-and-compare checks both bounds
     (-256..255) in one test.  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
227
 
228
static bfd_boolean
229
spu_elf_new_section_hook (bfd *abfd, asection *sec)
230
{
231
  if (!sec->used_by_bfd)
232
    {
233
      struct _spu_elf_section_data *sdata;
234
 
235
      sdata = bfd_zalloc (abfd, sizeof (*sdata));
236
      if (sdata == NULL)
237
        return FALSE;
238
      sec->used_by_bfd = sdata;
239
    }
240
 
241
  return _bfd_elf_new_section_hook (abfd, sec);
242
}
243
 
244
/* Set up overlay info for executables.  Walks the program headers of an
   executable or shared object, numbers each PT_LOAD segment flagged
   PF_OVERLAY, and records the overlay index and buffer number in the
   SPU section data of every section covered by that segment.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    /* A new overlay buffer is counted whenever the low 18 bits
	       of p_vaddr differ from the previous overlay segment;
	       overlays sharing a buffer load at the same address.  */
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Tag every section within this segment.  Section index 0
	       is the null section, so start at 1.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
281
 
282
/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
283
   strip --strip-unneeded will not remove them.  */
284
 
285
static void
286
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
287
{
288
  if (sym->name != NULL
289
      && sym->section != bfd_abs_section_ptr
290
      && strncmp (sym->name, "_EAR_", 5) == 0)
291
    sym->flags |= BSF_KEEP;
292
}
293
 
294
/* SPU ELF linker hash table.  Everything from OVTAB down is zeroed by
   spu_elf_link_hash_table_create.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Hash entries for the "__ovly_load" and "__ovly_return" overlay
     manager symbols (see spu_elf_find_overlays).  */
  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* If reserved is not specified, stack analysis will calculate a value
     for the stack.  This parameter adjusts that value to allow for
     negative sp access (the ABI says 2000 bytes below sp are valid,
     and the overlay manager uses some of this area).  */
  int extra_stack_space;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Stash various callbacks for --auto-overlay.  */
  void (*spu_elf_load_ovl_mgr) (void);
  FILE *(*spu_elf_open_overlay_script) (void);
  void (*spu_elf_relink) (void);

  /* Bit 0 set if --auto-overlay.
     Bit 1 set if --auto-relink.
     Bit 2 set if --overlay-rodata.  */
  unsigned int auto_overlay : 3;
#define AUTO_OVERLAY 1
#define AUTO_RELINK 2
#define OVERLAY_RODATA 4

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_err : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};
365
 
366
/* Hijack the generic got fields for overlay stub accounting.  One
   entry records one overlay stub needed for a (symbol, addend)
   reference.  */

struct got_entry
{
  struct got_entry *next;	/* Next stub entry for the same symbol.  */
  unsigned int ovl;		/* Overlay index; 0 for a stub in the
				   non-overlay area.  */
  bfd_vma addend;		/* Reloc addend this stub covers.  */
  bfd_vma stub_addr;		/* Final stub address, or (bfd_vma) -1
				   while the stub is not yet built.  */
};

/* Retrieve the SPU hash table from a generic link info pointer.  */
#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))
378
 
379
/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
				      _bfd_elf_link_hash_newfunc,
				      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  /* HTAB came from bfd_malloc, so clear the SPU-specific tail of the
     struct: every field from OVTAB onwards.  */
  memset (&htab->ovtab, 0,
	  sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  /* The got refcount/offset fields are hijacked to hold got_entry
     lists (see struct got_entry above), so start them out empty.  */
  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}
407
 
408
/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.
   Any of HP, SYMP, SYMSECP may be NULL if the caller doesn't need that
   result.  Returns FALSE only if local symbols could not be read.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  /* Indices at or above sh_info refer to global symbols.  */
  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      /* Follow indirect and warning links to the real symbol.  */
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined (or weakly defined) symbols have a section.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  /* Reuse symbols already swapped in to symtab_hdr->contents if
	     present, otherwise read the local symbols now and cache
	     them in *LOCSYMSP for later calls.  */
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
477
 
478
/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.
   STACK_ANALYSIS and EMIT_STACK_SYMS are command-line options stashed
   in the hash table for later passes.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info,
			 int stack_analysis,
			 int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  /* See whether any input already supplies the note section.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;

      /* Note contents: a 12-byte header (namesz, descsz, type), then
	 the note name (SPU_PLUGIN_NAME) and the output file name as
	 descriptor, each padded to a 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      /* bfd_zalloc gives us the zero padding between fields.  */
      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}
535
 
536
/* qsort predicate to sort sections by vma.  */
537
 
538
static int
539
sort_sections (const void *a, const void *b)
540
{
541
  const asection *const *s1 = a;
542
  const asection *const *s2 = b;
543
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
544
 
545
  if (delta != 0)
546
    return delta < 0 ? -1 : 1;
547
 
548
  return (*s1)->index - (*s2)->index;
549
}
550
 
551
/* Identify overlays in the output bfd, and number them.  */
552
 
553
bfd_boolean
554
spu_elf_find_overlays (struct bfd_link_info *info)
555
{
556
  struct spu_link_hash_table *htab = spu_hash_table (info);
557
  asection **alloc_sec;
558
  unsigned int i, n, ovl_index, num_buf;
559
  asection *s;
560
  bfd_vma ovl_end;
561
 
562
  if (info->output_bfd->section_count < 2)
563
    return FALSE;
564
 
565
  alloc_sec
566
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
567
  if (alloc_sec == NULL)
568
    return FALSE;
569
 
570
  /* Pick out all the alloced sections.  */
571
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
572
    if ((s->flags & SEC_ALLOC) != 0
573
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
574
        && s->size != 0)
575
      alloc_sec[n++] = s;
576
 
577
  if (n == 0)
578
    {
579
      free (alloc_sec);
580
      return FALSE;
581
    }
582
 
583
  /* Sort them by vma.  */
584
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);
585
 
586
  /* Look for overlapping vmas.  Any with overlap must be overlays.
587
     Count them.  Also count the number of overlay regions.  */
588
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
589
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
590
    {
591
      s = alloc_sec[i];
592
      if (s->vma < ovl_end)
593
        {
594
          asection *s0 = alloc_sec[i - 1];
595
 
596
          if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
597
            {
598
              alloc_sec[ovl_index] = s0;
599
              spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
600
              spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
601
            }
602
          alloc_sec[ovl_index] = s;
603
          spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
604
          spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
605
          if (s0->vma != s->vma)
606
            {
607
              info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
608
                                        "do not start at the same address.\n"),
609
                                      s0, s);
610
              return FALSE;
611
            }
612
          if (ovl_end < s->vma + s->size)
613
            ovl_end = s->vma + s->size;
614
        }
615
      else
616
        ovl_end = s->vma + s->size;
617
    }
618
 
619
  htab->num_overlays = ovl_index;
620
  htab->num_buf = num_buf;
621
  htab->ovl_sec = alloc_sec;
622
  htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
623
                                          FALSE, FALSE, FALSE);
624
  htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
625
                                            FALSE, FALSE, FALSE);
626
  return ovl_index != 0;
627
}
628
 
629
/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif
/* SPU instruction words with all operand fields zero; register and
   immediate fields are OR'd in when the stubs are emitted.  */
#define BRSL	0x33000000
#define BR	0x32000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
641
 
642
/* Return true for all relative and absolute branch instructions.
643
   bra   00110000 0..
644
   brasl 00110001 0..
645
   br    00110010 0..
646
   brsl  00110011 0..
647
   brz   00100000 0..
648
   brnz  00100001 0..
649
   brhz  00100010 0..
650
   brhnz 00100011 0..  */
651
 
652
static bfd_boolean
653
is_branch (const unsigned char *insn)
654
{
655
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
656
}
657
 
658
/* Return true for all indirect branch instructions.
659
   bi     00110101 000
660
   bisl   00110101 001
661
   iret   00110101 010
662
   bisled 00110101 011
663
   biz    00100101 000
664
   binz   00100101 001
665
   bihz   00100101 010
666
   bihnz  00100101 011  */
667
 
668
static bfd_boolean
669
is_indirect_branch (const unsigned char *insn)
670
{
671
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
672
}
673
 
674
/* Return true for branch hint instructions.
675
   hbra  0001000..
676
   hbrr  0001001..  */
677
 
678
static bfd_boolean
679
is_hint (const unsigned char *insn)
680
{
681
  return (insn[0] & 0xfc) == 0x10;
682
}
683
 
684
/* True if INPUT_SECTION might need overlay stubs.  */
685
 
686
static bfd_boolean
687
maybe_needs_stubs (asection *input_section, bfd *output_bfd)
688
{
689
  /* No stubs for debug sections and suchlike.  */
690
  if ((input_section->flags & SEC_ALLOC) == 0)
691
    return FALSE;
692
 
693
  /* No stubs for link-once sections that will be discarded.  */
694
  if (input_section->output_section == NULL
695
      || input_section->output_section->owner != output_bfd)
696
    return FALSE;
697
 
698
  /* Don't create stubs for .eh_frame references.  */
699
  if (strcmp (input_section->name, ".eh_frame") == 0)
700
    return FALSE;
701
 
702
  return TRUE;
703
}
704
 
705
/* Classification of the overlay-stub requirement for a reloc, as
   returned by needs_ovl_stub.  */
enum _stub_type
{
  no_stub,	/* No stub needed.  */
  ovl_stub,	/* Stub needed for an overlay call.  */
  nonovl_stub,	/* Stub needed in the non-overlay area (address taken).  */
  stub_error	/* Failed to read the section contents.  */
};
712
 
713
/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.

   H/SYM describe the referenced symbol (global hash or local sym, the
   other being NULL), SYM_SEC its section.  IRELA is the reloc in
   INPUT_SECTION; CONTENTS is the full section contents or NULL, in
   which case the 4 bytes at the reloc are read on demand.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch;
  enum _stub_type ret = no_stub;

  /* Undefined or discarded target, or one without SPU section data:
     nothing to do.  */
  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || sym_sec->output_section->owner != info->output_bfd
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = ovl_stub;
    }

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->non_overlay_stubs)
    return ret;

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      bfd_byte insn[4];

      /* Fetch the instruction at the reloc so we can tell branches
	 from data references.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      if (is_branch (contents) || is_hint (contents))
	{
	  branch = TRUE;
	  /* (insn[0] & 0xfd) == 0x31 matches the and-link forms brasl
	     and brsl, i.e. calls.  The contents != insn test limits the
	     (potentially expensive) warning to callers that supplied
	     full section contents.  */
	  if ((contents[0] & 0xfd) == 0x31
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      /* Note: _bfd_error_handler consumes %B/%A arguments
		 first, so the bfd is passed before the name even
		 though %s precedes %B in the format.  */
	      (*_bfd_error_handler) (_("warning: call to non-function"
				       " symbol %s defined in %B"),
				     sym_sec->owner, sym_name);

	    }
	}
    }

  /* Non-branch references to non-function, non-code targets are plain
     data references; no stub.  */
  if (sym_type != STT_FUNC
      && !branch
      && (sym_sec->flags & SEC_CODE) == 0)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    ret = ovl_stub;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
}
828
 
829
static bfd_boolean
830
count_stub (struct spu_link_hash_table *htab,
831
            bfd *ibfd,
832
            asection *isec,
833
            enum _stub_type stub_type,
834
            struct elf_link_hash_entry *h,
835
            const Elf_Internal_Rela *irela)
836
{
837
  unsigned int ovl = 0;
838
  struct got_entry *g, **head;
839
  bfd_vma addend;
840
 
841
  /* If this instruction is a branch or call, we need a stub
842
     for it.  One stub per function per overlay.
843
     If it isn't a branch, then we are taking the address of
844
     this function so need a stub in the non-overlay area
845
     for it.  One stub per function.  */
846
  if (stub_type != nonovl_stub)
847
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
848
 
849
  if (h != NULL)
850
    head = &h->got.glist;
851
  else
852
    {
853
      if (elf_local_got_ents (ibfd) == NULL)
854
        {
855
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
856
                               * sizeof (*elf_local_got_ents (ibfd)));
857
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
858
          if (elf_local_got_ents (ibfd) == NULL)
859
            return FALSE;
860
        }
861
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
862
    }
863
 
864
  addend = 0;
865
  if (irela != NULL)
866
    addend = irela->r_addend;
867
 
868
  if (ovl == 0)
869
    {
870
      struct got_entry *gnext;
871
 
872
      for (g = *head; g != NULL; g = g->next)
873
        if (g->addend == addend && g->ovl == 0)
874
          break;
875
 
876
      if (g == NULL)
877
        {
878
          /* Need a new non-overlay area stub.  Zap other stubs.  */
879
          for (g = *head; g != NULL; g = gnext)
880
            {
881
              gnext = g->next;
882
              if (g->addend == addend)
883
                {
884
                  htab->stub_count[g->ovl] -= 1;
885
                  free (g);
886
                }
887
            }
888
        }
889
    }
890
  else
891
    {
892
      for (g = *head; g != NULL; g = g->next)
893
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
894
          break;
895
    }
896
 
897
  if (g == NULL)
898
    {
899
      g = bfd_malloc (sizeof *g);
900
      if (g == NULL)
901
        return FALSE;
902
      g->ovl = ovl;
903
      g->addend = addend;
904
      g->stub_addr = (bfd_vma) -1;
905
      g->next = *head;
906
      *head = g;
907
 
908
      htab->stub_count[ovl] += 1;
909
    }
910
 
911
  return TRUE;
912
}
913
 
914
/* Two instruction overlay stubs look like:
915
 
916
   brsl $75,__ovly_load
917
   .word target_ovl_and_address
918
 
919
   ovl_and_address is a word with the overlay number in the top 14 bits
920
   and local store address in the bottom 18 bits.
921
 
922
   Four instruction overlay stubs look like:
923
 
924
   ila $78,ovl_number
925
   lnop
926
   ila $79,target_address
927
   br __ovly_load  */
928
 
929
static bfd_boolean
930
build_stub (struct spu_link_hash_table *htab,
931
            bfd *ibfd,
932
            asection *isec,
933
            enum _stub_type stub_type,
934
            struct elf_link_hash_entry *h,
935
            const Elf_Internal_Rela *irela,
936
            bfd_vma dest,
937
            asection *dest_sec)
938
{
939
  unsigned int ovl;
940
  struct got_entry *g, **head;
941
  asection *sec;
942
  bfd_vma addend, val, from, to;
943
 
944
  ovl = 0;
945
  if (stub_type != nonovl_stub)
946
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
947
 
948
  if (h != NULL)
949
    head = &h->got.glist;
950
  else
951
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
952
 
953
  addend = 0;
954
  if (irela != NULL)
955
    addend = irela->r_addend;
956
 
957
  for (g = *head; g != NULL; g = g->next)
958
    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
959
      break;
960
  if (g == NULL)
961
    abort ();
962
 
963
  if (g->ovl == 0 && ovl != 0)
964
    return TRUE;
965
 
966
  if (g->stub_addr != (bfd_vma) -1)
967
    return TRUE;
968
 
969
  sec = htab->stub_sec[ovl];
970
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
971
  from = sec->size + sec->output_offset + sec->output_section->vma;
972
  g->stub_addr = from;
973
  to = (htab->ovly_load->root.u.def.value
974
        + htab->ovly_load->root.u.def.section->output_offset
975
        + htab->ovly_load->root.u.def.section->output_section->vma);
976
  val = to - from;
977
  if (OVL_STUB_SIZE == 16)
978
    val -= 12;
979
  if (((dest | to | from) & 3) != 0
980
      || val + 0x40000 >= 0x80000)
981
    {
982
      htab->stub_err = 1;
983
      return FALSE;
984
    }
985
  ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
986
 
987
  if (OVL_STUB_SIZE == 16)
988
    {
989
      bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
990
                  sec->contents + sec->size);
991
      bfd_put_32 (sec->owner, LNOP,
992
                  sec->contents + sec->size + 4);
993
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
994
                  sec->contents + sec->size + 8);
995
      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
996
                  sec->contents + sec->size + 12);
997
    }
998
  else if (OVL_STUB_SIZE == 8)
999
    {
1000
      bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
1001
                  sec->contents + sec->size);
1002
 
1003
      val = (dest & 0x3ffff) | (ovl << 18);
1004
      bfd_put_32 (sec->owner, val,
1005
                  sec->contents + sec->size + 4);
1006
    }
1007
  else
1008
    abort ();
1009
  sec->size += OVL_STUB_SIZE;
1010
 
1011
  if (htab->emit_stub_syms)
1012
    {
1013
      size_t len;
1014
      char *name;
1015
      int add;
1016
 
1017
      len = 8 + sizeof (".ovl_call.") - 1;
1018
      if (h != NULL)
1019
        len += strlen (h->root.root.string);
1020
      else
1021
        len += 8 + 1 + 8;
1022
      add = 0;
1023
      if (irela != NULL)
1024
        add = (int) irela->r_addend & 0xffffffff;
1025
      if (add != 0)
1026
        len += 1 + 8;
1027
      name = bfd_malloc (len);
1028
      if (name == NULL)
1029
        return FALSE;
1030
 
1031
      sprintf (name, "%08x.ovl_call.", g->ovl);
1032
      if (h != NULL)
1033
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1034
      else
1035
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1036
                 dest_sec->id & 0xffffffff,
1037
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1038
      if (add != 0)
1039
        sprintf (name + len - 9, "+%x", add);
1040
 
1041
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1042
      free (name);
1043
      if (h == NULL)
1044
        return FALSE;
1045
      if (h->root.type == bfd_link_hash_new)
1046
        {
1047
          h->root.type = bfd_link_hash_defined;
1048
          h->root.u.def.section = sec;
1049
          h->root.u.def.value = sec->size - OVL_STUB_SIZE;
1050
          h->size = OVL_STUB_SIZE;
1051
          h->type = STT_FUNC;
1052
          h->ref_regular = 1;
1053
          h->def_regular = 1;
1054
          h->ref_regular_nonweak = 1;
1055
          h->forced_local = 1;
1056
          h->non_elf = 0;
1057
        }
1058
    }
1059
 
1060
  return TRUE;
1061
}
1062
 
1063
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1064
   symbols.  */
1065
 
1066
static bfd_boolean
1067
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1068
{
1069
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1070
     invoked by the PPU.  */
1071
  struct bfd_link_info *info = inf;
1072
  struct spu_link_hash_table *htab = spu_hash_table (info);
1073
  asection *sym_sec;
1074
 
1075
  if ((h->root.type == bfd_link_hash_defined
1076
       || h->root.type == bfd_link_hash_defweak)
1077
      && h->def_regular
1078
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1079
      && (sym_sec = h->root.u.def.section) != NULL
1080
      && sym_sec->output_section != NULL
1081
      && sym_sec->output_section->owner == info->output_bfd
1082
      && spu_elf_section_data (sym_sec->output_section) != NULL
1083
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1084
          || htab->non_overlay_stubs))
1085
    {
1086
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1087
    }
1088
 
1089
  return TRUE;
1090
}
1091
 
1092
static bfd_boolean
1093
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1094
{
1095
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1096
     invoked by the PPU.  */
1097
  struct bfd_link_info *info = inf;
1098
  struct spu_link_hash_table *htab = spu_hash_table (info);
1099
  asection *sym_sec;
1100
 
1101
  if ((h->root.type == bfd_link_hash_defined
1102
       || h->root.type == bfd_link_hash_defweak)
1103
      && h->def_regular
1104
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1105
      && (sym_sec = h->root.u.def.section) != NULL
1106
      && sym_sec->output_section != NULL
1107
      && sym_sec->output_section->owner == info->output_bfd
1108
      && spu_elf_section_data (sym_sec->output_section) != NULL
1109
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1110
          || htab->non_overlay_stubs))
1111
    {
1112
      return build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
1113
                         h->root.u.def.value, sym_sec);
1114
    }
1115
 
1116
  return TRUE;
1117
}
1118
 
1119
/* Size (BUILD is FALSE) or build (BUILD is TRUE) overlay stubs.
   Scans every reloc of every section of every SPU input bfd, asking
   needs_ovl_stub whether the branch/call needs an overlay stub, then
   either counts the stub (count_stub) or emits it (build_stub).  The
   two passes must visit relocs identically so sizes agree.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip non-SPU input files (e.g. linker scripts' binaries).  */
      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec, info->output_bfd))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              /* The error labels below are also the shared cleanup
                 path for all later failures in this loop.  */
              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return FALSE;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              /* Lazily allocate the per-overlay stub counters, one
                 extra for the non-overlay area.  */
              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      /* Cache or free the local symbols read by get_sym_h.  */
      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return TRUE;
}
1247
 
1248
/* Allocate space for overlay call and return stubs.
   PLACE_SPU_SECTION is a callback the linker uses to attach the new
   sections to output sections.  Returns 0 on error, 1 if no stubs
   are needed, 2 after creating stub/ovtab/toe sections.  */

int
spu_elf_size_stubs (struct bfd_link_info *info,
                    void (*place_spu_section) (asection *, asection *,
                                               const char *),
                    int non_overlay_stubs)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  htab->non_overlay_stubs = non_overlay_stubs;
  /* First pass: count the stubs needed for each overlay.  */
  if (!process_stubs (info, FALSE))
    return 0;

  /* _SPUEAR_ symbols need stubs too, since the PPU may call them.  */
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  /* No stubs counted at all: nothing to create.  */
  if (htab->stub_count == NULL)
    return 1;

  ibfd = info->input_bfds;
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
    return 0;

  /* Stub section 0 serves calls from the non-overlay area and is
     placed after .text.  Alignment is 16 for the 4-insn stub.  */
  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
    return 0;
  stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
  (*place_spu_section) (stub, NULL, ".text");

  /* One stub section per overlay, placed with its overlay.  */
  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
        return 0;
      stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
      (*place_spu_section) (stub, osec, NULL);
    }

 /* htab->ovtab consists of two arrays.
    .   struct {
    .     u32 vma;
    .     u32 size;
    .     u32 file_off;
    .     u32 buf;
    .   } _ovly_table[];
    .
    .   struct {
    .     u32 mapped;
    .   } _ovly_buf_table[];
    .  */

  flags = (SEC_ALLOC | SEC_LOAD
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;

  /* 16 bytes per overlay plus a leading entry for the non-overlay
     area, then one word per overlay buffer.  */
  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*place_spu_section) (htab->ovtab, NULL, ".data");

  /* Reserve room for the _EAR_ table (see spu_elf_build_stubs).  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;
  (*place_spu_section) (htab->toe, NULL, ".toe");

  return 2;
}
1335
 
1336
/* Functions to handle embedded spu_ovl.o object.  */
1337
 
1338
/* bfd_openr_iovec "open" callback for the built-in overlay manager:
   the open "handle" is simply the _ovl_stream descriptor that was
   passed in, so return it unchanged.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}
1343
 
1344
static file_ptr
1345
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1346
               void *stream,
1347
               void *buf,
1348
               file_ptr nbytes,
1349
               file_ptr offset)
1350
{
1351
  struct _ovl_stream *os;
1352
  size_t count;
1353
  size_t max;
1354
 
1355
  os = (struct _ovl_stream *) stream;
1356
  max = (const char *) os->end - (const char *) os->start;
1357
 
1358
  if ((ufile_ptr) offset >= max)
1359
    return 0;
1360
 
1361
  count = nbytes;
1362
  if (count > max - offset)
1363
    count = max - offset;
1364
 
1365
  memcpy (buf, (const char *) os->start + offset, count);
1366
  return count;
1367
}
1368
 
1369
bfd_boolean
1370
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1371
{
1372
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1373
                              "elf32-spu",
1374
                              ovl_mgr_open,
1375
                              (void *) stream,
1376
                              ovl_mgr_pread,
1377
                              NULL,
1378
                              NULL);
1379
  return *ovl_bfd != NULL;
1380
}
1381
 
1382
/* Define an STT_OBJECT symbol.  */
1383
 
1384
static struct elf_link_hash_entry *
1385
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1386
{
1387
  struct elf_link_hash_entry *h;
1388
 
1389
  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1390
  if (h == NULL)
1391
    return NULL;
1392
 
1393
  if (h->root.type != bfd_link_hash_defined
1394
      || !h->def_regular)
1395
    {
1396
      h->root.type = bfd_link_hash_defined;
1397
      h->root.u.def.section = htab->ovtab;
1398
      h->type = STT_OBJECT;
1399
      h->ref_regular = 1;
1400
      h->def_regular = 1;
1401
      h->ref_regular_nonweak = 1;
1402
      h->non_elf = 0;
1403
    }
1404
  else
1405
    {
1406
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1407
                             h->root.u.def.section->owner,
1408
                             h->root.root.string);
1409
      bfd_set_error (bfd_error_bad_value);
1410
      return NULL;
1411
    }
1412
 
1413
  return h;
1414
}
1415
 
1416
/* Fill in all stubs and the overlay tables.  EMIT_SYMS non-zero asks
   for a ".ovl_call" symbol to be defined on each stub built.
   Returns FALSE on error.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  htab->emit_stub_syms = emit_syms;
  if (htab->stub_count == NULL)
    return TRUE;

  /* Allocate the stub section contents sized by the counting pass.
     size is reset to zero and reused as the fill pointer during the
     build pass; rawsize keeps the expected total for the check
     further down.  */
  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
      {
        htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                  htab->stub_sec[i]->size);
        if (htab->stub_sec[i]->contents == NULL)
          return FALSE;
        htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
        htab->stub_sec[i]->size = 0;
      }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular);

  /* The overlay manager entry point must not itself be overlaid.  */
  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->u.o.ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
  htab->ovly_return = h;

  /* Fill in all the stubs.  */
  process_stubs (info, TRUE);
  if (!htab->stub_err)
    elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

  if (htab->stub_err)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  /* The build pass must have emitted exactly the number of stubs the
     sizing pass counted.  */
  for (i = 0; i <= htab->num_overlays; i++)
    {
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
        {
          (*_bfd_error_handler)  (_("stubs don't match calculated size"));
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }
      htab->stub_sec[i]->rawsize = 0;
    }

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  /* set low bit of .size to mark non-overlay area as present.  */
  p[7] = 1;
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
    {
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

      if (ovl_index != 0)
        {
          /* Each _ovly_table entry is 16 bytes: vma, size (rounded
             up to 16), file_off, buffer number.  */
          unsigned long off = ovl_index * 16;
          unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */
          bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
        }
    }

  /* Define the symbols delimiting the overlay tables, for use by the
     overlay manager and the debugger.  */
  h = define_ovtab_symbol (htab, "_ovly_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = 16;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  h->size = 0;

  /* _EAR_ lives in .toe rather than .ovtab.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
1543
 
1544
/* Check that all loadable section VMAs lie in the range
1545
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
1546
 
1547
asection *
1548
spu_elf_check_vma (struct bfd_link_info *info,
1549
                   int auto_overlay,
1550
                   unsigned int lo,
1551
                   unsigned int hi,
1552
                   unsigned int overlay_fixed,
1553
                   unsigned int reserved,
1554
                   int extra_stack_space,
1555
                   void (*spu_elf_load_ovl_mgr) (void),
1556
                   FILE *(*spu_elf_open_overlay_script) (void),
1557
                   void (*spu_elf_relink) (void))
1558
{
1559
  struct elf_segment_map *m;
1560
  unsigned int i;
1561
  struct spu_link_hash_table *htab = spu_hash_table (info);
1562
  bfd *abfd = info->output_bfd;
1563
 
1564
  if (auto_overlay & AUTO_OVERLAY)
1565
    htab->auto_overlay = auto_overlay;
1566
  htab->local_store = hi + 1 - lo;
1567
  htab->overlay_fixed = overlay_fixed;
1568
  htab->reserved = reserved;
1569
  htab->extra_stack_space = extra_stack_space;
1570
  htab->spu_elf_load_ovl_mgr = spu_elf_load_ovl_mgr;
1571
  htab->spu_elf_open_overlay_script = spu_elf_open_overlay_script;
1572
  htab->spu_elf_relink = spu_elf_relink;
1573
 
1574
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
1575
    if (m->p_type == PT_LOAD)
1576
      for (i = 0; i < m->count; i++)
1577
        if (m->sections[i]->size != 0
1578
            && (m->sections[i]->vma < lo
1579
                || m->sections[i]->vma > hi
1580
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
1581
          return m->sections[i];
1582
 
1583
  /* No need for overlays if it all fits.  */
1584
  htab->auto_overlay = 0;
1585
  return NULL;
1586
}
1587
 
1588
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta (a
   negative value for the usual downward-growing frame).  Gives up,
   returning 0, after 32 unrecognized insns or on hitting a branch.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  int reg[128];   /* Tracked value of each SPU register (preferred slot).  */

  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusing insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
        break;

      /* Register saves are common in prologues; skip stores.  */
      if (buf[0] == 0x24 /* stqd */)
        continue;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
        {
          imm >>= 7;
          /* Sign extend the 10-bit immediate.  */
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;

          if (rt == 1 /* sp */)
            {
              /* A positive adjustment would be an epilogue.  */
              if (imm > 0)
                break;
              return reg[rt];
            }
        }
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[ra] + reg[rb];
          if (rt == 1)
            return reg[rt];
        }
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
        {
          /* Immediate loads build large frame sizes a piece at a
             time; track the value loaded.  */
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;
          else
            {
              imm &= 0xffff;

              if (buf[0] == 0x40 /* il */)
                {
                  if ((buf[1] & 0x80) == 0)
                    goto unknown_insn;
                  /* Sign extend the 16-bit immediate.  */
                  imm = (imm ^ 0x8000) - 0x8000;
                }
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
                imm <<= 16;
            }
          reg[rt] = imm;
          continue;
        }
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
        {
          reg[rt] |= imm & 0xffff;
          continue;
        }
      else if (buf[0] == 0x04 /* ori */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
          continue;
        }
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
               || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
        {
          /* Used in pic reg load.  Say rt is trashed.  */
          reg[rt] = 0;
          continue;
        }
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
        break;
    unknown_insn:
      ++unrecog;
    }

  return 0;
}
1685
 
1686
/* qsort predicate to sort symbols by section and value.  */
1687
 
1688
static Elf_Internal_Sym *sort_syms_syms;
1689
static asection **sort_syms_psecs;
1690
 
1691
static int
1692
sort_syms (const void *a, const void *b)
1693
{
1694
  Elf_Internal_Sym *const *s1 = a;
1695
  Elf_Internal_Sym *const *s2 = b;
1696
  asection *sec1,*sec2;
1697
  bfd_signed_vma delta;
1698
 
1699
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1700
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1701
 
1702
  if (sec1 != sec2)
1703
    return sec1->index - sec2->index;
1704
 
1705
  delta = (*s1)->st_value - (*s2)->st_value;
1706
  if (delta != 0)
1707
    return delta < 0 ? -1 : 1;
1708
 
1709
  delta = (*s2)->st_size - (*s1)->st_size;
1710
  if (delta != 0)
1711
    return delta < 0 ? -1 : 1;
1712
 
1713
  return *s1 < *s2 ? -1 : 1;
1714
}
1715
 
1716
/* One call made by a function; also used for branches to the
   hot/cold part of a function.  */

struct call_info
{
  /* The function (or function part) being called.  */
  struct function_info *fun;
  /* Next call on the caller's call_list.  */
  struct call_info *next;
  /* NOTE(review): maintained by code outside this view — presumably
     the number of times the call is seen and the max call-tree depth
     below it; confirm against the call-graph builder.  */
  unsigned int count;
  unsigned int max_depth;
  /* Set for a tail call (plain branch rather than branch-and-link).  */
  unsigned int is_tail : 1;
  /* Set when the callee is a section fragment pasted after the
     caller rather than a real call.  */
  unsigned int is_pasted : 1;
};
1725
 
1726
/* Information about one function, or one contiguous part of a
   function, used by the stack analysis and overlay code.  */

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  Discriminated by the GLOBAL flag
     below: use u.h when set, u.sym otherwise.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
1769
 
1770
/* Per-section array of function_info entries, kept sorted by address
   (see maybe_insert_function).  Attached to the section's
   spu_elf_section_data.  */

struct spu_elf_stack_info
{
  /* Number of entries in use.  */
  int num_fun;
  /* Number of entries allocated.  */
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};
1778
 
1779
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1780
   entries for section SEC.  */
1781
 
1782
static struct spu_elf_stack_info *
1783
alloc_stack_info (asection *sec, int max_fun)
1784
{
1785
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1786
  bfd_size_type amt;
1787
 
1788
  amt = sizeof (struct spu_elf_stack_info);
1789
  amt += (max_fun - 1) * sizeof (struct function_info);
1790
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
1791
  if (sec_data->u.i.stack_info != NULL)
1792
    sec_data->u.i.stack_info->max_fun = max_fun;
1793
  return sec_data->u.i.stack_info;
1794
}
1795
 
1796
/* Add a new struct function_info describing a (part of a) function
   starting at SYM_H.  Keep the array sorted by address.
   SYM_H is an Elf_Internal_Sym * when GLOBAL is false, otherwise a
   struct elf_link_hash_entry *.  Returns the (possibly pre-existing)
   entry, or NULL on allocation failure.  */

static struct function_info *
maybe_insert_function (asection *sec,
                       void *sym_h,
                       bfd_boolean global,
                       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* First function seen in this section: create the array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
        return NULL;
    }

  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry starting at or before OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
         info.  */
      if (sinfo->fun[i].lo == off)
        {
          /* Prefer globals over local syms.  */
          if (global && !sinfo->fun[i].global)
            {
              sinfo->fun[i].global = TRUE;
              sinfo->fun[i].u.h = sym_h;
            }
          if (is_func)
            sinfo->fun[i].is_func = TRUE;
          return &sinfo->fun[i];
        }
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
        return &sinfo->fun[i];
    }

  /* Grow the array (by half again plus 20) when full, zeroing the
     new tail.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
        return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Insert at position I+1, shifting later entries along.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* Record stack usage as a positive frame size.  */
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
1886
 
1887
/* Return the name of FUN, for use in diagnostics.  */

static const char *
func_name (struct function_info *fun)
{
  asection *sec;
  bfd *ibfd;
  Elf_Internal_Shdr *symtab_hdr;

  /* Name a hot/cold fragment after the function owning it.  */
  while (fun->start != NULL)
    fun = fun->start;

  if (fun->global)
    return fun->u.h->root.root.string;

  sec = fun->sec;
  if (fun->u.sym->st_name == 0)
    {
      /* Unnamed local symbol: synthesize "section+offset".  NOTE: the
         malloc'd buffer is never freed; it lives for the duration of
         the link (names are only used in messages).  */
      size_t len = strlen (sec->name);
      char *name = bfd_malloc (len + 10);
      if (name == NULL)
        return "(null)";
      sprintf (name, "%s+%lx", sec->name,
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
      return name;
    }
  ibfd = sec->owner;
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
}
1917
 
1918
/* Read the instruction at OFF in SEC.  Return true iff the instruction
1919
   is a nop, lnop, or stop 0 (all zero insn).  */
1920
 
1921
static bfd_boolean
1922
is_nop (asection *sec, bfd_vma off)
1923
{
1924
  unsigned char insn[4];
1925
 
1926
  if (off + 4 > sec->size
1927
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1928
    return FALSE;
1929
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1930
    return TRUE;
1931
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1932
    return TRUE;
1933
  return FALSE;
1934
}
1935
 
1936
/* Extend the range of FUN to cover nop padding up to LIMIT.
1937
   Return TRUE iff some instruction other than a NOP was found.  */
1938
 
1939
static bfd_boolean
1940
insns_at_end (struct function_info *fun, bfd_vma limit)
1941
{
1942
  bfd_vma off = (fun->hi + 3) & -4;
1943
 
1944
  while (off < limit && is_nop (fun->sec, off))
1945
    off += 4;
1946
  if (off < limit)
1947
    {
1948
      fun->hi = off;
1949
      return TRUE;
1950
    }
1951
  fun->hi = limit;
1952
  return FALSE;
1953
}
1954
 
1955
/* Check and fix overlapping function ranges.  Return TRUE iff there
1956
   are gaps in the current info we have about functions in SEC.  */
1957
 
1958
/* Check and fix overlapping function ranges recorded for SEC, and
   report whether the info leaves gaps (addresses in SEC covered by
   no known function).  Returns TRUE iff gaps exist.  */

static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No per-section function info yet: nothing to check, and no
     known gaps either.  */
  if (sinfo == NULL)
    return FALSE;

  /* fun[] is kept sorted by lo (see maybe_insert_function).  Truncate
     any function overlapping the start of the next one, warning the
     user; otherwise extend it over trailing nop padding and note a
     gap if real instructions remain before the next function.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
        /* Fix overlapping symbols.  */
        const char *f1 = func_name (&sinfo->fun[i - 1]);
        const char *f2 = func_name (&sinfo->fun[i]);

        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  /* Check coverage at the section boundaries too.  */
  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
        gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
        {
          /* Clamp the last function to the section size.  */
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
        }
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
        gaps = TRUE;
    }
  return gaps;
}
2000
 
2001
/* Search current function info for a function that contains address
2002
   OFFSET in section SEC.  */
2003
 
2004
static struct function_info *
2005
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2006
{
2007
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2008
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2009
  int lo, hi, mid;
2010
 
2011
  lo = 0;
2012
  hi = sinfo->num_fun;
2013
  while (lo < hi)
2014
    {
2015
      mid = (lo + hi) / 2;
2016
      if (offset < sinfo->fun[mid].lo)
2017
        hi = mid;
2018
      else if (offset >= sinfo->fun[mid].hi)
2019
        lo = mid + 1;
2020
      else
2021
        return &sinfo->fun[mid];
2022
    }
2023
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2024
                          sec, offset);
2025
  return NULL;
2026
}
2027
 
2028
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
2029
   if CALLEE was new.  If this function return FALSE, CALLEE should
2030
   be freed.  */
2031
 
2032
/* Add CALLEE to CALLER's call list if not already present.  Return
   TRUE if CALLEE was new; return FALSE if an entry for the same
   function already existed (in which case the caller should free
   CALLEE — ownership is only taken on TRUE).  */

static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Scan the existing list for an entry to the same function.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
        /* Tail calls use less stack than normal calls.  Retain entry
           for normal call over one for tail call.  */
        p->is_tail &= callee->is_tail;
        if (!p->is_tail)
          {
            /* A normal call means the destination is a real separate
               function, not a hot/cold fragment of the caller.  */
            p->fun->start = NULL;
            p->fun->is_func = TRUE;
          }
        p->count += 1;
        /* Reorder list so most recent call is first.  */
        *pp = p->next;
        p->next = caller->call_list;
        caller->call_list = p;
        return FALSE;
      }
  /* Not seen before: link CALLEE in at the head.  */
  callee->next = caller->call_list;
  callee->count += 1;
  caller->call_list = callee;
  return TRUE;
}
2060
 
2061
/* Copy CALL and insert the copy into CALLER.  */
2062
 
2063
static bfd_boolean
2064
copy_callee (struct function_info *caller, const struct call_info *call)
2065
{
2066
  struct call_info *callee;
2067
  callee = bfd_malloc (sizeof (*callee));
2068
  if (callee == NULL)
2069
    return FALSE;
2070
  *callee = *call;
2071
  if (!insert_callee (caller, callee))
2072
    free (callee);
2073
  return TRUE;
2074
}
2075
 
2076
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2077
   overlay stub sections.  */
2078
 
2079
static bfd_boolean
2080
interesting_section (asection *s, bfd *obfd)
2081
{
2082
  return (s->output_section != NULL
2083
          && s->output_section->owner == obfd
2084
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2085
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2086
          && s->size != 0);
2087
}
2088
 
2089
/* Rummage through the relocs for SEC, looking for function calls.
2090
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
2091
   mark destination symbols on calls as being functions.  Also
2092
   look at branches, which may be tail calls or go to hot/cold
2093
   section part of same function.  */
2094
 
2095
static bfd_boolean
mark_functions_via_relocs (asection *sec,
                           struct bfd_link_info *info,
                           int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  /* Static so the "call to non-code section" warning is issued only
     once for the whole link, not once per offending reloc.  */
  static bfd_boolean warned;

  if (!interesting_section (sec, info->output_bfd)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
                                               info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean reject, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Branches are relocated with R_SPU_REL16 or R_SPU_ADDR16.
         Other reloc types are only of interest when counting
         potential function-pointer stubs for --auto-overlay.  */
      reject = FALSE;
      r_type = ELF32_R_TYPE (irela->r_info);
      if (r_type != R_SPU_REL16
          && r_type != R_SPU_ADDR16)
        {
          reject = TRUE;
          if (!(call_tree && spu_hash_table (info)->auto_overlay))
            continue;
        }

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
        return FALSE;

      /* Ignore relocs against symbols that won't appear in the
         output.  */
      if (sym_sec == NULL
          || sym_sec->output_section == NULL
          || sym_sec->output_section->owner != info->output_bfd)
        continue;

      is_call = FALSE;
      if (!reject)
        {
          unsigned char insn[4];

          if (!bfd_get_section_contents (sec->owner, sec, insn,
                                         irela->r_offset, 4))
            return FALSE;
          if (is_branch (insn))
            {
              /* First byte 0x31 or 0x33 is treated as a call-type
                 branch; anything else is a plain branch.  */
              is_call = (insn[0] & 0xfd) == 0x31;
              if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
                  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
                {
                  if (!warned)
                    info->callbacks->einfo
                      (_("%B(%A+0x%v): call to non-code section"
                         " %B(%A), analysis incomplete\n"),
                       sec->owner, sec, irela->r_offset,
                       sym_sec->owner, sym_sec);
                  warned = TRUE;
                  continue;
                }
            }
          else
            {
              /* Not a branch insn: handle like the wrong-reloc case
                 above, except that branch hints never need stubs.  */
              reject = TRUE;
              if (!(call_tree && spu_hash_table (info)->auto_overlay)
                  || is_hint (insn))
                continue;
            }
        }

      if (reject)
        {
          /* For --auto-overlay, count possible stubs we need for
             function pointer references.  */
          unsigned int sym_type;
          if (h)
            sym_type = h->type;
          else
            sym_type = ELF_ST_TYPE (sym->st_info);
          if (sym_type == STT_FUNC)
            spu_hash_table (info)->non_ovly_stub += 1;
          continue;
        }

      /* Branch target address = symbol value + addend.  */
      if (h)
        val = h->root.u.def.value;
      else
        val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
        {
          /* First pass: record that the branch destination starts
             (or lies inside) a function.  */
          struct function_info *fun;

          if (irela->r_addend != 0)
            {
              /* The target is not the symbol itself; manufacture a
                 fake local sym at the destination address.  */
              Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
              if (fake == NULL)
                return FALSE;
              fake->st_value = val;
              fake->st_shndx
                = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
              sym = fake;
            }
          if (sym)
            fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
          else
            fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
          if (fun == NULL)
            return FALSE;
          /* Free the fake sym unless maybe_insert_function kept it.  */
          if (irela->r_addend != 0
              && fun->u.sym != sym)
            free (sym);
          continue;
        }

      /* Second pass: add an edge to the call graph.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
        return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
        return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
        return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->count = 0;
      if (callee->fun->last_caller != sec)
        {
          callee->fun->last_caller = sec;
          callee->fun->call_count += 1;
        }
      if (!insert_callee (caller, callee))
        free (callee);
      else if (!is_call
               && !callee->fun->is_func
               && callee->fun->stack == 0)
        {
          /* This is either a tail call or a branch from one part of
             the function to another, ie. hot/cold section.  If the
             destination has been called by some other function then
             it is a separate function.  We also assume that functions
             are not split across input files.  */
          if (sec->owner != sym_sec->owner)
            {
              callee->fun->start = NULL;
              callee->fun->is_func = TRUE;
            }
          else if (callee->fun->start == NULL)
            callee->fun->start = caller;
          else
            {
              /* Both already have start chains; only keep the link
                 if they resolve to the same root function.  */
              struct function_info *callee_start;
              struct function_info *caller_start;
              callee_start = callee->fun;
              while (callee_start->start)
                callee_start = callee_start->start;
              caller_start = caller;
              while (caller_start->start)
                caller_start = caller_start->start;
              if (caller_start != callee_start)
                {
                  callee->fun->start = NULL;
                  callee->fun->is_func = TRUE;
                }
            }
        }
    }

  return TRUE;
}
2285
 
2286
/* Handle something like .init or .fini, which has a piece of a function.
2287
   These sections are pasted together to form a single function.  */
2288
 
2289
static bfd_boolean
pasted_function (asection *sec, struct bfd_link_info *info)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Create a fake symbol spanning the whole section so the section
     gets a function_info entry of its own.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
        {
          if (fun_start != NULL)
            {
              /* Record a pasted tail "call" from the preceding
                 fragment into this one, and chain the start link so
                 the fragments are treated as one function.  */
              struct call_info *callee = bfd_malloc (sizeof *callee);
              if (callee == NULL)
                return FALSE;

              fun->start = fun_start;
              callee->fun = fun;
              callee->is_tail = TRUE;
              callee->is_pasted = TRUE;
              callee->count = 0;
              if (!insert_callee (fun_start, callee))
                free (callee);
              return TRUE;
            }
          break;
        }
      /* Remember the last function of the most recent section that
         has stack info; that is the candidate predecessor.  */
      if (l->type == bfd_indirect_link_order
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL
          && sinfo->num_fun != 0)
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Either SEC was not found in the link_order list, or no
     predecessor with function info exists.  */
  info->callbacks->einfo (_("%A link_order not found\n"), sec);
  return FALSE;
}
2342
 
2343
/* Map address ranges in code sections to functions.  */
2344
 
2345
static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  /* Per-input-bfd arrays of selected symbol pointers and their
     sections, kept alive across the passes below.  */
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds to size the per-bfd arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    bfd_idx++;

  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;


  /* Pass 1: install functions from properly typed symbols.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
        {
          /* A bfd with no symbols but interesting code sections
             necessarily leaves gaps.  */
          if (!gaps)
            for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
              if (interesting_section (sec, info->output_bfd))
                {
                  gaps = TRUE;
                  break;
                }
          continue;
        }

      if (symtab_hdr->contents != NULL)
        {
          /* Don't use cached symbols since the generic ELF linker
             code only reads local symbols, and we need globals too.  */
          free (symtab_hdr->contents);
          symtab_hdr->contents = NULL;
        }
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
                                   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
        return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
        return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
        return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
        if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
          {
            asection *s;

            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
            if (s != NULL && interesting_section (s, info->output_bfd))
              *psy++ = sy;
          }
      symcount = psy - psyms;
      /* NULL-terminate so the globals pass below can stop on it.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  Allocate stack_info per
         section, sized by how many selected syms land in it.  */
      for (psy = psyms; psy < psyms + symcount; )
        {
          asection *s = psecs[*psy - syms];
          Elf_Internal_Sym **psy2;

          for (psy2 = psy; ++psy2 < psyms + symcount; )
            if (psecs[*psy2 - syms] != s)
              break;

          if (!alloc_stack_info (s, psy2 - psy))
            return FALSE;
          psy = psy2;
        }

      /* First install info about properly typed and sized functions.
         In an ideal world this will cover all code sections, except
         when partitioning functions into hot and cold sections,
         and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
        {
          sy = *psy;
          if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
            {
              asection *s = psecs[sy - syms];
              if (!maybe_insert_function (s, sy, FALSE, TRUE))
                return FALSE;
            }
        }

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
        if (interesting_section (sec, info->output_bfd))
          gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
         relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
           ibfd != NULL;
           ibfd = ibfd->link_next, bfd_idx++)
        {
          asection *sec;

          if (psym_arr[bfd_idx] == NULL)
            continue;

          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
            if (!mark_functions_via_relocs (sec, info, FALSE))
              return FALSE;
        }

      for (ibfd = info->input_bfds, bfd_idx = 0;
           ibfd != NULL;
           ibfd = ibfd->link_next, bfd_idx++)
        {
          Elf_Internal_Shdr *symtab_hdr;
          asection *sec;
          Elf_Internal_Sym *syms, *sy, **psyms, **psy;
          asection **psecs;

          if ((psyms = psym_arr[bfd_idx]) == NULL)
            continue;

          psecs = sec_arr[bfd_idx];

          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;

          /* Re-check: the reloc pass above may have filled the
             gaps in this bfd.  */
          gaps = FALSE;
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
            if (interesting_section (sec, info->output_bfd))
              gaps |= check_function_ranges (sec, info);
          if (!gaps)
            continue;

          /* Finally, install all globals.  */
          for (psy = psyms; (sy = *psy) != NULL; ++psy)
            {
              asection *s;

              s = psecs[sy - syms];

              /* Global syms might be improperly typed functions.  */
              if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
                  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
                {
                  if (!maybe_insert_function (s, sy, FALSE, FALSE))
                    return FALSE;
                }
            }
        }

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
        {
          extern const bfd_target bfd_elf32_spu_vec;
          asection *sec;

          if (ibfd->xvec != &bfd_elf32_spu_vec)
            continue;

          /* Some of the symbols we've installed as marking the
             beginning of functions may have a size of zero.  Extend
             the range of such functions to the beginning of the
             next symbol of interest.  */
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
            if (interesting_section (sec, info->output_bfd))
              {
                struct _spu_elf_section_data *sec_data;
                struct spu_elf_stack_info *sinfo;

                sec_data = spu_elf_section_data (sec);
                sinfo = sec_data->u.i.stack_info;
                if (sinfo != NULL)
                  {
                    int fun_idx;
                    bfd_vma hi = sec->size;

                    /* Walk the function table backwards, making each
                       function end where the next one begins.  */
                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
                      {
                        sinfo->fun[fun_idx].hi = hi;
                        hi = sinfo->fun[fun_idx].lo;
                      }
                  }
                /* No symbols in this section.  Must be .init or .fini
                   or something similar.  */
                else if (!pasted_function (sec, info))
                  return FALSE;
              }
        }
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link_next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
        continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
2584
 
2585
/* Iterate over all function_info we have collected, calling DOIT on
2586
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
2587
   if ROOT_ONLY.  */
2588
 
2589
static bfd_boolean
2590
for_each_node (bfd_boolean (*doit) (struct function_info *,
2591
                                    struct bfd_link_info *,
2592
                                    void *),
2593
               struct bfd_link_info *info,
2594
               void *param,
2595
               int root_only)
2596
{
2597
  bfd *ibfd;
2598
 
2599
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2600
    {
2601
      extern const bfd_target bfd_elf32_spu_vec;
2602
      asection *sec;
2603
 
2604
      if (ibfd->xvec != &bfd_elf32_spu_vec)
2605
        continue;
2606
 
2607
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2608
        {
2609
          struct _spu_elf_section_data *sec_data;
2610
          struct spu_elf_stack_info *sinfo;
2611
 
2612
          if ((sec_data = spu_elf_section_data (sec)) != NULL
2613
              && (sinfo = sec_data->u.i.stack_info) != NULL)
2614
            {
2615
              int i;
2616
              for (i = 0; i < sinfo->num_fun; ++i)
2617
                if (!root_only || !sinfo->fun[i].non_root)
2618
                  if (!doit (&sinfo->fun[i], info, param))
2619
                    return FALSE;
2620
            }
2621
        }
2622
    }
2623
  return TRUE;
2624
}
2625
 
2626
/* Transfer call info attached to struct function_info entries for
2627
   all of a given function's sections to the first entry.  */
2628
 
2629
static bfd_boolean
2630
transfer_calls (struct function_info *fun,
2631
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
2632
                void *param ATTRIBUTE_UNUSED)
2633
{
2634
  struct function_info *start = fun->start;
2635
 
2636
  if (start != NULL)
2637
    {
2638
      struct call_info *call, *call_next;
2639
 
2640
      while (start->start != NULL)
2641
        start = start->start;
2642
      for (call = fun->call_list; call != NULL; call = call_next)
2643
        {
2644
          call_next = call->next;
2645
          if (!insert_callee (start, call))
2646
            free (call);
2647
        }
2648
      fun->call_list = NULL;
2649
    }
2650
  return TRUE;
2651
}
2652
 
2653
/* Mark nodes in the call graph that are called by some other node.  */
2654
 
2655
static bfd_boolean
2656
mark_non_root (struct function_info *fun,
2657
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
2658
               void *param ATTRIBUTE_UNUSED)
2659
{
2660
  struct call_info *call;
2661
 
2662
  if (fun->visit1)
2663
    return TRUE;
2664
  fun->visit1 = TRUE;
2665
  for (call = fun->call_list; call; call = call->next)
2666
    {
2667
      call->fun->non_root = TRUE;
2668
      mark_non_root (call->fun, 0, 0);
2669
    }
2670
  return TRUE;
2671
}
2672
 
2673
/* Remove cycles from the call graph.  Set depth of nodes.  */
2674
 
2675
static bfd_boolean
remove_cycles (struct function_info *fun,
               struct bfd_link_info *info,
               void *param)
{
  struct call_info **callp, *call;
  /* PARAM carries this node's depth in; the deepest depth found in
     the subtree is written back out through it.  */
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  /* visit2: node has been processed at least once.
     marking: node is on the current DFS path (cycle detection).  */
  fun->visit2 = TRUE;
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      if (!call->fun->visit2)
        {
          /* Pasted fragments stay at the caller's depth; real calls
             descend one level.  */
          call->max_depth = depth + !call->is_pasted;
          if (!remove_cycles (call->fun, info, &call->max_depth))
            return FALSE;
          if (max_depth < call->max_depth)
            max_depth = call->max_depth;
        }
      else if (call->fun->marking)
        {
          /* Back edge to a node on the current path: a cycle.
             Break it by unlinking and freeing this call edge.  */
          if (!spu_hash_table (info)->auto_overlay)
            {
              const char *f1 = func_name (fun);
              const char *f2 = func_name (call->fun);

              info->callbacks->info (_("Stack analysis will ignore the call "
                                       "from %s to %s\n"),
                                     f1, f2);
            }
          *callp = call->next;
          free (call);
          /* callp already addresses the next edge.  */
          continue;
        }
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
2720
 
2721
/* Populate call_list for each function.  */
2722
 
2723
static bfd_boolean
build_call_tree (struct bfd_link_info *info)
{
  bfd *ibfd;
  unsigned int depth;

  /* Second reloc scan (call_tree == TRUE): this time record call
     graph edges rather than just function starts.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        if (!mark_functions_via_relocs (sec, info, TRUE))
          return FALSE;
    }

  /* Transfer call info from hot/cold section part of function
     to main entry.  */
  if (!spu_hash_table (info)->auto_overlay
      && !for_each_node (transfer_calls, info, 0, FALSE))
    return FALSE;

  /* Find the call graph root(s).  */
  if (!for_each_node (mark_non_root, info, 0, FALSE))
    return FALSE;

  /* Remove cycles from the call graph.  We start from the root node(s)
     so that we break cycles in a reasonable place.  */
  depth = 0;
  return for_each_node (remove_cycles, info, &depth, TRUE);
}
2757
 
2758
/* qsort predicate to sort calls by max_depth then count.  */
2759
 
2760
static int
2761
sort_calls (const void *a, const void *b)
2762
{
2763
  struct call_info *const *c1 = a;
2764
  struct call_info *const *c2 = b;
2765
  int delta;
2766
 
2767
  delta = (*c2)->max_depth - (*c1)->max_depth;
2768
  if (delta != 0)
2769
    return delta;
2770
 
2771
  delta = (*c2)->count - (*c1)->count;
2772
  if (delta != 0)
2773
    return delta;
2774
 
2775
  return c1 - c2;
2776
}
2777
 
2778
/* Parameter block threaded through mark_overlay_section via
   for_each_node.  NOTE(review): presumably accumulates the maximum
   single-overlay size — confirm against mark_overlay_section's use
   of max_overlay_size (body not fully visible here).  */
struct _mos_param {
  unsigned int max_overlay_size;
};
2781
 
2782
/* Set linker_mark and gc_mark on any sections that we will put in
2783
   overlays.  These flags are used by the generic ELF linker, but we
2784
   won't be continuing on to bfd_elf_final_link so it is OK to use
2785
   them.  linker_mark is clear before we get here.  Set segment_mark
2786
   on sections that are part of a pasted function (excluding the last
2787
   section).
2788
 
2789
   Set up function rodata section if --overlay-rodata.  We don't
2790
   currently include merged string constant rodata sections since
2791
 
2792
   Sort the call graph so that the deepest nodes will be visited
2793
   first.  */
2794
 
2795
/* Mark FUN's text (and matching rodata) section as an overlay
   candidate, track the largest candidate in PARAM (a struct
   _mos_param), sort FUN's call list deepest-first, and recurse over
   all callees.  Returns FALSE only on memory allocation failure.  */

static bfd_boolean
mark_overlay_section (struct function_info *fun,
                      struct bfd_link_info *info,
                      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;

  /* visit4 guards against revisiting nodes in the (possibly cyclic
     before remove_cycles) call graph.  */
  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  if (!fun->sec->linker_mark)
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
         be!), and SEC_CODE is clear on rodata sections.  We use
         this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      if (spu_hash_table (info)->auto_overlay & OVERLAY_RODATA)
        {
          char *name = NULL;

          /* Find the rodata section corresponding to this function's
             text section.  Derive its name: ".text" -> ".rodata",
             ".text.foo" -> ".rodata.foo",
             ".gnu.linkonce.t.foo" -> ".gnu.linkonce.r.foo".  */
          if (strcmp (fun->sec->name, ".text") == 0)
            {
              name = bfd_malloc (sizeof (".rodata"));
              if (name == NULL)
                return FALSE;
              memcpy (name, ".rodata", sizeof (".rodata"));
            }
          else if (strncmp (fun->sec->name, ".text.", 6) == 0)
            {
              size_t len = strlen (fun->sec->name);
              /* ".rodata" is one char longer than ".text"; len + 3
                 covers the longer prefix, suffix and NUL.  */
              name = bfd_malloc (len + 3);
              if (name == NULL)
                return FALSE;
              memcpy (name, ".rodata", sizeof (".rodata"));
              /* Append ".foo" (including NUL) after ".rodata".  */
              memcpy (name + 7, fun->sec->name + 5, len - 4);
            }
          else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
            {
              size_t len = strlen (fun->sec->name) + 1;
              name = bfd_malloc (len);
              if (name == NULL)
                return FALSE;
              memcpy (name, fun->sec->name, len);
              /* Flip the 't' in ".gnu.linkonce.t." to 'r'.  */
              name[14] = 'r';
            }

          if (name != NULL)
            {
              asection *rodata = NULL;
              asection *group_sec = elf_section_data (fun->sec)->next_in_group;
              /* For grouped (comdat) sections only search within the
                 group; otherwise search the whole bfd by name.  */
              if (group_sec == NULL)
                rodata = bfd_get_section_by_name (fun->sec->owner, name);
              else
                while (group_sec != NULL && group_sec != fun->sec)
                  {
                    if (strcmp (group_sec->name, name) == 0)
                      {
                        rodata = group_sec;
                        break;
                      }
                    group_sec = elf_section_data (group_sec)->next_in_group;
                  }
              fun->rodata = rodata;
              if (fun->rodata)
                {
                  fun->rodata->linker_mark = 1;
                  fun->rodata->gc_mark = 1;
                  fun->rodata->flags &= ~SEC_CODE;
                }
              free (name);
            }
        }
      /* Track the largest single candidate: text plus rodata.  */
      size = fun->sec->size;
      if (fun->rodata)
        size += fun->rodata->size;
      if (mos_param->max_overlay_size < size)
        mos_param->max_overlay_size = size;
    }

  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  /* Sort the call list (via an array) so the deepest callees come
     first; the singly-linked list is rebuilt in sorted order.  */
  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
        return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
        calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      fun->call_list = NULL;
      while (count != 0)
        {
          --count;
          calls[count]->next = fun->call_list;
          fun->call_list = calls[count];
        }
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
        {
          /* There can only be one is_pasted call per function_info.  */
          BFD_ASSERT (!fun->sec->segment_mark);
          fun->sec->segment_mark = 1;
        }
      if (!mark_overlay_section (call->fun, info, param))
        return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
        fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
2932
 
2933
/* If non-zero then unmark functions called from those within sections
   that we need to unmark.  Unfortunately this isn't reliable since the
   call graph cannot know the destination of function pointer calls.
   Currently 0: only the directly excluded sections are unmarked.  */
#define RECURSE_UNMARK 0
2937
 
2938
/* Parameter block for unmark_overlay_section.  */

struct _uos_param {
  asection *exclude_input_section;   /* Input section kept out of overlays.  */
  asection *exclude_output_section;  /* Output section kept out of overlays.  */
  unsigned long clearing;            /* Nesting count of excluded callers;
                                        only used when RECURSE_UNMARK.  */
};
2943
 
2944
/* Undo some of mark_overlay_section's work.  */
2945
 
2946
static bfd_boolean
2947
unmark_overlay_section (struct function_info *fun,
2948
                        struct bfd_link_info *info,
2949
                        void *param)
2950
{
2951
  struct call_info *call;
2952
  struct _uos_param *uos_param = param;
2953
  unsigned int excluded = 0;
2954
 
2955
  if (fun->visit5)
2956
    return TRUE;
2957
 
2958
  fun->visit5 = TRUE;
2959
 
2960
  excluded = 0;
2961
  if (fun->sec == uos_param->exclude_input_section
2962
      || fun->sec->output_section == uos_param->exclude_output_section)
2963
    excluded = 1;
2964
 
2965
  if (RECURSE_UNMARK)
2966
    uos_param->clearing += excluded;
2967
 
2968
  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
2969
    {
2970
      fun->sec->linker_mark = 0;
2971
      if (fun->rodata)
2972
        fun->rodata->linker_mark = 0;
2973
    }
2974
 
2975
  for (call = fun->call_list; call != NULL; call = call->next)
2976
    if (!unmark_overlay_section (call->fun, info, param))
2977
      return FALSE;
2978
 
2979
  if (RECURSE_UNMARK)
2980
    uos_param->clearing -= excluded;
2981
  return TRUE;
2982
}
2983
 
2984
/* Parameter block for collect_lib_sections.  */

struct _cl_param {
  unsigned int lib_size;    /* Max size for a candidate section pair.  */
  asection **lib_sections;  /* Output cursor: (text, rodata) pairs.  */
};
2988
 
2989
/* Add sections we have marked as belonging to overlays to an array
   for consideration as non-overlay sections.  The array consists of
   pairs of sections, (text, rodata), for functions in the call graph.  */
2992
 
2993
/* Call-graph walker: append FUN's (text, rodata) section pair to the
   array in PARAM (a struct _cl_param) when the pair is small enough
   to be a library candidate, then visit all callees.  gc_mark is
   cleared on collected sections so they are not collected twice.
   Always returns TRUE.  */

static bfd_boolean
collect_lib_sections (struct function_info *fun,
                      struct bfd_link_info *info,
                      void *param)
{
  struct _cl_param *lib_param = param;
  struct call_info *call;
  unsigned int size;

  if (fun->visit6)
    return TRUE;

  fun->visit6 = TRUE;
  /* Skip sections already excluded from overlays, already collected,
     or part of a pasted function chain.  */
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
    return TRUE;

  size = fun->sec->size;
  if (fun->rodata)
    size += fun->rodata->size;
  if (size > lib_param->lib_size)
    return TRUE;

  /* Emit the pair; the rodata slot is NULL when there is no eligible
     rodata section.  */
  *lib_param->lib_sections++ = fun->sec;
  fun->sec->gc_mark = 0;
  if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
    {
      *lib_param->lib_sections++ = fun->rodata;
      fun->rodata->gc_mark = 0;
    }
  else
    *lib_param->lib_sections++ = NULL;

  /* Return value deliberately ignored: this function only ever
     returns TRUE.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    collect_lib_sections (call->fun, info, param);

  return TRUE;
}
3030
 
3031
/* qsort predicate to sort sections by call count.  */
3032
 
3033
static int
3034
sort_lib (const void *a, const void *b)
3035
{
3036
  asection *const *s1 = a;
3037
  asection *const *s2 = b;
3038
  struct _spu_elf_section_data *sec_data;
3039
  struct spu_elf_stack_info *sinfo;
3040
  int delta;
3041
 
3042
  delta = 0;
3043
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3044
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3045
    {
3046
      int i;
3047
      for (i = 0; i < sinfo->num_fun; ++i)
3048
        delta -= sinfo->fun[i].call_count;
3049
    }
3050
 
3051
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3052
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3053
    {
3054
      int i;
3055
      for (i = 0; i < sinfo->num_fun; ++i)
3056
        delta += sinfo->fun[i].call_count;
3057
    }
3058
 
3059
  if (delta != 0)
3060
    return delta;
3061
 
3062
  return s1 - s2;
3063
}
3064
 
3065
/* Remove some sections from those marked to be in overlays.  Choose
3066
   those that are called from many places, likely library functions.  */
3067
 
3068
static unsigned int
3069
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3070
{
3071
  bfd *ibfd;
3072
  asection **lib_sections;
3073
  unsigned int i, lib_count;
3074
  struct _cl_param collect_lib_param;
3075
  struct function_info dummy_caller;
3076
 
3077
  memset (&dummy_caller, 0, sizeof (dummy_caller));
3078
  lib_count = 0;
3079
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3080
    {
3081
      extern const bfd_target bfd_elf32_spu_vec;
3082
      asection *sec;
3083
 
3084
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3085
        continue;
3086
 
3087
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3088
        if (sec->linker_mark
3089
            && sec->size < lib_size
3090
            && (sec->flags & SEC_CODE) != 0)
3091
          lib_count += 1;
3092
    }
3093
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3094
  if (lib_sections == NULL)
3095
    return (unsigned int) -1;
3096
  collect_lib_param.lib_size = lib_size;
3097
  collect_lib_param.lib_sections = lib_sections;
3098
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3099
                      TRUE))
3100
    return (unsigned int) -1;
3101
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3102
 
3103
  /* Sort sections so that those with the most calls are first.  */
3104
  if (lib_count > 1)
3105
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3106
 
3107
  for (i = 0; i < lib_count; i++)
3108
    {
3109
      unsigned int tmp, stub_size;
3110
      asection *sec;
3111
      struct _spu_elf_section_data *sec_data;
3112
      struct spu_elf_stack_info *sinfo;
3113
 
3114
      sec = lib_sections[2 * i];
3115
      /* If this section is OK, its size must be less than lib_size.  */
3116
      tmp = sec->size;
3117
      /* If it has a rodata section, then add that too.  */
3118
      if (lib_sections[2 * i + 1])
3119
        tmp += lib_sections[2 * i + 1]->size;
3120
      /* Add any new overlay call stubs needed by the section.  */
3121
      stub_size = 0;
3122
      if (tmp < lib_size
3123
          && (sec_data = spu_elf_section_data (sec)) != NULL
3124
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3125
        {
3126
          int k;
3127
          struct call_info *call;
3128
 
3129
          for (k = 0; k < sinfo->num_fun; ++k)
3130
            for (call = sinfo->fun[k].call_list; call; call = call->next)
3131
              if (call->fun->sec->linker_mark)
3132
                {
3133
                  struct call_info *p;
3134
                  for (p = dummy_caller.call_list; p; p = p->next)
3135
                    if (p->fun == call->fun)
3136
                      break;
3137
                  if (!p)
3138
                    stub_size += OVL_STUB_SIZE;
3139
                }
3140
        }
3141
      if (tmp + stub_size < lib_size)
3142
        {
3143
          struct call_info **pp, *p;
3144
 
3145
          /* This section fits.  Mark it as non-overlay.  */
3146
          lib_sections[2 * i]->linker_mark = 0;
3147
          if (lib_sections[2 * i + 1])
3148
            lib_sections[2 * i + 1]->linker_mark = 0;
3149
          lib_size -= tmp + stub_size;
3150
          /* Call stubs to the section we just added are no longer
3151
             needed.  */
3152
          pp = &dummy_caller.call_list;
3153
          while ((p = *pp) != NULL)
3154
            if (!p->fun->sec->linker_mark)
3155
              {
3156
                lib_size += OVL_STUB_SIZE;
3157
                *pp = p->next;
3158
                free (p);
3159
              }
3160
            else
3161
              pp = &p->next;
3162
          /* Add new call stubs to dummy_caller.  */
3163
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3164
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3165
            {
3166
              int k;
3167
              struct call_info *call;
3168
 
3169
              for (k = 0; k < sinfo->num_fun; ++k)
3170
                for (call = sinfo->fun[k].call_list;
3171
                     call;
3172
                     call = call->next)
3173
                  if (call->fun->sec->linker_mark)
3174
                    {
3175
                      struct call_info *callee;
3176
                      callee = bfd_malloc (sizeof (*callee));
3177
                      if (callee == NULL)
3178
                        return (unsigned int) -1;
3179
                      *callee = *call;
3180
                      if (!insert_callee (&dummy_caller, callee))
3181
                        free (callee);
3182
                    }
3183
            }
3184
        }
3185
    }
3186
  while (dummy_caller.call_list != NULL)
3187
    {
3188
      struct call_info *call = dummy_caller.call_list;
3189
      dummy_caller.call_list = call->next;
3190
      free (call);
3191
    }
3192
  for (i = 0; i < 2 * lib_count; i++)
3193
    if (lib_sections[i])
3194
      lib_sections[i]->gc_mark = 1;
3195
  free (lib_sections);
3196
  return lib_size;
3197
}
3198
 
3199
/* Build an array of overlay sections.  The deepest node's section is
   added first, then its parent node's section, then everything called
   from the parent section.  The idea being to group sections to
   minimise calls between different overlays.  */

static bfd_boolean
collect_overlays (struct function_info *fun,
                  struct bfd_link_info *info,
                  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* First descend into the deepest non-pasted callee (the call list
     was sorted deepest-first by mark_overlay_section), so its section
     is emitted before ours.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted)
      {
        if (!collect_overlays (call->fun, info, ovly_sections))
          return FALSE;
        break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Emit this function's (text, rodata) pair; the rodata slot is
         NULL when there is no eligible rodata section.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
        {
          fun->rodata->gc_mark = 0;
          *(*ovly_sections)++ = fun->rodata;
        }
      else
        *(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
         put pasted sections in the array, just the first section.
         Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
        {
          struct function_info *call_fun = fun;
          do
            {
              for (call = call_fun->call_list; call != NULL; call = call->next)
                if (call->is_pasted)
                  {
                    call_fun = call->fun;
                    call_fun->sec->gc_mark = 0;
                    if (call_fun->rodata)
                      call_fun->rodata->gc_mark = 0;
                    break;
                  }
              /* segment_mark set implies a pasted call must exist.  */
              if (call == NULL)
                abort ();
            }
          while (call_fun->sec->segment_mark);
        }
    }

  /* Now visit all remaining callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also visit the other functions that share this section, so
         everything in the section is grouped together.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
          && (sinfo = sec_data->u.i.stack_info) != NULL)
        {
          int i;
          for (i = 0; i < sinfo->num_fun; ++i)
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
              return FALSE;
        }
    }

  return TRUE;
}
3284
 
3285
/* Parameter block for sum_stack.  */

struct _sum_stack_param {
  size_t cum_stack;        /* Cumulative stack of the node just visited.  */
  size_t overall_stack;    /* Max cumulative stack over root functions.  */
  bfd_boolean emit_stack_syms;  /* Define __stack_* absolute symbols.  */
};
3290
 
3291
/* Descend the call graph for FUN, accumulating total stack required.
   On return sum_stack_param->cum_stack holds FUN's cumulative stack
   and fun->stack is overwritten with that cumulative value.  Unless
   auto-overlaying, also reports per-function stack usage via the
   linker callbacks and optionally defines __stack_* symbols.
   Returns FALSE only on memory allocation failure.  */

static bfd_boolean
sum_stack (struct function_info *fun,
           struct bfd_link_info *info,
           void *param)
{
  struct call_info *call;
  struct function_info *max;
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* If already visited, fun->stack is already the cumulative value
     (see below), so the early store above is correct.  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;   /* Callee on the deepest stack path, for reporting.  */
  for (call = fun->call_list; call; call = call->next)
    {
      if (!call->is_pasted)
        has_call = TRUE;
      if (!sum_stack (call->fun, info, sum_stack_param))
        return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
         tail calls.  fun->stack here is local stack usage for
         this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
        stack += fun->stack;
      if (cum_stack < stack)
        {
          cum_stack = stack;
          max = call->fun;
        }
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  /* Roots contribute to the overall maximum.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* When auto-overlaying we only want the numbers, not the report.  */
  if (htab->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (!fun->non_root)
    info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
                          f1, (bfd_vma) stack, (bfd_vma) cum_stack);

  if (has_call)
    {
      info->callbacks->minfo (_("  calls:\n"));
      for (call = fun->call_list; call; call = call->next)
        if (!call->is_pasted)
          {
            const char *f2 = func_name (call->fun);
            /* "*" marks the deepest path, "t" marks tail calls.  */
            const char *ann1 = call->fun == max ? "*" : " ";
            const char *ann2 = call->is_tail ? "t" : " ";

            info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
          }
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* 18 = strlen ("__stack_") + 8 hex digits + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
        return FALSE;

      /* Local symbols get the section id mixed in to keep the
         generated names unique.  */
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
        sprintf (name, "__stack_%s", f1);
      else
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h != NULL
          && (h->root.type == bfd_link_hash_new
              || h->root.type == bfd_link_hash_undefined
              || h->root.type == bfd_link_hash_undefweak))
        {
          /* Define the symbol as an absolute value equal to the
             cumulative stack requirement.  */
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = bfd_abs_section_ptr;
          h->root.u.def.value = cum_stack;
          h->size = 0;
          h->type = 0;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}
3401
 
3402
/* SEC is part of a pasted function.  Return the call_info for the
3403
   next section of this function.  */
3404
 
3405
static struct call_info *
3406
find_pasted_call (asection *sec)
3407
{
3408
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
3409
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
3410
  struct call_info *call;
3411
  int k;
3412
 
3413
  for (k = 0; k < sinfo->num_fun; ++k)
3414
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
3415
      if (call->is_pasted)
3416
        return call;
3417
  abort ();
3418
  return 0;
3419
}
3420
 
3421
/* qsort predicate to sort bfds by file name.  */
3422
 
3423
static int
3424
sort_bfds (const void *a, const void *b)
3425
{
3426
  bfd *const *abfd1 = a;
3427
  bfd *const *abfd2 = b;
3428
 
3429
  return strcmp ((*abfd1)->filename, (*abfd2)->filename);
3430
}
3431
 
3432
/* Handle --auto-overlay.  */
3433
 
3434
static void spu_elf_auto_overlay (struct bfd_link_info *, void (*) (void))
3435
     ATTRIBUTE_NORETURN;
3436
 
3437
static void
3438
spu_elf_auto_overlay (struct bfd_link_info *info,
3439
                      void (*spu_elf_load_ovl_mgr) (void))
3440
{
3441
  bfd *ibfd;
3442
  bfd **bfd_arr;
3443
  struct elf_segment_map *m;
3444
  unsigned int fixed_size, lo, hi;
3445
  struct spu_link_hash_table *htab;
3446
  unsigned int base, i, count, bfd_count;
3447
  int ovlynum;
3448
  asection **ovly_sections, **ovly_p;
3449
  FILE *script;
3450
  unsigned int total_overlay_size, overlay_size;
3451
  struct elf_link_hash_entry *h;
3452
  struct _mos_param mos_param;
3453
  struct _uos_param uos_param;
3454
  struct function_info dummy_caller;
3455
 
3456
  /* Find the extents of our loadable image.  */
3457
  lo = (unsigned int) -1;
3458
  hi = 0;
3459
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
3460
    if (m->p_type == PT_LOAD)
3461
      for (i = 0; i < m->count; i++)
3462
        if (m->sections[i]->size != 0)
3463
          {
3464
            if (m->sections[i]->vma < lo)
3465
              lo = m->sections[i]->vma;
3466
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
3467
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
3468
          }
3469
  fixed_size = hi + 1 - lo;
3470
 
3471
  if (!discover_functions (info))
3472
    goto err_exit;
3473
 
3474
  if (!build_call_tree (info))
3475
    goto err_exit;
3476
 
3477
  uos_param.exclude_input_section = 0;
3478
  uos_param.exclude_output_section
3479
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");
3480
 
3481
  htab = spu_hash_table (info);
3482
  h = elf_link_hash_lookup (&htab->elf, "__ovly_load",
3483
                            FALSE, FALSE, FALSE);
3484
  if (h != NULL
3485
      && (h->root.type == bfd_link_hash_defined
3486
          || h->root.type == bfd_link_hash_defweak)
3487
      && h->def_regular)
3488
    {
3489
      /* We have a user supplied overlay manager.  */
3490
      uos_param.exclude_input_section = h->root.u.def.section;
3491
    }
3492
  else
3493
    {
3494
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
3495
         builtin version to .text, and will adjust .text size.  */
3496
      asection *text = bfd_get_section_by_name (info->output_bfd, ".text");
3497
      if (text != NULL)
3498
        fixed_size -= text->size;
3499
      spu_elf_load_ovl_mgr ();
3500
      text = bfd_get_section_by_name (info->output_bfd, ".text");
3501
      if (text != NULL)
3502
        fixed_size += text->size;
3503
    }
3504
 
3505
  /* Mark overlay sections, and find max overlay section size.  */
3506
  mos_param.max_overlay_size = 0;
3507
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
3508
    goto err_exit;
3509
 
3510
  /* We can't put the overlay manager or interrupt routines in
3511
     overlays.  */
3512
  uos_param.clearing = 0;
3513
  if ((uos_param.exclude_input_section
3514
       || uos_param.exclude_output_section)
3515
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
3516
    goto err_exit;
3517
 
3518
  bfd_count = 0;
3519
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3520
    ++bfd_count;
3521
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
3522
  if (bfd_arr == NULL)
3523
    goto err_exit;
3524
 
3525
  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
3526
  count = 0;
3527
  bfd_count = 0;
3528
  total_overlay_size = 0;
3529
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3530
    {
3531
      extern const bfd_target bfd_elf32_spu_vec;
3532
      asection *sec;
3533
      unsigned int old_count;
3534
 
3535
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3536
        continue;
3537
 
3538
      old_count = count;
3539
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3540
        if (sec->linker_mark)
3541
          {
3542
            if ((sec->flags & SEC_CODE) != 0)
3543
              count += 1;
3544
            fixed_size -= sec->size;
3545
            total_overlay_size += sec->size;
3546
          }
3547
      if (count != old_count)
3548
        bfd_arr[bfd_count++] = ibfd;
3549
    }
3550
 
3551
  /* Since the overlay link script selects sections by file name and
3552
     section name, ensure that file names are unique.  */
3553
  if (bfd_count > 1)
3554
    {
3555
      bfd_boolean ok = TRUE;
3556
 
3557
      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
3558
      for (i = 1; i < bfd_count; ++i)
3559
        if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
3560
          {
3561
            if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
3562
              {
3563
                if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
3564
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
3565
                                          bfd_arr[i]->filename,
3566
                                          bfd_arr[i]->my_archive->filename);
3567
                else
3568
                  info->callbacks->einfo (_("%s duplicated\n"),
3569
                                          bfd_arr[i]->filename);
3570
                ok = FALSE;
3571
              }
3572
          }
3573
      if (!ok)
3574
        {
3575
          info->callbacks->einfo (_("sorry, no support for duplicate "
3576
                                    "object files in auto-overlay script\n"));
3577
          bfd_set_error (bfd_error_bad_value);
3578
          goto err_exit;
3579
        }
3580
    }
3581
  free (bfd_arr);
3582
 
3583
  if (htab->reserved == 0)
3584
    {
3585
      struct _sum_stack_param sum_stack_param;
3586
 
3587
      sum_stack_param.emit_stack_syms = 0;
3588
      sum_stack_param.overall_stack = 0;
3589
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3590
        goto err_exit;
3591
      htab->reserved = sum_stack_param.overall_stack + htab->extra_stack_space;
3592
    }
3593
  fixed_size += htab->reserved;
3594
  fixed_size += htab->non_ovly_stub * OVL_STUB_SIZE;
3595
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
3596
    {
3597
      /* Guess number of overlays.  Assuming overlay buffer is on
3598
         average only half full should be conservative.  */
3599
      ovlynum = total_overlay_size * 2 / (htab->local_store - fixed_size);
3600
      /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
3601
      fixed_size += ovlynum * 16 + 16 + 4 + 16;
3602
    }
3603
 
3604
  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
3605
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
3606
                              "size of 0x%v exceeds local store\n"),
3607
                            (bfd_vma) fixed_size,
3608
                            (bfd_vma) mos_param.max_overlay_size);
3609
 
3610
  /* Now see if we should put some functions in the non-overlay area.  */
3611
  else if (fixed_size < htab->overlay_fixed)
3612
    {
3613
      unsigned int max_fixed, lib_size;
3614
 
3615
      max_fixed = htab->local_store - mos_param.max_overlay_size;
3616
      if (max_fixed > htab->overlay_fixed)
3617
        max_fixed = htab->overlay_fixed;
3618
      lib_size = max_fixed - fixed_size;
3619
      lib_size = auto_ovl_lib_functions (info, lib_size);
3620
      if (lib_size == (unsigned int) -1)
3621
        goto err_exit;
3622
      fixed_size = max_fixed - lib_size;
3623
    }
3624
 
3625
  /* Build an array of sections, suitably sorted to place into
3626
     overlays.  */
3627
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
3628
  if (ovly_sections == NULL)
3629
    goto err_exit;
3630
  ovly_p = ovly_sections;
3631
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
3632
    goto err_exit;
3633
  count = (size_t) (ovly_p - ovly_sections) / 2;
3634
 
3635
  script = htab->spu_elf_open_overlay_script ();
3636
 
3637
  if (fprintf (script, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
3638
    goto file_err;
3639
 
3640
  memset (&dummy_caller, 0, sizeof (dummy_caller));
3641
  overlay_size = htab->local_store - fixed_size;
3642
  base = 0;
3643
  ovlynum = 0;
3644
  while (base < count)
3645
    {
3646
      unsigned int size = 0;
3647
      unsigned int j;
3648
 
3649
      for (i = base; i < count; i++)
3650
        {
3651
          asection *sec;
3652
          unsigned int tmp;
3653
          unsigned int stub_size;
3654
          struct call_info *call, *pasty;
3655
          struct _spu_elf_section_data *sec_data;
3656
          struct spu_elf_stack_info *sinfo;
3657
          int k;
3658
 
3659
          /* See whether we can add this section to the current
3660
             overlay without overflowing our overlay buffer.  */
3661
          sec = ovly_sections[2 * i];
3662
          tmp = size + sec->size;
3663
          if (ovly_sections[2 * i + 1])
3664
            tmp += ovly_sections[2 * i + 1]->size;
3665
          if (tmp > overlay_size)
3666
            break;
3667
          if (sec->segment_mark)
3668
            {
3669
              /* Pasted sections must stay together, so add their
3670
                 sizes too.  */
3671
              struct call_info *pasty = find_pasted_call (sec);
3672
              while (pasty != NULL)
3673
                {
3674
                  struct function_info *call_fun = pasty->fun;
3675
                  tmp += call_fun->sec->size;
3676
                  if (call_fun->rodata)
3677
                    tmp += call_fun->rodata->size;
3678
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
3679
                    if (pasty->is_pasted)
3680
                      break;
3681
                }
3682
            }
3683
          if (tmp > overlay_size)
3684
            break;
3685
 
3686
          /* If we add this section, we might need new overlay call
3687
             stubs.  Add any overlay section calls to dummy_call.  */
3688
          pasty = NULL;
3689
          sec_data = spu_elf_section_data (sec);
3690
          sinfo = sec_data->u.i.stack_info;
3691
          for (k = 0; k < sinfo->num_fun; ++k)
3692
            for (call = sinfo->fun[k].call_list; call; call = call->next)
3693
              if (call->is_pasted)
3694
                {
3695
                  BFD_ASSERT (pasty == NULL);
3696
                  pasty = call;
3697
                }
3698
              else if (call->fun->sec->linker_mark)
3699
                {
3700
                  if (!copy_callee (&dummy_caller, call))
3701
                    goto err_exit;
3702
                }
3703
          while (pasty != NULL)
3704
            {
3705
              struct function_info *call_fun = pasty->fun;
3706
              pasty = NULL;
3707
              for (call = call_fun->call_list; call; call = call->next)
3708
                if (call->is_pasted)
3709
                  {
3710
                    BFD_ASSERT (pasty == NULL);
3711
                    pasty = call;
3712
                  }
3713
                else if (!copy_callee (&dummy_caller, call))
3714
                  goto err_exit;
3715
            }
3716
 
3717
          /* Calculate call stub size.  */
3718
          stub_size = 0;
3719
          for (call = dummy_caller.call_list; call; call = call->next)
3720
            {
3721
              unsigned int k;
3722
 
3723
              stub_size += OVL_STUB_SIZE;
3724
              /* If the call is within this overlay, we won't need a
3725
                 stub.  */
3726
              for (k = base; k < i + 1; k++)
3727
                if (call->fun->sec == ovly_sections[2 * k])
3728
                  {
3729
                    stub_size -= OVL_STUB_SIZE;
3730
                    break;
3731
                  }
3732
            }
3733
          if (tmp + stub_size > overlay_size)
3734
            break;
3735
 
3736
          size = tmp;
3737
        }
3738
 
3739
      if (i == base)
3740
        {
3741
          info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
3742
                                  ovly_sections[2 * i]->owner,
3743
                                  ovly_sections[2 * i],
3744
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
3745
          bfd_set_error (bfd_error_bad_value);
3746
          goto err_exit;
3747
        }
3748
 
3749
      if (fprintf (script, "  .ovly%d {\n", ++ovlynum) <= 0)
3750
        goto file_err;
3751
      for (j = base; j < i; j++)
3752
        {
3753
          asection *sec = ovly_sections[2 * j];
3754
 
3755
          if (fprintf (script, "   %s%c%s (%s)\n",
3756
                       (sec->owner->my_archive != NULL
3757
                        ? sec->owner->my_archive->filename : ""),
3758
                       info->path_separator,
3759
                       sec->owner->filename,
3760
                       sec->name) <= 0)
3761
            goto file_err;
3762
          if (sec->segment_mark)
3763
            {
3764
              struct call_info *call = find_pasted_call (sec);
3765
              while (call != NULL)
3766
                {
3767
                  struct function_info *call_fun = call->fun;
3768
                  sec = call_fun->sec;
3769
                  if (fprintf (script, "   %s%c%s (%s)\n",
3770
                               (sec->owner->my_archive != NULL
3771
                                ? sec->owner->my_archive->filename : ""),
3772
                               info->path_separator,
3773
                               sec->owner->filename,
3774
                               sec->name) <= 0)
3775
                    goto file_err;
3776
                  for (call = call_fun->call_list; call; call = call->next)
3777
                    if (call->is_pasted)
3778
                      break;
3779
                }
3780
            }
3781
        }
3782
 
3783
      for (j = base; j < i; j++)
3784
        {
3785
          asection *sec = ovly_sections[2 * j + 1];
3786
          if (sec != NULL
3787
              && fprintf (script, "   %s%c%s (%s)\n",
3788
                          (sec->owner->my_archive != NULL
3789
                           ? sec->owner->my_archive->filename : ""),
3790
                          info->path_separator,
3791
                          sec->owner->filename,
3792
                          sec->name) <= 0)
3793
            goto file_err;
3794
 
3795
          sec = ovly_sections[2 * j];
3796
          if (sec->segment_mark)
3797
            {
3798
              struct call_info *call = find_pasted_call (sec);
3799
              while (call != NULL)
3800
                {
3801
                  struct function_info *call_fun = call->fun;
3802
                  sec = call_fun->rodata;
3803
                  if (sec != NULL
3804
                      && fprintf (script, "   %s%c%s (%s)\n",
3805
                                  (sec->owner->my_archive != NULL
3806
                                   ? sec->owner->my_archive->filename : ""),
3807
                                  info->path_separator,
3808
                                  sec->owner->filename,
3809
                                  sec->name) <= 0)
3810
                    goto file_err;
3811
                  for (call = call_fun->call_list; call; call = call->next)
3812
                    if (call->is_pasted)
3813
                      break;
3814
                }
3815
            }
3816
        }
3817
 
3818
      if (fprintf (script, "  }\n") <= 0)
3819
        goto file_err;
3820
 
3821
      while (dummy_caller.call_list != NULL)
3822
        {
3823
          struct call_info *call = dummy_caller.call_list;
3824
          dummy_caller.call_list = call->next;
3825
          free (call);
3826
        }
3827
 
3828
      base = i;
3829
    }
3830
  free (ovly_sections);
3831
 
3832
  if (fprintf (script, " }\n}\nINSERT AFTER .text;\n") <= 0)
3833
    goto file_err;
3834
  if (fclose (script) != 0)
3835
    goto file_err;
3836
 
3837
  if (htab->auto_overlay & AUTO_RELINK)
3838
    htab->spu_elf_relink ();
3839
 
3840
  xexit (0);
3841
 
3842
 file_err:
3843
  bfd_set_error (bfd_error_system_call);
3844
 err_exit:
3845
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
3846
  xexit (1);
3847
}
3848
 
3849
/* Provide an estimate of total stack required.  */
3850
 
3851
static bfd_boolean
3852
spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
3853
{
3854
  struct _sum_stack_param sum_stack_param;
3855
 
3856
  if (!discover_functions (info))
3857
    return FALSE;
3858
 
3859
  if (!build_call_tree (info))
3860
    return FALSE;
3861
 
3862
  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
3863
  info->callbacks->minfo (_("\nStack size for functions.  "
3864
                            "Annotations: '*' max stack, 't' tail call\n"));
3865
 
3866
  sum_stack_param.emit_stack_syms = emit_stack_syms;
3867
  sum_stack_param.overall_stack = 0;
3868
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
3869
    return FALSE;
3870
 
3871
  info->callbacks->info (_("Maximum stack required is 0x%v\n"),
3872
                         (bfd_vma) sum_stack_param.overall_stack);
3873
  return TRUE;
3874
}
3875
 
3876
/* Perform a final link.  */
3877
 
3878
static bfd_boolean
3879
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
3880
{
3881
  struct spu_link_hash_table *htab = spu_hash_table (info);
3882
 
3883
  if (htab->auto_overlay)
3884
    spu_elf_auto_overlay (info, htab->spu_elf_load_ovl_mgr);
3885
 
3886
  if (htab->stack_analysis
3887
      && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
3888
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");
3889
 
3890
  return bfd_elf_final_link (output_bfd, info);
3891
}
3892
 
3893
/* Called when not normally emitting relocs, ie. !info->relocatable
3894
   and !info->emitrelocations.  Returns a count of special relocs
3895
   that need to be emitted.  */
3896
 
3897
static unsigned int
3898
spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
3899
{
3900
  unsigned int count = 0;
3901
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;
3902
 
3903
  for (; relocs < relend; relocs++)
3904
    {
3905
      int r_type = ELF32_R_TYPE (relocs->r_info);
3906
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
3907
        ++count;
3908
    }
3909
 
3910
  return count;
3911
}
3912
 
3913
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.
   Returns FALSE on error, TRUE on success, or 2 when some relocs
   were removed from the reloc array (only the special PPU relocs are
   kept) so that the generic reloc-output code processes the reduced
   count.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
			  struct bfd_link_info *info,
			  bfd *input_bfd,
			  asection *input_section,
			  bfd_byte *contents,
			  Elf_Internal_Rela *relocs,
			  Elf_Internal_Sym *local_syms,
			  asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea = bfd_get_section_by_name (output_bfd, "._ea");
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;

  htab = spu_hash_table (info);
  /* Only bother with stub lookups when stub sections exist and this
     input section could plausibly need them.  */
  stubs = (htab->stub_sec != NULL
	   && maybe_needs_stubs (input_section, output_bfd));
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      /* NOTE: warned is set below but not otherwise consulted in this
	 function; it records that undefined_symbol already diagnosed
	 the symbol.  */
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
	{
	  /* Local symbol: resolve directly from the symbol table.  */
	  sym = local_syms + r_symndx;
	  sec = local_sections[r_symndx];
	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
	}
      else
	{
	  if (sym_hashes == NULL)
	    return FALSE;

	  /* Global symbol: follow indirect/warning links to the real
	     hash table entry.  */
	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];

	  while (h->root.type == bfd_link_hash_indirect
		 || h->root.type == bfd_link_hash_warning)
	    h = (struct elf_link_hash_entry *) h->root.u.i.link;

	  relocation = 0;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    {
	      sec = h->root.u.def.section;
	      if (sec == NULL
		  || sec->output_section == NULL)
		/* Set a flag that will be cleared later if we find a
		   relocation value for this symbol.  output_section
		   is typically NULL for symbols satisfied by a shared
		   library.  */
		unresolved_reloc = TRUE;
	      else
		relocation = (h->root.u.def.value
			      + sec->output_section->vma
			      + sec->output_offset);
	    }
	  else if (h->root.type == bfd_link_hash_undefweak)
	    ;
	  else if (info->unresolved_syms_in_objects == RM_IGNORE
		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
	    ;
	  else if (!info->relocatable
		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
	    {
	      /* Undefined symbol: report it, with severity depending
		 on link options and symbol visibility.  PPU relocs are
		 exempt since they are resolved later, not here.  */
	      bfd_boolean err;
	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
	      if (!info->callbacks->undefined_symbol (info,
						      h->root.root.string,
						      input_bfd,
						      input_section,
						      rel->r_offset, err))
		return FALSE;
	      warned = TRUE;
	    }
	  sym_name = h->root.root.string;
	}

      if (sec != NULL && elf_discarded_section (sec))
	{
	  /* For relocs against symbols from removed linkonce sections,
	     or sections discarded by a linker script, we just want the
	     section contents zeroed.  Avoid any special processing.  */
	  _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
	  rel->r_info = 0;
	  rel->r_addend = 0;
	  continue;
	}

      if (info->relocatable)
	continue;

      is_ea_sym = (ea != NULL
		   && sec != NULL
		   && sec->output_section == ea);

      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	{
	  if (is_ea_sym)
	    {
	      /* ._ea is a special section that isn't allocated in SPU
		 memory, but rather occupies space in PPU memory as
		 part of an embedded ELF image.  If this reloc is
		 against a symbol defined in ._ea, then transform the
		 reloc into an equivalent one without a symbol
		 relative to the start of the ELF image.  */
	      rel->r_addend += (relocation
				- ea->vma
				+ elf_section_data (ea)->this_hdr.sh_offset);
	      rel->r_info = ELF32_R_INFO (0, r_type);
	    }
	  /* PPU relocs are not applied here; they are kept in the
	     output (see the squashing pass at the end).  */
	  emit_these_relocs = TRUE;
	  continue;
	}

      /* A non-PPU reloc against ._ea cannot be resolved to an SPU
	 address.  */
      if (is_ea_sym)
	unresolved_reloc = TRUE;

      if (unresolved_reloc)
	{
	  (*_bfd_error_handler)
	    (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
	     input_bfd,
	     bfd_get_section_name (input_bfd, input_section),
	     (long) rel->r_offset,
	     howto->name,
	     sym_name);
	  ret = FALSE;
	}

      /* If this symbol is in an overlay area, we may need to relocate
	 to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
					  contents, info)) != no_stub)
	{
	  unsigned int ovl = 0;
	  struct got_entry *g, **head;

	  if (stub_type != nonovl_stub)
	    ovl = (spu_elf_section_data (input_section->output_section)
		   ->u.o.ovl_index);

	  /* Locate the got/stub entry list for this symbol: hash entry
	     for globals, per-bfd local array for locals.  */
	  if (h != NULL)
	    head = &h->got.glist;
	  else
	    head = elf_local_got_ents (input_bfd) + r_symndx;

	  /* A matching entry must have been created earlier; ovl 0
	     entries serve any overlay.  */
	  for (g = *head; g != NULL; g = g->next)
	    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	      break;
	  if (g == NULL)
	    abort ();

	  /* Redirect the reloc at the stub, with a zero addend since
	     the original addend was folded into the stub entry.  */
	  relocation = g->stub_addr;
	  addend = 0;
	}

      r = _bfd_final_link_relocate (howto,
				    input_bfd,
				    input_section,
				    contents,
				    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
	{
	  const char *msg = (const char *) 0;

	  switch (r)
	    {
	    case bfd_reloc_overflow:
	      if (!((*info->callbacks->reloc_overflow)
		    (info, (h ? &h->root : NULL), sym_name, howto->name,
		     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
		return FALSE;
	      break;

	    case bfd_reloc_undefined:
	      if (!((*info->callbacks->undefined_symbol)
		    (info, sym_name, input_bfd, input_section,
		     rel->r_offset, TRUE)))
		return FALSE;
	      break;

	    case bfd_reloc_outofrange:
	      msg = _("internal error: out of range error");
	      goto common_error;

	    case bfd_reloc_notsupported:
	      msg = _("internal error: unsupported relocation error");
	      goto common_error;

	    case bfd_reloc_dangerous:
	      msg = _("internal error: dangerous error");
	      goto common_error;

	    default:
	      msg = _("internal error: unknown error");
	      /* fall through */

	    common_error:
	      ret = FALSE;
	      if (!((*info->callbacks->warning)
		    (info, msg, sym_name, input_bfd, input_section,
		     rel->r_offset)))
		return FALSE;
	      break;
	    }
	}
    }

  /* Squash the reloc array down to just the PPU relocs we promised
     to emit, and adjust the section header so the generic code
     outputs the right number.  */
  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
	{
	  int r_type;

	  r_type = ELF32_R_TYPE (rel->r_info);
	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
	    *wrel++ = *rel;
	}
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
4184
 
4185
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
4186
 
4187
static bfd_boolean
4188
spu_elf_output_symbol_hook (struct bfd_link_info *info,
4189
                            const char *sym_name ATTRIBUTE_UNUSED,
4190
                            Elf_Internal_Sym *sym,
4191
                            asection *sym_sec ATTRIBUTE_UNUSED,
4192
                            struct elf_link_hash_entry *h)
4193
{
4194
  struct spu_link_hash_table *htab = spu_hash_table (info);
4195
 
4196
  if (!info->relocatable
4197
      && htab->stub_sec != NULL
4198
      && h != NULL
4199
      && (h->root.type == bfd_link_hash_defined
4200
          || h->root.type == bfd_link_hash_defweak)
4201
      && h->def_regular
4202
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
4203
    {
4204
      struct got_entry *g;
4205
 
4206
      for (g = h->got.glist; g != NULL; g = g->next)
4207
        if (g->addend == 0 && g->ovl == 0)
4208
          {
4209
            sym->st_shndx = (_bfd_elf_section_from_bfd_section
4210
                             (htab->stub_sec[0]->output_section->owner,
4211
                              htab->stub_sec[0]->output_section));
4212
            sym->st_value = g->stub_addr;
4213
            break;
4214
          }
4215
    }
4216
 
4217
  return TRUE;
4218
}
4219
 
4220
/* Nonzero when linking a plugin; consulted when the ELF header is
   written out.  */
static int spu_plugin = 0;

/* Record the plugin flag VAL for later use.  */

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
4227
 
4228
/* Set ELF header e_type for plugins.  */
4229
 
4230
static void
4231
spu_elf_post_process_headers (bfd *abfd,
4232
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
4233
{
4234
  if (spu_plugin)
4235
    {
4236
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
4237
 
4238
      i_ehdrp->e_type = ET_DYN;
4239
    }
4240
}
4241
 
4242
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
4243
   segments for overlays.  */
4244
 
4245
static int
4246
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
4247
{
4248
  int extra = 0;
4249
  asection *sec;
4250
 
4251
  if (info != NULL)
4252
    {
4253
      struct spu_link_hash_table *htab = spu_hash_table (info);
4254
      extra = htab->num_overlays;
4255
    }
4256
 
4257
  if (extra)
4258
    ++extra;
4259
 
4260
  sec = bfd_get_section_by_name (abfd, ".toe");
4261
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
4262
    ++extra;
4263
 
4264
  return extra;
4265
}
4266
 
4267
/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  /* Scan each multi-section PT_LOAD for .toe or an overlay section.
     When one is found at index i, the segment is split so that the
     found section sits alone.  */
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Split off the sections AFTER s into a new PT_LOAD
	       segment inserted right after m.  The segment_map
	       struct already holds one sections[] slot, hence the
	       (i + 2) in the size computation.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* If s was first, m now contains just s.  Otherwise m
	       keeps the sections BEFORE s, and s gets a fresh
	       single-section segment of its own.  */
	    m->count = 1;
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    /* Only one split per original segment; the newly created
	       segments are revisited by the outer loop via m->next.  */
	    break;
	  }

  return TRUE;
}
4323
 
4324
/* Tweak the section type of .note.spu_name.  */
4325
 
4326
static bfd_boolean
4327
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
4328
                       Elf_Internal_Shdr *hdr,
4329
                       asection *sec)
4330
{
4331
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
4332
    hdr->sh_type = SHT_NOTE;
4333
  return TRUE;
4334
}
4335
 
4336
/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk segment_map in step with the phdr array; segments whose
	 first section carries a nonzero ovl_index are overlays.  */
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Each _ovly_table entry is 16 bytes; the file offset
		   field sits 8 bytes in.  Entry 0 is unused (ovl_index
		   is 1-based).  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  /* First pass, highest address downwards: verify that rounding each
     PT_LOAD up would not run into the next (higher) PT_LOAD.  Any
     conflict breaks out early, leaving i != (unsigned int) -1.  */
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* The scan completed without conflict only if i wrapped all the
     way to (unsigned int) -1; in that case apply the rounding for
     real in a second pass.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	phdr[i].p_filesz += adjust;

	adjust = -phdr[i].p_memsz & 15;
	phdr[i].p_memsz += adjust;
      }

  return TRUE;
}
4422
 
4423
/* Target vector and backend hook definitions, consumed by
   elf32-target.h below.  */

#define TARGET_BIG_SYM          bfd_elf32_spu_vec
#define TARGET_BIG_NAME         "elf32-spu"
#define ELF_ARCH                bfd_arch_spu
#define ELF_MACHINE_CODE        EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE         0x80
#define elf_backend_rela_normal         1
#define elf_backend_can_gc_sections     1

#define bfd_elf32_bfd_reloc_type_lookup         spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup spu_elf_reloc_name_lookup
#define elf_info_to_howto                       spu_elf_info_to_howto
#define elf_backend_count_relocs                spu_elf_count_relocs
#define elf_backend_relocate_section            spu_elf_relocate_section
#define elf_backend_symbol_processing           spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook     spu_elf_output_symbol_hook
#define elf_backend_object_p                    spu_elf_object_p
#define bfd_elf32_new_section_hook              spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create    spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
#define elf_backend_modify_segment_map          spu_elf_modify_segment_map
#define elf_backend_modify_program_headers      spu_elf_modify_program_headers
#define elf_backend_post_process_headers        spu_elf_post_process_headers
#define elf_backend_fake_sections               spu_elf_fake_sections
#define elf_backend_special_sections            spu_elf_special_sections
#define bfd_elf32_bfd_final_link                spu_elf_final_link

#include "elf32-target.h"

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.