/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    }
}

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
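
/* Worked example of the bit shuffling above (illustrative only, not
   taken from the original sources): if the pc-relative word offset
   computed above is val = 0x1c5 (nine bits, 1 1100 0101), then
   (val & 0x7f) = 0x45 and (val & 0x180) = 0x180, so the packed value
   is 0x45 | (0x180 << 7) | (0x180 << 16) = 0x0180c045.  A SPU_REL9
   howto (dst_mask 0x0180007f) keeps 0x01800045 of that, while a
   SPU_REL9I howto (dst_mask 0x0000c07f) keeps 0x0000c045, which is
   why one routine can serve both relocation types.  */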

static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_load;
  struct elf_link_hash_entry *ovly_return;
  unsigned long ovly_load_r_symndx;

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* How much memory we have.  */
  unsigned int local_store;
  /* Local store --auto-overlay should reserve for non-overlay
     functions and data.  */
  unsigned int overlay_fixed;
  /* Local store --auto-overlay should reserve for stack and heap.  */
  unsigned int reserved;
  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Stash various callbacks for --auto-overlay.  */
  void (*spu_elf_load_ovl_mgr) (void);
  FILE *(*spu_elf_open_overlay_script) (void);
  void (*spu_elf_relink) (void);

  /* Bit 0 set if --auto-overlay.
     Bit 1 set if --auto-relink.
     Bit 2 set if --overlay-rodata.  */
  unsigned int auto_overlay : 3;
#define AUTO_OVERLAY 1
#define AUTO_RELINK 2
#define OVERLAY_RODATA 4

  /* Set if we should emit symbols for stubs.  */
  unsigned int emit_stub_syms:1;

  /* Set if we want stubs on calls out of overlay regions to
     non-overlay regions.  */
  unsigned int non_overlay_stubs : 1;

  /* Set on error.  */
  unsigned int stub_err : 1;

  /* Set if stack size analysis should be done.  */
  unsigned int stack_analysis : 1;

  /* Set if __stack_* syms will be emitted.  */
  unsigned int emit_stack_syms : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  bfd_vma addend;
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  ((struct spu_link_hash_table *) ((p)->hash))

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry)))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            {
              size_t symcount = symtab_hdr->sh_info;

              /* If we are reading symbols into the contents, then
                 read the global syms too.  This is done to cache
                 syms for later stack analysis.  */
              if ((unsigned char **) locsymsp == &symtab_hdr->contents)
                symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
              locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
                                              NULL, NULL, NULL);
            }
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info,
                         int stack_analysis,
                         int emit_stack_syms)
{
  bfd *ibfd;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  /* Stash some options away where we can get at them later.  */
  htab->stack_analysis = stack_analysis;
  htab->emit_stack_syms = emit_stack_syms;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.  */

bfd_boolean
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;

  if (info->output_bfd->section_count < 2)
    return FALSE;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return FALSE;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return FALSE;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  /* Look for overlapping vmas.  Any with overlap must be overlays.
     Count them.  Also count the number of overlay regions.  */
  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
    {
      s = alloc_sec[i];
      if (s->vma < ovl_end)
        {
          asection *s0 = alloc_sec[i - 1];

          if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
            {
              alloc_sec[ovl_index] = s0;
              spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
              spu_elf_section_data (s0)->u.o.ovl_buf = ++num_buf;
            }
          alloc_sec[ovl_index] = s;
          spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
          spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
          if (s0->vma != s->vma)
            {
              info->callbacks->einfo (_("%X%P: overlay sections %A and %A "
                                        "do not start at the same address.\n"),
                                      s0, s);
              return FALSE;
            }
          if (ovl_end < s->vma + s->size)
            ovl_end = s->vma + s->size;
        }
      else
        ovl_end = s->vma + s->size;
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;
  htab->ovly_load = elf_link_hash_lookup (&htab->elf, "__ovly_load",
                                          FALSE, FALSE, FALSE);
  htab->ovly_return = elf_link_hash_lookup (&htab->elf, "__ovly_return",
                                            FALSE, FALSE, FALSE);
  return ovl_index != 0;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.  */
#ifndef OVL_STUB_SIZE
/* Default to faster.  */
#define OVL_STUB_SIZE 16
/* #define OVL_STUB_SIZE 8 */
#endif
#define BRSL    0x33000000
#define BR      0x32000000
#define NOP     0x40200000
#define LNOP    0x00200000
#define ILA     0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
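
/* Sanity check of the masks above, worked out for illustration (the
   values come from the opcode tables in the comments, not from the
   original sources): br has a first byte of 0x32, and 0x32 & 0xec ==
   0x20, so is_branch accepts it; bi has a first byte of 0x35, and
   0x35 & 0xec == 0x24, so is_branch rejects it while is_indirect_branch
   accepts it (0x35 & 0xef == 0x25); hbrr starts with 0x12 or 0x13,
   both of which satisfy (insn[0] & 0xfc) == 0x10 in is_hint.  */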

/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section, bfd *output_bfd)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == NULL
      || input_section->output_section->owner != output_bfd)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
  no_stub,
  ovl_stub,
  nonovl_stub,
  stub_error
};

/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch;
  enum _stub_type ret = no_stub;

  if (sym_sec == NULL
      || sym_sec->output_section == NULL
      || sym_sec->output_section->owner != info->output_bfd
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_load || h == htab->ovly_return)
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = ovl_stub;
    }

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->non_overlay_stubs)
    return ret;

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      bfd_byte insn[4];

      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      if (is_branch (contents) || is_hint (contents))
        {
          branch = TRUE;
          if ((contents[0] & 0xfd) == 0x31
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);

            }
        }
    }

  if (sym_type != STT_FUNC
      && !branch
      && (sym_sec->flags & SEC_CODE) == 0)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    return ovl_stub;

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  */
  return !branch && sym_type == STT_FUNC ? nonovl_stub : ret;
}

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return FALSE;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load  */

static bfd_boolean
build_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  unsigned int ovl;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, val, from, to;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  for (g = *head; g != NULL; g = g->next)
    if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
      break;
  if (g == NULL)
    abort ();

  if (g->ovl == 0 && ovl != 0)
    return TRUE;

  if (g->stub_addr != (bfd_vma) -1)
    return TRUE;

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_load->root.u.def.value
        + htab->ovly_load->root.u.def.section->output_offset
        + htab->ovly_load->root.u.def.section->output_section->vma);
  val = to - from;
  if (OVL_STUB_SIZE == 16)
    val -= 12;
  if (((dest | to | from) & 3) != 0
      || val + 0x20000 >= 0x40000)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (OVL_STUB_SIZE == 16)
    {
      bfd_put_32 (sec->owner, ILA + ((ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      bfd_put_32 (sec->owner, BR + ((val << 5) & 0x007fff80),
                  sec->contents + sec->size + 12);
    }
  else if (OVL_STUB_SIZE == 8)
    {
      bfd_put_32 (sec->owner, BRSL + ((val << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size);

      val = (dest & 0x3ffff) | (ovl << 14);
      bfd_put_32 (sec->owner, val,
                  sec->contents + sec->size + 4);
    }
  else
    abort ();
  sec->size += OVL_STUB_SIZE;

  if (htab->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
        len += strlen (h->root.root.string);
      else
        len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
        add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
        len += 1 + 8;
      name = bfd_malloc (len);
      if (name == NULL)
        return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
        sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->root.u.def.value = sec->size - OVL_STUB_SIZE;
          h->size = OVL_STUB_SIZE;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}
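
/* For illustration only (an example worked out from the code above,
   not taken from the original sources): with OVL_STUB_SIZE == 16 and
   a destination in overlay 2, the first stub word is
   ILA + ((2 << 7) & 0x01ffff80) + 78 = 0x42000000 + 0x100 + 0x4e
   = 0x4200014e, i.e. "ila $78,2".  The -12 applied to val above
   matches the BR being the fourth word of the stub, so the branch
   displacement is taken relative to the branch itself.  */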

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != NULL
      && sym_sec->output_section->owner == info->output_bfd
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->non_overlay_stubs))
    {
      count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return TRUE;
}

static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != NULL
      && sym_sec->output_section->owner == info->output_bfd
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->non_overlay_stubs))
    {
      build_stub (htab, NULL, NULL, nonovl_stub, h, NULL,
                  h->root.u.def.value, sym_sec);
    }

  return TRUE;
}

/* Size or build stubs.  */

static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;
      void *psyms;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
        continue;

      /* Arrange to read and keep global syms for later stack analysis.  */
      psyms = &local_syms;
      if (htab->stack_analysis)
        psyms = &symtab_hdr->contents;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
        {
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

          /* If there aren't any relocs, then there's nothing more to do.  */
          if ((isec->flags & SEC_RELOC) == 0
              || isec->reloc_count == 0)
            continue;

          if (!maybe_needs_stubs (isec, info->output_bfd))
            continue;

          /* Get the relocs.  */
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                                       info->keep_memory);
          if (internal_relocs == NULL)
            goto error_ret_free_local;

          /* Now examine each relocation.  */
          irela = internal_relocs;
          irelaend = irela + isec->reloc_count;
          for (; irela < irelaend; irela++)
            {
              enum elf_spu_reloc_type r_type;
              unsigned int r_indx;
              asection *sym_sec;
              Elf_Internal_Sym *sym;
              struct elf_link_hash_entry *h;
              enum _stub_type stub_type;

              r_type = ELF32_R_TYPE (irela->r_info);
              r_indx = ELF32_R_SYM (irela->r_info);

              if (r_type >= R_SPU_max)
                {
                  bfd_set_error (bfd_error_bad_value);
                error_ret_free_internal:
                  if (elf_section_data (isec)->relocs != internal_relocs)
                    free (internal_relocs);
                error_ret_free_local:
                  if (local_syms != NULL
                      && (symtab_hdr->contents
                          != (unsigned char *) local_syms))
                    free (local_syms);
                  return FALSE;
                }

              /* Determine the reloc target section.  */
              if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, ibfd))
                goto error_ret_free_internal;

              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
                                          NULL, info);
              if (stub_type == no_stub)
                continue;
              else if (stub_type == stub_error)
                goto error_ret_free_internal;

              if (htab->stub_count == NULL)
                {
                  bfd_size_type amt;
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
                  htab->stub_count = bfd_zmalloc (amt);
                  if (htab->stub_count == NULL)
                    goto error_ret_free_internal;
                }

              if (!build)
                {
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
                    goto error_ret_free_internal;
                }
              else
                {
                  bfd_vma dest;

                  if (h != NULL)
                    dest = h->root.u.def.value;
                  else
                    dest = sym->st_value;
                  dest += irela->r_addend;
                  if (!build_stub (htab, ibfd, isec, stub_type, h, irela,
                                   dest, sym_sec))
                    goto error_ret_free_internal;
                }
            }

          /* We're done with the internal relocs, free them.  */
          if (elf_section_data (isec)->relocs != internal_relocs)
            free (internal_relocs);
        }

      if (local_syms != NULL
          && symtab_hdr->contents != (unsigned char *) local_syms)
        {
          if (!info->keep_memory)
            free (local_syms);
          else
            symtab_hdr->contents = (unsigned char *) local_syms;
        }
    }

  return TRUE;
}

/* Allocate space for overlay call and return stubs.  */

int
spu_elf_size_stubs (struct bfd_link_info *info,
                    void (*place_spu_section) (asection *, asection *,
                                               const char *),
                    int non_overlay_stubs)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  htab->non_overlay_stubs = non_overlay_stubs;
  if (!process_stubs (info, FALSE))
    return 0;

  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  if (htab->stub_count == NULL)
    return 1;

  ibfd = info->input_bfds;
  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
  htab->stub_sec = bfd_zmalloc (amt);
  if (htab->stub_sec == NULL)
    return 0;

  flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
  htab->stub_sec[0] = stub;
  if (stub == NULL
      || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
    return 0;
  stub->size = htab->stub_count[0] * OVL_STUB_SIZE;
  (*place_spu_section) (stub, NULL, ".text");

  for (i = 0; i < htab->num_overlays; ++i)
    {
      asection *osec = htab->ovl_sec[i];
      unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[ovl] = stub;
      if (stub == NULL
          || !bfd_set_section_alignment (ibfd, stub, 3 + (OVL_STUB_SIZE > 8)))
        return 0;
      stub->size = htab->stub_count[ovl] * OVL_STUB_SIZE;
      (*place_spu_section) (stub, osec, NULL);
    }

 /* htab->ovtab consists of two arrays.
    .   struct {
    .     u32 vma;
    .     u32 size;
    .     u32 file_off;
    .     u32 buf;
    .   } _ovly_table[];
    .
    .   struct {
    .     u32 mapped;
    .   } _ovly_buf_table[];
    .  */

  flags = (SEC_ALLOC | SEC_LOAD
           | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
  htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
  if (htab->ovtab == NULL
      || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
    return 0;

  htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  (*place_spu_section) (htab->ovtab, NULL, ".data");

  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;
  (*place_spu_section) (htab->toe, NULL, ".toe");

  return 2;
}
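
/* Size arithmetic above, for illustration (the numbers are made up,
   not from the original sources): .ovtab holds a leading 16-byte slot
   used to mark the non-overlay area (see the p[7] = 1 write in
   spu_elf_build_stubs), one 16-byte _ovly_table entry per overlay and
   one 4-byte _ovly_buf_table word per buffer, so a program with 8
   overlays in 2 buffers gets 8 * 16 + 16 + 2 * 4 = 152 bytes.  */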

/* Functions to handle embedded spu_ovl.o object.  */

static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  return stream;
}

static file_ptr
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
               void *stream,
               void *buf,
               file_ptr nbytes,
               file_ptr offset)
{
  struct _ovl_stream *os;
  size_t count;
  size_t max;

  os = (struct _ovl_stream *) stream;
  max = (const char *) os->end - (const char *) os->start;

  if ((ufile_ptr) offset >= max)
    return 0;

  count = nbytes;
  if (count > max - offset)
    count = max - offset;

  memcpy (buf, (const char *) os->start + offset, count);
  return count;
}

bfd_boolean
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
{
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
                              "elf32-spu",
                              ovl_mgr_open,
                              (void *) stream,
                              ovl_mgr_pread,
                              NULL,
                              NULL);
  return *ovl_bfd != NULL;
}

/* Define an STT_OBJECT symbol.  */

static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  else
    {
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
                             h->root.u.def.section->owner,
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}

/* Fill in all stubs and the overlay tables.  */

bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info, int emit_syms)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  htab->emit_stub_syms = emit_syms;
  if (htab->stub_count == NULL)
    return TRUE;

  for (i = 0; i <= htab->num_overlays; i++)
    if (htab->stub_sec[i]->size != 0)
      {
        htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
                                                  htab->stub_sec[i]->size);
        if (htab->stub_sec[i]->contents == NULL)
          return FALSE;
        htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
        htab->stub_sec[i]->size = 0;
      }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_load", FALSE, FALSE, FALSE);
  htab->ovly_load = h;
  BFD_ASSERT (h != NULL
              && (h->root.type == bfd_link_hash_defined
                  || h->root.type == bfd_link_hash_defweak)
              && h->def_regular);

  s = h->root.u.def.section->output_section;
  if (spu_elf_section_data (s)->u.o.ovl_index)
    {
      (*_bfd_error_handler) (_("%s in overlay section"),
                             h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  h = elf_link_hash_lookup (&htab->elf, "__ovly_return", FALSE, FALSE, FALSE);
  htab->ovly_return = h;

  /* Fill in all the stubs.  */
  process_stubs (info, TRUE);

  elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
  if (htab->stub_err)
    return FALSE;

  for (i = 0; i <= htab->num_overlays; i++)
    {
      if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
        {
          (*_bfd_error_handler)  (_("stubs don't match calculated size"));
          bfd_set_error (bfd_error_bad_value);
          return FALSE;
        }
      htab->stub_sec[i]->rawsize = 0;
    }

  if (htab->stub_err)
    {
      (*_bfd_error_handler) (_("overlay stub relocation overflow"));
      bfd_set_error (bfd_error_bad_value);
      return FALSE;
    }

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  /* Write out _ovly_table.  */
  p = htab->ovtab->contents;
  /* set low bit of .size to mark non-overlay area as present.  */
  p[7] = 1;
  obfd = htab->ovtab->output_section->owner;
  for (s = obfd->sections; s != NULL; s = s->next)
    {
      unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

      if (ovl_index != 0)
        {
          unsigned long off = ovl_index * 16;
          unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

          bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
          bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16, p + off + 4);
          /* file_off written later in spu_elf_modify_program_headers.  */
          bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
        }
    }

  h = define_ovtab_symbol (htab, "_ovly_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = 16;
  h->size = htab->num_overlays * 16;

  h = define_ovtab_symbol (htab, "_ovly_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_ovly_buf_table");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16;
  h->size = htab->num_buf * 4;

  h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
  if (h == NULL)
    return FALSE;
  h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
  h->size = 0;

  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}

/* Check that all loadable section VMAs lie in the range
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */

asection *
spu_elf_check_vma (struct bfd_link_info *info,
                   int auto_overlay,
                   unsigned int lo,
                   unsigned int hi,
                   unsigned int overlay_fixed,
                   unsigned int reserved,
                   void (*spu_elf_load_ovl_mgr) (void),
                   FILE *(*spu_elf_open_overlay_script) (void),
                   void (*spu_elf_relink) (void))
{
  struct elf_segment_map *m;
  unsigned int i;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *abfd = info->output_bfd;

  if (auto_overlay & AUTO_OVERLAY)
    htab->auto_overlay = auto_overlay;
  htab->local_store = hi + 1 - lo;
  htab->overlay_fixed = overlay_fixed;
  htab->reserved = reserved;
  htab->spu_elf_load_ovl_mgr = spu_elf_load_ovl_mgr;
  htab->spu_elf_open_overlay_script = spu_elf_open_overlay_script;
  htab->spu_elf_relink = spu_elf_relink;

  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0
            && (m->sections[i]->vma < lo
                || m->sections[i]->vma > hi
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
          return m->sections[i];

  /* No need for overlays if it all fits.  */
  htab->auto_overlay = 0;
  return NULL;
}

/* OFFSET in SEC (presumably) is the beginning of a function prologue.
   Search for stack adjusting insns, and return the sp delta.  */

static int
find_function_stack_adjust (asection *sec, bfd_vma offset)
{
  int unrecog;
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for (unrecog = 0; offset + 4 <= sec->size && unrecog < 32; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
        break;

      if (buf[0] == 0x24 /* stqd */)
        continue;

      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] + imm;

          if (rt == 1 /* sp */)
            {
              if (imm > 0)
                break;
              return reg[rt];
            }
        }
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
        {
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

          reg[rt] = reg[ra] + reg[rb];
          if (rt == 1)
            return reg[rt];
        }
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
        {
          if (buf[0] >= 0x42 /* ila */)
            imm |= (buf[0] & 1) << 17;
          else
            {
              imm &= 0xffff;

              if (buf[0] == 0x40 /* il */)
                {
                  if ((buf[1] & 0x80) == 0)
                    goto unknown_insn;
                  imm = (imm ^ 0x8000) - 0x8000;
                }
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
                imm <<= 16;
            }
          reg[rt] = imm;
          continue;
        }
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
        {
          reg[rt] |= imm & 0xffff;
          continue;
        }
      else if (buf[0] == 0x04 /* ori */)
        {
          imm >>= 7;
          imm = (imm ^ 0x200) - 0x200;
          reg[rt] = reg[ra] | imm;
          continue;
        }
      else if ((buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
               || (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */))
        {
          /* Used in pic reg load.  Say rt is trashed.  */
          reg[rt] = 0;
          continue;
        }
      else if (is_branch (buf) || is_indirect_branch (buf))
        /* If we hit a branch then we must be out of the prologue.  */
        break;
    unknown_insn:
      ++unrecog;
    }

  return 0;
}
1655
 
1656
/* qsort predicate to sort symbols by section and value.  */
1657
 
1658
static Elf_Internal_Sym *sort_syms_syms;
1659
static asection **sort_syms_psecs;
1660
 
1661
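/* Ties at the same address are broken by preferring the larger symbol,
   so a sized function symbol sorts before a zero-size alias, and
   finally by original symbol table order.  */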
static int
1662
sort_syms (const void *a, const void *b)
1663
{
1664
  Elf_Internal_Sym *const *s1 = a;
1665
  Elf_Internal_Sym *const *s2 = b;
1666
  asection *sec1,*sec2;
1667
  bfd_signed_vma delta;
1668
 
1669
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
1670
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
1671
 
1672
  if (sec1 != sec2)
1673
    return sec1->index - sec2->index;
1674
 
1675
  delta = (*s1)->st_value - (*s2)->st_value;
1676
  if (delta != 0)
1677
    return delta < 0 ? -1 : 1;
1678
 
1679
  delta = (*s2)->st_size - (*s1)->st_size;
1680
  if (delta != 0)
1681
    return delta < 0 ? -1 : 1;
1682
 
1683
  return *s1 < *s2 ? -1 : 1;
1684
}
1685
 
1686
struct call_info
1687
{
1688
  struct function_info *fun;
1689
  struct call_info *next;
1690
  unsigned int count;
1691
  unsigned int max_depth;
1692
  unsigned int is_tail : 1;
1693
  unsigned int is_pasted : 1;
1694
};
1695
 
1696
struct function_info
1697
{
1698
  /* List of functions called.  Also branches to hot/cold part of
1699
     function.  */
1700
  struct call_info *call_list;
1701
  /* For hot/cold part of function, point to owner.  */
1702
  struct function_info *start;
1703
  /* Symbol at start of function.  */
1704
  union {
1705
    Elf_Internal_Sym *sym;
1706
    struct elf_link_hash_entry *h;
1707
  } u;
1708
  /* Function section.  */
1709
  asection *sec;
1710
  asection *rodata;
1711
  /* Where last called from, and number of sections called from.  */
1712
  asection *last_caller;
1713
  unsigned int call_count;
1714
  /* Address range of (this part of) function.  */
1715
  bfd_vma lo, hi;
1716
  /* Stack usage.  */
1717
  int stack;
1718
  /* Distance from root of call tree.  Tail and hot/cold branches
1719
     count as one deeper.  We aren't counting stack frames here.  */
1720
  unsigned int depth;
1721
  /* Set if global symbol.  */
1722
  unsigned int global : 1;
1723
  /* Set if known to be start of function (as distinct from a hunk
1724
     in a hot/cold section).  */
1725
  unsigned int is_func : 1;
1726
  /* Set if not a root node.  */
1727
  unsigned int non_root : 1;
1728
  /* Flags used during call tree traversal.  It's cheaper to replicate
1729
     the visit flags than have one which needs clearing after a traversal.  */
1730
  unsigned int visit1 : 1;
1731
  unsigned int visit2 : 1;
1732
  unsigned int marking : 1;
1733
  unsigned int visit3 : 1;
1734
  unsigned int visit4 : 1;
1735
  unsigned int visit5 : 1;
1736
  unsigned int visit6 : 1;
1737
  unsigned int visit7 : 1;
1738
};
1739
 
1740
struct spu_elf_stack_info
1741
{
1742
  int num_fun;
1743
  int max_fun;
1744
  /* Variable size array describing functions, one per contiguous
1745
     address range belonging to a function.  */
1746
  struct function_info fun[1];
1747
};
1748
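/* FUN above is a variable-length trailing array in the pre-C99 style:
   alloc_stack_info and maybe_insert_function over-allocate the struct
   by (max_fun - 1) * sizeof (struct function_info).  */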
 
1749
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
1750
   entries for section SEC.  */
1751
 
1752
static struct spu_elf_stack_info *
1753
alloc_stack_info (asection *sec, int max_fun)
1754
{
1755
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1756
  bfd_size_type amt;
1757
 
1758
  amt = sizeof (struct spu_elf_stack_info);
1759
  amt += (max_fun - 1) * sizeof (struct function_info);
1760
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
1761
  if (sec_data->u.i.stack_info != NULL)
1762
    sec_data->u.i.stack_info->max_fun = max_fun;
1763
  return sec_data->u.i.stack_info;
1764
}
1765
 
1766
/* Add a new struct function_info describing a (part of a) function
1767
   starting at SYM_H.  Keep the array sorted by address.  */
1768
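/* If no stack info exists yet, the array starts with room for 20
   entries; it then grows by half its size plus 20 whenever it fills.  */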
 
1769
static struct function_info *
1770
maybe_insert_function (asection *sec,
1771
                       void *sym_h,
1772
                       bfd_boolean global,
1773
                       bfd_boolean is_func)
1774
{
1775
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1776
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1777
  int i;
1778
  bfd_vma off, size;
1779
 
1780
  if (sinfo == NULL)
1781
    {
1782
      sinfo = alloc_stack_info (sec, 20);
1783
      if (sinfo == NULL)
1784
        return NULL;
1785
    }
1786
 
1787
  if (!global)
1788
    {
1789
      Elf_Internal_Sym *sym = sym_h;
1790
      off = sym->st_value;
1791
      size = sym->st_size;
1792
    }
1793
  else
1794
    {
1795
      struct elf_link_hash_entry *h = sym_h;
1796
      off = h->root.u.def.value;
1797
      size = h->size;
1798
    }
1799
 
1800
  for (i = sinfo->num_fun; --i >= 0; )
1801
    if (sinfo->fun[i].lo <= off)
1802
      break;
1803
 
1804
  if (i >= 0)
1805
    {
1806
      /* Don't add another entry for an alias, but do update some
1807
         info.  */
1808
      if (sinfo->fun[i].lo == off)
1809
        {
1810
          /* Prefer globals over local syms.  */
1811
          if (global && !sinfo->fun[i].global)
1812
            {
1813
              sinfo->fun[i].global = TRUE;
1814
              sinfo->fun[i].u.h = sym_h;
1815
            }
1816
          if (is_func)
1817
            sinfo->fun[i].is_func = TRUE;
1818
          return &sinfo->fun[i];
1819
        }
1820
      /* Ignore a zero-size symbol inside an existing function.  */
1821
      else if (sinfo->fun[i].hi > off && size == 0)
1822
        return &sinfo->fun[i];
1823
    }
1824
 
1825
  if (++i < sinfo->num_fun)
1826
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
1827
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
1828
  else if (i >= sinfo->max_fun)
1829
    {
1830
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
1831
      bfd_size_type old = amt;
1832
 
1833
      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
1834
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
1835
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
1836
      sinfo = bfd_realloc (sinfo, amt);
1837
      if (sinfo == NULL)
1838
        return NULL;
1839
      memset ((char *) sinfo + old, 0, amt - old);
1840
      sec_data->u.i.stack_info = sinfo;
1841
    }
1842
  sinfo->fun[i].is_func = is_func;
1843
  sinfo->fun[i].global = global;
1844
  sinfo->fun[i].sec = sec;
1845
  if (global)
1846
    sinfo->fun[i].u.h = sym_h;
1847
  else
1848
    sinfo->fun[i].u.sym = sym_h;
1849
  sinfo->fun[i].lo = off;
1850
  sinfo->fun[i].hi = off + size;
1851
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off);
1852
  sinfo->num_fun += 1;
1853
  return &sinfo->fun[i];
1854
}
1855
 
1856
/* Return the name of FUN.  */
1857
 
1858
static const char *
1859
func_name (struct function_info *fun)
1860
{
1861
  asection *sec;
1862
  bfd *ibfd;
1863
  Elf_Internal_Shdr *symtab_hdr;
1864
 
1865
  while (fun->start != NULL)
1866
    fun = fun->start;
1867
 
1868
  if (fun->global)
1869
    return fun->u.h->root.root.string;
1870
 
1871
  sec = fun->sec;
1872
  if (fun->u.sym->st_name == 0)
1873
    {
1874
      size_t len = strlen (sec->name);
1875
      char *name = bfd_malloc (len + 10);
1876
      if (name == NULL)
1877
        return "(null)";
1878
      sprintf (name, "%s+%lx", sec->name,
1879
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
1880
      return name;
1881
    }
1882
  ibfd = sec->owner;
1883
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1884
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
1885
}
1886
 
1887
/* Read the instruction at OFF in SEC.  Return true iff the instruction
1888
   is a nop, lnop, or stop 0 (all zero insn).  */
1889
 
1890
static bfd_boolean
1891
is_nop (asection *sec, bfd_vma off)
1892
{
1893
  unsigned char insn[4];
1894
 
1895
  if (off + 4 > sec->size
1896
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
1897
    return FALSE;
1898
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
1899
    return TRUE;
1900
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
1901
    return TRUE;
1902
  return FALSE;
1903
}
1904
 
1905
/* Extend the range of FUN to cover nop padding up to LIMIT.
1906
   Return TRUE iff some instruction other than a NOP was found.  */
1907
 
1908
static bfd_boolean
1909
insns_at_end (struct function_info *fun, bfd_vma limit)
1910
{
1911
  bfd_vma off = (fun->hi + 3) & -4;
1912
 
1913
  while (off < limit && is_nop (fun->sec, off))
1914
    off += 4;
1915
  if (off < limit)
1916
    {
1917
      fun->hi = off;
1918
      return TRUE;
1919
    }
1920
  fun->hi = limit;
1921
  return FALSE;
1922
}
1923
 
1924
/* Check and fix overlapping function ranges.  Return TRUE iff there
1925
   are gaps in the current info we have about functions in SEC.  */
1926
 
1927
static bfd_boolean
1928
check_function_ranges (asection *sec, struct bfd_link_info *info)
1929
{
1930
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1931
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1932
  int i;
1933
  bfd_boolean gaps = FALSE;
1934
 
1935
  if (sinfo == NULL)
1936
    return FALSE;
1937
 
1938
  for (i = 1; i < sinfo->num_fun; i++)
1939
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
1940
      {
1941
        /* Fix overlapping symbols.  */
1942
        const char *f1 = func_name (&sinfo->fun[i - 1]);
1943
        const char *f2 = func_name (&sinfo->fun[i]);
1944
 
1945
        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
1946
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
1947
      }
1948
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
1949
      gaps = TRUE;
1950
 
1951
  if (sinfo->num_fun == 0)
1952
    gaps = TRUE;
1953
  else
1954
    {
1955
      if (sinfo->fun[0].lo != 0)
1956
        gaps = TRUE;
1957
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
1958
        {
1959
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
1960
 
1961
          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
1962
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
1963
        }
1964
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
1965
        gaps = TRUE;
1966
    }
1967
  return gaps;
1968
}
1969
 
1970
/* Search current function info for a function that contains address
1971
   OFFSET in section SEC.  */
1972
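/* The fun[] array is kept sorted by address (see maybe_insert_function),
   so a simple binary search suffices here.  */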
 
1973
static struct function_info *
1974
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
1975
{
1976
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
1977
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
1978
  int lo, hi, mid;
1979
 
1980
  lo = 0;
1981
  hi = sinfo->num_fun;
1982
  while (lo < hi)
1983
    {
1984
      mid = (lo + hi) / 2;
1985
      if (offset < sinfo->fun[mid].lo)
1986
        hi = mid;
1987
      else if (offset >= sinfo->fun[mid].hi)
1988
        lo = mid + 1;
1989
      else
1990
        return &sinfo->fun[mid];
1991
    }
1992
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
1993
                          sec, offset);
1994
  return NULL;
1995
}
1996
 
1997
/* Add CALLEE to CALLER's call list if not already present.  Return TRUE
1998
   if CALLEE was new.  If this function returns FALSE, CALLEE should
1999
   be freed.  */
2000
 
2001
static bfd_boolean
2002
insert_callee (struct function_info *caller, struct call_info *callee)
2003
{
2004
  struct call_info **pp, *p;
2005
 
2006
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2007
    if (p->fun == callee->fun)
2008
      {
2009
        /* Tail calls use less stack than normal calls.  Retain entry
2010
           for normal call over one for tail call.  */
2011
        p->is_tail &= callee->is_tail;
2012
        if (!p->is_tail)
2013
          {
2014
            p->fun->start = NULL;
2015
            p->fun->is_func = TRUE;
2016
          }
2017
        p->count += 1;
2018
        /* Reorder list so most recent call is first.  */
2019
        *pp = p->next;
2020
        p->next = caller->call_list;
2021
        caller->call_list = p;
2022
        return FALSE;
2023
      }
2024
  callee->next = caller->call_list;
2025
  callee->count += 1;
2026
  caller->call_list = callee;
2027
  return TRUE;
2028
}
2029
 
2030
/* Copy CALL and insert the copy into CALLER.  */
2031
 
2032
static bfd_boolean
2033
copy_callee (struct function_info *caller, const struct call_info *call)
2034
{
2035
  struct call_info *callee;
2036
  callee = bfd_malloc (sizeof (*callee));
2037
  if (callee == NULL)
2038
    return FALSE;
2039
  *callee = *call;
2040
  if (!insert_callee (caller, callee))
2041
    free (callee);
2042
  return TRUE;
2043
}
2044
 
2045
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2046
   overlay stub sections.  */
2047
 
2048
static bfd_boolean
2049
interesting_section (asection *s, bfd *obfd)
2050
{
2051
  return (s->output_section != NULL
2052
          && s->output_section->owner == obfd
2053
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2054
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2055
          && s->size != 0);
2056
}
2057
 
2058
/* Rummage through the relocs for SEC, looking for function calls.
2059
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
2060
   mark destination symbols on calls as being functions.  Also
2061
   look at branches, which may be tail calls or go to hot/cold
2062
   section part of same function.  */
2063
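/* This is run twice: discover_functions calls it with CALL_TREE false to
   record branch and call destinations as function starts, then
   build_call_tree calls it with CALL_TREE true to record caller/callee
   edges via insert_callee.  */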
 
2064
static bfd_boolean
2065
mark_functions_via_relocs (asection *sec,
2066
                           struct bfd_link_info *info,
2067
                           int call_tree)
2068
{
2069
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2070
  Elf_Internal_Shdr *symtab_hdr;
2071
  Elf_Internal_Sym *syms;
2072
  void *psyms;
2073
  static bfd_boolean warned;
2074
 
2075
  if (!interesting_section (sec, info->output_bfd)
2076
      || sec->reloc_count == 0)
2077
    return TRUE;
2078
 
2079
  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2080
                                               info->keep_memory);
2081
  if (internal_relocs == NULL)
2082
    return FALSE;
2083
 
2084
  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2085
  psyms = &symtab_hdr->contents;
2086
  syms = *(Elf_Internal_Sym **) psyms;
2087
  irela = internal_relocs;
2088
  irelaend = irela + sec->reloc_count;
2089
  for (; irela < irelaend; irela++)
2090
    {
2091
      enum elf_spu_reloc_type r_type;
2092
      unsigned int r_indx;
2093
      asection *sym_sec;
2094
      Elf_Internal_Sym *sym;
2095
      struct elf_link_hash_entry *h;
2096
      bfd_vma val;
2097
      bfd_boolean reject, is_call;
2098
      struct function_info *caller;
2099
      struct call_info *callee;
2100
 
2101
      reject = FALSE;
2102
      r_type = ELF32_R_TYPE (irela->r_info);
2103
      if (r_type != R_SPU_REL16
2104
          && r_type != R_SPU_ADDR16)
2105
        {
2106
          reject = TRUE;
2107
          if (!(call_tree && spu_hash_table (info)->auto_overlay))
2108
            continue;
2109
        }
2110
 
2111
      r_indx = ELF32_R_SYM (irela->r_info);
2112
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2113
        return FALSE;
2114
 
2115
      if (sym_sec == NULL
2116
          || sym_sec->output_section == NULL
2117
          || sym_sec->output_section->owner != info->output_bfd)
2118
        continue;
2119
 
2120
      is_call = FALSE;
2121
      if (!reject)
2122
        {
2123
          unsigned char insn[4];
2124
 
2125
          if (!bfd_get_section_contents (sec->owner, sec, insn,
2126
                                         irela->r_offset, 4))
2127
            return FALSE;
2128
          if (is_branch (insn))
2129
            {
2130
              is_call = (insn[0] & 0xfd) == 0x31;
2131
              if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2132
                  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2133
                {
2134
                  if (!warned)
2135
                    info->callbacks->einfo
2136
                      (_("%B(%A+0x%v): call to non-code section"
2137
                         " %B(%A), analysis incomplete\n"),
2138
                       sec->owner, sec, irela->r_offset,
2139
                       sym_sec->owner, sym_sec);
2140
                  warned = TRUE;
2141
                  continue;
2142
                }
2143
            }
2144
          else
2145
            {
2146
              reject = TRUE;
2147
              if (!(call_tree && spu_hash_table (info)->auto_overlay)
2148
                  || is_hint (insn))
2149
                continue;
2150
            }
2151
        }
2152
 
2153
      if (reject)
2154
        {
2155
          /* For --auto-overlay, count possible stubs we need for
2156
             function pointer references.  */
2157
          unsigned int sym_type;
2158
          if (h)
2159
            sym_type = h->type;
2160
          else
2161
            sym_type = ELF_ST_TYPE (sym->st_info);
2162
          if (sym_type == STT_FUNC)
2163
            spu_hash_table (info)->non_ovly_stub += 1;
2164
          continue;
2165
        }
2166
 
2167
      if (h)
2168
        val = h->root.u.def.value;
2169
      else
2170
        val = sym->st_value;
2171
      val += irela->r_addend;
2172
 
2173
      if (!call_tree)
2174
        {
2175
          struct function_info *fun;
2176
 
2177
          if (irela->r_addend != 0)
2178
            {
2179
              Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2180
              if (fake == NULL)
2181
                return FALSE;
2182
              fake->st_value = val;
2183
              fake->st_shndx
2184
                = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2185
              sym = fake;
2186
            }
2187
          if (sym)
2188
            fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2189
          else
2190
            fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2191
          if (fun == NULL)
2192
            return FALSE;
2193
          if (irela->r_addend != 0
2194
              && fun->u.sym != sym)
2195
            free (sym);
2196
          continue;
2197
        }
2198
 
2199
      caller = find_function (sec, irela->r_offset, info);
2200
      if (caller == NULL)
2201
        return FALSE;
2202
      callee = bfd_malloc (sizeof *callee);
2203
      if (callee == NULL)
2204
        return FALSE;
2205
 
2206
      callee->fun = find_function (sym_sec, val, info);
2207
      if (callee->fun == NULL)
2208
        return FALSE;
2209
      callee->is_tail = !is_call;
2210
      callee->is_pasted = FALSE;
2211
      callee->count = 0;
2212
      if (callee->fun->last_caller != sec)
2213
        {
2214
          callee->fun->last_caller = sec;
2215
          callee->fun->call_count += 1;
2216
        }
2217
      if (!insert_callee (caller, callee))
2218
        free (callee);
2219
      else if (!is_call
2220
               && !callee->fun->is_func
2221
               && callee->fun->stack == 0)
2222
        {
2223
          /* This is either a tail call or a branch from one part of
2224
             the function to another, i.e. a hot/cold section.  If the
2225
             destination has been called by some other function then
2226
             it is a separate function.  We also assume that functions
2227
             are not split across input files.  */
2228
          if (sec->owner != sym_sec->owner)
2229
            {
2230
              callee->fun->start = NULL;
2231
              callee->fun->is_func = TRUE;
2232
            }
2233
          else if (callee->fun->start == NULL)
2234
            callee->fun->start = caller;
2235
          else
2236
            {
2237
              struct function_info *callee_start;
2238
              struct function_info *caller_start;
2239
              callee_start = callee->fun;
2240
              while (callee_start->start)
2241
                callee_start = callee_start->start;
2242
              caller_start = caller;
2243
              while (caller_start->start)
2244
                caller_start = caller_start->start;
2245
              if (caller_start != callee_start)
2246
                {
2247
                  callee->fun->start = NULL;
2248
                  callee->fun->is_func = TRUE;
2249
                }
2250
            }
2251
        }
2252
    }
2253
 
2254
  return TRUE;
2255
}
2256
 
2257
/* Handle something like .init or .fini, which has a piece of a function.
2258
   These sections are pasted together to form a single function.  */
2259
 
2260
static bfd_boolean
2261
pasted_function (asection *sec, struct bfd_link_info *info)
2262
{
2263
  struct bfd_link_order *l;
2264
  struct _spu_elf_section_data *sec_data;
2265
  struct spu_elf_stack_info *sinfo;
2266
  Elf_Internal_Sym *fake;
2267
  struct function_info *fun, *fun_start;
2268
 
2269
  fake = bfd_zmalloc (sizeof (*fake));
2270
  if (fake == NULL)
2271
    return FALSE;
2272
  fake->st_value = 0;
2273
  fake->st_size = sec->size;
2274
  fake->st_shndx
2275
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2276
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2277
  if (!fun)
2278
    return FALSE;
2279
 
2280
  /* Find a function immediately preceding this section.  */
2281
  fun_start = NULL;
2282
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2283
    {
2284
      if (l->u.indirect.section == sec)
2285
        {
2286
          if (fun_start != NULL)
2287
            {
2288
              struct call_info *callee = bfd_malloc (sizeof *callee);
2289
              if (callee == NULL)
2290
                return FALSE;
2291
 
2292
              fun->start = fun_start;
2293
              callee->fun = fun;
2294
              callee->is_tail = TRUE;
2295
              callee->is_pasted = TRUE;
2296
              callee->count = 0;
2297
              if (!insert_callee (fun_start, callee))
2298
                free (callee);
2299
              return TRUE;
2300
            }
2301
          break;
2302
        }
2303
      if (l->type == bfd_indirect_link_order
2304
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2305
          && (sinfo = sec_data->u.i.stack_info) != NULL
2306
          && sinfo->num_fun != 0)
2307
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
2308
    }
2309
 
2310
  info->callbacks->einfo (_("%A link_order not found\n"), sec);
2311
  return FALSE;
2312
}
2313
 
2314
/* Map address ranges in code sections to functions.  */
2315
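/* This works in several steps: defined STT_FUNC and STT_NOTYPE symbols
   in interesting code sections are collected, sorted and installed via
   maybe_insert_function.  If gaps remain, branch and call targets found
   via relocs and then untyped global symbols are installed as well,
   zero-size entries are extended up to the next function, and sections
   with no symbols at all are handled as pasted .init/.fini pieces.  */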
 
2316
static bfd_boolean
2317
discover_functions (struct bfd_link_info *info)
2318
{
2319
  bfd *ibfd;
2320
  int bfd_idx;
2321
  Elf_Internal_Sym ***psym_arr;
2322
  asection ***sec_arr;
2323
  bfd_boolean gaps = FALSE;
2324
 
2325
  bfd_idx = 0;
2326
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2327
    bfd_idx++;
2328
 
2329
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2330
  if (psym_arr == NULL)
2331
    return FALSE;
2332
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2333
  if (sec_arr == NULL)
2334
    return FALSE;
2335
 
2336
 
2337
  for (ibfd = info->input_bfds, bfd_idx = 0;
2338
       ibfd != NULL;
2339
       ibfd = ibfd->link_next, bfd_idx++)
2340
    {
2341
      extern const bfd_target bfd_elf32_spu_vec;
2342
      Elf_Internal_Shdr *symtab_hdr;
2343
      asection *sec;
2344
      size_t symcount;
2345
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2346
      asection **psecs, **p;
2347
 
2348
      if (ibfd->xvec != &bfd_elf32_spu_vec)
2349
        continue;
2350
 
2351
      /* Read all the symbols.  */
2352
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2353
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2354
      if (symcount == 0)
2355
        {
2356
          if (!gaps)
2357
            for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2358
              if (interesting_section (sec, info->output_bfd))
2359
                {
2360
                  gaps = TRUE;
2361
                  break;
2362
                }
2363
          continue;
2364
        }
2365
 
2366
      syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2367
      if (syms == NULL)
2368
        {
2369
          syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2370
                                       NULL, NULL, NULL);
2371
          symtab_hdr->contents = (void *) syms;
2372
          if (syms == NULL)
2373
            return FALSE;
2374
        }
2375
 
2376
      /* Select defined function symbols that are going to be output.  */
2377
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2378
      if (psyms == NULL)
2379
        return FALSE;
2380
      psym_arr[bfd_idx] = psyms;
2381
      psecs = bfd_malloc (symcount * sizeof (*psecs));
2382
      if (psecs == NULL)
2383
        return FALSE;
2384
      sec_arr[bfd_idx] = psecs;
2385
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
2386
        if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
2387
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2388
          {
2389
            asection *s;
2390
 
2391
            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
2392
            if (s != NULL && interesting_section (s, info->output_bfd))
2393
              *psy++ = sy;
2394
          }
2395
      symcount = psy - psyms;
2396
      *psy = NULL;
2397
 
2398
      /* Sort them by section and offset within section.  */
2399
      sort_syms_syms = syms;
2400
      sort_syms_psecs = psecs;
2401
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);
2402
 
2403
      /* Now inspect the function symbols.  */
2404
      for (psy = psyms; psy < psyms + symcount; )
2405
        {
2406
          asection *s = psecs[*psy - syms];
2407
          Elf_Internal_Sym **psy2;
2408
 
2409
          for (psy2 = psy; ++psy2 < psyms + symcount; )
2410
            if (psecs[*psy2 - syms] != s)
2411
              break;
2412
 
2413
          if (!alloc_stack_info (s, psy2 - psy))
2414
            return FALSE;
2415
          psy = psy2;
2416
        }
2417
 
2418
      /* First install info about properly typed and sized functions.
2419
         In an ideal world this will cover all code sections, except
2420
         when functions are partitioned into hot and cold sections,
2421
         and for the horrible pasted-together .init and .fini functions.  */
2422
      for (psy = psyms; psy < psyms + symcount; ++psy)
2423
        {
2424
          sy = *psy;
2425
          if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
2426
            {
2427
              asection *s = psecs[sy - syms];
2428
              if (!maybe_insert_function (s, sy, FALSE, TRUE))
2429
                return FALSE;
2430
            }
2431
        }
2432
 
2433
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2434
        if (interesting_section (sec, info->output_bfd))
2435
          gaps |= check_function_ranges (sec, info);
2436
    }
2437
 
2438
  if (gaps)
2439
    {
2440
      /* See if we can discover more function symbols by looking at
2441
         relocations.  */
2442
      for (ibfd = info->input_bfds, bfd_idx = 0;
2443
           ibfd != NULL;
2444
           ibfd = ibfd->link_next, bfd_idx++)
2445
        {
2446
          asection *sec;
2447
 
2448
          if (psym_arr[bfd_idx] == NULL)
2449
            continue;
2450
 
2451
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2452
            if (!mark_functions_via_relocs (sec, info, FALSE))
2453
              return FALSE;
2454
        }
2455
 
2456
      for (ibfd = info->input_bfds, bfd_idx = 0;
2457
           ibfd != NULL;
2458
           ibfd = ibfd->link_next, bfd_idx++)
2459
        {
2460
          Elf_Internal_Shdr *symtab_hdr;
2461
          asection *sec;
2462
          Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2463
          asection **psecs;
2464
 
2465
          if ((psyms = psym_arr[bfd_idx]) == NULL)
2466
            continue;
2467
 
2468
          psecs = sec_arr[bfd_idx];
2469
 
2470
          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2471
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;
2472
 
2473
          gaps = FALSE;
2474
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2475
            if (interesting_section (sec, info->output_bfd))
2476
              gaps |= check_function_ranges (sec, info);
2477
          if (!gaps)
2478
            continue;
2479
 
2480
          /* Finally, install all globals.  */
2481
          for (psy = psyms; (sy = *psy) != NULL; ++psy)
2482
            {
2483
              asection *s;
2484
 
2485
              s = psecs[sy - syms];
2486
 
2487
              /* Global syms might be improperly typed functions.  */
2488
              if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
2489
                  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
2490
                {
2491
                  if (!maybe_insert_function (s, sy, FALSE, FALSE))
2492
                    return FALSE;
2493
                }
2494
            }
2495
        }
2496
 
2497
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2498
        {
2499
          extern const bfd_target bfd_elf32_spu_vec;
2500
          asection *sec;
2501
 
2502
          if (ibfd->xvec != &bfd_elf32_spu_vec)
2503
            continue;
2504
 
2505
          /* Some of the symbols we've installed as marking the
2506
             beginning of functions may have a size of zero.  Extend
2507
             the range of such functions to the beginning of the
2508
             next symbol of interest.  */
2509
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2510
            if (interesting_section (sec, info->output_bfd))
2511
              {
2512
                struct _spu_elf_section_data *sec_data;
2513
                struct spu_elf_stack_info *sinfo;
2514
 
2515
                sec_data = spu_elf_section_data (sec);
2516
                sinfo = sec_data->u.i.stack_info;
2517
                if (sinfo != NULL)
2518
                  {
2519
                    int fun_idx;
2520
                    bfd_vma hi = sec->size;
2521
 
2522
                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
2523
                      {
2524
                        sinfo->fun[fun_idx].hi = hi;
2525
                        hi = sinfo->fun[fun_idx].lo;
2526
                      }
2527
                  }
2528
                /* No symbols in this section.  Must be .init or .fini
2529
                   or something similar.  */
2530
                else if (!pasted_function (sec, info))
2531
                  return FALSE;
2532
              }
2533
        }
2534
    }
2535
 
2536
  for (ibfd = info->input_bfds, bfd_idx = 0;
2537
       ibfd != NULL;
2538
       ibfd = ibfd->link_next, bfd_idx++)
2539
    {
2540
      if (psym_arr[bfd_idx] == NULL)
2541
        continue;
2542
 
2543
      free (psym_arr[bfd_idx]);
2544
      free (sec_arr[bfd_idx]);
2545
    }
2546
 
2547
  free (psym_arr);
2548
  free (sec_arr);
2549
 
2550
  return TRUE;
2551
}
2552
 
2553
/* Iterate over all function_info we have collected, calling DOIT on
2554
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
2555
   if ROOT_ONLY.  */
2556
 
2557
static bfd_boolean
2558
for_each_node (bfd_boolean (*doit) (struct function_info *,
2559
                                    struct bfd_link_info *,
2560
                                    void *),
2561
               struct bfd_link_info *info,
2562
               void *param,
2563
               int root_only)
2564
{
2565
  bfd *ibfd;
2566
 
2567
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2568
    {
2569
      extern const bfd_target bfd_elf32_spu_vec;
2570
      asection *sec;
2571
 
2572
      if (ibfd->xvec != &bfd_elf32_spu_vec)
2573
        continue;
2574
 
2575
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2576
        {
2577
          struct _spu_elf_section_data *sec_data;
2578
          struct spu_elf_stack_info *sinfo;
2579
 
2580
          if ((sec_data = spu_elf_section_data (sec)) != NULL
2581
              && (sinfo = sec_data->u.i.stack_info) != NULL)
2582
            {
2583
              int i;
2584
              for (i = 0; i < sinfo->num_fun; ++i)
2585
                if (!root_only || !sinfo->fun[i].non_root)
2586
                  if (!doit (&sinfo->fun[i], info, param))
2587
                    return FALSE;
2588
            }
2589
        }
2590
    }
2591
  return TRUE;
2592
}
2593
 
2594
/* Transfer call info attached to struct function_info entries for
2595
   all of a given function's sections to the first entry.  */
2596
 
2597
static bfd_boolean
2598
transfer_calls (struct function_info *fun,
2599
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
2600
                void *param ATTRIBUTE_UNUSED)
2601
{
2602
  struct function_info *start = fun->start;
2603
 
2604
  if (start != NULL)
2605
    {
2606
      struct call_info *call, *call_next;
2607
 
2608
      while (start->start != NULL)
2609
        start = start->start;
2610
      for (call = fun->call_list; call != NULL; call = call_next)
2611
        {
2612
          call_next = call->next;
2613
          if (!insert_callee (start, call))
2614
            free (call);
2615
        }
2616
      fun->call_list = NULL;
2617
    }
2618
  return TRUE;
2619
}
2620
 
2621
/* Mark nodes in the call graph that are called by some other node.  */
2622
 
2623
static bfd_boolean
2624
mark_non_root (struct function_info *fun,
2625
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
2626
               void *param ATTRIBUTE_UNUSED)
2627
{
2628
  struct call_info *call;
2629
 
2630
  if (fun->visit1)
2631
    return TRUE;
2632
  fun->visit1 = TRUE;
2633
  for (call = fun->call_list; call; call = call->next)
2634
    {
2635
      call->fun->non_root = TRUE;
2636
      mark_non_root (call->fun, 0, 0);
2637
    }
2638
  return TRUE;
2639
}
2640
 
2641
/* Remove cycles from the call graph.  Set depth of nodes.  */
2642
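/* This is a depth-first walk; the MARKING flag identifies nodes on the
   current walk path, so an edge back to a marked node closes a cycle
   and is dropped (with a diagnostic unless --auto-overlay is in
   effect).  Each call edge records the maximum depth reachable through
   it.  */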
 
2643
static bfd_boolean
2644
remove_cycles (struct function_info *fun,
2645
               struct bfd_link_info *info,
2646
               void *param)
2647
{
2648
  struct call_info **callp, *call;
2649
  unsigned int depth = *(unsigned int *) param;
2650
  unsigned int max_depth = depth;
2651
 
2652
  fun->depth = depth;
2653
  fun->visit2 = TRUE;
2654
  fun->marking = TRUE;
2655
 
2656
  callp = &fun->call_list;
2657
  while ((call = *callp) != NULL)
2658
    {
2659
      if (!call->fun->visit2)
2660
        {
2661
          call->max_depth = depth + !call->is_pasted;
2662
          if (!remove_cycles (call->fun, info, &call->max_depth))
2663
            return FALSE;
2664
          if (max_depth < call->max_depth)
2665
            max_depth = call->max_depth;
2666
        }
2667
      else if (call->fun->marking)
2668
        {
2669
          if (!spu_hash_table (info)->auto_overlay)
2670
            {
2671
              const char *f1 = func_name (fun);
2672
              const char *f2 = func_name (call->fun);
2673
 
2674
              info->callbacks->info (_("Stack analysis will ignore the call "
2675
                                       "from %s to %s\n"),
2676
                                     f1, f2);
2677
            }
2678
          *callp = call->next;
2679
          free (call);
2680
          continue;
2681
        }
2682
      callp = &call->next;
2683
    }
2684
  fun->marking = FALSE;
2685
  *(unsigned int *) param = max_depth;
2686
  return TRUE;
2687
}
2688
 
2689
/* Populate call_list for each function.  */
2690
 
2691
static bfd_boolean
2692
build_call_tree (struct bfd_link_info *info)
2693
{
2694
  bfd *ibfd;
2695
  unsigned int depth;
2696
 
2697
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2698
    {
2699
      extern const bfd_target bfd_elf32_spu_vec;
2700
      asection *sec;
2701
 
2702
      if (ibfd->xvec != &bfd_elf32_spu_vec)
2703
        continue;
2704
 
2705
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
2706
        if (!mark_functions_via_relocs (sec, info, TRUE))
2707
          return FALSE;
2708
    }
2709
 
2710
  /* Transfer call info from hot/cold section part of function
2711
     to main entry.  */
2712
  if (!spu_hash_table (info)->auto_overlay
2713
      && !for_each_node (transfer_calls, info, 0, FALSE))
2714
    return FALSE;
2715
 
2716
  /* Find the call graph root(s).  */
2717
  if (!for_each_node (mark_non_root, info, 0, FALSE))
2718
    return FALSE;
2719
 
2720
  /* Remove cycles from the call graph.  We start from the root node(s)
2721
     so that we break cycles in a reasonable place.  */
2722
  depth = 0;
2723
  return for_each_node (remove_cycles, info, &depth, TRUE);
2724
}
2725
 
2726
/* qsort predicate to sort calls by max_depth then count.  */
2727
 
2728
static int
2729
sort_calls (const void *a, const void *b)
2730
{
2731
  struct call_info *const *c1 = a;
2732
  struct call_info *const *c2 = b;
2733
  int delta;
2734
 
2735
  delta = (*c2)->max_depth - (*c1)->max_depth;
2736
  if (delta != 0)
2737
    return delta;
2738
 
2739
  delta = (*c2)->count - (*c1)->count;
2740
  if (delta != 0)
2741
    return delta;
2742
 
2743
  return c1 - c2;
2744
}
2745
 
2746
struct _mos_param {
2747
  unsigned int max_overlay_size;
2748
};
2749
 
2750
/* Set linker_mark and gc_mark on any sections that we will put in
2751
   overlays.  These flags are used by the generic ELF linker, but we
2752
   won't be continuing on to bfd_elf_final_link so it is OK to use
2753
   them.  linker_mark is clear before we get here.  Set segment_mark
2754
   on sections that are part of a pasted function (excluding the last
2755
   section).
2756
 
2757
   Set up the function rodata section if --overlay-rodata.  We don't
2758
   currently include merged string constant rodata sections.
2759
 
2760
   Sort the call graph so that the deepest nodes will be visited
2761
   first.  */
2762
 
2763
static bfd_boolean
2764
mark_overlay_section (struct function_info *fun,
2765
                      struct bfd_link_info *info,
2766
                      void *param)
2767
{
2768
  struct call_info *call;
2769
  unsigned int count;
2770
  struct _mos_param *mos_param = param;
2771
 
2772
  if (fun->visit4)
2773
    return TRUE;
2774
 
2775
  fun->visit4 = TRUE;
2776
  if (!fun->sec->linker_mark)
2777
    {
2778
      fun->sec->linker_mark = 1;
2779
      fun->sec->gc_mark = 1;
2780
      fun->sec->segment_mark = 0;
2781
      /* Ensure SEC_CODE is set on this text section (it ought to
2782
         be!), and SEC_CODE is clear on rodata sections.  We use
2783
         this flag to differentiate the two overlay section types.  */
2784
      fun->sec->flags |= SEC_CODE;
2785
      if (spu_hash_table (info)->auto_overlay & OVERLAY_RODATA)
2786
        {
2787
          char *name = NULL;
2788
          unsigned int size;
2789
 
2790
          /* Find the rodata section corresponding to this function's
2791
             text section.  */
2792
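          /* The rodata name is derived from the text section name:
             ".text" -> ".rodata", ".text.foo" -> ".rodata.foo", and
             ".gnu.linkonce.t.foo" -> ".gnu.linkonce.r.foo".  */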
          if (strcmp (fun->sec->name, ".text") == 0)
2793
            {
2794
              name = bfd_malloc (sizeof (".rodata"));
2795
              if (name == NULL)
2796
                return FALSE;
2797
              memcpy (name, ".rodata", sizeof (".rodata"));
2798
            }
2799
          else if (strncmp (fun->sec->name, ".text.", 6) == 0)
2800
            {
2801
              size_t len = strlen (fun->sec->name);
2802
              name = bfd_malloc (len + 3);
2803
              if (name == NULL)
2804
                return FALSE;
2805
              memcpy (name, ".rodata", sizeof (".rodata"));
2806
              memcpy (name + 7, fun->sec->name + 5, len - 4);
2807
            }
2808
          else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
2809
            {
2810
              size_t len = strlen (fun->sec->name) + 1;
2811
              name = bfd_malloc (len);
2812
              if (name == NULL)
2813
                return FALSE;
2814
              memcpy (name, fun->sec->name, len);
2815
              name[14] = 'r';
2816
            }
2817
 
2818
          if (name != NULL)
2819
            {
2820
              asection *rodata = NULL;
2821
              asection *group_sec = elf_section_data (fun->sec)->next_in_group;
2822
              if (group_sec == NULL)
2823
                rodata = bfd_get_section_by_name (fun->sec->owner, name);
2824
              else
2825
                while (group_sec != NULL && group_sec != fun->sec)
2826
                  {
2827
                    if (strcmp (group_sec->name, name) == 0)
2828
                      {
2829
                        rodata = group_sec;
2830
                        break;
2831
                      }
2832
                    group_sec = elf_section_data (group_sec)->next_in_group;
2833
                  }
2834
              fun->rodata = rodata;
2835
              if (fun->rodata)
2836
                {
2837
                  fun->rodata->linker_mark = 1;
2838
                  fun->rodata->gc_mark = 1;
2839
                  fun->rodata->flags &= ~SEC_CODE;
2840
                }
2841
              free (name);
2842
            }
2843
          size = fun->sec->size;
2844
          if (fun->rodata)
2845
            size += fun->rodata->size;
2846
          if (mos_param->max_overlay_size < size)
2847
            mos_param->max_overlay_size = size;
2848
        }
2849
    }
2850
 
2851
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2852
    count += 1;
2853
 
2854
  if (count > 1)
2855
    {
2856
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
2857
      if (calls == NULL)
2858
        return FALSE;
2859
 
2860
      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
2861
        calls[count++] = call;
2862
 
2863
      qsort (calls, count, sizeof (*calls), sort_calls);
2864
 
2865
      fun->call_list = NULL;
2866
      while (count != 0)
2867
        {
2868
          --count;
2869
          calls[count]->next = fun->call_list;
2870
          fun->call_list = calls[count];
2871
        }
2872
      free (calls);
2873
    }
2874
 
2875
  for (call = fun->call_list; call != NULL; call = call->next)
2876
    {
2877
      if (call->is_pasted)
2878
        {
2879
          /* There can only be one is_pasted call per function_info.  */
2880
          BFD_ASSERT (!fun->sec->segment_mark);
2881
          fun->sec->segment_mark = 1;
2882
        }
2883
      if (!mark_overlay_section (call->fun, info, param))
2884
        return FALSE;
2885
    }
2886
 
2887
  /* Don't put entry code into an overlay.  The overlay manager needs
2888
     a stack!  */
2889
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
2890
      == info->output_bfd->start_address)
2891
    {
2892
      fun->sec->linker_mark = 0;
2893
      if (fun->rodata != NULL)
2894
        fun->rodata->linker_mark = 0;
2895
    }
2896
  return TRUE;
2897
}
2898
 
2899
struct _uos_param {
2900
  asection *exclude_input_section;
2901
  asection *exclude_output_section;
2902
  unsigned long clearing;
2903
};
2904
 
2905
/* Undo some of mark_overlay_section's work.  */
2906
 
2907
static bfd_boolean
2908
unmark_overlay_section (struct function_info *fun,
2909
                        struct bfd_link_info *info,
2910
                        void *param)
2911
{
2912
  struct call_info *call;
2913
  struct _uos_param *uos_param = param;
2914
  unsigned int excluded = 0;
2915
 
2916
  if (fun->visit5)
2917
    return TRUE;
2918
 
2919
  fun->visit5 = TRUE;
2920
 
2921
  excluded = 0;
2922
  if (fun->sec == uos_param->exclude_input_section
2923
      || fun->sec->output_section == uos_param->exclude_output_section)
2924
    excluded = 1;
2925
 
2926
  uos_param->clearing += excluded;
2927
 
2928
  if (uos_param->clearing)
2929
    {
2930
      fun->sec->linker_mark = 0;
2931
      if (fun->rodata)
2932
        fun->rodata->linker_mark = 0;
2933
    }
2934
 
2935
  for (call = fun->call_list; call != NULL; call = call->next)
2936
    if (!unmark_overlay_section (call->fun, info, param))
2937
      return FALSE;
2938
 
2939
  uos_param->clearing -= excluded;
2940
  return TRUE;
2941
}
2942
 
2943
struct _cl_param {
2944
  unsigned int lib_size;
2945
  asection **lib_sections;
2946
};
2947
 
2948
/* Add sections we have marked as belonging to overlays to an array
2949
   for consideration as non-overlay sections.  The array consists of
2950
   pairs of sections, (text, rodata), for functions in the call graph.  */
2951
 
2952
static bfd_boolean
2953
collect_lib_sections (struct function_info *fun,
2954
                      struct bfd_link_info *info,
2955
                      void *param)
2956
{
2957
  struct _cl_param *lib_param = param;
2958
  struct call_info *call;
2959
  unsigned int size;
2960
 
2961
  if (fun->visit6)
2962
    return TRUE;
2963
 
2964
  fun->visit6 = TRUE;
2965
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
2966
    return TRUE;
2967
 
2968
  size = fun->sec->size;
2969
  if (fun->rodata)
2970
    size += fun->rodata->size;
2971
  if (size > lib_param->lib_size)
2972
    return TRUE;
2973
 
2974
  *lib_param->lib_sections++ = fun->sec;
2975
  fun->sec->gc_mark = 0;
2976
  if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
2977
    {
2978
      *lib_param->lib_sections++ = fun->rodata;
2979
      fun->rodata->gc_mark = 0;
2980
    }
2981
  else
2982
    *lib_param->lib_sections++ = NULL;
2983
 
2984
  for (call = fun->call_list; call != NULL; call = call->next)
2985
    collect_lib_sections (call->fun, info, param);
2986
 
2987
  return TRUE;
2988
}
2989
 
2990
/* qsort predicate to sort sections by call count.  */
2991
 
2992
static int
2993
sort_lib (const void *a, const void *b)
2994
{
2995
  asection *const *s1 = a;
2996
  asection *const *s2 = b;
2997
  struct _spu_elf_section_data *sec_data;
2998
  struct spu_elf_stack_info *sinfo;
2999
  int delta;
3000
 
3001
  delta = 0;
3002
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3003
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3004
    {
3005
      int i;
3006
      for (i = 0; i < sinfo->num_fun; ++i)
3007
        delta -= sinfo->fun[i].call_count;
3008
    }
3009
 
3010
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3011
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3012
    {
3013
      int i;
3014
      for (i = 0; i < sinfo->num_fun; ++i)
3015
        delta += sinfo->fun[i].call_count;
3016
    }
3017
 
3018
  if (delta != 0)
3019
    return delta;
3020
 
3021
  return s1 - s2;
3022
}
3023
 
3024
/* Remove some sections from those marked to be in overlays.  Choose
3025
   those that are called from many places, likely library functions.  */
3026
 
3027
static unsigned int
3028
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3029
{
3030
  bfd *ibfd;
3031
  asection **lib_sections;
3032
  unsigned int i, lib_count;
3033
  struct _cl_param collect_lib_param;
3034
  struct function_info dummy_caller;
3035
 
3036
  memset (&dummy_caller, 0, sizeof (dummy_caller));
3037
  lib_count = 0;
3038
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3039
    {
3040
      extern const bfd_target bfd_elf32_spu_vec;
3041
      asection *sec;
3042
 
3043
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3044
        continue;
3045
 
3046
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3047
        if (sec->linker_mark
3048
            && sec->size < lib_size
3049
            && (sec->flags & SEC_CODE) != 0)
3050
          lib_count += 1;
3051
    }
3052
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3053
  if (lib_sections == NULL)
3054
    return (unsigned int) -1;
3055
  collect_lib_param.lib_size = lib_size;
3056
  collect_lib_param.lib_sections = lib_sections;
3057
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3058
                      TRUE))
3059
    return (unsigned int) -1;
3060
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3061
 
3062
  /* Sort sections so that those with the most calls are first.  */
3063
  if (lib_count > 1)
3064
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3065
 
3066
  for (i = 0; i < lib_count; i++)
3067
    {
3068
      unsigned int tmp, stub_size;
3069
      asection *sec;
3070
      struct _spu_elf_section_data *sec_data;
3071
      struct spu_elf_stack_info *sinfo;
3072
 
3073
      sec = lib_sections[2 * i];
3074
      /* If this section is OK, its size must be less than lib_size.  */
3075
      tmp = sec->size;
3076
      /* If it has a rodata section, then add that too.  */
3077
      if (lib_sections[2 * i + 1])
3078
        tmp += lib_sections[2 * i + 1]->size;
3079
      /* Add any new overlay call stubs needed by the section.  */
3080
      stub_size = 0;
3081
      if (tmp < lib_size
3082
          && (sec_data = spu_elf_section_data (sec)) != NULL
3083
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3084
        {
3085
          int k;
3086
          struct call_info *call;
3087
 
3088
          for (k = 0; k < sinfo->num_fun; ++k)
3089
            for (call = sinfo->fun[k].call_list; call; call = call->next)
3090
              if (call->fun->sec->linker_mark)
3091
                {
3092
                  struct call_info *p;
3093
                  for (p = dummy_caller.call_list; p; p = p->next)
3094
                    if (p->fun == call->fun)
3095
                      break;
3096
                  if (!p)
3097
                    stub_size += OVL_STUB_SIZE;
3098
                }
3099
        }
3100
      if (tmp + stub_size < lib_size)
3101
        {
3102
          struct call_info **pp, *p;
3103
 
3104
          /* This section fits.  Mark it as non-overlay.  */
3105
          lib_sections[2 * i]->linker_mark = 0;
3106
          if (lib_sections[2 * i + 1])
3107
            lib_sections[2 * i + 1]->linker_mark = 0;
3108
          lib_size -= tmp + stub_size;
3109
          /* Call stubs to the section we just added are no longer
3110
             needed.  */
3111
          pp = &dummy_caller.call_list;
3112
          while ((p = *pp) != NULL)
3113
            if (!p->fun->sec->linker_mark)
3114
              {
3115
                lib_size += OVL_STUB_SIZE;
3116
                *pp = p->next;
3117
                free (p);
3118
              }
3119
            else
3120
              pp = &p->next;
3121
          /* Add new call stubs to dummy_caller.  */
3122
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3123
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3124
            {
3125
              int k;
3126
              struct call_info *call;
3127
 
3128
              for (k = 0; k < sinfo->num_fun; ++k)
3129
                for (call = sinfo->fun[k].call_list;
3130
                     call;
3131
                     call = call->next)
3132
                  if (call->fun->sec->linker_mark)
3133
                    {
3134
                      struct call_info *callee;
3135
                      callee = bfd_malloc (sizeof (*callee));
3136
                      if (callee == NULL)
3137
                        return (unsigned int) -1;
3138
                      *callee = *call;
3139
                      if (!insert_callee (&dummy_caller, callee))
3140
                        free (callee);
3141
                    }
3142
            }
3143
        }
3144
    }
3145
  while (dummy_caller.call_list != NULL)
3146
    {
3147
      struct call_info *call = dummy_caller.call_list;
3148
      dummy_caller.call_list = call->next;
3149
      free (call);
3150
    }
3151
  for (i = 0; i < 2 * lib_count; i++)
3152
    if (lib_sections[i])
3153
      lib_sections[i]->gc_mark = 1;
3154
  free (lib_sections);
3155
  return lib_size;
3156
}
3157
 
3158
/* Build an array of overlay sections.  The deepest node's section is
3159
   added first, then its parent node's section, then everything called
3160
   from the parent section.  The idea is to group sections so as to
3161
   minimise calls between different overlays.  */
3162
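/* The walk below first recurses into the first non-pasted callee (the
   deepest one, thanks to the earlier sort), then emits this function's
   text and rodata sections, then the remaining callees; sections
   belonging to a pasted function stay with the first piece and are
   only marked as already handled.  */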
 
3163
static bfd_boolean
3164
collect_overlays (struct function_info *fun,
3165
                  struct bfd_link_info *info,
3166
                  void *param)
3167
{
3168
  struct call_info *call;
3169
  bfd_boolean added_fun;
3170
  asection ***ovly_sections = param;
3171
 
3172
  if (fun->visit7)
3173
    return TRUE;
3174
 
3175
  fun->visit7 = TRUE;
3176
  for (call = fun->call_list; call != NULL; call = call->next)
3177
    if (!call->is_pasted)
3178
      {
3179
        if (!collect_overlays (call->fun, info, ovly_sections))
3180
          return FALSE;
3181
        break;
3182
      }
3183
 
3184
  added_fun = FALSE;
3185
  if (fun->sec->linker_mark && fun->sec->gc_mark)
3186
    {
3187
      fun->sec->gc_mark = 0;
3188
      *(*ovly_sections)++ = fun->sec;
3189
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3190
        {
3191
          fun->rodata->gc_mark = 0;
3192
          *(*ovly_sections)++ = fun->rodata;
3193
        }
3194
      else
3195
        *(*ovly_sections)++ = NULL;
3196
      added_fun = TRUE;
3197
 
3198
      /* Pasted sections must stay with the first section.  We don't
3199
         put pasted sections in the array, just the first section.
3200
         Mark subsequent sections as already considered.  */
3201
      if (fun->sec->segment_mark)
3202
        {
3203
          struct function_info *call_fun = fun;
3204
          do
3205
            {
3206
              for (call = call_fun->call_list; call != NULL; call = call->next)
3207
                if (call->is_pasted)
3208
                  {
3209
                    call_fun = call->fun;
3210
                    call_fun->sec->gc_mark = 0;
3211
                    if (call_fun->rodata)
3212
                      call_fun->rodata->gc_mark = 0;
3213
                    break;
3214
                  }
3215
              if (call == NULL)
3216
                abort ();
3217
            }
3218
          while (call_fun->sec->segment_mark);
3219
        }
3220
    }
3221
 
3222
  for (call = fun->call_list; call != NULL; call = call->next)
3223
    if (!collect_overlays (call->fun, info, ovly_sections))
3224
      return FALSE;
3225
 
3226
  if (added_fun)
3227
    {
3228
      struct _spu_elf_section_data *sec_data;
3229
      struct spu_elf_stack_info *sinfo;
3230
 
3231
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3232
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3233
        {
3234
          int i;
3235
          for (i = 0; i < sinfo->num_fun; ++i)
3236
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3237
              return FALSE;
3238
        }
3239
    }
3240
 
3241
  return TRUE;
3242
}
3243
 
3244
struct _sum_stack_param {
3245
  size_t cum_stack;
3246
  size_t overall_stack;
3247
  bfd_boolean emit_stack_syms;
3248
};
3249
 
3250
/* Descend the call graph for FUN, accumulating total stack required.  */
3251
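/* For example, if F uses 32 bytes locally, makes a normal call to G
   (cumulative 64) and a tail call to H (cumulative 112), then F's
   cumulative stack is max (32, 32 + 64, 112) = 112: the caller's frame
   is added for the normal call but not for the tail call, since the
   frame is gone by the time a tail call transfers control.  */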
 
3252
static bfd_boolean
3253
sum_stack (struct function_info *fun,
3254
           struct bfd_link_info *info,
3255
           void *param)
3256
{
3257
  struct call_info *call;
3258
  struct function_info *max;
3259
  size_t stack, cum_stack;
3260
  const char *f1;
3261
  bfd_boolean has_call;
3262
  struct _sum_stack_param *sum_stack_param = param;
3263
  struct spu_link_hash_table *htab;
3264
 
3265
  cum_stack = fun->stack;
3266
  sum_stack_param->cum_stack = cum_stack;
3267
  if (fun->visit3)
3268
    return TRUE;
3269
 
3270
  has_call = FALSE;
3271
  max = NULL;
3272
  for (call = fun->call_list; call; call = call->next)
3273
    {
3274
      if (!call->is_pasted)
3275
        has_call = TRUE;
3276
      if (!sum_stack (call->fun, info, sum_stack_param))
3277
        return FALSE;
3278
      stack = sum_stack_param->cum_stack;
3279
      /* Include caller stack for normal calls, don't do so for
3280
         tail calls.  fun->stack here is local stack usage for
3281
         this function.  */
3282
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3283
        stack += fun->stack;
3284
      if (cum_stack < stack)
3285
        {
3286
          cum_stack = stack;
3287
          max = call->fun;
3288
        }
3289
    }
3290
 
3291
  sum_stack_param->cum_stack = cum_stack;
3292
  stack = fun->stack;
3293
  /* Now fun->stack holds cumulative stack.  */
3294
  fun->stack = cum_stack;
3295
  fun->visit3 = TRUE;
3296
 
3297
  if (!fun->non_root
3298
      && sum_stack_param->overall_stack < cum_stack)
3299
    sum_stack_param->overall_stack = cum_stack;
3300
 
3301
  htab = spu_hash_table (info);
3302
  if (htab->auto_overlay)
3303
    return TRUE;
3304
 
3305
  f1 = func_name (fun);
3306
  if (!fun->non_root)
3307
    info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3308
  info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3309
                          f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3310
 
3311
  if (has_call)
3312
    {
3313
      info->callbacks->minfo (_("  calls:\n"));
3314
      for (call = fun->call_list; call; call = call->next)
3315
        if (!call->is_pasted)
3316
          {
3317
            const char *f2 = func_name (call->fun);
3318
            const char *ann1 = call->fun == max ? "*" : " ";
3319
            const char *ann2 = call->is_tail ? "t" : " ";
3320
 
3321
            info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
3322
          }
3323
    }
3324
 
3325
  if (sum_stack_param->emit_stack_syms)
3326
    {
3327
      char *name = bfd_malloc (18 + strlen (f1));
3328
      struct elf_link_hash_entry *h;
3329
 
3330
      if (name == NULL)
3331
        return FALSE;
3332
 
3333
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
3334
        sprintf (name, "__stack_%s", f1);
3335
      else
3336
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
3337
 
3338
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
3339
      free (name);
3340
      if (h != NULL
3341
          && (h->root.type == bfd_link_hash_new
3342
              || h->root.type == bfd_link_hash_undefined
3343
              || h->root.type == bfd_link_hash_undefweak))
3344
        {
3345
          h->root.type = bfd_link_hash_defined;
3346
          h->root.u.def.section = bfd_abs_section_ptr;
3347
          h->root.u.def.value = cum_stack;
3348
          h->size = 0;
3349
          h->type = 0;
3350
          h->ref_regular = 1;
3351
          h->def_regular = 1;
3352
          h->ref_regular_nonweak = 1;
3353
          h->forced_local = 1;
3354
          h->non_elf = 0;
3355
        }
3356
    }
3357
 
3358
  return TRUE;
3359
}
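
/* A small worked example of the accumulation above: if main uses 32
   bytes of local stack and makes ordinary, non-tail calls to f
   (cumulative 80 bytes) and g (cumulative 48 bytes), main's cumulative
   figure is max (32 + 80, 32 + 48) = 112 bytes, and f is flagged with
   "*" in the listing as the deepest call chain.  When emit_stack_syms
   is set the result is also published as an absolute symbol, so,
   purely as an illustration, user code could recover it with something
   like

     extern const char __stack_main[];
     size_t main_stack = (size_t) __stack_main;

   because the symbol's value, not its contents, carries the number.  */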

/* SEC is part of a pasted function.  Return the call_info for the
   next section of this function.  */

static struct call_info *
find_pasted_call (asection *sec)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  struct call_info *call;
  int k;

  for (k = 0; k < sinfo->num_fun; ++k)
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
      if (call->is_pasted)
        return call;
  abort ();
  return 0;
}

/* qsort predicate to sort bfds by file name.  */

static int
sort_bfds (const void *a, const void *b)
{
  bfd *const *abfd1 = a;
  bfd *const *abfd2 = b;

  return strcmp ((*abfd1)->filename, (*abfd2)->filename);
}

/* Handle --auto-overlay.  */

static void spu_elf_auto_overlay (struct bfd_link_info *, void (*) (void))
     ATTRIBUTE_NORETURN;

static void
spu_elf_auto_overlay (struct bfd_link_info *info,
                      void (*spu_elf_load_ovl_mgr) (void))
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  int ovlynum;
  asection **ovly_sections, **ovly_p;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
        if (m->sections[i]->size != 0)
          {
            if (m->sections[i]->vma < lo)
              lo = m->sections[i]->vma;
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
          }
  fixed_size = hi + 1 - lo;

  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  htab = spu_hash_table (info);
  h = elf_link_hash_lookup (&htab->elf, "__ovly_load",
                            FALSE, FALSE, FALSE);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
         builtin version to .text, and will adjust .text size.  */
      asection *text = bfd_get_section_by_name (info->output_bfd, ".text");
      if (text != NULL)
        fixed_size -= text->size;
      spu_elf_load_ovl_mgr ();
      text = bfd_get_section_by_name (info->output_bfd, ".text");
      if (text != NULL)
        fixed_size += text->size;
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
    goto err_exit;

  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    {
      extern const bfd_target bfd_elf32_spu_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &bfd_elf32_spu_vec)
        continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
        if (sec->linker_mark)
          {
            if ((sec->flags & SEC_CODE) != 0)
              count += 1;
            fixed_size -= sec->size;
            total_overlay_size += sec->size;
          }
      if (count != old_count)
        bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bfd_boolean ok = TRUE;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
        if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
          {
            if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
              {
                if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
                                          bfd_arr[i - 1]->filename,
                                          bfd_arr[i - 1]->my_archive->filename);
                else
                  info->callbacks->einfo (_("%s in both %s and %s\n"),
                                          bfd_arr[i - 1]->filename,
                                          bfd_arr[i - 1]->my_archive->filename,
                                          bfd_arr[i]->my_archive->filename);
              }
            else if (bfd_arr[i - 1]->my_archive)
              info->callbacks->einfo (_("%s in %s and as an object\n"),
                                      bfd_arr[i - 1]->filename,
                                      bfd_arr[i - 1]->my_archive->filename);
            else if (bfd_arr[i]->my_archive)
              info->callbacks->einfo (_("%s in %s and as an object\n"),
                                      bfd_arr[i]->filename,
                                      bfd_arr[i]->my_archive->filename);
            else
              info->callbacks->einfo (_("%s duplicated\n"),
                                      bfd_arr[i]->filename);
            ok = FALSE;
          }
      if (!ok)
        {
          /* FIXME: modify plain object files from foo.o to ./foo.o
             and emit EXCLUDE_FILE to handle the duplicates in
             archives.  There is a pathological case we can't handle:
             We may have duplicate file names within a single archive.  */
          info->callbacks->einfo (_("sorry, no support for duplicate "
                                    "object files in auto-overlay script\n"));
          bfd_set_error (bfd_error_bad_value);
          goto err_exit;
        }
    }
  free (bfd_arr);

  if (htab->reserved == 0)
    {
      struct _sum_stack_param sum_stack_param;

      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
        goto err_exit;
      htab->reserved = sum_stack_param.overall_stack;
    }
  fixed_size += htab->reserved;
  fixed_size += htab->non_ovly_stub * OVL_STUB_SIZE;
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      /* Guess number of overlays.  Assuming overlay buffer is on
         average only half full should be conservative.  */
      ovlynum = total_overlay_size * 2 / (htab->local_store - fixed_size);
      /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
      fixed_size += ovlynum * 16 + 16 + 4 + 16;
    }
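
  /* For example, with 64k of code marked for overlaying and 16k of
     local store left over after the fixed areas, the guess above is
     64k * 2 / 16k = 8 overlays, and the estimate then charges
     8 * 16 + 36 bytes of extra fixed space for the overlay tables.  */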

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    info->callbacks->einfo (_("non-overlay plus maximum overlay size "
                              "of 0x%x exceeds local store\n"),
                            fixed_size + mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  if (fixed_size < htab->overlay_fixed
      && htab->overlay_fixed + mos_param.max_overlay_size < htab->local_store)
    {
      unsigned int lib_size = htab->overlay_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
        goto err_exit;
      fixed_size = htab->overlay_fixed - lib_size;
    }
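
  /* In other words, any slack between the computed fixed_size and the
     requested fixed area size (htab->overlay_fixed) is offered to
     auto_ovl_lib_functions, which tries to move suitable library
     functions out of the overlays; fixed_size is then recomputed so
     that it accounts for whatever space those functions consumed.  */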

  /* Build an array of sections, suitably sorted to place into
     overlays.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;

  script = htab->spu_elf_open_overlay_script ();

  if (fprintf (script, "SECTIONS\n{\n OVERLAY :\n {\n") <= 0)
    goto file_err;

  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = htab->local_store - fixed_size;
  base = 0;
  ovlynum = 0;
  while (base < count)
    {
      unsigned int size = 0;
      unsigned int j;

      for (i = base; i < count; i++)
        {
          asection *sec;
          unsigned int tmp;
          unsigned int stub_size;
          struct call_info *call, *pasty;
          struct _spu_elf_section_data *sec_data;
          struct spu_elf_stack_info *sinfo;
          int k;

          /* See whether we can add this section to the current
             overlay without overflowing our overlay buffer.  */
          sec = ovly_sections[2 * i];
          tmp = size + sec->size;
          if (ovly_sections[2 * i + 1])
            tmp += ovly_sections[2 * i + 1]->size;
          if (tmp > overlay_size)
            break;
          if (sec->segment_mark)
            {
              /* Pasted sections must stay together, so add their
                 sizes too.  */
              struct call_info *pasty = find_pasted_call (sec);
              while (pasty != NULL)
                {
                  struct function_info *call_fun = pasty->fun;
                  tmp += call_fun->sec->size;
                  if (call_fun->rodata)
                    tmp += call_fun->rodata->size;
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
                    if (pasty->is_pasted)
                      break;
                }
            }
          if (tmp > overlay_size)
            break;

          /* If we add this section, we might need new overlay call
             stubs.  Add any overlay section calls to dummy_caller.  */
          pasty = NULL;
          sec_data = spu_elf_section_data (sec);
          sinfo = sec_data->u.i.stack_info;
          for (k = 0; k < sinfo->num_fun; ++k)
            for (call = sinfo->fun[k].call_list; call; call = call->next)
              if (call->is_pasted)
                {
                  BFD_ASSERT (pasty == NULL);
                  pasty = call;
                }
              else if (call->fun->sec->linker_mark)
                {
                  if (!copy_callee (&dummy_caller, call))
                    goto err_exit;
                }
          while (pasty != NULL)
            {
              struct function_info *call_fun = pasty->fun;
              pasty = NULL;
              for (call = call_fun->call_list; call; call = call->next)
                if (call->is_pasted)
                  {
                    BFD_ASSERT (pasty == NULL);
                    pasty = call;
                  }
                else if (!copy_callee (&dummy_caller, call))
                  goto err_exit;
            }

          /* Calculate call stub size.  */
          stub_size = 0;
          for (call = dummy_caller.call_list; call; call = call->next)
            {
              unsigned int k;

              stub_size += OVL_STUB_SIZE;
              /* If the call is within this overlay, we won't need a
                 stub.  */
              for (k = base; k < i + 1; k++)
                if (call->fun->sec == ovly_sections[2 * k])
                  {
                    stub_size -= OVL_STUB_SIZE;
                    break;
                  }
            }
          if (tmp + stub_size > overlay_size)
            break;

          size = tmp;
        }

      if (i == base)
        {
          info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
                                  ovly_sections[2 * i]->owner,
                                  ovly_sections[2 * i],
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
          bfd_set_error (bfd_error_bad_value);
          goto err_exit;
        }

      if (fprintf (script, "  .ovly%d {\n", ++ovlynum) <= 0)
        goto file_err;
      for (j = base; j < i; j++)
        {
          asection *sec = ovly_sections[2 * j];

          if (fprintf (script, "   [%c]%s (%s)\n",
                       sec->owner->filename[0],
                       sec->owner->filename + 1,
                       sec->name) <= 0)
            goto file_err;
          if (sec->segment_mark)
            {
              struct call_info *call = find_pasted_call (sec);
              while (call != NULL)
                {
                  struct function_info *call_fun = call->fun;
                  sec = call_fun->sec;
                  if (fprintf (script, "   [%c]%s (%s)\n",
                               sec->owner->filename[0],
                               sec->owner->filename + 1,
                               sec->name) <= 0)
                    goto file_err;
                  for (call = call_fun->call_list; call; call = call->next)
                    if (call->is_pasted)
                      break;
                }
            }
        }

      for (j = base; j < i; j++)
        {
          asection *sec = ovly_sections[2 * j + 1];
          if (sec != NULL && fprintf (script, "   [%c]%s (%s)\n",
                                      sec->owner->filename[0],
                                      sec->owner->filename + 1,
                                      sec->name) <= 0)
            goto file_err;

          sec = ovly_sections[2 * j];
          if (sec->segment_mark)
            {
              struct call_info *call = find_pasted_call (sec);
              while (call != NULL)
                {
                  struct function_info *call_fun = call->fun;
                  sec = call_fun->rodata;
                  if (sec != NULL && fprintf (script, "   [%c]%s (%s)\n",
                                              sec->owner->filename[0],
                                              sec->owner->filename + 1,
                                              sec->name) <= 0)
                    goto file_err;
                  for (call = call_fun->call_list; call; call = call->next)
                    if (call->is_pasted)
                      break;
                }
            }
        }

      if (fprintf (script, "  }\n") <= 0)
        goto file_err;

      while (dummy_caller.call_list != NULL)
        {
          struct call_info *call = dummy_caller.call_list;
          dummy_caller.call_list = call->next;
          free (call);
        }

      base = i;
    }
  free (ovly_sections);

  if (fprintf (script, " }\n}\nINSERT AFTER .text;\n") <= 0)
    goto file_err;
  if (fclose (script) != 0)
    goto file_err;

  if (htab->auto_overlay & AUTO_RELINK)
    htab->spu_elf_relink ();

  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);
 err_exit:
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
  xexit (1);
}
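
/* For reference, the script emitted above ends up looking roughly
   like this (file and section names are made up for the example):

       SECTIONS
       {
        OVERLAY :
        {
         .ovly1 {
          [f]oo.o (.text.fn1)
          [b]ar.o (.text.fn2)
         }
         .ovly2 {
          [b]az.o (.text.fn3)
         }
        }
       }
       INSERT AFTER .text;

   Each input file name is written with its first character inside
   brackets, which turns the spec into a glob pattern; presumably this
   is so the entry still matches however the file's path was spelt on
   the command line.  */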

/* Provide an estimate of total stack required.  */

static bfd_boolean
spu_elf_stack_analysis (struct bfd_link_info *info, int emit_stack_syms)
{
  struct _sum_stack_param sum_stack_param;

  if (!discover_functions (info))
    return FALSE;

  if (!build_call_tree (info))
    return FALSE;

  info->callbacks->info (_("Stack size for call graph root nodes.\n"));
  info->callbacks->minfo (_("\nStack size for functions.  "
                            "Annotations: '*' max stack, 't' tail call\n"));

  sum_stack_param.emit_stack_syms = emit_stack_syms;
  sum_stack_param.overall_stack = 0;
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
    return FALSE;

  info->callbacks->info (_("Maximum stack required is 0x%v\n"),
                         (bfd_vma) sum_stack_param.overall_stack);
  return TRUE;
}

/* Perform a final link.  */

static bfd_boolean
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (htab->auto_overlay)
    spu_elf_auto_overlay (info, htab->spu_elf_load_ovl_mgr);

  if (htab->stack_analysis
      && !spu_elf_stack_analysis (info, htab->emit_stack_syms))
    info->callbacks->einfo ("%X%P: stack analysis error: %E\n");

  return bfd_elf_final_link (output_bfd, info);
}

/* Called when not normally emitting relocs, i.e. !info->relocatable
   and !info->emitrelocations.  Returns a count of special relocs
   that need to be emitted.  */

static unsigned int
spu_elf_count_relocs (asection *sec, Elf_Internal_Rela *relocs)
{
  unsigned int count = 0;
  Elf_Internal_Rela *relend = relocs + sec->reloc_count;

  for (; relocs < relend; relocs++)
    {
      int r_type = ELF32_R_TYPE (relocs->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
        ++count;
    }

  return count;
}

/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
                          struct bfd_link_info *info,
                          bfd *input_bfd,
                          asection *input_section,
                          bfd_byte *contents,
                          Elf_Internal_Rela *relocs,
                          Elf_Internal_Sym *local_syms,
                          asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean stubs;

  htab = spu_hash_table (info);
  stubs = (htab->stub_sec != NULL
           && maybe_needs_stubs (input_section, output_bfd));
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned long r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
        {
          emit_these_relocs = TRUE;
          continue;
        }

      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
        {
          sym = local_syms + r_symndx;
          sec = local_sections[r_symndx];
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
        }
      else
        {
          RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
                                   r_symndx, symtab_hdr, sym_hashes,
                                   h, sec, relocation,
                                   unresolved_reloc, warned);
          sym_name = h->root.root.string;
        }

      if (sec != NULL && elf_discarded_section (sec))
        {
          /* For relocs against symbols from removed linkonce sections,
             or sections discarded by a linker script, we just want the
             section contents zeroed.  Avoid any special processing.  */
          _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
          rel->r_info = 0;
          rel->r_addend = 0;
          continue;
        }

      if (info->relocatable)
        continue;

      if (unresolved_reloc)
        {
          (*_bfd_error_handler)
            (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
             input_bfd,
             bfd_get_section_name (input_bfd, input_section),
             (long) rel->r_offset,
             howto->name,
             sym_name);
          ret = FALSE;
        }

      /* If this symbol is in an overlay area, we may need to relocate
         to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs)
        {
          enum _stub_type stub_type;

          stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
                                      contents, info);
          if (stub_type != no_stub)
            {
              unsigned int ovl = 0;
              struct got_entry *g, **head;

              if (stub_type != nonovl_stub)
                ovl = (spu_elf_section_data (input_section->output_section)
                       ->u.o.ovl_index);

              if (h != NULL)
                head = &h->got.glist;
              else
                head = elf_local_got_ents (input_bfd) + r_symndx;

              for (g = *head; g != NULL; g = g->next)
                if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
                  break;
              if (g == NULL)
                abort ();

              relocation = g->stub_addr;
              addend = 0;
            }
        }

      r = _bfd_final_link_relocate (howto,
                                    input_bfd,
                                    input_section,
                                    contents,
                                    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
        {
          const char *msg = (const char *) 0;

          switch (r)
            {
            case bfd_reloc_overflow:
              if (!((*info->callbacks->reloc_overflow)
                    (info, (h ? &h->root : NULL), sym_name, howto->name,
                     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
                return FALSE;
              break;

            case bfd_reloc_undefined:
              if (!((*info->callbacks->undefined_symbol)
                    (info, sym_name, input_bfd, input_section,
                     rel->r_offset, TRUE)))
                return FALSE;
              break;

            case bfd_reloc_outofrange:
              msg = _("internal error: out of range error");
              goto common_error;

            case bfd_reloc_notsupported:
              msg = _("internal error: unsupported relocation error");
              goto common_error;

            case bfd_reloc_dangerous:
              msg = _("internal error: dangerous error");
              goto common_error;

            default:
              msg = _("internal error: unknown error");
              /* fall through */

            common_error:
              ret = FALSE;
              if (!((*info->callbacks->warning)
                    (info, msg, sym_name, input_bfd, input_section,
                     rel->r_offset)))
                return FALSE;
              break;
            }
        }
    }

  if (ret
      && emit_these_relocs
      && !info->relocatable
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
        {
          int r_type;

          r_type = ELF32_R_TYPE (rel->r_info);
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
            *wrel++ = *rel;
        }
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs: shrink the reloc
         section header so that only the relocs kept above are
         written out.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}
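
/* A note on the tail of the function above: returning 2 tells the
   generic ELF linker that the relocs left in the array, by this point
   only the R_SPU_PPU32/R_SPU_PPU64 ones, should still be written to
   the output even though relocations are not otherwise being emitted.
   Those entries record SPU symbol values for later use, presumably by
   the PowerPC-side tools that embed the SPU image.  */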

/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static bfd_boolean
spu_elf_output_symbol_hook (struct bfd_link_info *info,
                            const char *sym_name ATTRIBUTE_UNUSED,
                            Elf_Internal_Sym *sym,
                            asection *sym_sec ATTRIBUTE_UNUSED,
                            struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
        if (g->addend == 0 && g->ovl == 0)
          {
            sym->st_shndx = (_bfd_elf_section_from_bfd_section
                             (htab->stub_sec[0]->output_section->owner,
                              htab->stub_sec[0]->output_section));
            sym->st_value = g->stub_addr;
            break;
          }
    }

  return TRUE;
}
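
/* The _SPUEAR_ prefix marks additional entry points into the SPU
   program.  If such an entry lives in an overlay it cannot safely be
   reached directly from outside, so the exported symbol is pointed
   here at the function's non-overlay stub (the got_entry with
   ovl == 0), which loads the right overlay before jumping to the
   real code.  */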

static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}

/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  int extra = htab->num_overlays;
  asection *sec;

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
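
/* For example, a link with five overlays and a loadable .toe section
   asks for 5 + 1 + 1 = 7 extra program headers: one per overlay, one
   more whenever there are any overlays at all (presumably to cover
   the extra piece left over when the containing PT_LOAD is split up
   below), and one for .toe.  */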

/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
        if ((s = m->sections[i]) == toe
            || spu_elf_section_data (s)->u.o.ovl_index != 0)
          {
            struct elf_segment_map *m2;
            bfd_vma amt;

            if (i + 1 < m->count)
              {
                amt = sizeof (struct elf_segment_map);
                amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return FALSE;
                m2->count = m->count - (i + 1);
                memcpy (m2->sections, m->sections + i + 1,
                        m2->count * sizeof (m->sections[0]));
                m2->p_type = PT_LOAD;
                m2->next = m->next;
                m->next = m2;
              }
            m->count = 1;
            if (i != 0)
              {
                m->count = i;
                amt = sizeof (struct elf_segment_map);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return FALSE;
                m2->p_type = PT_LOAD;
                m2->count = 1;
                m2->sections[0] = s;
                m2->next = m->next;
                m->next = m2;
              }
            break;
          }

  return TRUE;
}
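
/* As an illustration (section names made up): a PT_LOAD map covering
   { .text, .ovly1, .data } is split at the first overlay (or .toe)
   section found, leaving { .text } in the original map, giving .ovly1
   a PT_LOAD of its own, and moving { .data } into a new map after it.
   The inner loop then breaks; if the tail map still contains overlay
   sections it is split again when the outer loop reaches it.  */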

/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
                       Elf_Internal_Shdr *hdr,
                       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}

/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
        if (m->count != 0
            && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
          {
            /* Mark this as an overlay header.  */
            phdr[i].p_flags |= PF_OVERLAY;

            if (htab->ovtab != NULL && htab->ovtab->size != 0)
              {
                bfd_byte *p = htab->ovtab->contents;
                unsigned int off = o * 16 + 8;

                /* Write file_off into _ovly_table.  */
                bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
              }
          }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
        unsigned adjust;

        adjust = -phdr[i].p_filesz & 15;
        if (adjust != 0
            && last != NULL
            && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
          break;

        adjust = -phdr[i].p_memsz & 15;
        if (adjust != 0
            && last != NULL
            && phdr[i].p_filesz != 0
            && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
            && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
          break;

        if (phdr[i].p_filesz != 0)
          last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
        {
          unsigned adjust;

          adjust = -phdr[i].p_filesz & 15;
          phdr[i].p_filesz += adjust;

          adjust = -phdr[i].p_memsz & 15;
          phdr[i].p_memsz += adjust;
        }

  return TRUE;
}
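
/* The "-x & 15" idiom above computes how many bytes are needed to pad
   x up to the next multiple of 16: for p_filesz = 0x123 it yields 0xd,
   so the segment grows to 0x130.  The first, read-only pass checks
   that this padding cannot run into the following PT_LOAD segment;
   only if every segment passes (i.e. the loop runs all the way down to
   i == (unsigned int) -1) is the rounding actually applied.  */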

#define TARGET_BIG_SYM          bfd_elf32_spu_vec
#define TARGET_BIG_NAME         "elf32-spu"
#define ELF_ARCH                bfd_arch_spu
#define ELF_MACHINE_CODE        EM_SPU
/* This matches the alignment needed for DMA.  */
#define ELF_MAXPAGESIZE         0x80
#define elf_backend_rela_normal         1
#define elf_backend_can_gc_sections     1

#define bfd_elf32_bfd_reloc_type_lookup         spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup         spu_elf_reloc_name_lookup
#define elf_info_to_howto                       spu_elf_info_to_howto
#define elf_backend_count_relocs                spu_elf_count_relocs
#define elf_backend_relocate_section            spu_elf_relocate_section
#define elf_backend_symbol_processing           spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook     spu_elf_output_symbol_hook
#define bfd_elf32_new_section_hook              spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create    spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
#define elf_backend_modify_segment_map          spu_elf_modify_segment_map
#define elf_backend_modify_program_headers      spu_elf_modify_program_headers
#define elf_backend_post_process_headers        spu_elf_post_process_headers
#define elf_backend_fake_sections               spu_elf_fake_sections
#define elf_backend_special_sections            spu_elf_special_sections
#define bfd_elf32_bfd_final_link                spu_elf_final_link

#include "elf32-target.h"
