/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */
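
/* Each HOWTO entry below gives, in order: type, rightshift, size,
   bitsize, pc_relative, bitpos, complain_on_overflow, special_function,
   name, partial_inplace, src_mask, dst_mask, pcrel_offset.  */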

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,      0, 0, 0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADD_PIC",
         FALSE, 0, 0x00000000, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
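  /* REL9 places the two high bits at 0x01800000 and REL9I at 0x0000c000;
     the low seven bits sit at 0x7f in both encodings (see the dst_mask
     values in elf_howto_table above).  */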
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (ELF_IS_SECTION_IN_SEGMENT_MEMORY (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return TRUE;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
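
/* Note that the SPU_ELF_DATA id check makes spu_hash_table return NULL
   for a hash table that was not created by
   spu_elf_link_hash_table_create below.  */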

struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

static struct function_info *find_function (asection *, bfd_vma,
                                            struct bfd_link_info *);

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry),
                                      SPU_ELF_DATA))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
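  /* One byte per branch and 16 bytes per quadword, hence the
     subtraction of 4 in log2 space.  */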
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;
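
      /* Note layout: namesz, descsz, type (1), then the name
         SPU_PLUGIN_NAME padded to a word boundary, then the output
         file name as the descriptor.  */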

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;
      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
              | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
        return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];
              vma_start = s0->vma;
              ovl_end = (s0->vma
                         + ((bfd_vma) 1
                            << (htab->num_lines_log2 + htab->line_size_log2)));
              --i;
              break;
            }
          else
            ovl_end = s->vma + s->size;
        }

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma >= ovl_end)
            break;

          /* A section in an overlay area called .ovl.init is not
             an overlay, in the sense that it might be loaded in
             by the overlay manager, but rather the initial
             section contents for the overlay buffer.  */
          if (strncmp (s->name, ".ovl.init", 9) != 0)
            {
              num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
              set_id = (num_buf == prev_buf)? set_id + 1 : 0;
              prev_buf = num_buf;

              if ((s->vma - vma_start) & (htab->params->line_size - 1))
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "does not start on a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }
              else if (s->size > htab->params->line_size)
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "is larger than a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }

              alloc_sec[ovl_index++] = s;
              spu_elf_section_data (s)->u.o.ovl_index
                = (set_id << htab->num_lines_log2) + num_buf;
              spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
            }
        }

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              info->callbacks->einfo (_("%X%P: overlay section %A "
                                        "is not in cache area.\n"),
                                      alloc_sec[i-1]);
              bfd_set_error (bfd_error_bad_value);
              return 0;
            }
          else
            ovl_end = s->vma + s->size;
        }
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
         Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];

              if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
                {
                  ++num_buf;
                  if (strncmp (s0->name, ".ovl.init", 9) != 0)
                    {
                      alloc_sec[ovl_index] = s0;
                      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
                      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
                    }
                  else
                    ovl_end = s->vma + s->size;
                }
              if (strncmp (s->name, ".ovl.init", 9) != 0)
                {
                  alloc_sec[ovl_index] = s;
                  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
                  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
                  if (s0->vma != s->vma)
                    {
                      info->callbacks->einfo (_("%X%P: overlay sections %A "
                                                "and %A do not start at the "
                                                "same address.\n"),
                                              s0, s);
                      bfd_set_error (bfd_error_bad_value);
                      return 0;
                    }
                  if (ovl_end < s->vma + s->size)
                    ovl_end = s->vma + s->size;
                }
            }
          else
            ovl_end = s->vma + s->size;
        }
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
        return 0;

      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_undefined;
          h->ref_regular = 1;
          h->ref_regular_nonweak = 1;
          h->non_elf = 0;
        }
      htab->ovly_entry[i] = h;
    }

  return 2;
}

/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA     0x30000000
#define BRASL   0x31000000
#define BR      0x32000000
#define BRSL    0x33000000
#define NOP     0x40200000
#define LNOP    0x00200000
#define ILA     0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
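  /* The 0xec/0x20 test selects first bytes of the form 001x00xx,
     i.e. 0x20-0x23 and 0x30-0x33, with the ninth opcode bit
     (0x80 of the second byte) required to be zero.  */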
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};

/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
        {
          call = (contents[0] & 0xfd) == 0x31;
          if (call
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);

            }
        }
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
          && !(branch || hint)
          && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
        lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
        ret = call_ovl_stub;
      else
        ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return FALSE;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}

1143
/* Support two sizes of overlay stubs, a slower more compact stub of two
1144
   intructions, and a faster stub of four instructions.
1145
   Soft-icache stubs are four or eight words.  */
1146
 
1147
static unsigned int
1148
ovl_stub_size (struct spu_elf_params *params)
1149
{
1150
  return 16 << params->ovly_flavour >> params->compact_stub;
1151
}
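
/* Assuming ovly_normal is 0 and ovly_soft_icache is 1 as in elf32-spu.h,
   this works out to 16 or 8 bytes for normal stubs and 32 or 16 bytes
   for soft-icache stubs, matching the comment above.  */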
1152
 
1153
static unsigned int
1154
ovl_stub_size_log2 (struct spu_elf_params *params)
1155
{
1156
  return 4 + params->ovly_flavour - params->compact_stub;
1157
}
1158
 
1159
/* Two instruction overlay stubs look like:
1160
 
1161
   brsl $75,__ovly_load
1162
   .word target_ovl_and_address
1163
 
1164
   ovl_and_address is a word with the overlay number in the top 14 bits
1165
   and local store address in the bottom 18 bits.
1166
 
1167
   Four instruction overlay stubs look like:
1168
 
1169
   ila $78,ovl_number
1170
   lnop
1171
   ila $79,target_address
1172
   br __ovly_load
1173
 
1174
   Software icache stubs are:
1175
 
1176
   .word target_index
1177
   .word target_ia;
1178
   .word lrlive_branchlocalstoreaddr;
1179
   brasl $75,__icache_br_handler
1180
   .quad xor_pattern
1181
*/
1182
 
1183
static bfd_boolean
1184
build_stub (struct bfd_link_info *info,
1185
            bfd *ibfd,
1186
            asection *isec,
1187
            enum _stub_type stub_type,
1188
            struct elf_link_hash_entry *h,
1189
            const Elf_Internal_Rela *irela,
1190
            bfd_vma dest,
1191
            asection *dest_sec)
1192
{
1193
  struct spu_link_hash_table *htab = spu_hash_table (info);
1194
  unsigned int ovl, dest_ovl, set_id;
1195
  struct got_entry *g, **head;
1196
  asection *sec;
1197
  bfd_vma addend, from, to, br_dest, patt;
1198
  unsigned int lrlive;
1199
 
1200
  ovl = 0;
1201
  if (stub_type != nonovl_stub)
1202
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1203
 
1204
  if (h != NULL)
1205
    head = &h->got.glist;
1206
  else
1207
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1208
 
1209
  addend = 0;
1210
  if (irela != NULL)
1211
    addend = irela->r_addend;
1212
 
1213
  if (htab->params->ovly_flavour == ovly_soft_icache)
1214
    {
1215
      g = bfd_malloc (sizeof *g);
1216
      if (g == NULL)
1217
        return FALSE;
1218
      g->ovl = ovl;
1219
      g->br_addr = 0;
1220
      if (irela != NULL)
1221
        g->br_addr = (irela->r_offset
1222
                      + isec->output_offset
1223
                      + isec->output_section->vma);
1224
      g->next = *head;
1225
      *head = g;
1226
    }
1227
  else
1228
    {
1229
      for (g = *head; g != NULL; g = g->next)
1230
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1231
          break;
1232
      if (g == NULL)
1233
        abort ();
1234
 
1235
      if (g->ovl == 0 && ovl != 0)
1236
        return TRUE;
1237
 
1238
      if (g->stub_addr != (bfd_vma) -1)
1239
        return TRUE;
1240
    }
1241
 
1242
  sec = htab->stub_sec[ovl];
1243
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
1244
  from = sec->size + sec->output_offset + sec->output_section->vma;
1245
  g->stub_addr = from;
1246
  to = (htab->ovly_entry[0]->root.u.def.value
1247
        + htab->ovly_entry[0]->root.u.def.section->output_offset
1248
        + htab->ovly_entry[0]->root.u.def.section->output_section->vma);
1249
 
1250
  if (((dest | to | from) & 3) != 0)
1251
    {
1252
      htab->stub_err = 1;
1253
      return FALSE;
1254
    }
1255
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;
1256
 
1257
  if (htab->params->ovly_flavour == ovly_normal
1258
      && !htab->params->compact_stub)
1259
    {
1260
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
1261
                  sec->contents + sec->size);
1262
      bfd_put_32 (sec->owner, LNOP,
1263
                  sec->contents + sec->size + 4);
1264
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
1265
                  sec->contents + sec->size + 8);
1266
      if (!BRA_STUBS)
1267
        bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
1268
                    sec->contents + sec->size + 12);
1269
      else
1270
        bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
1271
                    sec->contents + sec->size + 12);
1272
    }
1273
  else if (htab->params->ovly_flavour == ovly_normal
1274
           && htab->params->compact_stub)
1275
    {
1276
      if (!BRA_STUBS)
1277
        bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
1278
                    sec->contents + sec->size);
1279
      else
1280
        bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1281
                    sec->contents + sec->size);
1282
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
1283
                  sec->contents + sec->size + 4);
1284
    }
1285
  else if (htab->params->ovly_flavour == ovly_soft_icache
1286
           && htab->params->compact_stub)
1287
    {
1288
      lrlive = 0;
1289
      if (stub_type == nonovl_stub)
1290
        ;
1291
      else if (stub_type == call_ovl_stub)
1292
        /* A brsl makes lr live and *(*sp+16) is live.
1293
           Tail calls have the same liveness.  */
1294
        lrlive = 5;
1295
      else if (!htab->params->lrlive_analysis)
1296
        /* Assume stack frame and lr save.  */
1297
        lrlive = 1;
1298
      else if (irela != NULL)
1299
        {
1300
          /* Analyse branch instructions.  */
1301
          struct function_info *caller;
1302
          bfd_vma off;
1303
 
1304
          caller = find_function (isec, irela->r_offset, info);
1305
          if (caller->start == NULL)
1306
            off = irela->r_offset;
1307
          else
1308
            {
1309
              struct function_info *found = NULL;
1310
 
1311
              /* Find the earliest piece of this function that
1312
                 has frame adjusting instructions.  We might
1313
                 see dynamic frame adjustment (eg. for alloca)
1314
                 in some later piece, but functions using
1315
                 alloca always set up a frame earlier.  Frame
1316
                 setup instructions are always in one piece.  */
1317
              if (caller->lr_store != (bfd_vma) -1
1318
                  || caller->sp_adjust != (bfd_vma) -1)
1319
                found = caller;
1320
              while (caller->start != NULL)
1321
                {
1322
                  caller = caller->start;
1323
                  if (caller->lr_store != (bfd_vma) -1
1324
                      || caller->sp_adjust != (bfd_vma) -1)
1325
                    found = caller;
1326
                }
1327
              if (found != NULL)
1328
                caller = found;
1329
              off = (bfd_vma) -1;
1330
            }
1331
 
1332
          if (off > caller->sp_adjust)
1333
            {
1334
              if (off > caller->lr_store)
1335
                /* Only *(*sp+16) is live.  */
1336
                lrlive = 1;
1337
              else
1338
                /* If no lr save, then we must be in a
1339
                   leaf function with a frame.
1340
                   lr is still live.  */
1341
                lrlive = 4;
1342
            }
1343
          else if (off > caller->lr_store)
1344
            {
1345
              /* Between lr save and stack adjust.  */
1346
              lrlive = 3;
1347
              /* This should never happen since prologues won't
1348
                 be split here.  */
1349
              BFD_ASSERT (0);
1350
            }
1351
          else
1352
            /* On entry to function.  */
1353
            lrlive = 5;
1354
 
1355
          if (stub_type != br000_ovl_stub
1356
              && lrlive != stub_type - br000_ovl_stub)
1357
            info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
1358
                                      "from analysis (%u)\n"),
1359
                                    isec, irela->r_offset, lrlive,
1360
                                    stub_type - br000_ovl_stub);
1361
        }
1362
 
1363
      /* If given lrlive info via .brinfo, use it.  */
1364
      if (stub_type > br000_ovl_stub)
1365
        lrlive = stub_type - br000_ovl_stub;
1366
 
1367
      if (ovl == 0)
1368
        to = (htab->ovly_entry[1]->root.u.def.value
1369
              + htab->ovly_entry[1]->root.u.def.section->output_offset
1370
              + htab->ovly_entry[1]->root.u.def.section->output_section->vma);
1371
 
1372
      /* The branch that uses this stub goes to stub_addr + 4.  We'll
1373
         set up an xor pattern that can be used by the icache manager
1374
         to modify this branch to go directly to its destination.  */
1375
      g->stub_addr += 4;
1376
      br_dest = g->stub_addr;
1377
      if (irela == NULL)
1378
        {
1379
          /* Except in the case of _SPUEAR_ stubs, the branch in
1380
             question is the one in the stub itself.  */
1381
          BFD_ASSERT (stub_type == nonovl_stub);
1382
          g->br_addr = g->stub_addr;
1383
          br_dest = to;
1384
        }
1385
 
1386
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
1387
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1388
                  sec->contents + sec->size);
1389
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1390
                  sec->contents + sec->size + 4);
1391
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1392
                  sec->contents + sec->size + 8);
1393
      patt = dest ^ br_dest;
1394
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1395
        patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1396
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1397
                  sec->contents + sec->size + 12);
1398
 
1399
      if (ovl == 0)
1400
        /* Extra space for linked list entries.  */
1401
        sec->size += 16;
1402
    }
1403
  else
1404
    abort ();
1405
 
1406
  sec->size += ovl_stub_size (htab->params);
1407
 
1408
  if (htab->params->emit_stub_syms)
1409
    {
1410
      size_t len;
1411
      char *name;
1412
      int add;
1413
 
1414
      len = 8 + sizeof (".ovl_call.") - 1;
1415
      if (h != NULL)
1416
        len += strlen (h->root.root.string);
1417
      else
1418
        len += 8 + 1 + 8;
1419
      add = 0;
1420
      if (irela != NULL)
1421
        add = (int) irela->r_addend & 0xffffffff;
1422
      if (add != 0)
1423
        len += 1 + 8;
1424
      name = bfd_malloc (len);
1425
      if (name == NULL)
1426
        return FALSE;
1427
 
1428
      sprintf (name, "%08x.ovl_call.", g->ovl);
1429
      if (h != NULL)
1430
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1431
      else
1432
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1433
                 dest_sec->id & 0xffffffff,
1434
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1435
      if (add != 0)
1436
        sprintf (name + len - 9, "+%x", add);
1437
 
1438
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1439
      free (name);
1440
      if (h == NULL)
1441
        return FALSE;
1442
      if (h->root.type == bfd_link_hash_new)
1443
        {
1444
          h->root.type = bfd_link_hash_defined;
1445
          h->root.u.def.section = sec;
1446
          h->size = ovl_stub_size (htab->params);
1447
          h->root.u.def.value = sec->size - h->size;
1448
          h->type = STT_FUNC;
1449
          h->ref_regular = 1;
1450
          h->def_regular = 1;
1451
          h->ref_regular_nonweak = 1;
1452
          h->forced_local = 1;
1453
          h->non_elf = 0;
1454
        }
1455
    }
1456
 
1457
  return TRUE;
1458
}
1459
 
1460
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1461
   symbols.  */
1462
 
1463
static bfd_boolean
1464
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1465
{
1466
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1467
     invoked by the PPU.  */
1468
  struct bfd_link_info *info = inf;
1469
  struct spu_link_hash_table *htab = spu_hash_table (info);
1470
  asection *sym_sec;
1471
 
1472
  if ((h->root.type == bfd_link_hash_defined
1473
       || h->root.type == bfd_link_hash_defweak)
1474
      && h->def_regular
1475
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1476
      && (sym_sec = h->root.u.def.section) != NULL
1477
      && sym_sec->output_section != bfd_abs_section_ptr
1478
      && spu_elf_section_data (sym_sec->output_section) != NULL
1479
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1480
          || htab->params->non_overlay_stubs))
1481
    {
1482
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1483
    }
1484
 
1485
  return TRUE;
1486
}
1487
 
1488
static bfd_boolean
1489
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1490
{
1491
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1492
     invoked by the PPU.  */
1493
  struct bfd_link_info *info = inf;
1494
  struct spu_link_hash_table *htab = spu_hash_table (info);
1495
  asection *sym_sec;
1496
 
1497
  if ((h->root.type == bfd_link_hash_defined
1498
       || h->root.type == bfd_link_hash_defweak)
1499
      && h->def_regular
1500
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1501
      && (sym_sec = h->root.u.def.section) != NULL
1502
      && sym_sec->output_section != bfd_abs_section_ptr
1503
      && spu_elf_section_data (sym_sec->output_section) != NULL
1504
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1505
          || htab->params->non_overlay_stubs))
1506
    {
1507
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1508
                         h->root.u.def.value, sym_sec);
1509
    }
1510
 
1511
  return TRUE;
1512
}
1513
 
1514
/* Size or build stubs.  */
1515
 
1516
static bfd_boolean
1517
process_stubs (struct bfd_link_info *info, bfd_boolean build)
1518
{
1519
  struct spu_link_hash_table *htab = spu_hash_table (info);
1520
  bfd *ibfd;
1521
 
1522
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1523
    {
1524
      extern const bfd_target bfd_elf32_spu_vec;
1525
      Elf_Internal_Shdr *symtab_hdr;
1526
      asection *isec;
1527
      Elf_Internal_Sym *local_syms = NULL;
1528
 
1529
      if (ibfd->xvec != &bfd_elf32_spu_vec)
1530
        continue;
1531
 
1532
      /* We'll need the symbol table in a second.  */
1533
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1534
      if (symtab_hdr->sh_info == 0)
1535
        continue;
1536
 
1537
      /* Walk over each section attached to the input bfd.  */
1538
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1539
        {
1540
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1541
 
1542
          /* If there aren't any relocs, then there's nothing more to do.  */
1543
          if ((isec->flags & SEC_RELOC) == 0
1544
              || isec->reloc_count == 0)
1545
            continue;
1546
 
1547
          if (!maybe_needs_stubs (isec))
1548
            continue;
1549
 
1550
          /* Get the relocs.  */
1551
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1552
                                                       info->keep_memory);
1553
          if (internal_relocs == NULL)
1554
            goto error_ret_free_local;
1555
 
1556
          /* Now examine each relocation.  */
1557
          irela = internal_relocs;
1558
          irelaend = irela + isec->reloc_count;
1559
          for (; irela < irelaend; irela++)
1560
            {
1561
              enum elf_spu_reloc_type r_type;
1562
              unsigned int r_indx;
1563
              asection *sym_sec;
1564
              Elf_Internal_Sym *sym;
1565
              struct elf_link_hash_entry *h;
1566
              enum _stub_type stub_type;
1567
 
1568
              r_type = ELF32_R_TYPE (irela->r_info);
1569
              r_indx = ELF32_R_SYM (irela->r_info);
1570
 
1571
              if (r_type >= R_SPU_max)
1572
                {
1573
                  bfd_set_error (bfd_error_bad_value);
1574
                error_ret_free_internal:
1575
                  if (elf_section_data (isec)->relocs != internal_relocs)
1576
                    free (internal_relocs);
1577
                error_ret_free_local:
1578
                  if (local_syms != NULL
1579
                      && (symtab_hdr->contents
1580
                          != (unsigned char *) local_syms))
1581
                    free (local_syms);
1582
                  return FALSE;
1583
                }
1584
 
1585
              /* Determine the reloc target section.  */
1586
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1587
                goto error_ret_free_internal;
1588
 
1589
              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1590
                                          NULL, info);
1591
              if (stub_type == no_stub)
1592
                continue;
1593
              else if (stub_type == stub_error)
1594
                goto error_ret_free_internal;
1595
 
1596
              if (htab->stub_count == NULL)
1597
                {
1598
                  bfd_size_type amt;
1599
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1600
                  htab->stub_count = bfd_zmalloc (amt);
1601
                  if (htab->stub_count == NULL)
1602
                    goto error_ret_free_internal;
1603
                }
1604
 
1605
              if (!build)
1606
                {
1607
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1608
                    goto error_ret_free_internal;
1609
                }
1610
              else
1611
                {
1612
                  bfd_vma dest;
1613
 
1614
                  if (h != NULL)
1615
                    dest = h->root.u.def.value;
1616
                  else
1617
                    dest = sym->st_value;
1618
                  dest += irela->r_addend;
1619
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1620
                                   dest, sym_sec))
1621
                    goto error_ret_free_internal;
1622
                }
1623
            }
1624
 
1625
          /* We're done with the internal relocs; free them.  */
1626
          if (elf_section_data (isec)->relocs != internal_relocs)
1627
            free (internal_relocs);
1628
        }
1629
 
1630
      if (local_syms != NULL
1631
          && symtab_hdr->contents != (unsigned char *) local_syms)
1632
        {
1633
          if (!info->keep_memory)
1634
            free (local_syms);
1635
          else
1636
            symtab_hdr->contents = (unsigned char *) local_syms;
1637
        }
1638
    }
1639
 
1640
  return TRUE;
1641
}
1642
 
1643
/* Allocate space for overlay call and return stubs.
1644
   Return 0 on error, 1 if no overlays, 2 otherwise.  */
1645
 
1646
int
1647
spu_elf_size_stubs (struct bfd_link_info *info)
1648
{
1649
  struct spu_link_hash_table *htab;
1650
  bfd *ibfd;
1651
  bfd_size_type amt;
1652
  flagword flags;
1653
  unsigned int i;
1654
  asection *stub;
1655
 
1656
  if (!process_stubs (info, FALSE))
1657
    return 0;
1658
 
1659
  htab = spu_hash_table (info);
1660
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1661
  if (htab->stub_err)
1662
    return 0;
1663
 
1664
  ibfd = info->input_bfds;
1665
  if (htab->stub_count != NULL)
1666
    {
1667
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1668
      htab->stub_sec = bfd_zmalloc (amt);
1669
      if (htab->stub_sec == NULL)
1670
        return 0;
1671
 
1672
      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1673
               | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1674
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1675
      htab->stub_sec[0] = stub;
1676
      if (stub == NULL
1677
          || !bfd_set_section_alignment (ibfd, stub,
1678
                                         ovl_stub_size_log2 (htab->params)))
1679
        return 0;
1680
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1681
      if (htab->params->ovly_flavour == ovly_soft_icache)
1682
        /* Extra space for linked list entries.  */
1683
        stub->size += htab->stub_count[0] * 16;
1684
 
1685
      for (i = 0; i < htab->num_overlays; ++i)
1686
        {
1687
          asection *osec = htab->ovl_sec[i];
1688
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1689
          stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1690
          htab->stub_sec[ovl] = stub;
1691
          if (stub == NULL
1692
              || !bfd_set_section_alignment (ibfd, stub,
1693
                                             ovl_stub_size_log2 (htab->params)))
1694
            return 0;
1695
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1696
        }
1697
    }
1698
 
1699
  if (htab->params->ovly_flavour == ovly_soft_icache)
1700
    {
1701
      /* Space for icache manager tables.
1702
         a) Tag array, one quadword per cache line.
1703
         b) Rewrite "to" list, one quadword per cache line.
1704
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1705
            a power-of-two number of full quadwords) per cache line.  */
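      /* For example (illustrative values only): with 32 cache lines
         (num_lines_log2 == 5) and two quadwords of "from" list entries
         per line (fromelem_size_log2 == 1), the size computed below is
         (16 + 16 + (16 << 1)) << 5 == 2048 bytes.  */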
1706
 
1707
      flags = SEC_ALLOC;
1708
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1709
      if (htab->ovtab == NULL
1710
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1711
        return 0;
1712
 
1713
      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1714
                          << htab->num_lines_log2;
1715
 
1716
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1717
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1718
      if (htab->init == NULL
1719
          || !bfd_set_section_alignment (ibfd, htab->init, 4))
1720
        return 0;
1721
 
1722
      htab->init->size = 16;
1723
    }
1724
  else if (htab->stub_count == NULL)
1725
    return 1;
1726
  else
1727
    {
1728
      /* htab->ovtab consists of two arrays.
1729
         .      struct {
1730
         .        u32 vma;
1731
         .        u32 size;
1732
         .        u32 file_off;
1733
         .        u32 buf;
1734
         .      } _ovly_table[];
1735
         .
1736
         .      struct {
1737
         .        u32 mapped;
1738
         .      } _ovly_buf_table[];
1739
         .  */
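      /* For example (illustrative values only): four overlays sharing two
         buffers give a table of 4*16 + 16 + 2*4 == 88 bytes.  The extra
         16 bytes at offset 0 are a dummy entry for the non-overlay area;
         its low .size bit is set in spu_elf_build_stubs to mark that
         area as always present.  */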
1740
 
1741
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1742
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1743
      if (htab->ovtab == NULL
1744
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1745
        return 0;
1746
 
1747
      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1748
    }
1749
 
1750
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1751
  if (htab->toe == NULL
1752
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1753
    return 0;
1754
  htab->toe->size = 16;
1755
 
1756
  return 2;
1757
}
1758
 
1759
/* Called from ld to place overlay manager data sections.  This is done
1760
   after the overlay manager itself is loaded, mainly so that the
1761
   linker's htab->init section is placed after any other .ovl.init
1762
   sections.  */
1763
 
1764
void
1765
spu_elf_place_overlay_data (struct bfd_link_info *info)
1766
{
1767
  struct spu_link_hash_table *htab = spu_hash_table (info);
1768
  unsigned int i;
1769
 
1770
  if (htab->stub_sec != NULL)
1771
    {
1772
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1773
 
1774
      for (i = 0; i < htab->num_overlays; ++i)
1775
        {
1776
          asection *osec = htab->ovl_sec[i];
1777
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1778
          (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1779
        }
1780
    }
1781
 
1782
  if (htab->params->ovly_flavour == ovly_soft_icache)
1783
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1784
 
1785
  if (htab->ovtab != NULL)
1786
    {
1787
      const char *ovout = ".data";
1788
      if (htab->params->ovly_flavour == ovly_soft_icache)
1789
        ovout = ".bss";
1790
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1791
    }
1792
 
1793
  if (htab->toe != NULL)
1794
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1795
}
1796
 
1797
/* Functions to handle embedded spu_ovl.o object.  */
1798
 
1799
static void *
1800
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1801
{
1802
  return stream;
1803
}
1804
 
1805
static file_ptr
1806
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1807
               void *stream,
1808
               void *buf,
1809
               file_ptr nbytes,
1810
               file_ptr offset)
1811
{
1812
  struct _ovl_stream *os;
1813
  size_t count;
1814
  size_t max;
1815
 
1816
  os = (struct _ovl_stream *) stream;
1817
  max = (const char *) os->end - (const char *) os->start;
1818
 
1819
  if ((ufile_ptr) offset >= max)
1820
    return 0;
1821
 
1822
  count = nbytes;
1823
  if (count > max - offset)
1824
    count = max - offset;
1825
 
1826
  memcpy (buf, (const char *) os->start + offset, count);
1827
  return count;
1828
}
1829
 
1830
bfd_boolean
1831
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1832
{
1833
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1834
                              "elf32-spu",
1835
                              ovl_mgr_open,
1836
                              (void *) stream,
1837
                              ovl_mgr_pread,
1838
                              NULL,
1839
                              NULL);
1840
  return *ovl_bfd != NULL;
1841
}
1842
 
1843
static unsigned int
1844
overlay_index (asection *sec)
1845
{
1846
  if (sec == NULL
1847
      || sec->output_section == bfd_abs_section_ptr)
1848
    return 0;
1849
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1850
}
1851
 
1852
/* Define an STT_OBJECT symbol.  */
1853
 
1854
static struct elf_link_hash_entry *
1855
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1856
{
1857
  struct elf_link_hash_entry *h;
1858
 
1859
  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1860
  if (h == NULL)
1861
    return NULL;
1862
 
1863
  if (h->root.type != bfd_link_hash_defined
1864
      || !h->def_regular)
1865
    {
1866
      h->root.type = bfd_link_hash_defined;
1867
      h->root.u.def.section = htab->ovtab;
1868
      h->type = STT_OBJECT;
1869
      h->ref_regular = 1;
1870
      h->def_regular = 1;
1871
      h->ref_regular_nonweak = 1;
1872
      h->non_elf = 0;
1873
    }
1874
  else if (h->root.u.def.section->owner != NULL)
1875
    {
1876
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1877
                             h->root.u.def.section->owner,
1878
                             h->root.root.string);
1879
      bfd_set_error (bfd_error_bad_value);
1880
      return NULL;
1881
    }
1882
  else
1883
    {
1884
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1885
                             h->root.root.string);
1886
      bfd_set_error (bfd_error_bad_value);
1887
      return NULL;
1888
    }
1889
 
1890
  return h;
1891
}
1892
 
1893
/* Fill in all stubs and the overlay tables.  */
1894
 
1895
static bfd_boolean
1896
spu_elf_build_stubs (struct bfd_link_info *info)
1897
{
1898
  struct spu_link_hash_table *htab = spu_hash_table (info);
1899
  struct elf_link_hash_entry *h;
1900
  bfd_byte *p;
1901
  asection *s;
1902
  bfd *obfd;
1903
  unsigned int i;
1904
 
1905
  if (htab->num_overlays != 0)
1906
    {
1907
      for (i = 0; i < 2; i++)
1908
        {
1909
          h = htab->ovly_entry[i];
1910
          if (h != NULL
1911
              && (h->root.type == bfd_link_hash_defined
1912
                  || h->root.type == bfd_link_hash_defweak)
1913
              && h->def_regular)
1914
            {
1915
              s = h->root.u.def.section->output_section;
1916
              if (spu_elf_section_data (s)->u.o.ovl_index)
1917
                {
1918
                  (*_bfd_error_handler) (_("%s in overlay section"),
1919
                                         h->root.root.string);
1920
                  bfd_set_error (bfd_error_bad_value);
1921
                  return FALSE;
1922
                }
1923
            }
1924
        }
1925
    }
1926
 
1927
  if (htab->stub_sec != NULL)
1928
    {
1929
      for (i = 0; i <= htab->num_overlays; i++)
1930
        if (htab->stub_sec[i]->size != 0)
1931
          {
1932
            htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1933
                                                      htab->stub_sec[i]->size);
1934
            if (htab->stub_sec[i]->contents == NULL)
1935
              return FALSE;
1936
            htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1937
            htab->stub_sec[i]->size = 0;
1938
          }
1939
 
1940
      /* Fill in all the stubs.  */
1941
      process_stubs (info, TRUE);
1942
      if (!htab->stub_err)
1943
        elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1944
 
1945
      if (htab->stub_err)
1946
        {
1947
          (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1948
          bfd_set_error (bfd_error_bad_value);
1949
          return FALSE;
1950
        }
1951
 
1952
      for (i = 0; i <= htab->num_overlays; i++)
1953
        {
1954
          if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1955
            {
1956
              (*_bfd_error_handler)  (_("stubs don't match calculated size"));
1957
              bfd_set_error (bfd_error_bad_value);
1958
              return FALSE;
1959
            }
1960
          htab->stub_sec[i]->rawsize = 0;
1961
        }
1962
    }
1963
 
1964
  if (htab->ovtab == NULL || htab->ovtab->size == 0)
1965
    return TRUE;
1966
 
1967
  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1968
  if (htab->ovtab->contents == NULL)
1969
    return FALSE;
1970
 
1971
  p = htab->ovtab->contents;
1972
  if (htab->params->ovly_flavour == ovly_soft_icache)
1973
    {
1974
      bfd_vma off;
1975
 
1976
      h = define_ovtab_symbol (htab, "__icache_tag_array");
1977
      if (h == NULL)
1978
        return FALSE;
1979
      h->root.u.def.value = 0;
1980
      h->size = 16 << htab->num_lines_log2;
1981
      off = h->size;
1982
 
1983
      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1984
      if (h == NULL)
1985
        return FALSE;
1986
      h->root.u.def.value = 16 << htab->num_lines_log2;
1987
      h->root.u.def.section = bfd_abs_section_ptr;
1988
 
1989
      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1990
      if (h == NULL)
1991
        return FALSE;
1992
      h->root.u.def.value = off;
1993
      h->size = 16 << htab->num_lines_log2;
1994
      off += h->size;
1995
 
1996
      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
1997
      if (h == NULL)
1998
        return FALSE;
1999
      h->root.u.def.value = 16 << htab->num_lines_log2;
2000
      h->root.u.def.section = bfd_abs_section_ptr;
2001
 
2002
      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2003
      if (h == NULL)
2004
        return FALSE;
2005
      h->root.u.def.value = off;
2006
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2007
      off += h->size;
2008
 
2009
      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2010
      if (h == NULL)
2011
        return FALSE;
2012
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
2013
                                   + htab->num_lines_log2);
2014
      h->root.u.def.section = bfd_abs_section_ptr;
2015
 
2016
      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2017
      if (h == NULL)
2018
        return FALSE;
2019
      h->root.u.def.value = htab->fromelem_size_log2;
2020
      h->root.u.def.section = bfd_abs_section_ptr;
2021
 
2022
      h = define_ovtab_symbol (htab, "__icache_base");
2023
      if (h == NULL)
2024
        return FALSE;
2025
      h->root.u.def.value = htab->ovl_sec[0]->vma;
2026
      h->root.u.def.section = bfd_abs_section_ptr;
2027
      h->size = htab->num_buf << htab->line_size_log2;
2028
 
2029
      h = define_ovtab_symbol (htab, "__icache_linesize");
2030
      if (h == NULL)
2031
        return FALSE;
2032
      h->root.u.def.value = 1 << htab->line_size_log2;
2033
      h->root.u.def.section = bfd_abs_section_ptr;
2034
 
2035
      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2036
      if (h == NULL)
2037
        return FALSE;
2038
      h->root.u.def.value = htab->line_size_log2;
2039
      h->root.u.def.section = bfd_abs_section_ptr;
2040
 
2041
      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2042
      if (h == NULL)
2043
        return FALSE;
2044
      h->root.u.def.value = -htab->line_size_log2;
2045
      h->root.u.def.section = bfd_abs_section_ptr;
2046
 
2047
      h = define_ovtab_symbol (htab, "__icache_cachesize");
2048
      if (h == NULL)
2049
        return FALSE;
2050
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2051
      h->root.u.def.section = bfd_abs_section_ptr;
2052
 
2053
      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2054
      if (h == NULL)
2055
        return FALSE;
2056
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2057
      h->root.u.def.section = bfd_abs_section_ptr;
2058
 
2059
      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2060
      if (h == NULL)
2061
        return FALSE;
2062
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2063
      h->root.u.def.section = bfd_abs_section_ptr;
2064
 
2065
      if (htab->init != NULL && htab->init->size != 0)
2066
        {
2067
          htab->init->contents = bfd_zalloc (htab->init->owner,
2068
                                             htab->init->size);
2069
          if (htab->init->contents == NULL)
2070
            return FALSE;
2071
 
2072
          h = define_ovtab_symbol (htab, "__icache_fileoff");
2073
          if (h == NULL)
2074
            return FALSE;
2075
          h->root.u.def.value = 0;
2076
          h->root.u.def.section = htab->init;
2077
          h->size = 8;
2078
        }
2079
    }
2080
  else
2081
    {
2082
      /* Write out _ovly_table.  */
2083
      /* Set low bit of .size to mark the non-overlay area as present.  */
2084
      p[7] = 1;
2085
      obfd = htab->ovtab->output_section->owner;
2086
      for (s = obfd->sections; s != NULL; s = s->next)
2087
        {
2088
          unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2089
 
2090
          if (ovl_index != 0)
2091
            {
2092
              unsigned long off = ovl_index * 16;
2093
              unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2094
 
2095
              bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2096
              bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2097
                          p + off + 4);
2098
              /* file_off written later in spu_elf_modify_program_headers.  */
2099
              bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2100
            }
2101
        }
2102
 
2103
      h = define_ovtab_symbol (htab, "_ovly_table");
2104
      if (h == NULL)
2105
        return FALSE;
2106
      h->root.u.def.value = 16;
2107
      h->size = htab->num_overlays * 16;
2108
 
2109
      h = define_ovtab_symbol (htab, "_ovly_table_end");
2110
      if (h == NULL)
2111
        return FALSE;
2112
      h->root.u.def.value = htab->num_overlays * 16 + 16;
2113
      h->size = 0;
2114
 
2115
      h = define_ovtab_symbol (htab, "_ovly_buf_table");
2116
      if (h == NULL)
2117
        return FALSE;
2118
      h->root.u.def.value = htab->num_overlays * 16 + 16;
2119
      h->size = htab->num_buf * 4;
2120
 
2121
      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2122
      if (h == NULL)
2123
        return FALSE;
2124
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2125
      h->size = 0;
2126
    }
2127
 
2128
  h = define_ovtab_symbol (htab, "_EAR_");
2129
  if (h == NULL)
2130
    return FALSE;
2131
  h->root.u.def.section = htab->toe;
2132
  h->root.u.def.value = 0;
2133
  h->size = 16;
2134
 
2135
  return TRUE;
2136
}
2137
 
2138
/* Check that all loadable section VMAs lie in the range
2139
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
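/* For instance, with the usual limits of local_store_lo == 0 and
   local_store_hi == 0x3ffff (illustrative values), htab->local_store
   below becomes 0x40000, i.e. the full 256 KiB of SPU local store.  */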
2140
 
2141
asection *
2142
spu_elf_check_vma (struct bfd_link_info *info)
2143
{
2144
  struct elf_segment_map *m;
2145
  unsigned int i;
2146
  struct spu_link_hash_table *htab = spu_hash_table (info);
2147
  bfd *abfd = info->output_bfd;
2148
  bfd_vma hi = htab->params->local_store_hi;
2149
  bfd_vma lo = htab->params->local_store_lo;
2150
 
2151
  htab->local_store = hi + 1 - lo;
2152
 
2153
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2154
    if (m->p_type == PT_LOAD)
2155
      for (i = 0; i < m->count; i++)
2156
        if (m->sections[i]->size != 0
2157
            && (m->sections[i]->vma < lo
2158
                || m->sections[i]->vma > hi
2159
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2160
          return m->sections[i];
2161
 
2162
  return NULL;
2163
}
2164
 
2165
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
2166
   Search for stack adjusting insns, and return the sp delta.
2167
   If a store of lr is found, save the instruction offset to *LR_STORE.
2168
   If a stack adjusting instruction is found, save that offset to
2169
   *SP_ADJUST.  */
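/* A typical prologue recognised here might look like (illustrative):
       stqd  $lr,16($sp)
       ai    $sp,$sp,-48
   The stqd offset is recorded in *LR_STORE, the ai offset in *SP_ADJUST,
   and the return value is -48; maybe_insert_function negates that to
   record a 48 byte stack frame.  */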
2170
 
2171
static int
2172
find_function_stack_adjust (asection *sec,
2173
                            bfd_vma offset,
2174
                            bfd_vma *lr_store,
2175
                            bfd_vma *sp_adjust)
2176
{
2177
  int reg[128];
2178
 
2179
  memset (reg, 0, sizeof (reg));
2180
  for ( ; offset + 4 <= sec->size; offset += 4)
2181
    {
2182
      unsigned char buf[4];
2183
      int rt, ra;
2184
      int imm;
2185
 
2186
      /* Assume no relocs on stack adjusting insns.  */
2187
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2188
        break;
2189
 
2190
      rt = buf[3] & 0x7f;
2191
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2192
 
2193
      if (buf[0] == 0x24 /* stqd */)
2194
        {
2195
          if (rt == 0 /* lr */ && ra == 1 /* sp */)
2196
            *lr_store = offset;
2197
          continue;
2198
        }
2199
 
2200
      /* Partly decoded immediate field.  */
2201
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2202
 
2203
      if (buf[0] == 0x1c /* ai */)
2204
        {
2205
          imm >>= 7;
2206
          imm = (imm ^ 0x200) - 0x200;
2207
          reg[rt] = reg[ra] + imm;
2208
 
2209
          if (rt == 1 /* sp */)
2210
            {
2211
              if (reg[rt] > 0)
2212
                break;
2213
              *sp_adjust = offset;
2214
              return reg[rt];
2215
            }
2216
        }
2217
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2218
        {
2219
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2220
 
2221
          reg[rt] = reg[ra] + reg[rb];
2222
          if (rt == 1)
2223
            {
2224
              if (reg[rt] > 0)
2225
                break;
2226
              *sp_adjust = offset;
2227
              return reg[rt];
2228
            }
2229
        }
2230
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2231
        {
2232
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2233
 
2234
          reg[rt] = reg[rb] - reg[ra];
2235
          if (rt == 1)
2236
            {
2237
              if (reg[rt] > 0)
2238
                break;
2239
              *sp_adjust = offset;
2240
              return reg[rt];
2241
            }
2242
        }
2243
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2244
        {
2245
          if (buf[0] >= 0x42 /* ila */)
2246
            imm |= (buf[0] & 1) << 17;
2247
          else
2248
            {
2249
              imm &= 0xffff;
2250
 
2251
              if (buf[0] == 0x40 /* il */)
2252
                {
2253
                  if ((buf[1] & 0x80) == 0)
2254
                    continue;
2255
                  imm = (imm ^ 0x8000) - 0x8000;
2256
                }
2257
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
2258
                imm <<= 16;
2259
            }
2260
          reg[rt] = imm;
2261
          continue;
2262
        }
2263
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2264
        {
2265
          reg[rt] |= imm & 0xffff;
2266
          continue;
2267
        }
2268
      else if (buf[0] == 0x04 /* ori */)
2269
        {
2270
          imm >>= 7;
2271
          imm = (imm ^ 0x200) - 0x200;
2272
          reg[rt] = reg[ra] | imm;
2273
          continue;
2274
        }
2275
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2276
        {
2277
          reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
2278
                     | ((imm & 0x4000) ? 0x00ff0000 : 0)
2279
                     | ((imm & 0x2000) ? 0x0000ff00 : 0)
2280
                     | ((imm & 0x1000) ? 0x000000ff : 0));
2281
          continue;
2282
        }
2283
      else if (buf[0] == 0x16 /* andbi */)
2284
        {
2285
          imm >>= 7;
2286
          imm &= 0xff;
2287
          imm |= imm << 8;
2288
          imm |= imm << 16;
2289
          reg[rt] = reg[ra] & imm;
2290
          continue;
2291
        }
2292
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2293
        {
2294
          /* Used in pic reg load.  Say rt is trashed.  Won't be used
2295
             in stack adjust, but we need to continue past this branch.  */
2296
          reg[rt] = 0;
2297
          continue;
2298
        }
2299
      else if (is_branch (buf) || is_indirect_branch (buf))
2300
        /* If we hit a branch then we must be out of the prologue.  */
2301
        break;
2302
    }
2303
 
2304
  return 0;
2305
}
2306
 
2307
/* qsort predicate to sort symbols by section and value.  */
2308
 
2309
static Elf_Internal_Sym *sort_syms_syms;
2310
static asection **sort_syms_psecs;
2311
 
2312
static int
2313
sort_syms (const void *a, const void *b)
2314
{
2315
  Elf_Internal_Sym *const *s1 = a;
2316
  Elf_Internal_Sym *const *s2 = b;
2317
  asection *sec1,*sec2;
2318
  bfd_signed_vma delta;
2319
 
2320
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2321
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2322
 
2323
  if (sec1 != sec2)
2324
    return sec1->index - sec2->index;
2325
 
2326
  delta = (*s1)->st_value - (*s2)->st_value;
2327
  if (delta != 0)
2328
    return delta < 0 ? -1 : 1;
2329
 
2330
  delta = (*s2)->st_size - (*s1)->st_size;
2331
  if (delta != 0)
2332
    return delta < 0 ? -1 : 1;
2333
 
2334
  return *s1 < *s2 ? -1 : 1;
2335
}
2336
 
2337
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2338
   entries for section SEC.  */
2339
 
2340
static struct spu_elf_stack_info *
2341
alloc_stack_info (asection *sec, int max_fun)
2342
{
2343
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2344
  bfd_size_type amt;
2345
 
2346
  amt = sizeof (struct spu_elf_stack_info);
2347
  amt += (max_fun - 1) * sizeof (struct function_info);
2348
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
2349
  if (sec_data->u.i.stack_info != NULL)
2350
    sec_data->u.i.stack_info->max_fun = max_fun;
2351
  return sec_data->u.i.stack_info;
2352
}
2353
 
2354
/* Add a new struct function_info describing a (part of a) function
2355
   starting at SYM_H.  Keep the array sorted by address.  */
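/* Note: the per-section array starts with room for 20 entries (see
   alloc_stack_info above) and grows by 20 + max_fun/2 slots when full.
   An entry whose LO address matches an existing one updates that entry
   in place, preferring global over local symbols, rather than being
   inserted again.  */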
2356
 
2357
static struct function_info *
2358
maybe_insert_function (asection *sec,
2359
                       void *sym_h,
2360
                       bfd_boolean global,
2361
                       bfd_boolean is_func)
2362
{
2363
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2364
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2365
  int i;
2366
  bfd_vma off, size;
2367
 
2368
  if (sinfo == NULL)
2369
    {
2370
      sinfo = alloc_stack_info (sec, 20);
2371
      if (sinfo == NULL)
2372
        return NULL;
2373
    }
2374
 
2375
  if (!global)
2376
    {
2377
      Elf_Internal_Sym *sym = sym_h;
2378
      off = sym->st_value;
2379
      size = sym->st_size;
2380
    }
2381
  else
2382
    {
2383
      struct elf_link_hash_entry *h = sym_h;
2384
      off = h->root.u.def.value;
2385
      size = h->size;
2386
    }
2387
 
2388
  for (i = sinfo->num_fun; --i >= 0; )
2389
    if (sinfo->fun[i].lo <= off)
2390
      break;
2391
 
2392
  if (i >= 0)
2393
    {
2394
      /* Don't add another entry for an alias, but do update some
2395
         info.  */
2396
      if (sinfo->fun[i].lo == off)
2397
        {
2398
          /* Prefer globals over local syms.  */
2399
          if (global && !sinfo->fun[i].global)
2400
            {
2401
              sinfo->fun[i].global = TRUE;
2402
              sinfo->fun[i].u.h = sym_h;
2403
            }
2404
          if (is_func)
2405
            sinfo->fun[i].is_func = TRUE;
2406
          return &sinfo->fun[i];
2407
        }
2408
      /* Ignore a zero-size symbol inside an existing function.  */
2409
      else if (sinfo->fun[i].hi > off && size == 0)
2410
        return &sinfo->fun[i];
2411
    }
2412
 
2413
  if (sinfo->num_fun >= sinfo->max_fun)
2414
    {
2415
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2416
      bfd_size_type old = amt;
2417
 
2418
      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2419
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2420
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2421
      sinfo = bfd_realloc (sinfo, amt);
2422
      if (sinfo == NULL)
2423
        return NULL;
2424
      memset ((char *) sinfo + old, 0, amt - old);
2425
      sec_data->u.i.stack_info = sinfo;
2426
    }
2427
 
2428
  if (++i < sinfo->num_fun)
2429
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2430
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2431
  sinfo->fun[i].is_func = is_func;
2432
  sinfo->fun[i].global = global;
2433
  sinfo->fun[i].sec = sec;
2434
  if (global)
2435
    sinfo->fun[i].u.h = sym_h;
2436
  else
2437
    sinfo->fun[i].u.sym = sym_h;
2438
  sinfo->fun[i].lo = off;
2439
  sinfo->fun[i].hi = off + size;
2440
  sinfo->fun[i].lr_store = -1;
2441
  sinfo->fun[i].sp_adjust = -1;
2442
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2443
                                                     &sinfo->fun[i].lr_store,
2444
                                                     &sinfo->fun[i].sp_adjust);
2445
  sinfo->num_fun += 1;
2446
  return &sinfo->fun[i];
2447
}
2448
 
2449
/* Return the name of FUN.  */
2450
 
2451
static const char *
2452
func_name (struct function_info *fun)
2453
{
2454
  asection *sec;
2455
  bfd *ibfd;
2456
  Elf_Internal_Shdr *symtab_hdr;
2457
 
2458
  while (fun->start != NULL)
2459
    fun = fun->start;
2460
 
2461
  if (fun->global)
2462
    return fun->u.h->root.root.string;
2463
 
2464
  sec = fun->sec;
2465
  if (fun->u.sym->st_name == 0)
2466
    {
2467
      size_t len = strlen (sec->name);
2468
      char *name = bfd_malloc (len + 10);
2469
      if (name == NULL)
2470
        return "(null)";
2471
      sprintf (name, "%s+%lx", sec->name,
2472
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
2473
      return name;
2474
    }
2475
  ibfd = sec->owner;
2476
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2477
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2478
}
2479
 
2480
/* Read the instruction at OFF in SEC.  Return true iff the instruction
2481
   is a nop, lnop, or stop 0 (all zero insn).  */
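/* The first test below masks off the bit distinguishing nop from lnop
   (the even and odd pipeline no-ops), so both are accepted; the second
   accepts the all-zero word, i.e. "stop 0".  */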
2482
 
2483
static bfd_boolean
2484
is_nop (asection *sec, bfd_vma off)
2485
{
2486
  unsigned char insn[4];
2487
 
2488
  if (off + 4 > sec->size
2489
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2490
    return FALSE;
2491
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2492
    return TRUE;
2493
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2494
    return TRUE;
2495
  return FALSE;
2496
}
2497
 
2498
/* Extend the range of FUN to cover nop padding up to LIMIT.
2499
   Return TRUE iff some instruction other than a NOP was found.  */
2500
 
2501
static bfd_boolean
2502
insns_at_end (struct function_info *fun, bfd_vma limit)
2503
{
2504
  bfd_vma off = (fun->hi + 3) & -4;
2505
 
2506
  while (off < limit && is_nop (fun->sec, off))
2507
    off += 4;
2508
  if (off < limit)
2509
    {
2510
      fun->hi = off;
2511
      return TRUE;
2512
    }
2513
  fun->hi = limit;
2514
  return FALSE;
2515
}
2516
 
2517
/* Check and fix overlapping function ranges.  Return TRUE iff there
2518
   are gaps in the current info we have about functions in SEC.  */
2519
 
2520
static bfd_boolean
2521
check_function_ranges (asection *sec, struct bfd_link_info *info)
2522
{
2523
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2524
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2525
  int i;
2526
  bfd_boolean gaps = FALSE;
2527
 
2528
  if (sinfo == NULL)
2529
    return FALSE;
2530
 
2531
  for (i = 1; i < sinfo->num_fun; i++)
2532
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2533
      {
2534
        /* Fix overlapping symbols.  */
2535
        const char *f1 = func_name (&sinfo->fun[i - 1]);
2536
        const char *f2 = func_name (&sinfo->fun[i]);
2537
 
2538
        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2539
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2540
      }
2541
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2542
      gaps = TRUE;
2543
 
2544
  if (sinfo->num_fun == 0)
2545
    gaps = TRUE;
2546
  else
2547
    {
2548
      if (sinfo->fun[0].lo != 0)
2549
        gaps = TRUE;
2550
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2551
        {
2552
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2553
 
2554
          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2555
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2556
        }
2557
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2558
        gaps = TRUE;
2559
    }
2560
  return gaps;
2561
}
2562
 
2563
/* Search current function info for a function that contains address
2564
   OFFSET in section SEC.  */
2565
 
2566
static struct function_info *
2567
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2568
{
2569
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2570
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2571
  int lo, hi, mid;
2572
 
2573
  lo = 0;
2574
  hi = sinfo->num_fun;
2575
  while (lo < hi)
2576
    {
2577
      mid = (lo + hi) / 2;
2578
      if (offset < sinfo->fun[mid].lo)
2579
        hi = mid;
2580
      else if (offset >= sinfo->fun[mid].hi)
2581
        lo = mid + 1;
2582
      else
2583
        return &sinfo->fun[mid];
2584
    }
2585
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2586
                          sec, offset);
2587
  bfd_set_error (bfd_error_bad_value);
2588
  return NULL;
2589
}
2590
 
2591
/* Add CALLEE to CALLER's call list if not already present.  Return TRUE
2592
   if CALLEE was new.  If this function returns FALSE, CALLEE should
2593
   be freed.  */
2594
 
2595
static bfd_boolean
2596
insert_callee (struct function_info *caller, struct call_info *callee)
2597
{
2598
  struct call_info **pp, *p;
2599
 
2600
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2601
    if (p->fun == callee->fun)
2602
      {
2603
        /* Tail calls use less stack than normal calls.  Retain entry
2604
           for normal call over one for tail call.  */
2605
        p->is_tail &= callee->is_tail;
2606
        if (!p->is_tail)
2607
          {
2608
            p->fun->start = NULL;
2609
            p->fun->is_func = TRUE;
2610
          }
2611
        p->count += callee->count;
2612
        /* Reorder list so most recent call is first.  */
2613
        *pp = p->next;
2614
        p->next = caller->call_list;
2615
        caller->call_list = p;
2616
        return FALSE;
2617
      }
2618
  callee->next = caller->call_list;
2619
  caller->call_list = callee;
2620
  return TRUE;
2621
}
2622
 
2623
/* Copy CALL and insert the copy into CALLER.  */
2624
 
2625
static bfd_boolean
2626
copy_callee (struct function_info *caller, const struct call_info *call)
2627
{
2628
  struct call_info *callee;
2629
  callee = bfd_malloc (sizeof (*callee));
2630
  if (callee == NULL)
2631
    return FALSE;
2632
  *callee = *call;
2633
  if (!insert_callee (caller, callee))
2634
    free (callee);
2635
  return TRUE;
2636
}
2637
 
2638
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2639
   overlay stub sections.  */
2640
 
2641
static bfd_boolean
2642
interesting_section (asection *s)
2643
{
2644
  return (s->output_section != bfd_abs_section_ptr
2645
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2646
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2647
          && s->size != 0);
2648
}
2649
 
2650
/* Rummage through the relocs for SEC, looking for function calls.
2651
   If CALL_TREE is true, fill in the call graph.  If CALL_TREE is false,
2652
   mark destination symbols on calls as being functions.  Also
2653
   look at branches, which may be tail calls or may go to the hot/cold
2654
   section part of the same function.  */
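/* discover_functions below first runs this with CALL_TREE false so that
   branch and call targets acquire function_info entries; when CALL_TREE
   is nonzero the same loop instead records caller/callee edges through
   insert_callee, including tail calls and hot/cold branches.  */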
2655
 
2656
static bfd_boolean
2657
mark_functions_via_relocs (asection *sec,
2658
                           struct bfd_link_info *info,
2659
                           int call_tree)
2660
{
2661
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2662
  Elf_Internal_Shdr *symtab_hdr;
2663
  void *psyms;
2664
  unsigned int priority = 0;
2665
  static bfd_boolean warned;
2666
 
2667
  if (!interesting_section (sec)
2668
      || sec->reloc_count == 0)
2669
    return TRUE;
2670
 
2671
  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2672
                                               info->keep_memory);
2673
  if (internal_relocs == NULL)
2674
    return FALSE;
2675
 
2676
  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2677
  psyms = &symtab_hdr->contents;
2678
  irela = internal_relocs;
2679
  irelaend = irela + sec->reloc_count;
2680
  for (; irela < irelaend; irela++)
2681
    {
2682
      enum elf_spu_reloc_type r_type;
2683
      unsigned int r_indx;
2684
      asection *sym_sec;
2685
      Elf_Internal_Sym *sym;
2686
      struct elf_link_hash_entry *h;
2687
      bfd_vma val;
2688
      bfd_boolean nonbranch, is_call;
2689
      struct function_info *caller;
2690
      struct call_info *callee;
2691
 
2692
      r_type = ELF32_R_TYPE (irela->r_info);
2693
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2694
 
2695
      r_indx = ELF32_R_SYM (irela->r_info);
2696
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2697
        return FALSE;
2698
 
2699
      if (sym_sec == NULL
2700
          || sym_sec->output_section == bfd_abs_section_ptr)
2701
        continue;
2702
 
2703
      is_call = FALSE;
2704
      if (!nonbranch)
2705
        {
2706
          unsigned char insn[4];
2707
 
2708
          if (!bfd_get_section_contents (sec->owner, sec, insn,
2709
                                         irela->r_offset, 4))
2710
            return FALSE;
2711
          if (is_branch (insn))
2712
            {
2713
              is_call = (insn[0] & 0xfd) == 0x31;
2714
              priority = insn[1] & 0x0f;
2715
              priority <<= 8;
2716
              priority |= insn[2];
2717
              priority <<= 8;
2718
              priority |= insn[3];
2719
              priority >>= 7;
2720
              if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2721
                  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2722
                {
2723
                  if (!warned)
2724
                    info->callbacks->einfo
2725
                      (_("%B(%A+0x%v): call to non-code section"
2726
                         " %B(%A), analysis incomplete\n"),
2727
                       sec->owner, sec, irela->r_offset,
2728
                       sym_sec->owner, sym_sec);
2729
                  warned = TRUE;
2730
                  continue;
2731
                }
2732
            }
2733
          else
2734
            {
2735
              nonbranch = TRUE;
2736
              if (is_hint (insn))
2737
                continue;
2738
            }
2739
        }
2740
 
2741
      if (nonbranch)
2742
        {
2743
          /* For --auto-overlay, count possible stubs we need for
2744
             function pointer references.  */
2745
          unsigned int sym_type;
2746
          if (h)
2747
            sym_type = h->type;
2748
          else
2749
            sym_type = ELF_ST_TYPE (sym->st_info);
2750
          if (sym_type == STT_FUNC)
2751
            {
2752
              if (call_tree && spu_hash_table (info)->params->auto_overlay)
2753
                spu_hash_table (info)->non_ovly_stub += 1;
2754
              /* If the symbol type is STT_FUNC then this must be a
2755
                 function pointer initialisation.  */
2756
              continue;
2757
            }
2758
          /* Ignore data references.  */
2759
          if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2760
              != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2761
            continue;
2762
          /* Otherwise we probably have a jump table reloc for
2763
             a switch statement or some other reference to a
2764
             code label.  */
2765
        }
2766
 
2767
      if (h)
2768
        val = h->root.u.def.value;
2769
      else
2770
        val = sym->st_value;
2771
      val += irela->r_addend;
2772
 
2773
      if (!call_tree)
2774
        {
2775
          struct function_info *fun;
2776
 
2777
          if (irela->r_addend != 0)
2778
            {
2779
              Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2780
              if (fake == NULL)
2781
                return FALSE;
2782
              fake->st_value = val;
2783
              fake->st_shndx
2784
                = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2785
              sym = fake;
2786
            }
2787
          if (sym)
2788
            fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2789
          else
2790
            fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2791
          if (fun == NULL)
2792
            return FALSE;
2793
          if (irela->r_addend != 0
2794
              && fun->u.sym != sym)
2795
            free (sym);
2796
          continue;
2797
        }
2798
 
2799
      caller = find_function (sec, irela->r_offset, info);
2800
      if (caller == NULL)
2801
        return FALSE;
2802
      callee = bfd_malloc (sizeof *callee);
2803
      if (callee == NULL)
2804
        return FALSE;
2805
 
2806
      callee->fun = find_function (sym_sec, val, info);
2807
      if (callee->fun == NULL)
2808
        return FALSE;
2809
      callee->is_tail = !is_call;
2810
      callee->is_pasted = FALSE;
2811
      callee->broken_cycle = FALSE;
2812
      callee->priority = priority;
2813
      callee->count = nonbranch? 0 : 1;
2814
      if (callee->fun->last_caller != sec)
2815
        {
2816
          callee->fun->last_caller = sec;
2817
          callee->fun->call_count += 1;
2818
        }
2819
      if (!insert_callee (caller, callee))
2820
        free (callee);
2821
      else if (!is_call
2822
               && !callee->fun->is_func
2823
               && callee->fun->stack == 0)
2824
        {
2825
          /* This is either a tail call or a branch from one part of
2826
             the function to another, i.e. the hot/cold section.  If the
2827
             destination has been called by some other function then
2828
             it is a separate function.  We also assume that functions
2829
             are not split across input files.  */
2830
          if (sec->owner != sym_sec->owner)
2831
            {
2832
              callee->fun->start = NULL;
2833
              callee->fun->is_func = TRUE;
2834
            }
2835
          else if (callee->fun->start == NULL)
2836
            {
2837
              struct function_info *caller_start = caller;
2838
              while (caller_start->start)
2839
                caller_start = caller_start->start;
2840
 
2841
              if (caller_start != callee->fun)
2842
                callee->fun->start = caller_start;
2843
            }
2844
          else
2845
            {
2846
              struct function_info *callee_start;
2847
              struct function_info *caller_start;
2848
              callee_start = callee->fun;
2849
              while (callee_start->start)
2850
                callee_start = callee_start->start;
2851
              caller_start = caller;
2852
              while (caller_start->start)
2853
                caller_start = caller_start->start;
2854
              if (caller_start != callee_start)
2855
                {
2856
                  callee->fun->start = NULL;
2857
                  callee->fun->is_func = TRUE;
2858
                }
2859
            }
2860
        }
2861
    }
2862
 
2863
  return TRUE;
2864
}
2865
 
2866
/* Handle something like .init or .fini, which has a piece of a function.
2867
   These sections are pasted together to form a single function.  */
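/* The piece is entered as a fake local symbol spanning the whole section,
   then linked to the function fragment preceding it in the output
   section's link order via a call_info entry marked is_pasted.  */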
2868
 
2869
static bfd_boolean
2870
pasted_function (asection *sec)
2871
{
2872
  struct bfd_link_order *l;
2873
  struct _spu_elf_section_data *sec_data;
2874
  struct spu_elf_stack_info *sinfo;
2875
  Elf_Internal_Sym *fake;
2876
  struct function_info *fun, *fun_start;
2877
 
2878
  fake = bfd_zmalloc (sizeof (*fake));
2879
  if (fake == NULL)
2880
    return FALSE;
2881
  fake->st_value = 0;
2882
  fake->st_size = sec->size;
2883
  fake->st_shndx
2884
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2885
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2886
  if (!fun)
2887
    return FALSE;
2888
 
2889
  /* Find a function immediately preceding this section.  */
2890
  fun_start = NULL;
2891
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2892
    {
2893
      if (l->u.indirect.section == sec)
2894
        {
2895
          if (fun_start != NULL)
2896
            {
2897
              struct call_info *callee = bfd_malloc (sizeof *callee);
2898
              if (callee == NULL)
2899
                return FALSE;
2900
 
2901
              fun->start = fun_start;
2902
              callee->fun = fun;
2903
              callee->is_tail = TRUE;
2904
              callee->is_pasted = TRUE;
2905
              callee->broken_cycle = FALSE;
2906
              callee->priority = 0;
2907
              callee->count = 1;
2908
              if (!insert_callee (fun_start, callee))
2909
                free (callee);
2910
              return TRUE;
2911
            }
2912
          break;
2913
        }
2914
      if (l->type == bfd_indirect_link_order
2915
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2916
          && (sinfo = sec_data->u.i.stack_info) != NULL
2917
          && sinfo->num_fun != 0)
2918
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
2919
    }
2920
 
2921
  /* Don't return an error if we did not find a function preceding this
2922
     section.  The section may have incorrect flags.  */
2923
  return TRUE;
2924
}
2925
 
2926
/* Map address ranges in code sections to functions.  */
2927
 
2928
static bfd_boolean
2929
discover_functions (struct bfd_link_info *info)
2930
{
2931
  bfd *ibfd;
2932
  int bfd_idx;
2933
  Elf_Internal_Sym ***psym_arr;
2934
  asection ***sec_arr;
2935
  bfd_boolean gaps = FALSE;
2936
 
2937
  bfd_idx = 0;
2938
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2939
    bfd_idx++;
2940
 
2941
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2942
  if (psym_arr == NULL)
2943
    return FALSE;
2944
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2945
  if (sec_arr == NULL)
2946
    return FALSE;
2947
 
2948
  for (ibfd = info->input_bfds, bfd_idx = 0;
2949
       ibfd != NULL;
2950
       ibfd = ibfd->link_next, bfd_idx++)
2951
    {
2952
      extern const bfd_target bfd_elf32_spu_vec;
2953
      Elf_Internal_Shdr *symtab_hdr;
2954
      asection *sec;
2955
      size_t symcount;
2956
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2957
      asection **psecs, **p;
2958
 
2959
      if (ibfd->xvec != &bfd_elf32_spu_vec)
2960
        continue;
2961
 
2962
      /* Read all the symbols.  */
2963
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2964
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2965
      if (symcount == 0)
2966
        {
2967
          if (!gaps)
2968
            for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2969
              if (interesting_section (sec))
2970
                {
2971
                  gaps = TRUE;
2972
                  break;
2973
                }
2974
          continue;
2975
        }
2976
 
2977
      if (symtab_hdr->contents != NULL)
2978
        {
2979
          /* Don't use cached symbols since the generic ELF linker
2980
             code only reads local symbols, and we need globals too.  */
2981
          free (symtab_hdr->contents);
2982
          symtab_hdr->contents = NULL;
2983
        }
2984
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2985
                                   NULL, NULL, NULL);
2986
      symtab_hdr->contents = (void *) syms;
2987
      if (syms == NULL)
2988
        return FALSE;
2989
 
2990
      /* Select defined function symbols that are going to be output.  */
2991
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2992
      if (psyms == NULL)
2993
        return FALSE;
2994
      psym_arr[bfd_idx] = psyms;
2995
      psecs = bfd_malloc (symcount * sizeof (*psecs));
2996
      if (psecs == NULL)
2997
        return FALSE;
2998
      sec_arr[bfd_idx] = psecs;
2999
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3000
        if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3001
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3002
          {
3003
            asection *s;
3004
 
3005
            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3006
            if (s != NULL && interesting_section (s))
3007
              *psy++ = sy;
3008
          }
3009
      symcount = psy - psyms;
3010
      *psy = NULL;
3011
 
3012
      /* Sort them by section and offset within section.  */
3013
      sort_syms_syms = syms;
3014
      sort_syms_psecs = psecs;
3015
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3016
 
3017
      /* Now inspect the function symbols.  */
3018
      for (psy = psyms; psy < psyms + symcount; )
3019
        {
3020
          asection *s = psecs[*psy - syms];
3021
          Elf_Internal_Sym **psy2;
3022
 
3023
          for (psy2 = psy; ++psy2 < psyms + symcount; )
3024
            if (psecs[*psy2 - syms] != s)
3025
              break;
3026
 
3027
          if (!alloc_stack_info (s, psy2 - psy))
3028
            return FALSE;
3029
          psy = psy2;
3030
        }
3031
 
3032
      /* First install info about properly typed and sized functions.
3033
         In an ideal world this will cover all code sections, except
3034
         when partitioning functions into hot and cold sections,
3035
         and the horribly pasted-together .init and .fini functions.  */
3036
      for (psy = psyms; psy < psyms + symcount; ++psy)
3037
        {
3038
          sy = *psy;
3039
          if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3040
            {
3041
              asection *s = psecs[sy - syms];
3042
              if (!maybe_insert_function (s, sy, FALSE, TRUE))
3043
                return FALSE;
3044
            }
3045
        }
3046
 
3047
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3048
        if (interesting_section (sec))
3049
          gaps |= check_function_ranges (sec, info);
3050
    }
3051
 
3052
  if (gaps)
3053
    {
3054
      /* See if we can discover more function symbols by looking at
3055
         relocations.  */
3056
      for (ibfd = info->input_bfds, bfd_idx = 0;
3057
           ibfd != NULL;
3058
           ibfd = ibfd->link_next, bfd_idx++)
3059
        {
3060
          asection *sec;
3061
 
3062
          if (psym_arr[bfd_idx] == NULL)
3063
            continue;
3064
 
3065
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3066
            if (!mark_functions_via_relocs (sec, info, FALSE))
3067
              return FALSE;
3068
        }
3069
 
3070
      for (ibfd = info->input_bfds, bfd_idx = 0;
3071
           ibfd != NULL;
3072
           ibfd = ibfd->link_next, bfd_idx++)
3073
        {
3074
          Elf_Internal_Shdr *symtab_hdr;
3075
          asection *sec;
3076
          Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3077
          asection **psecs;
3078
 
3079
          if ((psyms = psym_arr[bfd_idx]) == NULL)
3080
            continue;
3081
 
3082
          psecs = sec_arr[bfd_idx];
3083
 
3084
          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3085
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3086
 
3087
          gaps = FALSE;
3088
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3089
            if (interesting_section (sec))
3090
              gaps |= check_function_ranges (sec, info);
3091
          if (!gaps)
3092
            continue;
3093
 
3094
          /* Finally, install all globals.  */
3095
          for (psy = psyms; (sy = *psy) != NULL; ++psy)
3096
            {
3097
              asection *s;
3098
 
3099
              s = psecs[sy - syms];
3100
 
3101
              /* Global syms might be improperly typed functions.  */
3102
              if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3103
                  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3104
                {
3105
                  if (!maybe_insert_function (s, sy, FALSE, FALSE))
3106
                    return FALSE;
3107
                }
3108
            }
3109
        }
3110
 
3111
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3112
        {
3113
          extern const bfd_target bfd_elf32_spu_vec;
3114
          asection *sec;
3115
 
3116
          if (ibfd->xvec != &bfd_elf32_spu_vec)
3117
            continue;
3118
 
3119
          /* Some of the symbols we've installed as marking the
3120
             beginning of functions may have a size of zero.  Extend
3121
             the range of such functions to the beginning of the
3122
             next symbol of interest.  */
3123
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3124
            if (interesting_section (sec))
3125
              {
3126
                struct _spu_elf_section_data *sec_data;
3127
                struct spu_elf_stack_info *sinfo;
3128
 
3129
                sec_data = spu_elf_section_data (sec);
3130
                sinfo = sec_data->u.i.stack_info;
3131
                if (sinfo != NULL && sinfo->num_fun != 0)
3132
                  {
3133
                    int fun_idx;
3134
                    bfd_vma hi = sec->size;
3135
 
3136
                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3137
                      {
3138
                        sinfo->fun[fun_idx].hi = hi;
3139
                        hi = sinfo->fun[fun_idx].lo;
3140
                      }
3141
 
3142
                    sinfo->fun[0].lo = 0;
3143
                  }
3144
                /* No symbols in this section.  Must be .init or .fini
3145
                   or something similar.  */
3146
                else if (!pasted_function (sec))
3147
                  return FALSE;
3148
              }
3149
        }
3150
    }
3151
 
3152
  for (ibfd = info->input_bfds, bfd_idx = 0;
3153
       ibfd != NULL;
3154
       ibfd = ibfd->link_next, bfd_idx++)
3155
    {
3156
      if (psym_arr[bfd_idx] == NULL)
3157
        continue;
3158
 
3159
      free (psym_arr[bfd_idx]);
3160
      free (sec_arr[bfd_idx]);
3161
    }
3162
 
3163
  free (psym_arr);
3164
  free (sec_arr);
3165
 
3166
  return TRUE;
3167
}
3168
 
3169
/* Iterate over all function_info we have collected, calling DOIT on
3170
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
3171
   if ROOT_ONLY.  */
3172
 
3173
static bfd_boolean
3174
for_each_node (bfd_boolean (*doit) (struct function_info *,
3175
                                    struct bfd_link_info *,
3176
                                    void *),
3177
               struct bfd_link_info *info,
3178
               void *param,
3179
               int root_only)
3180
{
3181
  bfd *ibfd;
3182
 
3183
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3184
    {
3185
      extern const bfd_target bfd_elf32_spu_vec;
3186
      asection *sec;
3187
 
3188
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3189
        continue;
3190
 
3191
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3192
        {
3193
          struct _spu_elf_section_data *sec_data;
3194
          struct spu_elf_stack_info *sinfo;
3195
 
3196
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3197
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3198
            {
3199
              int i;
3200
              for (i = 0; i < sinfo->num_fun; ++i)
3201
                if (!root_only || !sinfo->fun[i].non_root)
3202
                  if (!doit (&sinfo->fun[i], info, param))
3203
                    return FALSE;
3204
            }
3205
        }
3206
    }
3207
  return TRUE;
3208
}
3209
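/* For example, build_call_tree below uses
   for_each_node (mark_non_root, info, 0, FALSE) to visit every node we
   have collected, and for_each_node (remove_cycles, info, &depth, TRUE)
   to walk the graph starting only from its root nodes.  */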
 
3210
/* Transfer call info attached to struct function_info entries for
3211
   all of a given function's sections to the first entry.  */
3212
 
3213
static bfd_boolean
3214
transfer_calls (struct function_info *fun,
3215
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
3216
                void *param ATTRIBUTE_UNUSED)
3217
{
3218
  struct function_info *start = fun->start;
3219
 
3220
  if (start != NULL)
3221
    {
3222
      struct call_info *call, *call_next;
3223
 
3224
      while (start->start != NULL)
3225
        start = start->start;
3226
      for (call = fun->call_list; call != NULL; call = call_next)
3227
        {
3228
          call_next = call->next;
3229
          if (!insert_callee (start, call))
3230
            free (call);
3231
        }
3232
      fun->call_list = NULL;
3233
    }
3234
  return TRUE;
3235
}
3236
 
3237
/* Mark nodes in the call graph that are called by some other node.  */
3238
 
3239
static bfd_boolean
3240
mark_non_root (struct function_info *fun,
3241
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
3242
               void *param ATTRIBUTE_UNUSED)
3243
{
3244
  struct call_info *call;
3245
 
3246
  if (fun->visit1)
3247
    return TRUE;
3248
  fun->visit1 = TRUE;
3249
  for (call = fun->call_list; call; call = call->next)
3250
    {
3251
      call->fun->non_root = TRUE;
3252
      mark_non_root (call->fun, 0, 0);
3253
    }
3254
  return TRUE;
3255
}
3256
 
3257
/* Remove cycles from the call graph.  Set depth of nodes.  */
3258
 
3259
static bfd_boolean
3260
remove_cycles (struct function_info *fun,
3261
               struct bfd_link_info *info,
3262
               void *param)
3263
{
3264
  struct call_info **callp, *call;
3265
  unsigned int depth = *(unsigned int *) param;
3266
  unsigned int max_depth = depth;
3267
 
3268
  fun->depth = depth;
3269
  fun->visit2 = TRUE;
3270
  fun->marking = TRUE;
3271
 
3272
  callp = &fun->call_list;
3273
  while ((call = *callp) != NULL)
3274
    {
3275
      call->max_depth = depth + !call->is_pasted;
3276
      if (!call->fun->visit2)
3277
        {
3278
          if (!remove_cycles (call->fun, info, &call->max_depth))
3279
            return FALSE;
3280
          if (max_depth < call->max_depth)
3281
            max_depth = call->max_depth;
3282
        }
3283
      else if (call->fun->marking)
3284
        {
3285
          struct spu_link_hash_table *htab = spu_hash_table (info);
3286
 
3287
          if (!htab->params->auto_overlay
3288
              && htab->params->stack_analysis)
3289
            {
3290
              const char *f1 = func_name (fun);
3291
              const char *f2 = func_name (call->fun);
3292
 
3293
              info->callbacks->info (_("Stack analysis will ignore the call "
3294
                                       "from %s to %s\n"),
3295
                                     f1, f2);
3296
            }
3297
 
3298
          call->broken_cycle = TRUE;
3299
        }
3300
      callp = &call->next;
3301
    }
3302
  fun->marking = FALSE;
3303
  *(unsigned int *) param = max_depth;
3304
  return TRUE;
3305
}
3306
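/* A minimal worked example: for a call graph A -> B -> C -> A rooted
   at A, the walk above visits A (depth 0), B (depth 1) and C (depth 2);
   when C's call back to A is examined, A is still marking, so that call
   is flagged broken_cycle (and reported when doing stack analysis
   without auto-overlay) rather than followed forever.  */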
 
3307
/* Check that we actually visited all nodes in remove_cycles.  If we
3308
   didn't, then there is some cycle in the call graph not attached to
3309
   any root node.  Arbitrarily choose a node in the cycle as a new
3310
   root and break the cycle.  */
3311
 
3312
static bfd_boolean
3313
mark_detached_root (struct function_info *fun,
3314
                    struct bfd_link_info *info,
3315
                    void *param)
3316
{
3317
  if (fun->visit2)
3318
    return TRUE;
3319
  fun->non_root = FALSE;
3320
  *(unsigned int *) param = 0;
3321
  return remove_cycles (fun, info, param);
3322
}
3323
 
3324
/* Populate call_list for each function.  */
3325
 
3326
static bfd_boolean
3327
build_call_tree (struct bfd_link_info *info)
3328
{
3329
  bfd *ibfd;
3330
  unsigned int depth;
3331
 
3332
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3333
    {
3334
      extern const bfd_target bfd_elf32_spu_vec;
3335
      asection *sec;
3336
 
3337
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3338
        continue;
3339
 
3340
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3341
        if (!mark_functions_via_relocs (sec, info, TRUE))
3342
          return FALSE;
3343
    }
3344
 
3345
  /* Transfer call info from hot/cold section part of function
3346
     to main entry.  */
3347
  if (!spu_hash_table (info)->params->auto_overlay
3348
      && !for_each_node (transfer_calls, info, 0, FALSE))
3349
    return FALSE;
3350
 
3351
  /* Find the call graph root(s).  */
3352
  if (!for_each_node (mark_non_root, info, 0, FALSE))
3353
    return FALSE;
3354
 
3355
  /* Remove cycles from the call graph.  We start from the root node(s)
3356
     so that we break cycles in a reasonable place.  */
3357
  depth = 0;
3358
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
3359
    return FALSE;
3360
 
3361
  return for_each_node (mark_detached_root, info, &depth, FALSE);
3362
}
3363
 
3364
/* qsort predicate to sort calls by priority, max_depth then count.  */
3365
 
3366
static int
3367
sort_calls (const void *a, const void *b)
3368
{
3369
  struct call_info *const *c1 = a;
3370
  struct call_info *const *c2 = b;
3371
  int delta;
3372
 
3373
  delta = (*c2)->priority - (*c1)->priority;
3374
  if (delta != 0)
3375
    return delta;
3376
 
3377
  delta = (*c2)->max_depth - (*c1)->max_depth;
3378
  if (delta != 0)
3379
    return delta;
3380
 
3381
  delta = (*c2)->count - (*c1)->count;
3382
  if (delta != 0)
3383
    return delta;
3384
 
3385
  return (char *) c1 - (char *) c2;
3386
}
3387
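/* A small worked example of the ordering above: calls with
   (priority, max_depth, count) of (1,3,2), (0,5,9) and (1,2,7) sort as
   (1,3,2), (1,2,7), (0,5,9) -- priority first, then max_depth, then
   count, each descending, with the pointer difference as a final
   tie-break so that equal keys keep a deterministic order.  */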
 
3388
struct _mos_param {
3389
  unsigned int max_overlay_size;
3390
};
3391
 
3392
/* Set linker_mark and gc_mark on any sections that we will put in
3393
   overlays.  These flags are used by the generic ELF linker, but we
3394
   won't be continuing on to bfd_elf_final_link so it is OK to use
3395
   them.  linker_mark is clear before we get here.  Set segment_mark
3396
   on sections that are part of a pasted function (excluding the last
3397
   section).
3398
 
3399
   Set up function rodata section if --overlay-rodata.  We don't
3400
   currently include merged string constant rodata sections, since
   such sections may be shared by functions in different overlays.
3401
 
3402
   Sort the call graph so that the deepest nodes will be visited
3403
   first.  */
3404
 
3405
static bfd_boolean
3406
mark_overlay_section (struct function_info *fun,
3407
                      struct bfd_link_info *info,
3408
                      void *param)
3409
{
3410
  struct call_info *call;
3411
  unsigned int count;
3412
  struct _mos_param *mos_param = param;
3413
  struct spu_link_hash_table *htab = spu_hash_table (info);
3414
 
3415
  if (fun->visit4)
3416
    return TRUE;
3417
 
3418
  fun->visit4 = TRUE;
3419
  if (!fun->sec->linker_mark
3420
      && (htab->params->ovly_flavour != ovly_soft_icache
3421
          || htab->params->non_ia_text
3422
          || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3423
          || strcmp (fun->sec->name, ".init") == 0
3424
          || strcmp (fun->sec->name, ".fini") == 0))
3425
    {
3426
      unsigned int size;
3427
 
3428
      fun->sec->linker_mark = 1;
3429
      fun->sec->gc_mark = 1;
3430
      fun->sec->segment_mark = 0;
3431
      /* Ensure SEC_CODE is set on this text section (it ought to
3432
         be!), and SEC_CODE is clear on rodata sections.  We use
3433
         this flag to differentiate the two overlay section types.  */
3434
      fun->sec->flags |= SEC_CODE;
3435
 
3436
      size = fun->sec->size;
3437
      if (htab->params->auto_overlay & OVERLAY_RODATA)
3438
        {
3439
          char *name = NULL;
3440
 
3441
          /* Find the rodata section corresponding to this function's
3442
             text section.  */
3443
          if (strcmp (fun->sec->name, ".text") == 0)
3444
            {
3445
              name = bfd_malloc (sizeof (".rodata"));
3446
              if (name == NULL)
3447
                return FALSE;
3448
              memcpy (name, ".rodata", sizeof (".rodata"));
3449
            }
3450
          else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3451
            {
3452
              size_t len = strlen (fun->sec->name);
3453
              name = bfd_malloc (len + 3);
3454
              if (name == NULL)
3455
                return FALSE;
3456
              memcpy (name, ".rodata", sizeof (".rodata"));
3457
              memcpy (name + 7, fun->sec->name + 5, len - 4);
3458
            }
3459
          else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3460
            {
3461
              size_t len = strlen (fun->sec->name) + 1;
3462
              name = bfd_malloc (len);
3463
              if (name == NULL)
3464
                return FALSE;
3465
              memcpy (name, fun->sec->name, len);
3466
              name[14] = 'r';
3467
            }
3468
 
3469
          if (name != NULL)
3470
            {
3471
              asection *rodata = NULL;
3472
              asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3473
              if (group_sec == NULL)
3474
                rodata = bfd_get_section_by_name (fun->sec->owner, name);
3475
              else
3476
                while (group_sec != NULL && group_sec != fun->sec)
3477
                  {
3478
                    if (strcmp (group_sec->name, name) == 0)
3479
                      {
3480
                        rodata = group_sec;
3481
                        break;
3482
                      }
3483
                    group_sec = elf_section_data (group_sec)->next_in_group;
3484
                  }
3485
              fun->rodata = rodata;
3486
              if (fun->rodata)
3487
                {
3488
                  size += fun->rodata->size;
3489
                  if (htab->params->line_size != 0
3490
                      && size > htab->params->line_size)
3491
                    {
3492
                      size -= fun->rodata->size;
3493
                      fun->rodata = NULL;
3494
                    }
3495
                  else
3496
                    {
3497
                      fun->rodata->linker_mark = 1;
3498
                      fun->rodata->gc_mark = 1;
3499
                      fun->rodata->flags &= ~SEC_CODE;
3500
                    }
3501
                }
3502
              free (name);
3503
            }
3504
        }
3505
      if (mos_param->max_overlay_size < size)
3506
        mos_param->max_overlay_size = size;
3507
    }
3508
 
3509
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3510
    count += 1;
3511
 
3512
  if (count > 1)
3513
    {
3514
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3515
      if (calls == NULL)
3516
        return FALSE;
3517
 
3518
      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3519
        calls[count++] = call;
3520
 
3521
      qsort (calls, count, sizeof (*calls), sort_calls);
3522
 
3523
      fun->call_list = NULL;
3524
      while (count != 0)
3525
        {
3526
          --count;
3527
          calls[count]->next = fun->call_list;
3528
          fun->call_list = calls[count];
3529
        }
3530
      free (calls);
3531
    }
3532
 
3533
  for (call = fun->call_list; call != NULL; call = call->next)
3534
    {
3535
      if (call->is_pasted)
3536
        {
3537
          /* There can only be one is_pasted call per function_info.  */
3538
          BFD_ASSERT (!fun->sec->segment_mark);
3539
          fun->sec->segment_mark = 1;
3540
        }
3541
      if (!call->broken_cycle
3542
          && !mark_overlay_section (call->fun, info, param))
3543
        return FALSE;
3544
    }
3545
 
3546
  /* Don't put entry code into an overlay.  The overlay manager needs
3547
     a stack!  Also, don't mark .ovl.init as an overlay.  */
3548
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3549
      == info->output_bfd->start_address
3550
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3551
    {
3552
      fun->sec->linker_mark = 0;
3553
      if (fun->rodata != NULL)
3554
        fun->rodata->linker_mark = 0;
3555
    }
3556
  return TRUE;
3557
}
3558
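/* The rodata name mapping used above is, for an illustrative function
   f1:
     ".text"              -> ".rodata"
     ".text.f1"           -> ".rodata.f1"
     ".gnu.linkonce.t.f1" -> ".gnu.linkonce.r.f1"  */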
 
3559
/* If non-zero, also unmark functions called from functions within sections
3560
   that we need to unmark.  Unfortunately this isn't reliable since the
3561
   call graph cannot know the destination of function pointer calls.  */
3562
#define RECURSE_UNMARK 0
3563
 
3564
struct _uos_param {
3565
  asection *exclude_input_section;
3566
  asection *exclude_output_section;
3567
  unsigned long clearing;
3568
};
3569
 
3570
/* Undo some of mark_overlay_section's work.  */
3571
 
3572
static bfd_boolean
3573
unmark_overlay_section (struct function_info *fun,
3574
                        struct bfd_link_info *info,
3575
                        void *param)
3576
{
3577
  struct call_info *call;
3578
  struct _uos_param *uos_param = param;
3579
  unsigned int excluded = 0;
3580
 
3581
  if (fun->visit5)
3582
    return TRUE;
3583
 
3584
  fun->visit5 = TRUE;
3585
 
3586
  excluded = 0;
3587
  if (fun->sec == uos_param->exclude_input_section
3588
      || fun->sec->output_section == uos_param->exclude_output_section)
3589
    excluded = 1;
3590
 
3591
  if (RECURSE_UNMARK)
3592
    uos_param->clearing += excluded;
3593
 
3594
  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3595
    {
3596
      fun->sec->linker_mark = 0;
3597
      if (fun->rodata)
3598
        fun->rodata->linker_mark = 0;
3599
    }
3600
 
3601
  for (call = fun->call_list; call != NULL; call = call->next)
3602
    if (!call->broken_cycle
3603
        && !unmark_overlay_section (call->fun, info, param))
3604
      return FALSE;
3605
 
3606
  if (RECURSE_UNMARK)
3607
    uos_param->clearing -= excluded;
3608
  return TRUE;
3609
}
3610
 
3611
struct _cl_param {
3612
  unsigned int lib_size;
3613
  asection **lib_sections;
3614
};
3615
 
3616
/* Add sections we have marked as belonging to overlays to an array
3617
   for consideration as non-overlay sections.  The array consists of
3618
   pairs of sections, (text,rodata), for functions in the call graph.  */
3619
 
3620
static bfd_boolean
3621
collect_lib_sections (struct function_info *fun,
3622
                      struct bfd_link_info *info,
3623
                      void *param)
3624
{
3625
  struct _cl_param *lib_param = param;
3626
  struct call_info *call;
3627
  unsigned int size;
3628
 
3629
  if (fun->visit6)
3630
    return TRUE;
3631
 
3632
  fun->visit6 = TRUE;
3633
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3634
    return TRUE;
3635
 
3636
  size = fun->sec->size;
3637
  if (fun->rodata)
3638
    size += fun->rodata->size;
3639
 
3640
  if (size <= lib_param->lib_size)
3641
    {
3642
      *lib_param->lib_sections++ = fun->sec;
3643
      fun->sec->gc_mark = 0;
3644
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3645
        {
3646
          *lib_param->lib_sections++ = fun->rodata;
3647
          fun->rodata->gc_mark = 0;
3648
        }
3649
      else
3650
        *lib_param->lib_sections++ = NULL;
3651
    }
3652
 
3653
  for (call = fun->call_list; call != NULL; call = call->next)
3654
    if (!call->broken_cycle)
3655
      collect_lib_sections (call->fun, info, param);
3656
 
3657
  return TRUE;
3658
}
3659
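/* The array filled in above is laid out as pairs: lib_sections[2 * i]
   is a candidate text section and lib_sections[2 * i + 1] is its
   rodata section or NULL.  auto_ovl_lib_functions below relies on this
   layout.  */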
 
3660
/* qsort predicate to sort sections by call count.  */
3661
 
3662
static int
3663
sort_lib (const void *a, const void *b)
3664
{
3665
  asection *const *s1 = a;
3666
  asection *const *s2 = b;
3667
  struct _spu_elf_section_data *sec_data;
3668
  struct spu_elf_stack_info *sinfo;
3669
  int delta;
3670
 
3671
  delta = 0;
3672
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3673
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3674
    {
3675
      int i;
3676
      for (i = 0; i < sinfo->num_fun; ++i)
3677
        delta -= sinfo->fun[i].call_count;
3678
    }
3679
 
3680
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3681
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3682
    {
3683
      int i;
3684
      for (i = 0; i < sinfo->num_fun; ++i)
3685
        delta += sinfo->fun[i].call_count;
3686
    }
3687
 
3688
  if (delta != 0)
3689
    return delta;
3690
 
3691
  return s1 - s2;
3692
}
3693
 
3694
/* Remove some sections from those marked to be in overlays.  Choose
3695
   those that are called from many places, likely library functions.  */
3696
 
3697
static unsigned int
3698
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3699
{
3700
  bfd *ibfd;
3701
  asection **lib_sections;
3702
  unsigned int i, lib_count;
3703
  struct _cl_param collect_lib_param;
3704
  struct function_info dummy_caller;
3705
  struct spu_link_hash_table *htab;
3706
 
3707
  memset (&dummy_caller, 0, sizeof (dummy_caller));
3708
  lib_count = 0;
3709
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3710
    {
3711
      extern const bfd_target bfd_elf32_spu_vec;
3712
      asection *sec;
3713
 
3714
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3715
        continue;
3716
 
3717
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3718
        if (sec->linker_mark
3719
            && sec->size < lib_size
3720
            && (sec->flags & SEC_CODE) != 0)
3721
          lib_count += 1;
3722
    }
3723
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3724
  if (lib_sections == NULL)
3725
    return (unsigned int) -1;
3726
  collect_lib_param.lib_size = lib_size;
3727
  collect_lib_param.lib_sections = lib_sections;
3728
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3729
                      TRUE))
3730
    return (unsigned int) -1;
3731
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3732
 
3733
  /* Sort sections so that those with the most calls are first.  */
3734
  if (lib_count > 1)
3735
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3736
 
3737
  htab = spu_hash_table (info);
3738
  for (i = 0; i < lib_count; i++)
3739
    {
3740
      unsigned int tmp, stub_size;
3741
      asection *sec;
3742
      struct _spu_elf_section_data *sec_data;
3743
      struct spu_elf_stack_info *sinfo;
3744
 
3745
      sec = lib_sections[2 * i];
3746
      /* If this section is OK, its size must be less than lib_size.  */
3747
      tmp = sec->size;
3748
      /* If it has a rodata section, then add that too.  */
3749
      if (lib_sections[2 * i + 1])
3750
        tmp += lib_sections[2 * i + 1]->size;
3751
      /* Add any new overlay call stubs needed by the section.  */
3752
      stub_size = 0;
3753
      if (tmp < lib_size
3754
          && (sec_data = spu_elf_section_data (sec)) != NULL
3755
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3756
        {
3757
          int k;
3758
          struct call_info *call;
3759
 
3760
          for (k = 0; k < sinfo->num_fun; ++k)
3761
            for (call = sinfo->fun[k].call_list; call; call = call->next)
3762
              if (call->fun->sec->linker_mark)
3763
                {
3764
                  struct call_info *p;
3765
                  for (p = dummy_caller.call_list; p; p = p->next)
3766
                    if (p->fun == call->fun)
3767
                      break;
3768
                  if (!p)
3769
                    stub_size += ovl_stub_size (htab->params);
3770
                }
3771
        }
3772
      if (tmp + stub_size < lib_size)
3773
        {
3774
          struct call_info **pp, *p;
3775
 
3776
          /* This section fits.  Mark it as non-overlay.  */
3777
          lib_sections[2 * i]->linker_mark = 0;
3778
          if (lib_sections[2 * i + 1])
3779
            lib_sections[2 * i + 1]->linker_mark = 0;
3780
          lib_size -= tmp + stub_size;
3781
          /* Call stubs to the section we just added are no longer
3782
             needed.  */
3783
          pp = &dummy_caller.call_list;
3784
          while ((p = *pp) != NULL)
3785
            if (!p->fun->sec->linker_mark)
3786
              {
3787
                lib_size += ovl_stub_size (htab->params);
3788
                *pp = p->next;
3789
                free (p);
3790
              }
3791
            else
3792
              pp = &p->next;
3793
          /* Add new call stubs to dummy_caller.  */
3794
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3795
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3796
            {
3797
              int k;
3798
              struct call_info *call;
3799
 
3800
              for (k = 0; k < sinfo->num_fun; ++k)
3801
                for (call = sinfo->fun[k].call_list;
3802
                     call;
3803
                     call = call->next)
3804
                  if (call->fun->sec->linker_mark)
3805
                    {
3806
                      struct call_info *callee;
3807
                      callee = bfd_malloc (sizeof (*callee));
3808
                      if (callee == NULL)
3809
                        return (unsigned int) -1;
3810
                      *callee = *call;
3811
                      if (!insert_callee (&dummy_caller, callee))
3812
                        free (callee);
3813
                    }
3814
            }
3815
        }
3816
    }
3817
  while (dummy_caller.call_list != NULL)
3818
    {
3819
      struct call_info *call = dummy_caller.call_list;
3820
      dummy_caller.call_list = call->next;
3821
      free (call);
3822
    }
3823
  for (i = 0; i < 2 * lib_count; i++)
3824
    if (lib_sections[i])
3825
      lib_sections[i]->gc_mark = 1;
3826
  free (lib_sections);
3827
  return lib_size;
3828
}
3829
 
3830
/* Build an array of overlay sections.  The deepest node's section is
3831
   added first, then its parent node's section, then everything called
3832
   from the parent section.  The idea is to group sections so as to
3833
   minimise calls between different overlays.  */
3834
 
3835
static bfd_boolean
3836
collect_overlays (struct function_info *fun,
3837
                  struct bfd_link_info *info,
3838
                  void *param)
3839
{
3840
  struct call_info *call;
3841
  bfd_boolean added_fun;
3842
  asection ***ovly_sections = param;
3843
 
3844
  if (fun->visit7)
3845
    return TRUE;
3846
 
3847
  fun->visit7 = TRUE;
3848
  for (call = fun->call_list; call != NULL; call = call->next)
3849
    if (!call->is_pasted && !call->broken_cycle)
3850
      {
3851
        if (!collect_overlays (call->fun, info, ovly_sections))
3852
          return FALSE;
3853
        break;
3854
      }
3855
 
3856
  added_fun = FALSE;
3857
  if (fun->sec->linker_mark && fun->sec->gc_mark)
3858
    {
3859
      fun->sec->gc_mark = 0;
3860
      *(*ovly_sections)++ = fun->sec;
3861
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3862
        {
3863
          fun->rodata->gc_mark = 0;
3864
          *(*ovly_sections)++ = fun->rodata;
3865
        }
3866
      else
3867
        *(*ovly_sections)++ = NULL;
3868
      added_fun = TRUE;
3869
 
3870
      /* Pasted sections must stay with the first section.  We don't
3871
         put pasted sections in the array, just the first section.
3872
         Mark subsequent sections as already considered.  */
3873
      if (fun->sec->segment_mark)
3874
        {
3875
          struct function_info *call_fun = fun;
3876
          do
3877
            {
3878
              for (call = call_fun->call_list; call != NULL; call = call->next)
3879
                if (call->is_pasted)
3880
                  {
3881
                    call_fun = call->fun;
3882
                    call_fun->sec->gc_mark = 0;
3883
                    if (call_fun->rodata)
3884
                      call_fun->rodata->gc_mark = 0;
3885
                    break;
3886
                  }
3887
              if (call == NULL)
3888
                abort ();
3889
            }
3890
          while (call_fun->sec->segment_mark);
3891
        }
3892
    }
3893
 
3894
  for (call = fun->call_list; call != NULL; call = call->next)
3895
    if (!call->broken_cycle
3896
        && !collect_overlays (call->fun, info, ovly_sections))
3897
      return FALSE;
3898
 
3899
  if (added_fun)
3900
    {
3901
      struct _spu_elf_section_data *sec_data;
3902
      struct spu_elf_stack_info *sinfo;
3903
 
3904
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3905
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3906
        {
3907
          int i;
3908
          for (i = 0; i < sinfo->num_fun; ++i)
3909
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3910
              return FALSE;
3911
        }
3912
    }
3913
 
3914
  return TRUE;
3915
}
3916
 
3917
struct _sum_stack_param {
3918
  size_t cum_stack;
3919
  size_t overall_stack;
3920
  bfd_boolean emit_stack_syms;
3921
};
3922
 
3923
/* Descend the call graph for FUN, accumulating total stack required.  */
3924
 
3925
static bfd_boolean
3926
sum_stack (struct function_info *fun,
3927
           struct bfd_link_info *info,
3928
           void *param)
3929
{
3930
  struct call_info *call;
3931
  struct function_info *max;
3932
  size_t stack, cum_stack;
3933
  const char *f1;
3934
  bfd_boolean has_call;
3935
  struct _sum_stack_param *sum_stack_param = param;
3936
  struct spu_link_hash_table *htab;
3937
 
3938
  cum_stack = fun->stack;
3939
  sum_stack_param->cum_stack = cum_stack;
3940
  if (fun->visit3)
3941
    return TRUE;
3942
 
3943
  has_call = FALSE;
3944
  max = NULL;
3945
  for (call = fun->call_list; call; call = call->next)
3946
    {
3947
      if (call->broken_cycle)
3948
        continue;
3949
      if (!call->is_pasted)
3950
        has_call = TRUE;
3951
      if (!sum_stack (call->fun, info, sum_stack_param))
3952
        return FALSE;
3953
      stack = sum_stack_param->cum_stack;
3954
      /* Include caller stack for normal calls, don't do so for
3955
         tail calls.  fun->stack here is local stack usage for
3956
         this function.  */
3957
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3958
        stack += fun->stack;
3959
      if (cum_stack < stack)
3960
        {
3961
          cum_stack = stack;
3962
          max = call->fun;
3963
        }
3964
    }
3965
 
3966
  sum_stack_param->cum_stack = cum_stack;
3967
  stack = fun->stack;
3968
  /* Now fun->stack holds cumulative stack.  */
3969
  fun->stack = cum_stack;
3970
  fun->visit3 = TRUE;
3971
 
3972
  if (!fun->non_root
3973
      && sum_stack_param->overall_stack < cum_stack)
3974
    sum_stack_param->overall_stack = cum_stack;
3975
 
3976
  htab = spu_hash_table (info);
3977
  if (htab->params->auto_overlay)
3978
    return TRUE;
3979
 
3980
  f1 = func_name (fun);
3981
  if (htab->params->stack_analysis)
3982
    {
3983
      if (!fun->non_root)
3984
        info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3985
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3986
                              f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3987
 
3988
      if (has_call)
3989
        {
3990
          info->callbacks->minfo (_("  calls:\n"));
3991
          for (call = fun->call_list; call; call = call->next)
3992
            if (!call->is_pasted && !call->broken_cycle)
3993
              {
3994
                const char *f2 = func_name (call->fun);
3995
                const char *ann1 = call->fun == max ? "*" : " ";
3996
                const char *ann2 = call->is_tail ? "t" : " ";
3997
 
3998
                info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
3999
              }
4000
        }
4001
    }
4002
 
4003
  if (sum_stack_param->emit_stack_syms)
4004
    {
4005
      char *name = bfd_malloc (18 + strlen (f1));
4006
      struct elf_link_hash_entry *h;
4007
 
4008
      if (name == NULL)
4009
        return FALSE;
4010
 
4011
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4012
        sprintf (name, "__stack_%s", f1);
4013
      else
4014
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4015
 
4016
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4017
      free (name);
4018
      if (h != NULL
4019
          && (h->root.type == bfd_link_hash_new
4020
              || h->root.type == bfd_link_hash_undefined
4021
              || h->root.type == bfd_link_hash_undefweak))
4022
        {
4023
          h->root.type = bfd_link_hash_defined;
4024
          h->root.u.def.section = bfd_abs_section_ptr;
4025
          h->root.u.def.value = cum_stack;
4026
          h->size = 0;
4027
          h->type = 0;
4028
          h->ref_regular = 1;
4029
          h->def_regular = 1;
4030
          h->ref_regular_nonweak = 1;
4031
          h->forced_local = 1;
4032
          h->non_elf = 0;
4033
        }
4034
    }
4035
 
4036
  return TRUE;
4037
}
4038
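/* A minimal worked example of the accumulation above: if F uses 0x20
   bytes locally, makes a normal call to G (cumulative 0x30) and a tail
   call to H (cumulative 0x64, with H a complete function so
   call->fun->start is NULL), then F's cumulative stack is
   max (0x20, 0x20 + 0x30, 0x64) = 0x64; the tail call does not add F's
   own frame because that frame is reused.  */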
 
4039
/* SEC is part of a pasted function.  Return the call_info for the
4040
   next section of this function.  */
4041
 
4042
static struct call_info *
4043
find_pasted_call (asection *sec)
4044
{
4045
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4046
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4047
  struct call_info *call;
4048
  int k;
4049
 
4050
  for (k = 0; k < sinfo->num_fun; ++k)
4051
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4052
      if (call->is_pasted)
4053
        return call;
4054
  abort ();
4055
  return 0;
4056
}
4057
 
4058
/* qsort predicate to sort bfds by file name.  */
4059
 
4060
static int
4061
sort_bfds (const void *a, const void *b)
4062
{
4063
  bfd *const *abfd1 = a;
4064
  bfd *const *abfd2 = b;
4065
 
4066
  return strcmp ((*abfd1)->filename, (*abfd2)->filename);
4067
}
4068
 
4069
static unsigned int
4070
print_one_overlay_section (FILE *script,
4071
                           unsigned int base,
4072
                           unsigned int count,
4073
                           unsigned int ovlynum,
4074
                           unsigned int *ovly_map,
4075
                           asection **ovly_sections,
4076
                           struct bfd_link_info *info)
4077
{
4078
  unsigned int j;
4079
 
4080
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4081
    {
4082
      asection *sec = ovly_sections[2 * j];
4083
 
4084
      if (fprintf (script, "   %s%c%s (%s)\n",
4085
                   (sec->owner->my_archive != NULL
4086
                    ? sec->owner->my_archive->filename : ""),
4087
                   info->path_separator,
4088
                   sec->owner->filename,
4089
                   sec->name) <= 0)
4090
        return -1;
4091
      if (sec->segment_mark)
4092
        {
4093
          struct call_info *call = find_pasted_call (sec);
4094
          while (call != NULL)
4095
            {
4096
              struct function_info *call_fun = call->fun;
4097
              sec = call_fun->sec;
4098
              if (fprintf (script, "   %s%c%s (%s)\n",
4099
                           (sec->owner->my_archive != NULL
4100
                            ? sec->owner->my_archive->filename : ""),
4101
                           info->path_separator,
4102
                           sec->owner->filename,
4103
                           sec->name) <= 0)
4104
                return -1;
4105
              for (call = call_fun->call_list; call; call = call->next)
4106
                if (call->is_pasted)
4107
                  break;
4108
            }
4109
        }
4110
    }
4111
 
4112
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4113
    {
4114
      asection *sec = ovly_sections[2 * j + 1];
4115
      if (sec != NULL
4116
          && fprintf (script, "   %s%c%s (%s)\n",
4117
                      (sec->owner->my_archive != NULL
4118
                       ? sec->owner->my_archive->filename : ""),
4119
                      info->path_separator,
4120
                      sec->owner->filename,
4121
                      sec->name) <= 0)
4122
        return -1;
4123
 
4124
      sec = ovly_sections[2 * j];
4125
      if (sec->segment_mark)
4126
        {
4127
          struct call_info *call = find_pasted_call (sec);
4128
          while (call != NULL)
4129
            {
4130
              struct function_info *call_fun = call->fun;
4131
              sec = call_fun->rodata;
4132
              if (sec != NULL
4133
                  && fprintf (script, "   %s%c%s (%s)\n",
4134
                              (sec->owner->my_archive != NULL
4135
                               ? sec->owner->my_archive->filename : ""),
4136
                              info->path_separator,
4137
                              sec->owner->filename,
4138
                              sec->name) <= 0)
4139
                return -1;
4140
              for (call = call_fun->call_list; call; call = call->next)
4141
                if (call->is_pasted)
4142
                  break;
4143
            }
4144
        }
4145
    }
4146
 
4147
  return j;
4148
}
4149
 
4150
/* Handle --auto-overlay.  */
4151
 
4152
static void
4153
spu_elf_auto_overlay (struct bfd_link_info *info)
4154
{
4155
  bfd *ibfd;
4156
  bfd **bfd_arr;
4157
  struct elf_segment_map *m;
4158
  unsigned int fixed_size, lo, hi;
4159
  unsigned int reserved;
4160
  struct spu_link_hash_table *htab;
4161
  unsigned int base, i, count, bfd_count;
4162
  unsigned int region, ovlynum;
4163
  asection **ovly_sections, **ovly_p;
4164
  unsigned int *ovly_map;
4165
  FILE *script;
4166
  unsigned int total_overlay_size, overlay_size;
4167
  const char *ovly_mgr_entry;
4168
  struct elf_link_hash_entry *h;
4169
  struct _mos_param mos_param;
4170
  struct _uos_param uos_param;
4171
  struct function_info dummy_caller;
4172
 
4173
  /* Find the extents of our loadable image.  */
4174
  lo = (unsigned int) -1;
4175
  hi = 0;
4176
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4177
    if (m->p_type == PT_LOAD)
4178
      for (i = 0; i < m->count; i++)
4179
        if (m->sections[i]->size != 0)
4180
          {
4181
            if (m->sections[i]->vma < lo)
4182
              lo = m->sections[i]->vma;
4183
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4184
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
4185
          }
4186
  fixed_size = hi + 1 - lo;
4187
 
4188
  if (!discover_functions (info))
4189
    goto err_exit;
4190
 
4191
  if (!build_call_tree (info))
4192
    goto err_exit;
4193
 
4194
  htab = spu_hash_table (info);
4195
  reserved = htab->params->auto_overlay_reserved;
4196
  if (reserved == 0)
4197
    {
4198
      struct _sum_stack_param sum_stack_param;
4199
 
4200
      sum_stack_param.emit_stack_syms = 0;
4201
      sum_stack_param.overall_stack = 0;
4202
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4203
        goto err_exit;
4204
      reserved = (sum_stack_param.overall_stack
4205
                  + htab->params->extra_stack_space);
4206
    }
4207
 
4208
  /* No need for overlays if everything already fits.  */
4209
  if (fixed_size + reserved <= htab->local_store
4210
      && htab->params->ovly_flavour != ovly_soft_icache)
4211
    {
4212
      htab->params->auto_overlay = 0;
4213
      return;
4214
    }
4215
 
4216
  uos_param.exclude_input_section = 0;
4217
  uos_param.exclude_output_section
4218
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4219
 
4220
  ovly_mgr_entry = "__ovly_load";
4221
  if (htab->params->ovly_flavour == ovly_soft_icache)
4222
    ovly_mgr_entry = "__icache_br_handler";
4223
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4224
                            FALSE, FALSE, FALSE);
4225
  if (h != NULL
4226
      && (h->root.type == bfd_link_hash_defined
4227
          || h->root.type == bfd_link_hash_defweak)
4228
      && h->def_regular)
4229
    {
4230
      /* We have a user supplied overlay manager.  */
4231
      uos_param.exclude_input_section = h->root.u.def.section;
4232
    }
4233
  else
4234
    {
4235
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4236
         builtin version to .text, and will adjust .text size.  */
4237
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4238
    }
4239
 
4240
  /* Mark overlay sections, and find max overlay section size.  */
4241
  mos_param.max_overlay_size = 0;
4242
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4243
    goto err_exit;
4244
 
4245
  /* We can't put the overlay manager or interrupt routines in
4246
     overlays.  */
4247
  uos_param.clearing = 0;
4248
  if ((uos_param.exclude_input_section
4249
       || uos_param.exclude_output_section)
4250
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4251
    goto err_exit;
4252
 
4253
  bfd_count = 0;
4254
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4255
    ++bfd_count;
4256
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4257
  if (bfd_arr == NULL)
4258
    goto err_exit;
4259
 
4260
  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
4261
  count = 0;
4262
  bfd_count = 0;
4263
  total_overlay_size = 0;
4264
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4265
    {
4266
      extern const bfd_target bfd_elf32_spu_vec;
4267
      asection *sec;
4268
      unsigned int old_count;
4269
 
4270
      if (ibfd->xvec != &bfd_elf32_spu_vec)
4271
        continue;
4272
 
4273
      old_count = count;
4274
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4275
        if (sec->linker_mark)
4276
          {
4277
            if ((sec->flags & SEC_CODE) != 0)
4278
              count += 1;
4279
            fixed_size -= sec->size;
4280
            total_overlay_size += sec->size;
4281
          }
4282
        else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4283
                 && sec->output_section->owner == info->output_bfd
4284
                 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4285
          fixed_size -= sec->size;
4286
      if (count != old_count)
4287
        bfd_arr[bfd_count++] = ibfd;
4288
    }
4289
 
4290
  /* Since the overlay link script selects sections by file name and
4291
     section name, ensure that file names are unique.  */
4292
  if (bfd_count > 1)
4293
    {
4294
      bfd_boolean ok = TRUE;
4295
 
4296
      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4297
      for (i = 1; i < bfd_count; ++i)
4298
        if (strcmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4299
          {
4300
            if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4301
              {
4302
                if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4303
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
4304
                                          bfd_arr[i]->filename,
4305
                                          bfd_arr[i]->my_archive->filename);
4306
                else
4307
                  info->callbacks->einfo (_("%s duplicated\n"),
4308
                                          bfd_arr[i]->filename);
4309
                ok = FALSE;
4310
              }
4311
          }
4312
      if (!ok)
4313
        {
4314
          info->callbacks->einfo (_("sorry, no support for duplicate "
4315
                                    "object files in auto-overlay script\n"));
4316
          bfd_set_error (bfd_error_bad_value);
4317
          goto err_exit;
4318
        }
4319
    }
4320
  free (bfd_arr);
4321
 
4322
  fixed_size += reserved;
4323
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4324
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4325
    {
4326
      if (htab->params->ovly_flavour == ovly_soft_icache)
4327
        {
4328
          /* Stubs in the non-icache area are bigger.  */
4329
          fixed_size += htab->non_ovly_stub * 16;
4330
          /* Space for icache manager tables.
4331
             a) Tag array, one quadword per cache line.
4332
             - word 0: ia address of present line, init to zero.  */
4333
          fixed_size += 16 << htab->num_lines_log2;
4334
          /* b) Rewrite "to" list, one quadword per cache line.  */
4335
          fixed_size += 16 << htab->num_lines_log2;
4336
          /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4337
                to a power-of-two number of full quadwords) per cache line.  */
4338
          fixed_size += 16 << (htab->fromelem_size_log2
4339
                               + htab->num_lines_log2);
4340
          /* d) Pointer to __ea backing store (toe), 1 quadword.  */
4341
          fixed_size += 16;
4342
        }
4343
      else
4344
        {
4345
          /* Guess the number of overlays.  Assuming the overlay buffer is on
4346
             average only half full should be conservative.  */
4347
          ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4348
                     / (htab->local_store - fixed_size));
4349
          /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
4350
          fixed_size += ovlynum * 16 + 16 + 4 + 16;
4351
        }
4352
    }
4353
 
4354
  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4355
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4356
                              "size of 0x%v exceeds local store\n"),
4357
                            (bfd_vma) fixed_size,
4358
                            (bfd_vma) mos_param.max_overlay_size);
4359
 
4360
  /* Now see if we should put some functions in the non-overlay area.  */
4361
  else if (fixed_size < htab->params->auto_overlay_fixed)
4362
    {
4363
      unsigned int max_fixed, lib_size;
4364
 
4365
      max_fixed = htab->local_store - mos_param.max_overlay_size;
4366
      if (max_fixed > htab->params->auto_overlay_fixed)
4367
        max_fixed = htab->params->auto_overlay_fixed;
4368
      lib_size = max_fixed - fixed_size;
4369
      lib_size = auto_ovl_lib_functions (info, lib_size);
4370
      if (lib_size == (unsigned int) -1)
4371
        goto err_exit;
4372
      fixed_size = max_fixed - lib_size;
4373
    }
4374
 
4375
  /* Build an array of sections, suitably sorted to place into
4376
     overlays.  */
4377
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4378
  if (ovly_sections == NULL)
4379
    goto err_exit;
4380
  ovly_p = ovly_sections;
4381
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4382
    goto err_exit;
4383
  count = (size_t) (ovly_p - ovly_sections) / 2;
4384
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4385
  if (ovly_map == NULL)
4386
    goto err_exit;
4387
 
4388
  memset (&dummy_caller, 0, sizeof (dummy_caller));
4389
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4390
  if (htab->params->line_size != 0)
4391
    overlay_size = htab->params->line_size;
4392
  base = 0;
4393
  ovlynum = 0;
4394
  while (base < count)
4395
    {
4396
      unsigned int size = 0, rosize = 0, roalign = 0;
4397
 
4398
      for (i = base; i < count; i++)
4399
        {
4400
          asection *sec, *rosec;
4401
          unsigned int tmp, rotmp;
4402
          unsigned int num_stubs;
4403
          struct call_info *call, *pasty;
4404
          struct _spu_elf_section_data *sec_data;
4405
          struct spu_elf_stack_info *sinfo;
4406
          unsigned int k;
4407
 
4408
          /* See whether we can add this section to the current
4409
             overlay without overflowing our overlay buffer.  */
4410
          sec = ovly_sections[2 * i];
4411
          tmp = align_power (size, sec->alignment_power) + sec->size;
4412
          rotmp = rosize;
4413
          rosec = ovly_sections[2 * i + 1];
4414
          if (rosec != NULL)
4415
            {
4416
              rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4417
              if (roalign < rosec->alignment_power)
4418
                roalign = rosec->alignment_power;
4419
            }
4420
          if (align_power (tmp, roalign) + rotmp > overlay_size)
4421
            break;
4422
          if (sec->segment_mark)
4423
            {
4424
              /* Pasted sections must stay together, so add their
4425
                 sizes too.  */
4426
              pasty = find_pasted_call (sec);
4427
              while (pasty != NULL)
4428
                {
4429
                  struct function_info *call_fun = pasty->fun;
4430
                  tmp = (align_power (tmp, call_fun->sec->alignment_power)
4431
                         + call_fun->sec->size);
4432
                  if (call_fun->rodata)
4433
                    {
4434
                      rotmp = (align_power (rotmp,
4435
                                            call_fun->rodata->alignment_power)
4436
                               + call_fun->rodata->size);
4437
                      if (roalign < call_fun->rodata->alignment_power)
4438
                        roalign = call_fun->rodata->alignment_power;
4439
                    }
4440
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4441
                    if (pasty->is_pasted)
4442
                      break;
4443
                }
4444
            }
4445
          if (align_power (tmp, roalign) + rotmp > overlay_size)
4446
            break;
4447
 
4448
          /* If we add this section, we might need new overlay call
4449
             stubs.  Add any overlay section calls to dummy_caller.  */
4450
          pasty = NULL;
4451
          sec_data = spu_elf_section_data (sec);
4452
          sinfo = sec_data->u.i.stack_info;
4453
          for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4454
            for (call = sinfo->fun[k].call_list; call; call = call->next)
4455
              if (call->is_pasted)
4456
                {
4457
                  BFD_ASSERT (pasty == NULL);
4458
                  pasty = call;
4459
                }
4460
              else if (call->fun->sec->linker_mark)
4461
                {
4462
                  if (!copy_callee (&dummy_caller, call))
4463
                    goto err_exit;
4464
                }
4465
          while (pasty != NULL)
4466
            {
4467
              struct function_info *call_fun = pasty->fun;
4468
              pasty = NULL;
4469
              for (call = call_fun->call_list; call; call = call->next)
4470
                if (call->is_pasted)
4471
                  {
4472
                    BFD_ASSERT (pasty == NULL);
4473
                    pasty = call;
4474
                  }
4475
                else if (!copy_callee (&dummy_caller, call))
4476
                  goto err_exit;
4477
            }
4478
 
4479
          /* Calculate call stub size.  */
4480
          num_stubs = 0;
4481
          for (call = dummy_caller.call_list; call; call = call->next)
4482
            {
4483
              unsigned int stub_delta = 1;
4484
 
4485
              if (htab->params->ovly_flavour == ovly_soft_icache)
4486
                stub_delta = call->count;
4487
              num_stubs += stub_delta;
4488
 
4489
              /* If the call is within this overlay, we won't need a
4490
                 stub.  */
4491
              for (k = base; k < i + 1; k++)
4492
                if (call->fun->sec == ovly_sections[2 * k])
4493
                  {
4494
                    num_stubs -= stub_delta;
4495
                    break;
4496
                  }
4497
            }
4498
          if (htab->params->ovly_flavour == ovly_soft_icache
4499
              && num_stubs > htab->params->max_branch)
4500
            break;
4501
          if (align_power (tmp, roalign) + rotmp
4502
              + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4503
            break;
4504
          size = tmp;
4505
          rosize = rotmp;
4506
        }
4507
 
4508
      if (i == base)
4509
        {
4510
          info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4511
                                  ovly_sections[2 * i]->owner,
4512
                                  ovly_sections[2 * i],
4513
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
4514
          bfd_set_error (bfd_error_bad_value);
4515
          goto err_exit;
4516
        }
4517
 
4518
      while (dummy_caller.call_list != NULL)
4519
        {
4520
          struct call_info *call = dummy_caller.call_list;
4521
          dummy_caller.call_list = call->next;
4522
          free (call);
4523
        }
4524
 
4525
      ++ovlynum;
4526
      while (base < i)
4527
        ovly_map[base++] = ovlynum;
4528
    }
4529
 
4530
  script = htab->params->spu_elf_open_overlay_script ();
4531
 
4532
  if (htab->params->ovly_flavour == ovly_soft_icache)
4533
    {
4534
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4535
        goto file_err;
4536
 
4537
      if (fprintf (script,
4538
                   " . = ALIGN (%u);\n"
4539
                   " .ovl.init : { *(.ovl.init) }\n"
4540
                   " . = ABSOLUTE (ADDR (.ovl.init));\n",
4541
                   htab->params->line_size) <= 0)
4542
        goto file_err;
4543
 
4544
      base = 0;
4545
      ovlynum = 1;
4546
      while (base < count)
4547
        {
4548
          unsigned int indx = ovlynum - 1;
4549
          unsigned int vma, lma;
4550
 
4551
          vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4552
          lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4553
 
4554
          if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4555
                               ": AT (LOADADDR (.ovl.init) + %u) {\n",
4556
                       ovlynum, vma, lma) <= 0)
4557
            goto file_err;
4558
 
4559
          base = print_one_overlay_section (script, base, count, ovlynum,
4560
                                            ovly_map, ovly_sections, info);
4561
          if (base == (unsigned) -1)
4562
            goto file_err;
4563
 
4564
          if (fprintf (script, "  }\n") <= 0)
4565
            goto file_err;
4566
 
4567
          ovlynum++;
4568
        }
4569
 
4570
      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4571
                   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4572
        goto file_err;
4573
 
4574
      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4575
        goto file_err;
4576
    }
4577
  else
4578
    {
4579
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4580
        goto file_err;
4581
 
4582
      if (fprintf (script,
4583
                   " . = ALIGN (16);\n"
4584
                   " .ovl.init : { *(.ovl.init) }\n"
4585
                   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4586
        goto file_err;
4587
 
4588
      for (region = 1; region <= htab->params->num_lines; region++)
4589
        {
4590
          ovlynum = region;
4591
          base = 0;
4592
          while (base < count && ovly_map[base] < ovlynum)
4593
            base++;
4594
 
4595
          if (base == count)
4596
            break;
4597
 
4598
          if (region == 1)
4599
            {
4600
              /* We need to set lma since we are overlaying .ovl.init.  */
4601
              if (fprintf (script,
4602
                           " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4603
                goto file_err;
4604
            }
4605
          else
4606
            {
4607
              if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4608
                goto file_err;
4609
            }
4610
 
4611
          while (base < count)
4612
            {
4613
              if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
4614
                goto file_err;
4615
 
4616
              base = print_one_overlay_section (script, base, count, ovlynum,
4617
                                                ovly_map, ovly_sections, info);
4618
              if (base == (unsigned) -1)
4619
                goto file_err;
4620
 
4621
              if (fprintf (script, "  }\n") <= 0)
4622
                goto file_err;
4623
 
4624
              ovlynum += htab->params->num_lines;
4625
              while (base < count && ovly_map[base] < ovlynum)
4626
                base++;
4627
            }
4628
 
4629
          if (fprintf (script, " }\n") <= 0)
4630
            goto file_err;
4631
        }
4632
 
4633
      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4634
        goto file_err;
4635
    }
4636
 
4637
  free (ovly_map);
4638
  free (ovly_sections);
4639
 
4640
  if (fclose (script) != 0)
4641
    goto file_err;
4642
 
4643
  if (htab->params->auto_overlay & AUTO_RELINK)
4644
    (*htab->params->spu_elf_relink) ();
4645
 
4646
  xexit (0);
4647
 
4648
 file_err:
4649
  bfd_set_error (bfd_error_system_call);
4650
 err_exit:
4651
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4652
  xexit (1);
4653
}
4654
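/* For reference, the script emitted above for the non-icache flavour
   looks roughly like the following, here for a hypothetical link with
   a single overlay buffer and archive members foo.o and bar.o (the ':'
   stands for info->path_separator):

       SECTIONS
       {
        . = ALIGN (16);
        .ovl.init : { *(.ovl.init) }
        . = ABSOLUTE (ADDR (.ovl.init));
        OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))
        {
         .ovly1 {
          lib.a:foo.o (.text.f1)
         }
         .ovly2 {
          lib.a:bar.o (.text.f2)
         }
        }
       }
       INSERT BEFORE .text;

   Each .ovlyN body is written by print_one_overlay_section, with the
   rodata sections for the same functions listed after their text
   sections inside the same .ovlyN statement.  */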
 
4655
/* Provide an estimate of total stack required.  */
4656
 
4657
static bfd_boolean
4658
spu_elf_stack_analysis (struct bfd_link_info *info)
4659
{
4660
  struct spu_link_hash_table *htab;
4661
  struct _sum_stack_param sum_stack_param;
4662
 
4663
  if (!discover_functions (info))
4664
    return FALSE;
4665
 
4666
  if (!build_call_tree (info))
4667
    return FALSE;
4668
 
4669
  htab = spu_hash_table (info);
4670
  if (htab->params->stack_analysis)
4671
    {
4672
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4673
      info->callbacks->minfo (_("\nStack size for functions.  "
4674
                                "Annotations: '*' max stack, 't' tail call\n"));
4675
    }
4676
 
4677
  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4678
  sum_stack_param.overall_stack = 0;
4679
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4680
    return FALSE;
4681
 
4682
  if (htab->params->stack_analysis)
4683
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4684
                           (bfd_vma) sum_stack_param.overall_stack);
4685
  return TRUE;
4686
}
4687
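/* With stack analysis enabled, the output produced above looks
   something like this for a hypothetical program rooted at main:

       Stack size for call graph root nodes.
         main: 0xd0
       Maximum stack required is 0xd0

   while the map file additionally gets per-function lines of the form
   "main: 0x30 0xd0" (local stack, then cumulative stack) followed by a
   "  calls:" list annotated with '*' for the maximum-stack callee and
   't' for tail calls.  */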
 
4688
/* Perform a final link.  */
4689
 
4690
static bfd_boolean
4691
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4692
{
4693
  struct spu_link_hash_table *htab = spu_hash_table (info);
4694
 
4695
  if (htab->params->auto_overlay)
4696
    spu_elf_auto_overlay (info);
4697
 
4698
  if ((htab->params->stack_analysis
4699
       || (htab->params->ovly_flavour == ovly_soft_icache
4700
           && htab->params->lrlive_analysis))
4701
      && !spu_elf_stack_analysis (info))
4702
    info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4703
 
4704
  if (!spu_elf_build_stubs (info))
4705
    info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4706
 
4707
  return bfd_elf_final_link (output_bfd, info);
4708
}
4709
 
4710
/* Called when not normally emitting relocs, i.e. !info->relocatable
4711
   and !info->emitrelocations.  Returns a count of special relocs
4712
   that need to be emitted.  */
4713
 
4714
static unsigned int
4715
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4716
{
4717
  Elf_Internal_Rela *relocs;
4718
  unsigned int count = 0;
4719
 
4720
  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4721
                                      info->keep_memory);
4722
  if (relocs != NULL)
4723
    {
4724
      Elf_Internal_Rela *rel;
4725
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4726
 
4727
      for (rel = relocs; rel < relend; rel++)
4728
        {
4729
          int r_type = ELF32_R_TYPE (rel->r_info);
4730
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4731
            ++count;
4732
        }
4733
 
4734
      if (elf_section_data (sec)->relocs != relocs)
4735
        free (relocs);
4736
    }
4737
 
4738
  return count;
4739
}
4740
 
4741
/* Functions for adding fixup records to .fixup */
4742
 
4743
#define FIXUP_RECORD_SIZE 4
4744
 
4745
#define FIXUP_PUT(output_bfd,htab,index,addr) \
4746
          bfd_put_32 (output_bfd, addr, \
4747
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4748
#define FIXUP_GET(output_bfd,htab,index) \
4749
          bfd_get_32 (output_bfd, \
4750
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4751
 
4752
/* Store OFFSET in .fixup.  This assumes it will be called with an
4753
   increasing OFFSET.  When this OFFSET fits with the last base offset,
4754
   it just sets a bit, otherwise it adds a new fixup record.  */
4755
static void
4756
spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4757
                    bfd_vma offset)
4758
{
4759
  struct spu_link_hash_table *htab = spu_hash_table (info);
4760
  asection *sfixup = htab->sfixup;
4761
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
4762
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4763
  if (sfixup->reloc_count == 0)
4764
    {
4765
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4766
      sfixup->reloc_count++;
4767
    }
4768
  else
4769
    {
4770
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4771
      if (qaddr != (base & ~(bfd_vma) 15))
4772
        {
4773
          if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4774
            (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4775
          FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4776
          sfixup->reloc_count++;
4777
        }
4778
      else
4779
        FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4780
    }
4781
}
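
/* Editor's sketch, not in the original source: each 32-bit fixup
   record packs a 16-byte-aligned quadword address in its upper 28
   bits and a 4-bit mask of the words within that quadword that hold
   an R_SPU_ADDR32 value.  Words at quadword offsets 0, 4, 8 and 12
   map to mask bits 8, 4, 2 and 1.  For example, offsets 0x30 and 0x3c
   share the quadword at 0x30; the first call stores 0x30 | 8 = 0x38,
   and the second ORs in bit 1, giving 0x39.  The compiled-out,
   hypothetical helper below restates the computation in isolation.  */
#if 0
static bfd_vma
spu_fixup_record_sketch (bfd_vma offset)
{
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
  return qaddr | bit;
}
#endif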

/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */

static int
spu_elf_relocate_section (bfd *output_bfd,
                          struct bfd_link_info *info,
                          bfd *input_bfd,
                          asection *input_section,
                          bfd_byte *contents,
                          Elf_Internal_Rela *relocs,
                          Elf_Internal_Sym *local_syms,
                          asection **local_sections)
{
  Elf_Internal_Shdr *symtab_hdr;
  struct elf_link_hash_entry **sym_hashes;
  Elf_Internal_Rela *rel, *relend;
  struct spu_link_hash_table *htab;
  asection *ea;
  int ret = TRUE;
  bfd_boolean emit_these_relocs = FALSE;
  bfd_boolean is_ea_sym;
  bfd_boolean stubs;
  unsigned int iovl = 0;

  htab = spu_hash_table (info);
  stubs = (htab->stub_sec != NULL
           && maybe_needs_stubs (input_section));
  iovl = overlay_index (input_section);
  ea = bfd_get_section_by_name (output_bfd, "._ea");
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));

  rel = relocs;
  relend = relocs + input_section->reloc_count;
  for (; rel < relend; rel++)
    {
      int r_type;
      reloc_howto_type *howto;
      unsigned int r_symndx;
      Elf_Internal_Sym *sym;
      asection *sec;
      struct elf_link_hash_entry *h;
      const char *sym_name;
      bfd_vma relocation;
      bfd_vma addend;
      bfd_reloc_status_type r;
      bfd_boolean unresolved_reloc;
      bfd_boolean warned;
      enum _stub_type stub_type;

      r_symndx = ELF32_R_SYM (rel->r_info);
      r_type = ELF32_R_TYPE (rel->r_info);
      howto = elf_howto_table + r_type;
      unresolved_reloc = FALSE;
      warned = FALSE;
      h = NULL;
      sym = NULL;
      sec = NULL;
      if (r_symndx < symtab_hdr->sh_info)
        {
          sym = local_syms + r_symndx;
          sec = local_sections[r_symndx];
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
        }
      else
        {
          if (sym_hashes == NULL)
            return FALSE;

          h = sym_hashes[r_symndx - symtab_hdr->sh_info];

          while (h->root.type == bfd_link_hash_indirect
                 || h->root.type == bfd_link_hash_warning)
            h = (struct elf_link_hash_entry *) h->root.u.i.link;

          relocation = 0;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            {
              sec = h->root.u.def.section;
              if (sec == NULL
                  || sec->output_section == NULL)
                /* Set a flag that will be cleared later if we find a
                   relocation value for this symbol.  output_section
                   is typically NULL for symbols satisfied by a shared
                   library.  */
                unresolved_reloc = TRUE;
              else
                relocation = (h->root.u.def.value
                              + sec->output_section->vma
                              + sec->output_offset);
            }
          else if (h->root.type == bfd_link_hash_undefweak)
            ;
          else if (info->unresolved_syms_in_objects == RM_IGNORE
                   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
            ;
          else if (!info->relocatable
                   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
            {
              bfd_boolean err;
              err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
                     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
              if (!info->callbacks->undefined_symbol (info,
                                                      h->root.root.string,
                                                      input_bfd,
                                                      input_section,
                                                      rel->r_offset, err))
                return FALSE;
              warned = TRUE;
            }
          sym_name = h->root.root.string;
        }

      if (sec != NULL && elf_discarded_section (sec))
        {
          /* For relocs against symbols from removed linkonce sections,
             or sections discarded by a linker script, we just want the
             section contents zeroed.  Avoid any special processing.  */
          _bfd_clear_contents (howto, input_bfd, contents + rel->r_offset);
          rel->r_info = 0;
          rel->r_addend = 0;
          continue;
        }

      if (info->relocatable)
        continue;

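      /* Editor's note, not in the original source: the byte stores
         below rewrite the instruction in place.  Assuming the usual
         big-endian SPU encoding, loc[0] = 0x1c writes the "ai" opcode
         into the top byte, while loc[1] = 0 and loc[2] &= 0x3f clear
         the 10-bit immediate field and leave the RA and RT register
         fields intact, so "a rt,ra,rb" becomes "ai rt,ra,0" when the
         symbol is not defined locally.  */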
      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
      if (r_type == R_SPU_ADD_PIC
          && h != NULL
          && !(h->def_regular || ELF_COMMON_DEF_P (h)))
        {
          bfd_byte *loc = contents + rel->r_offset;
          loc[0] = 0x1c;
          loc[1] = 0x00;
          loc[2] &= 0x3f;
        }

      is_ea_sym = (ea != NULL
                   && sec != NULL
                   && sec->output_section == ea);

      /* If this symbol is in an overlay area, we may need to relocate
         to the overlay stub.  */
      addend = rel->r_addend;
      if (stubs
          && !is_ea_sym
          && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
                                          contents, info)) != no_stub)
        {
          unsigned int ovl = 0;
          struct got_entry *g, **head;

          if (stub_type != nonovl_stub)
            ovl = iovl;

          if (h != NULL)
            head = &h->got.glist;
          else
            head = elf_local_got_ents (input_bfd) + r_symndx;

          for (g = *head; g != NULL; g = g->next)
            if (htab->params->ovly_flavour == ovly_soft_icache
                ? (g->ovl == ovl
                   && g->br_addr == (rel->r_offset
                                     + input_section->output_offset
                                     + input_section->output_section->vma))
                : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
              break;
          if (g == NULL)
            abort ();

          relocation = g->stub_addr;
          addend = 0;
        }
      else
        {
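          /* Editor's note, not in the original source: the encoding
             below places the computed set index in bits 18 and up,
             above the 18-bit local store address range, so a single
             32-bit value carries both the cache set and the offset.
             This is an interpretation of the code, not taken from the
             original comments.  */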
          /* For soft icache, encode the overlay index into addresses.  */
          if (htab->params->ovly_flavour == ovly_soft_icache
              && (r_type == R_SPU_ADDR16_HI
                  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
              && !is_ea_sym)
            {
              unsigned int ovl = overlay_index (sec);
              if (ovl != 0)
                {
                  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
                  relocation += set_id << 18;
                }
            }
        }

      if (htab->params->emit_fixups && !info->relocatable
          && (input_section->flags & SEC_ALLOC) != 0
          && r_type == R_SPU_ADDR32)
        {
          bfd_vma offset;
          offset = rel->r_offset + input_section->output_section->vma
                   + input_section->output_offset;
          spu_elf_emit_fixup (output_bfd, info, offset);
        }

      if (unresolved_reloc)
        ;
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
        {
          if (is_ea_sym)
            {
              /* ._ea is a special section that isn't allocated in SPU
                 memory, but rather occupies space in PPU memory as
                 part of an embedded ELF image.  If this reloc is
                 against a symbol defined in ._ea, then transform the
                 reloc into an equivalent one without a symbol
                 relative to the start of the ELF image.  */
              rel->r_addend += (relocation
                                - ea->vma
                                + elf_section_data (ea)->this_hdr.sh_offset);
              rel->r_info = ELF32_R_INFO (0, r_type);
            }
          emit_these_relocs = TRUE;
          continue;
        }
      else if (is_ea_sym)
        unresolved_reloc = TRUE;
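
      /* Editor's note, not in the original source: after the ._ea
         transformation above, a R_SPU_PPU32/PPU64 reloc against a
         ._ea symbol carries no symbol and an addend equal to that
         symbol's offset from the start of the embedded SPU ELF image,
         i.e. its position within ._ea plus ._ea's file offset, which
         is presumably what the PPU-side consumer of these relocs
         expects.  */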

      if (unresolved_reloc)
        {
          (*_bfd_error_handler)
            (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
             input_bfd,
             bfd_get_section_name (input_bfd, input_section),
             (long) rel->r_offset,
             howto->name,
             sym_name);
          ret = FALSE;
        }

      r = _bfd_final_link_relocate (howto,
                                    input_bfd,
                                    input_section,
                                    contents,
                                    rel->r_offset, relocation, addend);

      if (r != bfd_reloc_ok)
        {
          const char *msg = (const char *) 0;

          switch (r)
            {
            case bfd_reloc_overflow:
              if (!((*info->callbacks->reloc_overflow)
                    (info, (h ? &h->root : NULL), sym_name, howto->name,
                     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
                return FALSE;
              break;

            case bfd_reloc_undefined:
              if (!((*info->callbacks->undefined_symbol)
                    (info, sym_name, input_bfd, input_section,
                     rel->r_offset, TRUE)))
                return FALSE;
              break;

            case bfd_reloc_outofrange:
              msg = _("internal error: out of range error");
              goto common_error;

            case bfd_reloc_notsupported:
              msg = _("internal error: unsupported relocation error");
              goto common_error;

            case bfd_reloc_dangerous:
              msg = _("internal error: dangerous error");
              goto common_error;

            default:
              msg = _("internal error: unknown error");
              /* fall through */

            common_error:
              ret = FALSE;
              if (!((*info->callbacks->warning)
                    (info, msg, sym_name, input_bfd, input_section,
                     rel->r_offset)))
                return FALSE;
              break;
            }
        }
    }

  if (ret
      && emit_these_relocs
      && !info->emitrelocations)
    {
      Elf_Internal_Rela *wrel;
      Elf_Internal_Shdr *rel_hdr;

      wrel = rel = relocs;
      relend = relocs + input_section->reloc_count;
      for (; rel < relend; rel++)
        {
          int r_type;

          r_type = ELF32_R_TYPE (rel->r_info);
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
            *wrel++ = *rel;
        }
      input_section->reloc_count = wrel - relocs;
      /* Backflips for _bfd_elf_link_output_relocs.  */
      rel_hdr = &elf_section_data (input_section)->rel_hdr;
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
      ret = 2;
    }

  return ret;
}

/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */

static int
spu_elf_output_symbol_hook (struct bfd_link_info *info,
                            const char *sym_name ATTRIBUTE_UNUSED,
                            Elf_Internal_Sym *sym,
                            asection *sym_sec ATTRIBUTE_UNUSED,
                            struct elf_link_hash_entry *h)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (!info->relocatable
      && htab->stub_sec != NULL
      && h != NULL
      && (h->root.type == bfd_link_hash_defined
          || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
    {
      struct got_entry *g;

      for (g = h->got.glist; g != NULL; g = g->next)
        if (htab->params->ovly_flavour == ovly_soft_icache
            ? g->br_addr == g->stub_addr
            : g->addend == 0 && g->ovl == 0)
          {
            sym->st_shndx = (_bfd_elf_section_from_bfd_section
                             (htab->stub_sec[0]->output_section->owner,
                              htab->stub_sec[0]->output_section));
            sym->st_value = g->stub_addr;
            break;
          }
    }

  return 1;
}

static int spu_plugin = 0;

void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}

/* Set ELF header e_type for plugins.  */

static void
spu_elf_post_process_headers (bfd *abfd,
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  if (spu_plugin)
    {
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);

      i_ehdrp->e_type = ET_DYN;
    }
}

/* We may add an extra PT_LOAD segment for .toe.  We also need extra
   segments for overlays.  */

static int
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  int extra = 0;
  asection *sec;

  if (info != NULL)
    {
      struct spu_link_hash_table *htab = spu_hash_table (info);
      extra = htab->num_overlays;
    }

  if (extra)
    ++extra;

  sec = bfd_get_section_by_name (abfd, ".toe");
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
    ++extra;

  return extra;
}
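
/* Editor's note, not in the original source: reading the code above,
   a link with four overlay regions and a loaded .toe section reports
   4 + 1 + 1 = 6 additional program headers: num_overlays, plus one
   more whenever any overlays are present, plus one for .toe.  */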

/* Remove .toe section from other PT_LOAD segments and put it in
   a segment of its own.  Put overlays in separate segments too.  */

static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
        if ((s = m->sections[i]) == toe
            || spu_elf_section_data (s)->u.o.ovl_index != 0)
          {
            struct elf_segment_map *m2;
            bfd_vma amt;

            if (i + 1 < m->count)
              {
                amt = sizeof (struct elf_segment_map);
                amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return FALSE;
                m2->count = m->count - (i + 1);
                memcpy (m2->sections, m->sections + i + 1,
                        m2->count * sizeof (m->sections[0]));
                m2->p_type = PT_LOAD;
                m2->next = m->next;
                m->next = m2;
              }
            m->count = 1;
            if (i != 0)
              {
                m->count = i;
                amt = sizeof (struct elf_segment_map);
                m2 = bfd_zalloc (abfd, amt);
                if (m2 == NULL)
                  return FALSE;
                m2->p_type = PT_LOAD;
                m2->count = 1;
                m2->sections[0] = s;
                m2->next = m->next;
                m->next = m2;
              }
            break;
          }

  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_tdata (abfd)->segment_map;
  p_overlay = &m_overlay;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
          && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
        {
          m = *p;
          *p = m->next;
          *p_overlay = m;
          p_overlay = &m->next;
          continue;
        }

      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  *p_overlay = elf_tdata (abfd)->segment_map;
  elf_tdata (abfd)->segment_map = m_overlay;

  return TRUE;
}
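
/* Editor's note, not in the original source: as an illustration of the
   reordering above, a segment map of

     PT_LOAD { .text .rodata }  PT_LOAD { .ov_a1 }  PT_LOAD { .ov_a2 }  PT_LOAD { .toe }

   is rewritten so the single-section overlay segments come first:

     PT_LOAD { .ov_a1 }  PT_LOAD { .ov_a2 }  PT_LOAD { .text .rodata }  PT_LOAD { .toe }

   The section and overlay names here are hypothetical.  */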

/* Tweak the section type of .note.spu_name.  */

static bfd_boolean
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
                       Elf_Internal_Shdr *hdr,
                       asection *sec)
{
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
    hdr->sh_type = SHT_NOTE;
  return TRUE;
}

/* Tweak phdrs before writing them out.  */

static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = tdata->program_header_size / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
        if (m->count != 0
            && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
          {
            /* Mark this as an overlay header.  */
            phdr[i].p_flags |= PF_OVERLAY;

            if (htab->ovtab != NULL && htab->ovtab->size != 0
                && htab->params->ovly_flavour != ovly_soft_icache)
              {
                bfd_byte *p = htab->ovtab->contents;
                unsigned int off = o * 16 + 8;

                /* Write file_off into _ovly_table.  */
                bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
              }
          }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
        {
          bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

          bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
        }
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
        unsigned adjust;

        adjust = -phdr[i].p_filesz & 15;
        if (adjust != 0
            && last != NULL
            && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
          break;

        adjust = -phdr[i].p_memsz & 15;
        if (adjust != 0
            && last != NULL
            && phdr[i].p_filesz != 0
            && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
            && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
          break;

        if (phdr[i].p_filesz != 0)
          last = &phdr[i];
      }

  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
        {
          unsigned adjust;

          adjust = -phdr[i].p_filesz & 15;
          phdr[i].p_filesz += adjust;

          adjust = -phdr[i].p_memsz & 15;
          phdr[i].p_memsz += adjust;
        }

  return TRUE;
}
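
/* Editor's sketch, not in the original source: "-x & 15" is the
   amount needed to round x up to a multiple of 16.  For example, with
   p_filesz == 0x2a the adjustment is (-0x2a) & 15 == 6, giving a
   padded size of 0x30.  The first loop above only checks that the
   padding cannot run into the following segment; the second loop
   applies it once all segments have passed the check.  */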

bfd_boolean
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
        {
          asection *isec;

          if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
            continue;

          /* Walk over each section attached to the input bfd.  */
          for (isec = ibfd->sections; isec != NULL; isec = isec->next)
            {
              Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
              bfd_vma base_end;

              /* If there aren't any relocs, then there's nothing more
                 to do.  */
              if ((isec->flags & SEC_RELOC) == 0
                  || isec->reloc_count == 0)
                continue;

              /* Get the relocs.  */
              internal_relocs =
                _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
                                           info->keep_memory);
              if (internal_relocs == NULL)
                return FALSE;

              /* 1 quadword can contain up to 4 R_SPU_ADDR32
                 relocations.  They are stored in a single word by
                 saving the upper 28 bits of the address and setting the
                 lower 4 bits to a bit mask of the words that have the
                 relocation.  BASE_END keeps track of the next quadword. */
              irela = internal_relocs;
              irelaend = irela + isec->reloc_count;
              base_end = 0;
              for (; irela < irelaend; irela++)
                if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
                    && irela->r_offset >= base_end)
                  {
                    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
                    fixup_count++;
                  }
            }
        }

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (output_bfd, sfixup, size))
        return FALSE;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
        return FALSE;
    }
  return TRUE;
}
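
/* Editor's note, not in the original source: the sizing above counts
   at most one fixup record per 16-byte quadword, so e.g. four
   R_SPU_ADDR32 relocs at offsets 0x30, 0x34, 0x38 and 0x3c contribute
   a single record, while a fifth at 0x40 starts a new one.  The extra
   record added to the count is the all-zero sentinel that terminates
   the table.  */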

#define TARGET_BIG_SYM          bfd_elf32_spu_vec
#define TARGET_BIG_NAME         "elf32-spu"
#define ELF_ARCH                bfd_arch_spu
#define ELF_MACHINE_CODE        EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE         0x80
#define elf_backend_rela_normal         1
#define elf_backend_can_gc_sections     1

#define bfd_elf32_bfd_reloc_type_lookup         spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup         spu_elf_reloc_name_lookup
#define elf_info_to_howto                       spu_elf_info_to_howto
#define elf_backend_count_relocs                spu_elf_count_relocs
#define elf_backend_relocate_section            spu_elf_relocate_section
#define elf_backend_symbol_processing           spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook     spu_elf_output_symbol_hook
#define elf_backend_object_p                    spu_elf_object_p
#define bfd_elf32_new_section_hook              spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create    spu_elf_link_hash_table_create

#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
#define elf_backend_modify_segment_map          spu_elf_modify_segment_map
#define elf_backend_modify_program_headers      spu_elf_modify_program_headers
#define elf_backend_post_process_headers        spu_elf_post_process_headers
#define elf_backend_fake_sections               spu_elf_fake_sections
#define elf_backend_special_sections            spu_elf_special_sections
#define bfd_elf32_bfd_final_link                spu_elf_final_link

#include "elf32-target.h"
