/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */
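/* HOWTO arguments, in order: type, rightshift, size, bitsize,
   pc_relative, bitpos, complain_on_overflow, special_function, name,
   partial_inplace, src_mask, dst_mask, pcrel_offset.  */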

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,      0, 0, 0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADD_PIC",
         FALSE, 0, 0x00000000, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
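  /* For REL9 the two high bits land at bit positions 23-24 (dst_mask
     0x0180007f); for REL9I they land at positions 14-15 (dst_mask
     0x0000c07f).  The low seven bits stay at positions 0-6 in both
     encodings.  */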
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}

static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */
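/* A PT_LOAD segment carrying the PF_OVERLAY flag is an overlay segment.
   A new overlay buffer is counted whenever the low 18 bits of p_vaddr
   (the local store address) differ from the previous overlay segment;
   overlays loaded at the same local store address share a buffer.  */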

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (ELF_SECTION_SIZE (shdr, phdr) != 0
                    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return TRUE;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */
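/* For normal overlays, ADDEND records the reloc addend so that calls to
   SYM+addend get their own stub.  For soft-icache stubs the same slot
   holds BR_ADDR, the address of the branch that uses the stub.  */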

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};
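
/* Fetch the SPU-specific hash table from the generic linker info,
   or NULL if the hash table was created by some other ELF backend.  */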
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)

struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

static struct function_info *find_function (asection *, bfd_vma,
                                            struct bfd_link_info *);

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry),
                                      SPU_ELF_DATA))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
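  /* A quadword holds 16 such bytes, hence the subtraction of 4 from
     the log2 of max_branch below.  */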
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
}

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
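      /* The contents follow the usual ELF note layout: a 12-byte
         header (namesz, descsz, type == 1), the SPU_PLUGIN_NAME string
         padded to a 4-byte boundary, then the output file name as the
         note descriptor.  */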
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
        htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
               | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
        return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];
              vma_start = s0->vma;
              ovl_end = (s0->vma
                         + ((bfd_vma) 1
                            << (htab->num_lines_log2 + htab->line_size_log2)));
              --i;
              break;
            }
          else
            ovl_end = s->vma + s->size;
        }

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma >= ovl_end)
            break;

          /* A section in an overlay area called .ovl.init is not
             an overlay, in the sense that it might be loaded in
             by the overlay manager, but rather the initial
             section contents for the overlay buffer.  */
          if (strncmp (s->name, ".ovl.init", 9) != 0)
            {
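              /* NUM_BUF is the 1-based index of the cache line this
                 section occupies; SET_ID counts the sections assigned
                 to the same line.  */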
              num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
              set_id = (num_buf == prev_buf)? set_id + 1 : 0;
              prev_buf = num_buf;

              if ((s->vma - vma_start) & (htab->params->line_size - 1))
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "does not start on a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }
              else if (s->size > htab->params->line_size)
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "is larger than a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }

              alloc_sec[ovl_index++] = s;
              spu_elf_section_data (s)->u.o.ovl_index
                = (set_id << htab->num_lines_log2) + num_buf;
              spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
            }
        }

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              info->callbacks->einfo (_("%X%P: overlay section %A "
                                        "is not in cache area.\n"),
                                      alloc_sec[i-1]);
              bfd_set_error (bfd_error_bad_value);
              return 0;
            }
          else
            ovl_end = s->vma + s->size;
        }
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
         Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];

              if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
                {
                  ++num_buf;
                  if (strncmp (s0->name, ".ovl.init", 9) != 0)
                    {
                      alloc_sec[ovl_index] = s0;
                      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
                      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
                    }
                  else
                    ovl_end = s->vma + s->size;
                }
              if (strncmp (s->name, ".ovl.init", 9) != 0)
                {
                  alloc_sec[ovl_index] = s;
                  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
                  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
                  if (s0->vma != s->vma)
                    {
                      info->callbacks->einfo (_("%X%P: overlay sections %A "
                                                "and %A do not start at the "
                                                "same address.\n"),
                                              s0, s);
                      bfd_set_error (bfd_error_bad_value);
                      return 0;
                    }
                  if (ovl_end < s->vma + s->size)
                    ovl_end = s->vma + s->size;
                }
            }
          else
            ovl_end = s->vma + s->size;
        }
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
        return 0;

      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_undefined;
          h->ref_regular = 1;
          h->ref_regular_nonweak = 1;
          h->non_elf = 0;
        }
      htab->ovly_entry[i] = h;
    }

  return 2;
}

/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA     0x30000000
#define BRASL   0x31000000
#define BR      0x32000000
#define BRSL    0x33000000
#define NOP     0x40200000
#define LNOP    0x00200000
#define ILA     0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}

/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
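  /* br000_ovl_stub through br111_ovl_stub encode an lrlive value of
     0 through 7 extracted from the branch instruction; see
     needs_ovl_stub and build_stub.  */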
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};

/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
        {
          call = (contents[0] & 0xfd) == 0x31;
          if (call
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);

            }
        }
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
          && !(branch || hint)
          && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
        lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
        ret = call_ovl_stub;
      else
        ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
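  /* Stubs are recorded on got_entry lists, reached via h->got.glist
     for global symbols and via elf_local_got_ents for locals.  */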
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return FALSE;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */
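/* Stub sizes in bytes, assuming ovly_normal == 0 and ovly_soft_icache
   == 1 (as the entry_names indexing above implies): normal stubs are
   16 bytes, or 8 when compact; soft-icache stubs are 32 bytes, or 16
   when compact.  */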

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  return 16 << params->ovly_flavour >> params->compact_stub;
}

static unsigned int
ovl_stub_size_log2 (struct spu_elf_params *params)
{
  return 4 + params->ovly_flavour - params->compact_stub;
}

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/

static bfd_boolean
build_stub (struct bfd_link_info *info,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
        g->br_addr = (irela->r_offset
                      + isec->output_offset
                      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
      if (g == NULL)
        abort ();

      if (g->ovl == 0 && ovl != 0)
        return TRUE;

      if (g->stub_addr != (bfd_vma) -1)
        return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
        + htab->ovly_entry[0]->root.u.def.section->output_offset
        + htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
      else
        bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
           && htab->params->compact_stub)
    {
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      else
        bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
                  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
           && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
        ;
      else if (stub_type == call_ovl_stub)
        /* A brsl makes lr live and *(*sp+16) is live.
           Tail calls have the same liveness.  */
        lrlive = 5;
      else if (!htab->params->lrlive_analysis)
        /* Assume stack frame and lr save.  */
        lrlive = 1;
      else if (irela != NULL)
        {
          /* Analyse branch instructions.  */
          struct function_info *caller;
          bfd_vma off;

          caller = find_function (isec, irela->r_offset, info);
          if (caller->start == NULL)
            off = irela->r_offset;
          else
            {
              struct function_info *found = NULL;

              /* Find the earliest piece of this function that
                 has frame adjusting instructions.  We might
                 see dynamic frame adjustment (eg. for alloca)
                 in some later piece, but functions using
                 alloca always set up a frame earlier.  Frame
                 setup instructions are always in one piece.  */
              if (caller->lr_store != (bfd_vma) -1
                  || caller->sp_adjust != (bfd_vma) -1)
                found = caller;
              while (caller->start != NULL)
                {
                  caller = caller->start;
                  if (caller->lr_store != (bfd_vma) -1
                      || caller->sp_adjust != (bfd_vma) -1)
                    found = caller;
                }
              if (found != NULL)
                caller = found;
              off = (bfd_vma) -1;
            }

          if (off > caller->sp_adjust)
            {
              if (off > caller->lr_store)
                /* Only *(*sp+16) is live.  */
                lrlive = 1;
              else
                /* If no lr save, then we must be in a
                   leaf function with a frame.
                   lr is still live.  */
                lrlive = 4;
            }
          else if (off > caller->lr_store)
            {
              /* Between lr save and stack adjust.  */
              lrlive = 3;
              /* This should never happen since prologues won't
                 be split here.  */
              BFD_ASSERT (0);
            }
          else
            /* On entry to function.  */
            lrlive = 5;

          if (stub_type != br000_ovl_stub
              && lrlive != stub_type - br000_ovl_stub)
            info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
                                      "from analysis (%u)\n"),
                                    isec, irela->r_offset, lrlive,
                                    stub_type - br000_ovl_stub);
        }

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
        lrlive = stub_type - br000_ovl_stub;

      if (ovl == 0)
        to = (htab->ovly_entry[1]->root.u.def.value
              + htab->ovly_entry[1]->root.u.def.section->output_offset
              + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
         set up an xor pattern that can be used by the icache manager
         to modify this branch to go directly to its destination.  */
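      /* For an absolute branch the pattern is simply dest ^ br_dest;
         for an R_SPU_REL16 branch both values are taken relative to
         the branch address, so the xor applies to the displacement
         field instead.  */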
1379
      g->stub_addr += 4;
1380
      br_dest = g->stub_addr;
1381
      if (irela == NULL)
1382
        {
1383
          /* Except in the case of _SPUEAR_ stubs, the branch in
1384
             question is the one in the stub itself.  */
1385
          BFD_ASSERT (stub_type == nonovl_stub);
1386
          g->br_addr = g->stub_addr;
1387
          br_dest = to;
1388
        }
1389
 
1390
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
1391
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
1392
                  sec->contents + sec->size);
1393
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
1394
                  sec->contents + sec->size + 4);
1395
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
1396
                  sec->contents + sec->size + 8);
1397
      patt = dest ^ br_dest;
1398
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
1399
        patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
1400
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
1401
                  sec->contents + sec->size + 12);
1402
 
1403
      if (ovl == 0)
1404
        /* Extra space for linked list entries.  */
1405
        sec->size += 16;
1406
    }
1407
  else
1408
    abort ();
1409
 
1410
  sec->size += ovl_stub_size (htab->params);
1411
 
1412
  if (htab->params->emit_stub_syms)
1413
    {
1414
      size_t len;
1415
      char *name;
1416
      int add;
1417
 
1418
      len = 8 + sizeof (".ovl_call.") - 1;
1419
      if (h != NULL)
1420
        len += strlen (h->root.root.string);
1421
      else
1422
        len += 8 + 1 + 8;
1423
      add = 0;
1424
      if (irela != NULL)
1425
        add = (int) irela->r_addend & 0xffffffff;
1426
      if (add != 0)
1427
        len += 1 + 8;
1428
      name = bfd_malloc (len);
1429
      if (name == NULL)
1430
        return FALSE;
1431
 
1432
      sprintf (name, "%08x.ovl_call.", g->ovl);
1433
      if (h != NULL)
1434
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
1435
      else
1436
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
1437
                 dest_sec->id & 0xffffffff,
1438
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
1439
      if (add != 0)
1440
        sprintf (name + len - 9, "+%x", add);
1441
 
1442
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
1443
      free (name);
1444
      if (h == NULL)
1445
        return FALSE;
1446
      if (h->root.type == bfd_link_hash_new)
1447
        {
1448
          h->root.type = bfd_link_hash_defined;
1449
          h->root.u.def.section = sec;
1450
          h->size = ovl_stub_size (htab->params);
1451
          h->root.u.def.value = sec->size - h->size;
1452
          h->type = STT_FUNC;
1453
          h->ref_regular = 1;
1454
          h->def_regular = 1;
1455
          h->ref_regular_nonweak = 1;
1456
          h->forced_local = 1;
1457
          h->non_elf = 0;
1458
        }
1459
    }
1460
 
1461
  return TRUE;
1462
}
1463
 
1464
/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1465
   symbols.  */
1466
 
1467
static bfd_boolean
1468
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1469
{
1470
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1471
     invoked by the PPU.  */
1472
  struct bfd_link_info *info = inf;
1473
  struct spu_link_hash_table *htab = spu_hash_table (info);
1474
  asection *sym_sec;
1475
 
1476
  if ((h->root.type == bfd_link_hash_defined
1477
       || h->root.type == bfd_link_hash_defweak)
1478
      && h->def_regular
1479
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1480
      && (sym_sec = h->root.u.def.section) != NULL
1481
      && sym_sec->output_section != bfd_abs_section_ptr
1482
      && spu_elf_section_data (sym_sec->output_section) != NULL
1483
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1484
          || htab->params->non_overlay_stubs))
1485
    {
1486
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1487
    }
1488
 
1489
  return TRUE;
1490
}
1491
 
1492
static bfd_boolean
1493
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1494
{
1495
  /* Symbols starting with _SPUEAR_ need a stub because they may be
1496
     invoked by the PPU.  */
1497
  struct bfd_link_info *info = inf;
1498
  struct spu_link_hash_table *htab = spu_hash_table (info);
1499
  asection *sym_sec;
1500
 
1501
  if ((h->root.type == bfd_link_hash_defined
1502
       || h->root.type == bfd_link_hash_defweak)
1503
      && h->def_regular
1504
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1505
      && (sym_sec = h->root.u.def.section) != NULL
1506
      && sym_sec->output_section != bfd_abs_section_ptr
1507
      && spu_elf_section_data (sym_sec->output_section) != NULL
1508
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1509
          || htab->params->non_overlay_stubs))
1510
    {
1511
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1512
                         h->root.u.def.value, sym_sec);
1513
    }
1514
 
1515
  return TRUE;
1516
}
1517
 
1518
/* Size or build stubs.  */
1519
 
1520
static bfd_boolean
1521
process_stubs (struct bfd_link_info *info, bfd_boolean build)
1522
{
1523
  struct spu_link_hash_table *htab = spu_hash_table (info);
1524
  bfd *ibfd;
1525
 
1526
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1527
    {
1528
      extern const bfd_target bfd_elf32_spu_vec;
1529
      Elf_Internal_Shdr *symtab_hdr;
1530
      asection *isec;
1531
      Elf_Internal_Sym *local_syms = NULL;
1532
 
1533
      if (ibfd->xvec != &bfd_elf32_spu_vec)
1534
        continue;
1535
 
1536
      /* We'll need the symbol table in a second.  */
1537
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1538
      if (symtab_hdr->sh_info == 0)
1539
        continue;
1540
 
1541
      /* Walk over each section attached to the input bfd.  */
1542
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1543
        {
1544
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1545
 
1546
          /* If there aren't any relocs, then there's nothing more to do.  */
1547
          if ((isec->flags & SEC_RELOC) == 0
1548
              || isec->reloc_count == 0)
1549
            continue;
1550
 
1551
          if (!maybe_needs_stubs (isec))
1552
            continue;
1553
 
1554
          /* Get the relocs.  */
1555
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1556
                                                       info->keep_memory);
1557
          if (internal_relocs == NULL)
1558
            goto error_ret_free_local;
1559
 
1560
          /* Now examine each relocation.  */
1561
          irela = internal_relocs;
1562
          irelaend = irela + isec->reloc_count;
1563
          for (; irela < irelaend; irela++)
1564
            {
1565
              enum elf_spu_reloc_type r_type;
1566
              unsigned int r_indx;
1567
              asection *sym_sec;
1568
              Elf_Internal_Sym *sym;
1569
              struct elf_link_hash_entry *h;
1570
              enum _stub_type stub_type;
1571
 
1572
              r_type = ELF32_R_TYPE (irela->r_info);
1573
              r_indx = ELF32_R_SYM (irela->r_info);
1574
 
1575
              if (r_type >= R_SPU_max)
1576
                {
1577
                  bfd_set_error (bfd_error_bad_value);
1578
                error_ret_free_internal:
1579
                  if (elf_section_data (isec)->relocs != internal_relocs)
1580
                    free (internal_relocs);
1581
                error_ret_free_local:
1582
                  if (local_syms != NULL
1583
                      && (symtab_hdr->contents
1584
                          != (unsigned char *) local_syms))
1585
                    free (local_syms);
1586
                  return FALSE;
1587
                }
1588
 
1589
              /* Determine the reloc target section.  */
1590
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1591
                goto error_ret_free_internal;
1592
 
1593
              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1594
                                          NULL, info);
1595
              if (stub_type == no_stub)
1596
                continue;
1597
              else if (stub_type == stub_error)
1598
                goto error_ret_free_internal;
1599
 
1600
              if (htab->stub_count == NULL)
1601
                {
1602
                  bfd_size_type amt;
1603
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1604
                  htab->stub_count = bfd_zmalloc (amt);
1605
                  if (htab->stub_count == NULL)
1606
                    goto error_ret_free_internal;
1607
                }
1608
 
1609
              if (!build)
1610
                {
1611
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1612
                    goto error_ret_free_internal;
1613
                }
1614
              else
1615
                {
1616
                  bfd_vma dest;
1617
 
1618
                  if (h != NULL)
1619
                    dest = h->root.u.def.value;
1620
                  else
1621
                    dest = sym->st_value;
1622
                  dest += irela->r_addend;
1623
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1624
                                   dest, sym_sec))
1625
                    goto error_ret_free_internal;
1626
                }
1627
            }
1628
 
1629
          /* We're done with the internal relocs, free them.  */
1630
          if (elf_section_data (isec)->relocs != internal_relocs)
1631
            free (internal_relocs);
1632
        }
1633
 
1634
      if (local_syms != NULL
1635
          && symtab_hdr->contents != (unsigned char *) local_syms)
1636
        {
1637
          if (!info->keep_memory)
1638
            free (local_syms);
1639
          else
1640
            symtab_hdr->contents = (unsigned char *) local_syms;
1641
        }
1642
    }
1643
 
1644
  return TRUE;
1645
}
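/* A minimal sketch of how the two passes above are driven (the real
   call sites are spu_elf_size_stubs and spu_elf_build_stubs below);
   not compiled, for illustration only.  */
#if 0
  /* Sizing pass: only count_stub is called, filling htab->stub_count.  */
  if (!process_stubs (info, FALSE))
    return 0;

  /* ... create and size the .stub sections from htab->stub_count ...  */

  /* Build pass: build_stub emits the stub code into those sections.  */
  process_stubs (info, TRUE);
#endif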
1646
 
1647
/* Allocate space for overlay call and return stubs.
1648
   Return 0 on error, 1 if no overlays, 2 otherwise.  */
1649
 
1650
int
1651
spu_elf_size_stubs (struct bfd_link_info *info)
1652
{
1653
  struct spu_link_hash_table *htab;
1654
  bfd *ibfd;
1655
  bfd_size_type amt;
1656
  flagword flags;
1657
  unsigned int i;
1658
  asection *stub;
1659
 
1660
  if (!process_stubs (info, FALSE))
1661
    return 0;
1662
 
1663
  htab = spu_hash_table (info);
1664
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1665
  if (htab->stub_err)
1666
    return 0;
1667
 
1668
  ibfd = info->input_bfds;
1669
  if (htab->stub_count != NULL)
1670
    {
1671
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1672
      htab->stub_sec = bfd_zmalloc (amt);
1673
      if (htab->stub_sec == NULL)
1674
        return 0;
1675
 
1676
      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1677
               | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1678
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1679
      htab->stub_sec[0] = stub;
1680
      if (stub == NULL
1681
          || !bfd_set_section_alignment (ibfd, stub,
1682
                                         ovl_stub_size_log2 (htab->params)))
1683
        return 0;
1684
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1685
      if (htab->params->ovly_flavour == ovly_soft_icache)
1686
        /* Extra space for linked list entries.  */
1687
        stub->size += htab->stub_count[0] * 16;
1688
 
1689
      for (i = 0; i < htab->num_overlays; ++i)
1690
        {
1691
          asection *osec = htab->ovl_sec[i];
1692
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1693
          stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1694
          htab->stub_sec[ovl] = stub;
1695
          if (stub == NULL
1696
              || !bfd_set_section_alignment (ibfd, stub,
1697
                                             ovl_stub_size_log2 (htab->params)))
1698
            return 0;
1699
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1700
        }
1701
    }
1702
 
1703
  if (htab->params->ovly_flavour == ovly_soft_icache)
1704
    {
1705
      /* Space for icache manager tables.
1706
         a) Tag array, one quadword per cache line.
1707
         b) Rewrite "to" list, one quadword per cache line.
1708
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1709
            a power-of-two number of full quadwords) per cache line.  */
1710
 
1711
      flags = SEC_ALLOC;
1712
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1713
      if (htab->ovtab == NULL
1714
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1715
        return 0;
1716
 
1717
      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1718
                          << htab->num_lines_log2;
1719
 
1720
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1721
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1722
      if (htab->init == NULL
1723
          || !bfd_set_section_alignment (ibfd, htab->init, 4))
1724
        return 0;
1725
 
1726
      htab->init->size = 16;
1727
    }
1728
  else if (htab->stub_count == NULL)
1729
    return 1;
1730
  else
1731
    {
1732
      /* htab->ovtab consists of two arrays.
1733
         .      struct {
1734
         .        u32 vma;
1735
         .        u32 size;
1736
         .        u32 file_off;
1737
         .        u32 buf;
1738
         .      } _ovly_table[];
1739
         .
1740
         .      struct {
1741
         .        u32 mapped;
1742
         .      } _ovly_buf_table[];
1743
         .  */
1744
 
1745
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1746
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1747
      if (htab->ovtab == NULL
1748
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1749
        return 0;
1750
 
1751
      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1752
    }
1753
 
1754
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1755
  if (htab->toe == NULL
1756
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1757
    return 0;
1758
  htab->toe->size = 16;
1759
 
1760
  return 2;
1761
}
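/* Worked example of the sizing above, with made-up parameters: for a
   normal overlay link with num_overlays == 3 and num_buf == 2, .ovtab
   is 3*16 + 16 + 2*4 == 72 bytes; for soft-icache with
   num_lines_log2 == 5 and fromelem_size_log2 == 0, .ovtab is
   (16 + 16 + 16) << 5 == 1536 bytes, plus the fixed 16-byte .ovini.  */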
1762
 
1763
/* Called from ld to place overlay manager data sections.  This is done
1764
   after the overlay manager itself is loaded, mainly so that the
1765
   linker's htab->init section is placed after any other .ovl.init
1766
   sections.  */
1767
 
1768
void
1769
spu_elf_place_overlay_data (struct bfd_link_info *info)
1770
{
1771
  struct spu_link_hash_table *htab = spu_hash_table (info);
1772
  unsigned int i;
1773
 
1774
  if (htab->stub_sec != NULL)
1775
    {
1776
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1777
 
1778
      for (i = 0; i < htab->num_overlays; ++i)
1779
        {
1780
          asection *osec = htab->ovl_sec[i];
1781
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1782
          (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1783
        }
1784
    }
1785
 
1786
  if (htab->params->ovly_flavour == ovly_soft_icache)
1787
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1788
 
1789
  if (htab->ovtab != NULL)
1790
    {
1791
      const char *ovout = ".data";
1792
      if (htab->params->ovly_flavour == ovly_soft_icache)
1793
        ovout = ".bss";
1794
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1795
    }
1796
 
1797
  if (htab->toe != NULL)
1798
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1799
}
1800
 
1801
/* Functions to handle embedded spu_ovl.o object.  */
1802
 
1803
static void *
1804
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1805
{
1806
  return stream;
1807
}
1808
 
1809
static file_ptr
1810
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1811
               void *stream,
1812
               void *buf,
1813
               file_ptr nbytes,
1814
               file_ptr offset)
1815
{
1816
  struct _ovl_stream *os;
1817
  size_t count;
1818
  size_t max;
1819
 
1820
  os = (struct _ovl_stream *) stream;
1821
  max = (const char *) os->end - (const char *) os->start;
1822
 
1823
  if ((ufile_ptr) offset >= max)
1824
    return 0;
1825
 
1826
  count = nbytes;
1827
  if (count > max - offset)
1828
    count = max - offset;
1829
 
1830
  memcpy (buf, (const char *) os->start + offset, count);
1831
  return count;
1832
}
1833
 
1834
bfd_boolean
1835
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1836
{
1837
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1838
                              "elf32-spu",
1839
                              ovl_mgr_open,
1840
                              (void *) stream,
1841
                              ovl_mgr_pread,
1842
                              NULL,
1843
                              NULL);
1844
  return *ovl_bfd != NULL;
1845
}
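/* A minimal sketch of a caller, assuming the overlay manager image is
   embedded between two linker-provided symbols; those symbol names are
   hypothetical, and only the struct _ovl_stream fields used by
   ovl_mgr_pread above (start and end) are relied on.  Not compiled.  */
#if 0
  extern const char ovl_mgr_image_start[];
  extern const char ovl_mgr_image_end[];
  struct _ovl_stream ovl_stream;
  bfd *ovl_bfd;

  ovl_stream.start = ovl_mgr_image_start;
  ovl_stream.end = ovl_mgr_image_end;
  if (!spu_elf_open_builtin_lib (&ovl_bfd, &ovl_stream))
    return FALSE;
#endif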
1846
 
1847
static unsigned int
1848
overlay_index (asection *sec)
1849
{
1850
  if (sec == NULL
1851
      || sec->output_section == bfd_abs_section_ptr)
1852
    return 0;
1853
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1854
}
1855
 
1856
/* Define an STT_OBJECT symbol.  */
1857
 
1858
static struct elf_link_hash_entry *
1859
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1860
{
1861
  struct elf_link_hash_entry *h;
1862
 
1863
  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1864
  if (h == NULL)
1865
    return NULL;
1866
 
1867
  if (h->root.type != bfd_link_hash_defined
1868
      || !h->def_regular)
1869
    {
1870
      h->root.type = bfd_link_hash_defined;
1871
      h->root.u.def.section = htab->ovtab;
1872
      h->type = STT_OBJECT;
1873
      h->ref_regular = 1;
1874
      h->def_regular = 1;
1875
      h->ref_regular_nonweak = 1;
1876
      h->non_elf = 0;
1877
    }
1878
  else if (h->root.u.def.section->owner != NULL)
1879
    {
1880
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1881
                             h->root.u.def.section->owner,
1882
                             h->root.root.string);
1883
      bfd_set_error (bfd_error_bad_value);
1884
      return NULL;
1885
    }
1886
  else
1887
    {
1888
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1889
                             h->root.root.string);
1890
      bfd_set_error (bfd_error_bad_value);
1891
      return NULL;
1892
    }
1893
 
1894
  return h;
1895
}
1896
 
1897
/* Fill in all stubs and the overlay tables.  */
1898
 
1899
static bfd_boolean
1900
spu_elf_build_stubs (struct bfd_link_info *info)
1901
{
1902
  struct spu_link_hash_table *htab = spu_hash_table (info);
1903
  struct elf_link_hash_entry *h;
1904
  bfd_byte *p;
1905
  asection *s;
1906
  bfd *obfd;
1907
  unsigned int i;
1908
 
1909
  if (htab->num_overlays != 0)
1910
    {
1911
      for (i = 0; i < 2; i++)
1912
        {
1913
          h = htab->ovly_entry[i];
1914
          if (h != NULL
1915
              && (h->root.type == bfd_link_hash_defined
1916
                  || h->root.type == bfd_link_hash_defweak)
1917
              && h->def_regular)
1918
            {
1919
              s = h->root.u.def.section->output_section;
1920
              if (spu_elf_section_data (s)->u.o.ovl_index)
1921
                {
1922
                  (*_bfd_error_handler) (_("%s in overlay section"),
1923
                                         h->root.root.string);
1924
                  bfd_set_error (bfd_error_bad_value);
1925
                  return FALSE;
1926
                }
1927
            }
1928
        }
1929
    }
1930
 
1931
  if (htab->stub_sec != NULL)
1932
    {
1933
      for (i = 0; i <= htab->num_overlays; i++)
1934
        if (htab->stub_sec[i]->size != 0)
1935
          {
1936
            htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1937
                                                      htab->stub_sec[i]->size);
1938
            if (htab->stub_sec[i]->contents == NULL)
1939
              return FALSE;
1940
            htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1941
            htab->stub_sec[i]->size = 0;
1942
          }
1943
 
1944
      /* Fill in all the stubs.  */
1945
      process_stubs (info, TRUE);
1946
      if (!htab->stub_err)
1947
        elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1948
 
1949
      if (htab->stub_err)
1950
        {
1951
          (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1952
          bfd_set_error (bfd_error_bad_value);
1953
          return FALSE;
1954
        }
1955
 
1956
      for (i = 0; i <= htab->num_overlays; i++)
1957
        {
1958
          if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1959
            {
1960
              (*_bfd_error_handler)  (_("stubs don't match calculated size"));
1961
              bfd_set_error (bfd_error_bad_value);
1962
              return FALSE;
1963
            }
1964
          htab->stub_sec[i]->rawsize = 0;
1965
        }
1966
    }
1967
 
1968
  if (htab->ovtab == NULL || htab->ovtab->size == 0)
1969
    return TRUE;
1970
 
1971
  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1972
  if (htab->ovtab->contents == NULL)
1973
    return FALSE;
1974
 
1975
  p = htab->ovtab->contents;
1976
  if (htab->params->ovly_flavour == ovly_soft_icache)
1977
    {
1978
      bfd_vma off;
1979
 
1980
      h = define_ovtab_symbol (htab, "__icache_tag_array");
1981
      if (h == NULL)
1982
        return FALSE;
1983
      h->root.u.def.value = 0;
1984
      h->size = 16 << htab->num_lines_log2;
1985
      off = h->size;
1986
 
1987
      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1988
      if (h == NULL)
1989
        return FALSE;
1990
      h->root.u.def.value = 16 << htab->num_lines_log2;
1991
      h->root.u.def.section = bfd_abs_section_ptr;
1992
 
1993
      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1994
      if (h == NULL)
1995
        return FALSE;
1996
      h->root.u.def.value = off;
1997
      h->size = 16 << htab->num_lines_log2;
1998
      off += h->size;
1999
 
2000
      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2001
      if (h == NULL)
2002
        return FALSE;
2003
      h->root.u.def.value = 16 << htab->num_lines_log2;
2004
      h->root.u.def.section = bfd_abs_section_ptr;
2005
 
2006
      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2007
      if (h == NULL)
2008
        return FALSE;
2009
      h->root.u.def.value = off;
2010
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2011
      off += h->size;
2012
 
2013
      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2014
      if (h == NULL)
2015
        return FALSE;
2016
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
2017
                                   + htab->num_lines_log2);
2018
      h->root.u.def.section = bfd_abs_section_ptr;
2019
 
2020
      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2021
      if (h == NULL)
2022
        return FALSE;
2023
      h->root.u.def.value = htab->fromelem_size_log2;
2024
      h->root.u.def.section = bfd_abs_section_ptr;
2025
 
2026
      h = define_ovtab_symbol (htab, "__icache_base");
2027
      if (h == NULL)
2028
        return FALSE;
2029
      h->root.u.def.value = htab->ovl_sec[0]->vma;
2030
      h->root.u.def.section = bfd_abs_section_ptr;
2031
      h->size = htab->num_buf << htab->line_size_log2;
2032
 
2033
      h = define_ovtab_symbol (htab, "__icache_linesize");
2034
      if (h == NULL)
2035
        return FALSE;
2036
      h->root.u.def.value = 1 << htab->line_size_log2;
2037
      h->root.u.def.section = bfd_abs_section_ptr;
2038
 
2039
      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2040
      if (h == NULL)
2041
        return FALSE;
2042
      h->root.u.def.value = htab->line_size_log2;
2043
      h->root.u.def.section = bfd_abs_section_ptr;
2044
 
2045
      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2046
      if (h == NULL)
2047
        return FALSE;
2048
      h->root.u.def.value = -htab->line_size_log2;
2049
      h->root.u.def.section = bfd_abs_section_ptr;
2050
 
2051
      h = define_ovtab_symbol (htab, "__icache_cachesize");
2052
      if (h == NULL)
2053
        return FALSE;
2054
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2055
      h->root.u.def.section = bfd_abs_section_ptr;
2056
 
2057
      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2058
      if (h == NULL)
2059
        return FALSE;
2060
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2061
      h->root.u.def.section = bfd_abs_section_ptr;
2062
 
2063
      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2064
      if (h == NULL)
2065
        return FALSE;
2066
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2067
      h->root.u.def.section = bfd_abs_section_ptr;
2068
 
2069
      if (htab->init != NULL && htab->init->size != 0)
2070
        {
2071
          htab->init->contents = bfd_zalloc (htab->init->owner,
2072
                                             htab->init->size);
2073
          if (htab->init->contents == NULL)
2074
            return FALSE;
2075
 
2076
          h = define_ovtab_symbol (htab, "__icache_fileoff");
2077
          if (h == NULL)
2078
            return FALSE;
2079
          h->root.u.def.value = 0;
2080
          h->root.u.def.section = htab->init;
2081
          h->size = 8;
2082
        }
2083
    }
2084
  else
2085
    {
2086
      /* Write out _ovly_table.  */
2087
      /* Set low bit of .size to mark non-overlay area as present.  */
2088
      p[7] = 1;
2089
      obfd = htab->ovtab->output_section->owner;
2090
      for (s = obfd->sections; s != NULL; s = s->next)
2091
        {
2092
          unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2093
 
2094
          if (ovl_index != 0)
2095
            {
2096
              unsigned long off = ovl_index * 16;
2097
              unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2098
 
2099
              bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2100
              bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2101
                          p + off + 4);
2102
              /* file_off written later in spu_elf_modify_program_headers.  */
2103
              bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2104
            }
2105
        }
2106
 
2107
      h = define_ovtab_symbol (htab, "_ovly_table");
2108
      if (h == NULL)
2109
        return FALSE;
2110
      h->root.u.def.value = 16;
2111
      h->size = htab->num_overlays * 16;
2112
 
2113
      h = define_ovtab_symbol (htab, "_ovly_table_end");
2114
      if (h == NULL)
2115
        return FALSE;
2116
      h->root.u.def.value = htab->num_overlays * 16 + 16;
2117
      h->size = 0;
2118
 
2119
      h = define_ovtab_symbol (htab, "_ovly_buf_table");
2120
      if (h == NULL)
2121
        return FALSE;
2122
      h->root.u.def.value = htab->num_overlays * 16 + 16;
2123
      h->size = htab->num_buf * 4;
2124
 
2125
      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2126
      if (h == NULL)
2127
        return FALSE;
2128
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2129
      h->size = 0;
2130
    }
2131
 
2132
  h = define_ovtab_symbol (htab, "_EAR_");
2133
  if (h == NULL)
2134
    return FALSE;
2135
  h->root.u.def.section = htab->toe;
2136
  h->root.u.def.value = 0;
2137
  h->size = 16;
2138
 
2139
  return TRUE;
2140
}
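/* Illustrative picture of the table written above, for two overlays
   sharing one buffer (all numbers made up; file_off is filled in later
   by spu_elf_modify_program_headers):
     _ovly_table[0] = { vma 0x2000, size 0x400, file_off, buf 1 }
     _ovly_table[1] = { vma 0x2000, size 0x2a0, file_off, buf 1 }
   followed by one "mapped" word per buffer in _ovly_buf_table[].  */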
2141
 
2142
/* Check that all loadable section VMAs lie in the range
2143
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
2144
 
2145
asection *
2146
spu_elf_check_vma (struct bfd_link_info *info)
2147
{
2148
  struct elf_segment_map *m;
2149
  unsigned int i;
2150
  struct spu_link_hash_table *htab = spu_hash_table (info);
2151
  bfd *abfd = info->output_bfd;
2152
  bfd_vma hi = htab->params->local_store_hi;
2153
  bfd_vma lo = htab->params->local_store_lo;
2154
 
2155
  htab->local_store = hi + 1 - lo;
2156
 
2157
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2158
    if (m->p_type == PT_LOAD)
2159
      for (i = 0; i < m->count; i++)
2160
        if (m->sections[i]->size != 0
2161
            && (m->sections[i]->vma < lo
2162
                || m->sections[i]->vma > hi
2163
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2164
          return m->sections[i];
2165
 
2166
  return NULL;
2167
}
2168
 
2169
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
2170
   Search for stack adjusting insns, and return the sp delta.
2171
   If a store of lr is found, save the instruction offset to *LR_STORE.
2172
   If a stack adjusting instruction is found, save that offset to
2173
   *SP_ADJUST.  */
2174
 
2175
static int
2176
find_function_stack_adjust (asection *sec,
2177
                            bfd_vma offset,
2178
                            bfd_vma *lr_store,
2179
                            bfd_vma *sp_adjust)
2180
{
2181
  int reg[128];
2182
 
2183
  memset (reg, 0, sizeof (reg));
2184
  for ( ; offset + 4 <= sec->size; offset += 4)
2185
    {
2186
      unsigned char buf[4];
2187
      int rt, ra;
2188
      int imm;
2189
 
2190
      /* Assume no relocs on stack adjusting insns.  */
2191
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2192
        break;
2193
 
2194
      rt = buf[3] & 0x7f;
2195
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2196
 
2197
      if (buf[0] == 0x24 /* stqd */)
2198
        {
2199
          if (rt == 0 /* lr */ && ra == 1 /* sp */)
2200
            *lr_store = offset;
2201
          continue;
2202
        }
2203
 
2204
      /* Partly decoded immediate field.  */
2205
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2206
 
2207
      if (buf[0] == 0x1c /* ai */)
2208
        {
2209
          imm >>= 7;
2210
          imm = (imm ^ 0x200) - 0x200;
2211
          reg[rt] = reg[ra] + imm;
2212
 
2213
          if (rt == 1 /* sp */)
2214
            {
2215
              if (reg[rt] > 0)
2216
                break;
2217
              *sp_adjust = offset;
2218
              return reg[rt];
2219
            }
2220
        }
2221
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2222
        {
2223
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2224
 
2225
          reg[rt] = reg[ra] + reg[rb];
2226
          if (rt == 1)
2227
            {
2228
              if (reg[rt] > 0)
2229
                break;
2230
              *sp_adjust = offset;
2231
              return reg[rt];
2232
            }
2233
        }
2234
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2235
        {
2236
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2237
 
2238
          reg[rt] = reg[rb] - reg[ra];
2239
          if (rt == 1)
2240
            {
2241
              if (reg[rt] > 0)
2242
                break;
2243
              *sp_adjust = offset;
2244
              return reg[rt];
2245
            }
2246
        }
2247
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2248
        {
2249
          if (buf[0] >= 0x42 /* ila */)
2250
            imm |= (buf[0] & 1) << 17;
2251
          else
2252
            {
2253
              imm &= 0xffff;
2254
 
2255
              if (buf[0] == 0x40 /* il */)
2256
                {
2257
                  if ((buf[1] & 0x80) == 0)
2258
                    continue;
2259
                  imm = (imm ^ 0x8000) - 0x8000;
2260
                }
2261
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
2262
                imm <<= 16;
2263
            }
2264
          reg[rt] = imm;
2265
          continue;
2266
        }
2267
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2268
        {
2269
          reg[rt] |= imm & 0xffff;
2270
          continue;
2271
        }
2272
      else if (buf[0] == 0x04 /* ori */)
2273
        {
2274
          imm >>= 7;
2275
          imm = (imm ^ 0x200) - 0x200;
2276
          reg[rt] = reg[ra] | imm;
2277
          continue;
2278
        }
2279
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2280
        {
2281
          reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
2282
                     | ((imm & 0x4000) ? 0x00ff0000 : 0)
2283
                     | ((imm & 0x2000) ? 0x0000ff00 : 0)
2284
                     | ((imm & 0x1000) ? 0x000000ff : 0));
2285
          continue;
2286
        }
2287
      else if (buf[0] == 0x16 /* andbi */)
2288
        {
2289
          imm >>= 7;
2290
          imm &= 0xff;
2291
          imm |= imm << 8;
2292
          imm |= imm << 16;
2293
          reg[rt] = reg[ra] & imm;
2294
          continue;
2295
        }
2296
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2297
        {
2298
          /* Used in pic reg load.  Say rt is trashed.  Won't be used
2299
             in stack adjust, but we need to continue past this branch.  */
2300
          reg[rt] = 0;
2301
          continue;
2302
        }
2303
      else if (is_branch (buf) || is_indirect_branch (buf))
2304
        /* If we hit a branch then we must be out of the prologue.  */
2305
        break;
2306
    }
2307
 
2308
  return 0;
2309
}
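/* Worked example: for a prologue of
     stqd  $lr,16($sp)
     ai    $sp,$sp,-48
   the stqd sets *LR_STORE, and the ai sets *SP_ADJUST and leaves
   reg[1] == -48, so the routine returns -48; callers negate that to
   record a 48 byte stack frame.  */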
2310
 
2311
/* qsort predicate to sort symbols by section and value.  */
2312
 
2313
static Elf_Internal_Sym *sort_syms_syms;
2314
static asection **sort_syms_psecs;
2315
 
2316
static int
2317
sort_syms (const void *a, const void *b)
2318
{
2319
  Elf_Internal_Sym *const *s1 = a;
2320
  Elf_Internal_Sym *const *s2 = b;
2321
  asection *sec1, *sec2;
2322
  bfd_signed_vma delta;
2323
 
2324
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2325
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2326
 
2327
  if (sec1 != sec2)
2328
    return sec1->index - sec2->index;
2329
 
2330
  delta = (*s1)->st_value - (*s2)->st_value;
2331
  if (delta != 0)
2332
    return delta < 0 ? -1 : 1;
2333
 
2334
  delta = (*s2)->st_size - (*s1)->st_size;
2335
  if (delta != 0)
2336
    return delta < 0 ? -1 : 1;
2337
 
2338
  return *s1 < *s2 ? -1 : 1;
2339
}
2340
 
2341
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2342
   entries for section SEC.  */
2343
 
2344
static struct spu_elf_stack_info *
2345
alloc_stack_info (asection *sec, int max_fun)
2346
{
2347
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2348
  bfd_size_type amt;
2349
 
2350
  amt = sizeof (struct spu_elf_stack_info);
2351
  amt += (max_fun - 1) * sizeof (struct function_info);
2352
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
2353
  if (sec_data->u.i.stack_info != NULL)
2354
    sec_data->u.i.stack_info->max_fun = max_fun;
2355
  return sec_data->u.i.stack_info;
2356
}
2357
 
2358
/* Add a new struct function_info describing a (part of a) function
2359
   starting at SYM_H.  Keep the array sorted by address.  */
2360
 
2361
static struct function_info *
2362
maybe_insert_function (asection *sec,
2363
                       void *sym_h,
2364
                       bfd_boolean global,
2365
                       bfd_boolean is_func)
2366
{
2367
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2368
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2369
  int i;
2370
  bfd_vma off, size;
2371
 
2372
  if (sinfo == NULL)
2373
    {
2374
      sinfo = alloc_stack_info (sec, 20);
2375
      if (sinfo == NULL)
2376
        return NULL;
2377
    }
2378
 
2379
  if (!global)
2380
    {
2381
      Elf_Internal_Sym *sym = sym_h;
2382
      off = sym->st_value;
2383
      size = sym->st_size;
2384
    }
2385
  else
2386
    {
2387
      struct elf_link_hash_entry *h = sym_h;
2388
      off = h->root.u.def.value;
2389
      size = h->size;
2390
    }
2391
 
2392
  for (i = sinfo->num_fun; --i >= 0; )
2393
    if (sinfo->fun[i].lo <= off)
2394
      break;
2395
 
2396
  if (i >= 0)
2397
    {
2398
      /* Don't add another entry for an alias, but do update some
2399
         info.  */
2400
      if (sinfo->fun[i].lo == off)
2401
        {
2402
          /* Prefer globals over local syms.  */
2403
          if (global && !sinfo->fun[i].global)
2404
            {
2405
              sinfo->fun[i].global = TRUE;
2406
              sinfo->fun[i].u.h = sym_h;
2407
            }
2408
          if (is_func)
2409
            sinfo->fun[i].is_func = TRUE;
2410
          return &sinfo->fun[i];
2411
        }
2412
      /* Ignore a zero-size symbol inside an existing function.  */
2413
      else if (sinfo->fun[i].hi > off && size == 0)
2414
        return &sinfo->fun[i];
2415
    }
2416
 
2417
  if (sinfo->num_fun >= sinfo->max_fun)
2418
    {
2419
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2420
      bfd_size_type old = amt;
2421
 
2422
      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2423
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2424
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2425
      sinfo = bfd_realloc (sinfo, amt);
2426
      if (sinfo == NULL)
2427
        return NULL;
2428
      memset ((char *) sinfo + old, 0, amt - old);
2429
      sec_data->u.i.stack_info = sinfo;
2430
    }
2431
 
2432
  if (++i < sinfo->num_fun)
2433
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2434
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2435
  sinfo->fun[i].is_func = is_func;
2436
  sinfo->fun[i].global = global;
2437
  sinfo->fun[i].sec = sec;
2438
  if (global)
2439
    sinfo->fun[i].u.h = sym_h;
2440
  else
2441
    sinfo->fun[i].u.sym = sym_h;
2442
  sinfo->fun[i].lo = off;
2443
  sinfo->fun[i].hi = off + size;
2444
  sinfo->fun[i].lr_store = -1;
2445
  sinfo->fun[i].sp_adjust = -1;
2446
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2447
                                                     &sinfo->fun[i].lr_store,
2448
                                                     &sinfo->fun[i].sp_adjust);
2449
  sinfo->num_fun += 1;
2450
  return &sinfo->fun[i];
2451
}
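/* Note on the growth above: max_fun grows by 20 + max_fun/2 each time
   the array fills, so starting from the initial 20 entries it runs
   20, 50, 95, 162, ... i.e. roughly 1.5x geometric growth.  */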
2452
 
2453
/* Return the name of FUN.  */
2454
 
2455
static const char *
2456
func_name (struct function_info *fun)
2457
{
2458
  asection *sec;
2459
  bfd *ibfd;
2460
  Elf_Internal_Shdr *symtab_hdr;
2461
 
2462
  while (fun->start != NULL)
2463
    fun = fun->start;
2464
 
2465
  if (fun->global)
2466
    return fun->u.h->root.root.string;
2467
 
2468
  sec = fun->sec;
2469
  if (fun->u.sym->st_name == 0)
2470
    {
2471
      size_t len = strlen (sec->name);
2472
      char *name = bfd_malloc (len + 10);
2473
      if (name == NULL)
2474
        return "(null)";
2475
      sprintf (name, "%s+%lx", sec->name,
2476
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
2477
      return name;
2478
    }
2479
  ibfd = sec->owner;
2480
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2481
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2482
}
2483
 
2484
/* Read the instruction at OFF in SEC.  Return true iff the instruction
2485
   is a nop, lnop, or stop 0 (all zero insn).  */
2486
 
2487
static bfd_boolean
2488
is_nop (asection *sec, bfd_vma off)
2489
{
2490
  unsigned char insn[4];
2491
 
2492
  if (off + 4 > sec->size
2493
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2494
    return FALSE;
2495
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2496
    return TRUE;
2497
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2498
    return TRUE;
2499
  return FALSE;
2500
}
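/* For reference: "nop" encodes as 0x40200000 and "lnop" as 0x00200000,
   both of which pass the masked test above; "stop 0" is the all-zero
   word caught by the second test.  */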
2501
 
2502
/* Extend the range of FUN to cover nop padding up to LIMIT.
2503
   Return TRUE iff some instruction other than a NOP was found.  */
2504
 
2505
static bfd_boolean
2506
insns_at_end (struct function_info *fun, bfd_vma limit)
2507
{
2508
  bfd_vma off = (fun->hi + 3) & -4;
2509
 
2510
  while (off < limit && is_nop (fun->sec, off))
2511
    off += 4;
2512
  if (off < limit)
2513
    {
2514
      fun->hi = off;
2515
      return TRUE;
2516
    }
2517
  fun->hi = limit;
2518
  return FALSE;
2519
}
2520
 
2521
/* Check and fix overlapping function ranges.  Return TRUE iff there
2522
   are gaps in the current info we have about functions in SEC.  */
2523
 
2524
static bfd_boolean
2525
check_function_ranges (asection *sec, struct bfd_link_info *info)
2526
{
2527
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2528
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2529
  int i;
2530
  bfd_boolean gaps = FALSE;
2531
 
2532
  if (sinfo == NULL)
2533
    return FALSE;
2534
 
2535
  for (i = 1; i < sinfo->num_fun; i++)
2536
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2537
      {
2538
        /* Fix overlapping symbols.  */
2539
        const char *f1 = func_name (&sinfo->fun[i - 1]);
2540
        const char *f2 = func_name (&sinfo->fun[i]);
2541
 
2542
        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2543
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2544
      }
2545
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2546
      gaps = TRUE;
2547
 
2548
  if (sinfo->num_fun == 0)
2549
    gaps = TRUE;
2550
  else
2551
    {
2552
      if (sinfo->fun[0].lo != 0)
2553
        gaps = TRUE;
2554
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2555
        {
2556
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2557
 
2558
          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2559
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2560
        }
2561
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2562
        gaps = TRUE;
2563
    }
2564
  return gaps;
2565
}
2566
 
2567
/* Search current function info for a function that contains address
2568
   OFFSET in section SEC.  */
2569
 
2570
static struct function_info *
2571
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2572
{
2573
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2574
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2575
  int lo, hi, mid;
2576
 
2577
  lo = 0;
2578
  hi = sinfo->num_fun;
2579
  while (lo < hi)
2580
    {
2581
      mid = (lo + hi) / 2;
2582
      if (offset < sinfo->fun[mid].lo)
2583
        hi = mid;
2584
      else if (offset >= sinfo->fun[mid].hi)
2585
        lo = mid + 1;
2586
      else
2587
        return &sinfo->fun[mid];
2588
    }
2589
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2590
                          sec, offset);
2591
  bfd_set_error (bfd_error_bad_value);
2592
  return NULL;
2593
}
2594
 
2595
/* Add CALLEE to CALLER call list if not already present.  Return TRUE
2596
   if CALLEE was new.  If this function returns FALSE, CALLEE should
2597
   be freed.  */
2598
 
2599
static bfd_boolean
2600
insert_callee (struct function_info *caller, struct call_info *callee)
2601
{
2602
  struct call_info **pp, *p;
2603
 
2604
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2605
    if (p->fun == callee->fun)
2606
      {
2607
        /* Tail calls use less stack than normal calls.  Retain entry
2608
           for normal call over one for tail call.  */
2609
        p->is_tail &= callee->is_tail;
2610
        if (!p->is_tail)
2611
          {
2612
            p->fun->start = NULL;
2613
            p->fun->is_func = TRUE;
2614
          }
2615
        p->count += callee->count;
2616
        /* Reorder list so most recent call is first.  */
2617
        *pp = p->next;
2618
        p->next = caller->call_list;
2619
        caller->call_list = p;
2620
        return FALSE;
2621
      }
2622
  callee->next = caller->call_list;
2623
  caller->call_list = callee;
2624
  return TRUE;
2625
}
2626
 
2627
/* Copy CALL and insert the copy into CALLER.  */
2628
 
2629
static bfd_boolean
2630
copy_callee (struct function_info *caller, const struct call_info *call)
2631
{
2632
  struct call_info *callee;
2633
  callee = bfd_malloc (sizeof (*callee));
2634
  if (callee == NULL)
2635
    return FALSE;
2636
  *callee = *call;
2637
  if (!insert_callee (caller, callee))
2638
    free (callee);
2639
  return TRUE;
2640
}
2641
 
2642
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2643
   overlay stub sections.  */
2644
 
2645
static bfd_boolean
2646
interesting_section (asection *s)
2647
{
2648
  return (s->output_section != bfd_abs_section_ptr
2649
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2650
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2651
          && s->size != 0);
2652
}
2653
 
2654
/* Rummage through the relocs for SEC, looking for function calls.
2655
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
2656
   mark destination symbols on calls as being functions.  Also
2657
   look at branches, which may be tail calls or go to hot/cold
2658
   section part of same function.  */
2659
 
2660
static bfd_boolean
2661
mark_functions_via_relocs (asection *sec,
2662
                           struct bfd_link_info *info,
2663
                           int call_tree)
2664
{
2665
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2666
  Elf_Internal_Shdr *symtab_hdr;
2667
  void *psyms;
2668
  unsigned int priority = 0;
2669
  static bfd_boolean warned;
2670
 
2671
  if (!interesting_section (sec)
2672
      || sec->reloc_count == 0)
2673
    return TRUE;
2674
 
2675
  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2676
                                               info->keep_memory);
2677
  if (internal_relocs == NULL)
2678
    return FALSE;
2679
 
2680
  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2681
  psyms = &symtab_hdr->contents;
2682
  irela = internal_relocs;
2683
  irelaend = irela + sec->reloc_count;
2684
  for (; irela < irelaend; irela++)
2685
    {
2686
      enum elf_spu_reloc_type r_type;
2687
      unsigned int r_indx;
2688
      asection *sym_sec;
2689
      Elf_Internal_Sym *sym;
2690
      struct elf_link_hash_entry *h;
2691
      bfd_vma val;
2692
      bfd_boolean nonbranch, is_call;
2693
      struct function_info *caller;
2694
      struct call_info *callee;
2695
 
2696
      r_type = ELF32_R_TYPE (irela->r_info);
2697
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2698
 
2699
      r_indx = ELF32_R_SYM (irela->r_info);
2700
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2701
        return FALSE;
2702
 
2703
      if (sym_sec == NULL
2704
          || sym_sec->output_section == bfd_abs_section_ptr)
2705
        continue;
2706
 
2707
      is_call = FALSE;
2708
      if (!nonbranch)
2709
        {
2710
          unsigned char insn[4];
2711
 
2712
          if (!bfd_get_section_contents (sec->owner, sec, insn,
2713
                                         irela->r_offset, 4))
2714
            return FALSE;
2715
          if (is_branch (insn))
2716
            {
2717
              is_call = (insn[0] & 0xfd) == 0x31;
2718
              priority = insn[1] & 0x0f;
2719
              priority <<= 8;
2720
              priority |= insn[2];
2721
              priority <<= 8;
2722
              priority |= insn[3];
2723
              priority >>= 7;
2724
              if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2725
                  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2726
                {
2727
                  if (!warned)
2728
                    info->callbacks->einfo
2729
                      (_("%B(%A+0x%v): call to non-code section"
2730
                         " %B(%A), analysis incomplete\n"),
2731
                       sec->owner, sec, irela->r_offset,
2732
                       sym_sec->owner, sym_sec);
2733
                  warned = TRUE;
2734
                  continue;
2735
                }
2736
            }
2737
          else
2738
            {
2739
              nonbranch = TRUE;
2740
              if (is_hint (insn))
2741
                continue;
2742
            }
2743
        }
2744
 
2745
      if (nonbranch)
2746
        {
2747
          /* For --auto-overlay, count possible stubs we need for
2748
             function pointer references.  */
2749
          unsigned int sym_type;
2750
          if (h)
2751
            sym_type = h->type;
2752
          else
2753
            sym_type = ELF_ST_TYPE (sym->st_info);
2754
          if (sym_type == STT_FUNC)
2755
            {
2756
              if (call_tree && spu_hash_table (info)->params->auto_overlay)
2757
                spu_hash_table (info)->non_ovly_stub += 1;
2758
              /* If the symbol type is STT_FUNC then this must be a
2759
                 function pointer initialisation.  */
2760
              continue;
2761
            }
2762
          /* Ignore data references.  */
2763
          if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2764
              != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2765
            continue;
2766
          /* Otherwise we probably have a jump table reloc for
2767
             a switch statement or some other reference to a
2768
             code label.  */
2769
        }
2770
 
2771
      if (h)
2772
        val = h->root.u.def.value;
2773
      else
2774
        val = sym->st_value;
2775
      val += irela->r_addend;
2776
 
2777
      if (!call_tree)
2778
        {
2779
          struct function_info *fun;
2780
 
2781
          if (irela->r_addend != 0)
2782
            {
2783
              Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2784
              if (fake == NULL)
2785
                return FALSE;
2786
              fake->st_value = val;
2787
              fake->st_shndx
2788
                = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2789
              sym = fake;
2790
            }
2791
          if (sym)
2792
            fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2793
          else
2794
            fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2795
          if (fun == NULL)
2796
            return FALSE;
2797
          if (irela->r_addend != 0
2798
              && fun->u.sym != sym)
2799
            free (sym);
2800
          continue;
2801
        }
2802
 
2803
      caller = find_function (sec, irela->r_offset, info);
2804
      if (caller == NULL)
2805
        return FALSE;
2806
      callee = bfd_malloc (sizeof *callee);
2807
      if (callee == NULL)
2808
        return FALSE;
2809
 
2810
      callee->fun = find_function (sym_sec, val, info);
2811
      if (callee->fun == NULL)
2812
        return FALSE;
2813
      callee->is_tail = !is_call;
2814
      callee->is_pasted = FALSE;
2815
      callee->broken_cycle = FALSE;
2816
      callee->priority = priority;
2817
      callee->count = nonbranch ? 0 : 1;
2818
      if (callee->fun->last_caller != sec)
2819
        {
2820
          callee->fun->last_caller = sec;
2821
          callee->fun->call_count += 1;
2822
        }
2823
      if (!insert_callee (caller, callee))
2824
        free (callee);
2825
      else if (!is_call
2826
               && !callee->fun->is_func
2827
               && callee->fun->stack == 0)
2828
        {
2829
          /* This is either a tail call or a branch from one part of
2830
             the function to another, i.e. hot/cold section.  If the
2831
             destination has been called by some other function then
2832
             it is a separate function.  We also assume that functions
2833
             are not split across input files.  */
2834
          if (sec->owner != sym_sec->owner)
2835
            {
2836
              callee->fun->start = NULL;
2837
              callee->fun->is_func = TRUE;
2838
            }
2839
          else if (callee->fun->start == NULL)
2840
            {
2841
              struct function_info *caller_start = caller;
2842
              while (caller_start->start)
2843
                caller_start = caller_start->start;
2844
 
2845
              if (caller_start != callee->fun)
2846
                callee->fun->start = caller_start;
2847
            }
2848
          else
2849
            {
2850
              struct function_info *callee_start;
2851
              struct function_info *caller_start;
2852
              callee_start = callee->fun;
2853
              while (callee_start->start)
2854
                callee_start = callee_start->start;
2855
              caller_start = caller;
2856
              while (caller_start->start)
2857
                caller_start = caller_start->start;
2858
              if (caller_start != callee_start)
2859
                {
2860
                  callee->fun->start = NULL;
2861
                  callee->fun->is_func = TRUE;
2862
                }
2863
            }
2864
        }
2865
    }
2866
 
2867
  return TRUE;
2868
}
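/* This routine is run twice over each section: discover_functions
   below calls it with CALL_TREE false so that branch and call targets
   are registered as function starts, and the later call-graph pass
   presumably calls it again with CALL_TREE true to record the
   caller/callee edges via insert_callee.  */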
2869
 
2870
/* Handle something like .init or .fini, which has a piece of a function.
2871
   These sections are pasted together to form a single function.  */
2872
 
2873
static bfd_boolean
2874
pasted_function (asection *sec)
2875
{
2876
  struct bfd_link_order *l;
2877
  struct _spu_elf_section_data *sec_data;
2878
  struct spu_elf_stack_info *sinfo;
2879
  Elf_Internal_Sym *fake;
2880
  struct function_info *fun, *fun_start;
2881
 
2882
  fake = bfd_zmalloc (sizeof (*fake));
2883
  if (fake == NULL)
2884
    return FALSE;
2885
  fake->st_value = 0;
2886
  fake->st_size = sec->size;
2887
  fake->st_shndx
2888
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2889
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2890
  if (!fun)
2891
    return FALSE;
2892
 
2893
  /* Find a function immediately preceding this section.  */
2894
  fun_start = NULL;
2895
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2896
    {
2897
      if (l->u.indirect.section == sec)
2898
        {
2899
          if (fun_start != NULL)
2900
            {
2901
              struct call_info *callee = bfd_malloc (sizeof *callee);
2902
              if (callee == NULL)
2903
                return FALSE;
2904
 
2905
              fun->start = fun_start;
2906
              callee->fun = fun;
2907
              callee->is_tail = TRUE;
2908
              callee->is_pasted = TRUE;
2909
              callee->broken_cycle = FALSE;
2910
              callee->priority = 0;
2911
              callee->count = 1;
2912
              if (!insert_callee (fun_start, callee))
2913
                free (callee);
2914
              return TRUE;
2915
            }
2916
          break;
2917
        }
2918
      if (l->type == bfd_indirect_link_order
2919
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2920
          && (sinfo = sec_data->u.i.stack_info) != NULL
2921
          && sinfo->num_fun != 0)
2922
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
2923
    }
2924
 
2925
  /* Don't return an error if we did not find a function preceding this
2926
     section.  The section may have incorrect flags.  */
2927
  return TRUE;
2928
}
2929
 
2930
/* Map address ranges in code sections to functions.  */
2931
 
2932
static bfd_boolean
2933
discover_functions (struct bfd_link_info *info)
2934
{
2935
  bfd *ibfd;
2936
  int bfd_idx;
2937
  Elf_Internal_Sym ***psym_arr;
2938
  asection ***sec_arr;
2939
  bfd_boolean gaps = FALSE;
2940
 
2941
  bfd_idx = 0;
2942
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2943
    bfd_idx++;
2944
 
2945
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2946
  if (psym_arr == NULL)
2947
    return FALSE;
2948
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2949
  if (sec_arr == NULL)
2950
    return FALSE;
2951
 
2952
  for (ibfd = info->input_bfds, bfd_idx = 0;
2953
       ibfd != NULL;
2954
       ibfd = ibfd->link_next, bfd_idx++)
2955
    {
2956
      extern const bfd_target bfd_elf32_spu_vec;
2957
      Elf_Internal_Shdr *symtab_hdr;
2958
      asection *sec;
2959
      size_t symcount;
2960
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2961
      asection **psecs, **p;
2962
 
2963
      if (ibfd->xvec != &bfd_elf32_spu_vec)
2964
        continue;
2965
 
2966
      /* Read all the symbols.  */
2967
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2968
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2969
      if (symcount == 0)
2970
        {
2971
          if (!gaps)
2972
            for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2973
              if (interesting_section (sec))
2974
                {
2975
                  gaps = TRUE;
2976
                  break;
2977
                }
2978
          continue;
2979
        }
2980
 
2981
      if (symtab_hdr->contents != NULL)
2982
        {
2983
          /* Don't use cached symbols since the generic ELF linker
2984
             code only reads local symbols, and we need globals too.  */
2985
          free (symtab_hdr->contents);
2986
          symtab_hdr->contents = NULL;
2987
        }
2988
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2989
                                   NULL, NULL, NULL);
2990
      symtab_hdr->contents = (void *) syms;
2991
      if (syms == NULL)
2992
        return FALSE;
2993
 
2994
      /* Select defined function symbols that are going to be output.  */
2995
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2996
      if (psyms == NULL)
2997
        return FALSE;
2998
      psym_arr[bfd_idx] = psyms;
2999
      psecs = bfd_malloc (symcount * sizeof (*psecs));
3000
      if (psecs == NULL)
3001
        return FALSE;
3002
      sec_arr[bfd_idx] = psecs;
3003
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3004
        if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3005
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3006
          {
3007
            asection *s;
3008
 
3009
            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3010
            if (s != NULL && interesting_section (s))
3011
              *psy++ = sy;
3012
          }
3013
      symcount = psy - psyms;
3014
      *psy = NULL;
3015
 
3016
      /* Sort them by section and offset within section.  */
3017
      sort_syms_syms = syms;
3018
      sort_syms_psecs = psecs;
3019
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3020
 
3021
      /* Now inspect the function symbols.  */
3022
      for (psy = psyms; psy < psyms + symcount; )
3023
        {
3024
          asection *s = psecs[*psy - syms];
3025
          Elf_Internal_Sym **psy2;
3026
 
3027
          for (psy2 = psy; ++psy2 < psyms + symcount; )
3028
            if (psecs[*psy2 - syms] != s)
3029
              break;
3030
 
3031
          if (!alloc_stack_info (s, psy2 - psy))
3032
            return FALSE;
3033
          psy = psy2;
3034
        }
3035
 
3036
      /* First install info about properly typed and sized functions.
3037
         In an ideal world this will cover all code sections, except
3038
         when partitioning functions into hot and cold sections,
3039
         and the horrible pasted-together .init and .fini functions.  */
3040
      for (psy = psyms; psy < psyms + symcount; ++psy)
3041
        {
3042
          sy = *psy;
3043
          if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3044
            {
3045
              asection *s = psecs[sy - syms];
3046
              if (!maybe_insert_function (s, sy, FALSE, TRUE))
3047
                return FALSE;
3048
            }
3049
        }
3050
 
3051
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3052
        if (interesting_section (sec))
3053
          gaps |= check_function_ranges (sec, info);
3054
    }
3055
 
3056
  if (gaps)
3057
    {
3058
      /* See if we can discover more function symbols by looking at
3059
         relocations.  */
3060
      for (ibfd = info->input_bfds, bfd_idx = 0;
3061
           ibfd != NULL;
3062
           ibfd = ibfd->link_next, bfd_idx++)
3063
        {
3064
          asection *sec;
3065
 
3066
          if (psym_arr[bfd_idx] == NULL)
3067
            continue;
3068
 
3069
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3070
            if (!mark_functions_via_relocs (sec, info, FALSE))
3071
              return FALSE;
3072
        }
3073
 
3074
      for (ibfd = info->input_bfds, bfd_idx = 0;
3075
           ibfd != NULL;
3076
           ibfd = ibfd->link_next, bfd_idx++)
3077
        {
3078
          Elf_Internal_Shdr *symtab_hdr;
3079
          asection *sec;
3080
          Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3081
          asection **psecs;
3082
 
3083
          if ((psyms = psym_arr[bfd_idx]) == NULL)
3084
            continue;
3085
 
3086
          psecs = sec_arr[bfd_idx];
3087
 
3088
          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3089
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3090
 
3091
          gaps = FALSE;
3092
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3093
            if (interesting_section (sec))
3094
              gaps |= check_function_ranges (sec, info);
3095
          if (!gaps)
3096
            continue;
3097
 
3098
          /* Finally, install all globals.  */
3099
          for (psy = psyms; (sy = *psy) != NULL; ++psy)
3100
            {
3101
              asection *s;
3102
 
3103
              s = psecs[sy - syms];
3104
 
3105
              /* Global syms might be improperly typed functions.  */
3106
              if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3107
                  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3108
                {
3109
                  if (!maybe_insert_function (s, sy, FALSE, FALSE))
3110
                    return FALSE;
3111
                }
3112
            }
3113
        }
3114
 
3115
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3116
        {
3117
          extern const bfd_target bfd_elf32_spu_vec;
3118
          asection *sec;
3119
 
3120
          if (ibfd->xvec != &bfd_elf32_spu_vec)
3121
            continue;
3122
 
3123
          /* Some of the symbols we've installed as marking the
3124
             beginning of functions may have a size of zero.  Extend
3125
             the range of such functions to the beginning of the
3126
             next symbol of interest.  */
3127
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3128
            if (interesting_section (sec))
3129
              {
3130
                struct _spu_elf_section_data *sec_data;
3131
                struct spu_elf_stack_info *sinfo;
3132
 
3133
                sec_data = spu_elf_section_data (sec);
3134
                sinfo = sec_data->u.i.stack_info;
3135
                if (sinfo != NULL && sinfo->num_fun != 0)
3136
                  {
3137
                    int fun_idx;
3138
                    bfd_vma hi = sec->size;
3139
 
3140
                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3141
                      {
3142
                        sinfo->fun[fun_idx].hi = hi;
3143
                        hi = sinfo->fun[fun_idx].lo;
3144
                      }
3145
 
3146
                    sinfo->fun[0].lo = 0;
3147
                  }
3148
                /* No symbols in this section.  Must be .init or .fini
3149
                   or something similar.  */
3150
                else if (!pasted_function (sec))
3151
                  return FALSE;
3152
              }
3153
        }
3154
    }
3155
 
3156
  for (ibfd = info->input_bfds, bfd_idx = 0;
3157
       ibfd != NULL;
3158
       ibfd = ibfd->link_next, bfd_idx++)
3159
    {
3160
      if (psym_arr[bfd_idx] == NULL)
3161
        continue;
3162
 
3163
      free (psym_arr[bfd_idx]);
3164
      free (sec_arr[bfd_idx]);
3165
    }
3166
 
3167
  free (psym_arr);
3168
  free (sec_arr);
3169
 
3170
  return TRUE;
3171
}
3172
 
3173
/* Iterate over all function_info we have collected, calling DOIT on
3174
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
3175
   if ROOT_ONLY.  */
3176
 
3177
static bfd_boolean
3178
for_each_node (bfd_boolean (*doit) (struct function_info *,
3179
                                    struct bfd_link_info *,
3180
                                    void *),
3181
               struct bfd_link_info *info,
3182
               void *param,
3183
               int root_only)
3184
{
3185
  bfd *ibfd;
3186
 
3187
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3188
    {
3189
      extern const bfd_target bfd_elf32_spu_vec;
3190
      asection *sec;
3191
 
3192
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3193
        continue;
3194
 
3195
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3196
        {
3197
          struct _spu_elf_section_data *sec_data;
3198
          struct spu_elf_stack_info *sinfo;
3199
 
3200
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3201
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3202
            {
3203
              int i;
3204
              for (i = 0; i < sinfo->num_fun; ++i)
3205
                if (!root_only || !sinfo->fun[i].non_root)
3206
                  if (!doit (&sinfo->fun[i], info, param))
3207
                    return FALSE;
3208
            }
3209
        }
3210
    }
3211
  return TRUE;
3212
}
3213
 
3214
/* Transfer call info attached to struct function_info entries for
3215
   all of a given function's sections to the first entry.  */
3216
 
3217
static bfd_boolean
3218
transfer_calls (struct function_info *fun,
3219
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
3220
                void *param ATTRIBUTE_UNUSED)
3221
{
3222
  struct function_info *start = fun->start;
3223
 
3224
  if (start != NULL)
3225
    {
3226
      struct call_info *call, *call_next;
3227
 
3228
      while (start->start != NULL)
3229
        start = start->start;
3230
      for (call = fun->call_list; call != NULL; call = call_next)
3231
        {
3232
          call_next = call->next;
3233
          if (!insert_callee (start, call))
3234
            free (call);
3235
        }
3236
      fun->call_list = NULL;
3237
    }
3238
  return TRUE;
3239
}
3240
 
3241
/* Mark nodes in the call graph that are called by some other node.  */
3242
 
3243
static bfd_boolean
3244
mark_non_root (struct function_info *fun,
3245
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
3246
               void *param ATTRIBUTE_UNUSED)
3247
{
3248
  struct call_info *call;
3249
 
3250
  if (fun->visit1)
3251
    return TRUE;
3252
  fun->visit1 = TRUE;
3253
  for (call = fun->call_list; call; call = call->next)
3254
    {
3255
      call->fun->non_root = TRUE;
3256
      mark_non_root (call->fun, 0, 0);
3257
    }
3258
  return TRUE;
3259
}
3260
 
3261
/* Remove cycles from the call graph.  Set depth of nodes.  */
3262
 
3263
static bfd_boolean
3264
remove_cycles (struct function_info *fun,
3265
               struct bfd_link_info *info,
3266
               void *param)
3267
{
3268
  struct call_info **callp, *call;
3269
  unsigned int depth = *(unsigned int *) param;
3270
  unsigned int max_depth = depth;
3271
 
3272
  fun->depth = depth;
3273
  fun->visit2 = TRUE;
3274
  fun->marking = TRUE;
3275
 
3276
  callp = &fun->call_list;
3277
  while ((call = *callp) != NULL)
3278
    {
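      /* A pasted call is just the continuation of the same function in
         another section, so it does not add a level of call depth.  */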
3279
      call->max_depth = depth + !call->is_pasted;
3280
      if (!call->fun->visit2)
3281
        {
3282
          if (!remove_cycles (call->fun, info, &call->max_depth))
3283
            return FALSE;
3284
          if (max_depth < call->max_depth)
3285
            max_depth = call->max_depth;
3286
        }
3287
      else if (call->fun->marking)
3288
        {
3289
          struct spu_link_hash_table *htab = spu_hash_table (info);
3290
 
3291
          if (!htab->params->auto_overlay
3292
              && htab->params->stack_analysis)
3293
            {
3294
              const char *f1 = func_name (fun);
3295
              const char *f2 = func_name (call->fun);
3296
 
3297
              info->callbacks->info (_("Stack analysis will ignore the call "
3298
                                       "from %s to %s\n"),
3299
                                     f1, f2);
3300
            }
3301
 
3302
          call->broken_cycle = TRUE;
3303
        }
3304
      callp = &call->next;
3305
    }
3306
  fun->marking = FALSE;
3307
  *(unsigned int *) param = max_depth;
3308
  return TRUE;
3309
}
3310
 
3311
/* Check that we actually visited all nodes in remove_cycles.  If we
3312
   didn't, then there is some cycle in the call graph not attached to
3313
   any root node.  Arbitrarily choose a node in the cycle as a new
3314
   root and break the cycle.  */
3315
 
3316
static bfd_boolean
3317
mark_detached_root (struct function_info *fun,
3318
                    struct bfd_link_info *info,
3319
                    void *param)
3320
{
3321
  if (fun->visit2)
3322
    return TRUE;
3323
  fun->non_root = FALSE;
3324
  *(unsigned int *) param = 0;
3325
  return remove_cycles (fun, info, param);
3326
}
3327
 
3328
/* Populate call_list for each function.  */
3329
 
3330
static bfd_boolean
3331
build_call_tree (struct bfd_link_info *info)
3332
{
3333
  bfd *ibfd;
3334
  unsigned int depth;
3335
 
3336
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3337
    {
3338
      extern const bfd_target bfd_elf32_spu_vec;
3339
      asection *sec;
3340
 
3341
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3342
        continue;
3343
 
3344
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3345
        if (!mark_functions_via_relocs (sec, info, TRUE))
3346
          return FALSE;
3347
    }
3348
 
3349
  /* Transfer call info from hot/cold section part of function
3350
     to main entry.  */
3351
  if (!spu_hash_table (info)->params->auto_overlay
3352
      && !for_each_node (transfer_calls, info, 0, FALSE))
3353
    return FALSE;
3354
 
3355
  /* Find the call graph root(s).  */
3356
  if (!for_each_node (mark_non_root, info, 0, FALSE))
3357
    return FALSE;
3358
 
3359
  /* Remove cycles from the call graph.  We start from the root node(s)
3360
     so that we break cycles in a reasonable place.  */
3361
  depth = 0;
3362
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
3363
    return FALSE;
3364
 
3365
  return for_each_node (mark_detached_root, info, &depth, FALSE);
3366
}
3367
 
3368
/* qsort predicate to sort calls by priority, max_depth then count.  */
3369
 
3370
static int
3371
sort_calls (const void *a, const void *b)
3372
{
3373
  struct call_info *const *c1 = a;
3374
  struct call_info *const *c2 = b;
3375
  int delta;
3376
 
3377
  delta = (*c2)->priority - (*c1)->priority;
3378
  if (delta != 0)
3379
    return delta;
3380
 
3381
  delta = (*c2)->max_depth - (*c1)->max_depth;
3382
  if (delta != 0)
3383
    return delta;
3384
 
3385
  delta = (*c2)->count - (*c1)->count;
3386
  if (delta != 0)
3387
    return delta;
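  /* All keys compare equal: fall back to the pointers themselves so
     that the ordering is at least deterministic for a given run.  */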
3388
 
3389
  return (char *) c1 - (char *) c2;
3390
}
3391
 
3392
struct _mos_param {
3393
  unsigned int max_overlay_size;
3394
};
3395
 
3396
/* Set linker_mark and gc_mark on any sections that we will put in
3397
   overlays.  These flags are used by the generic ELF linker, but we
3398
   won't be continuing on to bfd_elf_final_link so it is OK to use
3399
   them.  linker_mark is clear before we get here.  Set segment_mark
3400
   on sections that are part of a pasted function (excluding the last
3401
   section).
3402
 
3403
   Set up function rodata section if --overlay-rodata.  We don't
3404
   currently include merged string constant rodata sections.
3405
 
3406
   Sort the call graph so that the deepest nodes will be visited
3407
   first.  */
3408
 
3409
static bfd_boolean
3410
mark_overlay_section (struct function_info *fun,
3411
                      struct bfd_link_info *info,
3412
                      void *param)
3413
{
3414
  struct call_info *call;
3415
  unsigned int count;
3416
  struct _mos_param *mos_param = param;
3417
  struct spu_link_hash_table *htab = spu_hash_table (info);
3418
 
3419
  if (fun->visit4)
3420
    return TRUE;
3421
 
3422
  fun->visit4 = TRUE;
3423
  if (!fun->sec->linker_mark
3424
      && (htab->params->ovly_flavour != ovly_soft_icache
3425
          || htab->params->non_ia_text
3426
          || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3427
          || strcmp (fun->sec->name, ".init") == 0
3428
          || strcmp (fun->sec->name, ".fini") == 0))
3429
    {
3430
      unsigned int size;
3431
 
3432
      fun->sec->linker_mark = 1;
3433
      fun->sec->gc_mark = 1;
3434
      fun->sec->segment_mark = 0;
3435
      /* Ensure SEC_CODE is set on this text section (it ought to
3436
         be!), and SEC_CODE is clear on rodata sections.  We use
3437
         this flag to differentiate the two overlay section types.  */
3438
      fun->sec->flags |= SEC_CODE;
3439
 
3440
      size = fun->sec->size;
3441
      if (htab->params->auto_overlay & OVERLAY_RODATA)
3442
        {
3443
          char *name = NULL;
3444
 
3445
          /* Find the rodata section corresponding to this function's
3446
             text section.  */
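          /* For example: ".text" pairs with ".rodata", ".text.foo" with
             ".rodata.foo", and ".gnu.linkonce.t.foo" with
             ".gnu.linkonce.r.foo" (the "foo" suffix is only illustrative).  */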
3447
          if (strcmp (fun->sec->name, ".text") == 0)
3448
            {
3449
              name = bfd_malloc (sizeof (".rodata"));
3450
              if (name == NULL)
3451
                return FALSE;
3452
              memcpy (name, ".rodata", sizeof (".rodata"));
3453
            }
3454
          else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3455
            {
3456
              size_t len = strlen (fun->sec->name);
3457
              name = bfd_malloc (len + 3);
3458
              if (name == NULL)
3459
                return FALSE;
3460
              memcpy (name, ".rodata", sizeof (".rodata"));
3461
              memcpy (name + 7, fun->sec->name + 5, len - 4);
3462
            }
3463
          else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3464
            {
3465
              size_t len = strlen (fun->sec->name) + 1;
3466
              name = bfd_malloc (len);
3467
              if (name == NULL)
3468
                return FALSE;
3469
              memcpy (name, fun->sec->name, len);
3470
              name[14] = 'r';
3471
            }
3472
 
3473
          if (name != NULL)
3474
            {
3475
              asection *rodata = NULL;
3476
              asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3477
              if (group_sec == NULL)
3478
                rodata = bfd_get_section_by_name (fun->sec->owner, name);
3479
              else
3480
                while (group_sec != NULL && group_sec != fun->sec)
3481
                  {
3482
                    if (strcmp (group_sec->name, name) == 0)
3483
                      {
3484
                        rodata = group_sec;
3485
                        break;
3486
                      }
3487
                    group_sec = elf_section_data (group_sec)->next_in_group;
3488
                  }
3489
              fun->rodata = rodata;
3490
              if (fun->rodata)
3491
                {
3492
                  size += fun->rodata->size;
3493
                  if (htab->params->line_size != 0
3494
                      && size > htab->params->line_size)
3495
                    {
3496
                      size -= fun->rodata->size;
3497
                      fun->rodata = NULL;
3498
                    }
3499
                  else
3500
                    {
3501
                      fun->rodata->linker_mark = 1;
3502
                      fun->rodata->gc_mark = 1;
3503
                      fun->rodata->flags &= ~SEC_CODE;
3504
                    }
3505
                }
3506
              free (name);
3507
            }
3508
        }
3509
      if (mos_param->max_overlay_size < size)
3510
        mos_param->max_overlay_size = size;
3511
    }
3512
 
3513
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3514
    count += 1;
3515
 
3516
  if (count > 1)
3517
    {
3518
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3519
      if (calls == NULL)
3520
        return FALSE;
3521
 
3522
      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3523
        calls[count++] = call;
3524
 
3525
      qsort (calls, count, sizeof (*calls), sort_calls);
3526
 
3527
      fun->call_list = NULL;
3528
      while (count != 0)
3529
        {
3530
          --count;
3531
          calls[count]->next = fun->call_list;
3532
          fun->call_list = calls[count];
3533
        }
3534
      free (calls);
3535
    }
3536
 
3537
  for (call = fun->call_list; call != NULL; call = call->next)
3538
    {
3539
      if (call->is_pasted)
3540
        {
3541
          /* There can only be one is_pasted call per function_info.  */
3542
          BFD_ASSERT (!fun->sec->segment_mark);
3543
          fun->sec->segment_mark = 1;
3544
        }
3545
      if (!call->broken_cycle
3546
          && !mark_overlay_section (call->fun, info, param))
3547
        return FALSE;
3548
    }
3549
 
3550
  /* Don't put entry code into an overlay.  The overlay manager needs
3551
     a stack!  Also, don't mark .ovl.init as an overlay.  */
3552
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3553
      == info->output_bfd->start_address
3554
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3555
    {
3556
      fun->sec->linker_mark = 0;
3557
      if (fun->rodata != NULL)
3558
        fun->rodata->linker_mark = 0;
3559
    }
3560
  return TRUE;
3561
}
3562
 
3563
/* If non-zero, also unmark functions called from within sections
3564
   that we need to unmark.  Unfortunately this isn't reliable since the
3565
   call graph cannot know the destination of function pointer calls.  */
3566
#define RECURSE_UNMARK 0
3567
 
3568
struct _uos_param {
3569
  asection *exclude_input_section;
3570
  asection *exclude_output_section;
3571
  unsigned long clearing;
3572
};
3573
 
3574
/* Undo some of mark_overlay_section's work.  */
3575
 
3576
static bfd_boolean
3577
unmark_overlay_section (struct function_info *fun,
3578
                        struct bfd_link_info *info,
3579
                        void *param)
3580
{
3581
  struct call_info *call;
3582
  struct _uos_param *uos_param = param;
3583
  unsigned int excluded = 0;
3584
 
3585
  if (fun->visit5)
3586
    return TRUE;
3587
 
3588
  fun->visit5 = TRUE;
3589
 
3590
  excluded = 0;
3591
  if (fun->sec == uos_param->exclude_input_section
3592
      || fun->sec->output_section == uos_param->exclude_output_section)
3593
    excluded = 1;
3594
 
3595
  if (RECURSE_UNMARK)
3596
    uos_param->clearing += excluded;
3597
 
3598
  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3599
    {
3600
      fun->sec->linker_mark = 0;
3601
      if (fun->rodata)
3602
        fun->rodata->linker_mark = 0;
3603
    }
3604
 
3605
  for (call = fun->call_list; call != NULL; call = call->next)
3606
    if (!call->broken_cycle
3607
        && !unmark_overlay_section (call->fun, info, param))
3608
      return FALSE;
3609
 
3610
  if (RECURSE_UNMARK)
3611
    uos_param->clearing -= excluded;
3612
  return TRUE;
3613
}
3614
 
3615
struct _cl_param {
3616
  unsigned int lib_size;
3617
  asection **lib_sections;
3618
};
3619
 
3620
/* Add sections we have marked as belonging to overlays to an array
3621
   for consideration as non-overlay sections.  The array consists of
3622
   pairs of sections, (text,rodata), for functions in the call graph.  */
3623
 
3624
static bfd_boolean
3625
collect_lib_sections (struct function_info *fun,
3626
                      struct bfd_link_info *info,
3627
                      void *param)
3628
{
3629
  struct _cl_param *lib_param = param;
3630
  struct call_info *call;
3631
  unsigned int size;
3632
 
3633
  if (fun->visit6)
3634
    return TRUE;
3635
 
3636
  fun->visit6 = TRUE;
3637
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3638
    return TRUE;
3639
 
3640
  size = fun->sec->size;
3641
  if (fun->rodata)
3642
    size += fun->rodata->size;
3643
 
3644
  if (size <= lib_param->lib_size)
3645
    {
3646
      *lib_param->lib_sections++ = fun->sec;
3647
      fun->sec->gc_mark = 0;
3648
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3649
        {
3650
          *lib_param->lib_sections++ = fun->rodata;
3651
          fun->rodata->gc_mark = 0;
3652
        }
3653
      else
3654
        *lib_param->lib_sections++ = NULL;
3655
    }
3656
 
3657
  for (call = fun->call_list; call != NULL; call = call->next)
3658
    if (!call->broken_cycle)
3659
      collect_lib_sections (call->fun, info, param);
3660
 
3661
  return TRUE;
3662
}
3663
 
3664
/* qsort predicate to sort sections by call count.  */
3665
 
3666
static int
3667
sort_lib (const void *a, const void *b)
3668
{
3669
  asection *const *s1 = a;
3670
  asection *const *s2 = b;
3671
  struct _spu_elf_section_data *sec_data;
3672
  struct spu_elf_stack_info *sinfo;
3673
  int delta;
3674
 
3675
  delta = 0;
3676
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3677
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3678
    {
3679
      int i;
3680
      for (i = 0; i < sinfo->num_fun; ++i)
3681
        delta -= sinfo->fun[i].call_count;
3682
    }
3683
 
3684
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3685
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3686
    {
3687
      int i;
3688
      for (i = 0; i < sinfo->num_fun; ++i)
3689
        delta += sinfo->fun[i].call_count;
3690
    }
3691
 
3692
  if (delta != 0)
3693
    return delta;
3694
 
3695
  return s1 - s2;
3696
}
3697
 
3698
/* Remove some sections from those marked to be in overlays.  Choose
3699
   those that are called from many places, likely library functions.  */
3700
 
3701
static unsigned int
3702
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3703
{
3704
  bfd *ibfd;
3705
  asection **lib_sections;
3706
  unsigned int i, lib_count;
3707
  struct _cl_param collect_lib_param;
3708
  struct function_info dummy_caller;
3709
  struct spu_link_hash_table *htab;
3710
 
3711
  memset (&dummy_caller, 0, sizeof (dummy_caller));
3712
  lib_count = 0;
3713
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3714
    {
3715
      extern const bfd_target bfd_elf32_spu_vec;
3716
      asection *sec;
3717
 
3718
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3719
        continue;
3720
 
3721
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3722
        if (sec->linker_mark
3723
            && sec->size < lib_size
3724
            && (sec->flags & SEC_CODE) != 0)
3725
          lib_count += 1;
3726
    }
3727
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3728
  if (lib_sections == NULL)
3729
    return (unsigned int) -1;
3730
  collect_lib_param.lib_size = lib_size;
3731
  collect_lib_param.lib_sections = lib_sections;
3732
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3733
                      TRUE))
3734
    return (unsigned int) -1;
3735
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3736
 
3737
  /* Sort sections so that those with the most calls are first.  */
3738
  if (lib_count > 1)
3739
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3740
 
3741
  htab = spu_hash_table (info);
3742
  for (i = 0; i < lib_count; i++)
3743
    {
3744
      unsigned int tmp, stub_size;
3745
      asection *sec;
3746
      struct _spu_elf_section_data *sec_data;
3747
      struct spu_elf_stack_info *sinfo;
3748
 
3749
      sec = lib_sections[2 * i];
3750
      /* If this section is OK, its size must be less than lib_size.  */
3751
      tmp = sec->size;
3752
      /* If it has a rodata section, then add that too.  */
3753
      if (lib_sections[2 * i + 1])
3754
        tmp += lib_sections[2 * i + 1]->size;
3755
      /* Add any new overlay call stubs needed by the section.  */
3756
      stub_size = 0;
3757
      if (tmp < lib_size
3758
          && (sec_data = spu_elf_section_data (sec)) != NULL
3759
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3760
        {
3761
          int k;
3762
          struct call_info *call;
3763
 
3764
          for (k = 0; k < sinfo->num_fun; ++k)
3765
            for (call = sinfo->fun[k].call_list; call; call = call->next)
3766
              if (call->fun->sec->linker_mark)
3767
                {
3768
                  struct call_info *p;
3769
                  for (p = dummy_caller.call_list; p; p = p->next)
3770
                    if (p->fun == call->fun)
3771
                      break;
3772
                  if (!p)
3773
                    stub_size += ovl_stub_size (htab->params);
3774
                }
3775
        }
3776
      if (tmp + stub_size < lib_size)
3777
        {
3778
          struct call_info **pp, *p;
3779
 
3780
          /* This section fits.  Mark it as non-overlay.  */
3781
          lib_sections[2 * i]->linker_mark = 0;
3782
          if (lib_sections[2 * i + 1])
3783
            lib_sections[2 * i + 1]->linker_mark = 0;
3784
          lib_size -= tmp + stub_size;
3785
          /* Call stubs to the section we just added are no longer
3786
             needed.  */
3787
          pp = &dummy_caller.call_list;
3788
          while ((p = *pp) != NULL)
3789
            if (!p->fun->sec->linker_mark)
3790
              {
3791
                lib_size += ovl_stub_size (htab->params);
3792
                *pp = p->next;
3793
                free (p);
3794
              }
3795
            else
3796
              pp = &p->next;
3797
          /* Add new call stubs to dummy_caller.  */
3798
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3799
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3800
            {
3801
              int k;
3802
              struct call_info *call;
3803
 
3804
              for (k = 0; k < sinfo->num_fun; ++k)
3805
                for (call = sinfo->fun[k].call_list;
3806
                     call;
3807
                     call = call->next)
3808
                  if (call->fun->sec->linker_mark)
3809
                    {
3810
                      struct call_info *callee;
3811
                      callee = bfd_malloc (sizeof (*callee));
3812
                      if (callee == NULL)
3813
                        return (unsigned int) -1;
3814
                      *callee = *call;
3815
                      if (!insert_callee (&dummy_caller, callee))
3816
                        free (callee);
3817
                    }
3818
            }
3819
        }
3820
    }
3821
  while (dummy_caller.call_list != NULL)
3822
    {
3823
      struct call_info *call = dummy_caller.call_list;
3824
      dummy_caller.call_list = call->next;
3825
      free (call);
3826
    }
3827
  for (i = 0; i < 2 * lib_count; i++)
3828
    if (lib_sections[i])
3829
      lib_sections[i]->gc_mark = 1;
3830
  free (lib_sections);
3831
  return lib_size;
3832
}
3833
 
3834
/* Build an array of overlay sections.  The deepest node's section is
3835
   added first, then its parent node's section, then everything called
3836
   from the parent section.  The idea is to group sections so as to
3837
   minimise calls between different overlays.  */
3838
 
3839
static bfd_boolean
3840
collect_overlays (struct function_info *fun,
3841
                  struct bfd_link_info *info,
3842
                  void *param)
3843
{
3844
  struct call_info *call;
3845
  bfd_boolean added_fun;
3846
  asection ***ovly_sections = param;
3847
 
3848
  if (fun->visit7)
3849
    return TRUE;
3850
 
3851
  fun->visit7 = TRUE;
3852
  for (call = fun->call_list; call != NULL; call = call->next)
3853
    if (!call->is_pasted && !call->broken_cycle)
3854
      {
3855
        if (!collect_overlays (call->fun, info, ovly_sections))
3856
          return FALSE;
3857
        break;
3858
      }
3859
 
3860
  added_fun = FALSE;
3861
  if (fun->sec->linker_mark && fun->sec->gc_mark)
3862
    {
3863
      fun->sec->gc_mark = 0;
3864
      *(*ovly_sections)++ = fun->sec;
3865
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3866
        {
3867
          fun->rodata->gc_mark = 0;
3868
          *(*ovly_sections)++ = fun->rodata;
3869
        }
3870
      else
3871
        *(*ovly_sections)++ = NULL;
3872
      added_fun = TRUE;
3873
 
3874
      /* Pasted sections must stay with the first section.  We don't
3875
         put pasted sections in the array, just the first section.
3876
         Mark subsequent sections as already considered.  */
3877
      if (fun->sec->segment_mark)
3878
        {
3879
          struct function_info *call_fun = fun;
3880
          do
3881
            {
3882
              for (call = call_fun->call_list; call != NULL; call = call->next)
3883
                if (call->is_pasted)
3884
                  {
3885
                    call_fun = call->fun;
3886
                    call_fun->sec->gc_mark = 0;
3887
                    if (call_fun->rodata)
3888
                      call_fun->rodata->gc_mark = 0;
3889
                    break;
3890
                  }
3891
              if (call == NULL)
3892
                abort ();
3893
            }
3894
          while (call_fun->sec->segment_mark);
3895
        }
3896
    }
3897
 
3898
  for (call = fun->call_list; call != NULL; call = call->next)
3899
    if (!call->broken_cycle
3900
        && !collect_overlays (call->fun, info, ovly_sections))
3901
      return FALSE;
3902
 
3903
  if (added_fun)
3904
    {
3905
      struct _spu_elf_section_data *sec_data;
3906
      struct spu_elf_stack_info *sinfo;
3907
 
3908
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3909
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3910
        {
3911
          int i;
3912
          for (i = 0; i < sinfo->num_fun; ++i)
3913
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3914
              return FALSE;
3915
        }
3916
    }
3917
 
3918
  return TRUE;
3919
}
3920
 
3921
struct _sum_stack_param {
3922
  size_t cum_stack;
3923
  size_t overall_stack;
3924
  bfd_boolean emit_stack_syms;
3925
};
3926
 
3927
/* Descend the call graph for FUN, accumulating total stack required.  */
3928
 
3929
static bfd_boolean
3930
sum_stack (struct function_info *fun,
3931
           struct bfd_link_info *info,
3932
           void *param)
3933
{
3934
  struct call_info *call;
3935
  struct function_info *max;
3936
  size_t stack, cum_stack;
3937
  const char *f1;
3938
  bfd_boolean has_call;
3939
  struct _sum_stack_param *sum_stack_param = param;
3940
  struct spu_link_hash_table *htab;
3941
 
3942
  cum_stack = fun->stack;
3943
  sum_stack_param->cum_stack = cum_stack;
3944
  if (fun->visit3)
3945
    return TRUE;
3946
 
3947
  has_call = FALSE;
3948
  max = NULL;
3949
  for (call = fun->call_list; call; call = call->next)
3950
    {
3951
      if (call->broken_cycle)
3952
        continue;
3953
      if (!call->is_pasted)
3954
        has_call = TRUE;
3955
      if (!sum_stack (call->fun, info, sum_stack_param))
3956
        return FALSE;
3957
      stack = sum_stack_param->cum_stack;
3958
      /* Include caller stack for normal calls, don't do so for
3959
         tail calls.  fun->stack here is local stack usage for
3960
         this function.  */
3961
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3962
        stack += fun->stack;
3963
      if (cum_stack < stack)
3964
        {
3965
          cum_stack = stack;
3966
          max = call->fun;
3967
        }
3968
    }
3969
 
3970
  sum_stack_param->cum_stack = cum_stack;
3971
  stack = fun->stack;
3972
  /* Now fun->stack holds cumulative stack.  */
3973
  fun->stack = cum_stack;
3974
  fun->visit3 = TRUE;
3975
 
3976
  if (!fun->non_root
3977
      && sum_stack_param->overall_stack < cum_stack)
3978
    sum_stack_param->overall_stack = cum_stack;
3979
 
3980
  htab = spu_hash_table (info);
3981
  if (htab->params->auto_overlay)
3982
    return TRUE;
3983
 
3984
  f1 = func_name (fun);
3985
  if (htab->params->stack_analysis)
3986
    {
3987
      if (!fun->non_root)
3988
        info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3989
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3990
                              f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3991
 
3992
      if (has_call)
3993
        {
3994
          info->callbacks->minfo (_("  calls:\n"));
3995
          for (call = fun->call_list; call; call = call->next)
3996
            if (!call->is_pasted && !call->broken_cycle)
3997
              {
3998
                const char *f2 = func_name (call->fun);
3999
                const char *ann1 = call->fun == max ? "*" : " ";
4000
                const char *ann2 = call->is_tail ? "t" : " ";
4001
 
4002
                info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
4003
              }
4004
        }
4005
    }
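  /* The map-file report produced above looks roughly like
     (function names and numbers purely illustrative):

        foo: 0x20 0x60
          calls:
           *  bar
              t baz

     where the first number is the function's own stack and the second
     the cumulative requirement of its deepest call chain.  */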
4006
 
4007
  if (sum_stack_param->emit_stack_syms)
4008
    {
4009
      char *name = bfd_malloc (18 + strlen (f1));
4010
      struct elf_link_hash_entry *h;
4011
 
4012
      if (name == NULL)
4013
        return FALSE;
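      /* For illustration: this produces names like "__stack_main" for a
         global function, or "__stack_2a_helper" for a local one, where
         2a is the hex id of the section defining it.  */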
4014
 
4015
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4016
        sprintf (name, "__stack_%s", f1);
4017
      else
4018
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4019
 
4020
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4021
      free (name);
4022
      if (h != NULL
4023
          && (h->root.type == bfd_link_hash_new
4024
              || h->root.type == bfd_link_hash_undefined
4025
              || h->root.type == bfd_link_hash_undefweak))
4026
        {
4027
          h->root.type = bfd_link_hash_defined;
4028
          h->root.u.def.section = bfd_abs_section_ptr;
4029
          h->root.u.def.value = cum_stack;
4030
          h->size = 0;
4031
          h->type = 0;
4032
          h->ref_regular = 1;
4033
          h->def_regular = 1;
4034
          h->ref_regular_nonweak = 1;
4035
          h->forced_local = 1;
4036
          h->non_elf = 0;
4037
        }
4038
    }
4039
 
4040
  return TRUE;
4041
}
4042
 
4043
/* SEC is part of a pasted function.  Return the call_info for the
4044
   next section of this function.  */
4045
 
4046
static struct call_info *
4047
find_pasted_call (asection *sec)
4048
{
4049
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4050
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4051
  struct call_info *call;
4052
  int k;
4053
 
4054
  for (k = 0; k < sinfo->num_fun; ++k)
4055
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4056
      if (call->is_pasted)
4057
        return call;
4058
  abort ();
4059
  return 0;
4060
}
4061
 
4062
/* qsort predicate to sort bfds by file name.  */
4063
 
4064
static int
4065
sort_bfds (const void *a, const void *b)
4066
{
4067
  bfd *const *abfd1 = a;
4068
  bfd *const *abfd2 = b;
4069
 
4070
  return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4071
}
4072
 
4073
static unsigned int
4074
print_one_overlay_section (FILE *script,
4075
                           unsigned int base,
4076
                           unsigned int count,
4077
                           unsigned int ovlynum,
4078
                           unsigned int *ovly_map,
4079
                           asection **ovly_sections,
4080
                           struct bfd_link_info *info)
4081
{
4082
  unsigned int j;
4083
 
4084
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4085
    {
4086
      asection *sec = ovly_sections[2 * j];
4087
 
4088
      if (fprintf (script, "   %s%c%s (%s)\n",
4089
                   (sec->owner->my_archive != NULL
4090
                    ? sec->owner->my_archive->filename : ""),
4091
                   info->path_separator,
4092
                   sec->owner->filename,
4093
                   sec->name) <= 0)
4094
        return -1;
4095
      if (sec->segment_mark)
4096
        {
4097
          struct call_info *call = find_pasted_call (sec);
4098
          while (call != NULL)
4099
            {
4100
              struct function_info *call_fun = call->fun;
4101
              sec = call_fun->sec;
4102
              if (fprintf (script, "   %s%c%s (%s)\n",
4103
                           (sec->owner->my_archive != NULL
4104
                            ? sec->owner->my_archive->filename : ""),
4105
                           info->path_separator,
4106
                           sec->owner->filename,
4107
                           sec->name) <= 0)
4108
                return -1;
4109
              for (call = call_fun->call_list; call; call = call->next)
4110
                if (call->is_pasted)
4111
                  break;
4112
            }
4113
        }
4114
    }
4115
 
4116
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4117
    {
4118
      asection *sec = ovly_sections[2 * j + 1];
4119
      if (sec != NULL
4120
          && fprintf (script, "   %s%c%s (%s)\n",
4121
                      (sec->owner->my_archive != NULL
4122
                       ? sec->owner->my_archive->filename : ""),
4123
                      info->path_separator,
4124
                      sec->owner->filename,
4125
                      sec->name) <= 0)
4126
        return -1;
4127
 
4128
      sec = ovly_sections[2 * j];
4129
      if (sec->segment_mark)
4130
        {
4131
          struct call_info *call = find_pasted_call (sec);
4132
          while (call != NULL)
4133
            {
4134
              struct function_info *call_fun = call->fun;
4135
              sec = call_fun->rodata;
4136
              if (sec != NULL
4137
                  && fprintf (script, "   %s%c%s (%s)\n",
4138
                              (sec->owner->my_archive != NULL
4139
                               ? sec->owner->my_archive->filename : ""),
4140
                              info->path_separator,
4141
                              sec->owner->filename,
4142
                              sec->name) <= 0)
4143
                return -1;
4144
              for (call = call_fun->call_list; call; call = call->next)
4145
                if (call->is_pasted)
4146
                  break;
4147
            }
4148
        }
4149
    }
4150
 
4151
  return j;
4152
}
4153
 
4154
/* Handle --auto-overlay.  */
4155
 
4156
static void
4157
spu_elf_auto_overlay (struct bfd_link_info *info)
4158
{
4159
  bfd *ibfd;
4160
  bfd **bfd_arr;
4161
  struct elf_segment_map *m;
4162
  unsigned int fixed_size, lo, hi;
4163
  unsigned int reserved;
4164
  struct spu_link_hash_table *htab;
4165
  unsigned int base, i, count, bfd_count;
4166
  unsigned int region, ovlynum;
4167
  asection **ovly_sections, **ovly_p;
4168
  unsigned int *ovly_map;
4169
  FILE *script;
4170
  unsigned int total_overlay_size, overlay_size;
4171
  const char *ovly_mgr_entry;
4172
  struct elf_link_hash_entry *h;
4173
  struct _mos_param mos_param;
4174
  struct _uos_param uos_param;
4175
  struct function_info dummy_caller;
4176
 
4177
  /* Find the extents of our loadable image.  */
4178
  lo = (unsigned int) -1;
4179
  hi = 0;
4180
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4181
    if (m->p_type == PT_LOAD)
4182
      for (i = 0; i < m->count; i++)
4183
        if (m->sections[i]->size != 0)
4184
          {
4185
            if (m->sections[i]->vma < lo)
4186
              lo = m->sections[i]->vma;
4187
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4188
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
4189
          }
4190
  fixed_size = hi + 1 - lo;
4191
 
4192
  if (!discover_functions (info))
4193
    goto err_exit;
4194
 
4195
  if (!build_call_tree (info))
4196
    goto err_exit;
4197
 
4198
  htab = spu_hash_table (info);
4199
  reserved = htab->params->auto_overlay_reserved;
4200
  if (reserved == 0)
4201
    {
4202
      struct _sum_stack_param sum_stack_param;
4203
 
4204
      sum_stack_param.emit_stack_syms = 0;
4205
      sum_stack_param.overall_stack = 0;
4206
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4207
        goto err_exit;
4208
      reserved = (sum_stack_param.overall_stack
4209
                  + htab->params->extra_stack_space);
4210
    }
4211
 
4212
  /* No need for overlays if everything already fits.  */
4213
  if (fixed_size + reserved <= htab->local_store
4214
      && htab->params->ovly_flavour != ovly_soft_icache)
4215
    {
4216
      htab->params->auto_overlay = 0;
4217
      return;
4218
    }
4219
 
4220
  uos_param.exclude_input_section = 0;
4221
  uos_param.exclude_output_section
4222
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4223
 
4224
  ovly_mgr_entry = "__ovly_load";
4225
  if (htab->params->ovly_flavour == ovly_soft_icache)
4226
    ovly_mgr_entry = "__icache_br_handler";
4227
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4228
                            FALSE, FALSE, FALSE);
4229
  if (h != NULL
4230
      && (h->root.type == bfd_link_hash_defined
4231
          || h->root.type == bfd_link_hash_defweak)
4232
      && h->def_regular)
4233
    {
4234
      /* We have a user supplied overlay manager.  */
4235
      uos_param.exclude_input_section = h->root.u.def.section;
4236
    }
4237
  else
4238
    {
4239
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4240
         builtin version to .text, and will adjust .text size.  */
4241
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4242
    }
4243
 
4244
  /* Mark overlay sections, and find max overlay section size.  */
4245
  mos_param.max_overlay_size = 0;
4246
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4247
    goto err_exit;
4248
 
4249
  /* We can't put the overlay manager or interrupt routines in
4250
     overlays.  */
4251
  uos_param.clearing = 0;
4252
  if ((uos_param.exclude_input_section
4253
       || uos_param.exclude_output_section)
4254
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4255
    goto err_exit;
4256
 
4257
  bfd_count = 0;
4258
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4259
    ++bfd_count;
4260
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4261
  if (bfd_arr == NULL)
4262
    goto err_exit;
4263
 
4264
  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
4265
  count = 0;
4266
  bfd_count = 0;
4267
  total_overlay_size = 0;
4268
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4269
    {
4270
      extern const bfd_target bfd_elf32_spu_vec;
4271
      asection *sec;
4272
      unsigned int old_count;
4273
 
4274
      if (ibfd->xvec != &bfd_elf32_spu_vec)
4275
        continue;
4276
 
4277
      old_count = count;
4278
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4279
        if (sec->linker_mark)
4280
          {
4281
            if ((sec->flags & SEC_CODE) != 0)
4282
              count += 1;
4283
            fixed_size -= sec->size;
4284
            total_overlay_size += sec->size;
4285
          }
4286
        else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4287
                 && sec->output_section->owner == info->output_bfd
4288
                 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4289
          fixed_size -= sec->size;
4290
      if (count != old_count)
4291
        bfd_arr[bfd_count++] = ibfd;
4292
    }
4293
 
4294
  /* Since the overlay link script selects sections by file name and
4295
     section name, ensure that file names are unique.  */
4296
  if (bfd_count > 1)
4297
    {
4298
      bfd_boolean ok = TRUE;
4299
 
4300
      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4301
      for (i = 1; i < bfd_count; ++i)
4302
        if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4303
          {
4304
            if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4305
              {
4306
                if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4307
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
4308
                                          bfd_arr[i]->filename,
4309
                                          bfd_arr[i]->my_archive->filename);
4310
                else
4311
                  info->callbacks->einfo (_("%s duplicated\n"),
4312
                                          bfd_arr[i]->filename);
4313
                ok = FALSE;
4314
              }
4315
          }
4316
      if (!ok)
4317
        {
4318
          info->callbacks->einfo (_("sorry, no support for duplicate "
4319
                                    "object files in auto-overlay script\n"));
4320
          bfd_set_error (bfd_error_bad_value);
4321
          goto err_exit;
4322
        }
4323
    }
4324
  free (bfd_arr);
4325
 
4326
  fixed_size += reserved;
4327
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4328
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4329
    {
4330
      if (htab->params->ovly_flavour == ovly_soft_icache)
4331
        {
4332
          /* Stubs in the non-icache area are bigger.  */
4333
          fixed_size += htab->non_ovly_stub * 16;
4334
          /* Space for icache manager tables.
4335
             a) Tag array, one quadword per cache line.
4336
             - word 0: ia address of present line, init to zero.  */
4337
          fixed_size += 16 << htab->num_lines_log2;
4338
          /* b) Rewrite "to" list, one quadword per cache line.  */
4339
          fixed_size += 16 << htab->num_lines_log2;
4340
          /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4341
                to a power-of-two number of full quadwords) per cache line.  */
4342
          fixed_size += 16 << (htab->fromelem_size_log2
4343
                               + htab->num_lines_log2);
4344
          /* d) Pointer to __ea backing store (toe), 1 quadword.  */
4345
          fixed_size += 16;
4346
        }
4347
      else
4348
        {
4349
          /* Guess the number of overlays.  Assuming the overlay buffer is on
4350
             average only half full should be conservative.  */
4351
          ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4352
                     / (htab->local_store - fixed_size));
4353
          /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
4354
          fixed_size += ovlynum * 16 + 16 + 4 + 16;
4355
        }
4356
    }
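  /* A worked example of the icache sizing above (numbers purely
     illustrative): with 32 cache lines (num_lines_log2 == 5) and
     fromelem_size_log2 == 1, the tag array and the rewrite "to" list
     take 16 << 5 == 512 bytes each, the rewrite "from" list takes
     16 << (1 + 5) == 1024 bytes, and the toe pointer a further 16.  */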
4357
 
4358
  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4359
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4360
                              "size of 0x%v exceeds local store\n"),
4361
                            (bfd_vma) fixed_size,
4362
                            (bfd_vma) mos_param.max_overlay_size);
4363
 
4364
  /* Now see if we should put some functions in the non-overlay area.  */
4365
  else if (fixed_size < htab->params->auto_overlay_fixed)
4366
    {
4367
      unsigned int max_fixed, lib_size;
4368
 
4369
      max_fixed = htab->local_store - mos_param.max_overlay_size;
4370
      if (max_fixed > htab->params->auto_overlay_fixed)
4371
        max_fixed = htab->params->auto_overlay_fixed;
4372
      lib_size = max_fixed - fixed_size;
4373
      lib_size = auto_ovl_lib_functions (info, lib_size);
4374
      if (lib_size == (unsigned int) -1)
4375
        goto err_exit;
4376
      fixed_size = max_fixed - lib_size;
4377
    }
4378
 
4379
  /* Build an array of sections, suitably sorted to place into
4380
     overlays.  */
4381
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4382
  if (ovly_sections == NULL)
4383
    goto err_exit;
4384
  ovly_p = ovly_sections;
4385
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4386
    goto err_exit;
4387
  count = (size_t) (ovly_p - ovly_sections) / 2;
4388
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4389
  if (ovly_map == NULL)
4390
    goto err_exit;
4391
 
4392
  memset (&dummy_caller, 0, sizeof (dummy_caller));
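  /* Each overlay buffer normally gets an equal share of the local store
     left after the fixed area; an explicit line_size overrides that.  */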
4393
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4394
  if (htab->params->line_size != 0)
4395
    overlay_size = htab->params->line_size;
4396
  base = 0;
4397
  ovlynum = 0;
4398
  while (base < count)
4399
    {
4400
      unsigned int size = 0, rosize = 0, roalign = 0;
4401
 
4402
      for (i = base; i < count; i++)
4403
        {
4404
          asection *sec, *rosec;
4405
          unsigned int tmp, rotmp;
4406
          unsigned int num_stubs;
4407
          struct call_info *call, *pasty;
4408
          struct _spu_elf_section_data *sec_data;
4409
          struct spu_elf_stack_info *sinfo;
4410
          unsigned int k;
4411
 
4412
          /* See whether we can add this section to the current
4413
             overlay without overflowing our overlay buffer.  */
4414
          sec = ovly_sections[2 * i];
4415
          tmp = align_power (size, sec->alignment_power) + sec->size;
4416
          rotmp = rosize;
4417
          rosec = ovly_sections[2 * i + 1];
4418
          if (rosec != NULL)
4419
            {
4420
              rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4421
              if (roalign < rosec->alignment_power)
4422
                roalign = rosec->alignment_power;
4423
            }
4424
          if (align_power (tmp, roalign) + rotmp > overlay_size)
4425
            break;
4426
          if (sec->segment_mark)
4427
            {
4428
              /* Pasted sections must stay together, so add their
4429
                 sizes too.  */
4430
              pasty = find_pasted_call (sec);
4431
              while (pasty != NULL)
4432
                {
4433
                  struct function_info *call_fun = pasty->fun;
4434
                  tmp = (align_power (tmp, call_fun->sec->alignment_power)
4435
                         + call_fun->sec->size);
4436
                  if (call_fun->rodata)
4437
                    {
4438
                      rotmp = (align_power (rotmp,
4439
                                            call_fun->rodata->alignment_power)
4440
                               + call_fun->rodata->size);
4441
                      if (roalign < call_fun->rodata->alignment_power)
4442
                        roalign = call_fun->rodata->alignment_power;
4443
                    }
4444
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4445
                    if (pasty->is_pasted)
4446
                      break;
4447
                }
4448
            }
4449
          if (align_power (tmp, roalign) + rotmp > overlay_size)
4450
            break;
4451
 
4452
          /* If we add this section, we might need new overlay call
4453
             stubs.  Add any overlay section calls to dummy_caller.  */
4454
          pasty = NULL;
4455
          sec_data = spu_elf_section_data (sec);
4456
          sinfo = sec_data->u.i.stack_info;
4457
          for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4458
            for (call = sinfo->fun[k].call_list; call; call = call->next)
4459
              if (call->is_pasted)
4460
                {
4461
                  BFD_ASSERT (pasty == NULL);
4462
                  pasty = call;
4463
                }
4464
              else if (call->fun->sec->linker_mark)
4465
                {
4466
                  if (!copy_callee (&dummy_caller, call))
4467
                    goto err_exit;
4468
                }
4469
          while (pasty != NULL)
4470
            {
4471
              struct function_info *call_fun = pasty->fun;
4472
              pasty = NULL;
4473
              for (call = call_fun->call_list; call; call = call->next)
4474
                if (call->is_pasted)
4475
                  {
4476
                    BFD_ASSERT (pasty == NULL);
4477
                    pasty = call;
4478
                  }
4479
                else if (!copy_callee (&dummy_caller, call))
4480
                  goto err_exit;
4481
            }
4482
 
4483
          /* Calculate call stub size.  */
4484
          num_stubs = 0;
4485
          for (call = dummy_caller.call_list; call; call = call->next)
4486
            {
4487
              unsigned int stub_delta = 1;
4488
 
4489
              if (htab->params->ovly_flavour == ovly_soft_icache)
4490
                stub_delta = call->count;
4491
              num_stubs += stub_delta;
4492
 
4493
              /* If the call is within this overlay, we won't need a
4494
                 stub.  */
4495
              for (k = base; k < i + 1; k++)
4496
                if (call->fun->sec == ovly_sections[2 * k])
4497
                  {
4498
                    num_stubs -= stub_delta;
4499
                    break;
4500
                  }
4501
            }
4502
          if (htab->params->ovly_flavour == ovly_soft_icache
4503
              && num_stubs > htab->params->max_branch)
4504
            break;
4505
          if (align_power (tmp, roalign) + rotmp
4506
              + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4507
            break;
4508
          size = tmp;
4509
          rosize = rotmp;
4510
        }
4511
 
4512
      if (i == base)
4513
        {
4514
          info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4515
                                  ovly_sections[2 * i]->owner,
4516
                                  ovly_sections[2 * i],
4517
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
4518
          bfd_set_error (bfd_error_bad_value);
4519
          goto err_exit;
4520
        }
4521
 
4522
      while (dummy_caller.call_list != NULL)
4523
        {
4524
          struct call_info *call = dummy_caller.call_list;
4525
          dummy_caller.call_list = call->next;
4526
          free (call);
4527
        }
4528
 
4529
      ++ovlynum;
4530
      while (base < i)
4531
        ovly_map[base++] = ovlynum;
4532
    }
4533
 
4534
  script = htab->params->spu_elf_open_overlay_script ();
4535
 
4536
  if (htab->params->ovly_flavour == ovly_soft_icache)
4537
    {
4538
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4539
        goto file_err;
4540
 
4541
      if (fprintf (script,
4542
                   " . = ALIGN (%u);\n"
4543
                   " .ovl.init : { *(.ovl.init) }\n"
4544
                   " . = ABSOLUTE (ADDR (.ovl.init));\n",
4545
                   htab->params->line_size) <= 0)
4546
        goto file_err;
4547
 
4548
      base = 0;
4549
      ovlynum = 1;
4550
      while (base < count)
4551
        {
4552
          unsigned int indx = ovlynum - 1;
4553
          unsigned int vma, lma;
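          /* For illustration, with 32 lines (num_lines_log2 == 5) of 1k
             (line_size_log2 == 10): overlay 1 (indx 0) gets vma 0 and
             lma 0x40000, while overlay 34 (indx 33) gets vma 0x400 and
             lma 0x80400.  */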
4554
 
4555
          vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4556
          lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4557
 
4558
          if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4559
                               ": AT (LOADADDR (.ovl.init) + %u) {\n",
4560
                       ovlynum, vma, lma) <= 0)
4561
            goto file_err;
4562
 
4563
          base = print_one_overlay_section (script, base, count, ovlynum,
4564
                                            ovly_map, ovly_sections, info);
4565
          if (base == (unsigned) -1)
4566
            goto file_err;
4567
 
4568
          if (fprintf (script, "  }\n") <= 0)
4569
            goto file_err;
4570
 
4571
          ovlynum++;
4572
        }
4573
 
4574
      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4575
                   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4576
        goto file_err;
4577
 
4578
      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4579
        goto file_err;
4580
    }
4581
  else
4582
    {
4583
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4584
        goto file_err;
4585
 
4586
      if (fprintf (script,
4587
                   " . = ALIGN (16);\n"
4588
                   " .ovl.init : { *(.ovl.init) }\n"
4589
                   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4590
        goto file_err;
4591
 
4592
      for (region = 1; region <= htab->params->num_lines; region++)
4593
        {
4594
          ovlynum = region;
4595
          base = 0;
4596
          while (base < count && ovly_map[base] < ovlynum)
4597
            base++;
4598
 
4599
          if (base == count)
4600
            break;
4601
 
4602
          if (region == 1)
4603
            {
4604
              /* We need to set lma since we are overlaying .ovl.init.  */
4605
              if (fprintf (script,
4606
                           " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4607
                goto file_err;
4608
            }
4609
          else
4610
            {
4611
              if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4612
                goto file_err;
4613
            }
4614
 
4615
          while (base < count)
4616
            {
4617
              if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
4618
                goto file_err;
4619
 
4620
              base = print_one_overlay_section (script, base, count, ovlynum,
4621
                                                ovly_map, ovly_sections, info);
4622
              if (base == (unsigned) -1)
4623
                goto file_err;
4624
 
4625
              if (fprintf (script, "  }\n") <= 0)
4626
                goto file_err;
4627
 
4628
              ovlynum += htab->params->num_lines;
4629
              while (base < count && ovly_map[base] < ovlynum)
4630
                base++;
4631
            }
4632
 
4633
          if (fprintf (script, " }\n") <= 0)
4634
            goto file_err;
4635
        }
4636
 
4637
      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4638
        goto file_err;
4639
    }
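  /* A sketch of the script emitted by the branch above, assuming a
     single overlay region (num_lines == 1); file and section names are
     invented for illustration:

        SECTIONS
        {
         . = ALIGN (16);
         .ovl.init : { *(.ovl.init) }
         . = ABSOLUTE (ADDR (.ovl.init));
         OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))
         {
          .ovly1 {
           lib.a:foo.o (.text.f1)
          }
          .ovly2 {
           lib.a:bar.o (.text.f2)
          }
         }
        }
        INSERT BEFORE .text;  */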
4640
 
4641
  free (ovly_map);
4642
  free (ovly_sections);
4643
 
4644
  if (fclose (script) != 0)
4645
    goto file_err;
4646
 
4647
  if (htab->params->auto_overlay & AUTO_RELINK)
4648
    (*htab->params->spu_elf_relink) ();
4649
 
4650
  xexit (0);
4651
 
4652
 file_err:
4653
  bfd_set_error (bfd_error_system_call);
4654
 err_exit:
4655
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4656
  xexit (1);
4657
}
4658
 
4659
/* Provide an estimate of total stack required.  */
4660
 
4661
static bfd_boolean
4662
spu_elf_stack_analysis (struct bfd_link_info *info)
4663
{
4664
  struct spu_link_hash_table *htab;
4665
  struct _sum_stack_param sum_stack_param;
4666
 
4667
  if (!discover_functions (info))
4668
    return FALSE;
4669
 
4670
  if (!build_call_tree (info))
4671
    return FALSE;
4672
 
4673
  htab = spu_hash_table (info);
4674
  if (htab->params->stack_analysis)
4675
    {
4676
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4677
      info->callbacks->minfo (_("\nStack size for functions.  "
4678
                                "Annotations: '*' max stack, 't' tail call\n"));
4679
    }
4680
 
4681
  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4682
  sum_stack_param.overall_stack = 0;
4683
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4684
    return FALSE;
4685
 
4686
  if (htab->params->stack_analysis)
4687
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4688
                           (bfd_vma) sum_stack_param.overall_stack);
4689
  return TRUE;
4690
}
4691
 
4692
/* Perform a final link.  */
4693
 
4694
static bfd_boolean
4695
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4696
{
4697
  struct spu_link_hash_table *htab = spu_hash_table (info);
4698
 
4699
  if (htab->params->auto_overlay)
4700
    spu_elf_auto_overlay (info);
4701
 
4702
  if ((htab->params->stack_analysis
4703
       || (htab->params->ovly_flavour == ovly_soft_icache
4704
           && htab->params->lrlive_analysis))
4705
      && !spu_elf_stack_analysis (info))
4706
    info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4707
 
4708
  if (!spu_elf_build_stubs (info))
4709
    info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4710
 
4711
  return bfd_elf_final_link (output_bfd, info);
4712
}
4713
 
4714
/* Called when not normally emitting relocs, i.e. !info->relocatable
4715
   and !info->emitrelocations.  Returns a count of special relocs
4716
   that need to be emitted.  */
4717
 
4718
static unsigned int
4719
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4720
{
4721
  Elf_Internal_Rela *relocs;
4722
  unsigned int count = 0;
4723
 
4724
  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4725
                                      info->keep_memory);
4726
  if (relocs != NULL)
4727
    {
4728
      Elf_Internal_Rela *rel;
4729
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4730
 
4731
      for (rel = relocs; rel < relend; rel++)
4732
        {
4733
          int r_type = ELF32_R_TYPE (rel->r_info);
4734
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4735
            ++count;
4736
        }
4737
 
4738
      if (elf_section_data (sec)->relocs != relocs)
4739
        free (relocs);
4740
    }
4741
 
4742
  return count;
4743
}
4744
 
4745
/* Functions for adding fixup records to .fixup.  */
4746
 
4747
#define FIXUP_RECORD_SIZE 4
4748
 
4749
#define FIXUP_PUT(output_bfd,htab,index,addr) \
4750
          bfd_put_32 (output_bfd, addr, \
4751
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4752
#define FIXUP_GET(output_bfd,htab,index) \
4753
          bfd_get_32 (output_bfd, \
4754
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4755
 
4756
/* Store OFFSET in .fixup.  This assumes it will be called with an
4757
   increasing OFFSET.  When OFFSET lies in the same quadword as the last
4758
   base offset, it just sets a bit; otherwise it adds a new fixup record.  */
4759
static void
4760
spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4761
                    bfd_vma offset)
4762
{
4763
  struct spu_link_hash_table *htab = spu_hash_table (info);
4764
  asection *sfixup = htab->sfixup;
4765
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
4766
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4767
  if (sfixup->reloc_count == 0)
4768
    {
4769
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4770
      sfixup->reloc_count++;
4771
    }
4772
  else
4773
    {
4774
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4775
      if (qaddr != (base & ~(bfd_vma) 15))
4776
        {
4777
          if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4778
            (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4779
          FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4780
          sfixup->reloc_count++;
4781
        }
4782
      else
4783
        FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4784
    }
4785
}
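 
/* Editor's note: the following is an illustrative sketch, not part of the
   original source (guarded with #if 0 so it has no effect on the build).
   Each 32-bit .fixup record packs a 16-byte aligned quadword address in
   its upper 28 bits and, in its low 4 bits, a mask of which of the four
   words in that quadword hold an R_SPU_ADDR32 value; bit 8 is word 0 and
   bit 1 is word 3, matching the "8 >> ((offset & 15) >> 2)" encoding in
   spu_elf_emit_fixup above.  The decoder name below is hypothetical.  */
#if 0
static unsigned int
example_decode_fixup_record (bfd_vma record, bfd_vma addrs[4])
{
  bfd_vma qaddr = record & ~(bfd_vma) 15;
  unsigned int word, n = 0;

  for (word = 0; word < 4; word++)
    if (record & ((bfd_vma) 8 >> word))
      /* Word WORD of the quadword at QADDR holds a value to be fixed up.  */
      addrs[n++] = qaddr + 4 * word;

  return n;
}
#endif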
4786
 
4787
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
4788
 
4789
static int
4790
spu_elf_relocate_section (bfd *output_bfd,
4791
                          struct bfd_link_info *info,
4792
                          bfd *input_bfd,
4793
                          asection *input_section,
4794
                          bfd_byte *contents,
4795
                          Elf_Internal_Rela *relocs,
4796
                          Elf_Internal_Sym *local_syms,
4797
                          asection **local_sections)
4798
{
4799
  Elf_Internal_Shdr *symtab_hdr;
4800
  struct elf_link_hash_entry **sym_hashes;
4801
  Elf_Internal_Rela *rel, *relend;
4802
  struct spu_link_hash_table *htab;
4803
  asection *ea;
4804
  int ret = TRUE;
4805
  bfd_boolean emit_these_relocs = FALSE;
4806
  bfd_boolean is_ea_sym;
4807
  bfd_boolean stubs;
4808
  unsigned int iovl = 0;
4809
 
4810
  htab = spu_hash_table (info);
4811
  stubs = (htab->stub_sec != NULL
4812
           && maybe_needs_stubs (input_section));
4813
  iovl = overlay_index (input_section);
4814
  ea = bfd_get_section_by_name (output_bfd, "._ea");
4815
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4816
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4817
 
4818
  rel = relocs;
4819
  relend = relocs + input_section->reloc_count;
4820
  for (; rel < relend; rel++)
4821
    {
4822
      int r_type;
4823
      reloc_howto_type *howto;
4824
      unsigned int r_symndx;
4825
      Elf_Internal_Sym *sym;
4826
      asection *sec;
4827
      struct elf_link_hash_entry *h;
4828
      const char *sym_name;
4829
      bfd_vma relocation;
4830
      bfd_vma addend;
4831
      bfd_reloc_status_type r;
4832
      bfd_boolean unresolved_reloc;
4833
      enum _stub_type stub_type;
4834
 
4835
      r_symndx = ELF32_R_SYM (rel->r_info);
4836
      r_type = ELF32_R_TYPE (rel->r_info);
4837
      howto = elf_howto_table + r_type;
4838
      unresolved_reloc = FALSE;
4839
      h = NULL;
4840
      sym = NULL;
4841
      sec = NULL;
4842
      if (r_symndx < symtab_hdr->sh_info)
4843
        {
4844
          sym = local_syms + r_symndx;
4845
          sec = local_sections[r_symndx];
4846
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4847
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4848
        }
4849
      else
4850
        {
4851
          if (sym_hashes == NULL)
4852
            return FALSE;
4853
 
4854
          h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4855
 
4856
          while (h->root.type == bfd_link_hash_indirect
4857
                 || h->root.type == bfd_link_hash_warning)
4858
            h = (struct elf_link_hash_entry *) h->root.u.i.link;
4859
 
4860
          relocation = 0;
4861
          if (h->root.type == bfd_link_hash_defined
4862
              || h->root.type == bfd_link_hash_defweak)
4863
            {
4864
              sec = h->root.u.def.section;
4865
              if (sec == NULL
4866
                  || sec->output_section == NULL)
4867
                /* Set a flag that will be cleared later if we find a
4868
                   relocation value for this symbol.  output_section
4869
                   is typically NULL for symbols satisfied by a shared
4870
                   library.  */
4871
                unresolved_reloc = TRUE;
4872
              else
4873
                relocation = (h->root.u.def.value
4874
                              + sec->output_section->vma
4875
                              + sec->output_offset);
4876
            }
4877
          else if (h->root.type == bfd_link_hash_undefweak)
4878
            ;
4879
          else if (info->unresolved_syms_in_objects == RM_IGNORE
4880
                   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4881
            ;
4882
          else if (!info->relocatable
4883
                   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4884
            {
4885
              bfd_boolean err;
4886
              err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4887
                     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4888
              if (!info->callbacks->undefined_symbol (info,
4889
                                                      h->root.root.string,
4890
                                                      input_bfd,
4891
                                                      input_section,
4892
                                                      rel->r_offset, err))
4893
                return FALSE;
4894
            }
4895
          sym_name = h->root.root.string;
4896
        }
4897
 
4898
      if (sec != NULL && elf_discarded_section (sec))
4899
        RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4900
                                         rel, relend, howto, contents);
4901
 
4902
      if (info->relocatable)
4903
        continue;
4904
 
4905
      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4906
      if (r_type == R_SPU_ADD_PIC
4907
          && h != NULL
4908
          && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4909
        {
4910
          bfd_byte *loc = contents + rel->r_offset;
4911
          loc[0] = 0x1c;
4912
          loc[1] = 0x00;
4913
          loc[2] &= 0x3f;
4914
        }
4915
 
4916
      is_ea_sym = (ea != NULL
4917
                   && sec != NULL
4918
                   && sec->output_section == ea);
4919
 
4920
      /* If this symbol is in an overlay area, we may need to relocate
4921
         to the overlay stub.  */
4922
      addend = rel->r_addend;
4923
      if (stubs
4924
          && !is_ea_sym
4925
          && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4926
                                          contents, info)) != no_stub)
4927
        {
4928
          unsigned int ovl = 0;
4929
          struct got_entry *g, **head;
4930
 
4931
          if (stub_type != nonovl_stub)
4932
            ovl = iovl;
4933
 
4934
          if (h != NULL)
4935
            head = &h->got.glist;
4936
          else
4937
            head = elf_local_got_ents (input_bfd) + r_symndx;
4938
 
4939
          for (g = *head; g != NULL; g = g->next)
4940
            if (htab->params->ovly_flavour == ovly_soft_icache
4941
                ? (g->ovl == ovl
4942
                   && g->br_addr == (rel->r_offset
4943
                                     + input_section->output_offset
4944
                                     + input_section->output_section->vma))
4945
                : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4946
              break;
4947
          if (g == NULL)
4948
            abort ();
4949
 
4950
          relocation = g->stub_addr;
4951
          addend = 0;
4952
        }
4953
      else
4954
        {
4955
          /* For soft icache, encode the overlay index into addresses.  */
4956
          if (htab->params->ovly_flavour == ovly_soft_icache
4957
              && (r_type == R_SPU_ADDR16_HI
4958
                  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4959
              && !is_ea_sym)
4960
            {
4961
              unsigned int ovl = overlay_index (sec);
4962
              if (ovl != 0)
4963
                {
4964
                  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4965
                  relocation += set_id << 18;
4966
                }
4967
            }
4968
        }
4969
 
4970
      if (htab->params->emit_fixups && !info->relocatable
4971
          && (input_section->flags & SEC_ALLOC) != 0
4972
          && r_type == R_SPU_ADDR32)
4973
        {
4974
          bfd_vma offset;
4975
          offset = rel->r_offset + input_section->output_section->vma
4976
                   + input_section->output_offset;
4977
          spu_elf_emit_fixup (output_bfd, info, offset);
4978
        }
4979
 
4980
      if (unresolved_reloc)
4981
        ;
4982
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4983
        {
4984
          if (is_ea_sym)
4985
            {
4986
              /* ._ea is a special section that isn't allocated in SPU
4987
                 memory, but rather occupies space in PPU memory as
4988
                 part of an embedded ELF image.  If this reloc is
4989
                 against a symbol defined in ._ea, then transform the
4990
                 reloc into an equivalent one without a symbol
4991
                 relative to the start of the ELF image.  */
4992
              rel->r_addend += (relocation
4993
                                - ea->vma
4994
                                + elf_section_data (ea)->this_hdr.sh_offset);
4995
              rel->r_info = ELF32_R_INFO (0, r_type);
4996
            }
4997
          emit_these_relocs = TRUE;
4998
          continue;
4999
        }
5000
      else if (is_ea_sym)
5001
        unresolved_reloc = TRUE;
5002
 
5003
      if (unresolved_reloc)
5004
        {
5005
          (*_bfd_error_handler)
5006
            (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5007
             input_bfd,
5008
             bfd_get_section_name (input_bfd, input_section),
5009
             (long) rel->r_offset,
5010
             howto->name,
5011
             sym_name);
5012
          ret = FALSE;
5013
        }
5014
 
5015
      r = _bfd_final_link_relocate (howto,
5016
                                    input_bfd,
5017
                                    input_section,
5018
                                    contents,
5019
                                    rel->r_offset, relocation, addend);
5020
 
5021
      if (r != bfd_reloc_ok)
5022
        {
5023
          const char *msg = (const char *) 0;
5024
 
5025
          switch (r)
5026
            {
5027
            case bfd_reloc_overflow:
5028
              if (!((*info->callbacks->reloc_overflow)
5029
                    (info, (h ? &h->root : NULL), sym_name, howto->name,
5030
                     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
5031
                return FALSE;
5032
              break;
5033
 
5034
            case bfd_reloc_undefined:
5035
              if (!((*info->callbacks->undefined_symbol)
5036
                    (info, sym_name, input_bfd, input_section,
5037
                     rel->r_offset, TRUE)))
5038
                return FALSE;
5039
              break;
5040
 
5041
            case bfd_reloc_outofrange:
5042
              msg = _("internal error: out of range error");
5043
              goto common_error;
5044
 
5045
            case bfd_reloc_notsupported:
5046
              msg = _("internal error: unsupported relocation error");
5047
              goto common_error;
5048
 
5049
            case bfd_reloc_dangerous:
5050
              msg = _("internal error: dangerous error");
5051
              goto common_error;
5052
 
5053
            default:
5054
              msg = _("internal error: unknown error");
5055
              /* fall through */
5056
 
5057
            common_error:
5058
              ret = FALSE;
5059
              if (!((*info->callbacks->warning)
5060
                    (info, msg, sym_name, input_bfd, input_section,
5061
                     rel->r_offset)))
5062
                return FALSE;
5063
              break;
5064
            }
5065
        }
5066
    }
5067
 
5068
  if (ret
5069
      && emit_these_relocs
5070
      && !info->emitrelocations)
5071
    {
5072
      Elf_Internal_Rela *wrel;
5073
      Elf_Internal_Shdr *rel_hdr;
5074
 
5075
      wrel = rel = relocs;
5076
      relend = relocs + input_section->reloc_count;
5077
      for (; rel < relend; rel++)
5078
        {
5079
          int r_type;
5080
 
5081
          r_type = ELF32_R_TYPE (rel->r_info);
5082
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5083
            *wrel++ = *rel;
5084
        }
5085
      input_section->reloc_count = wrel - relocs;
5086
      /* Shrink the reloc section header to match the pruned count,
         for the benefit of _bfd_elf_link_output_relocs.  */
5087
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5088
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5089
      ret = 2;
5090
    }
5091
 
5092
  return ret;
5093
}
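 
/* Editor's note: illustrative sketch, not part of the original source
   (guarded with #if 0).  The tail of spu_elf_relocate_section prunes the
   reloc array in place with the usual read/write pointer idiom: entries
   to keep are copied down over entries being dropped, and the new count
   is the distance between the two pointers.  A standalone version of the
   idiom, using a hypothetical KEEP predicate: */
#if 0
static unsigned int
example_prune_relocs_in_place (Elf_Internal_Rela *relocs, unsigned int count,
                               int (*keep) (const Elf_Internal_Rela *))
{
  Elf_Internal_Rela *rel, *wrel, *relend;

  wrel = relocs;
  relend = relocs + count;
  for (rel = relocs; rel < relend; rel++)
    if (keep (rel))
      *wrel++ = *rel;           /* Copy kept entries toward the front.  */

  return wrel - relocs;         /* The new, smaller reloc count.  */
}
#endif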
5094
 
5095
static bfd_boolean
5096
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5097
                                 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5098
{
5099
  return TRUE;
5100
}
5101
 
5102
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
5103
 
5104
static int
5105
spu_elf_output_symbol_hook (struct bfd_link_info *info,
5106
                            const char *sym_name ATTRIBUTE_UNUSED,
5107
                            Elf_Internal_Sym *sym,
5108
                            asection *sym_sec ATTRIBUTE_UNUSED,
5109
                            struct elf_link_hash_entry *h)
5110
{
5111
  struct spu_link_hash_table *htab = spu_hash_table (info);
5112
 
5113
  if (!info->relocatable
5114
      && htab->stub_sec != NULL
5115
      && h != NULL
5116
      && (h->root.type == bfd_link_hash_defined
5117
          || h->root.type == bfd_link_hash_defweak)
5118
      && h->def_regular
5119
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5120
    {
5121
      struct got_entry *g;
5122
 
5123
      for (g = h->got.glist; g != NULL; g = g->next)
5124
        if (htab->params->ovly_flavour == ovly_soft_icache
5125
            ? g->br_addr == g->stub_addr
5126
            : g->addend == 0 && g->ovl == 0)
5127
          {
5128
            sym->st_shndx = (_bfd_elf_section_from_bfd_section
5129
                             (htab->stub_sec[0]->output_section->owner,
5130
                              htab->stub_sec[0]->output_section));
5131
            sym->st_value = g->stub_addr;
5132
            break;
5133
          }
5134
    }
5135
 
5136
  return 1;
5137
}
5138
 
5139
static int spu_plugin = 0;
5140
 
5141
void
5142
spu_elf_plugin (int val)
5143
{
5144
  spu_plugin = val;
5145
}
5146
 
5147
/* Set ELF header e_type for plugins.  */
5148
 
5149
static void
5150
spu_elf_post_process_headers (bfd *abfd,
5151
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
5152
{
5153
  if (spu_plugin)
5154
    {
5155
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5156
 
5157
      i_ehdrp->e_type = ET_DYN;
5158
    }
5159
}
5160
 
5161
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
5162
   segments for overlays.  */
5163
 
5164
static int
5165
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5166
{
5167
  int extra = 0;
5168
  asection *sec;
5169
 
5170
  if (info != NULL)
5171
    {
5172
      struct spu_link_hash_table *htab = spu_hash_table (info);
5173
      extra = htab->num_overlays;
5174
    }
5175
 
5176
  if (extra)
5177
    ++extra;
5178
 
5179
  sec = bfd_get_section_by_name (abfd, ".toe");
5180
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5181
    ++extra;
5182
 
5183
  return extra;
5184
}
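 
/* Editor's note: illustrative example, not in the original source.  With
   htab->num_overlays == 8 and a loadable .toe section, the function above
   computes extra = 8, bumps it to 9 because overlays are present, and to
   10 for the loadable .toe, i.e. 10 extra program headers.  */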
5185
 
5186
/* Remove .toe section from other PT_LOAD segments and put it in
5187
   a segment of its own.  Put overlays in separate segments too.  */
5188
 
5189
static bfd_boolean
5190
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5191
{
5192
  asection *toe, *s;
5193
  struct elf_segment_map *m, *m_overlay;
5194
  struct elf_segment_map **p, **p_overlay;
5195
  unsigned int i;
5196
 
5197
  if (info == NULL)
5198
    return TRUE;
5199
 
5200
  toe = bfd_get_section_by_name (abfd, ".toe");
5201
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
5202
    if (m->p_type == PT_LOAD && m->count > 1)
5203
      for (i = 0; i < m->count; i++)
5204
        if ((s = m->sections[i]) == toe
5205
            || spu_elf_section_data (s)->u.o.ovl_index != 0)
5206
          {
5207
            struct elf_segment_map *m2;
5208
            bfd_vma amt;
5209
 
5210
            if (i + 1 < m->count)
5211
              {
5212
                amt = sizeof (struct elf_segment_map);
5213
                amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5214
                m2 = bfd_zalloc (abfd, amt);
5215
                if (m2 == NULL)
5216
                  return FALSE;
5217
                m2->count = m->count - (i + 1);
5218
                memcpy (m2->sections, m->sections + i + 1,
5219
                        m2->count * sizeof (m->sections[0]));
5220
                m2->p_type = PT_LOAD;
5221
                m2->next = m->next;
5222
                m->next = m2;
5223
              }
5224
            m->count = 1;
5225
            if (i != 0)
5226
              {
5227
                m->count = i;
5228
                amt = sizeof (struct elf_segment_map);
5229
                m2 = bfd_zalloc (abfd, amt);
5230
                if (m2 == NULL)
5231
                  return FALSE;
5232
                m2->p_type = PT_LOAD;
5233
                m2->count = 1;
5234
                m2->sections[0] = s;
5235
                m2->next = m->next;
5236
                m->next = m2;
5237
              }
5238
            break;
5239
          }
5240
 
5241
 
5242
  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5243
     PT_LOAD segments.  This can cause the .ovl.init section to be
5244
     overwritten with the contents of some overlay segment.  To work
5245
     around this issue, we ensure that all PF_OVERLAY segments are
5246
     sorted first amongst the program headers; this ensures that even
5247
     with a broken loader, the .ovl.init section (which is not marked
5248
     as PF_OVERLAY) will be placed into SPU local store on startup.  */
5249
 
5250
  /* Move all overlay segments onto a separate list.  */
5251
  p = &elf_tdata (abfd)->segment_map;
5252
  p_overlay = &m_overlay;
5253
  while (*p != NULL)
5254
    {
5255
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5256
          && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5257
        {
5258
          m = *p;
5259
          *p = m->next;
5260
          *p_overlay = m;
5261
          p_overlay = &m->next;
5262
          continue;
5263
        }
5264
 
5265
      p = &((*p)->next);
5266
    }
5267
 
5268
  /* Re-insert overlay segments at the head of the segment map.  */
5269
  *p_overlay = elf_tdata (abfd)->segment_map;
5270
  elf_tdata (abfd)->segment_map = m_overlay;
5271
 
5272
  return TRUE;
5273
}
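 
/* Editor's note: illustrative sketch, not part of the original source
   (guarded with #if 0).  The loop above partitions the segment map with
   the pointer-to-pointer idiom: overlay segments are unlinked onto a
   second list in their original order, and that list is then spliced in
   front of the remaining segments.  The same idiom over a hypothetical
   node type: */
#if 0
struct example_seg { struct example_seg *next; int is_overlay; };

static struct example_seg *
example_move_overlays_to_head (struct example_seg *head)
{
  struct example_seg *ovl_head = NULL;
  struct example_seg **p = &head, **p_overlay = &ovl_head;

  while (*p != NULL)
    if ((*p)->is_overlay)
      {
        struct example_seg *m = *p;
        *p = m->next;           /* Unlink from the main list.  */
        *p_overlay = m;         /* Append to the overlay list.  */
        p_overlay = &m->next;
      }
    else
      p = &(*p)->next;

  *p_overlay = head;            /* Splice: overlays first, the rest after.  */
  return ovl_head;
}
#endif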
5274
 
5275
/* Tweak the section type of .note.spu_name.  */
5276
 
5277
static bfd_boolean
5278
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5279
                       Elf_Internal_Shdr *hdr,
5280
                       asection *sec)
5281
{
5282
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5283
    hdr->sh_type = SHT_NOTE;
5284
  return TRUE;
5285
}
5286
 
5287
/* Tweak phdrs before writing them out.  */
5288
 
5289
static int
5290
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5291
{
5292
  const struct elf_backend_data *bed;
5293
  struct elf_obj_tdata *tdata;
5294
  Elf_Internal_Phdr *phdr, *last;
5295
  struct spu_link_hash_table *htab;
5296
  unsigned int count;
5297
  unsigned int i;
5298
 
5299
  if (info == NULL)
5300
    return TRUE;
5301
 
5302
  bed = get_elf_backend_data (abfd);
5303
  tdata = elf_tdata (abfd);
5304
  phdr = tdata->phdr;
5305
  count = tdata->program_header_size / bed->s->sizeof_phdr;
5306
  htab = spu_hash_table (info);
5307
  if (htab->num_overlays != 0)
5308
    {
5309
      struct elf_segment_map *m;
5310
      unsigned int o;
5311
 
5312
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
5313
        if (m->count != 0
5314
            && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5315
          {
5316
            /* Mark this as an overlay header.  */
5317
            phdr[i].p_flags |= PF_OVERLAY;
5318
 
5319
            if (htab->ovtab != NULL && htab->ovtab->size != 0
5320
                && htab->params->ovly_flavour != ovly_soft_icache)
5321
              {
5322
                bfd_byte *p = htab->ovtab->contents;
5323
                unsigned int off = o * 16 + 8;
5324
 
5325
                /* Write file_off into _ovly_table.  */
5326
                bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5327
              }
5328
          }
5329
      /* Soft-icache has its file offset put in .ovl.init.  */
5330
      if (htab->init != NULL && htab->init->size != 0)
5331
        {
5332
          bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5333
 
5334
          bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5335
        }
5336
    }
5337
 
5338
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5339
     of 16.  This should always be possible when using the standard
5340
     linker scripts, but don't create overlapping segments if
5341
     someone is playing games with linker scripts.  */
5342
  last = NULL;
5343
  for (i = count; i-- != 0; )
5344
    if (phdr[i].p_type == PT_LOAD)
5345
      {
5346
        unsigned adjust;
5347
 
5348
        adjust = -phdr[i].p_filesz & 15;
5349
        if (adjust != 0
5350
            && last != NULL
5351
            && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5352
          break;
5353
 
5354
        adjust = -phdr[i].p_memsz & 15;
5355
        if (adjust != 0
5356
            && last != NULL
5357
            && phdr[i].p_filesz != 0
5358
            && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5359
            && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5360
          break;
5361
 
5362
        if (phdr[i].p_filesz != 0)
5363
          last = &phdr[i];
5364
      }
5365
 
5366
  if (i == (unsigned int) -1)
5367
    for (i = count; i-- != 0; )
5368
      if (phdr[i].p_type == PT_LOAD)
5369
        {
5370
          unsigned adjust;
5371
 
5372
          adjust = -phdr[i].p_filesz & 15;
5373
          phdr[i].p_filesz += adjust;
5374
 
5375
          adjust = -phdr[i].p_memsz & 15;
5376
          phdr[i].p_memsz += adjust;
5377
        }
5378
 
5379
  return TRUE;
5380
}
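 
/* Editor's note: illustrative sketch, not part of the original source
   (guarded with #if 0).  "-x & 15" above is the padding needed to round x
   up to a multiple of 16, the DMA transfer quantum; e.g. for
   p_filesz == 0x1234 the adjustment is (-0x1234) & 15 == 12, giving a
   padded size of 0x1240.  */
#if 0
static bfd_vma
example_round_up_to_16 (bfd_vma x)
{
  return x + (-x & 15);         /* 0x1234 -> 0x1240, 0x1240 -> 0x1240.  */
}
#endif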
5381
 
5382
bfd_boolean
5383
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5384
{
5385
  struct spu_link_hash_table *htab = spu_hash_table (info);
5386
  if (htab->params->emit_fixups)
5387
    {
5388
      asection *sfixup = htab->sfixup;
5389
      int fixup_count = 0;
5390
      bfd *ibfd;
5391
      size_t size;
5392
 
5393
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
5394
        {
5395
          asection *isec;
5396
 
5397
          if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5398
            continue;
5399
 
5400
          /* Walk over each section attached to the input bfd.  */
5401
          for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5402
            {
5403
              Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5404
              bfd_vma base_end;
5405
 
5406
              /* If there aren't any relocs, then there's nothing more
5407
                 to do.  */
5408
              if ((isec->flags & SEC_ALLOC) == 0
5409
                  || (isec->flags & SEC_RELOC) == 0
5410
                  || isec->reloc_count == 0)
5411
                continue;
5412
 
5413
              /* Get the relocs.  */
5414
              internal_relocs =
5415
                _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5416
                                           info->keep_memory);
5417
              if (internal_relocs == NULL)
5418
                return FALSE;
5419
 
5420
              /* 1 quadword can contain up to 4 R_SPU_ADDR32
5421
                 relocations.  They are stored in a single word by
5422
                 saving the upper 28 bits of the address and setting the
5423
                 lower 4 bits to a bit mask of the words that have the
5424
                 relocation.  BASE_END keeps track of the next quadword. */
5425
              irela = internal_relocs;
5426
              irelaend = irela + isec->reloc_count;
5427
              base_end = 0;
5428
              for (; irela < irelaend; irela++)
5429
                if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5430
                    && irela->r_offset >= base_end)
5431
                  {
5432
                    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5433
                    fixup_count++;
5434
                  }
5435
            }
5436
        }
5437
 
5438
      /* We always have a NULL fixup as a sentinel.  */
5439
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5440
      if (!bfd_set_section_size (output_bfd, sfixup, size))
5441
        return FALSE;
5442
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5443
      if (sfixup->contents == NULL)
5444
        return FALSE;
5445
    }
5446
  return TRUE;
5447
}
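 
/* Editor's note: illustrative sketch, not part of the original source
   (guarded with #if 0).  The sizing loop above reserves one
   FIXUP_RECORD_SIZE entry per quadword containing at least one
   R_SPU_ADDR32 reloc, plus one all-zero sentinel record.  The same count
   over a plain array of reloc offsets, assumed sorted in increasing
   order: */
#if 0
static size_t
example_fixup_section_size (const bfd_vma *offsets, size_t n)
{
  size_t count = 0, i;
  bfd_vma base_end = 0;

  for (i = 0; i < n; i++)
    if (offsets[i] >= base_end)
      {
        base_end = (offsets[i] & ~(bfd_vma) 15) + 16;   /* Next quadword.  */
        count++;
      }

  return (count + 1) * FIXUP_RECORD_SIZE;               /* +1 for sentinel.  */
}
#endif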
5448
 
5449
#define TARGET_BIG_SYM          bfd_elf32_spu_vec
5450
#define TARGET_BIG_NAME         "elf32-spu"
5451
#define ELF_ARCH                bfd_arch_spu
5452
#define ELF_TARGET_ID           SPU_ELF_DATA
5453
#define ELF_MACHINE_CODE        EM_SPU
5454
/* This matches the alignment needed for DMA.  */
5455
#define ELF_MAXPAGESIZE         0x80
5456
#define elf_backend_rela_normal         1
5457
#define elf_backend_can_gc_sections     1
5458
 
5459
#define bfd_elf32_bfd_reloc_type_lookup         spu_elf_reloc_type_lookup
5460
#define bfd_elf32_bfd_reloc_name_lookup         spu_elf_reloc_name_lookup
5461
#define elf_info_to_howto                       spu_elf_info_to_howto
5462
#define elf_backend_count_relocs                spu_elf_count_relocs
5463
#define elf_backend_relocate_section            spu_elf_relocate_section
5464
#define elf_backend_finish_dynamic_sections     spu_elf_finish_dynamic_sections
5465
#define elf_backend_symbol_processing           spu_elf_backend_symbol_processing
5466
#define elf_backend_link_output_symbol_hook     spu_elf_output_symbol_hook
5467
#define elf_backend_object_p                    spu_elf_object_p
5468
#define bfd_elf32_new_section_hook              spu_elf_new_section_hook
5469
#define bfd_elf32_bfd_link_hash_table_create    spu_elf_link_hash_table_create
5470
 
5471
#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
5472
#define elf_backend_modify_segment_map          spu_elf_modify_segment_map
5473
#define elf_backend_modify_program_headers      spu_elf_modify_program_headers
5474
#define elf_backend_post_process_headers        spu_elf_post_process_headers
5475
#define elf_backend_fake_sections               spu_elf_fake_sections
5476
#define elf_backend_special_sections            spu_elf_special_sections
5477
#define bfd_elf32_bfd_final_link                spu_elf_final_link
5478
 
5479
#include "elf32-target.h"
