/* SPU specific support for 32-bit ELF

   Copyright 2006, 2007, 2008, 2009, 2010, 2011, 2012
   Free Software Foundation, Inc.

   This file is part of BFD, the Binary File Descriptor library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License along
   with this program; if not, write to the Free Software Foundation, Inc.,
   51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */

#include "sysdep.h"
#include "libiberty.h"
#include "bfd.h"
#include "bfdlink.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/spu.h"
#include "elf32-spu.h"

/* We use RELA style relocs.  Don't define USE_REL.  */

static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
                                           void *, asection *,
                                           bfd *, char **);

/* Values of type 'enum elf_spu_reloc_type' are used to index this
   array, so it must be declared in the order of that type.  */

static reloc_howto_type elf_howto_table[] = {
  HOWTO (R_SPU_NONE,       0, 0,  0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_NONE",
         FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,     4, 2, 10, FALSE, 14, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR10",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,     2, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16_HI",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR16_LO",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,     0, 2, 18, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR18",
         FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,     0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,      2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_REL16",
         FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,      0, 2,  7, FALSE, 14, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADDR7",
         FALSE, 0, 0x001fc000, FALSE),
  HOWTO (R_SPU_REL9,       2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9",
         FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,      2, 2,  9,  TRUE,  0, complain_overflow_signed,
         spu_elf_rel9,          "SPU_REL9I",
         FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,    0, 2, 10, FALSE, 14, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR10I",
         FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,    0, 2, 16, FALSE,  7, complain_overflow_signed,
         bfd_elf_generic_reloc, "SPU_ADDR16I",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,      0, 2, 32, TRUE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_REL32",
         FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,    0, 2, 16, FALSE,  7, complain_overflow_bitfield,
         bfd_elf_generic_reloc, "SPU_ADDR16X",
         FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,      0, 2, 32, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU32",
         FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,      0, 4, 64, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_PPU64",
         FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,      0, 0, 0, FALSE,  0, complain_overflow_dont,
         bfd_elf_generic_reloc, "SPU_ADD_PIC",
         FALSE, 0, 0x00000000, FALSE),
};

static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
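
/* Reading one entry above as a worked example (an editorial aside, not
   generated code): for R_SPU_ADDR16 the HOWTO arguments give rightshift 2,
   bitsize 16, bitpos 7 and dst_mask 0x007fff80, so the relocated value is
   shifted right by two and merged into bits 7..22 of the 32-bit instruction
   word, which is where the SPU 16-bit immediate field sits.  */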

static enum elf_spu_reloc_type
spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
{
  switch (code)
    {
    default:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_IMM10W:
      return R_SPU_ADDR10;
    case BFD_RELOC_SPU_IMM16W:
      return R_SPU_ADDR16;
    case BFD_RELOC_SPU_LO16:
      return R_SPU_ADDR16_LO;
    case BFD_RELOC_SPU_HI16:
      return R_SPU_ADDR16_HI;
    case BFD_RELOC_SPU_IMM18:
      return R_SPU_ADDR18;
    case BFD_RELOC_SPU_PCREL16:
      return R_SPU_REL16;
    case BFD_RELOC_SPU_IMM7:
      return R_SPU_ADDR7;
    case BFD_RELOC_SPU_IMM8:
      return R_SPU_NONE;
    case BFD_RELOC_SPU_PCREL9a:
      return R_SPU_REL9;
    case BFD_RELOC_SPU_PCREL9b:
      return R_SPU_REL9I;
    case BFD_RELOC_SPU_IMM10:
      return R_SPU_ADDR10I;
    case BFD_RELOC_SPU_IMM16:
      return R_SPU_ADDR16I;
    case BFD_RELOC_32:
      return R_SPU_ADDR32;
    case BFD_RELOC_32_PCREL:
      return R_SPU_REL32;
    case BFD_RELOC_SPU_PPU32:
      return R_SPU_PPU32;
    case BFD_RELOC_SPU_PPU64:
      return R_SPU_PPU64;
    case BFD_RELOC_SPU_ADD_PIC:
      return R_SPU_ADD_PIC;
    }
}

static void
spu_elf_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED,
                       arelent *cache_ptr,
                       Elf_Internal_Rela *dst)
{
  enum elf_spu_reloc_type r_type;

  r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
  BFD_ASSERT (r_type < R_SPU_max);
  cache_ptr->howto = &elf_howto_table[(int) r_type];
}

static reloc_howto_type *
spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           bfd_reloc_code_real_type code)
{
  enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);

  if (r_type == R_SPU_NONE)
    return NULL;

  return elf_howto_table + r_type;
}

static reloc_howto_type *
spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
                           const char *r_name)
{
  unsigned int i;

  for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
    if (elf_howto_table[i].name != NULL
        && strcasecmp (elf_howto_table[i].name, r_name) == 0)
      return &elf_howto_table[i];

  return NULL;
}

/* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */

static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
              void *data, asection *input_section,
              bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
                                  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
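
/* Worked example of the bit scatter above (illustrative values): a 9-bit
   result of 0x1ff becomes
   (0x1ff & 0x7f) | ((0x1ff & 0x180) << 7) | ((0x1ff & 0x180) << 16)
   = 0x7f | 0xc000 | 0x1800000 = 0x0180c07f.
   The SPU_REL9 dst_mask 0x0180007f then keeps bits 0..6 and 23..24, while
   the SPU_REL9I mask 0x0000c07f keeps bits 0..6 and 14..15, so the same
   scattered value serves both howtos.  */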

static bfd_boolean
spu_elf_new_section_hook (bfd *abfd, asection *sec)
{
  if (!sec->used_by_bfd)
    {
      struct _spu_elf_section_data *sdata;

      sdata = bfd_zalloc (abfd, sizeof (*sdata));
      if (sdata == NULL)
        return FALSE;
      sec->used_by_bfd = sdata;
    }

  return _bfd_elf_new_section_hook (abfd, sec);
}

/* Set up overlay info for executables.  */

static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
          {
            unsigned int j;

            ++num_ovl;
            if (last_phdr == NULL
                || ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
              ++num_buf;
            last_phdr = phdr;
            for (j = 1; j < elf_numsections (abfd); j++)
              {
                Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

                if (ELF_SECTION_SIZE (shdr, phdr) != 0
                    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
                  {
                    asection *sec = shdr->bfd_section;
                    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
                    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
                  }
              }
          }
    }
  return TRUE;
}

/* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
   strip --strip-unneeded will not remove them.  */

static void
spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
{
  if (sym->name != NULL
      && sym->section != bfd_abs_section_ptr
      && strncmp (sym->name, "_EAR_", 5) == 0)
    sym->flags |= BSF_KEEP;
}

/* SPU ELF linker hash table.  */

struct spu_link_hash_table
{
  struct elf_link_hash_table elf;

  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};

/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  struct got_entry *next;
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  bfd_vma stub_addr;
};

#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
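
/* Typical use of the accessor above, as seen throughout the functions
   below:

     struct spu_link_hash_table *htab = spu_hash_table (info);

   The elf_hash_table_id check means a bfd_link_info carrying a non-SPU
   hash table yields NULL rather than a misinterpreted pointer.  */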

struct call_info
{
  struct function_info *fun;
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};

struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};

struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  */
  struct function_info fun[1];
};

static struct function_info *find_function (asection *, bfd_vma,
                                            struct bfd_link_info *);

/* Create a spu ELF linker hash table.  */

static struct bfd_link_hash_table *
spu_elf_link_hash_table_create (bfd *abfd)
{
  struct spu_link_hash_table *htab;

  htab = bfd_malloc (sizeof (*htab));
  if (htab == NULL)
    return NULL;

  if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
                                      _bfd_elf_link_hash_newfunc,
                                      sizeof (struct elf_link_hash_entry),
                                      SPU_ELF_DATA))
    {
      free (htab);
      return NULL;
    }

  memset (&htab->ovtab, 0,
          sizeof (*htab) - offsetof (struct spu_link_hash_table, ovtab));

  htab->elf.init_got_refcount.refcount = 0;
  htab->elf.init_got_refcount.glist = NULL;
  htab->elf.init_got_offset.offset = 0;
  htab->elf.init_got_offset.glist = NULL;
  return &htab->elf.root;
}

void
spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
{
  bfd_vma max_branch_log2;

  struct spu_link_hash_table *htab = spu_hash_table (info);
  htab->params = params;
  htab->line_size_log2 = bfd_log2 (htab->params->line_size);
  htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);

  /* For the software i-cache, we provide a "from" list whose size
     is a power-of-two number of quadwords, big enough to hold one
     byte per outgoing branch.  Compute this number here.  */
  max_branch_log2 = bfd_log2 (htab->params->max_branch);
  htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
}
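
/* Sizing example for the computation above (illustrative numbers): with
   params->max_branch == 256 outgoing branches, max_branch_log2 is 8, so
   fromelem_size_log2 becomes 4, i.e. 16 quadwords (16 bytes each, 256 bytes
   total) per "from" list - one byte per branch, rounded up to a power-of-two
   number of quadwords.  */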

/* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
   to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
   *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */

static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
           Elf_Internal_Sym **symp,
           asection **symsecp,
           Elf_Internal_Sym **locsymsp,
           unsigned long r_symndx,
           bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
             || h->root.type == bfd_link_hash_warning)
        h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
        *hp = h;

      if (symp != NULL)
        *symp = NULL;

      if (symsecp != NULL)
        {
          asection *symsec = NULL;
          if (h->root.type == bfd_link_hash_defined
              || h->root.type == bfd_link_hash_defweak)
            symsec = h->root.u.def.section;
          *symsecp = symsec;
        }
    }
  else
    {
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
        {
          locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
          if (locsyms == NULL)
            locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
                                            symtab_hdr->sh_info,
                                            0, NULL, NULL, NULL);
          if (locsyms == NULL)
            return FALSE;
          *locsymsp = locsyms;
        }
      sym = locsyms + r_symndx;

      if (hp != NULL)
        *hp = NULL;

      if (symp != NULL)
        *symp = sym;

      if (symsecp != NULL)
        *symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}

/* Create the note section if not already present.  This is done early so
   that the linker maps the sections to the right place in the output.  */

bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
          || !bfd_set_section_alignment (ibfd, s, 4))
        return FALSE;

      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
        return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
        return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
              bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
        htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
               | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
        return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}

/* qsort predicate to sort sections by vma.  */

static int
sort_sections (const void *a, const void *b)
{
  const asection *const *s1 = a;
  const asection *const *s2 = b;
  bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;

  if (delta != 0)
    return delta < 0 ? -1 : 1;

  return (*s1)->index - (*s2)->index;
}

/* Identify overlays in the output bfd, and number them.
   Returns 0 on error, 1 if no overlays, 2 if overlays.  */

int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
        && (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
        && s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];
              vma_start = s0->vma;
              ovl_end = (s0->vma
                         + ((bfd_vma) 1
                            << (htab->num_lines_log2 + htab->line_size_log2)));
              --i;
              break;
            }
          else
            ovl_end = s->vma + s->size;
        }

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma >= ovl_end)
            break;

          /* A section in an overlay area called .ovl.init is not
             an overlay, in the sense that it might be loaded in
             by the overlay manager, but rather the initial
             section contents for the overlay buffer.  */
          if (strncmp (s->name, ".ovl.init", 9) != 0)
            {
              num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
              set_id = (num_buf == prev_buf)? set_id + 1 : 0;
              prev_buf = num_buf;

              if ((s->vma - vma_start) & (htab->params->line_size - 1))
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "does not start on a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }
              else if (s->size > htab->params->line_size)
                {
                  info->callbacks->einfo (_("%X%P: overlay section %A "
                                            "is larger than a cache line.\n"),
                                          s);
                  bfd_set_error (bfd_error_bad_value);
                  return 0;
                }

              alloc_sec[ovl_index++] = s;
              spu_elf_section_data (s)->u.o.ovl_index
                = (set_id << htab->num_lines_log2) + num_buf;
              spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
            }
        }

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              info->callbacks->einfo (_("%X%P: overlay section %A "
                                        "is not in cache area.\n"),
                                      alloc_sec[i-1]);
              bfd_set_error (bfd_error_bad_value);
              return 0;
            }
          else
            ovl_end = s->vma + s->size;
        }
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
         Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
        {
          s = alloc_sec[i];
          if (s->vma < ovl_end)
            {
              asection *s0 = alloc_sec[i - 1];

              if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
                {
                  ++num_buf;
                  if (strncmp (s0->name, ".ovl.init", 9) != 0)
                    {
                      alloc_sec[ovl_index] = s0;
                      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
                      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
                    }
                  else
                    ovl_end = s->vma + s->size;
                }
              if (strncmp (s->name, ".ovl.init", 9) != 0)
                {
                  alloc_sec[ovl_index] = s;
                  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
                  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
                  if (s0->vma != s->vma)
                    {
                      info->callbacks->einfo (_("%X%P: overlay sections %A "
                                                "and %A do not start at the "
                                                "same address.\n"),
                                              s0, s);
                      bfd_set_error (bfd_error_bad_value);
                      return 0;
                    }
                  if (ovl_end < s->vma + s->size)
                    ovl_end = s->vma + s->size;
                }
            }
          else
            ovl_end = s->vma + s->size;
        }
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
        return 0;

      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_undefined;
          h->ref_regular = 1;
          h->ref_regular_nonweak = 1;
          h->non_elf = 0;
        }
      htab->ovly_entry[i] = h;
    }

  return 2;
}

/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

#define BRA     0x30000000
#define BRASL   0x31000000
#define BR      0x32000000
#define BRSL    0x33000000
#define NOP     0x40200000
#define LNOP    0x00200000
#define ILA     0x42000000

/* Return true for all relative and absolute branch instructions.
   bra   00110000 0..
   brasl 00110001 0..
   br    00110010 0..
   brsl  00110011 0..
   brz   00100000 0..
   brnz  00100001 0..
   brhz  00100010 0..
   brhnz 00100011 0..  */

static bfd_boolean
is_branch (const unsigned char *insn)
{
  return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
}

/* Return true for all indirect branch instructions.
   bi     00110101 000
   bisl   00110101 001
   iret   00110101 010
   bisled 00110101 011
   biz    00100101 000
   binz   00100101 001
   bihz   00100101 010
   bihnz  00100101 011  */

static bfd_boolean
is_indirect_branch (const unsigned char *insn)
{
  return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
}

/* Return true for branch hint instructions.
   hbra  0001000..
   hbrr  0001001..  */

static bfd_boolean
is_hint (const unsigned char *insn)
{
  return (insn[0] & 0xfc) == 0x10;
}
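
/* Checking the masks above against the encodings they list, as a worked
   example: brsl has major opcode byte 0x33, and 0x33 & 0xec == 0x20, so
   is_branch accepts it (together with (insn[1] & 0x80) == 0 for the ninth
   opcode bit shown as the trailing 0); bisl has 0x35, and 0x35 & 0xef ==
   0x25, matching is_indirect_branch; hbra (0x10/0x11) and hbrr (0x12/0x13)
   both satisfy (insn[0] & 0xfc) == 0x10 in is_hint.  */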

/* True if INPUT_SECTION might need overlay stubs.  */

static bfd_boolean
maybe_needs_stubs (asection *input_section)
{
  /* No stubs for debug sections and suchlike.  */
  if ((input_section->flags & SEC_ALLOC) == 0)
    return FALSE;

  /* No stubs for link-once sections that will be discarded.  */
  if (input_section->output_section == bfd_abs_section_ptr)
    return FALSE;

  /* Don't create stubs for .eh_frame references.  */
  if (strcmp (input_section->name, ".eh_frame") == 0)
    return FALSE;

  return TRUE;
}

enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
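
/* Note on the enum above: br000_ovl_stub through br111_ovl_stub encode a
   branch stub together with a three-bit lrlive value, so later code can
   recover it as stub_type - br000_ovl_stub.  needs_ovl_stub below adds the
   lrlive bits taken from the branch encoding, and build_stub prefers that
   .brinfo-supplied value over its own liveness analysis.  */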

/* Return non-zero if this reloc symbol should go via an overlay stub.
   Return 2 if the stub must be in non-overlay area.  */

static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
                Elf_Internal_Sym *sym,
                asection *sym_sec,
                asection *input_section,
                Elf_Internal_Rela *irela,
                bfd_byte *contents,
                struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
        return ret;

      /* setjmp always goes via an overlay stub, because then the return
         and hence the longjmp goes via __ovly_return.  That magically
         makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
          && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
        ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      if (contents == NULL)
        {
          contents = insn;
          if (!bfd_get_section_contents (input_section->owner,
                                         input_section,
                                         contents,
                                         irela->r_offset, 4))
            return stub_error;
        }
      else
        contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
        {
          call = (contents[0] & 0xfd) == 0x31;
          if (call
              && sym_type != STT_FUNC
              && contents != insn)
            {
              /* It's common for people to write assembly and forget
                 to give function symbols the right type.  Handle
                 calls to such symbols, but warn so that (hopefully)
                 people will fix their code.  We need the symbol
                 type to be correct to distinguish function pointer
                 initialisation from other pointer initialisations.  */
              const char *sym_name;

              if (h != NULL)
                sym_name = h->root.root.string;
              else
                {
                  Elf_Internal_Shdr *symtab_hdr;
                  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
                  sym_name = bfd_elf_sym_name (input_section->owner,
                                               symtab_hdr,
                                               sym,
                                               sym_sec);
                }
              (*_bfd_error_handler) (_("warning: call to non-function"
                                       " symbol %s defined in %B"),
                                     sym_sec->owner, sym_name);

            }
        }
    }

  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
          && !(branch || hint)
          && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      if (branch)
        lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
        ret = call_ovl_stub;
      else
        ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}

static bfd_boolean
count_stub (struct spu_link_hash_table *htab,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela)
{
  unsigned int ovl = 0;
  struct got_entry *g, **head;
  bfd_vma addend;

  /* If this instruction is a branch or call, we need a stub
     for it.  One stub per function per overlay.
     If it isn't a branch, then we are taking the address of
     this function so need a stub in the non-overlay area
     for it.  One stub per function.  */
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    {
      if (elf_local_got_ents (ibfd) == NULL)
        {
          bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
                               * sizeof (*elf_local_got_ents (ibfd)));
          elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
          if (elf_local_got_ents (ibfd) == NULL)
            return FALSE;
        }
      head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      htab->stub_count[ovl] += 1;
      return TRUE;
    }

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (ovl == 0)
    {
      struct got_entry *gnext;

      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && g->ovl == 0)
          break;

      if (g == NULL)
        {
          /* Need a new non-overlay area stub.  Zap other stubs.  */
          for (g = *head; g != NULL; g = gnext)
            {
              gnext = g->next;
              if (g->addend == addend)
                {
                  htab->stub_count[g->ovl] -= 1;
                  free (g);
                }
            }
        }
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
    }

  if (g == NULL)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->addend = addend;
      g->stub_addr = (bfd_vma) -1;
      g->next = *head;
      *head = g;

      htab->stub_count[ovl] += 1;
    }

  return TRUE;
}

/* Support two sizes of overlay stubs, a slower more compact stub of two
   instructions, and a faster stub of four instructions.
   Soft-icache stubs are four or eight words.  */

static unsigned int
ovl_stub_size (struct spu_elf_params *params)
{
  return 16 << params->ovly_flavour >> params->compact_stub;
}

static unsigned int
ovl_stub_size_log2 (struct spu_elf_params *params)
{
  return 4 + params->ovly_flavour - params->compact_stub;
}
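
/* Spelling out the expression above (assuming ovly_normal is 0 and
   ovly_soft_icache is 1, which the shift-by-flavour arithmetic relies on):
   a normal four-instruction stub is 16 << 0 >> 0 = 16 bytes, its compact
   two-instruction form is 16 >> 1 = 8 bytes, and a compact soft-icache stub
   is 16 << 1 >> 1 = 16 bytes (four words), matching the comment above.  */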

/* Two instruction overlay stubs look like:

   brsl $75,__ovly_load
   .word target_ovl_and_address

   ovl_and_address is a word with the overlay number in the top 14 bits
   and local store address in the bottom 18 bits.

   Four instruction overlay stubs look like:

   ila $78,ovl_number
   lnop
   ila $79,target_address
   br __ovly_load

   Software icache stubs are:

   .word target_index
   .word target_ia;
   .word lrlive_branchlocalstoreaddr;
   brasl $75,__icache_br_handler
   .quad xor_pattern
*/
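
/* For the compact two-instruction stub, build_stub below packs that word as
   (dest & 0x3ffff) | (dest_ovl << 18).  As an illustrative example, overlay
   number 3 with a local-store target of 0x2400 gives 0x000c2400: the 18 low
   bits hold the address and the 14 high bits hold the overlay number.  */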

static bfd_boolean
build_stub (struct bfd_link_info *info,
            bfd *ibfd,
            asection *isec,
            enum _stub_type stub_type,
            struct elf_link_hash_entry *h,
            const Elf_Internal_Rela *irela,
            bfd_vma dest,
            asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
        return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
        g->br_addr = (irela->r_offset
                      + isec->output_offset
                      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      for (g = *head; g != NULL; g = g->next)
        if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
          break;
      if (g == NULL)
        abort ();

      if (g->ovl == 0 && ovl != 0)
        return TRUE;

      if (g->stub_addr != (bfd_vma) -1)
        return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
        + htab->ovly_entry[0]->root.u.def.section->output_offset
        + htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
                  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
      else
        bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
                    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
           && htab->params->compact_stub)
    {
      if (!BRA_STUBS)
        bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      else
        bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
                  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
           && htab->params->compact_stub)
    {
      lrlive = 0;
      if (stub_type == nonovl_stub)
        ;
      else if (stub_type == call_ovl_stub)
        /* A brsl makes lr live and *(*sp+16) is live.
           Tail calls have the same liveness.  */
        lrlive = 5;
      else if (!htab->params->lrlive_analysis)
        /* Assume stack frame and lr save.  */
        lrlive = 1;
      else if (irela != NULL)
        {
          /* Analyse branch instructions.  */
          struct function_info *caller;
          bfd_vma off;

          caller = find_function (isec, irela->r_offset, info);
          if (caller->start == NULL)
            off = irela->r_offset;
          else
            {
              struct function_info *found = NULL;

              /* Find the earliest piece of this function that
                 has frame adjusting instructions.  We might
                 see dynamic frame adjustment (eg. for alloca)
                 in some later piece, but functions using
                 alloca always set up a frame earlier.  Frame
                 setup instructions are always in one piece.  */
              if (caller->lr_store != (bfd_vma) -1
                  || caller->sp_adjust != (bfd_vma) -1)
                found = caller;
              while (caller->start != NULL)
                {
                  caller = caller->start;
                  if (caller->lr_store != (bfd_vma) -1
                      || caller->sp_adjust != (bfd_vma) -1)
                    found = caller;
                }
              if (found != NULL)
                caller = found;
              off = (bfd_vma) -1;
            }

          if (off > caller->sp_adjust)
            {
              if (off > caller->lr_store)
                /* Only *(*sp+16) is live.  */
                lrlive = 1;
              else
                /* If no lr save, then we must be in a
                   leaf function with a frame.
                   lr is still live.  */
                lrlive = 4;
            }
          else if (off > caller->lr_store)
            {
              /* Between lr save and stack adjust.  */
              lrlive = 3;
              /* This should never happen since prologues won't
                 be split here.  */
              BFD_ASSERT (0);
            }
          else
            /* On entry to function.  */
            lrlive = 5;

          if (stub_type != br000_ovl_stub
              && lrlive != stub_type - br000_ovl_stub)
            info->callbacks->einfo (_("%A:0x%v lrlive .brinfo (%u) differs "
                                      "from analysis (%u)\n"),
                                    isec, irela->r_offset, lrlive,
                                    stub_type - br000_ovl_stub);
        }

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
        lrlive = stub_type - br000_ovl_stub;

      if (ovl == 0)
        to = (htab->ovly_entry[1]->root.u.def.value
              + htab->ovly_entry[1]->root.u.def.section->output_offset
              + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
         set up an xor pattern that can be used by the icache manager
         to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
        {
          /* Except in the case of _SPUEAR_ stubs, the branch in
             question is the one in the stub itself.  */
          BFD_ASSERT (stub_type == nonovl_stub);
          g->br_addr = g->stub_addr;
          br_dest = to;
        }

      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
                  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
                  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
                  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
        patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
                  sec->contents + sec->size + 12);

      if (ovl == 0)
        /* Extra space for linked list entries.  */
        sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
        len += strlen (h->root.root.string);
      else
        len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
        add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
        len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
        return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
        strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
        sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
                 dest_sec->id & 0xffffffff,
                 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
        sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
        return FALSE;
      if (h->root.type == bfd_link_hash_new)
        {
          h->root.type = bfd_link_hash_defined;
          h->root.u.def.section = sec;
          h->size = ovl_stub_size (htab->params);
          h->root.u.def.value = sec->size - h->size;
          h->type = STT_FUNC;
          h->ref_regular = 1;
          h->def_regular = 1;
          h->ref_regular_nonweak = 1;
          h->forced_local = 1;
          h->non_elf = 0;
        }
    }

  return TRUE;
}

/* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
   symbols.  */

static bfd_boolean
allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
    }

  return TRUE;
}

static bfd_boolean
build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
{
  /* Symbols starting with _SPUEAR_ need a stub because they may be
     invoked by the PPU.  */
  struct bfd_link_info *info = inf;
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection *sym_sec;

  if ((h->root.type == bfd_link_hash_defined
       || h->root.type == bfd_link_hash_defweak)
      && h->def_regular
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
      && (sym_sec = h->root.u.def.section) != NULL
      && sym_sec->output_section != bfd_abs_section_ptr
      && spu_elf_section_data (sym_sec->output_section) != NULL
      && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
          || htab->params->non_overlay_stubs))
    {
      return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
                         h->root.u.def.value, sym_sec);
    }

  return TRUE;
}
1518
 
1519
/* Size or build stubs.  */
1520
 
1521
static bfd_boolean
1522
process_stubs (struct bfd_link_info *info, bfd_boolean build)
1523
{
1524
  struct spu_link_hash_table *htab = spu_hash_table (info);
1525
  bfd *ibfd;
1526
 
1527
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
1528
    {
1529
      extern const bfd_target bfd_elf32_spu_vec;
1530
      Elf_Internal_Shdr *symtab_hdr;
1531
      asection *isec;
1532
      Elf_Internal_Sym *local_syms = NULL;
1533
 
1534
      if (ibfd->xvec != &bfd_elf32_spu_vec)
1535
        continue;
1536
 
1537
      /* We'll need the symbol table in a second.  */
1538
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
1539
      if (symtab_hdr->sh_info == 0)
1540
        continue;
1541
 
1542
      /* Walk over each section attached to the input bfd.  */
1543
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
1544
        {
1545
          Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
1546
 
1547
          /* If there aren't any relocs, then there's nothing more to do.  */
1548
          if ((isec->flags & SEC_RELOC) == 0
1549
              || isec->reloc_count == 0)
1550
            continue;
1551
 
1552
          if (!maybe_needs_stubs (isec))
1553
            continue;
1554
 
1555
          /* Get the relocs.  */
1556
          internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
1557
                                                       info->keep_memory);
1558
          if (internal_relocs == NULL)
1559
            goto error_ret_free_local;
1560
 
1561
          /* Now examine each relocation.  */
1562
          irela = internal_relocs;
1563
          irelaend = irela + isec->reloc_count;
1564
          for (; irela < irelaend; irela++)
1565
            {
1566
              enum elf_spu_reloc_type r_type;
1567
              unsigned int r_indx;
1568
              asection *sym_sec;
1569
              Elf_Internal_Sym *sym;
1570
              struct elf_link_hash_entry *h;
1571
              enum _stub_type stub_type;
1572
 
1573
              r_type = ELF32_R_TYPE (irela->r_info);
1574
              r_indx = ELF32_R_SYM (irela->r_info);
1575
 
1576
              if (r_type >= R_SPU_max)
1577
                {
1578
                  bfd_set_error (bfd_error_bad_value);
1579
                error_ret_free_internal:
1580
                  if (elf_section_data (isec)->relocs != internal_relocs)
1581
                    free (internal_relocs);
1582
                error_ret_free_local:
1583
                  if (local_syms != NULL
1584
                      && (symtab_hdr->contents
1585
                          != (unsigned char *) local_syms))
1586
                    free (local_syms);
1587
                  return FALSE;
1588
                }
1589
 
1590
              /* Determine the reloc target section.  */
1591
              if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
1592
                goto error_ret_free_internal;
1593
 
1594
              stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
1595
                                          NULL, info);
1596
              if (stub_type == no_stub)
1597
                continue;
1598
              else if (stub_type == stub_error)
1599
                goto error_ret_free_internal;
1600
 
1601
              if (htab->stub_count == NULL)
1602
                {
1603
                  bfd_size_type amt;
1604
                  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
1605
                  htab->stub_count = bfd_zmalloc (amt);
1606
                  if (htab->stub_count == NULL)
1607
                    goto error_ret_free_internal;
1608
                }
1609
 
1610
              if (!build)
1611
                {
1612
                  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
1613
                    goto error_ret_free_internal;
1614
                }
1615
              else
1616
                {
1617
                  bfd_vma dest;
1618
 
1619
                  if (h != NULL)
1620
                    dest = h->root.u.def.value;
1621
                  else
1622
                    dest = sym->st_value;
1623
                  dest += irela->r_addend;
1624
                  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
1625
                                   dest, sym_sec))
1626
                    goto error_ret_free_internal;
1627
                }
1628
            }
1629
 
1630
          /* We're done with the internal relocs; free them.  */
1631
          if (elf_section_data (isec)->relocs != internal_relocs)
1632
            free (internal_relocs);
1633
        }
1634
 
1635
      if (local_syms != NULL
1636
          && symtab_hdr->contents != (unsigned char *) local_syms)
1637
        {
1638
          if (!info->keep_memory)
1639
            free (local_syms);
1640
          else
1641
            symtab_hdr->contents = (unsigned char *) local_syms;
1642
        }
1643
    }
1644
 
1645
  return TRUE;
1646
}
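
/* Note that process_stubs runs twice over the same relocations.
   spu_elf_size_stubs below calls it with BUILD false so that count_stub
   merely tallies how many stubs each overlay needs, and
   spu_elf_build_stubs later calls it with BUILD true to emit the stub
   code via build_stub.  A rough sketch of the sequence (illustrative
   only, error handling omitted):

     process_stubs (info, FALSE);    -- counting pass (count_stub)
     ... create htab->stub_sec[] and set their sizes ...
     process_stubs (info, TRUE);     -- building pass (build_stub)

   Both passes must see exactly the same stubs, otherwise the
   "stubs don't match calculated size" check in spu_elf_build_stubs
   triggers.  */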
1647
 
1648
/* Allocate space for overlay call and return stubs.
1649
   Return 0 on error, 1 if no overlays, 2 otherwise.  */
1650
 
1651
int
1652
spu_elf_size_stubs (struct bfd_link_info *info)
1653
{
1654
  struct spu_link_hash_table *htab;
1655
  bfd *ibfd;
1656
  bfd_size_type amt;
1657
  flagword flags;
1658
  unsigned int i;
1659
  asection *stub;
1660
 
1661
  if (!process_stubs (info, FALSE))
1662
    return 0;
1663
 
1664
  htab = spu_hash_table (info);
1665
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
1666
  if (htab->stub_err)
1667
    return 0;
1668
 
1669
  ibfd = info->input_bfds;
1670
  if (htab->stub_count != NULL)
1671
    {
1672
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
1673
      htab->stub_sec = bfd_zmalloc (amt);
1674
      if (htab->stub_sec == NULL)
1675
        return 0;
1676
 
1677
      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
1678
               | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
1679
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1680
      htab->stub_sec[0] = stub;
1681
      if (stub == NULL
1682
          || !bfd_set_section_alignment (ibfd, stub,
1683
                                         ovl_stub_size_log2 (htab->params)))
1684
        return 0;
1685
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
1686
      if (htab->params->ovly_flavour == ovly_soft_icache)
1687
        /* Extra space for linked list entries.  */
1688
        stub->size += htab->stub_count[0] * 16;
1689
 
1690
      for (i = 0; i < htab->num_overlays; ++i)
1691
        {
1692
          asection *osec = htab->ovl_sec[i];
1693
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1694
          stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
1695
          htab->stub_sec[ovl] = stub;
1696
          if (stub == NULL
1697
              || !bfd_set_section_alignment (ibfd, stub,
1698
                                             ovl_stub_size_log2 (htab->params)))
1699
            return 0;
1700
          stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
1701
        }
1702
    }
1703
 
1704
  if (htab->params->ovly_flavour == ovly_soft_icache)
1705
    {
1706
      /* Space for icache manager tables.
1707
         a) Tag array, one quadword per cache line.
1708
         b) Rewrite "to" list, one quadword per cache line.
1709
         c) Rewrite "from" list, one byte per outgoing branch (rounded up to
1710
            a power-of-two number of full quadwords) per cache line.  */
1711
 
1712
      flags = SEC_ALLOC;
1713
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1714
      if (htab->ovtab == NULL
1715
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1716
        return 0;
1717
 
1718
      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
1719
                          << htab->num_lines_log2;
1720
 
1721
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1722
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
1723
      if (htab->init == NULL
1724
          || !bfd_set_section_alignment (ibfd, htab->init, 4))
1725
        return 0;
1726
 
1727
      htab->init->size = 16;
1728
    }
1729
  else if (htab->stub_count == NULL)
1730
    return 1;
1731
  else
1732
    {
1733
      /* htab->ovtab consists of two arrays.
1734
         .      struct {
1735
         .        u32 vma;
1736
         .        u32 size;
1737
         .        u32 file_off;
1738
         .        u32 buf;
1739
         .      } _ovly_table[];
1740
         .
1741
         .      struct {
1742
         .        u32 mapped;
1743
         .      } _ovly_buf_table[];
1744
         .  */
1745
 
1746
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
1747
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
1748
      if (htab->ovtab == NULL
1749
          || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
1750
        return 0;
1751
 
1752
      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
1753
    }
1754
 
1755
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
1756
  if (htab->toe == NULL
1757
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
1758
    return 0;
1759
  htab->toe->size = 16;
1760
 
1761
  return 2;
1762
}
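
/* A concrete, made-up example of the sizing above: with two overlays,
   the normal overlay flavour and stub counts of {5, 3, 4}, three .stub
   sections are created with sizes 5*S, 3*S and 4*S bytes, where S is
   ovl_stub_size (htab->params), and .ovtab is sized
   2*16 + 16 + num_buf*4 bytes.  The soft-icache flavour instead adds
   16 bytes per stub to the first stub section for its linked list
   entries, and sizes .ovtab from the cache geometry.  */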
1763
 
1764
/* Called from ld to place overlay manager data sections.  This is done
1765
   after the overlay manager itself is loaded, mainly so that the
1766
   linker's htab->init section is placed after any other .ovl.init
1767
   sections.  */
1768
 
1769
void
1770
spu_elf_place_overlay_data (struct bfd_link_info *info)
1771
{
1772
  struct spu_link_hash_table *htab = spu_hash_table (info);
1773
  unsigned int i;
1774
 
1775
  if (htab->stub_sec != NULL)
1776
    {
1777
      (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1778
 
1779
      for (i = 0; i < htab->num_overlays; ++i)
1780
        {
1781
          asection *osec = htab->ovl_sec[i];
1782
          unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1783
          (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1784
        }
1785
    }
1786
 
1787
  if (htab->params->ovly_flavour == ovly_soft_icache)
1788
    (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1789
 
1790
  if (htab->ovtab != NULL)
1791
    {
1792
      const char *ovout = ".data";
1793
      if (htab->params->ovly_flavour == ovly_soft_icache)
1794
        ovout = ".bss";
1795
      (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1796
    }
1797
 
1798
  if (htab->toe != NULL)
1799
    (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1800
}
1801
 
1802
/* Functions to handle embedded spu_ovl.o object.  */
1803
 
1804
static void *
1805
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
1806
{
1807
  return stream;
1808
}
1809
 
1810
static file_ptr
1811
ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1812
               void *stream,
1813
               void *buf,
1814
               file_ptr nbytes,
1815
               file_ptr offset)
1816
{
1817
  struct _ovl_stream *os;
1818
  size_t count;
1819
  size_t max;
1820
 
1821
  os = (struct _ovl_stream *) stream;
1822
  max = (const char *) os->end - (const char *) os->start;
1823
 
1824
  if ((ufile_ptr) offset >= max)
1825
    return 0;
1826
 
1827
  count = nbytes;
1828
  if (count > max - offset)
1829
    count = max - offset;
1830
 
1831
  memcpy (buf, (const char *) os->start + offset, count);
1832
  return count;
1833
}
1834
 
1835
bfd_boolean
1836
spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1837
{
1838
  *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1839
                              "elf32-spu",
1840
                              ovl_mgr_open,
1841
                              (void *) stream,
1842
                              ovl_mgr_pread,
1843
                              NULL,
1844
                              NULL);
1845
  return *ovl_bfd != NULL;
1846
}
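
/* An illustrative sketch of how the embedded overlay manager object is
   opened.  The names ovl_image and ovl_image_size are placeholders
   here; the real image and its _ovl_stream are supplied by the linker
   emulation code.

     static struct _ovl_stream mgr_stream;
     bfd *mgr_bfd;

     mgr_stream.start = ovl_image;
     mgr_stream.end = ovl_image + ovl_image_size;
     if (!spu_elf_open_builtin_lib (&mgr_bfd, &mgr_stream))
       ...

   bfd then reads the object through ovl_mgr_open and ovl_mgr_pread
   above, which simply serve bytes from that in-memory image.  */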
1847
 
1848
static unsigned int
1849
overlay_index (asection *sec)
1850
{
1851
  if (sec == NULL
1852
      || sec->output_section == bfd_abs_section_ptr)
1853
    return 0;
1854
  return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1855
}
1856
 
1857
/* Define an STT_OBJECT symbol.  */
1858
 
1859
static struct elf_link_hash_entry *
1860
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
1861
{
1862
  struct elf_link_hash_entry *h;
1863
 
1864
  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
1865
  if (h == NULL)
1866
    return NULL;
1867
 
1868
  if (h->root.type != bfd_link_hash_defined
1869
      || !h->def_regular)
1870
    {
1871
      h->root.type = bfd_link_hash_defined;
1872
      h->root.u.def.section = htab->ovtab;
1873
      h->type = STT_OBJECT;
1874
      h->ref_regular = 1;
1875
      h->def_regular = 1;
1876
      h->ref_regular_nonweak = 1;
1877
      h->non_elf = 0;
1878
    }
1879
  else if (h->root.u.def.section->owner != NULL)
1880
    {
1881
      (*_bfd_error_handler) (_("%B is not allowed to define %s"),
1882
                             h->root.u.def.section->owner,
1883
                             h->root.root.string);
1884
      bfd_set_error (bfd_error_bad_value);
1885
      return NULL;
1886
    }
1887
  else
1888
    {
1889
      (*_bfd_error_handler) (_("you are not allowed to define %s in a script"),
1890
                             h->root.root.string);
1891
      bfd_set_error (bfd_error_bad_value);
1892
      return NULL;
1893
    }
1894
 
1895
  return h;
1896
}
1897
 
1898
/* Fill in all stubs and the overlay tables.  */
1899
 
1900
static bfd_boolean
1901
spu_elf_build_stubs (struct bfd_link_info *info)
1902
{
1903
  struct spu_link_hash_table *htab = spu_hash_table (info);
1904
  struct elf_link_hash_entry *h;
1905
  bfd_byte *p;
1906
  asection *s;
1907
  bfd *obfd;
1908
  unsigned int i;
1909
 
1910
  if (htab->num_overlays != 0)
1911
    {
1912
      for (i = 0; i < 2; i++)
1913
        {
1914
          h = htab->ovly_entry[i];
1915
          if (h != NULL
1916
              && (h->root.type == bfd_link_hash_defined
1917
                  || h->root.type == bfd_link_hash_defweak)
1918
              && h->def_regular)
1919
            {
1920
              s = h->root.u.def.section->output_section;
1921
              if (spu_elf_section_data (s)->u.o.ovl_index)
1922
                {
1923
                  (*_bfd_error_handler) (_("%s in overlay section"),
1924
                                         h->root.root.string);
1925
                  bfd_set_error (bfd_error_bad_value);
1926
                  return FALSE;
1927
                }
1928
            }
1929
        }
1930
    }
1931
 
1932
  if (htab->stub_sec != NULL)
1933
    {
1934
      for (i = 0; i <= htab->num_overlays; i++)
1935
        if (htab->stub_sec[i]->size != 0)
1936
          {
1937
            htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
1938
                                                      htab->stub_sec[i]->size);
1939
            if (htab->stub_sec[i]->contents == NULL)
1940
              return FALSE;
1941
            htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
1942
            htab->stub_sec[i]->size = 0;
1943
          }
1944
 
1945
      /* Fill in all the stubs.  */
1946
      process_stubs (info, TRUE);
1947
      if (!htab->stub_err)
1948
        elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);
1949
 
1950
      if (htab->stub_err)
1951
        {
1952
          (*_bfd_error_handler) (_("overlay stub relocation overflow"));
1953
          bfd_set_error (bfd_error_bad_value);
1954
          return FALSE;
1955
        }
1956
 
1957
      for (i = 0; i <= htab->num_overlays; i++)
1958
        {
1959
          if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
1960
            {
1961
              (*_bfd_error_handler)  (_("stubs don't match calculated size"));
1962
              bfd_set_error (bfd_error_bad_value);
1963
              return FALSE;
1964
            }
1965
          htab->stub_sec[i]->rawsize = 0;
1966
        }
1967
    }
1968
 
1969
  if (htab->ovtab == NULL || htab->ovtab->size == 0)
1970
    return TRUE;
1971
 
1972
  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
1973
  if (htab->ovtab->contents == NULL)
1974
    return FALSE;
1975
 
1976
  p = htab->ovtab->contents;
1977
  if (htab->params->ovly_flavour == ovly_soft_icache)
1978
    {
1979
      bfd_vma off;
1980
 
1981
      h = define_ovtab_symbol (htab, "__icache_tag_array");
1982
      if (h == NULL)
1983
        return FALSE;
1984
      h->root.u.def.value = 0;
1985
      h->size = 16 << htab->num_lines_log2;
1986
      off = h->size;
1987
 
1988
      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
1989
      if (h == NULL)
1990
        return FALSE;
1991
      h->root.u.def.value = 16 << htab->num_lines_log2;
1992
      h->root.u.def.section = bfd_abs_section_ptr;
1993
 
1994
      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
1995
      if (h == NULL)
1996
        return FALSE;
1997
      h->root.u.def.value = off;
1998
      h->size = 16 << htab->num_lines_log2;
1999
      off += h->size;
2000
 
2001
      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
2002
      if (h == NULL)
2003
        return FALSE;
2004
      h->root.u.def.value = 16 << htab->num_lines_log2;
2005
      h->root.u.def.section = bfd_abs_section_ptr;
2006
 
2007
      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
2008
      if (h == NULL)
2009
        return FALSE;
2010
      h->root.u.def.value = off;
2011
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
2012
      off += h->size;
2013
 
2014
      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
2015
      if (h == NULL)
2016
        return FALSE;
2017
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
2018
                                   + htab->num_lines_log2);
2019
      h->root.u.def.section = bfd_abs_section_ptr;
2020
 
2021
      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
2022
      if (h == NULL)
2023
        return FALSE;
2024
      h->root.u.def.value = htab->fromelem_size_log2;
2025
      h->root.u.def.section = bfd_abs_section_ptr;
2026
 
2027
      h = define_ovtab_symbol (htab, "__icache_base");
2028
      if (h == NULL)
2029
        return FALSE;
2030
      h->root.u.def.value = htab->ovl_sec[0]->vma;
2031
      h->root.u.def.section = bfd_abs_section_ptr;
2032
      h->size = htab->num_buf << htab->line_size_log2;
2033
 
2034
      h = define_ovtab_symbol (htab, "__icache_linesize");
2035
      if (h == NULL)
2036
        return FALSE;
2037
      h->root.u.def.value = 1 << htab->line_size_log2;
2038
      h->root.u.def.section = bfd_abs_section_ptr;
2039
 
2040
      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
2041
      if (h == NULL)
2042
        return FALSE;
2043
      h->root.u.def.value = htab->line_size_log2;
2044
      h->root.u.def.section = bfd_abs_section_ptr;
2045
 
2046
      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
2047
      if (h == NULL)
2048
        return FALSE;
2049
      h->root.u.def.value = -htab->line_size_log2;
2050
      h->root.u.def.section = bfd_abs_section_ptr;
2051
 
2052
      h = define_ovtab_symbol (htab, "__icache_cachesize");
2053
      if (h == NULL)
2054
        return FALSE;
2055
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
2056
      h->root.u.def.section = bfd_abs_section_ptr;
2057
 
2058
      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
2059
      if (h == NULL)
2060
        return FALSE;
2061
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
2062
      h->root.u.def.section = bfd_abs_section_ptr;
2063
 
2064
      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
2065
      if (h == NULL)
2066
        return FALSE;
2067
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
2068
      h->root.u.def.section = bfd_abs_section_ptr;
2069
 
2070
      if (htab->init != NULL && htab->init->size != 0)
2071
        {
2072
          htab->init->contents = bfd_zalloc (htab->init->owner,
2073
                                             htab->init->size);
2074
          if (htab->init->contents == NULL)
2075
            return FALSE;
2076
 
2077
          h = define_ovtab_symbol (htab, "__icache_fileoff");
2078
          if (h == NULL)
2079
            return FALSE;
2080
          h->root.u.def.value = 0;
2081
          h->root.u.def.section = htab->init;
2082
          h->size = 8;
2083
        }
2084
    }
2085
  else
2086
    {
2087
      /* Write out _ovly_table.  */
2088
      /* Set the low bit of .size to mark the non-overlay area as present.  */
2089
      p[7] = 1;
2090
      obfd = htab->ovtab->output_section->owner;
2091
      for (s = obfd->sections; s != NULL; s = s->next)
2092
        {
2093
          unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;
2094
 
2095
          if (ovl_index != 0)
2096
            {
2097
              unsigned long off = ovl_index * 16;
2098
              unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;
2099
 
2100
              bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
2101
              bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
2102
                          p + off + 4);
2103
              /* file_off written later in spu_elf_modify_program_headers.  */
2104
              bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
2105
            }
2106
        }
2107
 
2108
      h = define_ovtab_symbol (htab, "_ovly_table");
2109
      if (h == NULL)
2110
        return FALSE;
2111
      h->root.u.def.value = 16;
2112
      h->size = htab->num_overlays * 16;
2113
 
2114
      h = define_ovtab_symbol (htab, "_ovly_table_end");
2115
      if (h == NULL)
2116
        return FALSE;
2117
      h->root.u.def.value = htab->num_overlays * 16 + 16;
2118
      h->size = 0;
2119
 
2120
      h = define_ovtab_symbol (htab, "_ovly_buf_table");
2121
      if (h == NULL)
2122
        return FALSE;
2123
      h->root.u.def.value = htab->num_overlays * 16 + 16;
2124
      h->size = htab->num_buf * 4;
2125
 
2126
      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
2127
      if (h == NULL)
2128
        return FALSE;
2129
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
2130
      h->size = 0;
2131
    }
2132
 
2133
  h = define_ovtab_symbol (htab, "_EAR_");
2134
  if (h == NULL)
2135
    return FALSE;
2136
  h->root.u.def.section = htab->toe;
2137
  h->root.u.def.value = 0;
2138
  h->size = 16;
2139
 
2140
  return TRUE;
2141
}
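
/* For the normal overlay flavour the resulting .ovtab contents look
   roughly like this, assuming two overlays and one buffer (offsets in
   bytes):

     offset  0: dummy entry for the non-overlay area; p[7] = 1 above
                sets the low bit of its size word
     offset 16: _ovly_table[0] = { vma, rounded size, file_off, buf }
                for the section with ovl_index 1
     offset 32: _ovly_table[1], likewise for ovl_index 2
     offset 48: _ovly_buf_table[0], the "mapped" word for buffer 1

   file_off is filled in later by spu_elf_modify_program_headers.  */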
2142
 
2143
/* Check that all loadable section VMAs lie in the range
2144
   LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
2145
 
2146
asection *
2147
spu_elf_check_vma (struct bfd_link_info *info)
2148
{
2149
  struct elf_segment_map *m;
2150
  unsigned int i;
2151
  struct spu_link_hash_table *htab = spu_hash_table (info);
2152
  bfd *abfd = info->output_bfd;
2153
  bfd_vma hi = htab->params->local_store_hi;
2154
  bfd_vma lo = htab->params->local_store_lo;
2155
 
2156
  htab->local_store = hi + 1 - lo;
2157
 
2158
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
2159
    if (m->p_type == PT_LOAD)
2160
      for (i = 0; i < m->count; i++)
2161
        if (m->sections[i]->size != 0
2162
            && (m->sections[i]->vma < lo
2163
                || m->sections[i]->vma > hi
2164
                || m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2165
          return m->sections[i];
2166
 
2167
  return NULL;
2168
}
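
/* local_store_lo and local_store_hi normally describe the SPU's 256 KiB
   local store, e.g. lo = 0 and hi = 0x3ffff, giving a local_store of
   0x40000 bytes; the exact values are whatever the linker passed in
   via htab->params.  */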
2169
 
2170
/* OFFSET in SEC (presumably) is the beginning of a function prologue.
2171
   Search for stack adjusting insns, and return the sp delta.
2172
   If a store of lr is found save the instruction offset to *LR_STORE.
2173
   If a stack adjusting instruction is found, save that offset to
2174
   *SP_ADJUST.  */
2175
 
2176
static int
2177
find_function_stack_adjust (asection *sec,
2178
                            bfd_vma offset,
2179
                            bfd_vma *lr_store,
2180
                            bfd_vma *sp_adjust)
2181
{
2182
  int reg[128];
2183
 
2184
  memset (reg, 0, sizeof (reg));
2185
  for ( ; offset + 4 <= sec->size; offset += 4)
2186
    {
2187
      unsigned char buf[4];
2188
      int rt, ra;
2189
      int imm;
2190
 
2191
      /* Assume no relocs on stack adjusting insns.  */
2192
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
2193
        break;
2194
 
2195
      rt = buf[3] & 0x7f;
2196
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);
2197
 
2198
      if (buf[0] == 0x24 /* stqd */)
2199
        {
2200
          if (rt == 0 /* lr */ && ra == 1 /* sp */)
2201
            *lr_store = offset;
2202
          continue;
2203
        }
2204
 
2205
      /* Partly decoded immediate field.  */
2206
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);
2207
 
2208
      if (buf[0] == 0x1c /* ai */)
2209
        {
2210
          imm >>= 7;
2211
          imm = (imm ^ 0x200) - 0x200;
2212
          reg[rt] = reg[ra] + imm;
2213
 
2214
          if (rt == 1 /* sp */)
2215
            {
2216
              if (reg[rt] > 0)
2217
                break;
2218
              *sp_adjust = offset;
2219
              return reg[rt];
2220
            }
2221
        }
2222
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
2223
        {
2224
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2225
 
2226
          reg[rt] = reg[ra] + reg[rb];
2227
          if (rt == 1)
2228
            {
2229
              if (reg[rt] > 0)
2230
                break;
2231
              *sp_adjust = offset;
2232
              return reg[rt];
2233
            }
2234
        }
2235
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
2236
        {
2237
          int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);
2238
 
2239
          reg[rt] = reg[rb] - reg[ra];
2240
          if (rt == 1)
2241
            {
2242
              if (reg[rt] > 0)
2243
                break;
2244
              *sp_adjust = offset;
2245
              return reg[rt];
2246
            }
2247
        }
2248
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
2249
        {
2250
          if (buf[0] >= 0x42 /* ila */)
2251
            imm |= (buf[0] & 1) << 17;
2252
          else
2253
            {
2254
              imm &= 0xffff;
2255
 
2256
              if (buf[0] == 0x40 /* il */)
2257
                {
2258
                  if ((buf[1] & 0x80) == 0)
2259
                    continue;
2260
                  imm = (imm ^ 0x8000) - 0x8000;
2261
                }
2262
              else if ((buf[1] & 0x80) == 0 /* ilhu */)
2263
                imm <<= 16;
2264
            }
2265
          reg[rt] = imm;
2266
          continue;
2267
        }
2268
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
2269
        {
2270
          reg[rt] |= imm & 0xffff;
2271
          continue;
2272
        }
2273
      else if (buf[0] == 0x04 /* ori */)
2274
        {
2275
          imm >>= 7;
2276
          imm = (imm ^ 0x200) - 0x200;
2277
          reg[rt] = reg[ra] | imm;
2278
          continue;
2279
        }
2280
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
2281
        {
2282
          reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
2283
                     | ((imm & 0x4000) ? 0x00ff0000 : 0)
2284
                     | ((imm & 0x2000) ? 0x0000ff00 : 0)
2285
                     | ((imm & 0x1000) ? 0x000000ff : 0));
2286
          continue;
2287
        }
2288
      else if (buf[0] == 0x16 /* andbi */)
2289
        {
2290
          imm >>= 7;
2291
          imm &= 0xff;
2292
          imm |= imm << 8;
2293
          imm |= imm << 16;
2294
          reg[rt] = reg[ra] & imm;
2295
          continue;
2296
        }
2297
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
2298
        {
2299
          /* Used in a PIC register load.  Say rt is trashed.  Won't be used
2300
             in stack adjust, but we need to continue past this branch.  */
2301
          reg[rt] = 0;
2302
          continue;
2303
        }
2304
      else if (is_branch (buf) || is_indirect_branch (buf))
2305
        /* If we hit a branch then we must be out of the prologue.  */
2306
        break;
2307
    }
2308
 
2309
  return 0;
2310
}
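
/* A worked example of what the scan above recognises.  A typical SPU
   prologue looks roughly like this (illustrative only):

     stqd  $lr, 16($sp)    # store of lr: offset saved in *LR_STORE
     ai    $sp, $sp, -48   # reg[1] becomes -48: *SP_ADJUST saved,
                           # and -48 is returned

   so the caller, maybe_insert_function, negates the result and records
   a 48-byte stack frame.  Frames too large for the 10-bit "ai"
   immediate are built with il/ilhu/iohl followed by an "a" or "sf" on
   $sp, and are tracked the same way through the reg[] array.  */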
2311
 
2312
/* qsort predicate to sort symbols by section and value.  */
2313
 
2314
static Elf_Internal_Sym *sort_syms_syms;
2315
static asection **sort_syms_psecs;
2316
 
2317
static int
2318
sort_syms (const void *a, const void *b)
2319
{
2320
  Elf_Internal_Sym *const *s1 = a;
2321
  Elf_Internal_Sym *const *s2 = b;
2322
  asection *sec1,*sec2;
2323
  bfd_signed_vma delta;
2324
 
2325
  sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2326
  sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2327
 
2328
  if (sec1 != sec2)
2329
    return sec1->index - sec2->index;
2330
 
2331
  delta = (*s1)->st_value - (*s2)->st_value;
2332
  if (delta != 0)
2333
    return delta < 0 ? -1 : 1;
2334
 
2335
  delta = (*s2)->st_size - (*s1)->st_size;
2336
  if (delta != 0)
2337
    return delta < 0 ? -1 : 1;
2338
 
2339
  return *s1 < *s2 ? -1 : 1;
2340
}
2341
 
2342
/* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2343
   entries for section SEC.  */
2344
 
2345
static struct spu_elf_stack_info *
2346
alloc_stack_info (asection *sec, int max_fun)
2347
{
2348
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2349
  bfd_size_type amt;
2350
 
2351
  amt = sizeof (struct spu_elf_stack_info);
2352
  amt += (max_fun - 1) * sizeof (struct function_info);
2353
  sec_data->u.i.stack_info = bfd_zmalloc (amt);
2354
  if (sec_data->u.i.stack_info != NULL)
2355
    sec_data->u.i.stack_info->max_fun = max_fun;
2356
  return sec_data->u.i.stack_info;
2357
}
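
/* The allocation above relies on struct spu_elf_stack_info ending with
   a one-element function_info array, which is why only MAX_FUN - 1
   extra entries are added to the size.  The realloc in
   maybe_insert_function below grows the array using the same
   arithmetic.  */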
2358
 
2359
/* Add a new struct function_info describing a (part of a) function
2360
   starting at SYM_H.  Keep the array sorted by address.  */
2361
 
2362
static struct function_info *
2363
maybe_insert_function (asection *sec,
2364
                       void *sym_h,
2365
                       bfd_boolean global,
2366
                       bfd_boolean is_func)
2367
{
2368
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2369
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2370
  int i;
2371
  bfd_vma off, size;
2372
 
2373
  if (sinfo == NULL)
2374
    {
2375
      sinfo = alloc_stack_info (sec, 20);
2376
      if (sinfo == NULL)
2377
        return NULL;
2378
    }
2379
 
2380
  if (!global)
2381
    {
2382
      Elf_Internal_Sym *sym = sym_h;
2383
      off = sym->st_value;
2384
      size = sym->st_size;
2385
    }
2386
  else
2387
    {
2388
      struct elf_link_hash_entry *h = sym_h;
2389
      off = h->root.u.def.value;
2390
      size = h->size;
2391
    }
2392
 
2393
  for (i = sinfo->num_fun; --i >= 0; )
2394
    if (sinfo->fun[i].lo <= off)
2395
      break;
2396
 
2397
  if (i >= 0)
2398
    {
2399
      /* Don't add another entry for an alias, but do update some
2400
         info.  */
2401
      if (sinfo->fun[i].lo == off)
2402
        {
2403
          /* Prefer globals over local syms.  */
2404
          if (global && !sinfo->fun[i].global)
2405
            {
2406
              sinfo->fun[i].global = TRUE;
2407
              sinfo->fun[i].u.h = sym_h;
2408
            }
2409
          if (is_func)
2410
            sinfo->fun[i].is_func = TRUE;
2411
          return &sinfo->fun[i];
2412
        }
2413
      /* Ignore a zero-size symbol inside an existing function.  */
2414
      else if (sinfo->fun[i].hi > off && size == 0)
2415
        return &sinfo->fun[i];
2416
    }
2417
 
2418
  if (sinfo->num_fun >= sinfo->max_fun)
2419
    {
2420
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
2421
      bfd_size_type old = amt;
2422
 
2423
      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
2424
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
2425
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
2426
      sinfo = bfd_realloc (sinfo, amt);
2427
      if (sinfo == NULL)
2428
        return NULL;
2429
      memset ((char *) sinfo + old, 0, amt - old);
2430
      sec_data->u.i.stack_info = sinfo;
2431
    }
2432
 
2433
  if (++i < sinfo->num_fun)
2434
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
2435
             (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
2436
  sinfo->fun[i].is_func = is_func;
2437
  sinfo->fun[i].global = global;
2438
  sinfo->fun[i].sec = sec;
2439
  if (global)
2440
    sinfo->fun[i].u.h = sym_h;
2441
  else
2442
    sinfo->fun[i].u.sym = sym_h;
2443
  sinfo->fun[i].lo = off;
2444
  sinfo->fun[i].hi = off + size;
2445
  sinfo->fun[i].lr_store = -1;
2446
  sinfo->fun[i].sp_adjust = -1;
2447
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
2448
                                                     &sinfo->fun[i].lr_store,
2449
                                                     &sinfo->fun[i].sp_adjust);
2450
  sinfo->num_fun += 1;
2451
  return &sinfo->fun[i];
2452
}
2453
 
2454
/* Return the name of FUN.  */
2455
 
2456
static const char *
2457
func_name (struct function_info *fun)
2458
{
2459
  asection *sec;
2460
  bfd *ibfd;
2461
  Elf_Internal_Shdr *symtab_hdr;
2462
 
2463
  while (fun->start != NULL)
2464
    fun = fun->start;
2465
 
2466
  if (fun->global)
2467
    return fun->u.h->root.root.string;
2468
 
2469
  sec = fun->sec;
2470
  if (fun->u.sym->st_name == 0)
2471
    {
2472
      size_t len = strlen (sec->name);
2473
      char *name = bfd_malloc (len + 10);
2474
      if (name == NULL)
2475
        return "(null)";
2476
      sprintf (name, "%s+%lx", sec->name,
2477
               (unsigned long) fun->u.sym->st_value & 0xffffffff);
2478
      return name;
2479
    }
2480
  ibfd = sec->owner;
2481
  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2482
  return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2483
}
2484
 
2485
/* Read the instruction at OFF in SEC.  Return true iff the instruction
2486
   is a nop, lnop, or stop 0 (all zero insn).  */
2487
 
2488
static bfd_boolean
2489
is_nop (asection *sec, bfd_vma off)
2490
{
2491
  unsigned char insn[4];
2492
 
2493
  if (off + 4 > sec->size
2494
      || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2495
    return FALSE;
2496
  if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2497
    return TRUE;
2498
  if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2499
    return TRUE;
2500
  return FALSE;
2501
}
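
/* In the first test above, insn[0] may be 0x00 or 0x40 and the top
   three bits of insn[1] must be 001, which matches the lnop and nop
   encodings respectively; the all-zero word is the "stop 0" padding
   mentioned in the comment.  */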
2502
 
2503
/* Extend the range of FUN to cover nop padding up to LIMIT.
2504
   Return TRUE iff some instruction other than a NOP was found.  */
2505
 
2506
static bfd_boolean
2507
insns_at_end (struct function_info *fun, bfd_vma limit)
2508
{
2509
  bfd_vma off = (fun->hi + 3) & -4;
2510
 
2511
  while (off < limit && is_nop (fun->sec, off))
2512
    off += 4;
2513
  if (off < limit)
2514
    {
2515
      fun->hi = off;
2516
      return TRUE;
2517
    }
2518
  fun->hi = limit;
2519
  return FALSE;
2520
}
2521
 
2522
/* Check and fix overlapping function ranges.  Return TRUE iff there
2523
   are gaps in the current info we have about functions in SEC.  */
2524
 
2525
static bfd_boolean
2526
check_function_ranges (asection *sec, struct bfd_link_info *info)
2527
{
2528
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2529
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2530
  int i;
2531
  bfd_boolean gaps = FALSE;
2532
 
2533
  if (sinfo == NULL)
2534
    return FALSE;
2535
 
2536
  for (i = 1; i < sinfo->num_fun; i++)
2537
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
2538
      {
2539
        /* Fix overlapping symbols.  */
2540
        const char *f1 = func_name (&sinfo->fun[i - 1]);
2541
        const char *f2 = func_name (&sinfo->fun[i]);
2542
 
2543
        info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
2544
        sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
2545
      }
2546
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
2547
      gaps = TRUE;
2548
 
2549
  if (sinfo->num_fun == 0)
2550
    gaps = TRUE;
2551
  else
2552
    {
2553
      if (sinfo->fun[0].lo != 0)
2554
        gaps = TRUE;
2555
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
2556
        {
2557
          const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);
2558
 
2559
          info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
2560
          sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
2561
        }
2562
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
2563
        gaps = TRUE;
2564
    }
2565
  return gaps;
2566
}
2567
 
2568
/* Search current function info for a function that contains address
2569
   OFFSET in section SEC.  */
2570
 
2571
static struct function_info *
2572
find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2573
{
2574
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2575
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2576
  int lo, hi, mid;
2577
 
2578
  lo = 0;
2579
  hi = sinfo->num_fun;
2580
  while (lo < hi)
2581
    {
2582
      mid = (lo + hi) / 2;
2583
      if (offset < sinfo->fun[mid].lo)
2584
        hi = mid;
2585
      else if (offset >= sinfo->fun[mid].hi)
2586
        lo = mid + 1;
2587
      else
2588
        return &sinfo->fun[mid];
2589
    }
2590
  info->callbacks->einfo (_("%A:0x%v not found in function table\n"),
2591
                          sec, offset);
2592
  bfd_set_error (bfd_error_bad_value);
2593
  return NULL;
2594
}
2595
 
2596
/* Add CALLEE to CALLER's call list if not already present.  Return TRUE
2597
   if CALLEE was new.  If this function returns FALSE, CALLEE should
2598
   be freed.  */
2599
 
2600
static bfd_boolean
2601
insert_callee (struct function_info *caller, struct call_info *callee)
2602
{
2603
  struct call_info **pp, *p;
2604
 
2605
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
2606
    if (p->fun == callee->fun)
2607
      {
2608
        /* Tail calls use less stack than normal calls.  Retain entry
2609
           for normal call over one for tail call.  */
2610
        p->is_tail &= callee->is_tail;
2611
        if (!p->is_tail)
2612
          {
2613
            p->fun->start = NULL;
2614
            p->fun->is_func = TRUE;
2615
          }
2616
        p->count += callee->count;
2617
        /* Reorder list so most recent call is first.  */
2618
        *pp = p->next;
2619
        p->next = caller->call_list;
2620
        caller->call_list = p;
2621
        return FALSE;
2622
      }
2623
  callee->next = caller->call_list;
2624
  caller->call_list = callee;
2625
  return TRUE;
2626
}
2627
 
2628
/* Copy CALL and insert the copy into CALLER.  */
2629
 
2630
static bfd_boolean
2631
copy_callee (struct function_info *caller, const struct call_info *call)
2632
{
2633
  struct call_info *callee;
2634
  callee = bfd_malloc (sizeof (*callee));
2635
  if (callee == NULL)
2636
    return FALSE;
2637
  *callee = *call;
2638
  if (!insert_callee (caller, callee))
2639
    free (callee);
2640
  return TRUE;
2641
}
2642
 
2643
/* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2644
   overlay stub sections.  */
2645
 
2646
static bfd_boolean
2647
interesting_section (asection *s)
2648
{
2649
  return (s->output_section != bfd_abs_section_ptr
2650
          && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2651
              == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2652
          && s->size != 0);
2653
}
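
/* The SEC_IN_MEMORY test works because the .stub sections are created
   on an input bfd by spu_elf_size_stubs with SEC_IN_MEMORY set, whereas
   ordinary code sections read from input files do not carry that
   flag.  */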
2654
 
2655
/* Rummage through the relocs for SEC, looking for function calls.
2656
   If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
2657
   mark destination symbols on calls as being functions.  Also
2658
   look at branches, which may be tail calls or go to hot/cold
2659
   section part of same function.  */
2660
 
2661
static bfd_boolean
2662
mark_functions_via_relocs (asection *sec,
2663
                           struct bfd_link_info *info,
2664
                           int call_tree)
2665
{
2666
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
2667
  Elf_Internal_Shdr *symtab_hdr;
2668
  void *psyms;
2669
  unsigned int priority = 0;
2670
  static bfd_boolean warned;
2671
 
2672
  if (!interesting_section (sec)
2673
      || sec->reloc_count == 0)
2674
    return TRUE;
2675
 
2676
  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
2677
                                               info->keep_memory);
2678
  if (internal_relocs == NULL)
2679
    return FALSE;
2680
 
2681
  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
2682
  psyms = &symtab_hdr->contents;
2683
  irela = internal_relocs;
2684
  irelaend = irela + sec->reloc_count;
2685
  for (; irela < irelaend; irela++)
2686
    {
2687
      enum elf_spu_reloc_type r_type;
2688
      unsigned int r_indx;
2689
      asection *sym_sec;
2690
      Elf_Internal_Sym *sym;
2691
      struct elf_link_hash_entry *h;
2692
      bfd_vma val;
2693
      bfd_boolean nonbranch, is_call;
2694
      struct function_info *caller;
2695
      struct call_info *callee;
2696
 
2697
      r_type = ELF32_R_TYPE (irela->r_info);
2698
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;
2699
 
2700
      r_indx = ELF32_R_SYM (irela->r_info);
2701
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
2702
        return FALSE;
2703
 
2704
      if (sym_sec == NULL
2705
          || sym_sec->output_section == bfd_abs_section_ptr)
2706
        continue;
2707
 
2708
      is_call = FALSE;
2709
      if (!nonbranch)
2710
        {
2711
          unsigned char insn[4];
2712
 
2713
          if (!bfd_get_section_contents (sec->owner, sec, insn,
2714
                                         irela->r_offset, 4))
2715
            return FALSE;
2716
          if (is_branch (insn))
2717
            {
2718
              is_call = (insn[0] & 0xfd) == 0x31;
2719
              priority = insn[1] & 0x0f;
2720
              priority <<= 8;
2721
              priority |= insn[2];
2722
              priority <<= 8;
2723
              priority |= insn[3];
2724
              priority >>= 7;
2725
              if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2726
                  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2727
                {
2728
                  if (!warned)
2729
                    info->callbacks->einfo
2730
                      (_("%B(%A+0x%v): call to non-code section"
2731
                         " %B(%A), analysis incomplete\n"),
2732
                       sec->owner, sec, irela->r_offset,
2733
                       sym_sec->owner, sym_sec);
2734
                  warned = TRUE;
2735
                  continue;
2736
                }
2737
            }
2738
          else
2739
            {
2740
              nonbranch = TRUE;
2741
              if (is_hint (insn))
2742
                continue;
2743
            }
2744
        }
2745
 
2746
      if (nonbranch)
2747
        {
2748
          /* For --auto-overlay, count possible stubs we need for
2749
             function pointer references.  */
2750
          unsigned int sym_type;
2751
          if (h)
2752
            sym_type = h->type;
2753
          else
2754
            sym_type = ELF_ST_TYPE (sym->st_info);
2755
          if (sym_type == STT_FUNC)
2756
            {
2757
              if (call_tree && spu_hash_table (info)->params->auto_overlay)
2758
                spu_hash_table (info)->non_ovly_stub += 1;
2759
              /* If the symbol type is STT_FUNC then this must be a
2760
                 function pointer initialisation.  */
2761
              continue;
2762
            }
2763
          /* Ignore data references.  */
2764
          if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2765
              != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2766
            continue;
2767
          /* Otherwise we probably have a jump table reloc for
2768
             a switch statement or some other reference to a
2769
             code label.  */
2770
        }
2771
 
2772
      if (h)
2773
        val = h->root.u.def.value;
2774
      else
2775
        val = sym->st_value;
2776
      val += irela->r_addend;
2777
 
2778
      if (!call_tree)
2779
        {
2780
          struct function_info *fun;
2781
 
2782
          if (irela->r_addend != 0)
2783
            {
2784
              Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
2785
              if (fake == NULL)
2786
                return FALSE;
2787
              fake->st_value = val;
2788
              fake->st_shndx
2789
                = _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
2790
              sym = fake;
2791
            }
2792
          if (sym)
2793
            fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
2794
          else
2795
            fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
2796
          if (fun == NULL)
2797
            return FALSE;
2798
          if (irela->r_addend != 0
2799
              && fun->u.sym != sym)
2800
            free (sym);
2801
          continue;
2802
        }
2803
 
2804
      caller = find_function (sec, irela->r_offset, info);
2805
      if (caller == NULL)
2806
        return FALSE;
2807
      callee = bfd_malloc (sizeof *callee);
2808
      if (callee == NULL)
2809
        return FALSE;
2810
 
2811
      callee->fun = find_function (sym_sec, val, info);
2812
      if (callee->fun == NULL)
2813
        return FALSE;
2814
      callee->is_tail = !is_call;
2815
      callee->is_pasted = FALSE;
2816
      callee->broken_cycle = FALSE;
2817
      callee->priority = priority;
2818
      callee->count = nonbranch? 0 : 1;
2819
      if (callee->fun->last_caller != sec)
2820
        {
2821
          callee->fun->last_caller = sec;
2822
          callee->fun->call_count += 1;
2823
        }
2824
      if (!insert_callee (caller, callee))
2825
        free (callee);
2826
      else if (!is_call
2827
               && !callee->fun->is_func
2828
               && callee->fun->stack == 0)
2829
        {
2830
          /* This is either a tail call or a branch from one part of
2831
             the function to another, i.e. hot/cold section.  If the
2832
             destination has been called by some other function then
2833
             it is a separate function.  We also assume that functions
2834
             are not split across input files.  */
2835
          if (sec->owner != sym_sec->owner)
2836
            {
2837
              callee->fun->start = NULL;
2838
              callee->fun->is_func = TRUE;
2839
            }
2840
          else if (callee->fun->start == NULL)
2841
            {
2842
              struct function_info *caller_start = caller;
2843
              while (caller_start->start)
2844
                caller_start = caller_start->start;
2845
 
2846
              if (caller_start != callee->fun)
2847
                callee->fun->start = caller_start;
2848
            }
2849
          else
2850
            {
2851
              struct function_info *callee_start;
2852
              struct function_info *caller_start;
2853
              callee_start = callee->fun;
2854
              while (callee_start->start)
2855
                callee_start = callee_start->start;
2856
              caller_start = caller;
2857
              while (caller_start->start)
2858
                caller_start = caller_start->start;
2859
              if (caller_start != callee_start)
2860
                {
2861
                  callee->fun->start = NULL;
2862
                  callee->fun->is_func = TRUE;
2863
                }
2864
            }
2865
        }
2866
    }
2867
 
2868
  return TRUE;
2869
}
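
/* mark_functions_via_relocs also runs twice: discover_functions below
   calls it with CALL_TREE false purely to find extra function entry
   points from branch targets, and the call-graph pass later in the
   file calls it with CALL_TREE true so that the code from
   "caller = find_function" onward records caller/callee edges.  The
   is_call test, (insn[0] & 0xfd) == 0x31, accepts the two
   branch-and-set-link forms, brasl (0x31) and brsl (0x33).  */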
2870
 
2871
/* Handle something like .init or .fini, which has a piece of a function.
2872
   These sections are pasted together to form a single function.  */
2873
 
2874
static bfd_boolean
2875
pasted_function (asection *sec)
2876
{
2877
  struct bfd_link_order *l;
2878
  struct _spu_elf_section_data *sec_data;
2879
  struct spu_elf_stack_info *sinfo;
2880
  Elf_Internal_Sym *fake;
2881
  struct function_info *fun, *fun_start;
2882
 
2883
  fake = bfd_zmalloc (sizeof (*fake));
2884
  if (fake == NULL)
2885
    return FALSE;
2886
  fake->st_value = 0;
2887
  fake->st_size = sec->size;
2888
  fake->st_shndx
2889
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
2890
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
2891
  if (!fun)
2892
    return FALSE;
2893
 
2894
  /* Find a function immediately preceding this section.  */
2895
  fun_start = NULL;
2896
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
2897
    {
2898
      if (l->u.indirect.section == sec)
2899
        {
2900
          if (fun_start != NULL)
2901
            {
2902
              struct call_info *callee = bfd_malloc (sizeof *callee);
2903
              if (callee == NULL)
2904
                return FALSE;
2905
 
2906
              fun->start = fun_start;
2907
              callee->fun = fun;
2908
              callee->is_tail = TRUE;
2909
              callee->is_pasted = TRUE;
2910
              callee->broken_cycle = FALSE;
2911
              callee->priority = 0;
2912
              callee->count = 1;
2913
              if (!insert_callee (fun_start, callee))
2914
                free (callee);
2915
              return TRUE;
2916
            }
2917
          break;
2918
        }
2919
      if (l->type == bfd_indirect_link_order
2920
          && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
2921
          && (sinfo = sec_data->u.i.stack_info) != NULL
2922
          && sinfo->num_fun != 0)
2923
        fun_start = &sinfo->fun[sinfo->num_fun - 1];
2924
    }
2925
 
2926
  /* Don't return an error if we did not find a function preceding this
2927
     section.  The section may have incorrect flags.  */
2928
  return TRUE;
2929
}
2930
 
2931
/* Map address ranges in code sections to functions.  */
2932
 
2933
static bfd_boolean
2934
discover_functions (struct bfd_link_info *info)
2935
{
2936
  bfd *ibfd;
2937
  int bfd_idx;
2938
  Elf_Internal_Sym ***psym_arr;
2939
  asection ***sec_arr;
2940
  bfd_boolean gaps = FALSE;
2941
 
2942
  bfd_idx = 0;
2943
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
2944
    bfd_idx++;
2945
 
2946
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
2947
  if (psym_arr == NULL)
2948
    return FALSE;
2949
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
2950
  if (sec_arr == NULL)
2951
    return FALSE;
2952
 
2953
  for (ibfd = info->input_bfds, bfd_idx = 0;
2954
       ibfd != NULL;
2955
       ibfd = ibfd->link_next, bfd_idx++)
2956
    {
2957
      extern const bfd_target bfd_elf32_spu_vec;
2958
      Elf_Internal_Shdr *symtab_hdr;
2959
      asection *sec;
2960
      size_t symcount;
2961
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
2962
      asection **psecs, **p;
2963
 
2964
      if (ibfd->xvec != &bfd_elf32_spu_vec)
2965
        continue;
2966
 
2967
      /* Read all the symbols.  */
2968
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2969
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
2970
      if (symcount == 0)
2971
        {
2972
          if (!gaps)
2973
            for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
2974
              if (interesting_section (sec))
2975
                {
2976
                  gaps = TRUE;
2977
                  break;
2978
                }
2979
          continue;
2980
        }
2981
 
2982
      if (symtab_hdr->contents != NULL)
2983
        {
2984
          /* Don't use cached symbols since the generic ELF linker
2985
             code only reads local symbols, and we need globals too.  */
2986
          free (symtab_hdr->contents);
2987
          symtab_hdr->contents = NULL;
2988
        }
2989
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
2990
                                   NULL, NULL, NULL);
2991
      symtab_hdr->contents = (void *) syms;
2992
      if (syms == NULL)
2993
        return FALSE;
2994
 
2995
      /* Select defined function symbols that are going to be output.  */
2996
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
2997
      if (psyms == NULL)
2998
        return FALSE;
2999
      psym_arr[bfd_idx] = psyms;
3000
      psecs = bfd_malloc (symcount * sizeof (*psecs));
3001
      if (psecs == NULL)
3002
        return FALSE;
3003
      sec_arr[bfd_idx] = psecs;
3004
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
3005
        if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
3006
            || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3007
          {
3008
            asection *s;
3009
 
3010
            *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
3011
            if (s != NULL && interesting_section (s))
3012
              *psy++ = sy;
3013
          }
3014
      symcount = psy - psyms;
3015
      *psy = NULL;
3016
 
3017
      /* Sort them by section and offset within section.  */
3018
      sort_syms_syms = syms;
3019
      sort_syms_psecs = psecs;
3020
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);
3021
 
3022
      /* Now inspect the function symbols.  */
3023
      for (psy = psyms; psy < psyms + symcount; )
3024
        {
3025
          asection *s = psecs[*psy - syms];
3026
          Elf_Internal_Sym **psy2;
3027
 
3028
          for (psy2 = psy; ++psy2 < psyms + symcount; )
3029
            if (psecs[*psy2 - syms] != s)
3030
              break;
3031
 
3032
          if (!alloc_stack_info (s, psy2 - psy))
3033
            return FALSE;
3034
          psy = psy2;
3035
        }
3036
 
3037
      /* First install info about properly typed and sized functions.
3038
         In an ideal world this will cover all code sections, except
3039
         when partitioning functions into hot and cold sections,
3040
         and the horrible pasted together .init and .fini functions.  */
3041
      for (psy = psyms; psy < psyms + symcount; ++psy)
3042
        {
3043
          sy = *psy;
3044
          if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
3045
            {
3046
              asection *s = psecs[sy - syms];
3047
              if (!maybe_insert_function (s, sy, FALSE, TRUE))
3048
                return FALSE;
3049
            }
3050
        }
3051
 
3052
      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3053
        if (interesting_section (sec))
3054
          gaps |= check_function_ranges (sec, info);
3055
    }
3056
 
3057
  if (gaps)
3058
    {
3059
      /* See if we can discover more function symbols by looking at
3060
         relocations.  */
3061
      for (ibfd = info->input_bfds, bfd_idx = 0;
3062
           ibfd != NULL;
3063
           ibfd = ibfd->link_next, bfd_idx++)
3064
        {
3065
          asection *sec;
3066
 
3067
          if (psym_arr[bfd_idx] == NULL)
3068
            continue;
3069
 
3070
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3071
            if (!mark_functions_via_relocs (sec, info, FALSE))
3072
              return FALSE;
3073
        }
3074
 
3075
      for (ibfd = info->input_bfds, bfd_idx = 0;
3076
           ibfd != NULL;
3077
           ibfd = ibfd->link_next, bfd_idx++)
3078
        {
3079
          Elf_Internal_Shdr *symtab_hdr;
3080
          asection *sec;
3081
          Elf_Internal_Sym *syms, *sy, **psyms, **psy;
3082
          asection **psecs;
3083
 
3084
          if ((psyms = psym_arr[bfd_idx]) == NULL)
3085
            continue;
3086
 
3087
          psecs = sec_arr[bfd_idx];
3088
 
3089
          symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
3090
          syms = (Elf_Internal_Sym *) symtab_hdr->contents;
3091
 
3092
          gaps = FALSE;
3093
          for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
3094
            if (interesting_section (sec))
3095
              gaps |= check_function_ranges (sec, info);
3096
          if (!gaps)
3097
            continue;
3098
 
3099
          /* Finally, install all globals.  */
3100
          for (psy = psyms; (sy = *psy) != NULL; ++psy)
3101
            {
3102
              asection *s;
3103
 
3104
              s = psecs[sy - syms];
3105
 
3106
              /* Global syms might be improperly typed functions.  */
3107
              if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
3108
                  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
3109
                {
3110
                  if (!maybe_insert_function (s, sy, FALSE, FALSE))
3111
                    return FALSE;
3112
                }
3113
            }
3114
        }
3115
 
3116
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3117
        {
3118
          extern const bfd_target bfd_elf32_spu_vec;
3119
          asection *sec;
3120
 
3121
          if (ibfd->xvec != &bfd_elf32_spu_vec)
3122
            continue;
3123
 
3124
          /* Some of the symbols we've installed as marking the
3125
             beginning of functions may have a size of zero.  Extend
3126
             the range of such functions to the beginning of the
3127
             next symbol of interest.  */
3128
          for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3129
            if (interesting_section (sec))
3130
              {
3131
                struct _spu_elf_section_data *sec_data;
3132
                struct spu_elf_stack_info *sinfo;
3133
 
3134
                sec_data = spu_elf_section_data (sec);
3135
                sinfo = sec_data->u.i.stack_info;
3136
                if (sinfo != NULL && sinfo->num_fun != 0)
3137
                  {
3138
                    int fun_idx;
3139
                    bfd_vma hi = sec->size;
3140
 
3141
                    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
3142
                      {
3143
                        sinfo->fun[fun_idx].hi = hi;
3144
                        hi = sinfo->fun[fun_idx].lo;
3145
                      }
3146
 
3147
                    sinfo->fun[0].lo = 0;
3148
                  }
3149
                /* No symbols in this section.  Must be .init or .fini
3150
                   or something similar.  */
3151
                else if (!pasted_function (sec))
3152
                  return FALSE;
3153
              }
3154
        }
3155
    }
3156
 
3157
  for (ibfd = info->input_bfds, bfd_idx = 0;
3158
       ibfd != NULL;
3159
       ibfd = ibfd->link_next, bfd_idx++)
3160
    {
3161
      if (psym_arr[bfd_idx] == NULL)
3162
        continue;
3163
 
3164
      free (psym_arr[bfd_idx]);
3165
      free (sec_arr[bfd_idx]);
3166
    }
3167
 
3168
  free (psym_arr);
3169
  free (sec_arr);
3170
 
3171
  return TRUE;
3172
}
3173
 
3174
/* Iterate over all function_info we have collected, calling DOIT on
3175
   each node if ROOT_ONLY is false.  Only call DOIT on root nodes
3176
   if ROOT_ONLY.  */
3177
 
3178
static bfd_boolean
3179
for_each_node (bfd_boolean (*doit) (struct function_info *,
3180
                                    struct bfd_link_info *,
3181
                                    void *),
3182
               struct bfd_link_info *info,
3183
               void *param,
3184
               int root_only)
3185
{
3186
  bfd *ibfd;
3187
 
3188
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3189
    {
3190
      extern const bfd_target bfd_elf32_spu_vec;
3191
      asection *sec;
3192
 
3193
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3194
        continue;
3195
 
3196
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3197
        {
3198
          struct _spu_elf_section_data *sec_data;
3199
          struct spu_elf_stack_info *sinfo;
3200
 
3201
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3202
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3203
            {
3204
              int i;
3205
              for (i = 0; i < sinfo->num_fun; ++i)
3206
                if (!root_only || !sinfo->fun[i].non_root)
3207
                  if (!doit (&sinfo->fun[i], info, param))
3208
                    return FALSE;
3209
            }
3210
        }
3211
    }
3212
  return TRUE;
3213
}
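
/* Illustrative sketch (hypothetical callback, not part of elf32-spu.c):
   the smallest callback shape for_each_node accepts.  It just counts
   the function_info nodes visited; the running total travels through
   the PARAM pointer, the same way the real callbacks below pass their
   state.  */

static bfd_boolean
count_nodes (struct function_info *fun ATTRIBUTE_UNUSED,
             struct bfd_link_info *info ATTRIBUTE_UNUSED,
             void *param)
{
  *(unsigned int *) param += 1;
  return TRUE;
}

/* A typical call site would be:
     unsigned int n = 0;
     if (!for_each_node (count_nodes, info, &n, FALSE))
       return FALSE;  */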
3214
 
3215
/* Transfer call info attached to struct function_info entries for
3216
   all of a given function's sections to the first entry.  */
3217
 
3218
static bfd_boolean
3219
transfer_calls (struct function_info *fun,
3220
                struct bfd_link_info *info ATTRIBUTE_UNUSED,
3221
                void *param ATTRIBUTE_UNUSED)
3222
{
3223
  struct function_info *start = fun->start;
3224
 
3225
  if (start != NULL)
3226
    {
3227
      struct call_info *call, *call_next;
3228
 
3229
      while (start->start != NULL)
3230
        start = start->start;
3231
      for (call = fun->call_list; call != NULL; call = call_next)
3232
        {
3233
          call_next = call->next;
3234
          if (!insert_callee (start, call))
3235
            free (call);
3236
        }
3237
      fun->call_list = NULL;
3238
    }
3239
  return TRUE;
3240
}
3241
 
3242
/* Mark nodes in the call graph that are called by some other node.  */
3243
 
3244
static bfd_boolean
3245
mark_non_root (struct function_info *fun,
3246
               struct bfd_link_info *info ATTRIBUTE_UNUSED,
3247
               void *param ATTRIBUTE_UNUSED)
3248
{
3249
  struct call_info *call;
3250
 
3251
  if (fun->visit1)
3252
    return TRUE;
3253
  fun->visit1 = TRUE;
3254
  for (call = fun->call_list; call; call = call->next)
3255
    {
3256
      call->fun->non_root = TRUE;
3257
      mark_non_root (call->fun, 0, 0);
3258
    }
3259
  return TRUE;
3260
}
3261
 
3262
/* Remove cycles from the call graph.  Set depth of nodes.  */
3263
 
3264
static bfd_boolean
3265
remove_cycles (struct function_info *fun,
3266
               struct bfd_link_info *info,
3267
               void *param)
3268
{
3269
  struct call_info **callp, *call;
3270
  unsigned int depth = *(unsigned int *) param;
3271
  unsigned int max_depth = depth;
3272
 
3273
  fun->depth = depth;
3274
  fun->visit2 = TRUE;
3275
  fun->marking = TRUE;
3276
 
3277
  callp = &fun->call_list;
3278
  while ((call = *callp) != NULL)
3279
    {
3280
      call->max_depth = depth + !call->is_pasted;
3281
      if (!call->fun->visit2)
3282
        {
3283
          if (!remove_cycles (call->fun, info, &call->max_depth))
3284
            return FALSE;
3285
          if (max_depth < call->max_depth)
3286
            max_depth = call->max_depth;
3287
        }
3288
      else if (call->fun->marking)
3289
        {
3290
          struct spu_link_hash_table *htab = spu_hash_table (info);
3291
 
3292
          if (!htab->params->auto_overlay
3293
              && htab->params->stack_analysis)
3294
            {
3295
              const char *f1 = func_name (fun);
3296
              const char *f2 = func_name (call->fun);
3297
 
3298
              info->callbacks->info (_("Stack analysis will ignore the call "
3299
                                       "from %s to %s\n"),
3300
                                     f1, f2);
3301
            }
3302
 
3303
          call->broken_cycle = TRUE;
3304
        }
3305
      callp = &call->next;
3306
    }
3307
  fun->marking = FALSE;
3308
  *(unsigned int *) param = max_depth;
3309
  return TRUE;
3310
}
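
/* Illustrative sketch (hypothetical types, not part of elf32-spu.c):
   the cycle removal above is a depth-first search in which "visit2"
   marks nodes that have been entered at least once and "marking"
   marks nodes still on the DFS stack; an edge into a node that is
   still marked is a back edge and is flagged broken_cycle instead of
   being followed.  The same idea on a toy graph with at most one
   outgoing edge per node:  */

struct toy_node
{
  struct toy_node *callee;      /* Single outgoing call edge, or NULL.  */
  int visited, on_stack, broken;
};

static void
toy_break_cycles (struct toy_node *n)
{
  n->visited = 1;
  n->on_stack = 1;
  if (n->callee != NULL)
    {
      if (!n->callee->visited)
        toy_break_cycles (n->callee);   /* Tree edge: descend.  */
      else if (n->callee->on_stack)
        n->broken = 1;                  /* Back edge: break it here.  */
    }
  n->on_stack = 0;
}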
3311
 
3312
/* Check that we actually visited all nodes in remove_cycles.  If we
3313
   didn't, then there is some cycle in the call graph not attached to
3314
   any root node.  Arbitrarily choose a node in the cycle as a new
3315
   root and break the cycle.  */
3316
 
3317
static bfd_boolean
3318
mark_detached_root (struct function_info *fun,
3319
                    struct bfd_link_info *info,
3320
                    void *param)
3321
{
3322
  if (fun->visit2)
3323
    return TRUE;
3324
  fun->non_root = FALSE;
3325
  *(unsigned int *) param = 0;
3326
  return remove_cycles (fun, info, param);
3327
}
3328
 
3329
/* Populate call_list for each function.  */
3330
 
3331
static bfd_boolean
3332
build_call_tree (struct bfd_link_info *info)
3333
{
3334
  bfd *ibfd;
3335
  unsigned int depth;
3336
 
3337
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3338
    {
3339
      extern const bfd_target bfd_elf32_spu_vec;
3340
      asection *sec;
3341
 
3342
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3343
        continue;
3344
 
3345
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3346
        if (!mark_functions_via_relocs (sec, info, TRUE))
3347
          return FALSE;
3348
    }
3349
 
3350
  /* Transfer call info from hot/cold section part of function
3351
     to main entry.  */
3352
  if (!spu_hash_table (info)->params->auto_overlay
3353
      && !for_each_node (transfer_calls, info, 0, FALSE))
3354
    return FALSE;
3355
 
3356
  /* Find the call graph root(s).  */
3357
  if (!for_each_node (mark_non_root, info, 0, FALSE))
3358
    return FALSE;
3359
 
3360
  /* Remove cycles from the call graph.  We start from the root node(s)
3361
     so that we break cycles in a reasonable place.  */
3362
  depth = 0;
3363
  if (!for_each_node (remove_cycles, info, &depth, TRUE))
3364
    return FALSE;
3365
 
3366
  return for_each_node (mark_detached_root, info, &depth, FALSE);
3367
}
3368
 
3369
/* qsort predicate to sort calls by priority, max_depth then count.  */
3370
 
3371
static int
3372
sort_calls (const void *a, const void *b)
3373
{
3374
  struct call_info *const *c1 = a;
3375
  struct call_info *const *c2 = b;
3376
  int delta;
3377
 
3378
  delta = (*c2)->priority - (*c1)->priority;
3379
  if (delta != 0)
3380
    return delta;
3381
 
3382
  delta = (*c2)->max_depth - (*c1)->max_depth;
3383
  if (delta != 0)
3384
    return delta;
3385
 
3386
  delta = (*c2)->count - (*c1)->count;
3387
  if (delta != 0)
3388
    return delta;
3389
 
3390
  return (char *) c1 - (char *) c2;
3391
}
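
/* Illustrative sketch (hypothetical wrapper, not part of elf32-spu.c):
   sort_calls is only ever handed to qsort on an array of call_info
   pointers, as mark_overlay_section does below.  The final pointer
   subtraction above exists to keep the result deterministic when all
   three keys compare equal, since ISO C does not require qsort to be
   stable.  */

static void
toy_sort_call_array (struct call_info **calls, unsigned int count)
{
  /* Afterwards calls[0] has the highest priority, then the greatest
     max_depth, then the greatest count.  */
  if (count > 1)
    qsort (calls, count, sizeof (*calls), sort_calls);
}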
3392
 
3393
struct _mos_param {
3394
  unsigned int max_overlay_size;
3395
};
3396
 
3397
/* Set linker_mark and gc_mark on any sections that we will put in
3398
   overlays.  These flags are used by the generic ELF linker, but we
3399
   won't be continuing on to bfd_elf_final_link so it is OK to use
3400
   them.  linker_mark is clear before we get here.  Set segment_mark
3401
   on sections that are part of a pasted function (excluding the last
3402
   section).
3403
 
3404
   Set up function rodata section if --overlay-rodata.  We don't
3405
   currently include merged string constant rodata sections since
3406
 
3407
   Sort the call graph so that the deepest nodes will be visited
3408
   first.  */
3409
 
3410
static bfd_boolean
3411
mark_overlay_section (struct function_info *fun,
3412
                      struct bfd_link_info *info,
3413
                      void *param)
3414
{
3415
  struct call_info *call;
3416
  unsigned int count;
3417
  struct _mos_param *mos_param = param;
3418
  struct spu_link_hash_table *htab = spu_hash_table (info);
3419
 
3420
  if (fun->visit4)
3421
    return TRUE;
3422
 
3423
  fun->visit4 = TRUE;
3424
  if (!fun->sec->linker_mark
3425
      && (htab->params->ovly_flavour != ovly_soft_icache
3426
          || htab->params->non_ia_text
3427
          || strncmp (fun->sec->name, ".text.ia.", 9) == 0
3428
          || strcmp (fun->sec->name, ".init") == 0
3429
          || strcmp (fun->sec->name, ".fini") == 0))
3430
    {
3431
      unsigned int size;
3432
 
3433
      fun->sec->linker_mark = 1;
3434
      fun->sec->gc_mark = 1;
3435
      fun->sec->segment_mark = 0;
3436
      /* Ensure SEC_CODE is set on this text section (it ought to
3437
         be!), and SEC_CODE is clear on rodata sections.  We use
3438
         this flag to differentiate the two overlay section types.  */
3439
      fun->sec->flags |= SEC_CODE;
3440
 
3441
      size = fun->sec->size;
3442
      if (htab->params->auto_overlay & OVERLAY_RODATA)
3443
        {
3444
          char *name = NULL;
3445
 
3446
          /* Find the rodata section corresponding to this function's
3447
             text section.  */
3448
          if (strcmp (fun->sec->name, ".text") == 0)
3449
            {
3450
              name = bfd_malloc (sizeof (".rodata"));
3451
              if (name == NULL)
3452
                return FALSE;
3453
              memcpy (name, ".rodata", sizeof (".rodata"));
3454
            }
3455
          else if (strncmp (fun->sec->name, ".text.", 6) == 0)
3456
            {
3457
              size_t len = strlen (fun->sec->name);
3458
              name = bfd_malloc (len + 3);
3459
              if (name == NULL)
3460
                return FALSE;
3461
              memcpy (name, ".rodata", sizeof (".rodata"));
3462
              memcpy (name + 7, fun->sec->name + 5, len - 4);
3463
            }
3464
          else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
3465
            {
3466
              size_t len = strlen (fun->sec->name) + 1;
3467
              name = bfd_malloc (len);
3468
              if (name == NULL)
3469
                return FALSE;
3470
              memcpy (name, fun->sec->name, len);
3471
              name[14] = 'r';
3472
            }
3473
 
3474
          if (name != NULL)
3475
            {
3476
              asection *rodata = NULL;
3477
              asection *group_sec = elf_section_data (fun->sec)->next_in_group;
3478
              if (group_sec == NULL)
3479
                rodata = bfd_get_section_by_name (fun->sec->owner, name);
3480
              else
3481
                while (group_sec != NULL && group_sec != fun->sec)
3482
                  {
3483
                    if (strcmp (group_sec->name, name) == 0)
3484
                      {
3485
                        rodata = group_sec;
3486
                        break;
3487
                      }
3488
                    group_sec = elf_section_data (group_sec)->next_in_group;
3489
                  }
3490
              fun->rodata = rodata;
3491
              if (fun->rodata)
3492
                {
3493
                  size += fun->rodata->size;
3494
                  if (htab->params->line_size != 0
3495
                      && size > htab->params->line_size)
3496
                    {
3497
                      size -= fun->rodata->size;
3498
                      fun->rodata = NULL;
3499
                    }
3500
                  else
3501
                    {
3502
                      fun->rodata->linker_mark = 1;
3503
                      fun->rodata->gc_mark = 1;
3504
                      fun->rodata->flags &= ~SEC_CODE;
3505
                    }
3506
                }
3507
              free (name);
3508
            }
3509
        }
3510
      if (mos_param->max_overlay_size < size)
3511
        mos_param->max_overlay_size = size;
3512
    }
3513
 
3514
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3515
    count += 1;
3516
 
3517
  if (count > 1)
3518
    {
3519
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
3520
      if (calls == NULL)
3521
        return FALSE;
3522
 
3523
      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
3524
        calls[count++] = call;
3525
 
3526
      qsort (calls, count, sizeof (*calls), sort_calls);
3527
 
3528
      fun->call_list = NULL;
3529
      while (count != 0)
3530
        {
3531
          --count;
3532
          calls[count]->next = fun->call_list;
3533
          fun->call_list = calls[count];
3534
        }
3535
      free (calls);
3536
    }
3537
 
3538
  for (call = fun->call_list; call != NULL; call = call->next)
3539
    {
3540
      if (call->is_pasted)
3541
        {
3542
          /* There can only be one is_pasted call per function_info.  */
3543
          BFD_ASSERT (!fun->sec->segment_mark);
3544
          fun->sec->segment_mark = 1;
3545
        }
3546
      if (!call->broken_cycle
3547
          && !mark_overlay_section (call->fun, info, param))
3548
        return FALSE;
3549
    }
3550
 
3551
  /* Don't put entry code into an overlay.  The overlay manager needs
3552
     a stack!  Also, don't mark .ovl.init as an overlay.  */
3553
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
3554
      == info->output_bfd->start_address
3555
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
3556
    {
3557
      fun->sec->linker_mark = 0;
3558
      if (fun->rodata != NULL)
3559
        fun->rodata->linker_mark = 0;
3560
    }
3561
  return TRUE;
3562
}
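
/* Illustrative sketch (hypothetical helper, not part of elf32-spu.c;
   assumes <string.h> and <stdlib.h>): the name handling above maps a
   function's text section name to the rodata section kept with it
   under --overlay-rodata:
       ".text"             -> ".rodata"
       ".text.foo"         -> ".rodata.foo"
       ".gnu.linkonce.t.x" -> ".gnu.linkonce.r.x"
   The same mapping written as a standalone function; the caller owns
   and frees the result.  */

static char *
toy_rodata_name (const char *text_name)
{
  size_t len = strlen (text_name);
  char *name = NULL;

  if (strcmp (text_name, ".text") == 0)
    {
      name = malloc (sizeof (".rodata"));
      if (name != NULL)
        memcpy (name, ".rodata", sizeof (".rodata"));
    }
  else if (strncmp (text_name, ".text.", 6) == 0)
    {
      /* ".rodata" is two characters longer than ".text".  */
      name = malloc (len + 3);
      if (name != NULL)
        {
          memcpy (name, ".rodata", 7);
          memcpy (name + 7, text_name + 5, len - 5 + 1);
        }
    }
  else if (strncmp (text_name, ".gnu.linkonce.t.", 16) == 0)
    {
      name = malloc (len + 1);
      if (name != NULL)
        {
          memcpy (name, text_name, len + 1);
          name[14] = 'r';       /* "...linkonce.t." -> "...linkonce.r.".  */
        }
    }
  return name;
}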
3563
 
3564
/* If non-zero then unmark functions called from those within sections
3565
   that we need to unmark.  Unfortunately this isn't reliable since the
3566
   call graph cannot know the destination of function pointer calls.  */
3567
#define RECURSE_UNMARK 0
3568
 
3569
struct _uos_param {
3570
  asection *exclude_input_section;
3571
  asection *exclude_output_section;
3572
  unsigned long clearing;
3573
};
3574
 
3575
/* Undo some of mark_overlay_section's work.  */
3576
 
3577
static bfd_boolean
3578
unmark_overlay_section (struct function_info *fun,
3579
                        struct bfd_link_info *info,
3580
                        void *param)
3581
{
3582
  struct call_info *call;
3583
  struct _uos_param *uos_param = param;
3584
  unsigned int excluded = 0;
3585
 
3586
  if (fun->visit5)
3587
    return TRUE;
3588
 
3589
  fun->visit5 = TRUE;
3590
 
3591
  excluded = 0;
3592
  if (fun->sec == uos_param->exclude_input_section
3593
      || fun->sec->output_section == uos_param->exclude_output_section)
3594
    excluded = 1;
3595
 
3596
  if (RECURSE_UNMARK)
3597
    uos_param->clearing += excluded;
3598
 
3599
  if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3600
    {
3601
      fun->sec->linker_mark = 0;
3602
      if (fun->rodata)
3603
        fun->rodata->linker_mark = 0;
3604
    }
3605
 
3606
  for (call = fun->call_list; call != NULL; call = call->next)
3607
    if (!call->broken_cycle
3608
        && !unmark_overlay_section (call->fun, info, param))
3609
      return FALSE;
3610
 
3611
  if (RECURSE_UNMARK)
3612
    uos_param->clearing -= excluded;
3613
  return TRUE;
3614
}
3615
 
3616
struct _cl_param {
3617
  unsigned int lib_size;
3618
  asection **lib_sections;
3619
};
3620
 
3621
/* Add sections we have marked as belonging to overlays to an array
3622
   for consideration as non-overlay sections.  The array consists of
3623
   pairs of sections, (text,rodata), for functions in the call graph.  */
3624
 
3625
static bfd_boolean
3626
collect_lib_sections (struct function_info *fun,
3627
                      struct bfd_link_info *info,
3628
                      void *param)
3629
{
3630
  struct _cl_param *lib_param = param;
3631
  struct call_info *call;
3632
  unsigned int size;
3633
 
3634
  if (fun->visit6)
3635
    return TRUE;
3636
 
3637
  fun->visit6 = TRUE;
3638
  if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3639
    return TRUE;
3640
 
3641
  size = fun->sec->size;
3642
  if (fun->rodata)
3643
    size += fun->rodata->size;
3644
 
3645
  if (size <= lib_param->lib_size)
3646
    {
3647
      *lib_param->lib_sections++ = fun->sec;
3648
      fun->sec->gc_mark = 0;
3649
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3650
        {
3651
          *lib_param->lib_sections++ = fun->rodata;
3652
          fun->rodata->gc_mark = 0;
3653
        }
3654
      else
3655
        *lib_param->lib_sections++ = NULL;
3656
    }
3657
 
3658
  for (call = fun->call_list; call != NULL; call = call->next)
3659
    if (!call->broken_cycle)
3660
      collect_lib_sections (call->fun, info, param);
3661
 
3662
  return TRUE;
3663
}
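
/* Illustrative sketch (hypothetical helper, not part of elf32-spu.c):
   the array filled in above holds consecutive (text, rodata) pairs,
   with a NULL placeholder when a function has no rodata partner.
   Entry I is read back out the way auto_ovl_lib_functions does
   below:  */

static void
toy_read_lib_pair (asection **lib_sections, unsigned int i,
                   asection **text, asection **rodata)
{
  *text = lib_sections[2 * i];
  *rodata = lib_sections[2 * i + 1];    /* May be NULL.  */
}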
3664
 
3665
/* qsort predicate to sort sections by call count.  */
3666
 
3667
static int
3668
sort_lib (const void *a, const void *b)
3669
{
3670
  asection *const *s1 = a;
3671
  asection *const *s2 = b;
3672
  struct _spu_elf_section_data *sec_data;
3673
  struct spu_elf_stack_info *sinfo;
3674
  int delta;
3675
 
3676
  delta = 0;
3677
  if ((sec_data = spu_elf_section_data (*s1)) != NULL
3678
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3679
    {
3680
      int i;
3681
      for (i = 0; i < sinfo->num_fun; ++i)
3682
        delta -= sinfo->fun[i].call_count;
3683
    }
3684
 
3685
  if ((sec_data = spu_elf_section_data (*s2)) != NULL
3686
      && (sinfo = sec_data->u.i.stack_info) != NULL)
3687
    {
3688
      int i;
3689
      for (i = 0; i < sinfo->num_fun; ++i)
3690
        delta += sinfo->fun[i].call_count;
3691
    }
3692
 
3693
  if (delta != 0)
3694
    return delta;
3695
 
3696
  return s1 - s2;
3697
}
3698
 
3699
/* Remove some sections from those marked to be in overlays.  Choose
3700
   those that are called from many places, likely library functions.  */
3701
 
3702
static unsigned int
3703
auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3704
{
3705
  bfd *ibfd;
3706
  asection **lib_sections;
3707
  unsigned int i, lib_count;
3708
  struct _cl_param collect_lib_param;
3709
  struct function_info dummy_caller;
3710
  struct spu_link_hash_table *htab;
3711
 
3712
  memset (&dummy_caller, 0, sizeof (dummy_caller));
3713
  lib_count = 0;
3714
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
3715
    {
3716
      extern const bfd_target bfd_elf32_spu_vec;
3717
      asection *sec;
3718
 
3719
      if (ibfd->xvec != &bfd_elf32_spu_vec)
3720
        continue;
3721
 
3722
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3723
        if (sec->linker_mark
3724
            && sec->size < lib_size
3725
            && (sec->flags & SEC_CODE) != 0)
3726
          lib_count += 1;
3727
    }
3728
  lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3729
  if (lib_sections == NULL)
3730
    return (unsigned int) -1;
3731
  collect_lib_param.lib_size = lib_size;
3732
  collect_lib_param.lib_sections = lib_sections;
3733
  if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3734
                      TRUE))
3735
    return (unsigned int) -1;
3736
  lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3737
 
3738
  /* Sort sections so that those with the most calls are first.  */
3739
  if (lib_count > 1)
3740
    qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3741
 
3742
  htab = spu_hash_table (info);
3743
  for (i = 0; i < lib_count; i++)
3744
    {
3745
      unsigned int tmp, stub_size;
3746
      asection *sec;
3747
      struct _spu_elf_section_data *sec_data;
3748
      struct spu_elf_stack_info *sinfo;
3749
 
3750
      sec = lib_sections[2 * i];
3751
      /* If this section is OK, its size must be less than lib_size.  */
3752
      tmp = sec->size;
3753
      /* If it has a rodata section, then add that too.  */
3754
      if (lib_sections[2 * i + 1])
3755
        tmp += lib_sections[2 * i + 1]->size;
3756
      /* Add any new overlay call stubs needed by the section.  */
3757
      stub_size = 0;
3758
      if (tmp < lib_size
3759
          && (sec_data = spu_elf_section_data (sec)) != NULL
3760
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3761
        {
3762
          int k;
3763
          struct call_info *call;
3764
 
3765
          for (k = 0; k < sinfo->num_fun; ++k)
3766
            for (call = sinfo->fun[k].call_list; call; call = call->next)
3767
              if (call->fun->sec->linker_mark)
3768
                {
3769
                  struct call_info *p;
3770
                  for (p = dummy_caller.call_list; p; p = p->next)
3771
                    if (p->fun == call->fun)
3772
                      break;
3773
                  if (!p)
3774
                    stub_size += ovl_stub_size (htab->params);
3775
                }
3776
        }
3777
      if (tmp + stub_size < lib_size)
3778
        {
3779
          struct call_info **pp, *p;
3780
 
3781
          /* This section fits.  Mark it as non-overlay.  */
3782
          lib_sections[2 * i]->linker_mark = 0;
3783
          if (lib_sections[2 * i + 1])
3784
            lib_sections[2 * i + 1]->linker_mark = 0;
3785
          lib_size -= tmp + stub_size;
3786
          /* Call stubs to the section we just added are no longer
3787
             needed.  */
3788
          pp = &dummy_caller.call_list;
3789
          while ((p = *pp) != NULL)
3790
            if (!p->fun->sec->linker_mark)
3791
              {
3792
                lib_size += ovl_stub_size (htab->params);
3793
                *pp = p->next;
3794
                free (p);
3795
              }
3796
            else
3797
              pp = &p->next;
3798
          /* Add new call stubs to dummy_caller.  */
3799
          if ((sec_data = spu_elf_section_data (sec)) != NULL
3800
              && (sinfo = sec_data->u.i.stack_info) != NULL)
3801
            {
3802
              int k;
3803
              struct call_info *call;
3804
 
3805
              for (k = 0; k < sinfo->num_fun; ++k)
3806
                for (call = sinfo->fun[k].call_list;
3807
                     call;
3808
                     call = call->next)
3809
                  if (call->fun->sec->linker_mark)
3810
                    {
3811
                      struct call_info *callee;
3812
                      callee = bfd_malloc (sizeof (*callee));
3813
                      if (callee == NULL)
3814
                        return (unsigned int) -1;
3815
                      *callee = *call;
3816
                      if (!insert_callee (&dummy_caller, callee))
3817
                        free (callee);
3818
                    }
3819
            }
3820
        }
3821
    }
3822
  while (dummy_caller.call_list != NULL)
3823
    {
3824
      struct call_info *call = dummy_caller.call_list;
3825
      dummy_caller.call_list = call->next;
3826
      free (call);
3827
    }
3828
  for (i = 0; i < 2 * lib_count; i++)
3829
    if (lib_sections[i])
3830
      lib_sections[i]->gc_mark = 1;
3831
  free (lib_sections);
3832
  return lib_size;
3833
}
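
/* Worked example with invented numbers (not from any real link): with
   lib_size = 0x800 remaining, a candidate text section of 0x500 bytes,
   a 0x100 byte rodata partner and two new overlay stubs assumed to be
   0x20 bytes each, the test above is 0x500 + 0x100 + 0x40 < 0x800, so
   the pair is moved out of the overlays and lib_size drops to
   0x800 - 0x640 = 0x1c0.  */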
3834
 
3835
/* Build an array of overlay sections.  The deepest node's section is
3836
   added first, then its parent node's section, then everything called
3837
   from the parent section.  The idea is to group sections so as to
3838
   minimise calls between different overlays.  */
3839
 
3840
static bfd_boolean
3841
collect_overlays (struct function_info *fun,
3842
                  struct bfd_link_info *info,
3843
                  void *param)
3844
{
3845
  struct call_info *call;
3846
  bfd_boolean added_fun;
3847
  asection ***ovly_sections = param;
3848
 
3849
  if (fun->visit7)
3850
    return TRUE;
3851
 
3852
  fun->visit7 = TRUE;
3853
  for (call = fun->call_list; call != NULL; call = call->next)
3854
    if (!call->is_pasted && !call->broken_cycle)
3855
      {
3856
        if (!collect_overlays (call->fun, info, ovly_sections))
3857
          return FALSE;
3858
        break;
3859
      }
3860
 
3861
  added_fun = FALSE;
3862
  if (fun->sec->linker_mark && fun->sec->gc_mark)
3863
    {
3864
      fun->sec->gc_mark = 0;
3865
      *(*ovly_sections)++ = fun->sec;
3866
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3867
        {
3868
          fun->rodata->gc_mark = 0;
3869
          *(*ovly_sections)++ = fun->rodata;
3870
        }
3871
      else
3872
        *(*ovly_sections)++ = NULL;
3873
      added_fun = TRUE;
3874
 
3875
      /* Pasted sections must stay with the first section.  We don't
3876
         put pasted sections in the array, just the first section.
3877
         Mark subsequent sections as already considered.  */
3878
      if (fun->sec->segment_mark)
3879
        {
3880
          struct function_info *call_fun = fun;
3881
          do
3882
            {
3883
              for (call = call_fun->call_list; call != NULL; call = call->next)
3884
                if (call->is_pasted)
3885
                  {
3886
                    call_fun = call->fun;
3887
                    call_fun->sec->gc_mark = 0;
3888
                    if (call_fun->rodata)
3889
                      call_fun->rodata->gc_mark = 0;
3890
                    break;
3891
                  }
3892
              if (call == NULL)
3893
                abort ();
3894
            }
3895
          while (call_fun->sec->segment_mark);
3896
        }
3897
    }
3898
 
3899
  for (call = fun->call_list; call != NULL; call = call->next)
3900
    if (!call->broken_cycle
3901
        && !collect_overlays (call->fun, info, ovly_sections))
3902
      return FALSE;
3903
 
3904
  if (added_fun)
3905
    {
3906
      struct _spu_elf_section_data *sec_data;
3907
      struct spu_elf_stack_info *sinfo;
3908
 
3909
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
3910
          && (sinfo = sec_data->u.i.stack_info) != NULL)
3911
        {
3912
          int i;
3913
          for (i = 0; i < sinfo->num_fun; ++i)
3914
            if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
3915
              return FALSE;
3916
        }
3917
    }
3918
 
3919
  return TRUE;
3920
}
3921
 
3922
struct _sum_stack_param {
3923
  size_t cum_stack;
3924
  size_t overall_stack;
3925
  bfd_boolean emit_stack_syms;
3926
};
3927
 
3928
/* Descend the call graph for FUN, accumulating total stack required.  */
3929
 
3930
static bfd_boolean
3931
sum_stack (struct function_info *fun,
3932
           struct bfd_link_info *info,
3933
           void *param)
3934
{
3935
  struct call_info *call;
3936
  struct function_info *max;
3937
  size_t stack, cum_stack;
3938
  const char *f1;
3939
  bfd_boolean has_call;
3940
  struct _sum_stack_param *sum_stack_param = param;
3941
  struct spu_link_hash_table *htab;
3942
 
3943
  cum_stack = fun->stack;
3944
  sum_stack_param->cum_stack = cum_stack;
3945
  if (fun->visit3)
3946
    return TRUE;
3947
 
3948
  has_call = FALSE;
3949
  max = NULL;
3950
  for (call = fun->call_list; call; call = call->next)
3951
    {
3952
      if (call->broken_cycle)
3953
        continue;
3954
      if (!call->is_pasted)
3955
        has_call = TRUE;
3956
      if (!sum_stack (call->fun, info, sum_stack_param))
3957
        return FALSE;
3958
      stack = sum_stack_param->cum_stack;
3959
      /* Include caller stack for normal calls, don't do so for
3960
         tail calls.  fun->stack here is local stack usage for
3961
         this function.  */
3962
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
3963
        stack += fun->stack;
3964
      if (cum_stack < stack)
3965
        {
3966
          cum_stack = stack;
3967
          max = call->fun;
3968
        }
3969
    }
3970
 
3971
  sum_stack_param->cum_stack = cum_stack;
3972
  stack = fun->stack;
3973
  /* Now fun->stack holds cumulative stack.  */
3974
  fun->stack = cum_stack;
3975
  fun->visit3 = TRUE;
3976
 
3977
  if (!fun->non_root
3978
      && sum_stack_param->overall_stack < cum_stack)
3979
    sum_stack_param->overall_stack = cum_stack;
3980
 
3981
  htab = spu_hash_table (info);
3982
  if (htab->params->auto_overlay)
3983
    return TRUE;
3984
 
3985
  f1 = func_name (fun);
3986
  if (htab->params->stack_analysis)
3987
    {
3988
      if (!fun->non_root)
3989
        info->callbacks->info (_("  %s: 0x%v\n"), f1, (bfd_vma) cum_stack);
3990
      info->callbacks->minfo (_("%s: 0x%v 0x%v\n"),
3991
                              f1, (bfd_vma) stack, (bfd_vma) cum_stack);
3992
 
3993
      if (has_call)
3994
        {
3995
          info->callbacks->minfo (_("  calls:\n"));
3996
          for (call = fun->call_list; call; call = call->next)
3997
            if (!call->is_pasted && !call->broken_cycle)
3998
              {
3999
                const char *f2 = func_name (call->fun);
4000
                const char *ann1 = call->fun == max ? "*" : " ";
4001
                const char *ann2 = call->is_tail ? "t" : " ";
4002
 
4003
                info->callbacks->minfo (_("   %s%s %s\n"), ann1, ann2, f2);
4004
              }
4005
        }
4006
    }
4007
 
4008
  if (sum_stack_param->emit_stack_syms)
4009
    {
4010
      char *name = bfd_malloc (18 + strlen (f1));
4011
      struct elf_link_hash_entry *h;
4012
 
4013
      if (name == NULL)
4014
        return FALSE;
4015
 
4016
      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
4017
        sprintf (name, "__stack_%s", f1);
4018
      else
4019
        sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);
4020
 
4021
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
4022
      free (name);
4023
      if (h != NULL
4024
          && (h->root.type == bfd_link_hash_new
4025
              || h->root.type == bfd_link_hash_undefined
4026
              || h->root.type == bfd_link_hash_undefweak))
4027
        {
4028
          h->root.type = bfd_link_hash_defined;
4029
          h->root.u.def.section = bfd_abs_section_ptr;
4030
          h->root.u.def.value = cum_stack;
4031
          h->size = 0;
4032
          h->type = 0;
4033
          h->ref_regular = 1;
4034
          h->def_regular = 1;
4035
          h->ref_regular_nonweak = 1;
4036
          h->forced_local = 1;
4037
          h->non_elf = 0;
4038
        }
4039
    }
4040
 
4041
  return TRUE;
4042
}
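
/* Worked example on a hypothetical call graph: suppose main uses 0x20
   bytes of local stack and calls f (0x40 local) and g (0x30 local),
   and f only tail-calls h (0x50 local).  The traversal above computes
       h:    cum 0x50
       f:    cum max (0x40, 0x50) = 0x50   (tail call: f's frame is gone)
       g:    cum 0x30
       main: cum max (0x20, 0x50 + 0x20, 0x30 + 0x20) = 0x70
   and 0x70 is what this root contributes to overall_stack.  */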
4043
 
4044
/* SEC is part of a pasted function.  Return the call_info for the
4045
   next section of this function.  */
4046
 
4047
static struct call_info *
4048
find_pasted_call (asection *sec)
4049
{
4050
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4051
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4052
  struct call_info *call;
4053
  int k;
4054
 
4055
  for (k = 0; k < sinfo->num_fun; ++k)
4056
    for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4057
      if (call->is_pasted)
4058
        return call;
4059
  abort ();
4060
  return 0;
4061
}
4062
 
4063
/* qsort predicate to sort bfds by file name.  */
4064
 
4065
static int
4066
sort_bfds (const void *a, const void *b)
4067
{
4068
  bfd *const *abfd1 = a;
4069
  bfd *const *abfd2 = b;
4070
 
4071
  return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4072
}
4073
 
4074
static unsigned int
4075
print_one_overlay_section (FILE *script,
4076
                           unsigned int base,
4077
                           unsigned int count,
4078
                           unsigned int ovlynum,
4079
                           unsigned int *ovly_map,
4080
                           asection **ovly_sections,
4081
                           struct bfd_link_info *info)
4082
{
4083
  unsigned int j;
4084
 
4085
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4086
    {
4087
      asection *sec = ovly_sections[2 * j];
4088
 
4089
      if (fprintf (script, "   %s%c%s (%s)\n",
4090
                   (sec->owner->my_archive != NULL
4091
                    ? sec->owner->my_archive->filename : ""),
4092
                   info->path_separator,
4093
                   sec->owner->filename,
4094
                   sec->name) <= 0)
4095
        return -1;
4096
      if (sec->segment_mark)
4097
        {
4098
          struct call_info *call = find_pasted_call (sec);
4099
          while (call != NULL)
4100
            {
4101
              struct function_info *call_fun = call->fun;
4102
              sec = call_fun->sec;
4103
              if (fprintf (script, "   %s%c%s (%s)\n",
4104
                           (sec->owner->my_archive != NULL
4105
                            ? sec->owner->my_archive->filename : ""),
4106
                           info->path_separator,
4107
                           sec->owner->filename,
4108
                           sec->name) <= 0)
4109
                return -1;
4110
              for (call = call_fun->call_list; call; call = call->next)
4111
                if (call->is_pasted)
4112
                  break;
4113
            }
4114
        }
4115
    }
4116
 
4117
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
4118
    {
4119
      asection *sec = ovly_sections[2 * j + 1];
4120
      if (sec != NULL
4121
          && fprintf (script, "   %s%c%s (%s)\n",
4122
                      (sec->owner->my_archive != NULL
4123
                       ? sec->owner->my_archive->filename : ""),
4124
                      info->path_separator,
4125
                      sec->owner->filename,
4126
                      sec->name) <= 0)
4127
        return -1;
4128
 
4129
      sec = ovly_sections[2 * j];
4130
      if (sec->segment_mark)
4131
        {
4132
          struct call_info *call = find_pasted_call (sec);
4133
          while (call != NULL)
4134
            {
4135
              struct function_info *call_fun = call->fun;
4136
              sec = call_fun->rodata;
4137
              if (sec != NULL
4138
                  && fprintf (script, "   %s%c%s (%s)\n",
4139
                              (sec->owner->my_archive != NULL
4140
                               ? sec->owner->my_archive->filename : ""),
4141
                              info->path_separator,
4142
                              sec->owner->filename,
4143
                              sec->name) <= 0)
4144
                return -1;
4145
              for (call = call_fun->call_list; call; call = call->next)
4146
                if (call->is_pasted)
4147
                  break;
4148
            }
4149
        }
4150
    }
4151
 
4152
  return j;
4153
}
4154
 
4155
/* Handle --auto-overlay.  */
4156
 
4157
static void
4158
spu_elf_auto_overlay (struct bfd_link_info *info)
4159
{
4160
  bfd *ibfd;
4161
  bfd **bfd_arr;
4162
  struct elf_segment_map *m;
4163
  unsigned int fixed_size, lo, hi;
4164
  unsigned int reserved;
4165
  struct spu_link_hash_table *htab;
4166
  unsigned int base, i, count, bfd_count;
4167
  unsigned int region, ovlynum;
4168
  asection **ovly_sections, **ovly_p;
4169
  unsigned int *ovly_map;
4170
  FILE *script;
4171
  unsigned int total_overlay_size, overlay_size;
4172
  const char *ovly_mgr_entry;
4173
  struct elf_link_hash_entry *h;
4174
  struct _mos_param mos_param;
4175
  struct _uos_param uos_param;
4176
  struct function_info dummy_caller;
4177
 
4178
  /* Find the extents of our loadable image.  */
4179
  lo = (unsigned int) -1;
4180
  hi = 0;
4181
  for (m = elf_tdata (info->output_bfd)->segment_map; m != NULL; m = m->next)
4182
    if (m->p_type == PT_LOAD)
4183
      for (i = 0; i < m->count; i++)
4184
        if (m->sections[i]->size != 0)
4185
          {
4186
            if (m->sections[i]->vma < lo)
4187
              lo = m->sections[i]->vma;
4188
            if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
4189
              hi = m->sections[i]->vma + m->sections[i]->size - 1;
4190
          }
4191
  fixed_size = hi + 1 - lo;
4192
 
4193
  if (!discover_functions (info))
4194
    goto err_exit;
4195
 
4196
  if (!build_call_tree (info))
4197
    goto err_exit;
4198
 
4199
  htab = spu_hash_table (info);
4200
  reserved = htab->params->auto_overlay_reserved;
4201
  if (reserved == 0)
4202
    {
4203
      struct _sum_stack_param sum_stack_param;
4204
 
4205
      sum_stack_param.emit_stack_syms = 0;
4206
      sum_stack_param.overall_stack = 0;
4207
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4208
        goto err_exit;
4209
      reserved = (sum_stack_param.overall_stack
4210
                  + htab->params->extra_stack_space);
4211
    }
4212
 
4213
  /* No need for overlays if everything already fits.  */
4214
  if (fixed_size + reserved <= htab->local_store
4215
      && htab->params->ovly_flavour != ovly_soft_icache)
4216
    {
4217
      htab->params->auto_overlay = 0;
4218
      return;
4219
    }
4220
 
4221
  uos_param.exclude_input_section = 0;
4222
  uos_param.exclude_output_section
4223
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");
4224
 
4225
  ovly_mgr_entry = "__ovly_load";
4226
  if (htab->params->ovly_flavour == ovly_soft_icache)
4227
    ovly_mgr_entry = "__icache_br_handler";
4228
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
4229
                            FALSE, FALSE, FALSE);
4230
  if (h != NULL
4231
      && (h->root.type == bfd_link_hash_defined
4232
          || h->root.type == bfd_link_hash_defweak)
4233
      && h->def_regular)
4234
    {
4235
      /* We have a user supplied overlay manager.  */
4236
      uos_param.exclude_input_section = h->root.u.def.section;
4237
    }
4238
  else
4239
    {
4240
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
4241
         builtin version to .text, and will adjust .text size.  */
4242
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
4243
    }
4244
 
4245
  /* Mark overlay sections, and find max overlay section size.  */
4246
  mos_param.max_overlay_size = 0;
4247
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
4248
    goto err_exit;
4249
 
4250
  /* We can't put the overlay manager or interrupt routines in
4251
     overlays.  */
4252
  uos_param.clearing = 0;
4253
  if ((uos_param.exclude_input_section
4254
       || uos_param.exclude_output_section)
4255
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
4256
    goto err_exit;
4257
 
4258
  bfd_count = 0;
4259
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4260
    ++bfd_count;
4261
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
4262
  if (bfd_arr == NULL)
4263
    goto err_exit;
4264
 
4265
  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
4266
  count = 0;
4267
  bfd_count = 0;
4268
  total_overlay_size = 0;
4269
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
4270
    {
4271
      extern const bfd_target bfd_elf32_spu_vec;
4272
      asection *sec;
4273
      unsigned int old_count;
4274
 
4275
      if (ibfd->xvec != &bfd_elf32_spu_vec)
4276
        continue;
4277
 
4278
      old_count = count;
4279
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
4280
        if (sec->linker_mark)
4281
          {
4282
            if ((sec->flags & SEC_CODE) != 0)
4283
              count += 1;
4284
            fixed_size -= sec->size;
4285
            total_overlay_size += sec->size;
4286
          }
4287
        else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
4288
                 && sec->output_section->owner == info->output_bfd
4289
                 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
4290
          fixed_size -= sec->size;
4291
      if (count != old_count)
4292
        bfd_arr[bfd_count++] = ibfd;
4293
    }
4294
 
4295
  /* Since the overlay link script selects sections by file name and
4296
     section name, ensure that file names are unique.  */
4297
  if (bfd_count > 1)
4298
    {
4299
      bfd_boolean ok = TRUE;
4300
 
4301
      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
4302
      for (i = 1; i < bfd_count; ++i)
4303
        if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
4304
          {
4305
            if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
4306
              {
4307
                if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
4308
                  info->callbacks->einfo (_("%s duplicated in %s\n"),
4309
                                          bfd_arr[i]->filename,
4310
                                          bfd_arr[i]->my_archive->filename);
4311
                else
4312
                  info->callbacks->einfo (_("%s duplicated\n"),
4313
                                          bfd_arr[i]->filename);
4314
                ok = FALSE;
4315
              }
4316
          }
4317
      if (!ok)
4318
        {
4319
          info->callbacks->einfo (_("sorry, no support for duplicate "
4320
                                    "object files in auto-overlay script\n"));
4321
          bfd_set_error (bfd_error_bad_value);
4322
          goto err_exit;
4323
        }
4324
    }
4325
  free (bfd_arr);
4326
 
4327
  fixed_size += reserved;
4328
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
4329
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
4330
    {
4331
      if (htab->params->ovly_flavour == ovly_soft_icache)
4332
        {
4333
          /* Stubs in the non-icache area are bigger.  */
4334
          fixed_size += htab->non_ovly_stub * 16;
4335
          /* Space for icache manager tables.
4336
             a) Tag array, one quadword per cache line.
4337
             - word 0: ia address of present line, init to zero.  */
4338
          fixed_size += 16 << htab->num_lines_log2;
4339
          /* b) Rewrite "to" list, one quadword per cache line.  */
4340
          fixed_size += 16 << htab->num_lines_log2;
4341
          /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
4342
                to a power-of-two number of full quadwords) per cache line.  */
4343
          fixed_size += 16 << (htab->fromelem_size_log2
4344
                               + htab->num_lines_log2);
4345
          /* d) Pointer to __ea backing store (toe), 1 quadword.  */
4346
          fixed_size += 16;
4347
        }
4348
      else
4349
        {
4350
          /* Guess number of overlays.  Assuming overlay buffer is on
4351
             average only half full should be conservative.  */
4352
          ovlynum = (total_overlay_size * 2 * htab->params->num_lines
4353
                     / (htab->local_store - fixed_size));
4354
          /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
4355
          fixed_size += ovlynum * 16 + 16 + 4 + 16;
4356
        }
4357
    }
4358
 
4359
  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
4360
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
4361
                              "size of 0x%v exceeds local store\n"),
4362
                            (bfd_vma) fixed_size,
4363
                            (bfd_vma) mos_param.max_overlay_size);
4364
 
4365
  /* Now see if we should put some functions in the non-overlay area.  */
4366
  else if (fixed_size < htab->params->auto_overlay_fixed)
4367
    {
4368
      unsigned int max_fixed, lib_size;
4369
 
4370
      max_fixed = htab->local_store - mos_param.max_overlay_size;
4371
      if (max_fixed > htab->params->auto_overlay_fixed)
4372
        max_fixed = htab->params->auto_overlay_fixed;
4373
      lib_size = max_fixed - fixed_size;
4374
      lib_size = auto_ovl_lib_functions (info, lib_size);
4375
      if (lib_size == (unsigned int) -1)
4376
        goto err_exit;
4377
      fixed_size = max_fixed - lib_size;
4378
    }
4379
 
4380
  /* Build an array of sections, suitably sorted to place into
4381
     overlays.  */
4382
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
4383
  if (ovly_sections == NULL)
4384
    goto err_exit;
4385
  ovly_p = ovly_sections;
4386
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
4387
    goto err_exit;
4388
  count = (size_t) (ovly_p - ovly_sections) / 2;
4389
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
4390
  if (ovly_map == NULL)
4391
    goto err_exit;
4392
 
4393
  memset (&dummy_caller, 0, sizeof (dummy_caller));
4394
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
4395
  if (htab->params->line_size != 0)
4396
    overlay_size = htab->params->line_size;
4397
  base = 0;
4398
  ovlynum = 0;
4399
  while (base < count)
4400
    {
4401
      unsigned int size = 0, rosize = 0, roalign = 0;
4402
 
4403
      for (i = base; i < count; i++)
4404
        {
4405
          asection *sec, *rosec;
4406
          unsigned int tmp, rotmp;
4407
          unsigned int num_stubs;
4408
          struct call_info *call, *pasty;
4409
          struct _spu_elf_section_data *sec_data;
4410
          struct spu_elf_stack_info *sinfo;
4411
          unsigned int k;
4412
 
4413
          /* See whether we can add this section to the current
4414
             overlay without overflowing our overlay buffer.  */
4415
          sec = ovly_sections[2 * i];
4416
          tmp = align_power (size, sec->alignment_power) + sec->size;
4417
          rotmp = rosize;
4418
          rosec = ovly_sections[2 * i + 1];
4419
          if (rosec != NULL)
4420
            {
4421
              rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
4422
              if (roalign < rosec->alignment_power)
4423
                roalign = rosec->alignment_power;
4424
            }
4425
          if (align_power (tmp, roalign) + rotmp > overlay_size)
4426
            break;
4427
          if (sec->segment_mark)
4428
            {
4429
              /* Pasted sections must stay together, so add their
4430
                 sizes too.  */
4431
              pasty = find_pasted_call (sec);
4432
              while (pasty != NULL)
4433
                {
4434
                  struct function_info *call_fun = pasty->fun;
4435
                  tmp = (align_power (tmp, call_fun->sec->alignment_power)
4436
                         + call_fun->sec->size);
4437
                  if (call_fun->rodata)
4438
                    {
4439
                      rotmp = (align_power (rotmp,
4440
                                            call_fun->rodata->alignment_power)
4441
                               + call_fun->rodata->size);
4442
                      if (roalign < call_fun->rodata->alignment_power)
4443
                        roalign = call_fun->rodata->alignment_power;
4444
                    }
4445
                  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
4446
                    if (pasty->is_pasted)
4447
                      break;
4448
                }
4449
            }
4450
          if (align_power (tmp, roalign) + rotmp > overlay_size)
4451
            break;
4452
 
4453
          /* If we add this section, we might need new overlay call
4454
             stubs.  Add any overlay section calls to dummy_caller.  */
4455
          pasty = NULL;
4456
          sec_data = spu_elf_section_data (sec);
4457
          sinfo = sec_data->u.i.stack_info;
4458
          for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
4459
            for (call = sinfo->fun[k].call_list; call; call = call->next)
4460
              if (call->is_pasted)
4461
                {
4462
                  BFD_ASSERT (pasty == NULL);
4463
                  pasty = call;
4464
                }
4465
              else if (call->fun->sec->linker_mark)
4466
                {
4467
                  if (!copy_callee (&dummy_caller, call))
4468
                    goto err_exit;
4469
                }
4470
          while (pasty != NULL)
4471
            {
4472
              struct function_info *call_fun = pasty->fun;
4473
              pasty = NULL;
4474
              for (call = call_fun->call_list; call; call = call->next)
4475
                if (call->is_pasted)
4476
                  {
4477
                    BFD_ASSERT (pasty == NULL);
4478
                    pasty = call;
4479
                  }
4480
                else if (!copy_callee (&dummy_caller, call))
4481
                  goto err_exit;
4482
            }
4483
 
4484
          /* Calculate call stub size.  */
4485
          num_stubs = 0;
4486
          for (call = dummy_caller.call_list; call; call = call->next)
4487
            {
4488
              unsigned int stub_delta = 1;
4489
 
4490
              if (htab->params->ovly_flavour == ovly_soft_icache)
4491
                stub_delta = call->count;
4492
              num_stubs += stub_delta;
4493
 
4494
              /* If the call is within this overlay, we won't need a
4495
                 stub.  */
4496
              for (k = base; k < i + 1; k++)
4497
                if (call->fun->sec == ovly_sections[2 * k])
4498
                  {
4499
                    num_stubs -= stub_delta;
4500
                    break;
4501
                  }
4502
            }
4503
          if (htab->params->ovly_flavour == ovly_soft_icache
4504
              && num_stubs > htab->params->max_branch)
4505
            break;
4506
          if (align_power (tmp, roalign) + rotmp
4507
              + num_stubs * ovl_stub_size (htab->params) > overlay_size)
4508
            break;
4509
          size = tmp;
4510
          rosize = rotmp;
4511
        }
4512
 
4513
      if (i == base)
4514
        {
4515
          info->callbacks->einfo (_("%B:%A%s exceeds overlay size\n"),
4516
                                  ovly_sections[2 * i]->owner,
4517
                                  ovly_sections[2 * i],
4518
                                  ovly_sections[2 * i + 1] ? " + rodata" : "");
4519
          bfd_set_error (bfd_error_bad_value);
4520
          goto err_exit;
4521
        }
4522
 
4523
      while (dummy_caller.call_list != NULL)
4524
        {
4525
          struct call_info *call = dummy_caller.call_list;
4526
          dummy_caller.call_list = call->next;
4527
          free (call);
4528
        }
4529
 
4530
      ++ovlynum;
4531
      while (base < i)
4532
        ovly_map[base++] = ovlynum;
4533
    }
4534
 
4535
  script = htab->params->spu_elf_open_overlay_script ();
4536
 
4537
  if (htab->params->ovly_flavour == ovly_soft_icache)
4538
    {
4539
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4540
        goto file_err;
4541
 
4542
      if (fprintf (script,
4543
                   " . = ALIGN (%u);\n"
4544
                   " .ovl.init : { *(.ovl.init) }\n"
4545
                   " . = ABSOLUTE (ADDR (.ovl.init));\n",
4546
                   htab->params->line_size) <= 0)
4547
        goto file_err;
4548
 
4549
      base = 0;
4550
      ovlynum = 1;
4551
      while (base < count)
4552
        {
4553
          unsigned int indx = ovlynum - 1;
4554
          unsigned int vma, lma;
4555
 
4556
          vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
4557
          lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);
4558
 
4559
          if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
4560
                               ": AT (LOADADDR (.ovl.init) + %u) {\n",
4561
                       ovlynum, vma, lma) <= 0)
4562
            goto file_err;
4563
 
4564
          base = print_one_overlay_section (script, base, count, ovlynum,
4565
                                            ovly_map, ovly_sections, info);
4566
          if (base == (unsigned) -1)
4567
            goto file_err;
4568
 
4569
          if (fprintf (script, "  }\n") <= 0)
4570
            goto file_err;
4571
 
4572
          ovlynum++;
4573
        }
4574
 
4575
      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
4576
                   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
4577
        goto file_err;
4578
 
4579
      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
4580
        goto file_err;
4581
    }
4582
  else
4583
    {
4584
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
4585
        goto file_err;
4586
 
4587
      if (fprintf (script,
4588
                   " . = ALIGN (16);\n"
4589
                   " .ovl.init : { *(.ovl.init) }\n"
4590
                   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
4591
        goto file_err;
4592
 
4593
      for (region = 1; region <= htab->params->num_lines; region++)
4594
        {
4595
          ovlynum = region;
4596
          base = 0;
4597
          while (base < count && ovly_map[base] < ovlynum)
4598
            base++;
4599
 
4600
          if (base == count)
4601
            break;
4602
 
4603
          if (region == 1)
4604
            {
4605
              /* We need to set lma since we are overlaying .ovl.init.  */
4606
              if (fprintf (script,
4607
                           " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
4608
                goto file_err;
4609
            }
4610
          else
4611
            {
4612
              if (fprintf (script, " OVERLAY :\n {\n") <= 0)
4613
                goto file_err;
4614
            }
4615
 
4616
          while (base < count)
4617
            {
4618
              if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
4619
                goto file_err;
4620
 
4621
              base = print_one_overlay_section (script, base, count, ovlynum,
4622
                                                ovly_map, ovly_sections, info);
4623
              if (base == (unsigned) -1)
4624
                goto file_err;
4625
 
4626
              if (fprintf (script, "  }\n") <= 0)
4627
                goto file_err;
4628
 
4629
              ovlynum += htab->params->num_lines;
4630
              while (base < count && ovly_map[base] < ovlynum)
4631
                base++;
4632
            }
4633
 
4634
          if (fprintf (script, " }\n") <= 0)
4635
            goto file_err;
4636
        }
4637
 
4638
      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
4639
        goto file_err;
4640
    }
4641
 
4642
  free (ovly_map);
4643
  free (ovly_sections);
4644
 
4645
  if (fclose (script) != 0)
4646
    goto file_err;
4647
 
4648
  if (htab->params->auto_overlay & AUTO_RELINK)
4649
    (*htab->params->spu_elf_relink) ();
4650
 
4651
  xexit (0);
4652
 
4653
 file_err:
4654
  bfd_set_error (bfd_error_system_call);
4655
 err_exit:
4656
  info->callbacks->einfo ("%F%P: auto overlay error: %E\n");
4657
  xexit (1);
4658
}
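
/* Example of the generated script (hypothetical file and section
   names; assumes the default non-icache flavour, a single overlay
   region, and ':' as info->path_separator).  The fprintf calls above
   produce something along the lines of

     SECTIONS
     {
      . = ALIGN (16);
      .ovl.init : { *(.ovl.init) }
      . = ABSOLUTE (ADDR (.ovl.init));
      OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))
      {
       .ovly1 {
        lib.a:foo.o (.text.f1)
       }
       .ovly2 {
        lib.a:bar.o (.text.f2)
       }
      }
     }
     INSERT BEFORE .text;

   which is then fed back into a relink when AUTO_RELINK is set.  */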
4659
 
4660
/* Provide an estimate of total stack required.  */
4661
 
4662
static bfd_boolean
4663
spu_elf_stack_analysis (struct bfd_link_info *info)
4664
{
4665
  struct spu_link_hash_table *htab;
4666
  struct _sum_stack_param sum_stack_param;
4667
 
4668
  if (!discover_functions (info))
4669
    return FALSE;
4670
 
4671
  if (!build_call_tree (info))
4672
    return FALSE;
4673
 
4674
  htab = spu_hash_table (info);
4675
  if (htab->params->stack_analysis)
4676
    {
4677
      info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4678
      info->callbacks->minfo (_("\nStack size for functions.  "
4679
                                "Annotations: '*' max stack, 't' tail call\n"));
4680
    }
4681
 
4682
  sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4683
  sum_stack_param.overall_stack = 0;
4684
  if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4685
    return FALSE;
4686
 
4687
  if (htab->params->stack_analysis)
4688
    info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4689
                           (bfd_vma) sum_stack_param.overall_stack);
4690
  return TRUE;
4691
}
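
/* Example output (hypothetical function names): with the format
   strings used by sum_stack above, stack analysis prints lines of the
   shape

     Stack size for call graph root nodes.
       main: 0x70

   on the console and, in the linker map file,

     Stack size for functions.  Annotations: '*' max stack, 't' tail call
     main: 0x20 0x70
       calls:
        *  f
           g

   where the first number is the function's own frame, the second the
   accumulated worst case, and '*' marks the callee that determines
   it.  */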
4692
 
4693
/* Perform a final link.  */
4694
 
4695
static bfd_boolean
4696
spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4697
{
4698
  struct spu_link_hash_table *htab = spu_hash_table (info);
4699
 
4700
  if (htab->params->auto_overlay)
4701
    spu_elf_auto_overlay (info);
4702
 
4703
  if ((htab->params->stack_analysis
4704
       || (htab->params->ovly_flavour == ovly_soft_icache
4705
           && htab->params->lrlive_analysis))
4706
      && !spu_elf_stack_analysis (info))
4707
    info->callbacks->einfo ("%X%P: stack/lrlive analysis error: %E\n");
4708
 
4709
  if (!spu_elf_build_stubs (info))
4710
    info->callbacks->einfo ("%F%P: can not build overlay stubs: %E\n");
4711
 
4712
  return bfd_elf_final_link (output_bfd, info);
4713
}
4714
 
4715
/* Called when not normally emitting relocs, i.e. !info->relocatable
4716
   and !info->emitrelocations.  Returns a count of special relocs
4717
   that need to be emitted.  */
4718
 
4719
static unsigned int
4720
spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4721
{
4722
  Elf_Internal_Rela *relocs;
4723
  unsigned int count = 0;
4724
 
4725
  relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4726
                                      info->keep_memory);
4727
  if (relocs != NULL)
4728
    {
4729
      Elf_Internal_Rela *rel;
4730
      Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4731
 
4732
      for (rel = relocs; rel < relend; rel++)
4733
        {
4734
          int r_type = ELF32_R_TYPE (rel->r_info);
4735
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4736
            ++count;
4737
        }
4738
 
4739
      if (elf_section_data (sec)->relocs != relocs)
4740
        free (relocs);
4741
    }
4742
 
4743
  return count;
4744
}
4745
 
4746
/* Functions for adding fixup records to .fixup.  */
4747
 
4748
#define FIXUP_RECORD_SIZE 4
4749
 
4750
#define FIXUP_PUT(output_bfd,htab,index,addr) \
4751
          bfd_put_32 (output_bfd, addr, \
4752
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4753
#define FIXUP_GET(output_bfd,htab,index) \
4754
          bfd_get_32 (output_bfd, \
4755
                      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4756
 
4757
/* Store OFFSET in .fixup.  This assumes it will be called with an
4758
   increasing OFFSET.  When this OFFSET falls in the same quadword as
4759
   the last base offset, it just sets a bit; otherwise it adds a record.  */
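/* Editorial note: the stand-alone sketch below is added by the editor and
   is not part of the original source.  It models the record encoding
   described above: the upper 28 bits of each 32-bit record hold a
   quadword address, and the low 4 bits form a mask of the words within
   that quadword that carry an R_SPU_ADDR32 relocation.  */
#if 0
#include <stdio.h>

/* Record produced for the first relocation seen in a quadword.  */
static unsigned int
fixup_record (unsigned int offset)
{
  unsigned int qaddr = offset & ~15u;             /* quadword base */
  unsigned int bit = 8u >> ((offset & 15) >> 2);  /* word within quadword */
  return qaddr | bit;
}

int
main (void)
{
  /* 0x1008 is word 2 of quadword 0x1000, so the record is 0x1002.  A
     second relocation at 0x100c in the same quadword would only OR in
     bit 1, giving 0x1003; one at 0x1010 starts a new record, 0x1018.  */
  printf ("%#x\n", fixup_record (0x1008));
  return 0;
}
#endif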
4760
static void
4761
spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4762
                    bfd_vma offset)
4763
{
4764
  struct spu_link_hash_table *htab = spu_hash_table (info);
4765
  asection *sfixup = htab->sfixup;
4766
  bfd_vma qaddr = offset & ~(bfd_vma) 15;
4767
  bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4768
  if (sfixup->reloc_count == 0)
4769
    {
4770
      FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4771
      sfixup->reloc_count++;
4772
    }
4773
  else
4774
    {
4775
      bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4776
      if (qaddr != (base & ~(bfd_vma) 15))
4777
        {
4778
          if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4779
            (*_bfd_error_handler) (_("fatal error while creating .fixup"));
4780
          FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4781
          sfixup->reloc_count++;
4782
        }
4783
      else
4784
        FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4785
    }
4786
}
4787
 
4788
/* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
4789
 
4790
static int
4791
spu_elf_relocate_section (bfd *output_bfd,
4792
                          struct bfd_link_info *info,
4793
                          bfd *input_bfd,
4794
                          asection *input_section,
4795
                          bfd_byte *contents,
4796
                          Elf_Internal_Rela *relocs,
4797
                          Elf_Internal_Sym *local_syms,
4798
                          asection **local_sections)
4799
{
4800
  Elf_Internal_Shdr *symtab_hdr;
4801
  struct elf_link_hash_entry **sym_hashes;
4802
  Elf_Internal_Rela *rel, *relend;
4803
  struct spu_link_hash_table *htab;
4804
  asection *ea;
4805
  int ret = TRUE;
4806
  bfd_boolean emit_these_relocs = FALSE;
4807
  bfd_boolean is_ea_sym;
4808
  bfd_boolean stubs;
4809
  unsigned int iovl = 0;
4810
 
4811
  htab = spu_hash_table (info);
4812
  stubs = (htab->stub_sec != NULL
4813
           && maybe_needs_stubs (input_section));
4814
  iovl = overlay_index (input_section);
4815
  ea = bfd_get_section_by_name (output_bfd, "._ea");
4816
  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4817
  sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4818
 
4819
  rel = relocs;
4820
  relend = relocs + input_section->reloc_count;
4821
  for (; rel < relend; rel++)
4822
    {
4823
      int r_type;
4824
      reloc_howto_type *howto;
4825
      unsigned int r_symndx;
4826
      Elf_Internal_Sym *sym;
4827
      asection *sec;
4828
      struct elf_link_hash_entry *h;
4829
      const char *sym_name;
4830
      bfd_vma relocation;
4831
      bfd_vma addend;
4832
      bfd_reloc_status_type r;
4833
      bfd_boolean unresolved_reloc;
4834
      enum _stub_type stub_type;
4835
 
4836
      r_symndx = ELF32_R_SYM (rel->r_info);
4837
      r_type = ELF32_R_TYPE (rel->r_info);
4838
      howto = elf_howto_table + r_type;
4839
      unresolved_reloc = FALSE;
4840
      h = NULL;
4841
      sym = NULL;
4842
      sec = NULL;
4843
      if (r_symndx < symtab_hdr->sh_info)
4844
        {
4845
          sym = local_syms + r_symndx;
4846
          sec = local_sections[r_symndx];
4847
          sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4848
          relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4849
        }
4850
      else
4851
        {
4852
          if (sym_hashes == NULL)
4853
            return FALSE;
4854
 
4855
          h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4856
 
4857
          while (h->root.type == bfd_link_hash_indirect
4858
                 || h->root.type == bfd_link_hash_warning)
4859
            h = (struct elf_link_hash_entry *) h->root.u.i.link;
4860
 
4861
          relocation = 0;
4862
          if (h->root.type == bfd_link_hash_defined
4863
              || h->root.type == bfd_link_hash_defweak)
4864
            {
4865
              sec = h->root.u.def.section;
4866
              if (sec == NULL
4867
                  || sec->output_section == NULL)
4868
                /* Set a flag that will be cleared later if we find a
4869
                   relocation value for this symbol.  output_section
4870
                   is typically NULL for symbols satisfied by a shared
4871
                   library.  */
4872
                unresolved_reloc = TRUE;
4873
              else
4874
                relocation = (h->root.u.def.value
4875
                              + sec->output_section->vma
4876
                              + sec->output_offset);
4877
            }
4878
          else if (h->root.type == bfd_link_hash_undefweak)
4879
            ;
4880
          else if (info->unresolved_syms_in_objects == RM_IGNORE
4881
                   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4882
            ;
4883
          else if (!info->relocatable
4884
                   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4885
            {
4886
              bfd_boolean err;
4887
              err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4888
                     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4889
              if (!info->callbacks->undefined_symbol (info,
4890
                                                      h->root.root.string,
4891
                                                      input_bfd,
4892
                                                      input_section,
4893
                                                      rel->r_offset, err))
4894
                return FALSE;
4895
            }
4896
          sym_name = h->root.root.string;
4897
        }
4898
 
4899
      if (sec != NULL && elf_discarded_section (sec))
4900
        RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4901
                                         rel, relend, howto, contents);
4902
 
4903
      if (info->relocatable)
4904
        continue;
4905
 
4906
      /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4907
      if (r_type == R_SPU_ADD_PIC
4908
          && h != NULL
4909
          && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4910
        {
4911
          bfd_byte *loc = contents + rel->r_offset;
4912
          loc[0] = 0x1c;
4913
          loc[1] = 0x00;
4914
          loc[2] &= 0x3f;
4915
        }
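      /* Editorial note (added commentary, not in the original source):
         SPU "ai" is an RI10-format instruction whose 8-bit major opcode
         0x1c sits in byte 0, with the 10-bit immediate spanning byte 1
         and the top two bits of byte 2; RA and RT occupy the low 14 bits
         in both formats.  So the three stores above turn the RR-format
         "a rt,ra,rb" into "ai rt,ra,0" while leaving RA and RT intact.  */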
4916
 
4917
      is_ea_sym = (ea != NULL
4918
                   && sec != NULL
4919
                   && sec->output_section == ea);
4920
 
4921
      /* If this symbol is in an overlay area, we may need to relocate
4922
         to the overlay stub.  */
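      /* Editorial note (added commentary): the got_entry list searched
         below is assumed to have been populated when the stubs were
         counted and built.  Soft-icache entries are keyed on the branch
         address of this reloc, other overlay entries on the reloc addend
         and overlay index (ovl == 0 meaning the non-overlay stub).  A
         missing entry is an internal inconsistency, hence the abort.  */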
4923
      addend = rel->r_addend;
4924
      if (stubs
4925
          && !is_ea_sym
4926
          && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4927
                                          contents, info)) != no_stub)
4928
        {
4929
          unsigned int ovl = 0;
4930
          struct got_entry *g, **head;
4931
 
4932
          if (stub_type != nonovl_stub)
4933
            ovl = iovl;
4934
 
4935
          if (h != NULL)
4936
            head = &h->got.glist;
4937
          else
4938
            head = elf_local_got_ents (input_bfd) + r_symndx;
4939
 
4940
          for (g = *head; g != NULL; g = g->next)
4941
            if (htab->params->ovly_flavour == ovly_soft_icache
4942
                ? (g->ovl == ovl
4943
                   && g->br_addr == (rel->r_offset
4944
                                     + input_section->output_offset
4945
                                     + input_section->output_section->vma))
4946
                : g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4947
              break;
4948
          if (g == NULL)
4949
            abort ();
4950
 
4951
          relocation = g->stub_addr;
4952
          addend = 0;
4953
        }
4954
      else
4955
        {
4956
          /* For soft icache, encode the overlay index into addresses.  */
4957
          if (htab->params->ovly_flavour == ovly_soft_icache
4958
              && (r_type == R_SPU_ADDR16_HI
4959
                  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4960
              && !is_ea_sym)
4961
            {
4962
              unsigned int ovl = overlay_index (sec);
4963
              if (ovl != 0)
4964
                {
4965
                  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
4966
                  relocation += set_id << 18;
4967
                }
4968
            }
4969
        }
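      /* Editorial note (added commentary): a worked example of the
         soft-icache encoding above, assuming 32 cache lines
         (htab->num_lines_log2 == 5): overlay sections 1..32 give
         set_id 1 and 33..64 give set_id 2.  The set_id lands at bit 18,
         just above the 18-bit (256k) local-store address, so the low
         address bits are left untouched.  */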
4970
 
4971
      if (htab->params->emit_fixups && !info->relocatable
4972
          && (input_section->flags & SEC_ALLOC) != 0
4973
          && r_type == R_SPU_ADDR32)
4974
        {
4975
          bfd_vma offset;
4976
          offset = rel->r_offset + input_section->output_section->vma
4977
                   + input_section->output_offset;
4978
          spu_elf_emit_fixup (output_bfd, info, offset);
4979
        }
4980
 
4981
      if (unresolved_reloc)
4982
        ;
4983
      else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4984
        {
4985
          if (is_ea_sym)
4986
            {
4987
              /* ._ea is a special section that isn't allocated in SPU
4988
                 memory, but rather occupies space in PPU memory as
4989
                 part of an embedded ELF image.  If this reloc is
4990
                 against a symbol defined in ._ea, then transform the
4991
                 reloc into an equivalent one without a symbol
4992
                 relative to the start of the ELF image.  */
4993
              rel->r_addend += (relocation
4994
                                - ea->vma
4995
                                + elf_section_data (ea)->this_hdr.sh_offset);
4996
              rel->r_info = ELF32_R_INFO (0, r_type);
4997
            }
4998
          emit_these_relocs = TRUE;
4999
          continue;
5000
        }
5001
      else if (is_ea_sym)
5002
        unresolved_reloc = TRUE;
5003
 
5004 163 khays
      if (unresolved_reloc
5005
          && _bfd_elf_section_offset (output_bfd, info, input_section,
5006
                                      rel->r_offset) != (bfd_vma) -1)
5007 14 khays
        {
5008
          (*_bfd_error_handler)
5009
            (_("%B(%s+0x%lx): unresolvable %s relocation against symbol `%s'"),
5010
             input_bfd,
5011
             bfd_get_section_name (input_bfd, input_section),
5012
             (long) rel->r_offset,
5013
             howto->name,
5014
             sym_name);
5015
          ret = FALSE;
5016
        }
5017
 
5018
      r = _bfd_final_link_relocate (howto,
5019
                                    input_bfd,
5020
                                    input_section,
5021
                                    contents,
5022
                                    rel->r_offset, relocation, addend);
5023
 
5024
      if (r != bfd_reloc_ok)
5025
        {
5026
          const char *msg = (const char *) 0;
5027
 
5028
          switch (r)
5029
            {
5030
            case bfd_reloc_overflow:
5031
              if (!((*info->callbacks->reloc_overflow)
5032
                    (info, (h ? &h->root : NULL), sym_name, howto->name,
5033
                     (bfd_vma) 0, input_bfd, input_section, rel->r_offset)))
5034
                return FALSE;
5035
              break;
5036
 
5037
            case bfd_reloc_undefined:
5038
              if (!((*info->callbacks->undefined_symbol)
5039
                    (info, sym_name, input_bfd, input_section,
5040
                     rel->r_offset, TRUE)))
5041
                return FALSE;
5042
              break;
5043
 
5044
            case bfd_reloc_outofrange:
5045
              msg = _("internal error: out of range error");
5046
              goto common_error;
5047
 
5048
            case bfd_reloc_notsupported:
5049
              msg = _("internal error: unsupported relocation error");
5050
              goto common_error;
5051
 
5052
            case bfd_reloc_dangerous:
5053
              msg = _("internal error: dangerous error");
5054
              goto common_error;
5055
 
5056
            default:
5057
              msg = _("internal error: unknown error");
5058
              /* fall through */
5059
 
5060
            common_error:
5061
              ret = FALSE;
5062
              if (!((*info->callbacks->warning)
5063
                    (info, msg, sym_name, input_bfd, input_section,
5064
                     rel->r_offset)))
5065
                return FALSE;
5066
              break;
5067
            }
5068
        }
5069
    }
5070
 
5071
  if (ret
5072
      && emit_these_relocs
5073
      && !info->emitrelocations)
5074
    {
5075
      Elf_Internal_Rela *wrel;
5076
      Elf_Internal_Shdr *rel_hdr;
5077
 
5078
      wrel = rel = relocs;
5079
      relend = relocs + input_section->reloc_count;
5080
      for (; rel < relend; rel++)
5081
        {
5082
          int r_type;
5083
 
5084
          r_type = ELF32_R_TYPE (rel->r_info);
5085
          if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5086
            *wrel++ = *rel;
5087
        }
5088
      input_section->reloc_count = wrel - relocs;
5089
      /* Backflips for _bfd_elf_link_output_relocs.  */
5090
      rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5091
      rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5092
      ret = 2;
5093
    }
5094
 
5095
  return ret;
5096
}
5097
 
5098
static bfd_boolean
5099
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
5100
                                 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5101
{
5102
  return TRUE;
5103
}
5104
 
5105
/* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
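/* Editorial note (added commentary): _SPUEAR_ symbols name entry points
   referenced from outside the SPU image, so when overlay stubs exist the
   hook below redirects each such symbol to its stub, picking the
   non-overlay got_entry (addend 0, ovl 0), or for soft-icache the entry
   whose br_addr equals its stub_addr.  */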
5106
 
5107
static int
5108
spu_elf_output_symbol_hook (struct bfd_link_info *info,
5109
                            const char *sym_name ATTRIBUTE_UNUSED,
5110
                            Elf_Internal_Sym *sym,
5111
                            asection *sym_sec ATTRIBUTE_UNUSED,
5112
                            struct elf_link_hash_entry *h)
5113
{
5114
  struct spu_link_hash_table *htab = spu_hash_table (info);
5115
 
5116
  if (!info->relocatable
5117
      && htab->stub_sec != NULL
5118
      && h != NULL
5119
      && (h->root.type == bfd_link_hash_defined
5120
          || h->root.type == bfd_link_hash_defweak)
5121
      && h->def_regular
5122
      && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5123
    {
5124
      struct got_entry *g;
5125
 
5126
      for (g = h->got.glist; g != NULL; g = g->next)
5127
        if (htab->params->ovly_flavour == ovly_soft_icache
5128
            ? g->br_addr == g->stub_addr
5129
            : g->addend == 0 && g->ovl == 0)
5130
          {
5131
            sym->st_shndx = (_bfd_elf_section_from_bfd_section
5132
                             (htab->stub_sec[0]->output_section->owner,
5133
                              htab->stub_sec[0]->output_section));
5134
            sym->st_value = g->stub_addr;
5135
            break;
5136
          }
5137
    }
5138
 
5139
  return 1;
5140
}
5141
 
5142
static int spu_plugin = 0;
5143
 
5144
void
5145
spu_elf_plugin (int val)
5146
{
5147
  spu_plugin = val;
5148
}
5149
 
5150
/* Set ELF header e_type for plugins.  */
5151
 
5152
static void
5153
spu_elf_post_process_headers (bfd *abfd,
5154
                              struct bfd_link_info *info ATTRIBUTE_UNUSED)
5155
{
5156
  if (spu_plugin)
5157
    {
5158
      Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5159
 
5160
      i_ehdrp->e_type = ET_DYN;
5161
    }
5162
}
5163
 
5164
/* We may add an extra PT_LOAD segment for .toe.  We also need extra
5165
   segments for overlays.  */
5166
 
5167
static int
5168
spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5169
{
5170
  int extra = 0;
5171
  asection *sec;
5172
 
5173
  if (info != NULL)
5174
    {
5175
      struct spu_link_hash_table *htab = spu_hash_table (info);
5176
      extra = htab->num_overlays;
5177
    }
5178
 
5179
  if (extra)
5180
    ++extra;
5181
 
5182
  sec = bfd_get_section_by_name (abfd, ".toe");
5183
  if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5184
    ++extra;
5185
 
5186
  return extra;
5187
}
5188
 
5189
/* Remove .toe section from other PT_LOAD segments and put it in
5190
   a segment of its own.  Put overlays in separate segments too.  */
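/* Editorial note (added commentary): the loop below locates the first
   .toe or overlay section within a multi-section PT_LOAD map entry and
   splits that entry into up to three pieces, preserving order: the
   sections before it, the section itself in a segment of its own, and
   the remaining sections in a fresh PT_LOAD entry.  */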
5191
 
5192
static bfd_boolean
5193
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
5194
{
5195
  asection *toe, *s;
5196
  struct elf_segment_map *m, *m_overlay;
5197
  struct elf_segment_map **p, **p_overlay;
5198
  unsigned int i;
5199
 
5200
  if (info == NULL)
5201
    return TRUE;
5202
 
5203
  toe = bfd_get_section_by_name (abfd, ".toe");
5204
  for (m = elf_tdata (abfd)->segment_map; m != NULL; m = m->next)
5205
    if (m->p_type == PT_LOAD && m->count > 1)
5206
      for (i = 0; i < m->count; i++)
5207
        if ((s = m->sections[i]) == toe
5208
            || spu_elf_section_data (s)->u.o.ovl_index != 0)
5209
          {
5210
            struct elf_segment_map *m2;
5211
            bfd_vma amt;
5212
 
5213
            if (i + 1 < m->count)
5214
              {
5215
                amt = sizeof (struct elf_segment_map);
5216
                amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
5217
                m2 = bfd_zalloc (abfd, amt);
5218
                if (m2 == NULL)
5219
                  return FALSE;
5220
                m2->count = m->count - (i + 1);
5221
                memcpy (m2->sections, m->sections + i + 1,
5222
                        m2->count * sizeof (m->sections[0]));
5223
                m2->p_type = PT_LOAD;
5224
                m2->next = m->next;
5225
                m->next = m2;
5226
              }
5227
            m->count = 1;
5228
            if (i != 0)
5229
              {
5230
                m->count = i;
5231
                amt = sizeof (struct elf_segment_map);
5232
                m2 = bfd_zalloc (abfd, amt);
5233
                if (m2 == NULL)
5234
                  return FALSE;
5235
                m2->p_type = PT_LOAD;
5236
                m2->count = 1;
5237
                m2->sections[0] = s;
5238
                m2->next = m->next;
5239
                m->next = m2;
5240
              }
5241
            break;
5242
          }
5243
 
5244
 
5245
  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
5246
     PT_LOAD segments.  This can cause the .ovl.init section to be
5247
     overwritten with the contents of some overlay segment.  To work
5248
     around this issue, we ensure that all PF_OVERLAY segments are
5249
     sorted first amongst the program headers; this ensures that even
5250
     with a broken loader, the .ovl.init section (which is not marked
5251
     as PF_OVERLAY) will be placed into SPU local store on startup.  */
5252
 
5253
  /* Move all overlay segments onto a separate list.  */
5254
  p = &elf_tdata (abfd)->segment_map;
5255
  p_overlay = &m_overlay;
5256
  while (*p != NULL)
5257
    {
5258
      if ((*p)->p_type == PT_LOAD && (*p)->count == 1
5259
          && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
5260
        {
5261
          m = *p;
5262
          *p = m->next;
5263
          *p_overlay = m;
5264
          p_overlay = &m->next;
5265
          continue;
5266
        }
5267
 
5268
      p = &((*p)->next);
5269
    }
5270
 
5271
  /* Re-insert overlay segments at the head of the segment map.  */
5272
  *p_overlay = elf_tdata (abfd)->segment_map;
5273
  elf_tdata (abfd)->segment_map = m_overlay;
5274
 
5275
  return TRUE;
5276
}
5277
 
5278
/* Tweak the section type of .note.spu_name.  */
5279
 
5280
static bfd_boolean
5281
spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5282
                       Elf_Internal_Shdr *hdr,
5283
                       asection *sec)
5284
{
5285
  if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5286
    hdr->sh_type = SHT_NOTE;
5287
  return TRUE;
5288
}
5289
 
5290
/* Tweak phdrs before writing them out.  */
5291
 
5292
static int
5293
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
5294
{
5295
  const struct elf_backend_data *bed;
5296
  struct elf_obj_tdata *tdata;
5297
  Elf_Internal_Phdr *phdr, *last;
5298
  struct spu_link_hash_table *htab;
5299
  unsigned int count;
5300
  unsigned int i;
5301
 
5302
  if (info == NULL)
5303
    return TRUE;
5304
 
5305
  bed = get_elf_backend_data (abfd);
5306
  tdata = elf_tdata (abfd);
5307
  phdr = tdata->phdr;
5308
  count = tdata->program_header_size / bed->s->sizeof_phdr;
5309
  htab = spu_hash_table (info);
5310
  if (htab->num_overlays != 0)
5311
    {
5312
      struct elf_segment_map *m;
5313
      unsigned int o;
5314
 
5315
      for (i = 0, m = elf_tdata (abfd)->segment_map; m; ++i, m = m->next)
5316
        if (m->count != 0
5317
            && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
5318
          {
5319
            /* Mark this as an overlay header.  */
5320
            phdr[i].p_flags |= PF_OVERLAY;
5321
 
5322
            if (htab->ovtab != NULL && htab->ovtab->size != 0
5323
                && htab->params->ovly_flavour != ovly_soft_icache)
5324
              {
5325
                bfd_byte *p = htab->ovtab->contents;
5326
                unsigned int off = o * 16 + 8;
5327
 
5328
                /* Write file_off into _ovly_table.  */
5329
                bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
5330
              }
5331
          }
5332
      /* Soft-icache has its file offset put in .ovl.init.  */
5333
      if (htab->init != NULL && htab->init->size != 0)
5334
        {
5335
          bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;
5336
 
5337
          bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
5338
        }
5339
    }
5340
 
5341
  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
5342
     of 16.  This should always be possible when using the standard
5343
     linker scripts, but don't create overlapping segments if
5344
     someone is playing games with linker scripts.  */
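  /* Editorial note (added commentary): the backwards scan below only
     checks whether padding each PT_LOAD up to a 16-byte multiple would
     collide with the following segment (tracked in LAST); the sizes are
     actually adjusted by the second loop, and only when the scan ran to
     completion, i.e. when I wrapped around to (unsigned int) -1.  */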
5345
  last = NULL;
5346
  for (i = count; i-- != 0; )
5347
    if (phdr[i].p_type == PT_LOAD)
5348
      {
5349
        unsigned adjust;
5350
 
5351
        adjust = -phdr[i].p_filesz & 15;
5352
        if (adjust != 0
5353
            && last != NULL
5354
            && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
5355
          break;
5356
 
5357
        adjust = -phdr[i].p_memsz & 15;
5358
        if (adjust != 0
5359
            && last != NULL
5360
            && phdr[i].p_filesz != 0
5361
            && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
5362
            && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
5363
          break;
5364
 
5365
        if (phdr[i].p_filesz != 0)
5366
          last = &phdr[i];
5367
      }
5368
 
5369
  if (i == (unsigned int) -1)
5370
    for (i = count; i-- != 0; )
5371
      if (phdr[i].p_type == PT_LOAD)
5372
        {
5373
          unsigned adjust;
5374
 
5375
          adjust = -phdr[i].p_filesz & 15;
5376
          phdr[i].p_filesz += adjust;
5377
 
5378
          adjust = -phdr[i].p_memsz & 15;
5379
          phdr[i].p_memsz += adjust;
5380
        }
5381
 
5382
  return TRUE;
5383
}
5384
 
5385
bfd_boolean
5386
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
5387
{
5388
  struct spu_link_hash_table *htab = spu_hash_table (info);
5389
  if (htab->params->emit_fixups)
5390
    {
5391
      asection *sfixup = htab->sfixup;
5392
      int fixup_count = 0;
5393
      bfd *ibfd;
5394
      size_t size;
5395
 
5396
      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link_next)
5397
        {
5398
          asection *isec;
5399
 
5400
          if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
5401
            continue;
5402
 
5403
          /* Walk over each section attached to the input bfd.  */
5404
          for (isec = ibfd->sections; isec != NULL; isec = isec->next)
5405
            {
5406
              Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5407
              bfd_vma base_end;
5408
 
5409
              /* If there aren't any relocs, then there's nothing more
5410
                 to do.  */
5411
              if ((isec->flags & SEC_ALLOC) == 0
5412
                  || (isec->flags & SEC_RELOC) == 0
5413
                  || isec->reloc_count == 0)
5414
                continue;
5415
 
5416
              /* Get the relocs.  */
5417
              internal_relocs =
5418
                _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
5419
                                           info->keep_memory);
5420
              if (internal_relocs == NULL)
5421
                return FALSE;
5422
 
5423
              /* 1 quadword can contain up to 4 R_SPU_ADDR32
5424
                 relocations.  They are stored in a single word by
5425
                 saving the upper 28 bits of the address and setting the
5426
                 lower 4 bits to a bit mask of the words that have the
5427
                 relocation.  BASE_END keeps track of the next quadword. */
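              /* Editorial note (added commentary): this mirrors the
                 record format produced later by spu_elf_emit_fixup;
                 here we only count how many distinct quadwords contain
                 at least one R_SPU_ADDR32 reloc, which together with
                 the NULL sentinel bounds the size of .fixup.  */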
5428
              irela = internal_relocs;
5429
              irelaend = irela + isec->reloc_count;
5430
              base_end = 0;
5431
              for (; irela < irelaend; irela++)
5432
                if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
5433
                    && irela->r_offset >= base_end)
5434
                  {
5435
                    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
5436
                    fixup_count++;
5437
                  }
5438
            }
5439
        }
5440
 
5441
      /* We always have a NULL fixup as a sentinel.  */
5442
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
5443
      if (!bfd_set_section_size (output_bfd, sfixup, size))
5444
        return FALSE;
5445
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
5446
      if (sfixup->contents == NULL)
5447
        return FALSE;
5448
    }
5449
  return TRUE;
5450
}
5451
 
5452
#define TARGET_BIG_SYM          bfd_elf32_spu_vec
5453
#define TARGET_BIG_NAME         "elf32-spu"
5454
#define ELF_ARCH                bfd_arch_spu
5455
#define ELF_TARGET_ID           SPU_ELF_DATA
5456
#define ELF_MACHINE_CODE        EM_SPU
5457
/* This matches the alignment need for DMA.  */
5458
#define ELF_MAXPAGESIZE         0x80
5459
#define elf_backend_rela_normal         1
5460
#define elf_backend_can_gc_sections     1
5461
 
5462
#define bfd_elf32_bfd_reloc_type_lookup         spu_elf_reloc_type_lookup
5463
#define bfd_elf32_bfd_reloc_name_lookup         spu_elf_reloc_name_lookup
5464
#define elf_info_to_howto                       spu_elf_info_to_howto
5465
#define elf_backend_count_relocs                spu_elf_count_relocs
5466
#define elf_backend_relocate_section            spu_elf_relocate_section
5467
#define elf_backend_finish_dynamic_sections     spu_elf_finish_dynamic_sections
5468
#define elf_backend_symbol_processing           spu_elf_backend_symbol_processing
5469
#define elf_backend_link_output_symbol_hook     spu_elf_output_symbol_hook
5470
#define elf_backend_object_p                    spu_elf_object_p
5471
#define bfd_elf32_new_section_hook              spu_elf_new_section_hook
5472
#define bfd_elf32_bfd_link_hash_table_create    spu_elf_link_hash_table_create
5473
 
5474
#define elf_backend_additional_program_headers  spu_elf_additional_program_headers
5475
#define elf_backend_modify_segment_map          spu_elf_modify_segment_map
5476
#define elf_backend_modify_program_headers      spu_elf_modify_program_headers
5477
#define elf_backend_post_process_headers        spu_elf_post_process_headers
5478
#define elf_backend_fake_sections               spu_elf_fake_sections
5479
#define elf_backend_special_sections            spu_elf_special_sections
5480
#define bfd_elf32_bfd_final_link                spu_elf_final_link
5481
 
5482
#include "elf32-target.h"
