/* SPU target-dependent code for GDB, the GNU debugger.
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
   Based on a port by Sid Manning <sid@us.ibm.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "gdbtypes.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "gdb_string.h"
#include "gdb_assert.h"
#include "frame.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "symtab.h"
#include "symfile.h"
#include "value.h"
#include "inferior.h"
#include "dis-asm.h"
#include "objfiles.h"
#include "language.h"
#include "regcache.h"
#include "reggroups.h"
#include "floatformat.h"
#include "block.h"
#include "observer.h"
#include "infcall.h"

#include "spu-tdep.h"


/* The list of available "set spu " and "show spu " commands.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Whether to stop for new SPE contexts.  */
static int spu_stop_on_load_p = 0;
/* Whether to automatically flush the SW-managed cache.  */
static int spu_auto_flush_cache_p = 1;


/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space.  */
  int id;

  /* SPU-specific vector type.  */
  struct type *spu_builtin_type_vec128;
};


/* SPU-specific vector type.  */
static struct type *
spu_builtin_type_vec128 (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->spu_builtin_type_vec128)
    {
      const struct builtin_type *bt = builtin_type (gdbarch);
      struct type *t;

      t = arch_composite_type (gdbarch,
                               "__spu_builtin_type_vec128", TYPE_CODE_UNION);
      append_composite_type_field (t, "uint128", bt->builtin_int128);
      append_composite_type_field (t, "v2_int64",
                                   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "v4_int32",
                                   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "v8_int16",
                                   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "v16_int8",
                                   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "v2_double",
                                   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (t, "v4_float",
                                   init_vector_type (bt->builtin_float, 4));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "spu_builtin_type_vec128";

      tdep->spu_builtin_type_vec128 = t;
    }

  return tdep->spu_builtin_type_vec128;
}
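
/* Illustrative note (not part of the original file): because every GPR is
   typed as this union, a 128-bit register can be viewed through any of the
   members above from the CLI, e.g.

       (gdb) print $r80.v4_int32
       (gdb) print/x $r80.uint128

   Which view is meaningful depends on what the program last stored in the
   register; GDB does not track that.  */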


/* The list of available "info spu " commands.  */
static struct cmd_list_element *infospucmdlist = NULL;

/* Registers.  */

static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}

static struct type *
spu_register_type (struct gdbarch *gdbarch, int reg_nr)
{
  if (reg_nr < SPU_NUM_GPRS)
    return spu_builtin_type_vec128 (gdbarch);

  switch (reg_nr)
    {
    case SPU_ID_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_PC_REGNUM:
      return builtin_type (gdbarch)->builtin_func_ptr;

    case SPU_SP_REGNUM:
      return builtin_type (gdbarch)->builtin_data_ptr;

    case SPU_FPSCR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint128;

    case SPU_SRR0_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_LSLR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_STATUS_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    default:
      internal_error (__FILE__, __LINE__, "invalid regnum");
    }
}

/* Pseudo registers for preferred slots - stack pointer.  */

static void
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
                              gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  memset (reg, 0, sizeof reg);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
               reg, 0, sizeof reg);

  store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
}
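
/* For example (derived from the code above): to read SRR0 of the SPU
   context whose spufs ID is 1234, we build the annex string "1234/srr0"
   and read that spufs file via the TARGET_OBJECT_SPU object; the value
   comes back as an ASCII hex string, which strtoulst parses into an
   integer before it is stored into the register buffer.  */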

static void
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
                          int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (buf, reg, 4);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_read_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

static void
spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
                               const gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  xsnprintf (reg, sizeof reg, "0x%s",
             phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
  target_write (&current_target, TARGET_OBJECT_SPU, annex,
                reg, 0, strlen (reg));
}

static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
                           int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

/* Value conversion -- access scalar values at the preferred slot.  */

static struct value *
spu_value_from_register (struct type *type, int regnum,
                         struct frame_info *frame)
{
  struct value *value = default_value_from_register (type, regnum, frame);
  int len = TYPE_LENGTH (type);

  if (regnum < SPU_NUM_GPRS && len < 16)
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      set_value_offset (value, preferred_slot);
    }

  return value;
}
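
/* Worked example of the preferred-slot rule used above (and again in
   spu_value_to_regcache / spu_regcache_to_value below): the SPU ABI keeps
   scalars in the first 32-bit word of a 128-bit register, right-justified
   within that word when they are narrower than 4 bytes.  For a value held
   in a GPR this gives:

       char      (len 1) -> offset 3 within the register
       short     (len 2) -> offset 2
       int       (len 4) -> offset 0
       long long (len 8) -> offset 0, occupying bytes 0..7

   which is exactly what "len < 4 ? 4 - len : 0" computes.  */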

/* Register groups.  */

static int
spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
                         struct reggroup *group)
{
  /* Registers displayed via 'info regs'.  */
  if (group == general_reggroup)
    return 1;

  /* Registers displayed via 'info float'.  */
  if (group == float_reggroup)
    return 0;

  /* Registers that need to be saved/restored in order to
     push or pop frames.  */
  if (group == save_reggroup || group == restore_reggroup)
    return 1;

  return default_register_reggroup_p (gdbarch, regnum, group);
}


/* Address handling.  */

static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}

static int
spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
{
  if (dwarf2_addr_class == 1)
    return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
  else
    return 0;
}

static const char *
spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
{
  if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
    return "__ea";
  else
    return NULL;
}

static int
spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
                                      const char *name, int *type_flags_ptr)
{
  if (strcmp (name, "__ea") == 0)
    {
      *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
      return 1;
    }
  else
    return 0;
}

static void
spu_address_to_pointer (struct gdbarch *gdbarch,
                        struct type *type, gdb_byte *buf, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
                          SPUADDR_ADDR (addr));
}

static CORE_ADDR
spu_pointer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST addr
    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);

  /* Do not convert __ea pointers.  */
  if (TYPE_ADDRESS_CLASS_1 (type))
    return addr;

  return addr? SPUADDR (id, addr) : 0;
}

static CORE_ADDR
spu_integer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST addr = unpack_long (type, buf);

  return SPUADDR (id, addr);
}
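
/* Illustrative note (not part of the original file): the SPUADDR,
   SPUADDR_SPU and SPUADDR_ADDR macros from spu-tdep.h conceptually pack
   the spufs context ID into the upper part of a CORE_ADDR and the 32-bit
   local-store offset into the lower 32 bits, so that for any valid ID

       CORE_ADDR a = SPUADDR (id, 0x1234);
       SPUADDR_SPU (a)  == id
       SPUADDR_ADDR (a) == 0x1234

   holds.  This is what lets one flat CORE_ADDR space address the local
   stores of several SPU contexts at once; see spu-tdep.h for the exact
   encoding.  */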


/* Decoding SPU instructions.  */

enum
  {
    op_lqd   = 0x34,
    op_lqx   = 0x3c4,
    op_lqa   = 0x61,
    op_lqr   = 0x67,
    op_stqd  = 0x24,
    op_stqx  = 0x144,
    op_stqa  = 0x41,
    op_stqr  = 0x47,

    op_il    = 0x081,
    op_ila   = 0x21,
    op_a     = 0x0c0,
    op_ai    = 0x1c,

    op_selb  = 0x4,

    op_br    = 0x64,
    op_bra   = 0x60,
    op_brsl  = 0x66,
    op_brasl = 0x62,
    op_brnz  = 0x42,
    op_brz   = 0x40,
    op_brhnz = 0x46,
    op_brhz  = 0x44,
    op_bi    = 0x1a8,
    op_bisl  = 0x1a9,
    op_biz   = 0x128,
    op_binz  = 0x129,
    op_bihz  = 0x12a,
    op_bihnz = 0x12b,
  };

static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}

static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}

static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}

static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}

static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}

static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}
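
/* Worked example of the sign-extension idiom used by the immediate
   decoders above: for an N-bit field, the expression

       ((x & MASK) ^ SIGN) - SIGN        with SIGN == 1 << (N - 1)

   maps the unsigned field values 0 .. 2^N - 1 onto the signed range
   -2^(N-1) .. 2^(N-1) - 1.  In the 10-bit case of is_ri10, a raw field
   value of 0x3fd yields (0x3fd ^ 0x200) - 0x200 == 0x1fd - 0x200 == -3,
   which is how e.g. "stqd $1,-48($1)" encodes its offset (d-form
   quadword offsets are scaled by 16).  */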

static int
is_branch (unsigned int insn, int *offset, int *reg)
{
  int rt, i7, i16;

  if (is_ri16 (insn, op_br, &rt, &i16)
      || is_ri16 (insn, op_brsl, &rt, &i16)
      || is_ri16 (insn, op_brnz, &rt, &i16)
      || is_ri16 (insn, op_brz, &rt, &i16)
      || is_ri16 (insn, op_brhnz, &rt, &i16)
      || is_ri16 (insn, op_brhz, &rt, &i16))
    {
      *reg = SPU_PC_REGNUM;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri16 (insn, op_bra, &rt, &i16)
      || is_ri16 (insn, op_brasl, &rt, &i16))
    {
      *reg = -1;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri7 (insn, op_bi, &rt, reg, &i7)
      || is_ri7 (insn, op_bisl, &rt, reg, &i7)
      || is_ri7 (insn, op_biz, &rt, reg, &i7)
      || is_ri7 (insn, op_binz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
    {
      *offset = 0;
      return 1;
    }

  return 0;
}


/* Prolog parsing.  */

struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };

static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
                      CORE_ADDR start_pc, CORE_ADDR end_pc,
                      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int found_bc = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
      reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

        - The first instruction to set up the stack pointer.
        - The first instruction to set up the frame pointer.
        - The first instruction to save the link register.
        - The first instruction to save the backchain.

     We return the instruction after the latest of these four,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

      - Any instruction adding to the current frame pointer.
      - Any instruction loading an immediate constant into a register.
      - Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
        break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
         It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            data->cfa_offset -= immed;

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              data->size = -immed;
            }
          else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
                   && !found_fp)
            {
              found_fp = 1;
              prolog_pc = pc + 4;

              data->cfa_reg = SPU_FP_REGNUM;
              data->cfa_offset -= immed;
            }
        }

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            {
              if (reg_immed[rb] != 0)
                data->cfa_offset -= reg_immed[rb];
              else
                data->cfa_reg = -1;  /* We don't know the CFA any more.  */
            }

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              if (reg_immed[rb] != 0)
                data->size = -reg_immed[rb];
            }
        }

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
        {
          reg_immed[rt] = immed;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      else if (is_ri18 (insn, op_ila, &rt, &immed))
        {
          reg_immed[rt] = immed & 0x3ffff;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
        {
          if (ra == data->cfa_reg)
            data->reg_offset[rt] = data->cfa_offset - (immed << 4);

          if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
            {
              found_lr = 1;
              prolog_pc = pc + 4;
            }

          if (ra == SPU_RAW_SP_REGNUM
              && (found_sp? immed == 0 : rt == SPU_RAW_SP_REGNUM)
              && !found_bc)
            {
              found_bc = 1;
              prolog_pc = pc + 4;
            }
        }

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
        {
          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
        break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}
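
/* For illustration (a sketch, not taken from the original sources): a
   typical GCC-generated SPU prologue for a 48-byte frame looks roughly
   like

       stqd  $0,16($1)      # save the link register at entry-SP + 16
       stqd  $1,-48($1)     # save the back chain at the new SP
       ai    $1,$1,-48      # allocate the frame

   Feeding this sequence through the analysis above sets found_lr,
   found_bc and found_sp and leaves data->size == 48, data->cfa_reg ==
   SPU_SP_REGNUM, data->cfa_offset == 48 (so the CFA is the unwound SP
   plus 48, i.e. the SP at function entry) and
   data->reg_offset[SPU_LR_REGNUM] == -16, which places the saved link
   register at CFA + 16 as expected by spu_frame_unwind_cache below.  */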

/* Return the first instruction after the prologue starting at PC.  */
static CORE_ADDR
spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct spu_prologue_data data;
  return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
}

/* Return the frame pointer in use at address PC.  */
static void
spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
                           int *reg, LONGEST *offset)
{
  struct spu_prologue_data data;
  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);

  if (data.size != -1 && data.cfa_reg != -1)
    {
      /* The 'frame pointer' address is CFA minus frame size.  */
      *reg = data.cfa_reg;
      *offset = data.cfa_offset - data.size;
    }
  else
    {
      /* ??? We don't really know ... */
      *reg = SPU_SP_REGNUM;
      *offset = 0;
    }
}

/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
       a) If you find an instruction that modifies the stack pointer
          or transfers control (except a return), execution is not in
          an epilogue, return.
       b) Stop scanning if you find a return instruction or reach the
          end of the function or reach the hard limit for the size of
          an epilogue.
   2) scan backward from the point of execution:
        a) If you find an instruction that modifies the stack pointer,
            execution *is* in an epilogue, return.
        b) Stop scanning if you reach an instruction that transfers
           control or the beginning of the function or reach the hard
           limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        {
          if (immed == 0 && ra == SPU_LR_REGNUM)
            break;

          return 0;
        }

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 0;
        }
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 1;
        }
    }

  return 0;
}


/* Normal stack frames.  */

struct spu_unwind_cache
{
  CORE_ADDR func;
  CORE_ADDR frame_base;
  CORE_ADDR local_base;

  struct trad_frame_saved_reg *saved_regs;
};

static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
                        void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
                          &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
        if (i == SPU_LR_REGNUM
            || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
          if (data.reg_offset[i] != -1)
            info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      ULONGEST lslr;
      int status;

      /* Get local store limit.  */
      lslr = get_frame_register_unsigned (this_frame, SPU_LSLR_REGNUM);
      if (!lslr)
        lslr = (ULONGEST) -1;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
                                         &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain <= lslr)
        {
          /* Assume the link register is saved into its slot.  */
          if (backchain + 16 <= lslr)
            info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id, backchain + 16);

          /* Frame bases.  */
          info->frame_base = SPUADDR (id, backchain);
          info->local_base = SPUADDR (id, reg);
        }
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
                        SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf, 4, byte_order));

  return info;
}

static void
spu_frame_this_id (struct frame_info *this_frame,
                   void **this_prologue_cache, struct frame_id *this_id)
{
  struct spu_unwind_cache *info =
    spu_frame_unwind_cache (this_frame, this_prologue_cache);

  if (info->frame_base == 0)
    return;

  *this_id = frame_id_build (info->frame_base, info->func);
}

static struct value *
spu_frame_prev_register (struct frame_info *this_frame,
                         void **this_prologue_cache, int regnum)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_prologue_cache);

  /* Special-case the stack pointer.  */
  if (regnum == SPU_RAW_SP_REGNUM)
    regnum = SPU_SP_REGNUM;

  return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
}

static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};

static CORE_ADDR
spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_cache);
  return info->local_base;
}

static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};

static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static CORE_ADDR
spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
  return SPUADDR (tdep->id, sp);
}

static CORE_ADDR
spu_read_pc (struct regcache *regcache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
  ULONGEST pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static void
spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  /* Keep interrupt enabled state unchanged.  */
  ULONGEST old_pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
  regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
                                  (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
}


/* Cell/B.E. cross-architecture unwinder support.  */

struct spu2ppu_cache
{
  struct frame_id frame_id;
  struct regcache *regcache;
};

static struct gdbarch *
spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
{
  struct spu2ppu_cache *cache = *this_cache;
  return get_regcache_arch (cache->regcache);
}

static void
spu2ppu_this_id (struct frame_info *this_frame,
                 void **this_cache, struct frame_id *this_id)
{
  struct spu2ppu_cache *cache = *this_cache;
  *this_id = cache->frame_id;
}

static struct value *
spu2ppu_prev_register (struct frame_info *this_frame,
                       void **this_cache, int regnum)
{
  struct spu2ppu_cache *cache = *this_cache;
  struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
  gdb_byte *buf;

  buf = alloca (register_size (gdbarch, regnum));
  regcache_cooked_read (cache->regcache, regnum, buf);
  return frame_unwind_got_bytes (this_frame, regnum, buf);
}

static int
spu2ppu_sniffer (const struct frame_unwind *self,
                 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  if (gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
        = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
        if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
          break;

      if (fi)
        {
          cache->regcache = frame_save_as_regcache (fi);
          *this_prologue_cache = cache;
          return 1;
        }
      else
        {
          struct regcache *regcache;
          regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
          cache->regcache = regcache_dup (regcache);
          *this_prologue_cache = cache;
          return 1;
        }
    }

  return 0;
}

static void
spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct spu2ppu_cache *cache = this_cache;
  regcache_xfree (cache->regcache);
}

static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};


/* Function calling convention.  */

static CORE_ADDR
spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & ~15;
}

static CORE_ADDR
spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
                     struct value **args, int nargs, struct type *value_type,
                     CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
                     struct regcache *regcache)
{
  /* Allocate space sufficient for a breakpoint, keeping the stack aligned.  */
  sp = (sp - 4) & ~15;
  /* Store the address of that breakpoint */
  *bp_addr = sp;
  /* The call starts at the callee's entry point.  */
  *real_pc = funaddr;

  return sp;
}

static int
spu_scalar_value_p (struct type *type)
{
  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      return TYPE_LENGTH (type) <= 16;

    default:
      return 0;
    }
}

static void
spu_value_to_regcache (struct regcache *regcache, int regnum,
                       struct type *type, const gdb_byte *in)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_write (regcache, regnum++, in);
          in += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_write_part (regcache, regnum, 0, len, in);
    }
}

static void
spu_regcache_to_value (struct regcache *regcache, int regnum,
                       struct type *type, gdb_byte *out)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_read (regcache, regnum++, out);
          out += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_read_part (regcache, regnum, 0, len, out);
    }
}

static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                     struct regcache *regcache, CORE_ADDR bp_addr,
                     int nargs, struct value **args, CORE_ADDR sp,
                     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
         all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
        {
          stack_arg = i;
          break;
        }

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
        {
          struct type *type = check_typedef (value_type (args[i]));
          sp -= align_up (TYPE_LENGTH (type), 16);
        }

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
        {
          struct value *arg = args[i];
          struct type *type = check_typedef (value_type (arg));
          int len = TYPE_LENGTH (type);
          int preferred_slot;

          if (spu_scalar_value_p (type))
            preferred_slot = len < 4 ? 4 - len : 0;
          else
            preferred_slot = 0;

          target_write_memory (ap + preferred_slot, value_contents (arg), len);
          ap += align_up (TYPE_LENGTH (type), 16);
        }
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}
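
/* Illustrative example (not from the original sources) of how the code
   above distributes arguments, assuming SPU_ARG1_REGNUM is $3:

       int f (char c, double d, struct { char b[20]; } s);

   c is written into byte 3 of $3 (its preferred slot), d into bytes 0..7
   of $4, and the 20-byte struct fills all of $5 plus bytes 0..3 of $6,
   since every argument starts in a fresh register and occupies
   align_up (len, 16) / 16 registers.  Only when an argument no longer
   fits wholly into the registers up to SPU_ARGN_REGNUM does it -- along
   with all following arguments -- spill to the stack, using the same
   preferred-slot layout there.  */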

static struct frame_id
spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
  CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
  return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
}

/* Function return value access.  */

static enum return_value_convention
spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
                  struct type *type, struct regcache *regcache,
                  gdb_byte *out, const gdb_byte *in)
{
  enum return_value_convention rvc;

  if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
    rvc = RETURN_VALUE_REGISTER_CONVENTION;
  else
    rvc = RETURN_VALUE_STRUCT_CONVENTION;

  if (in)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error ("Cannot set function return value.");
          break;
        }
    }
  else if (out)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error ("Function return value unknown.");
          break;
        }
    }

  return rvc;
}


/* Breakpoints.  */

static const gdb_byte *
spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
{
  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };

  *lenptr = sizeof breakpoint;
  return breakpoint;
}

static int
spu_memory_remove_breakpoint (struct gdbarch *gdbarch,
                              struct bp_target_info *bp_tgt)
{
  /* We work around a problem in combined Cell/B.E. debugging here.  Consider
     that in a combined application, we have some breakpoints inserted in SPU
     code, and now the application forks (on the PPU side).  GDB common code
     will assume that the fork system call copied all breakpoints into the new
     process' address space, and that all those copies now need to be removed
     (see breakpoint.c:detach_breakpoints).

     While this is certainly true for PPU side breakpoints, it is not true
     for SPU side breakpoints.  fork will clone the SPU context file
     descriptors, so that all the existing SPU contexts are accessible
     in the new process.  However, the contents of the SPU contexts themselves
     are *not* cloned.  Therefore the effect of detach_breakpoints is to
     remove SPU breakpoints from the *original* SPU context's local store
     -- this is not the correct behaviour.

     The workaround is to check whether the PID we are asked to remove this
     breakpoint from (i.e. ptid_get_pid (inferior_ptid)) is different from the
     PID of the current inferior (i.e. current_inferior ()->pid).  This is only
     true in the context of detach_breakpoints.  If so, we simply do nothing.
     [ Note that for the fork child process, it does not matter if breakpoints
     remain inserted, because those SPU contexts are not runnable anyway --
     the Linux kernel allows only the original process to invoke spu_run.  ]  */

  if (ptid_get_pid (inferior_ptid) != current_inferior ()->pid)
    return 0;

  return default_memory_remove_breakpoint (gdbarch, bp_tgt);
}


/* Software single-stepping support.  */

static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];
  ULONGEST lslr;

  pc = get_frame_pc (frame);

  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Get local store limit.  */
  lslr = get_frame_register_unsigned (frame, SPU_LSLR_REGNUM);
  if (!lslr)
    lslr = (ULONGEST) -1;

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & lslr;
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & lslr;

  insert_single_step_breakpoint (gdbarch,
                                 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));

  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      if (reg == SPU_PC_REGNUM)
        target += SPUADDR_ADDR (pc);
      else if (reg != -1)
        {
          get_frame_register_bytes (frame, reg, 0, 4, buf);
          target += extract_unsigned_integer (buf, 4, byte_order) & -4;
        }

      target = target & lslr;
      if (target != next_pc)
        insert_single_step_breakpoint (gdbarch, aspace,
                                       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}


/* Longjmp support.  */

static int
spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[4];
  CORE_ADDR jb_addr;

  /* Jump buffer is pointed to by the argument register $r3.  */
  get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf);
  jb_addr = extract_unsigned_integer (buf, 4, byte_order);
  if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
    return 0;

  *pc = extract_unsigned_integer (buf, 4, byte_order);
  *pc = SPUADDR (tdep->id, *pc);
  return 1;
}


/* Disassembler.  */

struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;
};

static void
spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
{
  struct spu_dis_asm_data *data = info->application_data;
  print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
}

static int
gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
{
  /* The opcodes disassembler does 18-bit address arithmetic.  Make sure the
     SPU ID encoded in the high bits is added back when we call print_address.  */
  struct disassemble_info spu_info = *info;
  struct spu_dis_asm_data data;
  data.gdbarch = info->application_data;
  data.id = SPUADDR_SPU (memaddr);

  spu_info.application_data = &data;
  spu_info.print_address_func = spu_dis_asm_print_address;
  return print_insn_spu (memaddr, &spu_info);
}
1641
 
1642
 
1643
/* Target overlays for the SPU overlay manager.
1644
 
1645
   See the documentation of simple_overlay_update for how the
1646
   interface is supposed to work.
1647
 
1648
   Data structures used by the overlay manager:
1649
 
1650
   struct ovly_table
1651
     {
1652
        u32 vma;
1653
        u32 size;
1654
        u32 pos;
1655
        u32 buf;
1656
     } _ovly_table[];   -- one entry per overlay section
1657
 
1658
   struct ovly_buf_table
1659
     {
1660
        u32 mapped;
1661
     } _ovly_buf_table[];  -- one entry per overlay buffer
1662
 
1663
   _ovly_table should never change.
1664
 
1665
   Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
1666
   and _ovly_buf_table are of type STT_OBJECT and their size set to the size
1667
   of the respective array. buf in _ovly_table is an index into _ovly_buf_table.
1668
 
1669
   mapped is an index into _ovly_table. Both the mapped and buf indices start
1670
   from one to reference the first entry in their respective tables.  */
1671
 
1672
/* Using the per-objfile private data mechanism, we store for each
1673
   objfile an array of "struct spu_overlay_table" structures, one
1674
   for each obj_section of the objfile.  This structure holds two
1675
   fields, MAPPED_PTR and MAPPED_VAL.  If MAPPED_PTR is zero, this
1676
   is *not* an overlay section.  If it is non-zero, it represents
1677
   a target address.  The overlay section is mapped iff the target
1678
   integer at this location equals MAPPED_VAL.  */
1679
 
1680
static const struct objfile_data *spu_overlay_data;
1681
 
1682
struct spu_overlay_table
1683
  {
1684
    CORE_ADDR mapped_ptr;
1685
    CORE_ADDR mapped_val;
1686
  };
1687
 
1688
/* Retrieve the overlay table for OBJFILE.  If not already cached, read
1689
   the _ovly_table data structure from the target and initialize the
1690
   spu_overlay_table data structure from it.  */
1691
static struct spu_overlay_table *
1692
spu_get_overlay_table (struct objfile *objfile)
1693
{
1694
  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
1695
                   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1696
  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
1697
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
1698
  unsigned ovly_table_size, ovly_buf_table_size;
1699
  struct spu_overlay_table *tbl;
1700
  struct obj_section *osect;
1701
  char *ovly_table;
1702
  int i;
1703
 
1704
  tbl = objfile_data (objfile, spu_overlay_data);
1705
  if (tbl)
1706
    return tbl;
1707
 
1708
  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
1709
  if (!ovly_table_msym)
1710
    return NULL;
1711
 
1712
  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
1713
  if (!ovly_buf_table_msym)
1714
    return NULL;
1715
 
1716
  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
1717
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);
1718
 
1719
  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
1720
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);
1721
 
1722
  ovly_table = xmalloc (ovly_table_size);
1723
  read_memory (ovly_table_base, ovly_table, ovly_table_size);
1724
 
1725
  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
1726
                        objfile->sections_end - objfile->sections,
1727
                        struct spu_overlay_table);
1728
 
1729
  for (i = 0; i < ovly_table_size / 16; i++)
1730
    {
1731
      CORE_ADDR vma  = extract_unsigned_integer (ovly_table + 16*i + 0,
1732
                                                 4, byte_order);
1733
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
1734
                                                 4, byte_order);
1735
      CORE_ADDR pos  = extract_unsigned_integer (ovly_table + 16*i + 8,
1736
                                                 4, byte_order);
1737
      CORE_ADDR buf  = extract_unsigned_integer (ovly_table + 16*i + 12,
1738
                                                 4, byte_order);
1739
 
1740
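      /* The buffer index is 1-based, so 0 is invalid; also skip entries
         whose index would fall outside _ovly_buf_table.  */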
      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
1741
        continue;
1742
 
1743
      ALL_OBJFILE_OSECTIONS (objfile, osect)
1744
        if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
1745
            && pos == osect->the_bfd_section->filepos)
1746
          {
1747
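            /* OSECT is the section described by _ovly_table entry I; record
               the address of its buffer's "mapped" word and the 1-based
               value that marks this section as mapped.  */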
            int ndx = osect - objfile->sections;
1748
            tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
1749
            tbl[ndx].mapped_val = i + 1;
1750
            break;
1751
          }
1752
    }
1753
 
1754
  xfree (ovly_table);
1755
  set_objfile_data (objfile, spu_overlay_data, tbl);
1756
  return tbl;
1757
}
1758
 
1759
/* Read the _ovly_buf_table entry from the target to determine whether
1760
   OSECT is currently mapped, and update the mapped state.  */
1761
static void
1762
spu_overlay_update_osect (struct obj_section *osect)
1763
{
1764
  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
1765
                   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1766
  struct spu_overlay_table *ovly_table;
1767
  CORE_ADDR id, val;
1768
 
1769
  ovly_table = spu_get_overlay_table (osect->objfile);
1770
  if (!ovly_table)
1771
    return;
1772
 
1773
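  /* The cached table has one entry per obj_section; step to the entry for
     OSECT by its index within the objfile's section array.  */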
  ovly_table += osect - osect->objfile->sections;
1774
  if (ovly_table->mapped_ptr == 0)
1775
    return;
1776
 
1777
  id = SPUADDR_SPU (obj_section_addr (osect));
1778
  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
1779
                                      4, byte_order);
1780
  osect->ovly_mapped = (val == ovly_table->mapped_val);
1781
}
1782
 
1783
/* If OSECT is NULL, then update all sections' mapped state.
1784
   If OSECT is non-NULL, then update only OSECT's mapped state.  */
1785
static void
1786
spu_overlay_update (struct obj_section *osect)
1787
{
1788
  /* Just one section.  */
1789
  if (osect)
1790
    spu_overlay_update_osect (osect);
1791
 
1792
  /* All sections.  */
1793
  else
1794
    {
1795
      struct objfile *objfile;
1796
 
1797
      ALL_OBJSECTIONS (objfile, osect)
1798
        if (section_is_overlay (osect))
1799
          spu_overlay_update_osect (osect);
1800
    }
1801
}
1802
 
1803
/* Whenever a new objfile is loaded, read the target's _ovly_table.
1804
   If there is one, go through all sections and make sure that for
1805
   non-overlay sections LMA equals VMA, while each overlay section gets
1806
   an LMA at or above SPU_OVERLAY_LMA.  */
1807
static void
1808
spu_overlay_new_objfile (struct objfile *objfile)
1809
{
1810
  struct spu_overlay_table *ovly_table;
1811
  struct obj_section *osect;
1812
 
1813
  /* If we've already touched this file, do nothing.  */
1814
  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1815
    return;
1816
 
1817
  /* Consider only SPU objfiles.  */
1818
  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1819
    return;
1820
 
1821
  /* Check if this objfile has overlays.  */
1822
  ovly_table = spu_get_overlay_table (objfile);
1823
  if (!ovly_table)
1824
    return;
1825
 
1826
  /* Now go and fiddle with all the LMAs.  */
1827
  ALL_OBJFILE_OSECTIONS (objfile, osect)
1828
    {
1829
      bfd *obfd = objfile->obfd;
1830
      asection *bsect = osect->the_bfd_section;
1831
      int ndx = osect - objfile->sections;
1832
 
1833
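      /* Per the rule described above: normal sections keep LMA == VMA, while
         each overlay section gets a distinct LMA at or above SPU_OVERLAY_LMA,
         keyed by its file position.  */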
      if (ovly_table[ndx].mapped_ptr == 0)
1834
        bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1835
      else
1836
        bfd_section_lma (obfd, bsect) = SPU_OVERLAY_LMA + bsect->filepos;
1837
    }
1838
}
1839
 
1840
 
1841
/* Insert a temporary breakpoint on the "main" function of the newly
1842
   loaded SPE context OBJFILE.  */
1843
static void
1844
spu_catch_start (struct objfile *objfile)
1845
{
1846
  struct minimal_symbol *minsym;
1847
  struct symtab *symtab;
1848
  CORE_ADDR pc;
1849
  char buf[32];
1850
 
1851
  /* Do this only if requested by "set spu stop-on-load on".  */
1852
  if (!spu_stop_on_load_p)
1853
    return;
1854
 
1855
  /* Consider only SPU objfiles.  */
1856
  if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1857
    return;
1858
 
1859
  /* The main objfile is handled differently.  */
1860
  if (objfile == symfile_objfile)
1861
    return;
1862
 
1863
  /* There can be multiple symbols named "main".  Search for the
1864
     "main" in *this* objfile.  */
1865
  minsym = lookup_minimal_symbol ("main", NULL, objfile);
1866
  if (!minsym)
1867
    return;
1868
 
1869
  /* If we have debugging information, try to use it -- this
1870
     will allow us to properly skip the prologue.  */
1871
  pc = SYMBOL_VALUE_ADDRESS (minsym);
1872
  symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (minsym));
1873
  if (symtab != NULL)
1874
    {
1875
      struct blockvector *bv = BLOCKVECTOR (symtab);
1876
      struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
1877
      struct symbol *sym;
1878
      struct symtab_and_line sal;
1879
 
1880
      sym = lookup_block_symbol (block, "main", VAR_DOMAIN);
1881
      if (sym)
1882
        {
1883
          fixup_symbol_section (sym, objfile);
1884
          sal = find_function_start_sal (sym, 1);
1885
          pc = sal.pc;
1886
        }
1887
    }
1888
 
1889
  /* Use a numerical address for the set_breakpoint command to avoid having
1890
     the breakpoint re-set incorrectly.  */
1891
  xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
1892
  create_breakpoint (get_objfile_arch (objfile), buf /* arg */,
1893
                     NULL /* cond_string */, -1 /* thread */,
1894
                     0 /* parse_condition_and_thread */, 1 /* tempflag */,
1895
                     bp_breakpoint /* type_wanted */,
1896
                     0 /* ignore_count */,
1897
                     AUTO_BOOLEAN_FALSE /* pending_break_support */,
1898
                     NULL /* ops */, 0 /* from_tty */, 1 /* enabled */);
1899
}
1900
 
1901
 
1902
/* Look up OBJFILE loaded into FRAME's SPU context.  */
1903
static struct objfile *
1904
spu_objfile_from_frame (struct frame_info *frame)
1905
{
1906
  struct gdbarch *gdbarch = get_frame_arch (frame);
1907
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1908
  struct objfile *obj;
1909
 
1910
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1911
    return NULL;
1912
 
1913
  ALL_OBJFILES (obj)
1914
    {
1915
      if (obj->sections != obj->sections_end
1916
          && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
1917
        return obj;
1918
    }
1919
 
1920
  return NULL;
1921
}
1922
 
1923
/* Flush the software-managed cache used for EA pointer accesses, if the
   inferior provides a __cache_flush routine.  */
1924
static void
1925
flush_ea_cache (void)
1926
{
1927
  struct minimal_symbol *msymbol;
1928
  struct objfile *obj;
1929
 
1930
  if (!has_stack_frames ())
1931
    return;
1932
 
1933
  obj = spu_objfile_from_frame (get_current_frame ());
1934
  if (obj == NULL)
1935
    return;
1936
 
1937
  /* Lookup inferior function __cache_flush.  */
1938
  msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
1939
  if (msymbol != NULL)
1940
    {
1941
      struct type *type;
1942
      CORE_ADDR addr;
1943
 
1944
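      /* Build a value of type "pointer to function returning void" so that
         __cache_flush can be invoked via call_function_by_hand below.  */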
      type = objfile_type (obj)->builtin_void;
1945
      type = lookup_function_type (type);
1946
      type = lookup_pointer_type (type);
1947
      addr = SYMBOL_VALUE_ADDRESS (msymbol);
1948
 
1949
      call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
1950
    }
1951
}
1952
 
1953
/* This handler is called when the inferior has stopped.  If it stopped
1954
   while executing SPU code, flush the EA cache if one is in use.  */
1955
static void
1956
spu_attach_normal_stop (struct bpstats *bs, int print_frame)
1957
{
1958
  if (!spu_auto_flush_cache_p)
1959
    return;
1960
 
1961
  /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
1962
     re-entering this function when __cache_flush stops.  */
1963
  spu_auto_flush_cache_p = 0;
1964
  flush_ea_cache ();
1965
  spu_auto_flush_cache_p = 1;
1966
}
1967
 
1968
 
1969
/* "info spu" commands.  */
1970
 
1971
static void
1972
info_spu_event_command (char *args, int from_tty)
1973
{
1974
  struct frame_info *frame = get_selected_frame (NULL);
1975
  ULONGEST event_status = 0;
1976
  ULONGEST event_mask = 0;
1977
  struct cleanup *chain;
1978
  gdb_byte buf[100];
1979
  char annex[32];
1980
  LONGEST len;
1981
  int rc, id;
1982
 
1983
  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1984
    error (_("\"info spu\" is only supported on the SPU architecture."));
1985
 
1986
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1987
 
1988
  xsnprintf (annex, sizeof annex, "%d/event_status", id);
1989
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1990
                     buf, 0, (sizeof (buf) - 1));
1991
  if (len <= 0)
1992
    error (_("Could not read event_status."));
1993
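  /* The spufs event_status file contains an ASCII hex string; NUL-terminate
     what we read and parse it.  */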
  buf[len] = '\0';
1994
  event_status = strtoulst (buf, NULL, 16);
1995
 
1996
  xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1997
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1998
                     buf, 0, (sizeof (buf) - 1));
1999
  if (len <= 0)
2000
    error (_("Could not read event_mask."));
2001
  buf[len] = '\0';
2002
  event_mask = strtoulst (buf, NULL, 16);
2003
 
2004
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
2005
 
2006
  if (ui_out_is_mi_like_p (uiout))
2007
    {
2008
      ui_out_field_fmt (uiout, "event_status",
2009
                        "0x%s", phex_nz (event_status, 4));
2010
      ui_out_field_fmt (uiout, "event_mask",
2011
                        "0x%s", phex_nz (event_mask, 4));
2012
    }
2013
  else
2014
    {
2015
      printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
2016
      printf_filtered (_("Event Mask   0x%s\n"), phex (event_mask, 4));
2017
    }
2018
 
2019
  do_cleanups (chain);
2020
}
2021
 
2022
static void
2023
info_spu_signal_command (char *args, int from_tty)
2024
{
2025
  struct frame_info *frame = get_selected_frame (NULL);
2026
  struct gdbarch *gdbarch = get_frame_arch (frame);
2027
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2028
  ULONGEST signal1 = 0;
2029
  ULONGEST signal1_type = 0;
2030
  int signal1_pending = 0;
2031
  ULONGEST signal2 = 0;
2032
  ULONGEST signal2_type = 0;
2033
  int signal2_pending = 0;
2034
  struct cleanup *chain;
2035
  char annex[32];
2036
  gdb_byte buf[100];
2037
  LONGEST len;
2038
  int rc, id;
2039
 
2040
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2041
    error (_("\"info spu\" is only supported on the SPU architecture."));
2042
 
2043
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2044
 
2045
  xsnprintf (annex, sizeof annex, "%d/signal1", id);
2046
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2047
  if (len < 0)
2048
    error (_("Could not read signal1."));
2049
  else if (len == 4)
2050
    {
2051
      signal1 = extract_unsigned_integer (buf, 4, byte_order);
2052
      signal1_pending = 1;
2053
    }
2054
 
2055
  xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
2056
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2057
                     buf, 0, (sizeof (buf) - 1));
2058
  if (len <= 0)
2059
    error (_("Could not read signal1_type."));
2060
  buf[len] = '\0';
2061
  signal1_type = strtoulst (buf, NULL, 16);
2062
 
2063
  xsnprintf (annex, sizeof annex, "%d/signal2", id);
2064
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2065
  if (len < 0)
2066
    error (_("Could not read signal2."));
2067
  else if (len == 4)
2068
    {
2069
      signal2 = extract_unsigned_integer (buf, 4, byte_order);
2070
      signal2_pending = 1;
2071
    }
2072
 
2073
  xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
2074
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2075
                     buf, 0, (sizeof (buf) - 1));
2076
  if (len <= 0)
2077
    error (_("Could not read signal2_type."));
2078
  buf[len] = '\0';
2079
  signal2_type = strtoulst (buf, NULL, 16);
2080
 
2081
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
2082
 
2083
  if (ui_out_is_mi_like_p (uiout))
2084
    {
2085
      ui_out_field_int (uiout, "signal1_pending", signal1_pending);
2086
      ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
2087
      ui_out_field_int (uiout, "signal1_type", signal1_type);
2088
      ui_out_field_int (uiout, "signal2_pending", signal2_pending);
2089
      ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
2090
      ui_out_field_int (uiout, "signal2_type", signal2_type);
2091
    }
2092
  else
2093
    {
2094
      if (signal1_pending)
2095
        printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
2096
      else
2097
        printf_filtered (_("Signal 1 not pending "));
2098
 
2099
      if (signal1_type)
2100
        printf_filtered (_("(Type Or)\n"));
2101
      else
2102
        printf_filtered (_("(Type Overwrite)\n"));
2103
 
2104
      if (signal2_pending)
2105
        printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
2106
      else
2107
        printf_filtered (_("Signal 2 not pending "));
2108
 
2109
      if (signal2_type)
2110
        printf_filtered (_("(Type Or)\n"));
2111
      else
2112
        printf_filtered (_("(Type Overwrite)\n"));
2113
    }
2114
 
2115
  do_cleanups (chain);
2116
}
2117
 
2118
static void
2119
info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
2120
                       const char *field, const char *msg)
2121
{
2122
  struct cleanup *chain;
2123
  int i;
2124
 
2125
  if (nr <= 0)
2126
    return;
2127
 
2128
  chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
2129
 
2130
  ui_out_table_header (uiout, 32, ui_left, field, msg);
2131
  ui_out_table_body (uiout);
2132
 
2133
  for (i = 0; i < nr; i++)
2134
    {
2135
      struct cleanup *val_chain;
2136
      ULONGEST val;
2137
      val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
2138
      val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
2139
      ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
2140
      do_cleanups (val_chain);
2141
 
2142
      if (!ui_out_is_mi_like_p (uiout))
2143
        printf_filtered ("\n");
2144
    }
2145
 
2146
  do_cleanups (chain);
2147
}
2148
 
2149
static void
2150
info_spu_mailbox_command (char *args, int from_tty)
2151
{
2152
  struct frame_info *frame = get_selected_frame (NULL);
2153
  struct gdbarch *gdbarch = get_frame_arch (frame);
2154
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2155
  struct cleanup *chain;
2156
  char annex[32];
2157
  gdb_byte buf[1024];
2158
  LONGEST len;
2159
  int i, id;
2160
 
2161
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2162
    error (_("\"info spu\" is only supported on the SPU architecture."));
2163
 
2164
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2165
 
2166
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
2167
 
2168
  xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2169
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2170
                     buf, 0, sizeof buf);
2171
  if (len < 0)
2172
    error (_("Could not read mbox_info."));
2173
 
2174
  info_spu_mailbox_list (buf, len / 4, byte_order,
2175
                         "mbox", "SPU Outbound Mailbox");
2176
 
2177
  xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2178
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2179
                     buf, 0, sizeof buf);
2180
  if (len < 0)
2181
    error (_("Could not read ibox_info."));
2182
 
2183
  info_spu_mailbox_list (buf, len / 4, byte_order,
2184
                         "ibox", "SPU Outbound Interrupt Mailbox");
2185
 
2186
  xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2187
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2188
                     buf, 0, sizeof buf);
2189
  if (len < 0)
2190
    error (_("Could not read wbox_info."));
2191
 
2192
  info_spu_mailbox_list (buf, len / 4, byte_order,
2193
                         "wbox", "SPU Inbound Mailbox");
2194
 
2195
  do_cleanups (chain);
2196
}
2197
 
2198
static ULONGEST
2199
spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2200
{
2201
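  /* FIRST and LAST use big-endian bit numbering (bit 0 is the most
     significant bit of the 64-bit word), matching the CBE register
     documentation cited below.  */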
  ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2202
  return (word >> (63 - last)) & mask;
2203
}
2204
 
2205
static void
2206
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
2207
{
2208
  static char *spu_mfc_opcode[256] =
2209
    {
2210
    /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2211
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2212
    /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2213
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2214
    /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
2215
             "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
2216
    /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
2217
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2218
    /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
2219
             "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
2220
    /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2221
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2222
    /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2223
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2224
    /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2225
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2226
    /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
2227
             NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
2228
    /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2229
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2230
    /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
2231
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2232
    /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
2233
             "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2234
    /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2235
             "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
2236
    /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2237
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2238
    /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2239
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2240
    /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2241
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2242
    };
2243
 
2244
  int *seq = alloca (nr * sizeof (int));
2245
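  /* Bitmask of already-sequenced entries; entry J occupies bit (nr - 1 - J),
     mirroring the big-endian dependency bitfield extracted from dw3.  */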
  int done = 0;
2246
  struct cleanup *chain;
2247
  int i, j;
2248
 
2249
 
2250
  /* Determine sequence in which to display (valid) entries.  */
2251
  for (i = 0; i < nr; i++)
2252
    {
2253
      /* Search for the first valid entry all of whose
2254
         dependencies are met.  */
2255
      for (j = 0; j < nr; j++)
2256
        {
2257
          ULONGEST mfc_cq_dw3;
2258
          ULONGEST dependencies;
2259
 
2260
          if (done & (1 << (nr - 1 - j)))
2261
            continue;
2262
 
2263
          mfc_cq_dw3
2264
            = extract_unsigned_integer (buf + 32*j + 24, 8, byte_order);
2265
          if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
2266
            continue;
2267
 
2268
          dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
2269
          if ((dependencies & done) != dependencies)
2270
            continue;
2271
 
2272
          seq[i] = j;
2273
          done |= 1 << (nr - 1 - j);
2274
          break;
2275
        }
2276
 
2277
      if (j == nr)
2278
        break;
2279
    }
2280
 
2281
  nr = i;
2282
 
2283
 
2284
  chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
2285
 
2286
  ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
2287
  ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
2288
  ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
2289
  ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
2290
  ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
2291
  ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
2292
  ui_out_table_header (uiout, 7, ui_left, "size", "Size");
2293
  ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
2294
  ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
2295
  ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
2296
 
2297
  ui_out_table_body (uiout);
2298
 
2299
  for (i = 0; i < nr; i++)
2300
    {
2301
      struct cleanup *cmd_chain;
2302
      ULONGEST mfc_cq_dw0;
2303
      ULONGEST mfc_cq_dw1;
2304
      ULONGEST mfc_cq_dw2;
2305
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
2306
      int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
2307
      ULONGEST mfc_ea;
2308
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
2309
 
2310
      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
2311
         See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */
2312
 
2313
      mfc_cq_dw0
2314
        = extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
2315
      mfc_cq_dw1
2316
        = extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
2317
      mfc_cq_dw2
2318
        = extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);
2319
 
2320
      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
2321
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
2322
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
2323
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
2324
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
2325
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
2326
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
2327
 
2328
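      /* The 64-bit effective address is split across the context-save words:
         dw1 holds the upper 52 bits, dw2 the low 12 bits.  */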
      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
2329
                | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
2330
 
2331
      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
2332
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
2333
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
2334
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
2335
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
2336
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
2337
 
2338
      cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
2339
 
2340
      if (spu_mfc_opcode[mfc_cmd_opcode])
2341
        ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
2342
      else
2343
        ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
2344
 
2345
      ui_out_field_int (uiout, "tag", mfc_cmd_tag);
2346
      ui_out_field_int (uiout, "tid", tclass_id);
2347
      ui_out_field_int (uiout, "rid", rclass_id);
2348
 
2349
      if (ea_valid_p)
2350
        ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
2351
      else
2352
        ui_out_field_skip (uiout, "ea");
2353
 
2354
      ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
2355
      if (qw_valid_p)
2356
        ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
2357
      else
2358
        ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
2359
 
2360
      if (list_valid_p)
2361
        {
2362
          ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
2363
          ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
2364
        }
2365
      else
2366
        {
2367
          ui_out_field_skip (uiout, "lstaddr");
2368
          ui_out_field_skip (uiout, "lstsize");
2369
        }
2370
 
2371
      if (cmd_error_p)
2372
        ui_out_field_string (uiout, "error_p", "*");
2373
      else
2374
        ui_out_field_skip (uiout, "error_p");
2375
 
2376
      do_cleanups (cmd_chain);
2377
 
2378
      if (!ui_out_is_mi_like_p (uiout))
2379
        printf_filtered ("\n");
2380
    }
2381
 
2382
  do_cleanups (chain);
2383
}
2384
 
2385
static void
2386
info_spu_dma_command (char *args, int from_tty)
2387
{
2388
  struct frame_info *frame = get_selected_frame (NULL);
2389
  struct gdbarch *gdbarch = get_frame_arch (frame);
2390
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2391
  ULONGEST dma_info_type;
2392
  ULONGEST dma_info_mask;
2393
  ULONGEST dma_info_status;
2394
  ULONGEST dma_info_stall_and_notify;
2395
  ULONGEST dma_info_atomic_command_status;
2396
  struct cleanup *chain;
2397
  char annex[32];
2398
  gdb_byte buf[1024];
2399
  LONGEST len;
2400
  int i, id;
2401
 
2402
  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2403
    error (_("\"info spu\" is only supported on the SPU architecture."));
2404
 
2405
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2406
 
2407
  xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2408
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2409
                     buf, 0, 40 + 16 * 32);
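  /* dma_info is a 40-byte header (the five 64-bit words decoded below)
     followed by 16 MFC command-queue entries of 32 bytes each.  */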
2410
  if (len <= 0)
2411
    error (_("Could not read dma_info."));
2412
 
2413
  dma_info_type
2414
    = extract_unsigned_integer (buf, 8, byte_order);
2415
  dma_info_mask
2416
    = extract_unsigned_integer (buf + 8, 8, byte_order);
2417
  dma_info_status
2418
    = extract_unsigned_integer (buf + 16, 8, byte_order);
2419
  dma_info_stall_and_notify
2420
    = extract_unsigned_integer (buf + 24, 8, byte_order);
2421
  dma_info_atomic_command_status
2422
    = extract_unsigned_integer (buf + 32, 8, byte_order);
2423
 
2424
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
2425
 
2426
  if (ui_out_is_mi_like_p (uiout))
2427
    {
2428
      ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
2429
                        phex_nz (dma_info_type, 4));
2430
      ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
2431
                        phex_nz (dma_info_mask, 4));
2432
      ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
2433
                        phex_nz (dma_info_status, 4));
2434
      ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
2435
                        phex_nz (dma_info_stall_and_notify, 4));
2436
      ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
2437
                        phex_nz (dma_info_atomic_command_status, 4));
2438
    }
2439
  else
2440
    {
2441
      const char *query_msg = _("no query pending");
2442
 
2443
      if (dma_info_type & 4)
2444
        switch (dma_info_type & 3)
2445
          {
2446
            case 1: query_msg = _("'any' query pending"); break;
2447
            case 2: query_msg = _("'all' query pending"); break;
2448
            default: query_msg = _("undefined query type"); break;
2449
          }
2450
 
2451
      printf_filtered (_("Tag-Group Status  0x%s\n"),
2452
                       phex (dma_info_status, 4));
2453
      printf_filtered (_("Tag-Group Mask    0x%s (%s)\n"),
2454
                       phex (dma_info_mask, 4), query_msg);
2455
      printf_filtered (_("Stall-and-Notify  0x%s\n"),
2456
                       phex (dma_info_stall_and_notify, 4));
2457
      printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2458
                       phex (dma_info_atomic_command_status, 4));
2459
      printf_filtered ("\n");
2460
    }
2461
 
2462
  info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2463
  do_cleanups (chain);
2464
}
2465
 
2466
static void
2467
info_spu_proxydma_command (char *args, int from_tty)
2468
{
2469
  struct frame_info *frame = get_selected_frame (NULL);
2470
  struct gdbarch *gdbarch = get_frame_arch (frame);
2471
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2472
  ULONGEST dma_info_type;
2473
  ULONGEST dma_info_mask;
2474
  ULONGEST dma_info_status;
2475
  struct cleanup *chain;
2476
  char annex[32];
2477
  gdb_byte buf[1024];
2478
  LONGEST len;
2479
  int i, id;
2480
 
2481
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2482
    error (_("\"info spu\" is only supported on the SPU architecture."));
2483
 
2484
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2485
 
2486
  xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2487
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2488
                     buf, 0, 24 + 8 * 32);
2489
  if (len <= 0)
2490
    error (_("Could not read proxydma_info."));
2491
 
2492
  dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2493
  dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2494
  dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2495
 
2496
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2497
 
2498
  if (ui_out_is_mi_like_p (uiout))
2499
    {
2500
      ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2501
                        phex_nz (dma_info_type, 4));
2502
      ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2503
                        phex_nz (dma_info_mask, 4));
2504
      ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2505
                        phex_nz (dma_info_status, 4));
2506
    }
2507
  else
2508
    {
2509
      const char *query_msg;
2510
 
2511
      switch (dma_info_type & 3)
2512
        {
2513
        case 0: query_msg = _("no query pending"); break;
2514
        case 1: query_msg = _("'any' query pending"); break;
2515
        case 2: query_msg = _("'all' query pending"); break;
2516
        default: query_msg = _("undefined query type"); break;
2517
        }
2518
 
2519
      printf_filtered (_("Tag-Group Status  0x%s\n"),
2520
                       phex (dma_info_status, 4));
2521
      printf_filtered (_("Tag-Group Mask    0x%s (%s)\n"),
2522
                       phex (dma_info_mask, 4), query_msg);
2523
      printf_filtered ("\n");
2524
    }
2525
 
2526
  info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2527
  do_cleanups (chain);
2528
}
2529
 
2530
static void
2531
info_spu_command (char *args, int from_tty)
2532
{
2533
  printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2534
  help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2535
}
2536
 
2537
 
2538
/* Root of all "set spu "/"show spu " commands.  */
2539
 
2540
static void
2541
show_spu_command (char *args, int from_tty)
2542
{
2543
  help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
2544
}
2545
 
2546
static void
2547
set_spu_command (char *args, int from_tty)
2548
{
2549
  help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
2550
}
2551
 
2552
static void
2553
show_spu_stop_on_load (struct ui_file *file, int from_tty,
2554
                       struct cmd_list_element *c, const char *value)
2555
{
2556
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
2557
                    value);
2558
}
2559
 
2560
static void
2561
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
2562
                           struct cmd_list_element *c, const char *value)
2563
{
2564
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
2565
                    value);
2566
}
2567
 
2568
 
2569
/* Set up gdbarch struct.  */
2570
 
2571
static struct gdbarch *
2572
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2573
{
2574
  struct gdbarch *gdbarch;
2575
  struct gdbarch_tdep *tdep;
2576
  int id = -1;
2577
 
2578
  /* Which spufs ID was requested as address space?  */
2579
  if (info.tdep_info)
2580
    id = *(int *)info.tdep_info;
2581
  /* For objfile architectures of SPU solibs, decode the ID from the name.
2582
     This assumes the filename convention employed by solib-spu.c.  */
2583
  else if (info.abfd)
2584
    {
2585
      char *name = strrchr (info.abfd->filename, '@');
2586
      if (name)
2587
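        /* Per the solib-spu.c convention assumed above, the filename ends in
           an "@0x<hex> <spufs-id>" suffix; recover the ID from it.  */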
        sscanf (name, "@0x%*x <%d>", &id);
2588
    }
2589
 
2590
  /* Find a candidate among extant architectures.  */
2591
  for (arches = gdbarch_list_lookup_by_info (arches, &info);
2592
       arches != NULL;
2593
       arches = gdbarch_list_lookup_by_info (arches->next, &info))
2594
    {
2595
      tdep = gdbarch_tdep (arches->gdbarch);
2596
      if (tdep && tdep->id == id)
2597
        return arches->gdbarch;
2598
    }
2599
 
2600
  /* None found, so create a new architecture.  */
2601
  tdep = XCALLOC (1, struct gdbarch_tdep);
2602
  tdep->id = id;
2603
  gdbarch = gdbarch_alloc (&info, tdep);
2604
 
2605
  /* Disassembler.  */
2606
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);
2607
 
2608
  /* Registers.  */
2609
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2610
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2611
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2612
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2613
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
2614
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
2615
  set_gdbarch_register_name (gdbarch, spu_register_name);
2616
  set_gdbarch_register_type (gdbarch, spu_register_type);
2617
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2618
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2619
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2620
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2621
 
2622
  /* Data types.  */
2623
  set_gdbarch_char_signed (gdbarch, 0);
2624
  set_gdbarch_ptr_bit (gdbarch, 32);
2625
  set_gdbarch_addr_bit (gdbarch, 32);
2626
  set_gdbarch_short_bit (gdbarch, 16);
2627
  set_gdbarch_int_bit (gdbarch, 32);
2628
  set_gdbarch_long_bit (gdbarch, 32);
2629
  set_gdbarch_long_long_bit (gdbarch, 64);
2630
  set_gdbarch_float_bit (gdbarch, 32);
2631
  set_gdbarch_double_bit (gdbarch, 64);
2632
  set_gdbarch_long_double_bit (gdbarch, 64);
2633
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2634
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2635
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2636
 
2637
  /* Address handling.  */
2638
  set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
2639
  set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2640
  set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2641
  set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
2642
  set_gdbarch_address_class_type_flags_to_name
2643
    (gdbarch, spu_address_class_type_flags_to_name);
2644
  set_gdbarch_address_class_name_to_type_flags
2645
    (gdbarch, spu_address_class_name_to_type_flags);
2646
 
2647
 
2648
  /* Inferior function calls.  */
2649
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2650
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
2651
  set_gdbarch_frame_red_zone_size (gdbarch, 2000);
2652
  set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
2653
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2654
  set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
2655
  set_gdbarch_return_value (gdbarch, spu_return_value);
2656
 
2657
  /* Frame handling.  */
2658
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2659
  frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
2660
  frame_base_set_default (gdbarch, &spu_frame_base);
2661
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2662
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2663
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2664
  set_gdbarch_frame_args_skip (gdbarch, 0);
2665
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2666
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2667
 
2668
  /* Cell/B.E. cross-architecture unwinder support.  */
2669
  frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);
2670
 
2671
  /* Breakpoints.  */
2672
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
2673
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2674
  set_gdbarch_memory_remove_breakpoint (gdbarch, spu_memory_remove_breakpoint);
2675
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2676
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2677
  set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);
2678
 
2679
  /* Overlays.  */
2680
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2681
 
2682
  return gdbarch;
2683
}
2684
 
2685
/* Provide a prototype to silence -Wmissing-prototypes.  */
2686
extern initialize_file_ftype _initialize_spu_tdep;
2687
 
2688
void
2689
_initialize_spu_tdep (void)
2690
{
2691
  register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2692
 
2693
  /* Add ourselves to objfile event chain.  */
2694
  observer_attach_new_objfile (spu_overlay_new_objfile);
2695
  spu_overlay_data = register_objfile_data ();
2696
 
2697
  /* Install spu stop-on-load handler.  */
2698
  observer_attach_new_objfile (spu_catch_start);
2699
 
2700
  /* Add ourselves to normal_stop event chain.  */
2701
  observer_attach_normal_stop (spu_attach_normal_stop);
2702
 
2703
  /* Add root prefix command for all "set spu"/"show spu" commands.  */
2704
  add_prefix_cmd ("spu", no_class, set_spu_command,
2705
                  _("Various SPU specific commands."),
2706
                  &setspucmdlist, "set spu ", 0, &setlist);
2707
  add_prefix_cmd ("spu", no_class, show_spu_command,
2708
                  _("Various SPU specific commands."),
2709
                  &showspucmdlist, "show spu ", 0, &showlist);
2710
 
2711
  /* Toggle whether or not to add a temporary breakpoint at the "main"
2712
     function of new SPE contexts.  */
2713
  add_setshow_boolean_cmd ("stop-on-load", class_support,
2714
                          &spu_stop_on_load_p, _("\
2715
Set whether to stop for new SPE threads."),
2716
                           _("\
2717
Show whether to stop for new SPE threads."),
2718
                           _("\
2719
Use \"on\" to give control to the user when a new SPE thread\n\
2720
enters its \"main\" function.\n\
2721
Use \"off\" to disable stopping for new SPE threads."),
2722
                          NULL,
2723
                          show_spu_stop_on_load,
2724
                          &setspucmdlist, &showspucmdlist);
2725
 
2726
  /* Toggle whether or not to automatically flush the software-managed
2727
     cache whenever SPE execution stops.  */
2728
  add_setshow_boolean_cmd ("auto-flush-cache", class_support,
2729
                          &spu_auto_flush_cache_p, _("\
2730
Set whether to automatically flush the software-managed cache."),
2731
                           _("\
2732
Show whether to automatically flush the software-managed cache."),
2733
                           _("\
2734
Use \"on\" to automatically flush the software-managed cache\n\
2735
whenever SPE execution stops.\n\
2736
Use \"off\" to never automatically flush the software-managed cache."),
2737
                          NULL,
2738
                          show_spu_auto_flush_cache,
2739
                          &setspucmdlist, &showspucmdlist);
2740
 
2741
  /* Add root prefix command for all "info spu" commands.  */
2742
  add_prefix_cmd ("spu", class_info, info_spu_command,
2743
                  _("Various SPU specific commands."),
2744
                  &infospucmdlist, "info spu ", 0, &infolist);
2745
 
2746
  /* Add various "info spu" commands.  */
2747
  add_cmd ("event", class_info, info_spu_event_command,
2748
           _("Display SPU event facility status.\n"),
2749
           &infospucmdlist);
2750
  add_cmd ("signal", class_info, info_spu_signal_command,
2751
           _("Display SPU signal notification facility status.\n"),
2752
           &infospucmdlist);
2753
  add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2754
           _("Display SPU mailbox facility status.\n"),
2755
           &infospucmdlist);
2756
  add_cmd ("dma", class_info, info_spu_dma_command,
2757
           _("Display MFC DMA status.\n"),
2758
           &infospucmdlist);
2759
  add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2760
           _("Display MFC Proxy-DMA status.\n"),
2761
           &infospucmdlist);
2762
}
