/* SPU target-dependent code for GDB, the GNU debugger.
   Copyright (C) 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   Contributed by Ulrich Weigand <uweigand@de.ibm.com>.
   Based on a port by Sid Manning <sid@us.ibm.com>.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "gdbtypes.h"
#include "gdbcmd.h"
#include "gdbcore.h"
#include "gdb_string.h"
#include "gdb_assert.h"
#include "frame.h"
#include "frame-unwind.h"
#include "frame-base.h"
#include "trad-frame.h"
#include "symtab.h"
#include "symfile.h"
#include "value.h"
#include "inferior.h"
#include "dis-asm.h"
#include "objfiles.h"
#include "language.h"
#include "regcache.h"
#include "reggroups.h"
#include "floatformat.h"
#include "block.h"
#include "observer.h"
#include "infcall.h"

#include "spu-tdep.h"


/* The list of available "set spu " and "show spu " commands.  */
static struct cmd_list_element *setspucmdlist = NULL;
static struct cmd_list_element *showspucmdlist = NULL;

/* Whether to stop for new SPE contexts.  */
static int spu_stop_on_load_p = 0;
/* Whether to automatically flush the SW-managed cache.  */
static int spu_auto_flush_cache_p = 1;


/* The tdep structure.  */
struct gdbarch_tdep
{
  /* The spufs ID identifying our address space.  */
  int id;

  /* SPU-specific vector type.  */
  struct type *spu_builtin_type_vec128;
};


/* SPU-specific vector type.  */
static struct type *
spu_builtin_type_vec128 (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (!tdep->spu_builtin_type_vec128)
    {
      const struct builtin_type *bt = builtin_type (gdbarch);
      struct type *t;

      t = arch_composite_type (gdbarch,
                               "__spu_builtin_type_vec128", TYPE_CODE_UNION);
      append_composite_type_field (t, "uint128", bt->builtin_int128);
      append_composite_type_field (t, "v2_int64",
                                   init_vector_type (bt->builtin_int64, 2));
      append_composite_type_field (t, "v4_int32",
                                   init_vector_type (bt->builtin_int32, 4));
      append_composite_type_field (t, "v8_int16",
                                   init_vector_type (bt->builtin_int16, 8));
      append_composite_type_field (t, "v16_int8",
                                   init_vector_type (bt->builtin_int8, 16));
      append_composite_type_field (t, "v2_double",
                                   init_vector_type (bt->builtin_double, 2));
      append_composite_type_field (t, "v4_float",
                                   init_vector_type (bt->builtin_float, 4));

      TYPE_VECTOR (t) = 1;
      TYPE_NAME (t) = "spu_builtin_type_vec128";

      tdep->spu_builtin_type_vec128 = t;
    }

  return tdep->spu_builtin_type_vec128;
}
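
/* For illustration only (kept under "#if 0", never compiled): a plain C
   union that corresponds roughly to the composite debug type built
   above.  The 128-bit member is shown as a byte array because ISO C has
   no standard 128-bit integer type; the other members mirror the fields
   registered with append_composite_type_field.  */
#if 0
#include <stdint.h>

union spu_vec128_view
{
  unsigned char uint128[16];  /* raw 128-bit register contents */
  int64_t v2_int64[2];
  int32_t v4_int32[4];
  int16_t v8_int16[8];
  int8_t v16_int8[16];
  double v2_double[2];
  float v4_float[4];
};
#endif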


/* The list of available "info spu " commands.  */
static struct cmd_list_element *infospucmdlist = NULL;

/* Registers.  */

static const char *
spu_register_name (struct gdbarch *gdbarch, int reg_nr)
{
  static char *register_names[] =
    {
      "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
      "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
      "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
      "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
      "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
      "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
      "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
      "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
      "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
      "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
      "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
      "r96", "r97", "r98", "r99", "r100", "r101", "r102", "r103",
      "r104", "r105", "r106", "r107", "r108", "r109", "r110", "r111",
      "r112", "r113", "r114", "r115", "r116", "r117", "r118", "r119",
      "r120", "r121", "r122", "r123", "r124", "r125", "r126", "r127",
      "id", "pc", "sp", "fpscr", "srr0", "lslr", "decr", "decr_status"
    };

  if (reg_nr < 0)
    return NULL;
  if (reg_nr >= sizeof register_names / sizeof *register_names)
    return NULL;

  return register_names[reg_nr];
}

static struct type *
spu_register_type (struct gdbarch *gdbarch, int reg_nr)
{
  if (reg_nr < SPU_NUM_GPRS)
    return spu_builtin_type_vec128 (gdbarch);

  switch (reg_nr)
    {
    case SPU_ID_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_PC_REGNUM:
      return builtin_type (gdbarch)->builtin_func_ptr;

    case SPU_SP_REGNUM:
      return builtin_type (gdbarch)->builtin_data_ptr;

    case SPU_FPSCR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint128;

    case SPU_SRR0_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_LSLR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    case SPU_DECR_STATUS_REGNUM:
      return builtin_type (gdbarch)->builtin_uint32;

    default:
      internal_error (__FILE__, __LINE__, "invalid regnum");
    }
}

/* Pseudo registers for preferred slots - stack pointer.  */

static void
spu_pseudo_register_read_spu (struct regcache *regcache, const char *regname,
                              gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  memset (reg, 0, sizeof reg);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
               reg, 0, sizeof reg);

  store_unsigned_integer (buf, 4, byte_order, strtoulst (reg, NULL, 16));
}

static void
spu_pseudo_register_read (struct gdbarch *gdbarch, struct regcache *regcache,
                          int regnum, gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (buf, reg, 4);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_read_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_read_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

static void
spu_pseudo_register_write_spu (struct regcache *regcache, const char *regname,
                               const gdb_byte *buf)
{
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte reg[32];
  char annex[32];
  ULONGEST id;

  regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
  xsnprintf (annex, sizeof annex, "%d/%s", (int) id, regname);
  xsnprintf (reg, sizeof reg, "0x%s",
             phex_nz (extract_unsigned_integer (buf, 4, byte_order), 4));
  target_write (&current_target, TARGET_OBJECT_SPU, annex,
                reg, 0, strlen (reg));
}

static void
spu_pseudo_register_write (struct gdbarch *gdbarch, struct regcache *regcache,
                           int regnum, const gdb_byte *buf)
{
  gdb_byte reg[16];
  char annex[32];
  ULONGEST id;

  switch (regnum)
    {
    case SPU_SP_REGNUM:
      regcache_raw_read (regcache, SPU_RAW_SP_REGNUM, reg);
      memcpy (reg, buf, 4);
      regcache_raw_write (regcache, SPU_RAW_SP_REGNUM, reg);
      break;

    case SPU_FPSCR_REGNUM:
      regcache_raw_read_unsigned (regcache, SPU_ID_REGNUM, &id);
      xsnprintf (annex, sizeof annex, "%d/fpcr", (int) id);
      target_write (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 16);
      break;

    case SPU_SRR0_REGNUM:
      spu_pseudo_register_write_spu (regcache, "srr0", buf);
      break;

    case SPU_LSLR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "lslr", buf);
      break;

    case SPU_DECR_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr", buf);
      break;

    case SPU_DECR_STATUS_REGNUM:
      spu_pseudo_register_write_spu (regcache, "decr_status", buf);
      break;

    default:
      internal_error (__FILE__, __LINE__, _("invalid regnum"));
    }
}

/* Value conversion -- access scalar values at the preferred slot.  */

static struct value *
spu_value_from_register (struct type *type, int regnum,
                         struct frame_info *frame)
{
  struct value *value = default_value_from_register (type, regnum, frame);
  int len = TYPE_LENGTH (type);

  if (regnum < SPU_NUM_GPRS && len < 16)
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      set_value_offset (value, preferred_slot);
    }

  return value;
}
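
/* For illustration only (kept under "#if 0", never compiled): the
   "preferred slot" rule used above and again in spu_value_to_regcache /
   spu_regcache_to_value below.  A scalar lives in the first 4-byte word
   of the 128-bit register; values narrower than 4 bytes sit right-aligned
   within that word, so their byte offset is 4 - len.  */
#if 0
#include <assert.h>

static int
spu_preferred_slot_offset (int len)
{
  return len < 4 ? 4 - len : 0;
}

static void
spu_preferred_slot_offset_examples (void)
{
  assert (spu_preferred_slot_offset (1) == 3);  /* char   -> byte 3    */
  assert (spu_preferred_slot_offset (2) == 2);  /* short  -> bytes 2-3 */
  assert (spu_preferred_slot_offset (4) == 0);  /* int    -> bytes 0-3 */
  assert (spu_preferred_slot_offset (8) == 0);  /* double -> bytes 0-7 */
}
#endif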

/* Register groups.  */

static int
spu_register_reggroup_p (struct gdbarch *gdbarch, int regnum,
                         struct reggroup *group)
{
  /* Registers displayed via 'info regs'.  */
  if (group == general_reggroup)
    return 1;

  /* Registers displayed via 'info float'.  */
  if (group == float_reggroup)
    return 0;

  /* Registers that need to be saved/restored in order to
     push or pop frames.  */
  if (group == save_reggroup || group == restore_reggroup)
    return 1;

  return default_register_reggroup_p (gdbarch, regnum, group);
}


/* Address handling.  */

static int
spu_gdbarch_id (struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  int id = tdep->id;

  /* The objfile architecture of a standalone SPU executable does not
     provide an SPU ID.  Retrieve it from the objfile's relocated
     address range in this special case.  */
  if (id == -1
      && symfile_objfile && symfile_objfile->obfd
      && bfd_get_arch (symfile_objfile->obfd) == bfd_arch_spu
      && symfile_objfile->sections != symfile_objfile->sections_end)
    id = SPUADDR_SPU (obj_section_addr (symfile_objfile->sections));

  return id;
}

static ULONGEST
spu_lslr (int id)
{
  gdb_byte buf[32];
  char annex[32];

  if (id == -1)
    return SPU_LS_SIZE - 1;

  xsnprintf (annex, sizeof annex, "%d/lslr", id);
  memset (buf, 0, sizeof buf);
  target_read (&current_target, TARGET_OBJECT_SPU, annex,
               buf, 0, sizeof buf);

  return strtoulst (buf, NULL, 16);
}

static int
spu_address_class_type_flags (int byte_size, int dwarf2_addr_class)
{
  if (dwarf2_addr_class == 1)
    return TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
  else
    return 0;
}

static const char *
spu_address_class_type_flags_to_name (struct gdbarch *gdbarch, int type_flags)
{
  if (type_flags & TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1)
    return "__ea";
  else
    return NULL;
}

static int
spu_address_class_name_to_type_flags (struct gdbarch *gdbarch,
                                      const char *name, int *type_flags_ptr)
{
  if (strcmp (name, "__ea") == 0)
    {
      *type_flags_ptr = TYPE_INSTANCE_FLAG_ADDRESS_CLASS_1;
      return 1;
    }
  else
   return 0;
}

static void
spu_address_to_pointer (struct gdbarch *gdbarch,
                        struct type *type, gdb_byte *buf, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  store_unsigned_integer (buf, TYPE_LENGTH (type), byte_order,
                          SPUADDR_ADDR (addr));
}

static CORE_ADDR
spu_pointer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST lslr = spu_lslr (id);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  ULONGEST addr
    = extract_unsigned_integer (buf, TYPE_LENGTH (type), byte_order);

  /* Do not convert __ea pointers.  */
  if (TYPE_ADDRESS_CLASS_1 (type))
    return addr;

  return addr? SPUADDR (id, addr & lslr) : 0;
}

static CORE_ADDR
spu_integer_to_address (struct gdbarch *gdbarch,
                        struct type *type, const gdb_byte *buf)
{
  int id = spu_gdbarch_id (gdbarch);
  ULONGEST lslr = spu_lslr (id);
  ULONGEST addr = unpack_long (type, buf);

  return SPUADDR (id, addr & lslr);
}


/* Decoding SPU instructions.  */

enum
  {
    op_lqd   = 0x34,
    op_lqx   = 0x3c4,
    op_lqa   = 0x61,
    op_lqr   = 0x67,
    op_stqd  = 0x24,
    op_stqx  = 0x144,
    op_stqa  = 0x41,
    op_stqr  = 0x47,

    op_il    = 0x081,
    op_ila   = 0x21,
    op_a     = 0x0c0,
    op_ai    = 0x1c,

    op_selb  = 0x4,

    op_br    = 0x64,
    op_bra   = 0x60,
    op_brsl  = 0x66,
    op_brasl = 0x62,
    op_brnz  = 0x42,
    op_brz   = 0x40,
    op_brhnz = 0x46,
    op_brhz  = 0x44,
    op_bi    = 0x1a8,
    op_bisl  = 0x1a9,
    op_biz   = 0x128,
    op_binz  = 0x129,
    op_bihz  = 0x12a,
    op_bihnz = 0x12b,
  };

static int
is_rr (unsigned int insn, int op, int *rt, int *ra, int *rb)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      return 1;
    }

  return 0;
}

static int
is_rrr (unsigned int insn, int op, int *rt, int *ra, int *rb, int *rc)
{
  if ((insn >> 28) == op)
    {
      *rt = (insn >> 21) & 127;
      *ra = (insn >> 7) & 127;
      *rb = (insn >> 14) & 127;
      *rc = insn & 127;
      return 1;
    }

  return 0;
}

static int
is_ri7 (unsigned int insn, int op, int *rt, int *ra, int *i7)
{
  if ((insn >> 21) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i7 = (((insn >> 14) & 127) ^ 0x40) - 0x40;
      return 1;
    }

  return 0;
}

static int
is_ri10 (unsigned int insn, int op, int *rt, int *ra, int *i10)
{
  if ((insn >> 24) == op)
    {
      *rt = insn & 127;
      *ra = (insn >> 7) & 127;
      *i10 = (((insn >> 14) & 0x3ff) ^ 0x200) - 0x200;
      return 1;
    }

  return 0;
}

static int
is_ri16 (unsigned int insn, int op, int *rt, int *i16)
{
  if ((insn >> 23) == op)
    {
      *rt = insn & 127;
      *i16 = (((insn >> 7) & 0xffff) ^ 0x8000) - 0x8000;
      return 1;
    }

  return 0;
}

static int
is_ri18 (unsigned int insn, int op, int *rt, int *i18)
{
  if ((insn >> 25) == op)
    {
      *rt = insn & 127;
      *i18 = (((insn >> 7) & 0x3ffff) ^ 0x20000) - 0x20000;
      return 1;
    }

  return 0;
}

static int
is_branch (unsigned int insn, int *offset, int *reg)
{
  int rt, i7, i16;

  if (is_ri16 (insn, op_br, &rt, &i16)
      || is_ri16 (insn, op_brsl, &rt, &i16)
      || is_ri16 (insn, op_brnz, &rt, &i16)
      || is_ri16 (insn, op_brz, &rt, &i16)
      || is_ri16 (insn, op_brhnz, &rt, &i16)
      || is_ri16 (insn, op_brhz, &rt, &i16))
    {
      *reg = SPU_PC_REGNUM;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri16 (insn, op_bra, &rt, &i16)
      || is_ri16 (insn, op_brasl, &rt, &i16))
    {
      *reg = -1;
      *offset = i16 << 2;
      return 1;
    }

  if (is_ri7 (insn, op_bi, &rt, reg, &i7)
      || is_ri7 (insn, op_bisl, &rt, reg, &i7)
      || is_ri7 (insn, op_biz, &rt, reg, &i7)
      || is_ri7 (insn, op_binz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihz, &rt, reg, &i7)
      || is_ri7 (insn, op_bihnz, &rt, reg, &i7))
    {
      *offset = 0;
      return 1;
    }

  return 0;
}
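
/* For illustration only (kept under "#if 0", never compiled): the
   immediate decoders above sign-extend an N-bit field with the
   "xor, then subtract" idiom ((x ^ sign_bit) - sign_bit).  For the
   16-bit RI16 form used by the br/brsl family, is_branch then shifts
   the result left by 2 to turn a word offset into a byte offset.  */
#if 0
#include <assert.h>

static int
spu_sign_extend_16 (unsigned int field)
{
  int x = (int) (field & 0xffff);
  return (x ^ 0x8000) - 0x8000;
}

static void
spu_sign_extend_16_examples (void)
{
  assert (spu_sign_extend_16 (0x0001) == 1);
  assert (spu_sign_extend_16 (0x7fff) == 32767);
  assert (spu_sign_extend_16 (0x8000) == -32768);
  assert (spu_sign_extend_16 (0xffff) == -1);
}
#endif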


/* Prolog parsing.  */

struct spu_prologue_data
  {
    /* Stack frame size.  -1 if analysis was unsuccessful.  */
    int size;

    /* How to find the CFA.  The CFA is equal to SP at function entry.  */
    int cfa_reg;
    int cfa_offset;

    /* Offset relative to CFA where a register is saved.  -1 if invalid.  */
    int reg_offset[SPU_NUM_GPRS];
  };

static CORE_ADDR
spu_analyze_prologue (struct gdbarch *gdbarch,
                      CORE_ADDR start_pc, CORE_ADDR end_pc,
                      struct spu_prologue_data *data)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int found_sp = 0;
  int found_fp = 0;
  int found_lr = 0;
  int reg_immed[SPU_NUM_GPRS];
  gdb_byte buf[16];
  CORE_ADDR prolog_pc = start_pc;
  CORE_ADDR pc;
  int i;


  /* Initialize DATA to default values.  */
  data->size = -1;

  data->cfa_reg = SPU_RAW_SP_REGNUM;
  data->cfa_offset = 0;

  for (i = 0; i < SPU_NUM_GPRS; i++)
    data->reg_offset[i] = -1;

  /* Set up REG_IMMED array.  This is non-zero for a register if we know its
     preferred slot currently holds this immediate value.  */
  for (i = 0; i < SPU_NUM_GPRS; i++)
      reg_immed[i] = 0;

  /* Scan instructions until the first branch.

     The following instructions are important prolog components:

        - The first instruction to set up the stack pointer.
        - The first instruction to set up the frame pointer.
        - The first instruction to save the link register.

     We return the instruction after the latest of these three,
     or the incoming PC if none is found.  The first instruction
     to set up the stack pointer also defines the frame size.

     Note that instructions saving incoming arguments to their stack
     slots are not counted as important, because they are hard to
     identify with certainty.  This should not matter much, because
     arguments are relevant only in code compiled with debug data,
     and in such code the GDB core will advance until the first source
     line anyway, using SAL data.

     For purposes of stack unwinding, we analyze the following types
     of instructions in addition:

      - Any instruction adding to the current frame pointer.
      - Any instruction loading an immediate constant into a register.
      - Any instruction storing a register onto the stack.

     These are used to compute the CFA and REG_OFFSET output.  */

  for (pc = start_pc; pc < end_pc; pc += 4)
    {
      unsigned int insn;
      int rt, ra, rb, rc, immed;

      if (target_read_memory (pc, buf, 4))
        break;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      /* AI is the typical instruction to set up a stack frame.
         It is also used to initialize the frame pointer.  */
      if (is_ri10 (insn, op_ai, &rt, &ra, &immed))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            data->cfa_offset -= immed;

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              data->size = -immed;
            }
          else if (rt == SPU_FP_REGNUM && ra == SPU_RAW_SP_REGNUM
                   && !found_fp)
            {
              found_fp = 1;
              prolog_pc = pc + 4;

              data->cfa_reg = SPU_FP_REGNUM;
              data->cfa_offset -= immed;
            }
        }

      /* A is used to set up stack frames of size >= 512 bytes.
         If we have tracked the contents of the addend register,
         we can handle this as well.  */
      else if (is_rr (insn, op_a, &rt, &ra, &rb))
        {
          if (rt == data->cfa_reg && ra == data->cfa_reg)
            {
              if (reg_immed[rb] != 0)
                data->cfa_offset -= reg_immed[rb];
              else
                data->cfa_reg = -1;  /* We don't know the CFA any more.  */
            }

          if (rt == SPU_RAW_SP_REGNUM && ra == SPU_RAW_SP_REGNUM
              && !found_sp)
            {
              found_sp = 1;
              prolog_pc = pc + 4;

              if (reg_immed[rb] != 0)
                data->size = -reg_immed[rb];
            }
        }

      /* We need to track IL and ILA used to load immediate constants
         in case they are later used as input to an A instruction.  */
      else if (is_ri16 (insn, op_il, &rt, &immed))
        {
          reg_immed[rt] = immed;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      else if (is_ri18 (insn, op_ila, &rt, &immed))
        {
          reg_immed[rt] = immed & 0x3ffff;

          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* STQD is used to save registers to the stack.  */
      else if (is_ri10 (insn, op_stqd, &rt, &ra, &immed))
        {
          if (ra == data->cfa_reg)
            data->reg_offset[rt] = data->cfa_offset - (immed << 4);

          if (ra == data->cfa_reg && rt == SPU_LR_REGNUM
              && !found_lr)
            {
              found_lr = 1;
              prolog_pc = pc + 4;
            }
        }

      /* _start uses SELB to set up the stack pointer.  */
      else if (is_rrr (insn, op_selb, &rt, &ra, &rb, &rc))
        {
          if (rt == SPU_RAW_SP_REGNUM && !found_sp)
            found_sp = 1;
        }

      /* We terminate if we find a branch.  */
      else if (is_branch (insn, &immed, &ra))
        break;
    }


  /* If we successfully parsed until here, and didn't find any instruction
     modifying SP, we assume we have a frameless function.  */
  if (!found_sp)
    data->size = 0;

  /* Return cooked instead of raw SP.  */
  if (data->cfa_reg == SPU_RAW_SP_REGNUM)
    data->cfa_reg = SPU_SP_REGNUM;

  return prolog_pc;
}

/* Return the first instruction after the prologue starting at PC.  */
static CORE_ADDR
spu_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  struct spu_prologue_data data;
  return spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);
}

/* Return the frame pointer in use at address PC.  */
static void
spu_virtual_frame_pointer (struct gdbarch *gdbarch, CORE_ADDR pc,
                           int *reg, LONGEST *offset)
{
  struct spu_prologue_data data;
  spu_analyze_prologue (gdbarch, pc, (CORE_ADDR)-1, &data);

  if (data.size != -1 && data.cfa_reg != -1)
    {
      /* The 'frame pointer' address is CFA minus frame size.  */
      *reg = data.cfa_reg;
      *offset = data.cfa_offset - data.size;
    }
  else
    {
      /* ??? We don't really know ... */
      *reg = SPU_SP_REGNUM;
      *offset = 0;
    }
}

/* Return true if we are in the function's epilogue, i.e. after the
   instruction that destroyed the function's stack frame.

   1) scan forward from the point of execution:
       a) If you find an instruction that modifies the stack pointer
          or transfers control (except a return), execution is not in
          an epilogue, return.
       b) Stop scanning if you find a return instruction or reach the
          end of the function or reach the hard limit for the size of
          an epilogue.
   2) scan backward from the point of execution:
        a) If you find an instruction that modifies the stack pointer,
            execution *is* in an epilogue, return.
        b) Stop scanning if you reach an instruction that transfers
           control or the beginning of the function or reach the hard
           limit for the size of an epilogue.  */

static int
spu_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR scan_pc, func_start, func_end, epilogue_start, epilogue_end;
  bfd_byte buf[4];
  unsigned int insn;
  int rt, ra, rb, rc, immed;

  /* Find the search limits based on function boundaries and hard limit.
     We assume the epilogue can be up to 64 instructions long.  */

  const int spu_max_epilogue_size = 64 * 4;

  if (!find_pc_partial_function (pc, NULL, &func_start, &func_end))
    return 0;

  if (pc - func_start < spu_max_epilogue_size)
    epilogue_start = func_start;
  else
    epilogue_start = pc - spu_max_epilogue_size;

  if (func_end - pc < spu_max_epilogue_size)
    epilogue_end = func_end;
  else
    epilogue_end = pc + spu_max_epilogue_size;

  /* Scan forward until next 'bi $0'.  */

  for (scan_pc = pc; scan_pc < epilogue_end; scan_pc += 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        {
          if (immed == 0 && ra == SPU_LR_REGNUM)
            break;

          return 0;
        }

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 0;
        }
    }

  if (scan_pc >= epilogue_end)
    return 0;

  /* Scan backward until adjustment to stack pointer (R1).  */

  for (scan_pc = pc - 4; scan_pc >= epilogue_start; scan_pc -= 4)
    {
      if (target_read_memory (scan_pc, buf, 4))
        return 0;
      insn = extract_unsigned_integer (buf, 4, byte_order);

      if (is_branch (insn, &immed, &ra))
        return 0;

      if (is_ri10 (insn, op_ai, &rt, &ra, &immed)
          || is_rr (insn, op_a, &rt, &ra, &rb)
          || is_ri10 (insn, op_lqd, &rt, &ra, &immed))
        {
          if (rt == SPU_RAW_SP_REGNUM)
            return 1;
        }
    }

  return 0;
}


/* Normal stack frames.  */

struct spu_unwind_cache
{
  CORE_ADDR func;
  CORE_ADDR frame_base;
  CORE_ADDR local_base;

  struct trad_frame_saved_reg *saved_regs;
};

static struct spu_unwind_cache *
spu_frame_unwind_cache (struct frame_info *this_frame,
                        void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct spu_unwind_cache *info;
  struct spu_prologue_data data;
  CORE_ADDR id = tdep->id;
  gdb_byte buf[16];

  if (*this_prologue_cache)
    return *this_prologue_cache;

  info = FRAME_OBSTACK_ZALLOC (struct spu_unwind_cache);
  *this_prologue_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);
  info->frame_base = 0;
  info->local_base = 0;

  /* Find the start of the current function, and analyze its prologue.  */
  info->func = get_frame_func (this_frame);
  if (info->func == 0)
    {
      /* Fall back to using the current PC as frame ID.  */
      info->func = get_frame_pc (this_frame);
      data.size = -1;
    }
  else
    spu_analyze_prologue (gdbarch, info->func, get_frame_pc (this_frame),
                          &data);

  /* If successful, use prologue analysis data.  */
  if (data.size != -1 && data.cfa_reg != -1)
    {
      CORE_ADDR cfa;
      int i;

      /* Determine CFA via unwound CFA_REG plus CFA_OFFSET.  */
      get_frame_register (this_frame, data.cfa_reg, buf);
      cfa = extract_unsigned_integer (buf, 4, byte_order) + data.cfa_offset;
      cfa = SPUADDR (id, cfa);

      /* Call-saved register slots.  */
      for (i = 0; i < SPU_NUM_GPRS; i++)
        if (i == SPU_LR_REGNUM
            || (i >= SPU_SAVED1_REGNUM && i <= SPU_SAVEDN_REGNUM))
          if (data.reg_offset[i] != -1)
            info->saved_regs[i].addr = cfa - data.reg_offset[i];

      /* Frame bases.  */
      info->frame_base = cfa;
      info->local_base = cfa - data.size;
    }

  /* Otherwise, fall back to reading the backchain link.  */
  else
    {
      CORE_ADDR reg;
      LONGEST backchain;
      int status;

      /* Get the backchain.  */
      reg = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
      status = safe_read_memory_integer (SPUADDR (id, reg), 4, byte_order,
                                         &backchain);

      /* A zero backchain terminates the frame chain.  Also, sanity
         check against the local store size limit.  */
      if (status && backchain > 0 && backchain < SPU_LS_SIZE)
        {
          /* Assume the link register is saved into its slot.  */
          if (backchain + 16 < SPU_LS_SIZE)
            info->saved_regs[SPU_LR_REGNUM].addr = SPUADDR (id, backchain + 16);

          /* Frame bases.  */
          info->frame_base = SPUADDR (id, backchain);
          info->local_base = SPUADDR (id, reg);
        }
    }

  /* If we didn't find a frame, we cannot determine SP / return address.  */
  if (info->frame_base == 0)
    return info;

  /* The previous SP is equal to the CFA.  */
  trad_frame_set_value (info->saved_regs, SPU_SP_REGNUM,
                        SPUADDR_ADDR (info->frame_base));

  /* Read full contents of the unwound link register in order to
     be able to determine the return address.  */
  if (trad_frame_addr_p (info->saved_regs, SPU_LR_REGNUM))
    target_read_memory (info->saved_regs[SPU_LR_REGNUM].addr, buf, 16);
  else
    get_frame_register (this_frame, SPU_LR_REGNUM, buf);

  /* Normally, the return address is contained in the slot 0 of the
     link register, and slots 1-3 are zero.  For an overlay return,
     slot 0 contains the address of the overlay manager return stub,
     slot 1 contains the partition number of the overlay section to
     be returned to, and slot 2 contains the return address within
     that section.  Return the latter address in that case.  */
  if (extract_unsigned_integer (buf + 8, 4, byte_order) != 0)
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf + 8, 4, byte_order));
  else
    trad_frame_set_value (info->saved_regs, SPU_PC_REGNUM,
                          extract_unsigned_integer (buf, 4, byte_order));

  return info;
}

static void
spu_frame_this_id (struct frame_info *this_frame,
                   void **this_prologue_cache, struct frame_id *this_id)
{
  struct spu_unwind_cache *info =
    spu_frame_unwind_cache (this_frame, this_prologue_cache);

  if (info->frame_base == 0)
    return;

  *this_id = frame_id_build (info->frame_base, info->func);
}

static struct value *
spu_frame_prev_register (struct frame_info *this_frame,
                         void **this_prologue_cache, int regnum)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_prologue_cache);

  /* Special-case the stack pointer.  */
  if (regnum == SPU_RAW_SP_REGNUM)
    regnum = SPU_SP_REGNUM;

  return trad_frame_get_prev_register (this_frame, info->saved_regs, regnum);
}

static const struct frame_unwind spu_frame_unwind = {
  NORMAL_FRAME,
  spu_frame_this_id,
  spu_frame_prev_register,
  NULL,
  default_frame_sniffer
};

static CORE_ADDR
spu_frame_base_address (struct frame_info *this_frame, void **this_cache)
{
  struct spu_unwind_cache *info
    = spu_frame_unwind_cache (this_frame, this_cache);
  return info->local_base;
}

static const struct frame_base spu_frame_base = {
  &spu_frame_unwind,
  spu_frame_base_address,
  spu_frame_base_address,
  spu_frame_base_address
};

static CORE_ADDR
spu_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = frame_unwind_register_unsigned (next_frame, SPU_PC_REGNUM);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static CORE_ADDR
spu_unwind_sp (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR sp = frame_unwind_register_unsigned (next_frame, SPU_SP_REGNUM);
  return SPUADDR (tdep->id, sp);
}

static CORE_ADDR
spu_read_pc (struct regcache *regcache)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_regcache_arch (regcache));
  ULONGEST pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &pc);
  /* Mask off interrupt enable bit.  */
  return SPUADDR (tdep->id, pc & -4);
}

static void
spu_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  /* Keep interrupt enabled state unchanged.  */
  ULONGEST old_pc;
  regcache_cooked_read_unsigned (regcache, SPU_PC_REGNUM, &old_pc);
  regcache_cooked_write_unsigned (regcache, SPU_PC_REGNUM,
                                  (SPUADDR_ADDR (pc) & -4) | (old_pc & 3));
}
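
/* For illustration only (kept under "#if 0", never compiled): the PC
   handling above keeps the low two bits of the raw PC register (the
   interrupt-enable state mentioned in the comments) separate from the
   instruction address.  "& -4" clears those two bits, and spu_write_pc
   recombines them via "| (old_pc & 3)".  */
#if 0
#include <assert.h>

static void
spu_pc_mask_examples (void)
{
  unsigned int old_pc = 0x1234 | 1;  /* address with enable bit set */
  unsigned int new_addr = 0x2340;

  assert ((old_pc & -4) == 0x1234);                     /* address only    */
  assert (((new_addr & -4) | (old_pc & 3)) == 0x2341);  /* state preserved */
}
#endif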


/* Cell/B.E. cross-architecture unwinder support.  */

struct spu2ppu_cache
{
  struct frame_id frame_id;
  struct regcache *regcache;
};

static struct gdbarch *
spu2ppu_prev_arch (struct frame_info *this_frame, void **this_cache)
{
  struct spu2ppu_cache *cache = *this_cache;
  return get_regcache_arch (cache->regcache);
}

static void
spu2ppu_this_id (struct frame_info *this_frame,
                 void **this_cache, struct frame_id *this_id)
{
  struct spu2ppu_cache *cache = *this_cache;
  *this_id = cache->frame_id;
}

static struct value *
spu2ppu_prev_register (struct frame_info *this_frame,
                       void **this_cache, int regnum)
{
  struct spu2ppu_cache *cache = *this_cache;
  struct gdbarch *gdbarch = get_regcache_arch (cache->regcache);
  gdb_byte *buf;

  buf = alloca (register_size (gdbarch, regnum));
  regcache_cooked_read (cache->regcache, regnum, buf);
  return frame_unwind_got_bytes (this_frame, regnum, buf);
}

static int
spu2ppu_sniffer (const struct frame_unwind *self,
                 struct frame_info *this_frame, void **this_prologue_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR base, func, backchain;
  gdb_byte buf[4];

  if (gdbarch_bfd_arch_info (target_gdbarch)->arch == bfd_arch_spu)
    return 0;

  base = get_frame_sp (this_frame);
  func = get_frame_pc (this_frame);
  if (target_read_memory (base, buf, 4))
    return 0;
  backchain = extract_unsigned_integer (buf, 4, byte_order);

  if (!backchain)
    {
      struct frame_info *fi;

      struct spu2ppu_cache *cache
        = FRAME_OBSTACK_CALLOC (1, struct spu2ppu_cache);

      cache->frame_id = frame_id_build (base + 16, func);

      for (fi = get_next_frame (this_frame); fi; fi = get_next_frame (fi))
        if (gdbarch_bfd_arch_info (get_frame_arch (fi))->arch != bfd_arch_spu)
          break;

      if (fi)
        {
          cache->regcache = frame_save_as_regcache (fi);
          *this_prologue_cache = cache;
          return 1;
        }
      else
        {
          struct regcache *regcache;
          regcache = get_thread_arch_regcache (inferior_ptid, target_gdbarch);
          cache->regcache = regcache_dup (regcache);
          *this_prologue_cache = cache;
          return 1;
        }
    }

  return 0;
}

static void
spu2ppu_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct spu2ppu_cache *cache = this_cache;
  regcache_xfree (cache->regcache);
}

static const struct frame_unwind spu2ppu_unwind = {
  ARCH_FRAME,
  spu2ppu_this_id,
  spu2ppu_prev_register,
  NULL,
  spu2ppu_sniffer,
  spu2ppu_dealloc_cache,
  spu2ppu_prev_arch,
};


/* Function calling convention.  */

static CORE_ADDR
spu_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
{
  return sp & ~15;
}
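
/* For illustration only (kept under "#if 0", never compiled): the SPU
   stack pointer is kept 16-byte (quadword) aligned, which is what
   "sp & ~15" above does by clearing the low four address bits.  */
#if 0
#include <assert.h>

static void
spu_frame_align_examples (void)
{
  assert ((0x3fff0UL & ~15UL) == 0x3fff0UL);  /* already aligned */
  assert ((0x3fff7UL & ~15UL) == 0x3fff0UL);  /* rounded down    */
  assert ((0x3ffffUL & ~15UL) == 0x3fff0UL);
}
#endif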

static CORE_ADDR
spu_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp, CORE_ADDR funaddr,
                     struct value **args, int nargs, struct type *value_type,
                     CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
                     struct regcache *regcache)
{
  /* Allocate space sufficient for a breakpoint, keeping the stack aligned.  */
  sp = (sp - 4) & ~15;
  /* Store the address of that breakpoint */
  *bp_addr = sp;
  /* The call starts at the callee's entry point.  */
  *real_pc = funaddr;

  return sp;
}

static int
spu_scalar_value_p (struct type *type)
{
  switch (TYPE_CODE (type))
    {
    case TYPE_CODE_INT:
    case TYPE_CODE_ENUM:
    case TYPE_CODE_RANGE:
    case TYPE_CODE_CHAR:
    case TYPE_CODE_BOOL:
    case TYPE_CODE_PTR:
    case TYPE_CODE_REF:
      return TYPE_LENGTH (type) <= 16;

    default:
      return 0;
    }
}

static void
spu_value_to_regcache (struct regcache *regcache, int regnum,
                       struct type *type, const gdb_byte *in)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_write_part (regcache, regnum, preferred_slot, len, in);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_write (regcache, regnum++, in);
          in += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_write_part (regcache, regnum, 0, len, in);
    }
}

static void
spu_regcache_to_value (struct regcache *regcache, int regnum,
                       struct type *type, gdb_byte *out)
{
  int len = TYPE_LENGTH (type);

  if (spu_scalar_value_p (type))
    {
      int preferred_slot = len < 4 ? 4 - len : 0;
      regcache_cooked_read_part (regcache, regnum, preferred_slot, len, out);
    }
  else
    {
      while (len >= 16)
        {
          regcache_cooked_read (regcache, regnum++, out);
          out += 16;
          len -= 16;
        }

      if (len > 0)
        regcache_cooked_read_part (regcache, regnum, 0, len, out);
    }
}

static CORE_ADDR
spu_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
                     struct regcache *regcache, CORE_ADDR bp_addr,
                     int nargs, struct value **args, CORE_ADDR sp,
                     int struct_return, CORE_ADDR struct_addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR sp_delta;
  int i;
  int regnum = SPU_ARG1_REGNUM;
  int stack_arg = -1;
  gdb_byte buf[16];

  /* Set the return address.  */
  memset (buf, 0, sizeof buf);
  store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (bp_addr));
  regcache_cooked_write (regcache, SPU_LR_REGNUM, buf);

  /* If STRUCT_RETURN is true, then the struct return address (in
     STRUCT_ADDR) will consume the first argument-passing register.
     Both adjust the register count and store that value.  */
  if (struct_return)
    {
      memset (buf, 0, sizeof buf);
      store_unsigned_integer (buf, 4, byte_order, SPUADDR_ADDR (struct_addr));
      regcache_cooked_write (regcache, regnum++, buf);
    }

  /* Fill in argument registers.  */
  for (i = 0; i < nargs; i++)
    {
      struct value *arg = args[i];
      struct type *type = check_typedef (value_type (arg));
      const gdb_byte *contents = value_contents (arg);
      int len = TYPE_LENGTH (type);
      int n_regs = align_up (len, 16) / 16;

      /* If the argument doesn't wholly fit into registers, it and
         all subsequent arguments go to the stack.  */
      if (regnum + n_regs - 1 > SPU_ARGN_REGNUM)
        {
          stack_arg = i;
          break;
        }

      spu_value_to_regcache (regcache, regnum, type, contents);
      regnum += n_regs;
    }

  /* Overflow arguments go to the stack.  */
  if (stack_arg != -1)
    {
      CORE_ADDR ap;

      /* Allocate all required stack size.  */
      for (i = stack_arg; i < nargs; i++)
        {
          struct type *type = check_typedef (value_type (args[i]));
          sp -= align_up (TYPE_LENGTH (type), 16);
        }

      /* Fill in stack arguments.  */
      ap = sp;
      for (i = stack_arg; i < nargs; i++)
        {
          struct value *arg = args[i];
          struct type *type = check_typedef (value_type (arg));
          int len = TYPE_LENGTH (type);
          int preferred_slot;

          if (spu_scalar_value_p (type))
            preferred_slot = len < 4 ? 4 - len : 0;
          else
            preferred_slot = 0;

          target_write_memory (ap + preferred_slot, value_contents (arg), len);
          ap += align_up (TYPE_LENGTH (type), 16);
        }
    }

  /* Allocate stack frame header.  */
  sp -= 32;

  /* Store stack back chain.  */
  regcache_cooked_read (regcache, SPU_RAW_SP_REGNUM, buf);
  target_write_memory (sp, buf, 16);

  /* Finally, update all slots of the SP register.  */
  sp_delta = sp - extract_unsigned_integer (buf, 4, byte_order);
  for (i = 0; i < 4; i++)
    {
      CORE_ADDR sp_slot = extract_unsigned_integer (buf + 4*i, 4, byte_order);
      store_unsigned_integer (buf + 4*i, 4, byte_order, sp_slot + sp_delta);
    }
  regcache_cooked_write (regcache, SPU_RAW_SP_REGNUM, buf);

  return sp;
}

static struct frame_id
spu_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  CORE_ADDR pc = get_frame_register_unsigned (this_frame, SPU_PC_REGNUM);
  CORE_ADDR sp = get_frame_register_unsigned (this_frame, SPU_SP_REGNUM);
  return frame_id_build (SPUADDR (tdep->id, sp), SPUADDR (tdep->id, pc & -4));
}

/* Function return value access.  */

static enum return_value_convention
spu_return_value (struct gdbarch *gdbarch, struct type *func_type,
                  struct type *type, struct regcache *regcache,
                  gdb_byte *out, const gdb_byte *in)
{
  enum return_value_convention rvc;

  if (TYPE_LENGTH (type) <= (SPU_ARGN_REGNUM - SPU_ARG1_REGNUM + 1) * 16)
    rvc = RETURN_VALUE_REGISTER_CONVENTION;
  else
    rvc = RETURN_VALUE_STRUCT_CONVENTION;

  if (in)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          spu_value_to_regcache (regcache, SPU_ARG1_REGNUM, type, in);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error ("Cannot set function return value.");
          break;
        }
    }
  else if (out)
    {
      switch (rvc)
        {
        case RETURN_VALUE_REGISTER_CONVENTION:
          spu_regcache_to_value (regcache, SPU_ARG1_REGNUM, type, out);
          break;

        case RETURN_VALUE_STRUCT_CONVENTION:
          error ("Function return value unknown.");
          break;
        }
    }

  return rvc;
}


/* Breakpoints.  */

static const gdb_byte *
spu_breakpoint_from_pc (struct gdbarch *gdbarch, CORE_ADDR * pcptr, int *lenptr)
{
  static const gdb_byte breakpoint[] = { 0x00, 0x00, 0x3f, 0xff };

  *lenptr = sizeof breakpoint;
  return breakpoint;
}


/* Software single-stepping support.  */

static int
spu_software_single_step (struct frame_info *frame)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct address_space *aspace = get_frame_address_space (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR pc, next_pc;
  unsigned int insn;
  int offset, reg;
  gdb_byte buf[4];

  pc = get_frame_pc (frame);

  if (target_read_memory (pc, buf, 4))
    return 1;
  insn = extract_unsigned_integer (buf, 4, byte_order);

  /* Next sequential instruction is at PC + 4, except if the current
     instruction is a PPE-assisted call, in which case it is at PC + 8.
     Wrap around LS limit to be on the safe side.  */
  if ((insn & 0xffffff00) == 0x00002100)
    next_pc = (SPUADDR_ADDR (pc) + 8) & (SPU_LS_SIZE - 1);
  else
    next_pc = (SPUADDR_ADDR (pc) + 4) & (SPU_LS_SIZE - 1);

  insert_single_step_breakpoint (gdbarch,
                                 aspace, SPUADDR (SPUADDR_SPU (pc), next_pc));

  if (is_branch (insn, &offset, &reg))
    {
      CORE_ADDR target = offset;

      if (reg == SPU_PC_REGNUM)
        target += SPUADDR_ADDR (pc);
      else if (reg != -1)
        {
          get_frame_register_bytes (frame, reg, 0, 4, buf);
          target += extract_unsigned_integer (buf, 4, byte_order) & -4;
        }

      target = target & (SPU_LS_SIZE - 1);
      if (target != next_pc)
        insert_single_step_breakpoint (gdbarch, aspace,
                                       SPUADDR (SPUADDR_SPU (pc), target));
    }

  return 1;
}
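
/* For illustration only (kept under "#if 0", never compiled): the
   next-PC computation above masks with SPU_LS_SIZE - 1, so stepping
   past the end of local store wraps around to address 0.  The example
   assumes a 256 KB local store, i.e. SPU_LS_SIZE == 0x40000.  */
#if 0
#include <assert.h>

static void
spu_next_pc_examples (void)
{
  const unsigned int ls_size = 0x40000;  /* assumed value of SPU_LS_SIZE */

  assert (((0x00010 + 4) & (ls_size - 1)) == 0x00014);  /* normal step       */
  assert (((0x3fffc + 4) & (ls_size - 1)) == 0x00000);  /* wraps to LS start */
  assert (((0x3fffc + 8) & (ls_size - 1)) == 0x00004);  /* PPE-assisted call */
}
#endif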


/* Longjmp support.  */

static int
spu_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  gdb_byte buf[4];
  CORE_ADDR jb_addr;

  /* Jump buffer is pointed to by the argument register $r3.  */
  get_frame_register_bytes (frame, SPU_ARG1_REGNUM, 0, 4, buf);
  jb_addr = extract_unsigned_integer (buf, 4, byte_order);
  if (target_read_memory (SPUADDR (tdep->id, jb_addr), buf, 4))
    return 0;

  *pc = extract_unsigned_integer (buf, 4, byte_order);
  *pc = SPUADDR (tdep->id, *pc);
  return 1;
}


/* Disassembler.  */

struct spu_dis_asm_data
{
  struct gdbarch *gdbarch;
  int id;
};

static void
spu_dis_asm_print_address (bfd_vma addr, struct disassemble_info *info)
{
  struct spu_dis_asm_data *data = info->application_data;
  print_address (data->gdbarch, SPUADDR (data->id, addr), info->stream);
}

static int
gdb_print_insn_spu (bfd_vma memaddr, struct disassemble_info *info)
{
  /* The opcodes disassembler does 18-bit address arithmetic.  Make sure the
     SPU ID encoded in the high bits is added back when we call print_address.  */
  struct disassemble_info spu_info = *info;
  struct spu_dis_asm_data data;
  data.gdbarch = info->application_data;
  data.id = SPUADDR_SPU (memaddr);

  spu_info.application_data = &data;
  spu_info.print_address_func = spu_dis_asm_print_address;
  return print_insn_spu (memaddr, &spu_info);
}


/* Target overlays for the SPU overlay manager.

   See the documentation of simple_overlay_update for how the
   interface is supposed to work.

   Data structures used by the overlay manager:

   struct ovly_table
     {
        u32 vma;
        u32 size;
        u32 pos;
        u32 buf;
     } _ovly_table[];   -- one entry per overlay section

   struct ovly_buf_table
     {
        u32 mapped;
     } _ovly_buf_table[];  -- one entry per overlay buffer

   _ovly_table should never change.

   Both tables are aligned to a 16-byte boundary, the symbols _ovly_table
   and _ovly_buf_table are of type STT_OBJECT and their size set to the size
   of the respective array. buf in _ovly_table is an index into _ovly_buf_table.

   mapped is an index into _ovly_table. Both the mapped and buf indices start
   from one to reference the first entry in their respective tables.  */
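
/* For illustration only (kept under "#if 0", never compiled): the layout
   described above, written as C declarations over 32-bit words.  Each
   _ovly_table entry is 16 bytes, which is why spu_get_overlay_table
   below walks the table in steps of 16.  */
#if 0
#include <stdint.h>

struct ovly_table_entry
{
  uint32_t vma;   /* mapped address of the overlay section */
  uint32_t size;  /* section size */
  uint32_t pos;   /* file position of the section contents */
  uint32_t buf;   /* 1-based index into _ovly_buf_table */
};

struct ovly_buf_table_entry
{
  uint32_t mapped;  /* 1-based index of the _ovly_table entry
                       currently mapped into this buffer */
};
#endif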

/* Using the per-objfile private data mechanism, we store for each
   objfile an array of "struct spu_overlay_table" structures, one
   for each obj_section of the objfile.  This structure holds two
   fields, MAPPED_PTR and MAPPED_VAL.  If MAPPED_PTR is zero, this
   is *not* an overlay section.  If it is non-zero, it represents
   a target address.  The overlay section is mapped iff the target
   integer at this location equals MAPPED_VAL.  */

static const struct objfile_data *spu_overlay_data;

struct spu_overlay_table
  {
    CORE_ADDR mapped_ptr;
    CORE_ADDR mapped_val;
  };

/* Retrieve the overlay table for OBJFILE.  If not already cached, read
   the _ovly_table data structure from the target and initialize the
   spu_overlay_table data structure from it.  */
static struct spu_overlay_table *
spu_get_overlay_table (struct objfile *objfile)
{
  enum bfd_endian byte_order = bfd_big_endian (objfile->obfd)?
                   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
  struct minimal_symbol *ovly_table_msym, *ovly_buf_table_msym;
  CORE_ADDR ovly_table_base, ovly_buf_table_base;
  unsigned ovly_table_size, ovly_buf_table_size;
  struct spu_overlay_table *tbl;
  struct obj_section *osect;
  char *ovly_table;
  int i;

  tbl = objfile_data (objfile, spu_overlay_data);
  if (tbl)
    return tbl;

  ovly_table_msym = lookup_minimal_symbol ("_ovly_table", NULL, objfile);
  if (!ovly_table_msym)
    return NULL;

  ovly_buf_table_msym = lookup_minimal_symbol ("_ovly_buf_table", NULL, objfile);
  if (!ovly_buf_table_msym)
    return NULL;

  ovly_table_base = SYMBOL_VALUE_ADDRESS (ovly_table_msym);
  ovly_table_size = MSYMBOL_SIZE (ovly_table_msym);

  ovly_buf_table_base = SYMBOL_VALUE_ADDRESS (ovly_buf_table_msym);
  ovly_buf_table_size = MSYMBOL_SIZE (ovly_buf_table_msym);

  ovly_table = xmalloc (ovly_table_size);
  read_memory (ovly_table_base, ovly_table, ovly_table_size);

  tbl = OBSTACK_CALLOC (&objfile->objfile_obstack,
                        objfile->sections_end - objfile->sections,
                        struct spu_overlay_table);

  for (i = 0; i < ovly_table_size / 16; i++)
    {
      CORE_ADDR vma  = extract_unsigned_integer (ovly_table + 16*i + 0,
                                                 4, byte_order);
      CORE_ADDR size = extract_unsigned_integer (ovly_table + 16*i + 4,
                                                 4, byte_order);
      CORE_ADDR pos  = extract_unsigned_integer (ovly_table + 16*i + 8,
                                                 4, byte_order);
      CORE_ADDR buf  = extract_unsigned_integer (ovly_table + 16*i + 12,
                                                 4, byte_order);

      if (buf == 0 || (buf - 1) * 4 >= ovly_buf_table_size)
        continue;

      ALL_OBJFILE_OSECTIONS (objfile, osect)
        if (vma == bfd_section_vma (objfile->obfd, osect->the_bfd_section)
            && pos == osect->the_bfd_section->filepos)
          {
            int ndx = osect - objfile->sections;
            tbl[ndx].mapped_ptr = ovly_buf_table_base + (buf - 1) * 4;
            tbl[ndx].mapped_val = i + 1;
            break;
          }
    }

  xfree (ovly_table);
  set_objfile_data (objfile, spu_overlay_data, tbl);
  return tbl;
}
1722
 
1723
/* Read _ovly_buf_table entry from the target to determine whether
1724
   OSECT is currently mapped, and update the mapped state.  */
1725
static void
1726
spu_overlay_update_osect (struct obj_section *osect)
1727
{
1728
  enum bfd_endian byte_order = bfd_big_endian (osect->objfile->obfd)?
1729
                   BFD_ENDIAN_BIG : BFD_ENDIAN_LITTLE;
1730
  struct spu_overlay_table *ovly_table;
1731
  CORE_ADDR id, val;
1732
 
1733
  ovly_table = spu_get_overlay_table (osect->objfile);
1734
  if (!ovly_table)
1735
    return;
1736
 
1737
  ovly_table += osect - osect->objfile->sections;
1738
  if (ovly_table->mapped_ptr == 0)
1739
    return;
1740
 
1741
  id = SPUADDR_SPU (obj_section_addr (osect));
1742
  val = read_memory_unsigned_integer (SPUADDR (id, ovly_table->mapped_ptr),
1743
                                      4, byte_order);
1744
  osect->ovly_mapped = (val == ovly_table->mapped_val);
1745
}
1746
 
1747
/* If OSECT is NULL, then update all sections' mapped state.
1748
   If OSECT is non-NULL, then update only OSECT's mapped state.  */
1749
static void
1750
spu_overlay_update (struct obj_section *osect)
1751
{
1752
  /* Just one section.  */
1753
  if (osect)
1754
    spu_overlay_update_osect (osect);
1755
 
1756
  /* All sections.  */
1757
  else
1758
    {
1759
      struct objfile *objfile;
1760
 
1761
      ALL_OBJSECTIONS (objfile, osect)
1762
        if (section_is_overlay (osect))
1763
          spu_overlay_update_osect (osect);
1764
    }
1765
}
1766
 
1767
/* Whenever a new objfile is loaded, read the target's _ovly_table.
1768
   If there is one, go through all sections and make sure for non-
1769
   overlay sections LMA equals VMA, while for overlay sections LMA
1770
   is larger than local store size.  */
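/* For instance, an overlay section whose image starts at (hypothetical)
   file offset 0x1000 is assigned the load address 0x1000 + SPU_LS_SIZE
   below, which by construction lies outside local store.  Making LMA
   differ from VMA is what GDB's generic overlay machinery (cf. the
   section_is_overlay check used above) keys on to treat the section as
   an overlay at all.  */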
1771
static void
1772
spu_overlay_new_objfile (struct objfile *objfile)
1773
{
1774
  struct spu_overlay_table *ovly_table;
1775
  struct obj_section *osect;
1776
 
1777
  /* If we've already touched this file, do nothing.  */
1778
  if (!objfile || objfile_data (objfile, spu_overlay_data) != NULL)
1779
    return;
1780
 
1781
  /* Consider only SPU objfiles.  */
1782
  if (bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1783
    return;
1784
 
1785
  /* Check if this objfile has overlays.  */
1786
  ovly_table = spu_get_overlay_table (objfile);
1787
  if (!ovly_table)
1788
    return;
1789
 
1790
  /* Now go and fiddle with all the LMAs.  */
1791
  ALL_OBJFILE_OSECTIONS (objfile, osect)
1792
    {
1793
      bfd *obfd = objfile->obfd;
1794
      asection *bsect = osect->the_bfd_section;
1795
      int ndx = osect - objfile->sections;
1796
 
1797
      if (ovly_table[ndx].mapped_ptr == 0)
1798
        bfd_section_lma (obfd, bsect) = bfd_section_vma (obfd, bsect);
1799
      else
1800
        bfd_section_lma (obfd, bsect) = bsect->filepos + SPU_LS_SIZE;
1801
    }
1802
}
1803
 
1804
 
1805
/* Insert temporary breakpoint on "main" function of newly loaded
1806
   SPE context OBJFILE.  */
1807
static void
1808
spu_catch_start (struct objfile *objfile)
1809
{
1810
  struct minimal_symbol *minsym;
1811
  struct symtab *symtab;
1812
  CORE_ADDR pc;
1813
  char buf[32];
1814
 
1815
  /* Do this only if requested by "set spu stop-on-load on".  */
1816
  if (!spu_stop_on_load_p)
1817
    return;
1818
 
1819
  /* Consider only SPU objfiles.  */
1820
  if (!objfile || bfd_get_arch (objfile->obfd) != bfd_arch_spu)
1821
    return;
1822
 
1823
  /* The main objfile is handled differently.  */
1824
  if (objfile == symfile_objfile)
1825
    return;
1826
 
1827
  /* There can be multiple symbols named "main".  Search for the
1828
     "main" in *this* objfile.  */
1829
  minsym = lookup_minimal_symbol ("main", NULL, objfile);
1830
  if (!minsym)
1831
    return;
1832
 
1833
  /* If we have debugging information, try to use it -- this
1834
     will allow us to properly skip the prologue.  */
1835
  pc = SYMBOL_VALUE_ADDRESS (minsym);
1836
  symtab = find_pc_sect_symtab (pc, SYMBOL_OBJ_SECTION (minsym));
1837
  if (symtab != NULL)
1838
    {
1839
      struct blockvector *bv = BLOCKVECTOR (symtab);
1840
      struct block *block = BLOCKVECTOR_BLOCK (bv, GLOBAL_BLOCK);
1841
      struct symbol *sym;
1842
      struct symtab_and_line sal;
1843
 
1844
      sym = lookup_block_symbol (block, "main", NULL, VAR_DOMAIN);
1845
      if (sym)
1846
        {
1847
          fixup_symbol_section (sym, objfile);
1848
          sal = find_function_start_sal (sym, 1);
1849
          pc = sal.pc;
1850
        }
1851
    }
1852
 
1853
  /* Use a numerical address for the set_breakpoint command to avoid having
1854
     the breakpoint re-set incorrectly.  */
1855
  xsnprintf (buf, sizeof buf, "*%s", core_addr_to_string (pc));
1856
  set_breakpoint (get_objfile_arch (objfile),
1857
                  buf, NULL /* condition */,
1858
 
1859
                  -1 /* thread */, 0 /* ignore_count */,
1860
 
1861
}
1862
 
1863
 
1864
/* Look up OBJFILE loaded into FRAME's SPU context.  */
1865
static struct objfile *
1866
spu_objfile_from_frame (struct frame_info *frame)
1867
{
1868
  struct gdbarch *gdbarch = get_frame_arch (frame);
1869
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1870
  struct objfile *obj;
1871
 
1872
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
1873
    return NULL;
1874
 
1875
  ALL_OBJFILES (obj)
1876
    {
1877
      if (obj->sections != obj->sections_end
1878
          && SPUADDR_SPU (obj_section_addr (obj->sections)) == tdep->id)
1879
        return obj;
1880
    }
1881
 
1882
  return NULL;
1883
}
1884
 
1885
/* Flush the software-managed cache used for __ea pointer accesses, if
   available.  */
1886
static void
1887
flush_ea_cache (void)
1888
{
1889
  struct minimal_symbol *msymbol;
1890
  struct objfile *obj;
1891
 
1892
  if (!has_stack_frames ())
1893
    return;
1894
 
1895
  obj = spu_objfile_from_frame (get_current_frame ());
1896
  if (obj == NULL)
1897
    return;
1898
 
1899
  /* Lookup inferior function __cache_flush.  */
1900
  msymbol = lookup_minimal_symbol ("__cache_flush", NULL, obj);
1901
  if (msymbol != NULL)
1902
    {
1903
      struct type *type;
1904
      CORE_ADDR addr;
1905
 
1906
      type = objfile_type (obj)->builtin_void;
1907
      type = lookup_function_type (type);
1908
      type = lookup_pointer_type (type);
1909
      addr = SYMBOL_VALUE_ADDRESS (msymbol);
1910
 
1911
      call_function_by_hand (value_from_pointer (type, addr), 0, NULL);
1912
    }
1913
}
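/* The call above has roughly the effect of the user typing
   "call __cache_flush()" while stopped inside the SPE context; the
   void (*) (void) function type is synthesized by hand since only a
   minimal symbol for __cache_flush may be available.  */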
1914
 
1915
/* This handler is called when the inferior has stopped.  If it stopped
1916
   inside an SPU context, flush the software-managed EA cache if it is
   in use.  */
1917
static void
1918
spu_attach_normal_stop (struct bpstats *bs, int print_frame)
1919
{
1920
  if (!spu_auto_flush_cache_p)
1921
    return;
1922
 
1923
  /* Temporarily reset spu_auto_flush_cache_p to avoid recursively
1924
     re-entering this function when __cache_flush stops.  */
1925
  spu_auto_flush_cache_p = 0;
1926
  flush_ea_cache ();
1927
  spu_auto_flush_cache_p = 1;
1928
}
1929
 
1930
 
1931
/* "info spu" commands.  */
1932
 
1933
static void
1934
info_spu_event_command (char *args, int from_tty)
1935
{
1936
  struct frame_info *frame = get_selected_frame (NULL);
1937
  ULONGEST event_status = 0;
1938
  ULONGEST event_mask = 0;
1939
  struct cleanup *chain;
1940
  gdb_byte buf[100];
1941
  char annex[32];
1942
  LONGEST len;
1943
  int rc, id;
1944
 
1945
  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
1946
    error (_("\"info spu\" is only supported on the SPU architecture."));
1947
 
1948
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
1949
 
1950
  xsnprintf (annex, sizeof annex, "%d/event_status", id);
1951
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1952
                     buf, 0, (sizeof (buf) - 1));
1953
  if (len <= 0)
1954
    error (_("Could not read event_status."));
1955
  buf[len] = '\0';
1956
  event_status = strtoulst (buf, NULL, 16);
1957
 
1958
  xsnprintf (annex, sizeof annex, "%d/event_mask", id);
1959
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
1960
                     buf, 0, (sizeof (buf) - 1));
1961
  if (len <= 0)
1962
    error (_("Could not read event_mask."));
1963
  buf[len] = '\0';
1964
  event_mask = strtoulst (buf, NULL, 16);
1965
 
1966
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoEvent");
1967
 
1968
  if (ui_out_is_mi_like_p (uiout))
1969
    {
1970
      ui_out_field_fmt (uiout, "event_status",
1971
                        "0x%s", phex_nz (event_status, 4));
1972
      ui_out_field_fmt (uiout, "event_mask",
1973
                        "0x%s", phex_nz (event_mask, 4));
1974
    }
1975
  else
1976
    {
1977
      printf_filtered (_("Event Status 0x%s\n"), phex (event_status, 4));
1978
      printf_filtered (_("Event Mask   0x%s\n"), phex (event_mask, 4));
1979
    }
1980
 
1981
  do_cleanups (chain);
1982
}
1983
 
1984
static void
1985
info_spu_signal_command (char *args, int from_tty)
1986
{
1987
  struct frame_info *frame = get_selected_frame (NULL);
1988
  struct gdbarch *gdbarch = get_frame_arch (frame);
1989
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1990
  ULONGEST signal1 = 0;
1991
  ULONGEST signal1_type = 0;
1992
  int signal1_pending = 0;
1993
  ULONGEST signal2 = 0;
1994
  ULONGEST signal2_type = 0;
1995
  int signal2_pending = 0;
1996
  struct cleanup *chain;
1997
  char annex[32];
1998
  gdb_byte buf[100];
1999
  LONGEST len;
2000
  int rc, id;
2001
 
2002
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2003
    error (_("\"info spu\" is only supported on the SPU architecture."));
2004
 
2005
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2006
 
2007
  xsnprintf (annex, sizeof annex, "%d/signal1", id);
2008
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2009
  if (len < 0)
2010
    error (_("Could not read signal1."));
2011
  else if (len == 4)
2012
    {
2013
      signal1 = extract_unsigned_integer (buf, 4, byte_order);
2014
      signal1_pending = 1;
2015
    }
2016
 
2017
  xsnprintf (annex, sizeof annex, "%d/signal1_type", id);
2018
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2019
                     buf, 0, (sizeof (buf) - 1));
2020
  if (len <= 0)
2021
    error (_("Could not read signal1_type."));
2022
  buf[len] = '\0';
2023
  signal1_type = strtoulst (buf, NULL, 16);
2024
 
2025
  xsnprintf (annex, sizeof annex, "%d/signal2", id);
2026
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex, buf, 0, 4);
2027
  if (len < 0)
2028
    error (_("Could not read signal2."));
2029
  else if (len == 4)
2030
    {
2031
      signal2 = extract_unsigned_integer (buf, 4, byte_order);
2032
      signal2_pending = 1;
2033
    }
2034
 
2035
  xsnprintf (annex, sizeof annex, "%d/signal2_type", id);
2036
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2037
                     buf, 0, (sizeof (buf) - 1));
2038
  if (len <= 0)
2039
    error (_("Could not read signal2_type."));
2040
  buf[len] = '\0';
2041
  signal2_type = strtoulst (buf, NULL, 16);
2042
 
2043
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoSignal");
2044
 
2045
  if (ui_out_is_mi_like_p (uiout))
2046
    {
2047
      ui_out_field_int (uiout, "signal1_pending", signal1_pending);
2048
      ui_out_field_fmt (uiout, "signal1", "0x%s", phex_nz (signal1, 4));
2049
      ui_out_field_int (uiout, "signal1_type", signal1_type);
2050
      ui_out_field_int (uiout, "signal2_pending", signal2_pending);
2051
      ui_out_field_fmt (uiout, "signal2", "0x%s", phex_nz (signal2, 4));
2052
      ui_out_field_int (uiout, "signal2_type", signal2_type);
2053
    }
2054
  else
2055
    {
2056
      if (signal1_pending)
2057
        printf_filtered (_("Signal 1 control word 0x%s "), phex (signal1, 4));
2058
      else
2059
        printf_filtered (_("Signal 1 not pending "));
2060
 
2061
      if (signal1_type)
2062
        printf_filtered (_("(Type Or)\n"));
2063
      else
2064
        printf_filtered (_("(Type Overwrite)\n"));
2065
 
2066
      if (signal2_pending)
2067
        printf_filtered (_("Signal 2 control word 0x%s "), phex (signal2, 4));
2068
      else
2069
        printf_filtered (_("Signal 2 not pending "));
2070
 
2071
      if (signal2_type)
2072
        printf_filtered (_("(Type Or)\n"));
2073
      else
2074
        printf_filtered (_("(Type Overwrite)\n"));
2075
    }
2076
 
2077
  do_cleanups (chain);
2078
}
2079
 
2080
static void
2081
info_spu_mailbox_list (gdb_byte *buf, int nr, enum bfd_endian byte_order,
2082
                       const char *field, const char *msg)
2083
{
2084
  struct cleanup *chain;
2085
  int i;
2086
 
2087
  if (nr <= 0)
2088
    return;
2089
 
2090
  chain = make_cleanup_ui_out_table_begin_end (uiout, 1, nr, "mbox");
2091
 
2092
  ui_out_table_header (uiout, 32, ui_left, field, msg);
2093
  ui_out_table_body (uiout);
2094
 
2095
  for (i = 0; i < nr; i++)
2096
    {
2097
      struct cleanup *val_chain;
2098
      ULONGEST val;
2099
      val_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "mbox");
2100
      val = extract_unsigned_integer (buf + 4*i, 4, byte_order);
2101
      ui_out_field_fmt (uiout, field, "0x%s", phex (val, 4));
2102
      do_cleanups (val_chain);
2103
 
2104
      if (!ui_out_is_mi_like_p (uiout))
2105
        printf_filtered ("\n");
2106
    }
2107
 
2108
  do_cleanups (chain);
2109
}
2110
 
2111
static void
2112
info_spu_mailbox_command (char *args, int from_tty)
2113
{
2114
  struct frame_info *frame = get_selected_frame (NULL);
2115
  struct gdbarch *gdbarch = get_frame_arch (frame);
2116
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2117
  struct cleanup *chain;
2118
  char annex[32];
2119
  gdb_byte buf[1024];
2120
  LONGEST len;
2121
  int i, id;
2122
 
2123
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2124
    error (_("\"info spu\" is only supported on the SPU architecture."));
2125
 
2126
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2127
 
2128
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoMailbox");
2129
 
2130
  xsnprintf (annex, sizeof annex, "%d/mbox_info", id);
2131
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2132
                     buf, 0, sizeof buf);
2133
  if (len < 0)
2134
    error (_("Could not read mbox_info."));
2135
 
2136
  info_spu_mailbox_list (buf, len / 4, byte_order,
2137
                         "mbox", "SPU Outbound Mailbox");
2138
 
2139
  xsnprintf (annex, sizeof annex, "%d/ibox_info", id);
2140
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2141
                     buf, 0, sizeof buf);
2142
  if (len < 0)
2143
    error (_("Could not read ibox_info."));
2144
 
2145
  info_spu_mailbox_list (buf, len / 4, byte_order,
2146
                         "ibox", "SPU Outbound Interrupt Mailbox");
2147
 
2148
  xsnprintf (annex, sizeof annex, "%d/wbox_info", id);
2149
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2150
                     buf, 0, sizeof buf);
2151
  if (len < 0)
2152
    error (_("Could not read wbox_info."));
2153
 
2154
  info_spu_mailbox_list (buf, len / 4, byte_order,
2155
                         "wbox", "SPU Inbound Mailbox");
2156
 
2157
  do_cleanups (chain);
2158
}
2159
 
2160
static ULONGEST
2161
spu_mfc_get_bitfield (ULONGEST word, int first, int last)
2162
{
2163
  ULONGEST mask = ~(~(ULONGEST)0 << (last - first + 1));
2164
  return (word >> (63 - last)) & mask;
2165
}
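/* The FIRST/LAST arguments above use the big-endian bit numbering of the
   Cell documentation: bit 0 is the most significant bit of the 64-bit
   word and bit 63 the least significant.  For a hypothetical word
   0x123456789abcdef0 this gives, e.g.,

       spu_mfc_get_bitfield (word,  0,  3) == 0x1   (top nibble)
       spu_mfc_get_bitfield (word, 60, 63) == 0x0   (bottom nibble)

   so the opcode field extracted below as bits 27..34 is eight bits wide,
   matching the 256-entry spu_mfc_opcode table.  */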
2166
 
2167
static void
2168
info_spu_dma_cmdlist (gdb_byte *buf, int nr, enum bfd_endian byte_order)
2169
{
2170
  static char *spu_mfc_opcode[256] =
2171
    {
2172
    /* 00 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2173
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2174
    /* 10 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2175
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2176
    /* 20 */ "put", "putb", "putf", NULL, "putl", "putlb", "putlf", NULL,
2177
             "puts", "putbs", "putfs", NULL, NULL, NULL, NULL, NULL,
2178
    /* 30 */ "putr", "putrb", "putrf", NULL, "putrl", "putrlb", "putrlf", NULL,
2179
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2180
    /* 40 */ "get", "getb", "getf", NULL, "getl", "getlb", "getlf", NULL,
2181
             "gets", "getbs", "getfs", NULL, NULL, NULL, NULL, NULL,
2182
    /* 50 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2183
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2184
    /* 60 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2185
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2186
    /* 70 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2187
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2188
    /* 80 */ "sdcrt", "sdcrtst", NULL, NULL, NULL, NULL, NULL, NULL,
2189
             NULL, "sdcrz", NULL, NULL, NULL, "sdcrst", NULL, "sdcrf",
2190
    /* 90 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2191
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2192
    /* a0 */ "sndsig", "sndsigb", "sndsigf", NULL, NULL, NULL, NULL, NULL,
2193
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2194
    /* b0 */ "putlluc", NULL, NULL, NULL, "putllc", NULL, NULL, NULL,
2195
             "putqlluc", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2196
    /* c0 */ "barrier", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2197
             "mfceieio", NULL, NULL, NULL, "mfcsync", NULL, NULL, NULL,
2198
    /* d0 */ "getllar", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2199
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2200
    /* e0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2201
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2202
    /* f0 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2203
             NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
2204
    };
2205
 
2206
  int *seq = alloca (nr * sizeof (int));
2207
  int done = 0;
2208
  struct cleanup *chain;
2209
  int i, j;
2210
 
2211
 
2212
  /* Determine sequence in which to display (valid) entries.  */
2213
  for (i = 0; i < nr; i++)
2214
    {
2215
      /* Search for the first valid entry all of whose
2216
         dependencies are met.  */
2217
      for (j = 0; j < nr; j++)
2218
        {
2219
          ULONGEST mfc_cq_dw3;
2220
          ULONGEST dependencies;
2221
 
2222
          if (done & (1 << (nr - 1 - j)))
2223
            continue;
2224
 
2225
          mfc_cq_dw3
2226
            = extract_unsigned_integer (buf + 32*j + 24, 8, byte_order);
2227
          if (!spu_mfc_get_bitfield (mfc_cq_dw3, 16, 16))
2228
            continue;
2229
 
2230
          dependencies = spu_mfc_get_bitfield (mfc_cq_dw3, 0, nr - 1);
2231
          if ((dependencies & done) != dependencies)
2232
            continue;
2233
 
2234
          seq[i] = j;
2235
          done |= 1 << (nr - 1 - j);
2236
          break;
2237
        }
2238
 
2239
      if (j == nr)
2240
        break;
2241
    }
2242
 
2243
  nr = i;
2244
 
2245
 
2246
  chain = make_cleanup_ui_out_table_begin_end (uiout, 10, nr, "dma_cmd");
2247
 
2248
  ui_out_table_header (uiout, 7, ui_left, "opcode", "Opcode");
2249
  ui_out_table_header (uiout, 3, ui_left, "tag", "Tag");
2250
  ui_out_table_header (uiout, 3, ui_left, "tid", "TId");
2251
  ui_out_table_header (uiout, 3, ui_left, "rid", "RId");
2252
  ui_out_table_header (uiout, 18, ui_left, "ea", "EA");
2253
  ui_out_table_header (uiout, 7, ui_left, "lsa", "LSA");
2254
  ui_out_table_header (uiout, 7, ui_left, "size", "Size");
2255
  ui_out_table_header (uiout, 7, ui_left, "lstaddr", "LstAddr");
2256
  ui_out_table_header (uiout, 7, ui_left, "lstsize", "LstSize");
2257
  ui_out_table_header (uiout, 1, ui_left, "error_p", "E");
2258
 
2259
  ui_out_table_body (uiout);
2260
 
2261
  for (i = 0; i < nr; i++)
2262
    {
2263
      struct cleanup *cmd_chain;
2264
      ULONGEST mfc_cq_dw0;
2265
      ULONGEST mfc_cq_dw1;
2266
      ULONGEST mfc_cq_dw2;
2267
      int mfc_cmd_opcode, mfc_cmd_tag, rclass_id, tclass_id;
2268
      int lsa, size, list_lsa, list_size, mfc_lsa, mfc_size;
2269
      ULONGEST mfc_ea;
2270
      int list_valid_p, noop_valid_p, qw_valid_p, ea_valid_p, cmd_error_p;
2271
 
2272
      /* Decode contents of MFC Command Queue Context Save/Restore Registers.
2273
         See "Cell Broadband Engine Registers V1.3", section 3.3.2.1.  */
2274
 
2275
      mfc_cq_dw0
2276
        = extract_unsigned_integer (buf + 32*seq[i], 8, byte_order);
2277
      mfc_cq_dw1
2278
        = extract_unsigned_integer (buf + 32*seq[i] + 8, 8, byte_order);
2279
      mfc_cq_dw2
2280
        = extract_unsigned_integer (buf + 32*seq[i] + 16, 8, byte_order);
2281
 
2282
      list_lsa = spu_mfc_get_bitfield (mfc_cq_dw0, 0, 14);
2283
      list_size = spu_mfc_get_bitfield (mfc_cq_dw0, 15, 26);
2284
      mfc_cmd_opcode = spu_mfc_get_bitfield (mfc_cq_dw0, 27, 34);
2285
      mfc_cmd_tag = spu_mfc_get_bitfield (mfc_cq_dw0, 35, 39);
2286
      list_valid_p = spu_mfc_get_bitfield (mfc_cq_dw0, 40, 40);
2287
      rclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 41, 43);
2288
      tclass_id = spu_mfc_get_bitfield (mfc_cq_dw0, 44, 46);
2289
 
2290
      mfc_ea = spu_mfc_get_bitfield (mfc_cq_dw1, 0, 51) << 12
2291
                | spu_mfc_get_bitfield (mfc_cq_dw2, 25, 36);
2292
 
2293
      mfc_lsa = spu_mfc_get_bitfield (mfc_cq_dw2, 0, 13);
2294
      mfc_size = spu_mfc_get_bitfield (mfc_cq_dw2, 14, 24);
2295
      noop_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 37, 37);
2296
      qw_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 38, 38);
2297
      ea_valid_p = spu_mfc_get_bitfield (mfc_cq_dw2, 39, 39);
2298
      cmd_error_p = spu_mfc_get_bitfield (mfc_cq_dw2, 40, 40);
2299
 
2300
      cmd_chain = make_cleanup_ui_out_tuple_begin_end (uiout, "cmd");
2301
 
2302
      if (spu_mfc_opcode[mfc_cmd_opcode])
2303
        ui_out_field_string (uiout, "opcode", spu_mfc_opcode[mfc_cmd_opcode]);
2304
      else
2305
        ui_out_field_int (uiout, "opcode", mfc_cmd_opcode);
2306
 
2307
      ui_out_field_int (uiout, "tag", mfc_cmd_tag);
2308
      ui_out_field_int (uiout, "tid", tclass_id);
2309
      ui_out_field_int (uiout, "rid", rclass_id);
2310
 
2311
      if (ea_valid_p)
2312
        ui_out_field_fmt (uiout, "ea", "0x%s", phex (mfc_ea, 8));
2313
      else
2314
        ui_out_field_skip (uiout, "ea");
2315
 
2316
      ui_out_field_fmt (uiout, "lsa", "0x%05x", mfc_lsa << 4);
2317
      if (qw_valid_p)
2318
        ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size << 4);
2319
      else
2320
        ui_out_field_fmt (uiout, "size", "0x%05x", mfc_size);
2321
 
2322
      if (list_valid_p)
2323
        {
2324
          ui_out_field_fmt (uiout, "lstaddr", "0x%05x", list_lsa << 3);
2325
          ui_out_field_fmt (uiout, "lstsize", "0x%05x", list_size << 3);
2326
        }
2327
      else
2328
        {
2329
          ui_out_field_skip (uiout, "lstaddr");
2330
          ui_out_field_skip (uiout, "lstsize");
2331
        }
2332
 
2333
      if (cmd_error_p)
2334
        ui_out_field_string (uiout, "error_p", "*");
2335
      else
2336
        ui_out_field_skip (uiout, "error_p");
2337
 
2338
      do_cleanups (cmd_chain);
2339
 
2340
      if (!ui_out_is_mi_like_p (uiout))
2341
        printf_filtered ("\n");
2342
    }
2343
 
2344
  do_cleanups (chain);
2345
}
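/* The decoder above assumes the 32-byte command-queue entry layout of
   the MFC context save/restore area: doubleword 0 at offset 0 holds the
   list LSA/size, opcode, tag and class IDs; doubleword 1 at offset 8 the
   upper effective-address bits; doubleword 2 at offset 16 the low EA
   bits, the MFC LSA/size and the valid/error flags; doubleword 3 at
   offset 24 the entry-valid bit and the dependency mask used for
   ordering.  The 64-bit effective address, for example, is reassembled
   as

       mfc_ea = (dw1 bits 0..51) << 12  |  (dw2 bits 25..36)

   i.e. a 4K-aligned upper part combined with a 12-bit page offset.  */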
2346
 
2347
static void
2348
info_spu_dma_command (char *args, int from_tty)
2349
{
2350
  struct frame_info *frame = get_selected_frame (NULL);
2351
  struct gdbarch *gdbarch = get_frame_arch (frame);
2352
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2353
  ULONGEST dma_info_type;
2354
  ULONGEST dma_info_mask;
2355
  ULONGEST dma_info_status;
2356
  ULONGEST dma_info_stall_and_notify;
2357
  ULONGEST dma_info_atomic_command_status;
2358
  struct cleanup *chain;
2359
  char annex[32];
2360
  gdb_byte buf[1024];
2361
  LONGEST len;
2362
  int i, id;
2363
 
2364
  if (gdbarch_bfd_arch_info (get_frame_arch (frame))->arch != bfd_arch_spu)
2365
    error (_("\"info spu\" is only supported on the SPU architecture."));
2366
 
2367
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2368
 
2369
  xsnprintf (annex, sizeof annex, "%d/dma_info", id);
2370
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2371
                     buf, 0, 40 + 16 * 32);
2372
  if (len <= 0)
2373
    error (_("Could not read dma_info."));
2374
 
2375
  dma_info_type
2376
    = extract_unsigned_integer (buf, 8, byte_order);
2377
  dma_info_mask
2378
    = extract_unsigned_integer (buf + 8, 8, byte_order);
2379
  dma_info_status
2380
    = extract_unsigned_integer (buf + 16, 8, byte_order);
2381
  dma_info_stall_and_notify
2382
    = extract_unsigned_integer (buf + 24, 8, byte_order);
2383
  dma_info_atomic_command_status
2384
    = extract_unsigned_integer (buf + 32, 8, byte_order);
2385
 
2386
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoDMA");
2387
 
2388
  if (ui_out_is_mi_like_p (uiout))
2389
    {
2390
      ui_out_field_fmt (uiout, "dma_info_type", "0x%s",
2391
                        phex_nz (dma_info_type, 4));
2392
      ui_out_field_fmt (uiout, "dma_info_mask", "0x%s",
2393
                        phex_nz (dma_info_mask, 4));
2394
      ui_out_field_fmt (uiout, "dma_info_status", "0x%s",
2395
                        phex_nz (dma_info_status, 4));
2396
      ui_out_field_fmt (uiout, "dma_info_stall_and_notify", "0x%s",
2397
                        phex_nz (dma_info_stall_and_notify, 4));
2398
      ui_out_field_fmt (uiout, "dma_info_atomic_command_status", "0x%s",
2399
                        phex_nz (dma_info_atomic_command_status, 4));
2400
    }
2401
  else
2402
    {
2403
      const char *query_msg = _("no query pending");
2404
 
2405
      if (dma_info_type & 4)
2406
        switch (dma_info_type & 3)
2407
          {
2408
            case 1: query_msg = _("'any' query pending"); break;
2409
            case 2: query_msg = _("'all' query pending"); break;
2410
            default: query_msg = _("undefined query type"); break;
2411
          }
2412
 
2413
      printf_filtered (_("Tag-Group Status  0x%s\n"),
2414
                       phex (dma_info_status, 4));
2415
      printf_filtered (_("Tag-Group Mask    0x%s (%s)\n"),
2416
                       phex (dma_info_mask, 4), query_msg);
2417
      printf_filtered (_("Stall-and-Notify  0x%s\n"),
2418
                       phex (dma_info_stall_and_notify, 4));
2419
      printf_filtered (_("Atomic Cmd Status 0x%s\n"),
2420
                       phex (dma_info_atomic_command_status, 4));
2421
      printf_filtered ("\n");
2422
    }
2423
 
2424
  info_spu_dma_cmdlist (buf + 40, 16, byte_order);
2425
  do_cleanups (chain);
2426
}
2427
 
2428
static void
2429
info_spu_proxydma_command (char *args, int from_tty)
2430
{
2431
  struct frame_info *frame = get_selected_frame (NULL);
2432
  struct gdbarch *gdbarch = get_frame_arch (frame);
2433
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2434
  ULONGEST dma_info_type;
2435
  ULONGEST dma_info_mask;
2436
  ULONGEST dma_info_status;
2437
  struct cleanup *chain;
2438
  char annex[32];
2439
  gdb_byte buf[1024];
2440
  LONGEST len;
2441
  int i, id;
2442
 
2443
  if (gdbarch_bfd_arch_info (gdbarch)->arch != bfd_arch_spu)
2444
    error (_("\"info spu\" is only supported on the SPU architecture."));
2445
 
2446
  id = get_frame_register_unsigned (frame, SPU_ID_REGNUM);
2447
 
2448
  xsnprintf (annex, sizeof annex, "%d/proxydma_info", id);
2449
  len = target_read (&current_target, TARGET_OBJECT_SPU, annex,
2450
                     buf, 0, 24 + 8 * 32);
2451
  if (len <= 0)
2452
    error (_("Could not read proxydma_info."));
2453
 
2454
  dma_info_type = extract_unsigned_integer (buf, 8, byte_order);
2455
  dma_info_mask = extract_unsigned_integer (buf + 8, 8, byte_order);
2456
  dma_info_status = extract_unsigned_integer (buf + 16, 8, byte_order);
2457
 
2458
  chain = make_cleanup_ui_out_tuple_begin_end (uiout, "SPUInfoProxyDMA");
2459
 
2460
  if (ui_out_is_mi_like_p (uiout))
2461
    {
2462
      ui_out_field_fmt (uiout, "proxydma_info_type", "0x%s",
2463
                        phex_nz (dma_info_type, 4));
2464
      ui_out_field_fmt (uiout, "proxydma_info_mask", "0x%s",
2465
                        phex_nz (dma_info_mask, 4));
2466
      ui_out_field_fmt (uiout, "proxydma_info_status", "0x%s",
2467
                        phex_nz (dma_info_status, 4));
2468
    }
2469
  else
2470
    {
2471
      const char *query_msg;
2472
 
2473
      switch (dma_info_type & 3)
2474
        {
2475
        case 0: query_msg = _("no query pending"); break;
2476
        case 1: query_msg = _("'any' query pending"); break;
2477
        case 2: query_msg = _("'all' query pending"); break;
2478
        default: query_msg = _("undefined query type"); break;
2479
        }
2480
 
2481
      printf_filtered (_("Tag-Group Status  0x%s\n"),
2482
                       phex (dma_info_status, 4));
2483
      printf_filtered (_("Tag-Group Mask    0x%s (%s)\n"),
2484
                       phex (dma_info_mask, 4), query_msg);
2485
      printf_filtered ("\n");
2486
    }
2487
 
2488
  info_spu_dma_cmdlist (buf + 24, 8, byte_order);
2489
  do_cleanups (chain);
2490
}
2491
 
2492
static void
2493
info_spu_command (char *args, int from_tty)
2494
{
2495
  printf_unfiltered (_("\"info spu\" must be followed by the name of an SPU facility.\n"));
2496
  help_list (infospucmdlist, "info spu ", -1, gdb_stdout);
2497
}
2498
 
2499
 
2500
/* Root of all "set spu "/"show spu " commands.  */
2501
 
2502
static void
2503
show_spu_command (char *args, int from_tty)
2504
{
2505
  help_list (showspucmdlist, "show spu ", all_commands, gdb_stdout);
2506
}
2507
 
2508
static void
2509
set_spu_command (char *args, int from_tty)
2510
{
2511
  help_list (setspucmdlist, "set spu ", all_commands, gdb_stdout);
2512
}
2513
 
2514
static void
2515
show_spu_stop_on_load (struct ui_file *file, int from_tty,
2516
                       struct cmd_list_element *c, const char *value)
2517
{
2518
  fprintf_filtered (file, _("Stopping for new SPE threads is %s.\n"),
2519
                    value);
2520
}
2521
 
2522
static void
2523
show_spu_auto_flush_cache (struct ui_file *file, int from_tty,
2524
                           struct cmd_list_element *c, const char *value)
2525
{
2526
  fprintf_filtered (file, _("Automatic software-cache flush is %s.\n"),
2527
                    value);
2528
}
2529
 
2530
 
2531
/* Set up gdbarch struct.  */
2532
 
2533
static struct gdbarch *
2534
spu_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
2535
{
2536
  struct gdbarch *gdbarch;
2537
  struct gdbarch_tdep *tdep;
2538
  int id = -1;
2539
 
2540
  /* Which spufs ID was requested as address space?  */
2541
  if (info.tdep_info)
2542
    id = *(int *)info.tdep_info;
2543
  /* For objfile architectures of SPU solibs, decode the ID from the name.
2544
     This assumes the filename convention employed by solib-spu.c.  */
2545
  else if (info.abfd)
2546
    {
2547
      char *name = strrchr (info.abfd->filename, '@');
2548
      if (name)
2549
        sscanf (name, "@0x%*x <%d>", &id);
2550
    }
2551
 
2552
  /* Find a candidate among extant architectures.  */
2553
  for (arches = gdbarch_list_lookup_by_info (arches, &info);
2554
       arches != NULL;
2555
       arches = gdbarch_list_lookup_by_info (arches->next, &info))
2556
    {
2557
      tdep = gdbarch_tdep (arches->gdbarch);
2558
      if (tdep && tdep->id == id)
2559
        return arches->gdbarch;
2560
    }
2561
 
2562
  /* None found, so create a new architecture.  */
2563
  tdep = XCALLOC (1, struct gdbarch_tdep);
2564
  tdep->id = id;
2565
  gdbarch = gdbarch_alloc (&info, tdep);
2566
 
2567
  /* Disassembler.  */
2568
  set_gdbarch_print_insn (gdbarch, gdb_print_insn_spu);
2569
 
2570
  /* Registers.  */
2571
  set_gdbarch_num_regs (gdbarch, SPU_NUM_REGS);
2572
  set_gdbarch_num_pseudo_regs (gdbarch, SPU_NUM_PSEUDO_REGS);
2573
  set_gdbarch_sp_regnum (gdbarch, SPU_SP_REGNUM);
2574
  set_gdbarch_pc_regnum (gdbarch, SPU_PC_REGNUM);
2575
  set_gdbarch_read_pc (gdbarch, spu_read_pc);
2576
  set_gdbarch_write_pc (gdbarch, spu_write_pc);
2577
  set_gdbarch_register_name (gdbarch, spu_register_name);
2578
  set_gdbarch_register_type (gdbarch, spu_register_type);
2579
  set_gdbarch_pseudo_register_read (gdbarch, spu_pseudo_register_read);
2580
  set_gdbarch_pseudo_register_write (gdbarch, spu_pseudo_register_write);
2581
  set_gdbarch_value_from_register (gdbarch, spu_value_from_register);
2582
  set_gdbarch_register_reggroup_p (gdbarch, spu_register_reggroup_p);
2583
 
2584
  /* Data types.  */
2585
  set_gdbarch_char_signed (gdbarch, 0);
2586
  set_gdbarch_ptr_bit (gdbarch, 32);
2587
  set_gdbarch_addr_bit (gdbarch, 32);
2588
  set_gdbarch_short_bit (gdbarch, 16);
2589
  set_gdbarch_int_bit (gdbarch, 32);
2590
  set_gdbarch_long_bit (gdbarch, 32);
2591
  set_gdbarch_long_long_bit (gdbarch, 64);
2592
  set_gdbarch_float_bit (gdbarch, 32);
2593
  set_gdbarch_double_bit (gdbarch, 64);
2594
  set_gdbarch_long_double_bit (gdbarch, 64);
2595
  set_gdbarch_float_format (gdbarch, floatformats_ieee_single);
2596
  set_gdbarch_double_format (gdbarch, floatformats_ieee_double);
2597
  set_gdbarch_long_double_format (gdbarch, floatformats_ieee_double);
2598
 
2599
  /* Address handling.  */
2600
  set_gdbarch_address_to_pointer (gdbarch, spu_address_to_pointer);
2601
  set_gdbarch_pointer_to_address (gdbarch, spu_pointer_to_address);
2602
  set_gdbarch_integer_to_address (gdbarch, spu_integer_to_address);
2603
  set_gdbarch_address_class_type_flags (gdbarch, spu_address_class_type_flags);
2604
  set_gdbarch_address_class_type_flags_to_name
2605
    (gdbarch, spu_address_class_type_flags_to_name);
2606
  set_gdbarch_address_class_name_to_type_flags
2607
    (gdbarch, spu_address_class_name_to_type_flags);
2608
 
2609
 
2610
  /* Inferior function calls.  */
2611
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);
2612
  set_gdbarch_frame_align (gdbarch, spu_frame_align);
2613
  set_gdbarch_frame_red_zone_size (gdbarch, 2000);
2614
  set_gdbarch_push_dummy_code (gdbarch, spu_push_dummy_code);
2615
  set_gdbarch_push_dummy_call (gdbarch, spu_push_dummy_call);
2616
  set_gdbarch_dummy_id (gdbarch, spu_dummy_id);
2617
  set_gdbarch_return_value (gdbarch, spu_return_value);
2618
 
2619
  /* Frame handling.  */
2620
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);
2621
  frame_unwind_append_unwinder (gdbarch, &spu_frame_unwind);
2622
  frame_base_set_default (gdbarch, &spu_frame_base);
2623
  set_gdbarch_unwind_pc (gdbarch, spu_unwind_pc);
2624
  set_gdbarch_unwind_sp (gdbarch, spu_unwind_sp);
2625
  set_gdbarch_virtual_frame_pointer (gdbarch, spu_virtual_frame_pointer);
2626
  set_gdbarch_frame_args_skip (gdbarch, 0);
2627
  set_gdbarch_skip_prologue (gdbarch, spu_skip_prologue);
2628
  set_gdbarch_in_function_epilogue_p (gdbarch, spu_in_function_epilogue_p);
2629
 
2630
  /* Cell/B.E. cross-architecture unwinder support.  */
2631
  frame_unwind_prepend_unwinder (gdbarch, &spu2ppu_unwind);
2632
 
2633
  /* Breakpoints.  */
2634
  set_gdbarch_decr_pc_after_break (gdbarch, 4);
2635
  set_gdbarch_breakpoint_from_pc (gdbarch, spu_breakpoint_from_pc);
2636
  set_gdbarch_cannot_step_breakpoint (gdbarch, 1);
2637
  set_gdbarch_software_single_step (gdbarch, spu_software_single_step);
2638
  set_gdbarch_get_longjmp_target (gdbarch, spu_get_longjmp_target);
2639
 
2640
  /* Overlays.  */
2641
  set_gdbarch_overlay_update (gdbarch, spu_overlay_update);
2642
 
2643
  return gdbarch;
2644
}
2645
 
2646
/* Provide a prototype to silence -Wmissing-prototypes.  */
2647
extern initialize_file_ftype _initialize_spu_tdep;
2648
 
2649
void
2650
_initialize_spu_tdep (void)
2651
{
2652
  register_gdbarch_init (bfd_arch_spu, spu_gdbarch_init);
2653
 
2654
  /* Add ourselves to objfile event chain.  */
2655
  observer_attach_new_objfile (spu_overlay_new_objfile);
2656
  spu_overlay_data = register_objfile_data ();
2657
 
2658
  /* Install spu stop-on-load handler.  */
2659
  observer_attach_new_objfile (spu_catch_start);
2660
 
2661
  /* Add ourselves to normal_stop event chain.  */
2662
  observer_attach_normal_stop (spu_attach_normal_stop);
2663
 
2664
  /* Add root prefix command for all "set spu"/"show spu" commands.  */
2665
  add_prefix_cmd ("spu", no_class, set_spu_command,
2666
                  _("Various SPU specific commands."),
2667
                  &setspucmdlist, "set spu ", 0, &setlist);
2668
  add_prefix_cmd ("spu", no_class, show_spu_command,
2669
                  _("Various SPU specific commands."),
2670
                  &showspucmdlist, "show spu ", 0, &showlist);
2671
 
2672
  /* Toggle whether or not to add a temporary breakpoint at the "main"
2673
     function of new SPE contexts.  */
2674
  add_setshow_boolean_cmd ("stop-on-load", class_support,
2675
                          &spu_stop_on_load_p, _("\
2676
Set whether to stop for new SPE threads."),
2677
                           _("\
2678
Show whether to stop for new SPE threads."),
2679
                           _("\
2680
Use \"on\" to give control to the user when a new SPE thread\n\
2681
enters its \"main\" function.\n\
2682
Use \"off\" to disable stopping for new SPE threads."),
2683
                          NULL,
2684
                          show_spu_stop_on_load,
2685
                          &setspucmdlist, &showspucmdlist);
2686
 
2687
  /* Toggle whether or not to automatically flush the software-managed
2688
     cache whenever SPE execution stops.  */
2689
  add_setshow_boolean_cmd ("auto-flush-cache", class_support,
2690
                          &spu_auto_flush_cache_p, _("\
2691
Set whether to automatically flush the software-managed cache."),
2692
                           _("\
2693
Show whether to automatically flush the software-managed cache."),
2694
                           _("\
2695
Use \"on\" to automatically flush the software-managed cache\n\
2696
whenever SPE execution stops.\n\
2697
Use \"off\" to never automatically flush the software-managed cache."),
2698
                          NULL,
2699
                          show_spu_auto_flush_cache,
2700
                          &setspucmdlist, &showspucmdlist);
2701
 
2702
  /* Add root prefix command for all "info spu" commands.  */
2703
  add_prefix_cmd ("spu", class_info, info_spu_command,
2704
                  _("Various SPU specific commands."),
2705
                  &infospucmdlist, "info spu ", 0, &infolist);
2706
 
2707
  /* Add various "info spu" commands.  */
2708
  add_cmd ("event", class_info, info_spu_event_command,
2709
           _("Display SPU event facility status.\n"),
2710
           &infospucmdlist);
2711
  add_cmd ("signal", class_info, info_spu_signal_command,
2712
           _("Display SPU signal notification facility status.\n"),
2713
           &infospucmdlist);
2714
  add_cmd ("mailbox", class_info, info_spu_mailbox_command,
2715
           _("Display SPU mailbox facility status.\n"),
2716
           &infospucmdlist);
2717
  add_cmd ("dma", class_info, info_spu_dma_command,
2718
           _("Display MFC DMA status.\n"),
2719
           &infospucmdlist);
2720
  add_cmd ("proxydma", class_info, info_spu_proxydma_command,
2721
           _("Display MFC Proxy-DMA status.\n"),
2722
           &infospucmdlist);
2723
}
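/* With the registrations above in place, the user-visible surface is,
   for example:

       (gdb) set spu stop-on-load on
       (gdb) set spu auto-flush-cache off
       (gdb) show spu stop-on-load
       (gdb) info spu event
       (gdb) info spu dma

   where the "info spu" subcommands require the selected frame to belong
   to an SPU context.  */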
