OpenCores
URL https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk

Subversion Repositories openrisc_me

[/] [openrisc/] [trunk/] [gnu-src/] [gdb-7.1/] [gdb/] [amd64-tdep.c] - Blame information for rev 326

Line No. Rev Author Line
1 227 jeremybenn
/* Target-dependent code for AMD64.
2
 
3
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4
   Free Software Foundation, Inc.
5
 
6
   Contributed by Jiri Smid, SuSE Labs.
7
 
8
   This file is part of GDB.
9
 
10
   This program is free software; you can redistribute it and/or modify
11
   it under the terms of the GNU General Public License as published by
12
   the Free Software Foundation; either version 3 of the License, or
13
   (at your option) any later version.
14
 
15
   This program is distributed in the hope that it will be useful,
16
   but WITHOUT ANY WARRANTY; without even the implied warranty of
17
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18
   GNU General Public License for more details.
19
 
20
   You should have received a copy of the GNU General Public License
21
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
22
 
23
#include "defs.h"
24
#include "opcode/i386.h"
25
#include "dis-asm.h"
26
#include "arch-utils.h"
27
#include "block.h"
28
#include "dummy-frame.h"
29
#include "frame.h"
30
#include "frame-base.h"
31
#include "frame-unwind.h"
32
#include "inferior.h"
33
#include "gdbcmd.h"
34
#include "gdbcore.h"
35
#include "objfiles.h"
36
#include "regcache.h"
37
#include "regset.h"
38
#include "symfile.h"
39
 
40
#include "gdb_assert.h"
41
 
42
#include "amd64-tdep.h"
43
#include "i387-tdep.h"
44
 
45
/* Note that the AMD64 architecture was previously known as x86-64.
46
   The latter is (forever) engraved into the canonical system name as
47
   returned by config.guess, and used as the name for the AMD64 port
48
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
49
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
50
   x86_64_-prefix since it's so much easier to type.  */
51
 
52
/* Register information.  */
53
 
54
static const char *amd64_register_names[] =
55
{
56
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
57
 
58
  /* %r8 is indeed register number 8.  */
59
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
60
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
61
 
62
  /* %st0 is register number 24.  */
63
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
64
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
65
 
66
  /* %xmm0 is register number 40.  */
67
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
68
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
69
  "mxcsr",
70
};
71
 
72
/* Total number of registers.  */
73
#define AMD64_NUM_REGS  ARRAY_SIZE (amd64_register_names)
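/* With the names above this works out to 57: the 16 general-purpose
   registers, %rip, %eflags and the six segment registers, the 16 x87
   registers (%st0-%st7 plus their eight control/status registers), the
   16 %xmm registers, and %mxcsr.  */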
74
 
75
/* The registers used to pass integer arguments during a function call.  */
76
static int amd64_dummy_call_integer_regs[] =
77
{
78
  AMD64_RDI_REGNUM,             /* %rdi */
79
  AMD64_RSI_REGNUM,             /* %rsi */
80
  AMD64_RDX_REGNUM,             /* %rdx */
81
  AMD64_RCX_REGNUM,             /* %rcx */
82
  8,                            /* %r8 */
83
  9                             /* %r9 */
84
};
85
 
86
/* Return the name of register REGNUM.  */
87
 
88
const char *
89
amd64_register_name (struct gdbarch *gdbarch, int regnum)
90
{
91
  if (regnum >= 0 && regnum < AMD64_NUM_REGS)
92
    return amd64_register_names[regnum];
93
 
94
  return NULL;
95
}
96
 
97
/* Return the GDB type object for the "standard" data type of data in
98
   register REGNUM. */
99
 
100
struct type *
101
amd64_register_type (struct gdbarch *gdbarch, int regnum)
102
{
103
  if (regnum >= AMD64_RAX_REGNUM && regnum <= AMD64_RDI_REGNUM)
104
    return builtin_type (gdbarch)->builtin_int64;
105
  if (regnum == AMD64_RBP_REGNUM || regnum == AMD64_RSP_REGNUM)
106
    return builtin_type (gdbarch)->builtin_data_ptr;
107
  if (regnum >= AMD64_R8_REGNUM && regnum <= AMD64_R15_REGNUM)
108
    return builtin_type (gdbarch)->builtin_int64;
109
  if (regnum == AMD64_RIP_REGNUM)
110
    return builtin_type (gdbarch)->builtin_func_ptr;
111
  if (regnum == AMD64_EFLAGS_REGNUM)
112
    return i386_eflags_type (gdbarch);
113
  if (regnum >= AMD64_CS_REGNUM && regnum <= AMD64_GS_REGNUM)
114
    return builtin_type (gdbarch)->builtin_int32;
115
  if (regnum >= AMD64_ST0_REGNUM && regnum <= AMD64_ST0_REGNUM + 7)
116
    return i387_ext_type (gdbarch);
117
  if (regnum >= AMD64_FCTRL_REGNUM && regnum <= AMD64_FCTRL_REGNUM + 7)
118
    return builtin_type (gdbarch)->builtin_int32;
119
  if (regnum >= AMD64_XMM0_REGNUM && regnum <= AMD64_XMM0_REGNUM + 15)
120
    return i386_sse_type (gdbarch);
121
  if (regnum == AMD64_MXCSR_REGNUM)
122
    return i386_mxcsr_type (gdbarch);
123
 
124
  internal_error (__FILE__, __LINE__, _("invalid regnum"));
125
}
126
 
127
/* DWARF Register Number Mapping as defined in the System V psABI,
128
   section 3.6.  */
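/* In that numbering %rax is DWARF register 0, %rdx is 1, %rsp is 7 and
   the return address (mapped to %rip below) is 16, which differs from
   GDB's own register ordering; hence this translation table.  */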
129
 
130
static int amd64_dwarf_regmap[] =
131
{
132
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
133
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
134
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
135
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
136
 
137
  /* Frame Pointer Register RBP.  */
138
  AMD64_RBP_REGNUM,
139
 
140
  /* Stack Pointer Register RSP.  */
141
  AMD64_RSP_REGNUM,
142
 
143
  /* Extended Integer Registers 8 - 15.  */
144
  8, 9, 10, 11, 12, 13, 14, 15,
145
 
146
  /* Return Address RA.  Mapped to RIP.  */
147
  AMD64_RIP_REGNUM,
148
 
149
  /* SSE Registers 0 - 7.  */
150
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
151
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
152
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
153
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
154
 
155
  /* Extended SSE Registers 8 - 15.  */
156
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
157
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
158
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
159
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
160
 
161
  /* Floating Point Registers 0-7.  */
162
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
163
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
164
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
165
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
166
 
167
  /* Control and Status Flags Register.  */
168
  AMD64_EFLAGS_REGNUM,
169
 
170
  /* Selector Registers.  */
171
  AMD64_ES_REGNUM,
172
  AMD64_CS_REGNUM,
173
  AMD64_SS_REGNUM,
174
  AMD64_DS_REGNUM,
175
  AMD64_FS_REGNUM,
176
  AMD64_GS_REGNUM,
177
  -1,
178
  -1,
179
 
180
  /* Segment Base Address Registers.  */
181
  -1,
182
  -1,
183
  -1,
184
  -1,
185
 
186
  /* Special Selector Registers.  */
187
  -1,
188
  -1,
189
 
190
  /* Floating Point Control Registers.  */
191
  AMD64_MXCSR_REGNUM,
192
  AMD64_FCTRL_REGNUM,
193
  AMD64_FSTAT_REGNUM
194
};
195
 
196
static const int amd64_dwarf_regmap_len =
197
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
198
 
199
/* Convert DWARF register number REG to the appropriate register
200
   number used by GDB.  */
201
 
202
static int
203
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
204
{
205
  int regnum = -1;
206
 
207
  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
208
    regnum = amd64_dwarf_regmap[reg];
209
 
210
  if (regnum == -1)
211
    warning (_("Unmapped DWARF Register #%d encountered."), reg);
212
 
213
  return regnum;
214
}
215
 
216
/* Map architectural register numbers to gdb register numbers.  */
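/* "Architectural" here means the hardware encoding used in the ModRM,
   SIB and REX fields: 0 is %rax, 1 is %rcx, ..., 7 is %rdi, and 8-15
   are %r8-%r15.  */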
217
 
218
static const int amd64_arch_regmap[16] =
219
{
220
  AMD64_RAX_REGNUM,     /* %rax */
221
  AMD64_RCX_REGNUM,     /* %rcx */
222
  AMD64_RDX_REGNUM,     /* %rdx */
223
  AMD64_RBX_REGNUM,     /* %rbx */
224
  AMD64_RSP_REGNUM,     /* %rsp */
225
  AMD64_RBP_REGNUM,     /* %rbp */
226
  AMD64_RSI_REGNUM,     /* %rsi */
227
  AMD64_RDI_REGNUM,     /* %rdi */
228
  AMD64_R8_REGNUM,      /* %r8 */
229
  AMD64_R9_REGNUM,      /* %r9 */
230
  AMD64_R10_REGNUM,     /* %r10 */
231
  AMD64_R11_REGNUM,     /* %r11 */
232
  AMD64_R12_REGNUM,     /* %r12 */
233
  AMD64_R13_REGNUM,     /* %r13 */
234
  AMD64_R14_REGNUM,     /* %r14 */
235
  AMD64_R15_REGNUM      /* %r15 */
236
};
237
 
238
static const int amd64_arch_regmap_len =
239
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
240
 
241
/* Convert architectural register number REG to the appropriate register
242
   number used by GDB.  */
243
 
244
static int
245
amd64_arch_reg_to_regnum (int reg)
246
{
247
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
248
 
249
  return amd64_arch_regmap[reg];
250
}
251
 
252
 
253
 
254
/* Return the union class of CLASS1 and CLASS2.  See the psABI for
255
   details.  */
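/* For example, merging NO_CLASS with INTEGER yields INTEGER (rule b
   below), and merging INTEGER with SSE also yields INTEGER (rule d).  */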
256
 
257
static enum amd64_reg_class
258
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
259
{
260
  /* Rule (a): If both classes are equal, this is the resulting class.  */
261
  if (class1 == class2)
262
    return class1;
263
 
264
  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
265
     is the other class.  */
266
  if (class1 == AMD64_NO_CLASS)
267
    return class2;
268
  if (class2 == AMD64_NO_CLASS)
269
    return class1;
270
 
271
  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
272
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
273
    return AMD64_MEMORY;
274
 
275
  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
276
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
277
    return AMD64_INTEGER;
278
 
279
   /* Rule (e): If one of the classes is X87, X87UP or COMPLEX_X87,
280
     MEMORY is used as class.  */
281
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
282
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
283
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
284
    return AMD64_MEMORY;
285
 
286
  /* Rule (f): Otherwise class SSE is used.  */
287
  return AMD64_SSE;
288
}
289
 
290
/* Return non-zero if TYPE is a non-POD structure or union type.  */
291
 
292
static int
293
amd64_non_pod_p (struct type *type)
294
{
295
  /* ??? A class with a base class certainly isn't POD, but does this
296
     catch all non-POD structure types?  */
297
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
298
    return 1;
299
 
300
  return 0;
301
}
302
 
303
/* Classify TYPE according to the rules for aggregate (structures and
304
   arrays) and union types, and store the result in CLASS.  */
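/* For example, struct { double d; long l; } occupies two eightbytes:
   the first is classified SSE (from the double) and the second INTEGER
   (from the long), so CLASS ends up as { AMD64_SSE, AMD64_INTEGER }.  */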
305
 
306
static void
307
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
308
{
309
  int len = TYPE_LENGTH (type);
310
 
311
  /* 1. If the size of an object is larger than two eightbytes, or in
312
        C++, is a non-POD structure or union type, or contains
313
        unaligned fields, it has class memory.  */
314
  if (len > 16 || amd64_non_pod_p (type))
315
    {
316
      class[0] = class[1] = AMD64_MEMORY;
317
      return;
318
    }
319
 
320
  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
321
  class[0] = class[1] = AMD64_NO_CLASS;
322
 
323
  /* 3. Each field of an object is classified recursively so that
324
        always two fields are considered. The resulting class is
325
        calculated according to the classes of the fields in the
326
        eightbyte: */
327
 
328
  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
329
    {
330
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
331
 
332
      /* All fields in an array have the same type.  */
333
      amd64_classify (subtype, class);
334
      if (len > 8 && class[1] == AMD64_NO_CLASS)
335
        class[1] = class[0];
336
    }
337
  else
338
    {
339
      int i;
340
 
341
      /* Structure or union.  */
342
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
343
                  || TYPE_CODE (type) == TYPE_CODE_UNION);
344
 
345
      for (i = 0; i < TYPE_NFIELDS (type); i++)
346
        {
347
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
348
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
349
          enum amd64_reg_class subclass[2];
350
          int bitsize = TYPE_FIELD_BITSIZE (type, i);
351
          int endpos;
352
 
353
          if (bitsize == 0)
354
            bitsize = TYPE_LENGTH (subtype) * 8;
355
          endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
356
 
357
          /* Ignore static fields.  */
358
          if (field_is_static (&TYPE_FIELD (type, i)))
359
            continue;
360
 
361
          gdb_assert (pos == 0 || pos == 1);
362
 
363
          amd64_classify (subtype, subclass);
364
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
365
          if (bitsize <= 64 && pos == 0 && endpos == 1)
366
            /* This is a bit of an odd case:  We have a field that would
367
               normally fit in one of the two eightbytes, except that
368
               it is placed in a way that this field straddles them.
369
               This has been seen with a structure containing an array.
370
 
371
               The ABI is a bit unclear in this case, but we assume that
372
               this field's class (stored in subclass[0]) must also be merged
373
               into class[1].  In other words, our field has a piece stored
374
               in the second eight-byte, and thus its class applies to
375
               the second eight-byte as well.
376
 
377
               In the case where the field length exceeds 8 bytes,
378
               it should not be necessary to merge the field class
379
               into class[1].  As LEN > 8, subclass[1] is necessarily
380
               different from AMD64_NO_CLASS.  If subclass[1] is equal
381
               to subclass[0], then the normal class[1]/subclass[1]
382
               merging will take care of everything.  For subclass[1]
383
               to be different from subclass[0], I can only see the case
384
               where we have a SSE/SSEUP or X87/X87UP pair, which both
385
               use up all 16 bytes of the aggregate, and are already
386
               handled just fine (because each portion sits on its own
387
               8-byte).  */
388
            class[1] = amd64_merge_classes (class[1], subclass[0]);
389
          if (pos == 0)
390
            class[1] = amd64_merge_classes (class[1], subclass[1]);
391
        }
392
    }
393
 
394
  /* 4. Then a post merger cleanup is done:  */
395
 
396
  /* Rule (a): If one of the classes is MEMORY, the whole argument is
397
     passed in memory.  */
398
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
399
    class[0] = class[1] = AMD64_MEMORY;
400
 
401
   /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
402
     SSE.  */
403
  if (class[0] == AMD64_SSEUP)
404
    class[0] = AMD64_SSE;
405
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
406
    class[1] = AMD64_SSE;
407
}
408
 
409
/* Classify TYPE, and store the result in CLASS.  */
410
 
411
void
412
amd64_classify (struct type *type, enum amd64_reg_class class[2])
413
{
414
  enum type_code code = TYPE_CODE (type);
415
  int len = TYPE_LENGTH (type);
416
 
417
  class[0] = class[1] = AMD64_NO_CLASS;
418
 
419
  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
420
     long, long long, and pointers are in the INTEGER class.  Similarly,
421
     range types, used by languages such as Ada, are also in the INTEGER
422
     class.  */
423
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
424
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
425
       || code == TYPE_CODE_CHAR
426
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
427
      && (len == 1 || len == 2 || len == 4 || len == 8))
428
    class[0] = AMD64_INTEGER;
429
 
430
  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
431
     are in class SSE.  */
432
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
433
           && (len == 4 || len == 8))
434
    /* FIXME: __m64 .  */
435
    class[0] = AMD64_SSE;
436
 
437
  /* Arguments of types __float128, _Decimal128 and __m128 are split into
438
     two halves.  The least significant ones belong to class SSE, the most
439
     significant one to class SSEUP.  */
440
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
441
    /* FIXME: __float128, __m128.  */
442
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
443
 
444
  /* The 64-bit mantissa of arguments of type long double belongs to
445
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
446
     class X87UP.  */
447
  else if (code == TYPE_CODE_FLT && len == 16)
448
    /* Class X87 and X87UP.  */
449
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;
450
 
451
  /* Aggregates.  */
452
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
453
           || code == TYPE_CODE_UNION)
454
    amd64_classify_aggregate (type, class);
455
}
456
 
457
static enum return_value_convention
458
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
459
                    struct type *type, struct regcache *regcache,
460
                    gdb_byte *readbuf, const gdb_byte *writebuf)
461
{
462
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
463
  enum amd64_reg_class class[2];
464
  int len = TYPE_LENGTH (type);
465
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
466
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
467
  int integer_reg = 0;
468
  int sse_reg = 0;
469
  int i;
470
 
471
  gdb_assert (!(readbuf && writebuf));
472
  gdb_assert (tdep->classify);
473
 
474
  /* 1. Classify the return type with the classification algorithm.  */
475
  tdep->classify (type, class);
476
 
477
  /* 2. If the type has class MEMORY, then the caller provides space
478
     for the return value and passes the address of this storage in
479
     %rdi as if it were the first argument to the function. In effect,
480
     this address becomes a hidden first argument.
481
 
482
     On return %rax will contain the address that has been passed in
483
     by the caller in %rdi.  */
484
  if (class[0] == AMD64_MEMORY)
485
    {
486
      /* As indicated by the comment above, the ABI guarantees that we
487
         can always find the return value just after the function has
488
         returned.  */
489
 
490
      if (readbuf)
491
        {
492
          ULONGEST addr;
493
 
494
          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
495
          read_memory (addr, readbuf, TYPE_LENGTH (type));
496
        }
497
 
498
      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
499
    }
500
 
501
  gdb_assert (class[1] != AMD64_MEMORY);
502
  gdb_assert (len <= 16);
503
 
504
  for (i = 0; len > 0; i++, len -= 8)
505
    {
506
      int regnum = -1;
507
      int offset = 0;
508
 
509
      switch (class[i])
510
        {
511
        case AMD64_INTEGER:
512
          /* 3. If the class is INTEGER, the next available register
513
             of the sequence %rax, %rdx is used.  */
514
          regnum = integer_regnum[integer_reg++];
515
          break;
516
 
517
        case AMD64_SSE:
518
          /* 4. If the class is SSE, the next available SSE register
519
             of the sequence %xmm0, %xmm1 is used.  */
520
          regnum = sse_regnum[sse_reg++];
521
          break;
522
 
523
        case AMD64_SSEUP:
524
          /* 5. If the class is SSEUP, the eightbyte is passed in the
525
             upper half of the last used SSE register.  */
526
          gdb_assert (sse_reg > 0);
527
          regnum = sse_regnum[sse_reg - 1];
528
          offset = 8;
529
          break;
530
 
531
        case AMD64_X87:
532
          /* 6. If the class is X87, the value is returned on the X87
533
             stack in %st0 as 80-bit x87 number.  */
534
          regnum = AMD64_ST0_REGNUM;
535
          if (writebuf)
536
            i387_return_value (gdbarch, regcache);
537
          break;
538
 
539
        case AMD64_X87UP:
540
          /* 7. If the class is X87UP, the value is returned together
541
             with the previous X87 value in %st0.  */
542
          gdb_assert (i > 0 && class[0] == AMD64_X87);
543
          regnum = AMD64_ST0_REGNUM;
544
          offset = 8;
545
          len = 2;
546
          break;
547
 
548
        case AMD64_NO_CLASS:
549
          continue;
550
 
551
        default:
552
          gdb_assert (!"Unexpected register class.");
553
        }
554
 
555
      gdb_assert (regnum != -1);
556
 
557
      if (readbuf)
558
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
559
                                readbuf + i * 8);
560
      if (writebuf)
561
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
562
                                 writebuf + i * 8);
563
    }
564
 
565
  return RETURN_VALUE_REGISTER_CONVENTION;
566
}
567
 
568
 
569
static CORE_ADDR
570
amd64_push_arguments (struct regcache *regcache, int nargs,
571
                      struct value **args, CORE_ADDR sp, int struct_return)
572
{
573
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
574
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
575
  int *integer_regs = tdep->call_dummy_integer_regs;
576
  int num_integer_regs = tdep->call_dummy_num_integer_regs;
577
 
578
  static int sse_regnum[] =
579
  {
580
    /* %xmm0 ... %xmm7 */
581
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
582
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
583
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
584
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
585
  };
586
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
587
  /* An array that mirrors the stack_args array.  For all arguments
588
     that are passed by MEMORY, if that argument's address also needs
589
     to be stored in a register, the ARG_ADDR_REGNO array will contain
590
     that register number (or a negative value otherwise).  */
591
  int *arg_addr_regno = alloca (nargs * sizeof (int));
592
  int num_stack_args = 0;
593
  int num_elements = 0;
594
  int element = 0;
595
  int integer_reg = 0;
596
  int sse_reg = 0;
597
  int i;
598
 
599
  gdb_assert (tdep->classify);
600
 
601
  /* Reserve a register for the "hidden" argument.  */
602
  if (struct_return)
603
    integer_reg++;
604
 
605
  for (i = 0; i < nargs; i++)
606
    {
607
      struct type *type = value_type (args[i]);
608
      int len = TYPE_LENGTH (type);
609
      enum amd64_reg_class class[2];
610
      int needed_integer_regs = 0;
611
      int needed_sse_regs = 0;
612
      int j;
613
 
614
      /* Classify argument.  */
615
      tdep->classify (type, class);
616
 
617
      /* Calculate the number of integer and SSE registers needed for
618
         this argument.  */
619
      for (j = 0; j < 2; j++)
620
        {
621
          if (class[j] == AMD64_INTEGER)
622
            needed_integer_regs++;
623
          else if (class[j] == AMD64_SSE)
624
            needed_sse_regs++;
625
        }
626
 
627
      /* Check whether enough registers are available, and if the
628
         argument should be passed in registers at all.  */
629
      if (integer_reg + needed_integer_regs > num_integer_regs
630
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
631
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
632
        {
633
          /* The argument will be passed on the stack.  */
634
          num_elements += ((len + 7) / 8);
635
          stack_args[num_stack_args] = args[i];
636
          /* If this is an AMD64_MEMORY argument whose address must also
637
             be passed in one of the integer registers, reserve that
638
             register and associate this value to that register so that
639
             we can store the argument address as soon as we know it.  */
640
          if (class[0] == AMD64_MEMORY
641
              && tdep->memory_args_by_pointer
642
              && integer_reg < tdep->call_dummy_num_integer_regs)
643
            arg_addr_regno[num_stack_args] =
644
              tdep->call_dummy_integer_regs[integer_reg++];
645
          else
646
            arg_addr_regno[num_stack_args] = -1;
647
          num_stack_args++;
648
        }
649
      else
650
        {
651
          /* The argument will be passed in registers.  */
652
          const gdb_byte *valbuf = value_contents (args[i]);
653
          gdb_byte buf[8];
654
 
655
          gdb_assert (len <= 16);
656
 
657
          for (j = 0; len > 0; j++, len -= 8)
658
            {
659
              int regnum = -1;
660
              int offset = 0;
661
 
662
              switch (class[j])
663
                {
664
                case AMD64_INTEGER:
665
                  regnum = integer_regs[integer_reg++];
666
                  break;
667
 
668
                case AMD64_SSE:
669
                  regnum = sse_regnum[sse_reg++];
670
                  break;
671
 
672
                case AMD64_SSEUP:
673
                  gdb_assert (sse_reg > 0);
674
                  regnum = sse_regnum[sse_reg - 1];
675
                  offset = 8;
676
                  break;
677
 
678
                default:
679
                  gdb_assert (!"Unexpected register class.");
680
                }
681
 
682
              gdb_assert (regnum != -1);
683
              memset (buf, 0, sizeof buf);
684
              memcpy (buf, valbuf + j * 8, min (len, 8));
685
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);
686
            }
687
        }
688
    }
689
 
690
  /* Allocate space for the arguments on the stack.  */
691
  sp -= num_elements * 8;
692
 
693
  /* The psABI says that "The end of the input argument area shall be
694
     aligned on a 16 byte boundary."  */
695
  sp &= ~0xf;
696
 
697
  /* Write out the arguments to the stack.  */
698
  for (i = 0; i < num_stack_args; i++)
699
    {
700
      struct type *type = value_type (stack_args[i]);
701
      const gdb_byte *valbuf = value_contents (stack_args[i]);
702
      int len = TYPE_LENGTH (type);
703
      CORE_ADDR arg_addr = sp + element * 8;
704
 
705
      write_memory (arg_addr, valbuf, len);
706
      if (arg_addr_regno[i] >= 0)
707
        {
708
          /* We also need to store the address of that argument in
709
             the given register.  */
710
          gdb_byte buf[8];
711
          enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
712
 
713
          store_unsigned_integer (buf, 8, byte_order, arg_addr);
714
          regcache_cooked_write (regcache, arg_addr_regno[i], buf);
715
        }
716
      element += ((len + 7) / 8);
717
    }
718
 
719
  /* The psABI says that "For calls that may call functions that use
720
     varargs or stdargs (prototype-less calls or calls to functions
721
     containing ellipsis (...) in the declaration) %al is used as
722
     hidden argument to specify the number of SSE registers used".  */
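  /* Only %al is actually examined by a varargs callee; since the count
     here is at most 8, writing it through the full %rax register sets
     %al to the right value.  */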
723
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
724
  return sp;
725
}
726
 
727
static CORE_ADDR
728
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
729
                       struct regcache *regcache, CORE_ADDR bp_addr,
730
                       int nargs, struct value **args,  CORE_ADDR sp,
731
                       int struct_return, CORE_ADDR struct_addr)
732
{
733
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
734
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
735
  gdb_byte buf[8];
736
 
737
  /* Pass arguments.  */
738
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
739
 
740
  /* Pass "hidden" argument".  */
741
  if (struct_return)
742
    {
743
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
744
      /* The "hidden" argument is passed throught the first argument
745
         register.  */
746
      const int arg_regnum = tdep->call_dummy_integer_regs[0];
747
 
748
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
749
      regcache_cooked_write (regcache, arg_regnum, buf);
750
    }
751
 
752
  /* Reserve some memory on the stack for the integer-parameter registers,
753
     if required by the ABI.  */
754
  if (tdep->integer_param_regs_saved_in_caller_frame)
755
    sp -= tdep->call_dummy_num_integer_regs * 8;
756
 
757
  /* Store return address.  */
758
  sp -= 8;
759
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
760
  write_memory (sp, buf, 8);
761
 
762
  /* Finally, update the stack pointer...  */
763
  store_unsigned_integer (buf, 8, byte_order, sp);
764
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
765
 
766
  /* ...and fake a frame pointer.  */
767
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
768
 
769
  return sp + 16;
770
}
771
 
772
/* Displaced instruction handling.  */
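/* To step over a breakpointed instruction without removing the
   breakpoint, GDB can copy the instruction to a scratch area,
   single-step the copy there, and then fix up registers and memory so
   the thread looks as if the instruction had executed in its original
   place.  The routines below implement the copy and fix-up steps for
   amd64.  */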
773
 
774
/* A partially decoded instruction.
775
   This contains enough details for displaced stepping purposes.  */
776
 
777
struct amd64_insn
778
{
779
  /* The number of opcode bytes.  */
780
  int opcode_len;
781
  /* The offset of the rex prefix or -1 if not present.  */
782
  int rex_offset;
783
  /* The offset to the first opcode byte.  */
784
  int opcode_offset;
785
  /* The offset to the modrm byte or -1 if not present.  */
786
  int modrm_offset;
787
 
788
  /* The raw instruction.  */
789
  gdb_byte *raw_insn;
790
};
791
 
792
struct displaced_step_closure
793
{
794
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
795
  int tmp_used;
796
  int tmp_regno;
797
  ULONGEST tmp_save;
798
 
799
  /* Details of the instruction.  */
800
  struct amd64_insn insn_details;
801
 
802
  /* Amount of space allocated to insn_buf.  */
803
  int max_len;
804
 
805
  /* The possibly modified insn.
806
     This is a variable-length field.  */
807
  gdb_byte insn_buf[1];
808
};
809
 
810
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
811
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
812
   at which point delete these in favor of libopcodes' versions).  */
813
 
814
static const unsigned char onebyte_has_modrm[256] = {
815
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
816
  /*       -------------------------------        */
817
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
818
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
819
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
820
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
821
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
822
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
823
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
824
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
825
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
826
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
827
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
828
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
829
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
830
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
831
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
832
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
833
  /*       -------------------------------        */
834
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
835
};
836
 
837
static const unsigned char twobyte_has_modrm[256] = {
838
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
839
  /*       -------------------------------        */
840
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
841
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
842
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
843
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
844
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
845
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
846
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
847
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
848
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
849
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
850
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
851
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
852
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
853
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
854
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
855
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
856
  /*       -------------------------------        */
857
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
858
};
859
 
860
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
861
 
862
static int
863
rex_prefix_p (gdb_byte pfx)
864
{
865
  return REX_PREFIX_P (pfx);
866
}
867
 
868
/* Skip the legacy instruction prefixes in INSN.
869
   We assume INSN is properly sentineled so we don't have to worry
870
   about falling off the end of the buffer.  */
871
 
872
static gdb_byte *
873
amd64_skip_prefixes (gdb_byte *insn)
874
{
875
  while (1)
876
    {
877
      switch (*insn)
878
        {
879
        case DATA_PREFIX_OPCODE:
880
        case ADDR_PREFIX_OPCODE:
881
        case CS_PREFIX_OPCODE:
882
        case DS_PREFIX_OPCODE:
883
        case ES_PREFIX_OPCODE:
884
        case FS_PREFIX_OPCODE:
885
        case GS_PREFIX_OPCODE:
886
        case SS_PREFIX_OPCODE:
887
        case LOCK_PREFIX_OPCODE:
888
        case REPE_PREFIX_OPCODE:
889
        case REPNE_PREFIX_OPCODE:
890
          ++insn;
891
          continue;
892
        default:
893
          break;
894
        }
895
      break;
896
    }
897
 
898
  return insn;
899
}
900
 
901
/* fprintf-function for amd64_insn_length.
902
   This function is a nop, we don't want to print anything, we just want to
903
   compute the length of the insn.  */
904
 
905
static int ATTR_FORMAT (printf, 2, 3)
906
amd64_insn_length_fprintf (void *stream, const char *format, ...)
907
{
908
  return 0;
909
}
910
 
911
/* Initialize a struct disassemble_info for amd64_insn_length.  */
912
 
913
static void
914
amd64_insn_length_init_dis (struct gdbarch *gdbarch,
915
                            struct disassemble_info *di,
916
                            const gdb_byte *insn, int max_len,
917
                            CORE_ADDR addr)
918
{
919
  init_disassemble_info (di, NULL, amd64_insn_length_fprintf);
920
 
921
  /* init_disassemble_info installs buffer_read_memory, etc.
922
     so we don't need to do that here.
923
     The cast is necessary until disassemble_info is const-ified.  */
924
  di->buffer = (gdb_byte *) insn;
925
  di->buffer_length = max_len;
926
  di->buffer_vma = addr;
927
 
928
  di->arch = gdbarch_bfd_arch_info (gdbarch)->arch;
929
  di->mach = gdbarch_bfd_arch_info (gdbarch)->mach;
930
  di->endian = gdbarch_byte_order (gdbarch);
931
  di->endian_code = gdbarch_byte_order_for_code (gdbarch);
932
 
933
  disassemble_init_for_target (di);
934
}
935
 
936
/* Return the length in bytes of INSN.
937
   MAX_LEN is the size of the buffer containing INSN.
938
   libopcodes currently doesn't export a utility to compute the
939
   instruction length, so use the disassembler until then.  */
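/* The disassembler's return value is the number of bytes in the
   decoded instruction; with the do-nothing fprintf routine above, that
   length is the only output we get, which is all we need here.  */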
940
 
941
static int
942
amd64_insn_length (struct gdbarch *gdbarch,
943
                   const gdb_byte *insn, int max_len, CORE_ADDR addr)
944
{
945
  struct disassemble_info di;
946
 
947
  amd64_insn_length_init_dis (gdbarch, &di, insn, max_len, addr);
948
 
949
  return gdbarch_print_insn (gdbarch, addr, &di);
950
}
951
 
952
/* Return an integer register (other than RSP) that is unused as an input
953
   operand in INSN.
954
   In order to not require adding a rex prefix if the insn doesn't already
955
   have one, the result is restricted to RAX ... RDI, sans RSP.
956
   The register numbering of the result follows architecture ordering,
957
   e.g. RDI = 7.  */
958
 
959
static int
960
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
961
{
962
  /* 1 bit for each reg */
963
  int used_regs_mask = 0;
964
 
965
  /* There can be at most 3 int regs used as inputs in an insn, and we have
966
     7 to choose from (RAX ... RDI, sans RSP).
967
     This allows us to take a conservative approach and keep things simple.
968
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
969
     that implicitly specify RAX.  */
970
 
971
  /* Avoid RAX.  */
972
  used_regs_mask |= 1 << EAX_REG_NUM;
973
  /* Similarly avoid RDX, an implicit operand in divides.  */
974
  used_regs_mask |= 1 << EDX_REG_NUM;
975
  /* Avoid RSP.  */
976
  used_regs_mask |= 1 << ESP_REG_NUM;
977
 
978
  /* If the opcode is one byte long and there's no ModRM byte,
979
     assume the opcode specifies a register.  */
980
  if (details->opcode_len == 1 && details->modrm_offset == -1)
981
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
982
 
983
  /* Mark used regs in the modrm/sib bytes.  */
984
  if (details->modrm_offset != -1)
985
    {
986
      int modrm = details->raw_insn[details->modrm_offset];
987
      int mod = MODRM_MOD_FIELD (modrm);
988
      int reg = MODRM_REG_FIELD (modrm);
989
      int rm = MODRM_RM_FIELD (modrm);
990
      int have_sib = mod != 3 && rm == 4;
991
 
992
      /* Assume the reg field of the modrm byte specifies a register.  */
993
      used_regs_mask |= 1 << reg;
994
 
995
      if (have_sib)
996
        {
997
          int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
998
          int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
999
          used_regs_mask |= 1 << base;
1000
          used_regs_mask |= 1 << index;
1001
        }
1002
      else
1003
        {
1004
          used_regs_mask |= 1 << rm;
1005
        }
1006
    }
1007
 
1008
  gdb_assert (used_regs_mask < 256);
1009
  gdb_assert (used_regs_mask != 255);
1010
 
1011
  /* Finally, find a free reg.  */
1012
  {
1013
    int i;
1014
 
1015
    for (i = 0; i < 8; ++i)
1016
      {
1017
        if (! (used_regs_mask & (1 << i)))
1018
          return i;
1019
      }
1020
 
1021
    /* We shouldn't get here.  */
1022
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1023
  }
1024
}
1025
 
1026
/* Extract the details of INSN that we need.  */
1027
 
1028
static void
1029
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1030
{
1031
  gdb_byte *start = insn;
1032
  int need_modrm;
1033
 
1034
  details->raw_insn = insn;
1035
 
1036
  details->opcode_len = -1;
1037
  details->rex_offset = -1;
1038
  details->opcode_offset = -1;
1039
  details->modrm_offset = -1;
1040
 
1041
  /* Skip legacy instruction prefixes.  */
1042
  insn = amd64_skip_prefixes (insn);
1043
 
1044
  /* Skip REX instruction prefix.  */
1045
  if (rex_prefix_p (*insn))
1046
    {
1047
      details->rex_offset = insn - start;
1048
      ++insn;
1049
    }
1050
 
1051
  details->opcode_offset = insn - start;
1052
 
1053
  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1054
    {
1055
      /* Two or three-byte opcode.  */
1056
      ++insn;
1057
      need_modrm = twobyte_has_modrm[*insn];
1058
 
1059
      /* Check for three-byte opcode.  */
1060
      switch (*insn)
1061
        {
1062
        case 0x24:
1063
        case 0x25:
1064
        case 0x38:
1065
        case 0x3a:
1066
        case 0x7a:
1067
        case 0x7b:
1068
          ++insn;
1069
          details->opcode_len = 3;
1070
          break;
1071
        default:
1072
          details->opcode_len = 2;
1073
          break;
1074
        }
1075
    }
1076
  else
1077
    {
1078
      /* One-byte opcode.  */
1079
      need_modrm = onebyte_has_modrm[*insn];
1080
      details->opcode_len = 1;
1081
    }
1082
 
1083
  if (need_modrm)
1084
    {
1085
      ++insn;
1086
      details->modrm_offset = insn - start;
1087
    }
1088
}
1089
 
1090
/* Update %rip-relative addressing in INSN.
1091
 
1092
   %rip-relative addressing only uses a 32-bit displacement.
1093
   32 bits is not necessarily enough to cover the distance between where
1094
   the real instruction is and where its copy is.
1095
   Convert the insn to use base+disp addressing.
1096
   We set base = pc + insn_length so we can leave disp unchanged.  */
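/* For example, "mov 0x200(%rip), %rax" (48 8b 05 00 02 00 00) would be
   rewritten as "mov 0x200(%rcx), %rax" (48 8b 81 00 02 00 00), assuming
   %rcx is the unused register picked below, with %rcx temporarily
   holding the original instruction's address plus its length.  */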
1097
 
1098
static void
1099
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1100
              CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1101
{
1102
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1103
  const struct amd64_insn *insn_details = &dsc->insn_details;
1104
  int modrm_offset = insn_details->modrm_offset;
1105
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1106
  CORE_ADDR rip_base;
1107
  int32_t disp;
1108
  int insn_length;
1109
  int arch_tmp_regno, tmp_regno;
1110
  ULONGEST orig_value;
1111
 
1112
  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
1113
  ++insn;
1114
 
1115
  /* Compute the rip-relative address.  */
1116
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
1117
  insn_length = amd64_insn_length (gdbarch, dsc->insn_buf, dsc->max_len, from);
1118
  rip_base = from + insn_length;
1119
 
1120
  /* We need a register to hold the address.
1121
     Pick one not used in the insn.
1122
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
1123
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1124
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1125
 
1126
  /* REX.B should be unset as we were using rip-relative addressing,
1127
     but ensure it's unset anyway, tmp_regno is not r8-r15.  */
1128
  if (insn_details->rex_offset != -1)
1129
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1130
 
1131
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1132
  dsc->tmp_regno = tmp_regno;
1133
  dsc->tmp_save = orig_value;
1134
  dsc->tmp_used = 1;
1135
 
1136
  /* Convert the ModRM field to be base+disp.  */
1137
  dsc->insn_buf[modrm_offset] &= ~0xc7;
1138
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1139
 
1140
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1141
 
1142
  if (debug_displaced)
1143
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1144
                        "displaced: using temp reg %d, old value %s, new value %s\n",
1145
                        dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1146
                        paddress (gdbarch, rip_base));
1147
}
1148
 
1149
static void
1150
fixup_displaced_copy (struct gdbarch *gdbarch,
1151
                      struct displaced_step_closure *dsc,
1152
                      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1153
{
1154
  const struct amd64_insn *details = &dsc->insn_details;
1155
 
1156
  if (details->modrm_offset != -1)
1157
    {
1158
      gdb_byte modrm = details->raw_insn[details->modrm_offset];
1159
 
1160
      if ((modrm & 0xc7) == 0x05)
1161
        {
1162
          /* The insn uses rip-relative addressing.
1163
             Deal with it.  */
1164
          fixup_riprel (gdbarch, dsc, from, to, regs);
1165
        }
1166
    }
1167
}
1168
 
1169
struct displaced_step_closure *
1170
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1171
                                CORE_ADDR from, CORE_ADDR to,
1172
                                struct regcache *regs)
1173
{
1174
  int len = gdbarch_max_insn_length (gdbarch);
1175
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1176
     continually watch for running off the end of the buffer.  */
1177
  int fixup_sentinel_space = len;
1178
  struct displaced_step_closure *dsc =
1179
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1180
  gdb_byte *buf = &dsc->insn_buf[0];
1181
  struct amd64_insn *details = &dsc->insn_details;
1182
 
1183
  dsc->tmp_used = 0;
1184
  dsc->max_len = len + fixup_sentinel_space;
1185
 
1186
  read_memory (from, buf, len);
1187
 
1188
  /* Set up the sentinel space so we don't have to worry about running
1189
     off the end of the buffer.  An excessive number of leading prefixes
1190
     could otherwise cause this.  */
1191
  memset (buf + len, 0, fixup_sentinel_space);
1192
 
1193
  amd64_get_insn_details (buf, details);
1194
 
1195
  /* GDB may get control back after the insn after the syscall.
1196
     Presumably this is a kernel bug.
1197
     If this is a syscall, make sure there's a nop afterwards.  */
1198
  {
1199
    int syscall_length;
1200
 
1201
    if (amd64_syscall_p (details, &syscall_length))
1202
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1203
  }
1204
 
1205
  /* Modify the insn to cope with the address where it will be executed from.
1206
     In particular, handle any rip-relative addressing.  */
1207
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1208
 
1209
  write_memory (to, buf, len);
1210
 
1211
  if (debug_displaced)
1212
    {
1213
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1214
                          paddress (gdbarch, from), paddress (gdbarch, to));
1215
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
1216
    }
1217
 
1218
  return dsc;
1219
}
1220
 
1221
static int
1222
amd64_absolute_jmp_p (const struct amd64_insn *details)
1223
{
1224
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1225
 
1226
  if (insn[0] == 0xff)
1227
    {
1228
      /* jump near, absolute indirect (/4) */
1229
      if ((insn[1] & 0x38) == 0x20)
1230
        return 1;
1231
 
1232
      /* jump far, absolute indirect (/5) */
1233
      if ((insn[1] & 0x38) == 0x28)
1234
        return 1;
1235
    }
1236
 
1237
  return 0;
1238
}
1239
 
1240
static int
1241
amd64_absolute_call_p (const struct amd64_insn *details)
1242
{
1243
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1244
 
1245
  if (insn[0] == 0xff)
1246
    {
1247
      /* Call near, absolute indirect (/2) */
1248
      if ((insn[1] & 0x38) == 0x10)
1249
        return 1;
1250
 
1251
      /* Call far, absolute indirect (/3) */
1252
      if ((insn[1] & 0x38) == 0x18)
1253
        return 1;
1254
    }
1255
 
1256
  return 0;
1257
}
1258
 
1259
static int
1260
amd64_ret_p (const struct amd64_insn *details)
1261
{
1262
  /* NOTE: gcc can emit "repz ; ret".  */
1263
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1264
 
1265
  switch (insn[0])
1266
    {
1267
    case 0xc2: /* ret near, pop N bytes */
1268
    case 0xc3: /* ret near */
1269
    case 0xca: /* ret far, pop N bytes */
1270
    case 0xcb: /* ret far */
1271
    case 0xcf: /* iret */
1272
      return 1;
1273
 
1274
    default:
1275
      return 0;
1276
    }
1277
}
1278
 
1279
static int
1280
amd64_call_p (const struct amd64_insn *details)
1281
{
1282
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1283
 
1284
  if (amd64_absolute_call_p (details))
1285
    return 1;
1286
 
1287
  /* call near, relative */
1288
  if (insn[0] == 0xe8)
1289
    return 1;
1290
 
1291
  return 0;
1292
}
1293
 
1294
/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1295
   length in bytes.  Otherwise, return zero.  */
1296
 
1297
static int
1298
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1299
{
1300
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1301
 
1302
  if (insn[0] == 0x0f && insn[1] == 0x05)
1303
    {
1304
      *lengthp = 2;
1305
      return 1;
1306
    }
1307
 
1308
  return 0;
1309
}
1310
 
1311
/* Fix up the state of registers and memory after having single-stepped
1312
   a displaced instruction.  */
1313
 
1314
void
1315
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1316
                            struct displaced_step_closure *dsc,
1317
                            CORE_ADDR from, CORE_ADDR to,
1318
                            struct regcache *regs)
1319
{
1320
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1321
  /* The offset we applied to the instruction's address.  */
1322
  ULONGEST insn_offset = to - from;
1323
  gdb_byte *insn = dsc->insn_buf;
1324
  const struct amd64_insn *insn_details = &dsc->insn_details;
1325
 
1326
  if (debug_displaced)
1327
    fprintf_unfiltered (gdb_stdlog,
1328
                        "displaced: fixup (%s, %s), "
1329
                        "insn = 0x%02x 0x%02x ...\n",
1330
                        paddress (gdbarch, from), paddress (gdbarch, to),
1331
                        insn[0], insn[1]);
1332
 
1333
  /* If we used a tmp reg, restore it.  */
1334
 
1335
  if (dsc->tmp_used)
1336
    {
1337
      if (debug_displaced)
1338
        fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1339
                            dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1340
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1341
    }
1342
 
1343
  /* The list of issues to contend with here is taken from
1344
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1345
     Yay for Free Software!  */
1346
 
1347
  /* Relocate the %rip back to the program's instruction stream,
1348
     if necessary.  */
1349
 
1350
  /* Except in the case of absolute or indirect jump or call
1351
     instructions, or a return instruction, the new rip is relative to
1352
     the displaced instruction; make it relative to the original insn.
1353
     Well, signal handler returns don't need relocation either, but we use the
1354
     value of %rip to recognize those; see below.  */
1355
  if (! amd64_absolute_jmp_p (insn_details)
1356
      && ! amd64_absolute_call_p (insn_details)
1357
      && ! amd64_ret_p (insn_details))
1358
    {
1359
      ULONGEST orig_rip;
1360
      int insn_len;
1361
 
1362
      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1363
 
1364
      /* A signal trampoline system call changes the %rip, resuming
1365
         execution of the main program after the signal handler has
1366
         returned.  That makes them like 'return' instructions; we
1367
         shouldn't relocate %rip.
1368
 
1369
         But most system calls don't, and we do need to relocate %rip.
1370
 
1371
         Our heuristic for distinguishing these cases: if stepping
1372
         over the system call instruction left control directly after
1373
         the instruction, then we relocate --- control almost certainly
1374
         doesn't belong in the displaced copy.  Otherwise, we assume
1375
         the instruction has put control where it belongs, and leave
1376
         it unrelocated.  Goodness help us if there are PC-relative
1377
         system calls.  */
1378
      if (amd64_syscall_p (insn_details, &insn_len)
1379
          && orig_rip != to + insn_len
1380
          /* GDB can get control back after the insn after the syscall.
1381
             Presumably this is a kernel bug.
1382
             Fixup ensures it's a nop; we add one to the length for it.  */
1383
          && orig_rip != to + insn_len + 1)
1384
        {
1385
          if (debug_displaced)
1386
            fprintf_unfiltered (gdb_stdlog,
1387
                                "displaced: syscall changed %%rip; "
1388
                                "not relocating\n");
1389
        }
1390
      else
1391
        {
1392
          ULONGEST rip = orig_rip - insn_offset;
1393
 
1394
          /* If we just stepped over a breakpoint insn, we don't backup
1395
             the pc on purpose; this is to match behaviour without
1396
             stepping.  */
1397
 
1398
          regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1399
 
1400
          if (debug_displaced)
1401
            fprintf_unfiltered (gdb_stdlog,
1402
                                "displaced: "
1403
                                "relocated %%rip from %s to %s\n",
1404
                                paddress (gdbarch, orig_rip),
1405
                                paddress (gdbarch, rip));
1406
        }
1407
    }
1408
 
1409
  /* If the instruction was PUSHFL, then the TF bit will be set in the
1410
     pushed value, and should be cleared.  We'll leave this for later,
1411
     since GDB already messes up the TF flag when stepping over a
1412
     pushfl.  */
1413
 
1414
  /* If the instruction was a call, the return address now atop the
1415
     stack is the address following the copied instruction.  We need
1416
     to make it the address following the original instruction.  */
1417
  if (amd64_call_p (insn_details))
1418
    {
1419
      ULONGEST rsp;
1420
      ULONGEST retaddr;
1421
      const ULONGEST retaddr_len = 8;
1422
 
1423
      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1424
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1425
      retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1426
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1427
 
1428
      if (debug_displaced)
1429
        fprintf_unfiltered (gdb_stdlog,
1430
                            "displaced: relocated return addr at %s "
1431
                            "to %s\n",
1432
                            paddress (gdbarch, rsp),
1433
                            paddress (gdbarch, retaddr));
1434
    }
1435
}
1436
 
1437
/* The maximum number of saved registers.  This should include %rip.  */
1438
#define AMD64_NUM_SAVED_REGS    AMD64_NUM_GREGS
1439
 
1440
struct amd64_frame_cache
1441
{
1442
  /* Base address.  */
1443
  CORE_ADDR base;
1444
  CORE_ADDR sp_offset;
1445
  CORE_ADDR pc;
1446
 
1447
  /* Saved registers.  */
1448
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1449
  CORE_ADDR saved_sp;
1450
  int saved_sp_reg;
1451
 
1452
  /* Do we have a frame?  */
1453
  int frameless_p;
1454
};
1455
 
1456
/* Initialize a frame cache.  */
1457
 
1458
static void
1459
amd64_init_frame_cache (struct amd64_frame_cache *cache)
1460
{
1461
  int i;
1462
 
1463
  /* Base address.  */
1464
  cache->base = 0;
1465
  cache->sp_offset = -8;
1466
  cache->pc = 0;
1467
 
1468
  /* Saved registers.  We initialize these to -1 since zero is a valid
1469
     offset (that's where %rbp is supposed to be stored).
1470
     The values start out as being offsets, and are later converted to
1471
     addresses (at which point -1 is interpreted as an address, still meaning
1472
     "invalid").  */
1473
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1474
    cache->saved_regs[i] = -1;
1475
  cache->saved_sp = 0;
1476
  cache->saved_sp_reg = -1;
1477
 
1478
  /* Frameless until proven otherwise.  */
1479
  cache->frameless_p = 1;
1480
}
1481
 
1482
/* Allocate and initialize a frame cache.  */
1483
 
1484
static struct amd64_frame_cache *
1485
amd64_alloc_frame_cache (void)
1486
{
1487
  struct amd64_frame_cache *cache;
1488
 
1489
  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1490
  amd64_init_frame_cache (cache);
1491
  return cache;
1492
}
1493
 
1494
/* GCC 4.4 and later can put code in the prologue to realign the
1495
   stack pointer.  Check whether PC points to such code, and update
1496
   CACHE accordingly.  Return the first instruction after the code
1497
   sequence or CURRENT_PC, whichever is smaller.  If we don't
1498
   recognize the code, return PC.  */
1499
 
1500
static CORE_ADDR
1501
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1502
                           struct amd64_frame_cache *cache)
1503
{
1504
  /* There are 2 code sequences to re-align stack before the frame
1505
     gets set up:
1506
 
1507
        1. Use a caller-saved register:
1508
 
1509
                leaq  8(%rsp), %reg
1510
                andq  $-XXX, %rsp
1511
                pushq -8(%reg)
1512
 
1513
        2. Use a callee-saved register:
1514
 
1515
                pushq %reg
1516
                leaq  16(%rsp), %reg
1517
                andq  $-XXX, %rsp
1518
                pushq -8(%reg)
1519
 
1520
     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1521
 
1522
        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
1523
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp
1524
   */
1525
 
1526
  gdb_byte buf[18];
1527
  int reg, r;
1528
  int offset, offset_and;
1529
 
1530
  if (target_read_memory (pc, buf, sizeof buf))
1531
    return pc;
1532
 
1533
  /* Check for the caller-saved register case.  The first instruction has
1534
     to be "leaq 8(%rsp), %reg".  */
1535
  if ((buf[0] & 0xfb) == 0x48
1536
      && buf[1] == 0x8d
1537
      && buf[3] == 0x24
1538
      && buf[4] == 0x8)
1539
    {
1540
      /* MOD must be binary 10 and R/M must be binary 100.  */
1541
      if ((buf[2] & 0xc7) != 0x44)
1542
        return pc;
1543
 
1544
      /* REG has register number.  */
1545
      reg = (buf[2] >> 3) & 7;
1546
 
1547
      /* Check the REX.R bit.  */
1548
      if (buf[0] == 0x4c)
1549
        reg += 8;
1550
 
1551
      offset = 5;
1552
    }
1553
  else
1554
    {
1555
      /* Check callee-saved register.  The first instruction
1556
         has to be "pushq %reg".  */
1557
      reg = 0;
1558
      if ((buf[0] & 0xf8) == 0x50)
1559
        offset = 0;
1560
      else if ((buf[0] & 0xf6) == 0x40
1561
               && (buf[1] & 0xf8) == 0x50)
1562
        {
1563
          /* Check the REX.B bit.  */
1564
          if ((buf[0] & 1) != 0)
1565
            reg = 8;
1566
 
1567
          offset = 1;
1568
        }
1569
      else
1570
        return pc;
1571
 
1572
      /* Get register.  */
1573
      reg += buf[offset] & 0x7;
1574
 
1575
      offset++;
1576
 
1577
      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
1578
      if ((buf[offset] & 0xfb) != 0x48
1579
          || buf[offset + 1] != 0x8d
1580
          || buf[offset + 3] != 0x24
1581
          || buf[offset + 4] != 0x10)
1582
        return pc;
1583
 
1584
      /* MOD must be binary 01 and R/M must be binary 100.  */
1585
      if ((buf[offset + 2] & 0xc7) != 0x44)
1586
        return pc;
1587
 
1588
      /* REG has register number.  */
1589
      r = (buf[offset + 2] >> 3) & 7;
1590
 
1591
      /* Check the REX.R bit.  */
1592
      if (buf[offset] == 0x4c)
1593
        r += 8;
1594
 
1595
      /* Registers in pushq and leaq have to be the same.  */
1596
      if (reg != r)
1597
        return pc;
1598
 
1599
      offset += 5;
1600
    }
1601
 
1602
  /* Register can't be %rsp or %rbp.  */
1603
  if (reg == 4 || reg == 5)
1604
    return pc;
1605
 
1606
  /* The next instruction has to be "andq $-XXX, %rsp".  */
1607
  if (buf[offset] != 0x48
1608
      || buf[offset + 2] != 0xe4
1609
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1610
    return pc;
1611
 
1612
  offset_and = offset;
1613
  offset += buf[offset + 1] == 0x81 ? 7 : 4;
1614
 
1615
  /* The next instruction has to be "pushq -8(%reg)".  */
1616
  r = 0;
1617
  if (buf[offset] == 0xff)
1618
    offset++;
1619
  else if ((buf[offset] & 0xf6) == 0x40
1620
           && buf[offset + 1] == 0xff)
1621
    {
1622
      /* Check the REX.B bit.  */
1623
      if ((buf[offset] & 0x1) != 0)
1624
        r = 8;
1625
      offset += 2;
1626
    }
1627
  else
1628
    return pc;
1629
 
1630
  /* 8-bit -8 is 0xf8.  REG must be binary 110 and MOD must be binary
1631
     01.  */
1632
  if (buf[offset + 1] != 0xf8
1633
      || (buf[offset] & 0xf8) != 0x70)
1634
    return pc;
1635
 
1636
  /* R/M has register.  */
1637
  r += buf[offset] & 7;
1638
 
1639
  /* Registers in leaq and pushq have to be the same.  */
1640
  if (reg != r)
1641
    return pc;
1642
 
1643
  if (current_pc > pc + offset_and)
1644
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
1645
 
1646
  return min (pc + offset + 2, current_pc);
1647
}
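
/* Illustrative sketch only, not part of the original amd64-tdep.c:
   how the ModRM byte tested above splits into its mod/reg/rm fields,
   and how a REX.R prefix extends the reg field to four bits.  For
   "leaq 8(%rsp), %reg" the checks above require mod == 01 (disp8
   addressing) and rm == 100 (a SIB byte with %rsp as base).  */

static int
amd64_example_modrm_reg (unsigned char rex, unsigned char modrm)
{
  int mod = (modrm >> 6) & 0x3; /* Addressing mode; 1 means disp8(base).  */
  int reg = (modrm >> 3) & 0x7; /* Low three bits of the register number.  */
  int rm = modrm & 0x7;         /* 4 means a SIB byte follows.  */

  if (rex & 0x4)                /* REX.R supplies bit 3 of the register.  */
    reg += 8;

  return (mod == 1 && rm == 4) ? reg : -1;
}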
1648
 
1649
/* Do a limited analysis of the prologue at PC and update CACHE
1650
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
1651
   address where the analysis stopped.
1652
 
1653
   We will handle only functions beginning with:
1654
 
1655
      pushq %rbp        0x55
1656
      movq %rsp, %rbp   0x48 0x89 0xe5
1657
 
1658
   Any function that doesn't start with this sequence will be assumed
1659
   to have no prologue and thus no valid frame pointer in %rbp.  */
1660
 
1661
static CORE_ADDR
1662
amd64_analyze_prologue (struct gdbarch *gdbarch,
1663
                        CORE_ADDR pc, CORE_ADDR current_pc,
1664
                        struct amd64_frame_cache *cache)
1665
{
1666
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1667
  static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1668
  gdb_byte buf[3];
1669
  gdb_byte op;
1670
 
1671
  if (current_pc <= pc)
1672
    return current_pc;
1673
 
1674
  pc = amd64_analyze_stack_align (pc, current_pc, cache);
1675
 
1676
  op = read_memory_unsigned_integer (pc, 1, byte_order);
1677
 
1678
  if (op == 0x55)               /* pushq %rbp */
1679
    {
1680
      /* Take into account that we've executed the `pushq %rbp' that
1681
         starts this instruction sequence.  */
1682
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
1683
      cache->sp_offset += 8;
1684
 
1685
      /* If that's all, return now.  */
1686
      if (current_pc <= pc + 1)
1687
        return current_pc;
1688
 
1689
      /* Check for `movq %rsp, %rbp'.  */
1690
      read_memory (pc + 1, buf, 3);
1691
      if (memcmp (buf, proto, 3) != 0)
1692
        return pc + 1;
1693
 
1694
      /* OK, we actually have a frame.  */
1695
      cache->frameless_p = 0;
1696
      return pc + 4;
1697
    }
1698
 
1699
  return pc;
1700
}
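
/* Illustrative sketch only, not part of the original source: the raw
   byte pattern that amd64_analyze_prologue recognizes.  A function
   with a conventional frame starts with

        55              pushq %rbp
        48 89 e5        movq  %rsp, %rbp

   so the first four code bytes can be matched directly.  */

static int
amd64_example_has_standard_prologue (const unsigned char *code)
{
  return (code[0] == 0x55               /* pushq %rbp */
          && code[1] == 0x48            /* movq %rsp, %rbp ...  */
          && code[2] == 0x89
          && code[3] == 0xe5);
}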
1701
 
1702
/* Return PC of first real instruction.  */
1703
 
1704
static CORE_ADDR
1705
amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1706
{
1707
  struct amd64_frame_cache cache;
1708
  CORE_ADDR pc;
1709
 
1710
  amd64_init_frame_cache (&cache);
1711
  pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1712
                               &cache);
1713
  if (cache.frameless_p)
1714
    return start_pc;
1715
 
1716
  return pc;
1717
}
1718
 
1719
 
1720
/* Normal frames.  */
1721
 
1722
static struct amd64_frame_cache *
1723
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
1724
{
1725
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
1726
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1727
  struct amd64_frame_cache *cache;
1728
  gdb_byte buf[8];
1729
  int i;
1730
 
1731
  if (*this_cache)
1732
    return *this_cache;
1733
 
1734
  cache = amd64_alloc_frame_cache ();
1735
  *this_cache = cache;
1736
 
1737
  cache->pc = get_frame_func (this_frame);
1738
  if (cache->pc != 0)
1739
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1740
                            cache);
1741
 
1742
  if (cache->saved_sp_reg != -1)
1743
    {
1744
      /* Stack pointer has been saved.  */
1745
      get_frame_register (this_frame, cache->saved_sp_reg, buf);
1746
      cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1747
    }
1748
 
1749
  if (cache->frameless_p)
1750
    {
1751
      /* We didn't find a valid frame.  If we're at the start of a
1752
         function, or somewhere halfway through its prologue, the function's
1753
         frame probably hasn't been fully set up yet.  Try to
1754
         reconstruct the base address for the stack frame by looking
1755
         at the stack pointer.  For truly "frameless" functions this
1756
         might work too.  */
1757
 
1758
      if (cache->saved_sp_reg != -1)
1759
        {
1760
          /* We're in the middle of aligning the stack.  */
1761
          cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1762
          cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1763
 
1764
          /* This will be added back below.  */
1765
          cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1766
        }
1767
      else
1768
        {
1769
          get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1770
          cache->base = extract_unsigned_integer (buf, 8, byte_order)
1771
                        + cache->sp_offset;
1772
        }
1773
    }
1774
  else
1775
    {
1776
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
1777
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
1778
    }
1779
 
1780
  /* Now that we have the base address for the stack frame we can
1781
     calculate the value of %rsp in the calling frame.  */
1782
  cache->saved_sp = cache->base + 16;
1783
 
1784
  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
1785
     frame we find it at the same offset from the reconstructed base
1786
     address.  If we're in the middle of aligning the stack, %rip is handled
1787
     differently (see above).  */
1788
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
1789
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;
1790
 
1791
  /* Adjust all the saved registers such that they contain addresses
1792
     instead of offsets.  */
1793
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1794
    if (cache->saved_regs[i] != -1)
1795
      cache->saved_regs[i] += cache->base;
1796
 
1797
  return cache;
1798
}
1799
 
1800
static void
1801
amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1802
                     struct frame_id *this_id)
1803
{
1804
  struct amd64_frame_cache *cache =
1805
    amd64_frame_cache (this_frame, this_cache);
1806
 
1807
  /* This marks the outermost frame.  */
1808
  if (cache->base == 0)
1809
    return;
1810
 
1811
  (*this_id) = frame_id_build (cache->base + 16, cache->pc);
1812
}
1813
 
1814
static struct value *
1815
amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
1816
                           int regnum)
1817
{
1818
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
1819
  struct amd64_frame_cache *cache =
1820
    amd64_frame_cache (this_frame, this_cache);
1821
 
1822
  gdb_assert (regnum >= 0);
1823
 
1824
  if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
1825
    return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
1826
 
1827
  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
1828
    return frame_unwind_got_memory (this_frame, regnum,
1829
                                    cache->saved_regs[regnum]);
1830
 
1831
  return frame_unwind_got_register (this_frame, regnum, regnum);
1832
}
1833
 
1834
static const struct frame_unwind amd64_frame_unwind =
1835
{
1836
  NORMAL_FRAME,
1837
  amd64_frame_this_id,
1838
  amd64_frame_prev_register,
1839
  NULL,
1840
  default_frame_sniffer
1841
};
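
/* Illustrative sketch only, not from GDB: the stack layout that the
   normal-frame unwinder above relies on.  Once CACHE->base holds the
   callee's %rbp, the interesting slots sit at fixed offsets:

        base + 16   caller's %rsp after the call returns
        base + 8    return address (caller's %rip)
        base + 0    caller's %rbp, saved by "pushq %rbp"

   so recovering the caller's state is plain pointer arithmetic.  */

struct amd64_example_caller_state
{
  unsigned long long rbp_addr;  /* Where the caller's %rbp is stored.  */
  unsigned long long rip_addr;  /* Where the return address is stored.  */
  unsigned long long rsp;       /* The caller's %rsp value itself.  */
};

static struct amd64_example_caller_state
amd64_example_unwind_normal_frame (unsigned long long base)
{
  struct amd64_example_caller_state s;

  s.rbp_addr = base;
  s.rip_addr = base + 8;
  s.rsp = base + 16;
  return s;
}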
1842
 
1843
 
1844
/* Signal trampolines.  */
1845
 
1846
/* FIXME: kettenis/20030419: Perhaps we can unify the 32-bit and
1847
   64-bit variants.  This would require using identical frame caches
1848
   on both platforms.  */
1849
 
1850
static struct amd64_frame_cache *
1851
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
1852
{
1853
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
1854
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
1855
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1856
  struct amd64_frame_cache *cache;
1857
  CORE_ADDR addr;
1858
  gdb_byte buf[8];
1859
  int i;
1860
 
1861
  if (*this_cache)
1862
    return *this_cache;
1863
 
1864
  cache = amd64_alloc_frame_cache ();
1865
 
1866
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1867
  cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
1868
 
1869
  addr = tdep->sigcontext_addr (this_frame);
1870
  gdb_assert (tdep->sc_reg_offset);
1871
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
1872
  for (i = 0; i < tdep->sc_num_regs; i++)
1873
    if (tdep->sc_reg_offset[i] != -1)
1874
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
1875
 
1876
  *this_cache = cache;
1877
  return cache;
1878
}
1879
 
1880
static void
1881
amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
1882
                              void **this_cache, struct frame_id *this_id)
1883
{
1884
  struct amd64_frame_cache *cache =
1885
    amd64_sigtramp_frame_cache (this_frame, this_cache);
1886
 
1887
  (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
1888
}
1889
 
1890
static struct value *
1891
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
1892
                                    void **this_cache, int regnum)
1893
{
1894
  /* Make sure we've initialized the cache.  */
1895
  amd64_sigtramp_frame_cache (this_frame, this_cache);
1896
 
1897
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
1898
}
1899
 
1900
static int
1901
amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
1902
                              struct frame_info *this_frame,
1903
                              void **this_cache)
1904
{
1905
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
1906
 
1907
  /* We shouldn't even bother if we don't have a sigcontext_addr
1908
     handler.  */
1909
  if (tdep->sigcontext_addr == NULL)
1910
    return 0;
1911
 
1912
  if (tdep->sigtramp_p != NULL)
1913
    {
1914
      if (tdep->sigtramp_p (this_frame))
1915
        return 1;
1916
    }
1917
 
1918
  if (tdep->sigtramp_start != 0)
1919
    {
1920
      CORE_ADDR pc = get_frame_pc (this_frame);
1921
 
1922
      gdb_assert (tdep->sigtramp_end != 0);
1923
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
1924
        return 1;
1925
    }
1926
 
1927
  return 0;
1928
}
1929
 
1930
static const struct frame_unwind amd64_sigtramp_frame_unwind =
1931
{
1932
  SIGTRAMP_FRAME,
1933
  amd64_sigtramp_frame_this_id,
1934
  amd64_sigtramp_frame_prev_register,
1935
  NULL,
1936
  amd64_sigtramp_frame_sniffer
1937
};
1938
 
1939
 
1940
static CORE_ADDR
1941
amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
1942
{
1943
  struct amd64_frame_cache *cache =
1944
    amd64_frame_cache (this_frame, this_cache);
1945
 
1946
  return cache->base;
1947
}
1948
 
1949
static const struct frame_base amd64_frame_base =
1950
{
1951
  &amd64_frame_unwind,
1952
  amd64_frame_base_address,
1953
  amd64_frame_base_address,
1954
  amd64_frame_base_address
1955
};
1956
 
1957
/* Normal frames, but in a function epilogue.  */
1958
 
1959
/* The epilogue is defined here as the 'ret' instruction, which will
1960
   follow any instruction such as 'leave' or 'pop %rbp' that destroys
1961
   the function's stack frame.  */
1962
 
1963
static int
1964
amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
1965
{
1966
  gdb_byte insn;
1967
 
1968
  if (target_read_memory (pc, &insn, 1))
1969
    return 0;   /* Can't read memory at pc.  */
1970
 
1971
  if (insn != 0xc3)     /* 'ret' instruction.  */
1972
    return 0;
1973
 
1974
  return 1;
1975
}
1976
 
1977
static int
1978
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
1979
                              struct frame_info *this_frame,
1980
                              void **this_prologue_cache)
1981
{
1982
  if (frame_relative_level (this_frame) == 0)
1983
    return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
1984
                                         get_frame_pc (this_frame));
1985
  else
1986
    return 0;
1987
}
1988
 
1989
static struct amd64_frame_cache *
1990
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
1991
{
1992
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
1993
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1994
  struct amd64_frame_cache *cache;
1995
  gdb_byte buf[8];
1996
 
1997
  if (*this_cache)
1998
    return *this_cache;
1999
 
2000
  cache = amd64_alloc_frame_cache ();
2001
  *this_cache = cache;
2002
 
2003
  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
2004
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2005
  cache->base = extract_unsigned_integer (buf, 8,
2006
                                          byte_order) + cache->sp_offset;
2007
 
2008
  /* Cache pc will be the frame func.  */
2009
  cache->pc = get_frame_pc (this_frame);
2010
 
2011
  /* The saved %rsp will be at cache->base plus 16.  */
2012
  cache->saved_sp = cache->base + 16;
2013
 
2014
  /* The saved %rip will be at cache->base plus 8.  */
2015
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2016
 
2017
  return cache;
2018
}
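
/* Illustrative sketch only, not part of the original source: at a
   'ret' instruction %rsp points directly at the return address, so
   the arithmetic above works out to base = %rsp - 8, the return
   address at base + 8 (%rsp itself), and the caller's %rsp at
   base + 16 (%rsp + 8, its value once the 'ret' has popped the
   return address).  */

static unsigned long long
amd64_example_epilogue_base (unsigned long long rsp_at_ret)
{
  /* The frame base reconstructed above is one slot below %rsp.  */
  return rsp_at_ret - 8;
}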
2019
 
2020
static void
2021
amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2022
                              void **this_cache,
2023
                              struct frame_id *this_id)
2024
{
2025
  struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2026
                                                               this_cache);
2027
 
2028
  (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2029
}
2030
 
2031
static const struct frame_unwind amd64_epilogue_frame_unwind =
2032
{
2033
  NORMAL_FRAME,
2034
  amd64_epilogue_frame_this_id,
2035
  amd64_frame_prev_register,
2036
  NULL,
2037
  amd64_epilogue_frame_sniffer
2038
};
2039
 
2040
static struct frame_id
2041
amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2042
{
2043
  CORE_ADDR fp;
2044
 
2045
  fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2046
 
2047
  return frame_id_build (fp + 16, get_frame_pc (this_frame));
2048
}
2049
 
2050
/* Align the SP to a 16-byte boundary, as the frame layout requires.  */
2051
 
2052
static CORE_ADDR
2053
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2054
{
2055
  return sp & -(CORE_ADDR)16;
2056
}
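
/* Illustrative sketch only, not part of the original source:
   "sp & -16" clears the low four bits, rounding the stack pointer
   down to the next 16-byte boundary.  For example 0x7fffffffe238
   becomes 0x7fffffffe230, while an already aligned value is returned
   unchanged.  */

static unsigned long long
amd64_example_align_16 (unsigned long long sp)
{
  return sp & ~(unsigned long long) 15;
}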
2057
 
2058
 
2059
/* Supply register REGNUM from the buffer specified by FPREGS and LEN
2060
   in the floating-point register set REGSET to register cache
2061
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */
2062
 
2063
static void
2064
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2065
                       int regnum, const void *fpregs, size_t len)
2066
{
2067
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2068
 
2069
  gdb_assert (len == tdep->sizeof_fpregset);
2070
  amd64_supply_fxsave (regcache, regnum, fpregs);
2071
}
2072
 
2073
/* Collect register REGNUM from the register cache REGCACHE and store
2074
   it in the buffer specified by FPREGS and LEN as described by the
2075
   floating-point register set REGSET.  If REGNUM is -1, do this for
2076
   all registers in REGSET.  */
2077
 
2078
static void
2079
amd64_collect_fpregset (const struct regset *regset,
2080
                        const struct regcache *regcache,
2081
                        int regnum, void *fpregs, size_t len)
2082
{
2083
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2084
 
2085
  gdb_assert (len == tdep->sizeof_fpregset);
2086
  amd64_collect_fxsave (regcache, regnum, fpregs);
2087
}
2088
 
2089
/* Return the appropriate register set for the core section identified
2090
   by SECT_NAME and SECT_SIZE.  */
2091
 
2092
static const struct regset *
2093
amd64_regset_from_core_section (struct gdbarch *gdbarch,
2094
                                const char *sect_name, size_t sect_size)
2095
{
2096
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2097
 
2098
  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2099
    {
2100
      if (tdep->fpregset == NULL)
2101
        tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2102
                                       amd64_collect_fpregset);
2103
 
2104
      return tdep->fpregset;
2105
    }
2106
 
2107
  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2108
}
2109
 
2110
 
2111
/* Figure out where the longjmp will land.  Slurp the jmp_buf out of
2112
   %rdi.  We expect its value to be a pointer to the jmp_buf structure
2113
   from which we extract the address that we will land at.  This
2114
   address is copied into PC.  This routine returns non-zero on
2115
   success.  */
2116
 
2117
static int
2118
amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2119
{
2120
  gdb_byte buf[8];
2121
  CORE_ADDR jb_addr;
2122
  struct gdbarch *gdbarch = get_frame_arch (frame);
2123
  int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2124
  int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2125
 
2126
  /* If JB_PC_OFFSET is -1, we have no way to find out where the
2127
     longjmp will land.  */
2128
  if (jb_pc_offset == -1)
2129
    return 0;
2130
 
2131
  get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2132
  jb_addr = extract_typed_address
2133
            (buf, builtin_type (gdbarch)->builtin_data_ptr);
2134
  if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2135
    return 0;
2136
 
2137
  *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2138
 
2139
  return 1;
2140
}
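
/* Illustrative sketch only, not from GDB: the two-step fetch that
   amd64_get_longjmp_target performs.  READ_U64 stands in for a
   target memory read and JB_PC_OFFSET for the OS-specific offset
   supplied by the tdep structure; both names are hypothetical.  */

static unsigned long long
amd64_example_longjmp_target (unsigned long long rdi_value,
                              int jb_pc_offset,
                              unsigned long long (*read_u64) (unsigned long long))
{
  unsigned long long jb_addr = rdi_value;       /* %rdi holds &jmp_buf.  */

  return read_u64 (jb_addr + jb_pc_offset);     /* Saved PC inside it.  */
}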
2141
 
2142
static const int amd64_record_regmap[] =
2143
{
2144
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2145
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2146
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2147
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2148
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2149
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2150
};
2151
 
2152
void
2153
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2154
{
2155
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2156
 
2157
  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2158
     floating-point registers.  */
2159
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2160
 
2161
  /* AMD64 has an FPU and 16 SSE registers.  */
2162
  tdep->st0_regnum = AMD64_ST0_REGNUM;
2163
  tdep->num_xmm_regs = 16;
2164
 
2165
  /* This is what all the fuss is about.  */
2166
  set_gdbarch_long_bit (gdbarch, 64);
2167
  set_gdbarch_long_long_bit (gdbarch, 64);
2168
  set_gdbarch_ptr_bit (gdbarch, 64);
2169
 
2170
  /* In contrast to the i386, on AMD64 a `long double' actually takes
2171
     up 128 bits, even though it's still based on the i387 extended
2172
     floating-point format which has only 80 significant bits.  */
2173
  set_gdbarch_long_double_bit (gdbarch, 128);
2174
 
2175
  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
2176
  set_gdbarch_register_name (gdbarch, amd64_register_name);
2177
  set_gdbarch_register_type (gdbarch, amd64_register_type);
2178
 
2179
  /* Register numbers of various important registers.  */
2180
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2181
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2182
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2183
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
2184
 
2185
  /* The "default" register numbering scheme for AMD64 is referred to
2186
     as the "DWARF Register Number Mapping" in the System V psABI.
2187
     The preferred debugging format for all known AMD64 targets is
2188
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
2189
     DWARF-1), but we provide the same mapping just in case.  This
2190
     mapping is also used for stabs, which GCC does support.  */
2191
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2192
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2193
 
2194
  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2195
     be in use on any of the supported AMD64 targets.  */
2196
 
2197
  /* Call dummy code.  */
2198
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2199
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
2200
  set_gdbarch_frame_red_zone_size (gdbarch, 128);
2201
  tdep->call_dummy_num_integer_regs =
2202
    ARRAY_SIZE (amd64_dummy_call_integer_regs);
2203
  tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
2204
  tdep->classify = amd64_classify;
2205
 
2206
  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
2207
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2208
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2209
 
2210
  set_gdbarch_return_value (gdbarch, amd64_return_value);
2211
 
2212
  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
2213
 
2214
  /* Avoid wiring in the MMX registers for now.  */
2215
  set_gdbarch_num_pseudo_regs (gdbarch, 0);
2216
  tdep->mm0_regnum = -1;
2217
 
2218
  tdep->record_regmap = amd64_record_regmap;
2219
 
2220
  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
2221
 
2222
  /* Hook the function epilogue frame unwinder.  This unwinder is
2223
     prepended to the list, so that it supersedes the other
2224
     unwinders in function epilogues.  */
2225
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
2226
 
2227
  /* Hook the prologue-based frame unwinders.  */
2228
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
2229
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
2230
  frame_base_set_default (gdbarch, &amd64_frame_base);
2231
 
2232
  /* If we have a register mapping, enable the generic core file support.  */
2233
  if (tdep->gregset_reg_offset)
2234
    set_gdbarch_regset_from_core_section (gdbarch,
2235
                                          amd64_regset_from_core_section);
2236
 
2237
  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
2238
}
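
/* Illustrative sketch only, not from GDB: roughly how an OS-specific
   tdep file builds on amd64_init_abi.  The field values below are
   made up for illustration; a real port (amd64-linux-tdep.c, for
   instance) fills them in from the OS's signal frame and jmp_buf
   layouts.  */

static void
amd64_example_os_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  amd64_init_abi (info, gdbarch);

  /* Hypothetical values -- an actual port supplies real ones.  */
  tdep->jb_pc_offset = 0;       /* Where the PC lives inside jmp_buf.  */
  tdep->sigtramp_p = NULL;      /* Predicate recognizing signal trampolines.  */
}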
2239
 
2240
 
2241
/* The 64-bit FXSAVE format differs from the 32-bit format in the
2242
   sense that the instruction pointer and data pointer are simply
2243
   64-bit offsets into the code segment and the data segment instead
2244
   of a selector offset pair.  The functions below store the upper 32
2245
   bits of these pointers (instead of just the 16 bits of the segment
2246
   selector).  */
2247
 
2248
/* Fill register REGNUM in REGCACHE with the appropriate
2249
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
2250
   -1, do this for all registers.  This function masks off any of the
2251
   reserved bits in *FXSAVE.  */
2252
 
2253
void
2254
amd64_supply_fxsave (struct regcache *regcache, int regnum,
2255
                     const void *fxsave)
2256
{
2257
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2258
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2259
 
2260
  i387_supply_fxsave (regcache, regnum, fxsave);
2261
 
2262
  if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2263
    {
2264
      const gdb_byte *regs = fxsave;
2265
 
2266
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2267
        regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2268
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2269
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2270
    }
2271
}
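
/* Illustrative sketch only, not part of the original source: the
   slice of the 64-bit FXSAVE area touched above.  In the 64-bit
   format the FPU instruction and operand pointers are full 64-bit
   offsets stored at byte offsets 8 and 16, so offsets 12 and 20 hold
   their upper halves, which GDB keeps in the "fiseg" and "foseg"
   slots.  The field sizes below assume a 4-byte unsigned int.  */

struct amd64_example_fxsave_prefix
{
  unsigned char control_status[8];      /* FCW, FSW, FTW, FOP (bytes 0-7).  */
  unsigned int fip_low;                 /* Byte 8: low half of the FPU IP.  */
  unsigned int fip_high;                /* Byte 12: high half of the FPU IP.  */
  unsigned int fdp_low;                 /* Byte 16: low half of the FPU DP.  */
  unsigned int fdp_high;                /* Byte 20: high half of the FPU DP.  */
};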
2272
 
2273
/* Fill register REGNUM (if it is a floating-point or SSE register) in
2274
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
2275
   all registers.  This function doesn't touch any of the reserved
2276
   bits in *FXSAVE.  */
2277
 
2278
void
2279
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2280
                      void *fxsave)
2281
{
2282
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2283
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2284
  gdb_byte *regs = fxsave;
2285
 
2286
  i387_collect_fxsave (regcache, regnum, fxsave);
2287
 
2288
  if (gdbarch_ptr_bit (gdbarch) == 64)
2289
    {
2290
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2291
        regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2292
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2293
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2294
    }
2295
}
