OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

openrisc/tags/gnu-src/gdb-7.2/gdb-7.2-or32-1.0rc3/gdb/amd64-tdep.c - Blame information for rev 513

1 330 jeremybenn
/* Target-dependent code for AMD64.
2
 
3
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4
   Free Software Foundation, Inc.
5
 
6
   Contributed by Jiri Smid, SuSE Labs.
7
 
8
   This file is part of GDB.
9
 
10
   This program is free software; you can redistribute it and/or modify
11
   it under the terms of the GNU General Public License as published by
12
   the Free Software Foundation; either version 3 of the License, or
13
   (at your option) any later version.
14
 
15
   This program is distributed in the hope that it will be useful,
16
   but WITHOUT ANY WARRANTY; without even the implied warranty of
17
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18
   GNU General Public License for more details.
19
 
20
   You should have received a copy of the GNU General Public License
21
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
22
 
23
#include "defs.h"
24
#include "opcode/i386.h"
25
#include "dis-asm.h"
26
#include "arch-utils.h"
27
#include "block.h"
28
#include "dummy-frame.h"
29
#include "frame.h"
30
#include "frame-base.h"
31
#include "frame-unwind.h"
32
#include "inferior.h"
33
#include "gdbcmd.h"
34
#include "gdbcore.h"
35
#include "objfiles.h"
36
#include "regcache.h"
37
#include "regset.h"
38
#include "symfile.h"
39
#include "disasm.h"
40
#include "gdb_assert.h"
41
 
42
#include "amd64-tdep.h"
43
#include "i387-tdep.h"
44
 
45
#include "features/i386/amd64.c"
46
#include "features/i386/amd64-avx.c"
47
 
48
/* Note that the AMD64 architecture was previously known as x86-64.
49
   The latter is (forever) engraved into the canonical system name as
50
   returned by config.guess, and used as the name for the AMD64 port
51
   of GNU/Linux.  The BSDs have renamed their ports to amd64; they
52
   don't like to shout.  For GDB we prefer the amd64_-prefix over the
53
   x86_64_-prefix since it's so much easier to type.  */
54
 
55
/* Register information.  */
56
 
57
static const char *amd64_register_names[] =
58
{
59
  "rax", "rbx", "rcx", "rdx", "rsi", "rdi", "rbp", "rsp",
60
 
61
  /* %r8 is indeed register number 8.  */
62
  "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
63
  "rip", "eflags", "cs", "ss", "ds", "es", "fs", "gs",
64
 
65
  /* %st0 is register number 24.  */
66
  "st0", "st1", "st2", "st3", "st4", "st5", "st6", "st7",
67
  "fctrl", "fstat", "ftag", "fiseg", "fioff", "foseg", "fooff", "fop",
68
 
69
  /* %xmm0 is register number 40.  */
70
  "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
71
  "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
72
  "mxcsr",
73
};
74
 
75
static const char *amd64_ymm_names[] =
76
{
77
  "ymm0", "ymm1", "ymm2", "ymm3",
78
  "ymm4", "ymm5", "ymm6", "ymm7",
79
  "ymm8", "ymm9", "ymm10", "ymm11",
80
  "ymm12", "ymm13", "ymm14", "ymm15"
81
};
82
 
83
static const char *amd64_ymmh_names[] =
84
{
85
  "ymm0h", "ymm1h", "ymm2h", "ymm3h",
86
  "ymm4h", "ymm5h", "ymm6h", "ymm7h",
87
  "ymm8h", "ymm9h", "ymm10h", "ymm11h",
88
  "ymm12h", "ymm13h", "ymm14h", "ymm15h"
89
};
90
 
91
/* The registers used to pass integer arguments during a function call.  */
92
static int amd64_dummy_call_integer_regs[] =
93
{
94
  AMD64_RDI_REGNUM,             /* %rdi */
95
  AMD64_RSI_REGNUM,             /* %rsi */
96
  AMD64_RDX_REGNUM,             /* %rdx */
97
  AMD64_RCX_REGNUM,             /* %rcx */
98
  8,                            /* %r8 */
99
  9                             /* %r9 */
100
};
101
 
102
/* DWARF Register Number Mapping as defined in the System V psABI,
103
   section 3.6.  */
104
 
105
static int amd64_dwarf_regmap[] =
106
{
107
  /* General Purpose Registers RAX, RDX, RCX, RBX, RSI, RDI.  */
108
  AMD64_RAX_REGNUM, AMD64_RDX_REGNUM,
109
  AMD64_RCX_REGNUM, AMD64_RBX_REGNUM,
110
  AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
111
 
112
  /* Frame Pointer Register RBP.  */
113
  AMD64_RBP_REGNUM,
114
 
115
  /* Stack Pointer Register RSP.  */
116
  AMD64_RSP_REGNUM,
117
 
118
  /* Extended Integer Registers 8 - 15.  */
119
  8, 9, 10, 11, 12, 13, 14, 15,
120
 
121
  /* Return Address RA.  Mapped to RIP.  */
122
  AMD64_RIP_REGNUM,
123
 
124
  /* SSE Registers 0 - 7.  */
125
  AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
126
  AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
127
  AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
128
  AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
129
 
130
  /* Extended SSE Registers 8 - 15.  */
131
  AMD64_XMM0_REGNUM + 8, AMD64_XMM0_REGNUM + 9,
132
  AMD64_XMM0_REGNUM + 10, AMD64_XMM0_REGNUM + 11,
133
  AMD64_XMM0_REGNUM + 12, AMD64_XMM0_REGNUM + 13,
134
  AMD64_XMM0_REGNUM + 14, AMD64_XMM0_REGNUM + 15,
135
 
136
  /* Floating Point Registers 0-7.  */
137
  AMD64_ST0_REGNUM + 0, AMD64_ST0_REGNUM + 1,
138
  AMD64_ST0_REGNUM + 2, AMD64_ST0_REGNUM + 3,
139
  AMD64_ST0_REGNUM + 4, AMD64_ST0_REGNUM + 5,
140
  AMD64_ST0_REGNUM + 6, AMD64_ST0_REGNUM + 7,
141
 
142
  /* Control and Status Flags Register.  */
143
  AMD64_EFLAGS_REGNUM,
144
 
145
  /* Selector Registers.  */
146
  AMD64_ES_REGNUM,
147
  AMD64_CS_REGNUM,
148
  AMD64_SS_REGNUM,
149
  AMD64_DS_REGNUM,
150
  AMD64_FS_REGNUM,
151
  AMD64_GS_REGNUM,
152
  -1,
153
  -1,
154
 
155
  /* Segment Base Address Registers.  */
156
  -1,
157
  -1,
158
  -1,
159
  -1,
160
 
161
  /* Special Selector Registers.  */
162
  -1,
163
  -1,
164
 
165
  /* Floating Point Control Registers.  */
166
  AMD64_MXCSR_REGNUM,
167
  AMD64_FCTRL_REGNUM,
168
  AMD64_FSTAT_REGNUM
169
};
170
 
171
static const int amd64_dwarf_regmap_len =
172
  (sizeof (amd64_dwarf_regmap) / sizeof (amd64_dwarf_regmap[0]));
173
 
174
/* Convert DWARF register number REG to the appropriate register
175
   number used by GDB.  */
176
 
177
static int
178
amd64_dwarf_reg_to_regnum (struct gdbarch *gdbarch, int reg)
179
{
180
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
181
  int ymm0_regnum = tdep->ymm0_regnum;
182
  int regnum = -1;
183
 
184
  if (reg >= 0 && reg < amd64_dwarf_regmap_len)
185
    regnum = amd64_dwarf_regmap[reg];
186
 
187
  if (regnum == -1)
188
    warning (_("Unmapped DWARF Register #%d encountered."), reg);
189
  else if (ymm0_regnum >= 0
190
           && i386_xmm_regnum_p (gdbarch, regnum))
191
    regnum += ymm0_regnum - I387_XMM0_REGNUM (tdep);
192
 
193
  return regnum;
194
}
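
/* Illustration of the mapping above: DWARF register 0 resolves to
   %rax, 7 to %rsp, 16 to %rip and 17 to %xmm0.  When the target
   description includes AVX state (ymm0_regnum >= 0), an XMM result
   such as 17 is further redirected to the corresponding %ymm
   pseudo register.  */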
195
 
196
/* Map architectural register numbers to gdb register numbers.  */
197
 
198
static const int amd64_arch_regmap[16] =
199
{
200
  AMD64_RAX_REGNUM,     /* %rax */
201
  AMD64_RCX_REGNUM,     /* %rcx */
202
  AMD64_RDX_REGNUM,     /* %rdx */
203
  AMD64_RBX_REGNUM,     /* %rbx */
204
  AMD64_RSP_REGNUM,     /* %rsp */
205
  AMD64_RBP_REGNUM,     /* %rbp */
206
  AMD64_RSI_REGNUM,     /* %rsi */
207
  AMD64_RDI_REGNUM,     /* %rdi */
208
  AMD64_R8_REGNUM,      /* %r8 */
209
  AMD64_R9_REGNUM,      /* %r9 */
210
  AMD64_R10_REGNUM,     /* %r10 */
211
  AMD64_R11_REGNUM,     /* %r11 */
212
  AMD64_R12_REGNUM,     /* %r12 */
213
  AMD64_R13_REGNUM,     /* %r13 */
214
  AMD64_R14_REGNUM,     /* %r14 */
215
  AMD64_R15_REGNUM      /* %r15 */
216
};
217
 
218
static const int amd64_arch_regmap_len =
219
  (sizeof (amd64_arch_regmap) / sizeof (amd64_arch_regmap[0]));
220
 
221
/* Convert architectural register number REG to the appropriate register
222
   number used by GDB.  */
223
 
224
static int
225
amd64_arch_reg_to_regnum (int reg)
226
{
227
  gdb_assert (reg >= 0 && reg < amd64_arch_regmap_len);
228
 
229
  return amd64_arch_regmap[reg];
230
}
231
 
232
/* Register names for byte pseudo-registers.  */
233
 
234
static const char *amd64_byte_names[] =
235
{
236
  "al", "bl", "cl", "dl", "sil", "dil", "bpl", "spl",
237
  "r8l", "r9l", "r10l", "r11l", "r12l", "r13l", "r14l", "r15l",
238
  "ah", "bh", "ch", "dh"
239
};
240
 
241
/* Number of lower byte registers.  */
242
#define AMD64_NUM_LOWER_BYTE_REGS 16
243
 
244
/* Register names for word pseudo-registers.  */
245
 
246
static const char *amd64_word_names[] =
247
{
248
  "ax", "bx", "cx", "dx", "si", "di", "bp", "",
249
  "r8w", "r9w", "r10w", "r11w", "r12w", "r13w", "r14w", "r15w"
250
};
251
 
252
/* Register names for dword pseudo-registers.  */
253
 
254
static const char *amd64_dword_names[] =
255
{
256
  "eax", "ebx", "ecx", "edx", "esi", "edi", "ebp", "esp",
257
  "r8d", "r9d", "r10d", "r11d", "r12d", "r13d", "r14d", "r15d"
258
};
259
 
260
/* Return the name of register REGNUM, or the empty string if it is
261
   an anonymous register. */
262
 
263
static const char *
264
amd64_register_name (struct gdbarch *gdbarch, int regnum)
265
{
266
  /* Hide the upper YMM registers.  */
267
  if (i386_ymmh_regnum_p (gdbarch, regnum))
268
    return "";
269
 
270
  return tdesc_register_name (gdbarch, regnum);
271
}
272
 
273
/* Return the name of register REGNUM.  */
274
 
275
static const char *
276
amd64_pseudo_register_name (struct gdbarch *gdbarch, int regnum)
277
{
278
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
279
  if (i386_byte_regnum_p (gdbarch, regnum))
280
    return amd64_byte_names[regnum - tdep->al_regnum];
281
  else if (i386_ymm_regnum_p (gdbarch, regnum))
282
    return amd64_ymm_names[regnum - tdep->ymm0_regnum];
283
  else if (i386_word_regnum_p (gdbarch, regnum))
284
    return amd64_word_names[regnum - tdep->ax_regnum];
285
  else if (i386_dword_regnum_p (gdbarch, regnum))
286
    return amd64_dword_names[regnum - tdep->eax_regnum];
287
  else
288
    return i386_pseudo_register_name (gdbarch, regnum);
289
}
290
 
291
static void
292
amd64_pseudo_register_read (struct gdbarch *gdbarch,
293
                            struct regcache *regcache,
294
                            int regnum, gdb_byte *buf)
295
{
296
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
297
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
298
 
299
  if (i386_byte_regnum_p (gdbarch, regnum))
300
    {
301
      int gpnum = regnum - tdep->al_regnum;
302
 
303
      /* Extract (always little endian).  */
304
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
305
        {
306
          /* Special handling for AH, BH, CH, DH.  */
307
          regcache_raw_read (regcache,
308
                             gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
309
          memcpy (buf, raw_buf + 1, 1);
310
        }
311
      else
312
        {
313
          regcache_raw_read (regcache, gpnum, raw_buf);
314
          memcpy (buf, raw_buf, 1);
315
        }
316
    }
317
  else if (i386_dword_regnum_p (gdbarch, regnum))
318
    {
319
      int gpnum = regnum - tdep->eax_regnum;
320
      /* Extract (always little endian).  */
321
      regcache_raw_read (regcache, gpnum, raw_buf);
322
      memcpy (buf, raw_buf, 4);
323
    }
324
  else
325
    i386_pseudo_register_read (gdbarch, regcache, regnum, buf);
326
}
327
 
328
static void
329
amd64_pseudo_register_write (struct gdbarch *gdbarch,
330
                             struct regcache *regcache,
331
                             int regnum, const gdb_byte *buf)
332
{
333
  gdb_byte raw_buf[MAX_REGISTER_SIZE];
334
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
335
 
336
  if (i386_byte_regnum_p (gdbarch, regnum))
337
    {
338
      int gpnum = regnum - tdep->al_regnum;
339
 
340
      if (gpnum >= AMD64_NUM_LOWER_BYTE_REGS)
341
        {
342
          /* Read ... AH, BH, CH, DH.  */
343
          regcache_raw_read (regcache,
344
                             gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
345
          /* ... Modify ... (always little endian).  */
346
          memcpy (raw_buf + 1, buf, 1);
347
          /* ... Write.  */
348
          regcache_raw_write (regcache,
349
                              gpnum - AMD64_NUM_LOWER_BYTE_REGS, raw_buf);
350
        }
351
      else
352
        {
353
          /* Read ...  */
354
          regcache_raw_read (regcache, gpnum, raw_buf);
355
          /* ... Modify ... (always little endian).  */
356
          memcpy (raw_buf, buf, 1);
357
          /* ... Write.  */
358
          regcache_raw_write (regcache, gpnum, raw_buf);
359
        }
360
    }
361
  else if (i386_dword_regnum_p (gdbarch, regnum))
362
    {
363
      int gpnum = regnum - tdep->eax_regnum;
364
 
365
      /* Read ...  */
366
      regcache_raw_read (regcache, gpnum, raw_buf);
367
      /* ... Modify ... (always little endian).  */
368
      memcpy (raw_buf, buf, 4);
369
      /* ... Write.  */
370
      regcache_raw_write (regcache, gpnum, raw_buf);
371
    }
372
  else
373
    i386_pseudo_register_write (gdbarch, regcache, regnum, buf);
374
}
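
/* Worked example of the read-modify-write above: writing pseudo
   register %ah (gpnum 16 = AMD64_NUM_LOWER_BYTE_REGS) reads all of
   %rax, overwrites byte 1 of the little-endian buffer and writes
   %rax back, while writing %al (gpnum 0) touches byte 0 instead.  */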
375
 
376
 
377
 
378
/* Return the union class of CLASS1 and CLASS2.  See the psABI for
379
   details.  */
380
 
381
static enum amd64_reg_class
382
amd64_merge_classes (enum amd64_reg_class class1, enum amd64_reg_class class2)
383
{
384
  /* Rule (a): If both classes are equal, this is the resulting class.  */
385
  if (class1 == class2)
386
    return class1;
387
 
388
  /* Rule (b): If one of the classes is NO_CLASS, the resulting class
389
     is the other class.  */
390
  if (class1 == AMD64_NO_CLASS)
391
    return class2;
392
  if (class2 == AMD64_NO_CLASS)
393
    return class1;
394
 
395
  /* Rule (c): If one of the classes is MEMORY, the result is MEMORY.  */
396
  if (class1 == AMD64_MEMORY || class2 == AMD64_MEMORY)
397
    return AMD64_MEMORY;
398
 
399
  /* Rule (d): If one of the classes is INTEGER, the result is INTEGER.  */
400
  if (class1 == AMD64_INTEGER || class2 == AMD64_INTEGER)
401
    return AMD64_INTEGER;
402
 
403
  /* Rule (e): If one of the classes is X87, X87UP, COMPLEX_X87 class,
404
     MEMORY is used as class.  */
405
  if (class1 == AMD64_X87 || class1 == AMD64_X87UP
406
      || class1 == AMD64_COMPLEX_X87 || class2 == AMD64_X87
407
      || class2 == AMD64_X87UP || class2 == AMD64_COMPLEX_X87)
408
    return AMD64_MEMORY;
409
 
410
  /* Rule (f): Otherwise class SSE is used.  */
411
  return AMD64_SSE;
412
}
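
/* Examples of the merge rules above: INTEGER merged with SSE yields
   INTEGER (rule d), NO_CLASS merged with SSE yields SSE (rule b),
   and X87 merged with SSE yields MEMORY (rule e).  */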
413
 
414
/* Return non-zero if TYPE is a non-POD structure or union type.  */
415
 
416
static int
417
amd64_non_pod_p (struct type *type)
418
{
419
  /* ??? A class with a base class certainly isn't POD, but does this
420
     catch all non-POD structure types?  */
421
  if (TYPE_CODE (type) == TYPE_CODE_STRUCT && TYPE_N_BASECLASSES (type) > 0)
422
    return 1;
423
 
424
  return 0;
425
}
426
 
427
/* Classify TYPE according to the rules for aggregate (structures and
428
   arrays) and union types, and store the result in CLASS.  */
429
 
430
static void
431
amd64_classify_aggregate (struct type *type, enum amd64_reg_class class[2])
432
{
433
  int len = TYPE_LENGTH (type);
434
 
435
  /* 1. If the size of an object is larger than two eightbytes, or in
436
        C++, is a non-POD structure or union type, or contains
437
        unaligned fields, it has class memory.  */
438
  if (len > 16 || amd64_non_pod_p (type))
439
    {
440
      class[0] = class[1] = AMD64_MEMORY;
441
      return;
442
    }
443
 
444
  /* 2. Both eightbytes get initialized to class NO_CLASS.  */
445
  class[0] = class[1] = AMD64_NO_CLASS;
446
 
447
  /* 3. Each field of an object is classified recursively so that
448
        always two fields are considered. The resulting class is
449
        calculated according to the classes of the fields in the
450
        eightbyte: */
451
 
452
  if (TYPE_CODE (type) == TYPE_CODE_ARRAY)
453
    {
454
      struct type *subtype = check_typedef (TYPE_TARGET_TYPE (type));
455
 
456
      /* All fields in an array have the same type.  */
457
      amd64_classify (subtype, class);
458
      if (len > 8 && class[1] == AMD64_NO_CLASS)
459
        class[1] = class[0];
460
    }
461
  else
462
    {
463
      int i;
464
 
465
      /* Structure or union.  */
466
      gdb_assert (TYPE_CODE (type) == TYPE_CODE_STRUCT
467
                  || TYPE_CODE (type) == TYPE_CODE_UNION);
468
 
469
      for (i = 0; i < TYPE_NFIELDS (type); i++)
470
        {
471
          struct type *subtype = check_typedef (TYPE_FIELD_TYPE (type, i));
472
          int pos = TYPE_FIELD_BITPOS (type, i) / 64;
473
          enum amd64_reg_class subclass[2];
474
          int bitsize = TYPE_FIELD_BITSIZE (type, i);
475
          int endpos;
476
 
477
          if (bitsize == 0)
478
            bitsize = TYPE_LENGTH (subtype) * 8;
479
          endpos = (TYPE_FIELD_BITPOS (type, i) + bitsize - 1) / 64;
480
 
481
          /* Ignore static fields.  */
482
          if (field_is_static (&TYPE_FIELD (type, i)))
483
            continue;
484
 
485
          gdb_assert (pos == 0 || pos == 1);
486
 
487
          amd64_classify (subtype, subclass);
488
          class[pos] = amd64_merge_classes (class[pos], subclass[0]);
489
          if (bitsize <= 64 && pos == 0 && endpos == 1)
490
            /* This is a bit of an odd case:  We have a field that would
491
               normally fit in one of the two eightbytes, except that
492
               it is placed in a way that this field straddles them.
493
               This has been seen with a structure containing an array.
494
 
495
               The ABI is a bit unclear in this case, but we assume that
496
               this field's class (stored in subclass[0]) must also be merged
497
               into class[1].  In other words, our field has a piece stored
498
               in the second eight-byte, and thus its class applies to
499
               the second eight-byte as well.
500
 
501
               In the case where the field length exceeds 8 bytes,
502
               it should not be necessary to merge the field class
503
               into class[1].  As LEN > 8, subclass[1] is necessarily
504
               different from AMD64_NO_CLASS.  If subclass[1] is equal
505
               to subclass[0], then the normal class[1]/subclass[1]
506
               merging will take care of everything.  For subclass[1]
507
               to be different from subclass[0], I can only see the case
508
               where we have a SSE/SSEUP or X87/X87UP pair, which both
509
               use up all 16 bytes of the aggregate, and are already
510
               handled just fine (because each portion sits on its own
511
               8-byte).  */
512
            class[1] = amd64_merge_classes (class[1], subclass[0]);
513
          if (pos == 0)
514
            class[1] = amd64_merge_classes (class[1], subclass[1]);
515
        }
516
    }
517
 
518
  /* 4. Then a post merger cleanup is done:  */
519
 
520
  /* Rule (a): If one of the classes is MEMORY, the whole argument is
521
     passed in memory.  */
522
  if (class[0] == AMD64_MEMORY || class[1] == AMD64_MEMORY)
523
    class[0] = class[1] = AMD64_MEMORY;
524
 
525
   /* Rule (b): If SSEUP is not preceded by SSE, it is converted to
526
     SSE.  */
527
  if (class[0] == AMD64_SSEUP)
528
    class[0] = AMD64_SSE;
529
  if (class[1] == AMD64_SSEUP && class[0] != AMD64_SSE)
530
    class[1] = AMD64_SSE;
531
}
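
/* Worked example: "struct { long l; double d; }" is 16 bytes with two
   aligned fields, so the first eightbyte classifies as INTEGER and the
   second as SSE, and the struct travels in one integer and one SSE
   register.  A struct larger than 16 bytes, or a C++ class with a base
   class (what amd64_non_pod_p detects), classifies as MEMORY and is
   passed on the stack instead.  */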
532
 
533
/* Classify TYPE, and store the result in CLASS.  */
534
 
535
void
536
amd64_classify (struct type *type, enum amd64_reg_class class[2])
537
{
538
  enum type_code code = TYPE_CODE (type);
539
  int len = TYPE_LENGTH (type);
540
 
541
  class[0] = class[1] = AMD64_NO_CLASS;
542
 
543
  /* Arguments of types (signed and unsigned) _Bool, char, short, int,
544
     long, long long, and pointers are in the INTEGER class.  Similarly,
545
     range types, used by languages such as Ada, are also in the INTEGER
546
     class.  */
547
  if ((code == TYPE_CODE_INT || code == TYPE_CODE_ENUM
548
       || code == TYPE_CODE_BOOL || code == TYPE_CODE_RANGE
549
       || code == TYPE_CODE_CHAR
550
       || code == TYPE_CODE_PTR || code == TYPE_CODE_REF)
551
      && (len == 1 || len == 2 || len == 4 || len == 8))
552
    class[0] = AMD64_INTEGER;
553
 
554
  /* Arguments of types float, double, _Decimal32, _Decimal64 and __m64
555
     are in class SSE.  */
556
  else if ((code == TYPE_CODE_FLT || code == TYPE_CODE_DECFLOAT)
557
           && (len == 4 || len == 8))
558
    /* FIXME: __m64 .  */
559
    class[0] = AMD64_SSE;
560
 
561
  /* Arguments of types __float128, _Decimal128 and __m128 are split into
562
      two halves.  The least significant one belongs to class SSE, the most
563
     significant one to class SSEUP.  */
564
  else if (code == TYPE_CODE_DECFLOAT && len == 16)
565
    /* FIXME: __float128, __m128.  */
566
    class[0] = AMD64_SSE, class[1] = AMD64_SSEUP;
567
 
568
  /* The 64-bit mantissa of arguments of type long double belongs to
569
     class X87, the 16-bit exponent plus 6 bytes of padding belongs to
570
     class X87UP.  */
571
  else if (code == TYPE_CODE_FLT && len == 16)
572
    /* Class X87 and X87UP.  */
573
    class[0] = AMD64_X87, class[1] = AMD64_X87UP;
574
 
575
  /* Aggregates.  */
576
  else if (code == TYPE_CODE_ARRAY || code == TYPE_CODE_STRUCT
577
           || code == TYPE_CODE_UNION)
578
    amd64_classify_aggregate (type, class);
579
}
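
/* For scalars the rules above give, for example: "int", "long" and
   pointers -> INTEGER; "float" and "double" -> SSE; the 16-byte
   "long double" -> X87 + X87UP; a 16-byte _Decimal128 -> SSE + SSEUP.
   A type that matches none of the cases keeps NO_CLASS and ends up
   being passed on the stack by amd64_push_arguments.  */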
580
 
581
static enum return_value_convention
582
amd64_return_value (struct gdbarch *gdbarch, struct type *func_type,
583
                    struct type *type, struct regcache *regcache,
584
                    gdb_byte *readbuf, const gdb_byte *writebuf)
585
{
586
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
587
  enum amd64_reg_class class[2];
588
  int len = TYPE_LENGTH (type);
589
  static int integer_regnum[] = { AMD64_RAX_REGNUM, AMD64_RDX_REGNUM };
590
  static int sse_regnum[] = { AMD64_XMM0_REGNUM, AMD64_XMM1_REGNUM };
591
  int integer_reg = 0;
592
  int sse_reg = 0;
593
  int i;
594
 
595
  gdb_assert (!(readbuf && writebuf));
596
  gdb_assert (tdep->classify);
597
 
598
  /* 1. Classify the return type with the classification algorithm.  */
599
  tdep->classify (type, class);
600
 
601
  /* 2. If the type has class MEMORY, then the caller provides space
602
     for the return value and passes the address of this storage in
603
     %rdi as if it were the first argument to the function. In effect,
604
     this address becomes a hidden first argument.
605
 
606
     On return %rax will contain the address that has been passed in
607
     by the caller in %rdi.  */
608
  if (class[0] == AMD64_MEMORY)
609
    {
610
      /* As indicated by the comment above, the ABI guarantees that we
611
         can always find the return value just after the function has
612
         returned.  */
613
 
614
      if (readbuf)
615
        {
616
          ULONGEST addr;
617
 
618
          regcache_raw_read_unsigned (regcache, AMD64_RAX_REGNUM, &addr);
619
          read_memory (addr, readbuf, TYPE_LENGTH (type));
620
        }
621
 
622
      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
623
    }
624
 
625
  gdb_assert (class[1] != AMD64_MEMORY);
626
  gdb_assert (len <= 16);
627
 
628
  for (i = 0; len > 0; i++, len -= 8)
629
    {
630
      int regnum = -1;
631
      int offset = 0;
632
 
633
      switch (class[i])
634
        {
635
        case AMD64_INTEGER:
636
          /* 3. If the class is INTEGER, the next available register
637
             of the sequence %rax, %rdx is used.  */
638
          regnum = integer_regnum[integer_reg++];
639
          break;
640
 
641
        case AMD64_SSE:
642
          /* 4. If the class is SSE, the next available SSE register
643
             of the sequence %xmm0, %xmm1 is used.  */
644
          regnum = sse_regnum[sse_reg++];
645
          break;
646
 
647
        case AMD64_SSEUP:
648
          /* 5. If the class is SSEUP, the eightbyte is passed in the
649
             upper half of the last used SSE register.  */
650
          gdb_assert (sse_reg > 0);
651
          regnum = sse_regnum[sse_reg - 1];
652
          offset = 8;
653
          break;
654
 
655
        case AMD64_X87:
656
          /* 6. If the class is X87, the value is returned on the X87
657
             stack in %st0 as 80-bit x87 number.  */
658
          regnum = AMD64_ST0_REGNUM;
659
          if (writebuf)
660
            i387_return_value (gdbarch, regcache);
661
          break;
662
 
663
        case AMD64_X87UP:
664
          /* 7. If the class is X87UP, the value is returned together
665
             with the previous X87 value in %st0.  */
666
          gdb_assert (i > 0 && class[0] == AMD64_X87);
667
          regnum = AMD64_ST0_REGNUM;
668
          offset = 8;
669
          len = 2;
670
          break;
671
 
672
        case AMD64_NO_CLASS:
673
          continue;
674
 
675
        default:
676
          gdb_assert (!"Unexpected register class.");
677
        }
678
 
679
      gdb_assert (regnum != -1);
680
 
681
      if (readbuf)
682
        regcache_raw_read_part (regcache, regnum, offset, min (len, 8),
683
                                readbuf + i * 8);
684
      if (writebuf)
685
        regcache_raw_write_part (regcache, regnum, offset, min (len, 8),
686
                                 writebuf + i * 8);
687
    }
688
 
689
  return RETURN_VALUE_REGISTER_CONVENTION;
690
}
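
/* Example: a function returning "struct { long l; double d; }"
   (classes INTEGER and SSE) has its first eightbyte read from or
   written to %rax and its second to %xmm0 by the loop above.  A
   32-byte struct has class MEMORY, so %rax holds the address of the
   caller-provided buffer and RETURN_VALUE_ABI_RETURNS_ADDRESS is
   returned.  */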
691
 
692
 
693
static CORE_ADDR
694
amd64_push_arguments (struct regcache *regcache, int nargs,
695
                      struct value **args, CORE_ADDR sp, int struct_return)
696
{
697
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
698
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
699
  int *integer_regs = tdep->call_dummy_integer_regs;
700
  int num_integer_regs = tdep->call_dummy_num_integer_regs;
701
 
702
  static int sse_regnum[] =
703
  {
704
    /* %xmm0 ... %xmm7 */
705
    AMD64_XMM0_REGNUM + 0, AMD64_XMM1_REGNUM,
706
    AMD64_XMM0_REGNUM + 2, AMD64_XMM0_REGNUM + 3,
707
    AMD64_XMM0_REGNUM + 4, AMD64_XMM0_REGNUM + 5,
708
    AMD64_XMM0_REGNUM + 6, AMD64_XMM0_REGNUM + 7,
709
  };
710
  struct value **stack_args = alloca (nargs * sizeof (struct value *));
711
  /* An array that mirrors the stack_args array.  For all arguments
712
     that are passed by MEMORY, if that argument's address also needs
713
     to be stored in a register, the ARG_ADDR_REGNO array will contain
714
     that register number (or a negative value otherwise).  */
715
  int *arg_addr_regno = alloca (nargs * sizeof (int));
716
  int num_stack_args = 0;
717
  int num_elements = 0;
718
  int element = 0;
719
  int integer_reg = 0;
720
  int sse_reg = 0;
721
  int i;
722
 
723
  gdb_assert (tdep->classify);
724
 
725
  /* Reserve a register for the "hidden" argument.  */
726
  if (struct_return)
727
    integer_reg++;
728
 
729
  for (i = 0; i < nargs; i++)
730
    {
731
      struct type *type = value_type (args[i]);
732
      int len = TYPE_LENGTH (type);
733
      enum amd64_reg_class class[2];
734
      int needed_integer_regs = 0;
735
      int needed_sse_regs = 0;
736
      int j;
737
 
738
      /* Classify argument.  */
739
      tdep->classify (type, class);
740
 
741
      /* Calculate the number of integer and SSE registers needed for
742
         this argument.  */
743
      for (j = 0; j < 2; j++)
744
        {
745
          if (class[j] == AMD64_INTEGER)
746
            needed_integer_regs++;
747
          else if (class[j] == AMD64_SSE)
748
            needed_sse_regs++;
749
        }
750
 
751
      /* Check whether enough registers are available, and if the
752
         argument should be passed in registers at all.  */
753
      if (integer_reg + needed_integer_regs > num_integer_regs
754
          || sse_reg + needed_sse_regs > ARRAY_SIZE (sse_regnum)
755
          || (needed_integer_regs == 0 && needed_sse_regs == 0))
756
        {
757
          /* The argument will be passed on the stack.  */
758
          num_elements += ((len + 7) / 8);
759
          stack_args[num_stack_args] = args[i];
760
          /* If this is an AMD64_MEMORY argument whose address must also
761
             be passed in one of the integer registers, reserve that
762
             register and associate this value to that register so that
763
             we can store the argument address as soon as we know it.  */
764
          if (class[0] == AMD64_MEMORY
765
              && tdep->memory_args_by_pointer
766
              && integer_reg < tdep->call_dummy_num_integer_regs)
767
            arg_addr_regno[num_stack_args] =
768
              tdep->call_dummy_integer_regs[integer_reg++];
769
          else
770
            arg_addr_regno[num_stack_args] = -1;
771
          num_stack_args++;
772
        }
773
      else
774
        {
775
          /* The argument will be passed in registers.  */
776
          const gdb_byte *valbuf = value_contents (args[i]);
777
          gdb_byte buf[8];
778
 
779
          gdb_assert (len <= 16);
780
 
781
          for (j = 0; len > 0; j++, len -= 8)
782
            {
783
              int regnum = -1;
784
              int offset = 0;
785
 
786
              switch (class[j])
787
                {
788
                case AMD64_INTEGER:
789
                  regnum = integer_regs[integer_reg++];
790
                  break;
791
 
792
                case AMD64_SSE:
793
                  regnum = sse_regnum[sse_reg++];
794
                  break;
795
 
796
                case AMD64_SSEUP:
797
                  gdb_assert (sse_reg > 0);
798
                  regnum = sse_regnum[sse_reg - 1];
799
                  offset = 8;
800
                  break;
801
 
802
                default:
803
                  gdb_assert (!"Unexpected register class.");
804
                }
805
 
806
              gdb_assert (regnum != -1);
807
              memset (buf, 0, sizeof buf);
808
              memcpy (buf, valbuf + j * 8, min (len, 8));
809
              regcache_raw_write_part (regcache, regnum, offset, 8, buf);
810
            }
811
        }
812
    }
813
 
814
  /* Allocate space for the arguments on the stack.  */
815
  sp -= num_elements * 8;
816
 
817
  /* The psABI says that "The end of the input argument area shall be
818
     aligned on a 16 byte boundary."  */
819
  sp &= ~0xf;
820
 
821
  /* Write out the arguments to the stack.  */
822
  for (i = 0; i < num_stack_args; i++)
823
    {
824
      struct type *type = value_type (stack_args[i]);
825
      const gdb_byte *valbuf = value_contents (stack_args[i]);
826
      int len = TYPE_LENGTH (type);
827
      CORE_ADDR arg_addr = sp + element * 8;
828
 
829
      write_memory (arg_addr, valbuf, len);
830
      if (arg_addr_regno[i] >= 0)
831
        {
832
          /* We also need to store the address of that argument in
833
             the given register.  */
834
          gdb_byte buf[8];
835
          enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
836
 
837
          store_unsigned_integer (buf, 8, byte_order, arg_addr);
838
          regcache_cooked_write (regcache, arg_addr_regno[i], buf);
839
        }
840
      element += ((len + 7) / 8);
841
    }
842
 
843
  /* The psABI says that "For calls that may call functions that use
844
     varargs or stdargs (prototype-less calls or calls to functions
845
     containing ellipsis (...) in the declaration) %al is used as
846
     hidden argument to specify the number of SSE registers used.  */
847
  regcache_raw_write_unsigned (regcache, AMD64_RAX_REGNUM, sse_reg);
848
  return sp;
849
}
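
/* Illustration, assuming the usual SysV integer register sequence
   %rdi, %rsi, %rdx, %rcx, %r8, %r9 in TDEP: for a call f (1, 2.5, big)
   where "big" is a 32-byte struct, the 1 goes in the first free
   integer register (%rsi if a struct-return address already occupies
   %rdi), the 2.5 goes in %xmm0, and "big" is class MEMORY so its four
   eightbytes are written to the stack area reserved above; finally
   %rax (%al) is set to the number of SSE registers used, 1 here.  */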
850
 
851
static CORE_ADDR
852
amd64_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
853
                       struct regcache *regcache, CORE_ADDR bp_addr,
854
                       int nargs, struct value **args,  CORE_ADDR sp,
855
                       int struct_return, CORE_ADDR struct_addr)
856
{
857
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
858
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
859
  gdb_byte buf[8];
860
 
861
  /* Pass arguments.  */
862
  sp = amd64_push_arguments (regcache, nargs, args, sp, struct_return);
863
 
864
  /* Pass "hidden" argument".  */
865
  if (struct_return)
866
    {
867
      struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
868
      /* The "hidden" argument is passed throught the first argument
869
         register.  */
870
      const int arg_regnum = tdep->call_dummy_integer_regs[0];
871
 
872
      store_unsigned_integer (buf, 8, byte_order, struct_addr);
873
      regcache_cooked_write (regcache, arg_regnum, buf);
874
    }
875
 
876
  /* Reserve some memory on the stack for the integer-parameter registers,
877
     if required by the ABI.  */
878
  if (tdep->integer_param_regs_saved_in_caller_frame)
879
    sp -= tdep->call_dummy_num_integer_regs * 8;
880
 
881
  /* Store return address.  */
882
  sp -= 8;
883
  store_unsigned_integer (buf, 8, byte_order, bp_addr);
884
  write_memory (sp, buf, 8);
885
 
886
  /* Finally, update the stack pointer...  */
887
  store_unsigned_integer (buf, 8, byte_order, sp);
888
  regcache_cooked_write (regcache, AMD64_RSP_REGNUM, buf);
889
 
890
  /* ...and fake a frame pointer.  */
891
  regcache_cooked_write (regcache, AMD64_RBP_REGNUM, buf);
892
 
893
  return sp + 16;
894
}
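
/* After the code above runs, the dummy frame looks like this, from
   higher to lower addresses: the on-stack arguments, an optional save
   area for the integer parameter registers (if the ABI requires one),
   and the return address BP_ADDR at the final %rsp.  Both %rsp and
   %rbp are set to that address, and sp + 16 is returned.  */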
895
 
896
/* Displaced instruction handling.  */
897
 
898
/* A partially decoded instruction.
899
   This contains enough details for displaced stepping purposes.  */
900
 
901
struct amd64_insn
902
{
903
  /* The number of opcode bytes.  */
904
  int opcode_len;
905
  /* The offset of the rex prefix or -1 if not present.  */
906
  int rex_offset;
907
  /* The offset to the first opcode byte.  */
908
  int opcode_offset;
909
  /* The offset to the modrm byte or -1 if not present.  */
910
  int modrm_offset;
911
 
912
  /* The raw instruction.  */
913
  gdb_byte *raw_insn;
914
};
915
 
916
struct displaced_step_closure
917
{
918
  /* For rip-relative insns, saved copy of the reg we use instead of %rip.  */
919
  int tmp_used;
920
  int tmp_regno;
921
  ULONGEST tmp_save;
922
 
923
  /* Details of the instruction.  */
924
  struct amd64_insn insn_details;
925
 
926
  /* Amount of space allocated to insn_buf.  */
927
  int max_len;
928
 
929
  /* The possibly modified insn.
930
     This is a variable-length field.  */
931
  gdb_byte insn_buf[1];
932
};
933
 
934
/* WARNING: Keep onebyte_has_modrm, twobyte_has_modrm in sync with
935
   ../opcodes/i386-dis.c (until libopcodes exports them, or an alternative,
936
   at which point delete these in favor of libopcodes' versions).  */
937
 
938
static const unsigned char onebyte_has_modrm[256] = {
939
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
940
  /*       -------------------------------        */
941
  /* 00 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 00 */
942
  /* 10 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 10 */
943
  /* 20 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 20 */
944
  /* 30 */ 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0, /* 30 */
945
  /* 40 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 40 */
946
  /* 50 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 50 */
947
  /* 60 */ 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0, /* 60 */
948
  /* 70 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 70 */
949
  /* 80 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 80 */
950
  /* 90 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 90 */
951
  /* a0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* a0 */
952
  /* b0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* b0 */
953
  /* c0 */ 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0, /* c0 */
954
  /* d0 */ 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1, /* d0 */
955
  /* e0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* e0 */
956
  /* f0 */ 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1  /* f0 */
957
  /*       -------------------------------        */
958
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
959
};
960
 
961
static const unsigned char twobyte_has_modrm[256] = {
962
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
963
  /*       -------------------------------        */
964
  /* 00 */ 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1, /* 0f */
965
  /* 10 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 1f */
966
  /* 20 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 2f */
967
  /* 30 */ 0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0, /* 3f */
968
  /* 40 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 4f */
969
  /* 50 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 5f */
970
  /* 60 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 6f */
971
  /* 70 */ 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1, /* 7f */
972
  /* 80 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* 8f */
973
  /* 90 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* 9f */
974
  /* a0 */ 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1, /* af */
975
  /* b0 */ 1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1, /* bf */
976
  /* c0 */ 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, /* cf */
977
  /* d0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* df */
978
  /* e0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, /* ef */
979
  /* f0 */ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0  /* ff */
980
  /*       -------------------------------        */
981
  /*       0 1 2 3 4 5 6 7 8 9 a b c d e f        */
982
};
983
 
984
static int amd64_syscall_p (const struct amd64_insn *insn, int *lengthp);
985
 
986
static int
987
rex_prefix_p (gdb_byte pfx)
988
{
989
  return REX_PREFIX_P (pfx);
990
}
991
 
992
/* Skip the legacy instruction prefixes in INSN.
993
   We assume INSN is properly sentineled so we don't have to worry
994
   about falling off the end of the buffer.  */
995
 
996
static gdb_byte *
997
amd64_skip_prefixes (gdb_byte *insn)
998
{
999
  while (1)
1000
    {
1001
      switch (*insn)
1002
        {
1003
        case DATA_PREFIX_OPCODE:
1004
        case ADDR_PREFIX_OPCODE:
1005
        case CS_PREFIX_OPCODE:
1006
        case DS_PREFIX_OPCODE:
1007
        case ES_PREFIX_OPCODE:
1008
        case FS_PREFIX_OPCODE:
1009
        case GS_PREFIX_OPCODE:
1010
        case SS_PREFIX_OPCODE:
1011
        case LOCK_PREFIX_OPCODE:
1012
        case REPE_PREFIX_OPCODE:
1013
        case REPNE_PREFIX_OPCODE:
1014
          ++insn;
1015
          continue;
1016
        default:
1017
          break;
1018
        }
1019
      break;
1020
    }
1021
 
1022
  return insn;
1023
}
1024
 
1025
/* Return an integer register (other than RSP) that is unused as an input
1026
   operand in INSN.
1027
   In order to not require adding a rex prefix if the insn doesn't already
1028
   have one, the result is restricted to RAX ... RDI, sans RSP.
1029
   The register numbering of the result follows architecture ordering,
1030
   e.g. RDI = 7.  */
1031
 
1032
static int
1033
amd64_get_unused_input_int_reg (const struct amd64_insn *details)
1034
{
1035
  /* 1 bit for each reg */
1036
  int used_regs_mask = 0;
1037
 
1038
  /* There can be at most 3 int regs used as inputs in an insn, and we have
1039
     7 to choose from (RAX ... RDI, sans RSP).
1040
     This allows us to take a conservative approach and keep things simple.
1041
     E.g. By avoiding RAX, we don't have to specifically watch for opcodes
1042
     that implicitly specify RAX.  */
1043
 
1044
  /* Avoid RAX.  */
1045
  used_regs_mask |= 1 << EAX_REG_NUM;
1046
  /* Similarly avoid RDX, implicit operand in divides.  */
1047
  used_regs_mask |= 1 << EDX_REG_NUM;
1048
  /* Avoid RSP.  */
1049
  used_regs_mask |= 1 << ESP_REG_NUM;
1050
 
1051
  /* If the opcode is one byte long and there's no ModRM byte,
1052
     assume the opcode specifies a register.  */
1053
  if (details->opcode_len == 1 && details->modrm_offset == -1)
1054
    used_regs_mask |= 1 << (details->raw_insn[details->opcode_offset] & 7);
1055
 
1056
  /* Mark used regs in the modrm/sib bytes.  */
1057
  if (details->modrm_offset != -1)
1058
    {
1059
      int modrm = details->raw_insn[details->modrm_offset];
1060
      int mod = MODRM_MOD_FIELD (modrm);
1061
      int reg = MODRM_REG_FIELD (modrm);
1062
      int rm = MODRM_RM_FIELD (modrm);
1063
      int have_sib = mod != 3 && rm == 4;
1064
 
1065
      /* Assume the reg field of the modrm byte specifies a register.  */
1066
      used_regs_mask |= 1 << reg;
1067
 
1068
      if (have_sib)
1069
        {
1070
          int base = SIB_BASE_FIELD (details->raw_insn[details->modrm_offset + 1]);
1071
          int index = SIB_INDEX_FIELD (details->raw_insn[details->modrm_offset + 1]);
1072
          used_regs_mask |= 1 << base;
1073
          used_regs_mask |= 1 << index;
1074
        }
1075
      else
1076
        {
1077
          used_regs_mask |= 1 << rm;
1078
        }
1079
    }
1080
 
1081
  gdb_assert (used_regs_mask < 256);
1082
  gdb_assert (used_regs_mask != 255);
1083
 
1084
  /* Finally, find a free reg.  */
1085
  {
1086
    int i;
1087
 
1088
    for (i = 0; i < 8; ++i)
1089
      {
1090
        if (! (used_regs_mask & (1 << i)))
1091
          return i;
1092
      }
1093
 
1094
    /* We shouldn't get here.  */
1095
    internal_error (__FILE__, __LINE__, _("unable to find free reg"));
1096
  }
1097
}
1098
 
1099
/* Extract the details of INSN that we need.  */
1100
 
1101
static void
1102
amd64_get_insn_details (gdb_byte *insn, struct amd64_insn *details)
1103
{
1104
  gdb_byte *start = insn;
1105
  int need_modrm;
1106
 
1107
  details->raw_insn = insn;
1108
 
1109
  details->opcode_len = -1;
1110
  details->rex_offset = -1;
1111
  details->opcode_offset = -1;
1112
  details->modrm_offset = -1;
1113
 
1114
  /* Skip legacy instruction prefixes.  */
1115
  insn = amd64_skip_prefixes (insn);
1116
 
1117
  /* Skip REX instruction prefix.  */
1118
  if (rex_prefix_p (*insn))
1119
    {
1120
      details->rex_offset = insn - start;
1121
      ++insn;
1122
    }
1123
 
1124
  details->opcode_offset = insn - start;
1125
 
1126
  if (*insn == TWO_BYTE_OPCODE_ESCAPE)
1127
    {
1128
      /* Two or three-byte opcode.  */
1129
      ++insn;
1130
      need_modrm = twobyte_has_modrm[*insn];
1131
 
1132
      /* Check for three-byte opcode.  */
1133
      switch (*insn)
1134
        {
1135
        case 0x24:
1136
        case 0x25:
1137
        case 0x38:
1138
        case 0x3a:
1139
        case 0x7a:
1140
        case 0x7b:
1141
          ++insn;
1142
          details->opcode_len = 3;
1143
          break;
1144
        default:
1145
          details->opcode_len = 2;
1146
          break;
1147
        }
1148
    }
1149
  else
1150
    {
1151
      /* One-byte opcode.  */
1152
      need_modrm = onebyte_has_modrm[*insn];
1153
      details->opcode_len = 1;
1154
    }
1155
 
1156
  if (need_modrm)
1157
    {
1158
      ++insn;
1159
      details->modrm_offset = insn - start;
1160
    }
1161
}
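
/* Decoding example: for the 7-byte insn 48 8b 05 d0 00 20 00
   (mov 0x2000d0(%rip),%rax) the details come out as rex_offset = 0,
   opcode_offset = 1, opcode_len = 1 and modrm_offset = 2; the ModRM
   byte 0x05 marks it as %rip-relative for fixup_riprel below.  */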
1162
 
1163
/* Update %rip-relative addressing in INSN.
1164
 
1165
   %rip-relative addressing only uses a 32-bit displacement.
1166
   32 bits is not enough to be guaranteed to cover the distance between where
1167
   the real instruction is and where its copy is.
1168
   Convert the insn to use base+disp addressing.
1169
   We set base = pc + insn_length so we can leave disp unchanged.  */
1170
 
1171
static void
1172
fixup_riprel (struct gdbarch *gdbarch, struct displaced_step_closure *dsc,
1173
              CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1174
{
1175
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1176
  const struct amd64_insn *insn_details = &dsc->insn_details;
1177
  int modrm_offset = insn_details->modrm_offset;
1178
  gdb_byte *insn = insn_details->raw_insn + modrm_offset;
1179
  CORE_ADDR rip_base;
1180
  int32_t disp;
1181
  int insn_length;
1182
  int arch_tmp_regno, tmp_regno;
1183
  ULONGEST orig_value;
1184
 
1185
  /* %rip+disp32 addressing mode, displacement follows ModRM byte.  */
1186
  ++insn;
1187
 
1188
  /* Compute the rip-relative address.  */
1189
  disp = extract_signed_integer (insn, sizeof (int32_t), byte_order);
1190
  insn_length = gdb_buffered_insn_length (gdbarch, dsc->insn_buf,
1191
                                          dsc->max_len, from);
1192
  rip_base = from + insn_length;
1193
 
1194
  /* We need a register to hold the address.
1195
     Pick one not used in the insn.
1196
     NOTE: arch_tmp_regno uses architecture ordering, e.g. RDI = 7.  */
1197
  arch_tmp_regno = amd64_get_unused_input_int_reg (insn_details);
1198
  tmp_regno = amd64_arch_reg_to_regnum (arch_tmp_regno);
1199
 
1200
  /* REX.B should be unset as we were using rip-relative addressing,
1201
     but ensure it's unset anyway; tmp_regno is not r8-r15.  */
1202
  if (insn_details->rex_offset != -1)
1203
    dsc->insn_buf[insn_details->rex_offset] &= ~REX_B;
1204
 
1205
  regcache_cooked_read_unsigned (regs, tmp_regno, &orig_value);
1206
  dsc->tmp_regno = tmp_regno;
1207
  dsc->tmp_save = orig_value;
1208
  dsc->tmp_used = 1;
1209
 
1210
  /* Convert the ModRM field to be base+disp.  */
1211
  dsc->insn_buf[modrm_offset] &= ~0xc7;
1212
  dsc->insn_buf[modrm_offset] |= 0x80 + arch_tmp_regno;
1213
 
1214
  regcache_cooked_write_unsigned (regs, tmp_regno, rip_base);
1215
 
1216
  if (debug_displaced)
1217
    fprintf_unfiltered (gdb_stdlog, "displaced: %%rip-relative addressing used.\n"
1218
                        "displaced: using temp reg %d, old value %s, new value %s\n",
1219
                        dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save),
1220
                        paddress (gdbarch, rip_base));
1221
}
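
/* Continuing the decoding example above: for 48 8b 05 d0 00 20 00 the
   unused input register picked is architectural reg 1 (%rcx), so the
   ModRM byte 0x05 becomes 0x81 (mod = 10, rm = %rcx) and the copy
   executes as mov 0x2000d0(%rcx),%rax while %rcx temporarily holds
   FROM + 7; the saved %rcx value is restored later in
   amd64_displaced_step_fixup.  */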
1222
 
1223
static void
1224
fixup_displaced_copy (struct gdbarch *gdbarch,
1225
                      struct displaced_step_closure *dsc,
1226
                      CORE_ADDR from, CORE_ADDR to, struct regcache *regs)
1227
{
1228
  const struct amd64_insn *details = &dsc->insn_details;
1229
 
1230
  if (details->modrm_offset != -1)
1231
    {
1232
      gdb_byte modrm = details->raw_insn[details->modrm_offset];
1233
 
1234
      if ((modrm & 0xc7) == 0x05)
1235
        {
1236
          /* The insn uses rip-relative addressing.
1237
             Deal with it.  */
1238
          fixup_riprel (gdbarch, dsc, from, to, regs);
1239
        }
1240
    }
1241
}
1242
 
1243
struct displaced_step_closure *
1244
amd64_displaced_step_copy_insn (struct gdbarch *gdbarch,
1245
                                CORE_ADDR from, CORE_ADDR to,
1246
                                struct regcache *regs)
1247
{
1248
  int len = gdbarch_max_insn_length (gdbarch);
1249
  /* Extra space for sentinels so fixup_{riprel,displaced_copy} don't have to
1250
     continually watch for running off the end of the buffer.  */
1251
  int fixup_sentinel_space = len;
1252
  struct displaced_step_closure *dsc =
1253
    xmalloc (sizeof (*dsc) + len + fixup_sentinel_space);
1254
  gdb_byte *buf = &dsc->insn_buf[0];
1255
  struct amd64_insn *details = &dsc->insn_details;
1256
 
1257
  dsc->tmp_used = 0;
1258
  dsc->max_len = len + fixup_sentinel_space;
1259
 
1260
  read_memory (from, buf, len);
1261
 
1262
  /* Set up the sentinel space so we don't have to worry about running
1263
     off the end of the buffer.  An excessive number of leading prefixes
1264
     could otherwise cause this.  */
1265
  memset (buf + len, 0, fixup_sentinel_space);
1266
 
1267
  amd64_get_insn_details (buf, details);
1268
 
1269
  /* GDB may get control back after the insn after the syscall.
1270
     Presumably this is a kernel bug.
1271
     If this is a syscall, make sure there's a nop afterwards.  */
1272
  {
1273
    int syscall_length;
1274
 
1275
    if (amd64_syscall_p (details, &syscall_length))
1276
      buf[details->opcode_offset + syscall_length] = NOP_OPCODE;
1277
  }
1278
 
1279
  /* Modify the insn to cope with the address where it will be executed from.
1280
     In particular, handle any rip-relative addressing.  */
1281
  fixup_displaced_copy (gdbarch, dsc, from, to, regs);
1282
 
1283
  write_memory (to, buf, len);
1284
 
1285
  if (debug_displaced)
1286
    {
1287
      fprintf_unfiltered (gdb_stdlog, "displaced: copy %s->%s: ",
1288
                          paddress (gdbarch, from), paddress (gdbarch, to));
1289
      displaced_step_dump_bytes (gdb_stdlog, buf, len);
1290
    }
1291
 
1292
  return dsc;
1293
}
1294
 
1295
static int
1296
amd64_absolute_jmp_p (const struct amd64_insn *details)
1297
{
1298
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1299
 
1300
  if (insn[0] == 0xff)
1301
    {
1302
      /* jump near, absolute indirect (/4) */
1303
      if ((insn[1] & 0x38) == 0x20)
1304
        return 1;
1305
 
1306
      /* jump far, absolute indirect (/5) */
1307
      if ((insn[1] & 0x38) == 0x28)
1308
        return 1;
1309
    }
1310
 
1311
  return 0;
1312
}
1313
 
1314
static int
1315
amd64_absolute_call_p (const struct amd64_insn *details)
1316
{
1317
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1318
 
1319
  if (insn[0] == 0xff)
1320
    {
1321
      /* Call near, absolute indirect (/2) */
1322
      if ((insn[1] & 0x38) == 0x10)
1323
        return 1;
1324
 
1325
      /* Call far, absolute indirect (/3) */
1326
      if ((insn[1] & 0x38) == 0x18)
1327
        return 1;
1328
    }
1329
 
1330
  return 0;
1331
}
1332
 
1333
static int
1334
amd64_ret_p (const struct amd64_insn *details)
1335
{
1336
  /* NOTE: gcc can emit "repz ; ret".  */
1337
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1338
 
1339
  switch (insn[0])
1340
    {
1341
    case 0xc2: /* ret near, pop N bytes */
1342
    case 0xc3: /* ret near */
1343
    case 0xca: /* ret far, pop N bytes */
1344
    case 0xcb: /* ret far */
1345
    case 0xcf: /* iret */
1346
      return 1;
1347
 
1348
    default:
1349
      return 0;
1350
    }
1351
}
1352
 
1353
static int
1354
amd64_call_p (const struct amd64_insn *details)
1355
{
1356
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1357
 
1358
  if (amd64_absolute_call_p (details))
1359
    return 1;
1360
 
1361
  /* call near, relative */
1362
  if (insn[0] == 0xe8)
1363
    return 1;
1364
 
1365
  return 0;
1366
}
1367
 
1368
/* Return non-zero if INSN is a system call, and set *LENGTHP to its
1369
   length in bytes.  Otherwise, return zero.  */
1370
 
1371
static int
1372
amd64_syscall_p (const struct amd64_insn *details, int *lengthp)
1373
{
1374
  const gdb_byte *insn = &details->raw_insn[details->opcode_offset];
1375
 
1376
  if (insn[0] == 0x0f && insn[1] == 0x05)
1377
    {
1378
      *lengthp = 2;
1379
      return 1;
1380
    }
1381
 
1382
  return 0;
1383
}
1384
 
1385
/* Fix up the state of registers and memory after having single-stepped
1386
   a displaced instruction.  */
1387
 
1388
void
1389
amd64_displaced_step_fixup (struct gdbarch *gdbarch,
1390
                            struct displaced_step_closure *dsc,
1391
                            CORE_ADDR from, CORE_ADDR to,
1392
                            struct regcache *regs)
1393
{
1394
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1395
  /* The offset we applied to the instruction's address.  */
1396
  ULONGEST insn_offset = to - from;
1397
  gdb_byte *insn = dsc->insn_buf;
1398
  const struct amd64_insn *insn_details = &dsc->insn_details;
1399
 
1400
  if (debug_displaced)
1401
    fprintf_unfiltered (gdb_stdlog,
1402
                        "displaced: fixup (%s, %s), "
1403
                        "insn = 0x%02x 0x%02x ...\n",
1404
                        paddress (gdbarch, from), paddress (gdbarch, to),
1405
                        insn[0], insn[1]);
1406
 
1407
  /* If we used a tmp reg, restore it.  */
1408
 
1409
  if (dsc->tmp_used)
1410
    {
1411
      if (debug_displaced)
1412
        fprintf_unfiltered (gdb_stdlog, "displaced: restoring reg %d to %s\n",
1413
                            dsc->tmp_regno, paddress (gdbarch, dsc->tmp_save));
1414
      regcache_cooked_write_unsigned (regs, dsc->tmp_regno, dsc->tmp_save);
1415
    }
1416
 
1417
  /* The list of issues to contend with here is taken from
1418
     resume_execution in arch/x86/kernel/kprobes.c, Linux 2.6.28.
1419
     Yay for Free Software!  */
1420
 
1421
  /* Relocate the %rip back to the program's instruction stream,
1422
     if necessary.  */
1423
 
1424
  /* Except in the case of absolute or indirect jump or call
1425
     instructions, or a return instruction, the new rip is relative to
1426
     the displaced instruction; make it relative to the original insn.
1427
     Well, signal handler returns don't need relocation either, but we use the
1428
     value of %rip to recognize those; see below.  */
1429
  if (! amd64_absolute_jmp_p (insn_details)
1430
      && ! amd64_absolute_call_p (insn_details)
1431
      && ! amd64_ret_p (insn_details))
1432
    {
1433
      ULONGEST orig_rip;
1434
      int insn_len;
1435
 
1436
      regcache_cooked_read_unsigned (regs, AMD64_RIP_REGNUM, &orig_rip);
1437
 
1438
      /* A signal trampoline system call changes the %rip, resuming
1439
         execution of the main program after the signal handler has
1440
         returned.  That makes them like 'return' instructions; we
1441
         shouldn't relocate %rip.
1442
 
1443
         But most system calls don't, and we do need to relocate %rip.
1444
 
1445
         Our heuristic for distinguishing these cases: if stepping
1446
         over the system call instruction left control directly after
1447
         the instruction, then we relocate --- control almost certainly
1448
         doesn't belong in the displaced copy.  Otherwise, we assume
1449
         the instruction has put control where it belongs, and leave
1450
         it unrelocated.  Goodness help us if there are PC-relative
1451
         system calls.  */
1452
      if (amd64_syscall_p (insn_details, &insn_len)
1453
          && orig_rip != to + insn_len
1454
          /* GDB can get control back after the insn after the syscall.
1455
             Presumably this is a kernel bug.
1456
             Fixup ensures it's a nop; we add one to the length for it.  */
1457
          && orig_rip != to + insn_len + 1)
1458
        {
1459
          if (debug_displaced)
1460
            fprintf_unfiltered (gdb_stdlog,
1461
                                "displaced: syscall changed %%rip; "
1462
                                "not relocating\n");
1463
        }
1464
      else
1465
        {
1466
          ULONGEST rip = orig_rip - insn_offset;
1467
 
1468
          /* If we just stepped over a breakpoint insn, we don't back up
1469
             the pc on purpose; this is to match behaviour without
1470
             stepping.  */
1471
 
1472
          regcache_cooked_write_unsigned (regs, AMD64_RIP_REGNUM, rip);
1473
 
1474
          if (debug_displaced)
1475
            fprintf_unfiltered (gdb_stdlog,
1476
                                "displaced: "
1477
                                "relocated %%rip from %s to %s\n",
1478
                                paddress (gdbarch, orig_rip),
1479
                                paddress (gdbarch, rip));
1480
        }
1481
    }
1482
 
1483
  /* If the instruction was PUSHFL, then the TF bit will be set in the
1484
     pushed value, and should be cleared.  We'll leave this for later,
1485
     since GDB already messes up the TF flag when stepping over a
1486
     pushfl.  */
1487
 
1488
  /* If the instruction was a call, the return address now atop the
1489
     stack is the address following the copied instruction.  We need
1490
     to make it the address following the original instruction.  */
1491
  if (amd64_call_p (insn_details))
1492
    {
1493
      ULONGEST rsp;
1494
      ULONGEST retaddr;
1495
      const ULONGEST retaddr_len = 8;
1496
 
1497
      regcache_cooked_read_unsigned (regs, AMD64_RSP_REGNUM, &rsp);
1498
      retaddr = read_memory_unsigned_integer (rsp, retaddr_len, byte_order);
1499
      retaddr = (retaddr - insn_offset) & 0xffffffffUL;
1500
      write_memory_unsigned_integer (rsp, retaddr_len, byte_order, retaddr);
1501
 
1502
      if (debug_displaced)
1503
        fprintf_unfiltered (gdb_stdlog,
1504
                            "displaced: relocated return addr at %s "
1505
                            "to %s\n",
1506
                            paddress (gdbarch, rsp),
1507
                            paddress (gdbarch, retaddr));
1508
    }
1509
}
1510
 
1511
/* If the instruction INSN uses RIP-relative addressing, return the
1512
   offset into the raw INSN where the displacement to be adjusted is
1513
   found.  Returns 0 if the instruction doesn't use RIP-relative
1514
   addressing.  */
1515
 
1516
static int
1517
rip_relative_offset (struct amd64_insn *insn)
1518
{
1519
  if (insn->modrm_offset != -1)
1520
    {
1521
      gdb_byte modrm = insn->raw_insn[insn->modrm_offset];
1522
 
1523
      if ((modrm & 0xc7) == 0x05)
1524
        {
1525
          /* The displacement is found right after the ModRM byte.  */
1526
          return insn->modrm_offset + 1;
1527
        }
1528
    }
1529
 
1530
  return 0;
1531
}
1532
 
1533
static void
1534
append_insns (CORE_ADDR *to, ULONGEST len, const gdb_byte *buf)
1535
{
1536
  target_write_memory (*to, buf, len);
1537
  *to += len;
1538
}
1539
 
1540
void
1541
amd64_relocate_instruction (struct gdbarch *gdbarch,
1542
                            CORE_ADDR *to, CORE_ADDR oldloc)
1543
{
1544
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1545
  int len = gdbarch_max_insn_length (gdbarch);
1546
  /* Extra space for sentinels.  */
1547
  int fixup_sentinel_space = len;
1548
  gdb_byte *buf = xmalloc (len + fixup_sentinel_space);
1549
  struct amd64_insn insn_details;
1550
  int offset = 0;
1551
  LONGEST rel32, newrel;
1552
  gdb_byte *insn;
1553
  int insn_length;
1554
 
1555
  read_memory (oldloc, buf, len);
1556
 
1557
  /* Set up the sentinel space so we don't have to worry about running
1558
     off the end of the buffer.  An excessive number of leading prefixes
1559
     could otherwise cause this.  */
1560
  memset (buf + len, 0, fixup_sentinel_space);
1561
 
1562
  insn = buf;
1563
  amd64_get_insn_details (insn, &insn_details);
1564
 
1565
  insn_length = gdb_buffered_insn_length (gdbarch, insn, len, oldloc);
1566
 
1567
  /* Skip legacy instruction prefixes.  */
1568
  insn = amd64_skip_prefixes (insn);
1569
 
1570
  /* Rewrite calls with 32-bit relative addresses as a push/jump pair,
     pushing the address to which the original call in the user
     program would return.  */
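  /* A sketch of the transformation (hypothetical addresses): a 5-byte
     "call 0x400800" found at OLDLOC 0x4004fb and copied to *TO becomes

          pushq $0x400500          68 00 05 40 00
          jmp   <rel32>            e9 xx xx xx xx

     where 0x400500 is OLDLOC plus the call's length (the original
     return address) and the jump's rel32 is recomputed below so that
     it still lands on 0x400800.  Note that 0x68 pushes a sign-extended
     32-bit immediate, so this relies on the return address fitting in
     32 bits.  */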
1573
  if (insn[0] == 0xe8)
1574
    {
1575
      gdb_byte push_buf[16];
1576
      unsigned int ret_addr;
1577
 
1578
      /* Where "ret" in the original code will return to.  */
1579
      ret_addr = oldloc + insn_length;
1580
      push_buf[0] = 0x68; /* pushq $... */
1581
      memcpy (&push_buf[1], &ret_addr, 4);
1582
      /* Push the push.  */
1583
      append_insns (to, 5, push_buf);
1584
 
1585
      /* Convert the relative call to a relative jump.  */
1586
      insn[0] = 0xe9;
1587
 
1588
      /* Adjust the destination offset.  */
1589
      rel32 = extract_signed_integer (insn + 1, 4, byte_order);
1590
      newrel = (oldloc - *to) + rel32;
1591
      store_signed_integer (insn + 1, 4, newrel, byte_order);
1592
 
1593
      /* Write the adjusted jump into its displaced location.  */
1594
      append_insns (to, 5, insn);
1595
      return;
1596
    }
1597
 
1598
  offset = rip_relative_offset (&insn_details);
1599
  if (!offset)
1600
    {
1601
      /* Adjust jumps with 32-bit relative addresses.  Calls are
1602
         already handled above.  */
1603
      if (insn[0] == 0xe9)
1604
        offset = 1;
1605
      /* Adjust conditional jumps.  */
1606
      else if (insn[0] == 0x0f && (insn[1] & 0xf0) == 0x80)
1607
        offset = 2;
1608
    }
1609
 
1610
  if (offset)
1611
    {
1612
      rel32 = extract_signed_integer (insn + offset, 4, byte_order);
1613
      newrel = (oldloc - *to) + rel32;
1614
      store_signed_integer (insn + offset, 4, newrel, byte_order);
1615
      if (debug_displaced)
1616
        fprintf_unfiltered (gdb_stdlog,
1617
                            "Adjusted insn rel32=0x%s at 0x%s to"
1618
                            " rel32=0x%s at 0x%s\n",
1619
                            hex_string (rel32), paddress (gdbarch, oldloc),
1620
                            hex_string (newrel), paddress (gdbarch, *to));
1621
    }
1622
 
1623
  /* Write the adjusted instruction into its displaced location.  */
1624
  append_insns (to, insn_length, buf);
1625
}
1626
 
1627
 
1628
/* The maximum number of saved registers.  This should include %rip.  */
1629
#define AMD64_NUM_SAVED_REGS    AMD64_NUM_GREGS
1630
 
1631
struct amd64_frame_cache
1632
{
1633
  /* Base address.  */
1634
  CORE_ADDR base;
1635
  CORE_ADDR sp_offset;
1636
  CORE_ADDR pc;
1637
 
1638
  /* Saved registers.  */
1639
  CORE_ADDR saved_regs[AMD64_NUM_SAVED_REGS];
1640
  CORE_ADDR saved_sp;
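  /* When the prologue re-aligns the stack (see
     amd64_analyze_stack_align), the register into which it copied the
     caller's %rsp; -1 otherwise.  */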
1641
  int saved_sp_reg;
1642
 
1643
  /* Do we have a frame?  */
1644
  int frameless_p;
1645
};
1646
 
1647
/* Initialize a frame cache.  */
1648
 
1649
static void
1650
amd64_init_frame_cache (struct amd64_frame_cache *cache)
1651
{
1652
  int i;
1653
 
1654
  /* Base address.  */
1655
  cache->base = 0;
1656
  cache->sp_offset = -8;
1657
  cache->pc = 0;
1658
 
1659
  /* Saved registers.  We initialize these to -1 since zero is a valid
1660
     offset (that's where %rbp is supposed to be stored).
1661
     The values start out as being offsets, and are later converted to
1662
     addresses (at which point -1 is interpreted as an address, still meaning
1663
     "invalid").  */
1664
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1665
    cache->saved_regs[i] = -1;
1666
  cache->saved_sp = 0;
1667
  cache->saved_sp_reg = -1;
1668
 
1669
  /* Frameless until proven otherwise.  */
1670
  cache->frameless_p = 1;
1671
}
1672
 
1673
/* Allocate and initialize a frame cache.  */
1674
 
1675
static struct amd64_frame_cache *
1676
amd64_alloc_frame_cache (void)
1677
{
1678
  struct amd64_frame_cache *cache;
1679
 
1680
  cache = FRAME_OBSTACK_ZALLOC (struct amd64_frame_cache);
1681
  amd64_init_frame_cache (cache);
1682
  return cache;
1683
}
1684
 
1685
/* GCC 4.4 and later can put code in the prologue to realign the
   stack pointer.  Check whether PC points to such code, and update
   CACHE accordingly.  Return the address of the first instruction
   after the code sequence, or CURRENT_PC, whichever is smaller.  If
   we don't recognize the code, return PC.  */
1690
 
1691
static CORE_ADDR
1692
amd64_analyze_stack_align (CORE_ADDR pc, CORE_ADDR current_pc,
1693
                           struct amd64_frame_cache *cache)
1694
{
1695
  /* There are 2 code sequences to re-align stack before the frame
1696
     gets set up:
1697
 
1698
        1. Use a caller-saved register:
1699
 
1700
                leaq  8(%rsp), %reg
1701
                andq  $-XXX, %rsp
1702
                pushq -8(%reg)
1703
 
1704
        2. Use a callee-saved register (saving it first):
1705
 
1706
                pushq %reg
1707
                leaq  16(%rsp), %reg
1708
                andq  $-XXX, %rsp
1709
                pushq -8(%reg)
1710
 
1711
     "andq $-XXX, %rsp" can be either 4 bytes or 7 bytes:
1712
 
1713
        0x48 0x83 0xe4 0xf0                     andq $-16, %rsp
1714
        0x48 0x81 0xe4 0x00 0xff 0xff 0xff      andq $-256, %rsp
1715
   */
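  /* As a concrete (illustrative) instance of sequence 1, GCC might
     emit, with %r10 as the scratch register:

        4c 8d 54 24 08          leaq   0x8(%rsp), %r10
        48 83 e4 f0             andq   $-16, %rsp
        41 ff 72 f8             pushq  -0x8(%r10)

     in which case the code below recognizes the sequence and, once
     CURRENT_PC is past the "andq", records %r10 in
     cache->saved_sp_reg.  */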
1716
 
1717
  gdb_byte buf[18];
1718
  int reg, r;
1719
  int offset, offset_and;
1720
 
1721
  if (target_read_memory (pc, buf, sizeof buf))
1722
    return pc;
1723
 
1724
  /* Check the caller-saved register case.  The first instruction has
     to be "leaq 8(%rsp), %reg".  */
1726
  if ((buf[0] & 0xfb) == 0x48
1727
      && buf[1] == 0x8d
1728
      && buf[3] == 0x24
1729
      && buf[4] == 0x8)
1730
    {
1731
      /* MOD must be binary 10 and R/M must be binary 100.  */
1732
      if ((buf[2] & 0xc7) != 0x44)
1733
        return pc;
1734
 
1735
      /* REG has register number.  */
1736
      reg = (buf[2] >> 3) & 7;
1737
 
1738
      /* Check the REX.R bit.  */
1739
      if (buf[0] == 0x4c)
1740
        reg += 8;
1741
 
1742
      offset = 5;
1743
    }
1744
  else
1745
    {
1746
      /* Check the callee-saved register case.  The first instruction
         has to be "pushq %reg".  */
1748
      reg = 0;
1749
      if ((buf[0] & 0xf8) == 0x50)
1750
        offset = 0;
1751
      else if ((buf[0] & 0xf6) == 0x40
1752
               && (buf[1] & 0xf8) == 0x50)
1753
        {
1754
          /* Check the REX.B bit.  */
1755
          if ((buf[0] & 1) != 0)
1756
            reg = 8;
1757
 
1758
          offset = 1;
1759
        }
1760
      else
1761
        return pc;
1762
 
1763
      /* Get register.  */
1764
      reg += buf[offset] & 0x7;
1765
 
1766
      offset++;
1767
 
1768
      /* The next instruction has to be "leaq 16(%rsp), %reg".  */
1769
      if ((buf[offset] & 0xfb) != 0x48
1770
          || buf[offset + 1] != 0x8d
1771
          || buf[offset + 3] != 0x24
1772
          || buf[offset + 4] != 0x10)
1773
        return pc;
1774
 
1775
      /* MOD must be binary 10 and R/M must be binary 100.  */
1776
      if ((buf[offset + 2] & 0xc7) != 0x44)
1777
        return pc;
1778
 
1779
      /* REG has register number.  */
1780
      r = (buf[offset + 2] >> 3) & 7;
1781
 
1782
      /* Check the REX.R bit.  */
1783
      if (buf[offset] == 0x4c)
1784
        r += 8;
1785
 
1786
      /* Registers in pushq and leaq have to be the same.  */
1787
      if (reg != r)
1788
        return pc;
1789
 
1790
      offset += 5;
1791
    }
1792
 
1793
  /* The register can't be %rsp or %rbp.  */
1794
  if (reg == 4 || reg == 5)
1795
    return pc;
1796
 
1797
  /* The next instruction has to be "andq $-XXX, %rsp".  */
1798
  if (buf[offset] != 0x48
1799
      || buf[offset + 2] != 0xe4
1800
      || (buf[offset + 1] != 0x81 && buf[offset + 1] != 0x83))
1801
    return pc;
1802
 
1803
  offset_and = offset;
1804
  offset += buf[offset + 1] == 0x81 ? 7 : 4;
1805
 
1806
  /* The next instruction has to be "pushq -8(%reg)".  */
1807
  r = 0;
1808
  if (buf[offset] == 0xff)
1809
    offset++;
1810
  else if ((buf[offset] & 0xf6) == 0x40
1811
           && buf[offset + 1] == 0xff)
1812
    {
1813
      /* Check the REX.B bit.  */
1814
      if ((buf[offset] & 0x1) != 0)
1815
        r = 8;
1816
      offset += 2;
1817
    }
1818
  else
1819
    return pc;
1820
 
1821
  /* The 8-bit displacement -8 is 0xf8.  REG must be binary 110 and
     MOD must be binary 01.  */
1823
  if (buf[offset + 1] != 0xf8
1824
      || (buf[offset] & 0xf8) != 0x70)
1825
    return pc;
1826
 
1827
  /* R/M has register.  */
1828
  r += buf[offset] & 7;
1829
 
1830
  /* Registers in leaq and pushq have to be the same.  */
1831
  if (reg != r)
1832
    return pc;
1833
 
1834
  if (current_pc > pc + offset_and)
1835
    cache->saved_sp_reg = amd64_arch_reg_to_regnum (reg);
1836
 
1837
  return min (pc + offset + 2, current_pc);
1838
}
1839
 
1840
/* Do a limited analysis of the prologue at PC and update CACHE
1841
   accordingly.  Bail out early if CURRENT_PC is reached.  Return the
1842
   address where the analysis stopped.
1843
 
1844
   We will handle only functions beginning with:
1845
 
1846
      pushq %rbp        0x55
1847
      movq %rsp, %rbp   0x48 0x89 0xe5
1848
 
1849
   Any function that doesn't start with this sequence will be assumed
1850
   to have no prologue and thus no valid frame pointer in %rbp.  */
1851
 
1852
static CORE_ADDR
1853
amd64_analyze_prologue (struct gdbarch *gdbarch,
1854
                        CORE_ADDR pc, CORE_ADDR current_pc,
1855
                        struct amd64_frame_cache *cache)
1856
{
1857
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1858
  static gdb_byte proto[3] = { 0x48, 0x89, 0xe5 }; /* movq %rsp, %rbp */
1859
  gdb_byte buf[3];
1860
  gdb_byte op;
1861
 
1862
  if (current_pc <= pc)
1863
    return current_pc;
1864
 
1865
  pc = amd64_analyze_stack_align (pc, current_pc, cache);
1866
 
1867
  op = read_memory_unsigned_integer (pc, 1, byte_order);
1868
 
1869
  if (op == 0x55)               /* pushq %rbp */
1870
    {
1871
      /* Take into account that we've executed the `pushq %rbp' that
1872
         starts this instruction sequence.  */
1873
      cache->saved_regs[AMD64_RBP_REGNUM] = 0;
1874
      cache->sp_offset += 8;
1875
 
1876
      /* If that's all, return now.  */
1877
      if (current_pc <= pc + 1)
1878
        return current_pc;
1879
 
1880
      /* Check for `movq %rsp, %rbp'.  */
1881
      read_memory (pc + 1, buf, 3);
1882
      if (memcmp (buf, proto, 3) != 0)
1883
        return pc + 1;
1884
 
1885
      /* OK, we actually have a frame.  */
1886
      cache->frameless_p = 0;
1887
      return pc + 4;
1888
    }
1889
 
1890
  return pc;
1891
}
1892
 
1893
/* Return PC of first real instruction.  */
1894
 
1895
static CORE_ADDR
1896
amd64_skip_prologue (struct gdbarch *gdbarch, CORE_ADDR start_pc)
1897
{
1898
  struct amd64_frame_cache cache;
1899
  CORE_ADDR pc;
1900
 
1901
  amd64_init_frame_cache (&cache);
1902
  pc = amd64_analyze_prologue (gdbarch, start_pc, 0xffffffffffffffffLL,
1903
                               &cache);
1904
  if (cache.frameless_p)
1905
    return start_pc;
1906
 
1907
  return pc;
1908
}
1909
 
1910
 
1911
/* Normal frames.  */
1912
 
1913
static struct amd64_frame_cache *
1914
amd64_frame_cache (struct frame_info *this_frame, void **this_cache)
1915
{
1916
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
1917
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
1918
  struct amd64_frame_cache *cache;
1919
  gdb_byte buf[8];
1920
  int i;
1921
 
1922
  if (*this_cache)
1923
    return *this_cache;
1924
 
1925
  cache = amd64_alloc_frame_cache ();
1926
  *this_cache = cache;
1927
 
1928
  cache->pc = get_frame_func (this_frame);
1929
  if (cache->pc != 0)
1930
    amd64_analyze_prologue (gdbarch, cache->pc, get_frame_pc (this_frame),
1931
                            cache);
1932
 
1933
  if (cache->saved_sp_reg != -1)
1934
    {
1935
      /* Stack pointer has been saved.  */
1936
      get_frame_register (this_frame, cache->saved_sp_reg, buf);
1937
      cache->saved_sp = extract_unsigned_integer (buf, 8, byte_order);
1938
    }
1939
 
1940
  if (cache->frameless_p)
1941
    {
1942
      /* We didn't find a valid frame.  If we're at the start of a
         function, or somewhere halfway through its prologue, the
         function's frame probably hasn't been fully set up yet.  Try
         to reconstruct the base address for the stack frame by
         looking at the stack pointer.  For truly "frameless"
         functions this might work too.  */
1948
 
1949
      if (cache->saved_sp_reg != -1)
1950
        {
1951
          /* We're partway through the stack-realignment sequence.  */
1952
          cache->base = ((cache->saved_sp - 8) & 0xfffffffffffffff0LL) - 8;
1953
          cache->saved_regs[AMD64_RIP_REGNUM] = cache->saved_sp - 8;
1954
 
1955
          /* This will be added back below.  */
1956
          cache->saved_regs[AMD64_RIP_REGNUM] -= cache->base;
1957
        }
1958
      else
1959
        {
1960
          get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
1961
          cache->base = extract_unsigned_integer (buf, 8, byte_order)
1962
                        + cache->sp_offset;
1963
        }
1964
    }
1965
  else
1966
    {
1967
      get_frame_register (this_frame, AMD64_RBP_REGNUM, buf);
1968
      cache->base = extract_unsigned_integer (buf, 8, byte_order);
1969
    }
1970
 
1971
  /* Now that we have the base address for the stack frame we can
1972
     calculate the value of %rsp in the calling frame.  */
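  /* Sketch of the standard frame layout assumed here (higher
     addresses towards the top):

        caller's frame ...
        return address        at cache->base + 8   (saved %rip)
        saved %rbp            at cache->base
        locals ...

     so the caller's %rsp just before the "call" was cache->base + 16.  */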
1973
  cache->saved_sp = cache->base + 16;
1974
 
1975
  /* For normal frames, %rip is stored at 8(%rbp).  If we don't have a
     frame we find it at the same offset from the reconstructed base
     address.  If we're partway through realigning the stack, %rip is
     handled differently (see above).  */
1979
  if (!cache->frameless_p || cache->saved_sp_reg == -1)
1980
    cache->saved_regs[AMD64_RIP_REGNUM] = 8;
1981
 
1982
  /* Adjust all the saved registers such that they contain addresses
1983
     instead of offsets.  */
1984
  for (i = 0; i < AMD64_NUM_SAVED_REGS; i++)
1985
    if (cache->saved_regs[i] != -1)
1986
      cache->saved_regs[i] += cache->base;
1987
 
1988
  return cache;
1989
}
1990
 
1991
static void
1992
amd64_frame_this_id (struct frame_info *this_frame, void **this_cache,
1993
                     struct frame_id *this_id)
1994
{
1995
  struct amd64_frame_cache *cache =
1996
    amd64_frame_cache (this_frame, this_cache);
1997
 
1998
  /* This marks the outermost frame.  */
1999
  if (cache->base == 0)
2000
    return;
2001
 
2002
  (*this_id) = frame_id_build (cache->base + 16, cache->pc);
2003
}
2004
 
2005
static struct value *
2006
amd64_frame_prev_register (struct frame_info *this_frame, void **this_cache,
2007
                           int regnum)
2008
{
2009
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
2010
  struct amd64_frame_cache *cache =
2011
    amd64_frame_cache (this_frame, this_cache);
2012
 
2013
  gdb_assert (regnum >= 0);
2014
 
2015
  if (regnum == gdbarch_sp_regnum (gdbarch) && cache->saved_sp)
2016
    return frame_unwind_got_constant (this_frame, regnum, cache->saved_sp);
2017
 
2018
  if (regnum < AMD64_NUM_SAVED_REGS && cache->saved_regs[regnum] != -1)
2019
    return frame_unwind_got_memory (this_frame, regnum,
2020
                                    cache->saved_regs[regnum]);
2021
 
2022
  return frame_unwind_got_register (this_frame, regnum, regnum);
2023
}
2024
 
2025
static const struct frame_unwind amd64_frame_unwind =
2026
{
2027
  NORMAL_FRAME,
2028
  amd64_frame_this_id,
2029
  amd64_frame_prev_register,
2030
  NULL,
2031
  default_frame_sniffer
2032
};
2033
 
2034
 
2035
/* Signal trampolines.  */
2036
 
2037
/* FIXME: kettenis/20030419: Perhaps, we can unify the 32-bit and
2038
   64-bit variants.  This would require using identical frame caches
2039
   on both platforms.  */
2040
 
2041
static struct amd64_frame_cache *
2042
amd64_sigtramp_frame_cache (struct frame_info *this_frame, void **this_cache)
2043
{
2044
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
2045
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2046
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2047
  struct amd64_frame_cache *cache;
2048
  CORE_ADDR addr;
2049
  gdb_byte buf[8];
2050
  int i;
2051
 
2052
  if (*this_cache)
2053
    return *this_cache;
2054
 
2055
  cache = amd64_alloc_frame_cache ();
2056
 
2057
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2058
  cache->base = extract_unsigned_integer (buf, 8, byte_order) - 8;
2059
 
2060
  addr = tdep->sigcontext_addr (this_frame);
2061
  gdb_assert (tdep->sc_reg_offset);
2062
  gdb_assert (tdep->sc_num_regs <= AMD64_NUM_SAVED_REGS);
2063
  for (i = 0; i < tdep->sc_num_regs; i++)
2064
    if (tdep->sc_reg_offset[i] != -1)
2065
      cache->saved_regs[i] = addr + tdep->sc_reg_offset[i];
2066
 
2067
  *this_cache = cache;
2068
  return cache;
2069
}
2070
 
2071
static void
2072
amd64_sigtramp_frame_this_id (struct frame_info *this_frame,
2073
                              void **this_cache, struct frame_id *this_id)
2074
{
2075
  struct amd64_frame_cache *cache =
2076
    amd64_sigtramp_frame_cache (this_frame, this_cache);
2077
 
2078
  (*this_id) = frame_id_build (cache->base + 16, get_frame_pc (this_frame));
2079
}
2080
 
2081
static struct value *
2082
amd64_sigtramp_frame_prev_register (struct frame_info *this_frame,
2083
                                    void **this_cache, int regnum)
2084
{
2085
  /* Make sure we've initialized the cache.  */
2086
  amd64_sigtramp_frame_cache (this_frame, this_cache);
2087
 
2088
  return amd64_frame_prev_register (this_frame, this_cache, regnum);
2089
}
2090
 
2091
static int
2092
amd64_sigtramp_frame_sniffer (const struct frame_unwind *self,
2093
                              struct frame_info *this_frame,
2094
                              void **this_cache)
2095
{
2096
  struct gdbarch_tdep *tdep = gdbarch_tdep (get_frame_arch (this_frame));
2097
 
2098
  /* We shouldn't even bother if we don't have a sigcontext_addr
2099
     handler.  */
2100
  if (tdep->sigcontext_addr == NULL)
2101
    return 0;
2102
 
2103
  if (tdep->sigtramp_p != NULL)
2104
    {
2105
      if (tdep->sigtramp_p (this_frame))
2106
        return 1;
2107
    }
2108
 
2109
  if (tdep->sigtramp_start != 0)
2110
    {
2111
      CORE_ADDR pc = get_frame_pc (this_frame);
2112
 
2113
      gdb_assert (tdep->sigtramp_end != 0);
2114
      if (pc >= tdep->sigtramp_start && pc < tdep->sigtramp_end)
2115
        return 1;
2116
    }
2117
 
2118
  return 0;
2119
}
2120
 
2121
static const struct frame_unwind amd64_sigtramp_frame_unwind =
2122
{
2123
  SIGTRAMP_FRAME,
2124
  amd64_sigtramp_frame_this_id,
2125
  amd64_sigtramp_frame_prev_register,
2126
  NULL,
2127
  amd64_sigtramp_frame_sniffer
2128
};
2129
 
2130
 
2131
static CORE_ADDR
2132
amd64_frame_base_address (struct frame_info *this_frame, void **this_cache)
2133
{
2134
  struct amd64_frame_cache *cache =
2135
    amd64_frame_cache (this_frame, this_cache);
2136
 
2137
  return cache->base;
2138
}
2139
 
2140
static const struct frame_base amd64_frame_base =
2141
{
2142
  &amd64_frame_unwind,
2143
  amd64_frame_base_address,
2144
  amd64_frame_base_address,
2145
  amd64_frame_base_address
2146
};
2147
 
2148
/* Normal frames, but in a function epilogue.  */
2149
 
2150
/* The epilogue is defined here as the 'ret' instruction, which will
   follow any instruction such as 'leave' or 'pop %rbp' that destroys
   the function's stack frame.  */
2153
 
2154
static int
2155
amd64_in_function_epilogue_p (struct gdbarch *gdbarch, CORE_ADDR pc)
2156
{
2157
  gdb_byte insn;
2158
 
2159
  if (target_read_memory (pc, &insn, 1))
2160
    return 0;   /* Can't read memory at pc.  */
2161
 
2162
  if (insn != 0xc3)     /* 'ret' instruction.  */
2163
    return 0;
2164
 
2165
  return 1;
2166
}
2167
 
2168
static int
2169
amd64_epilogue_frame_sniffer (const struct frame_unwind *self,
2170
                              struct frame_info *this_frame,
2171
                              void **this_prologue_cache)
2172
{
2173
  if (frame_relative_level (this_frame) == 0)
2174
    return amd64_in_function_epilogue_p (get_frame_arch (this_frame),
2175
                                         get_frame_pc (this_frame));
2176
  else
2177
    return 0;
2178
}
2179
 
2180
static struct amd64_frame_cache *
2181
amd64_epilogue_frame_cache (struct frame_info *this_frame, void **this_cache)
2182
{
2183
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
2184
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
2185
  struct amd64_frame_cache *cache;
2186
  gdb_byte buf[8];
2187
 
2188
  if (*this_cache)
2189
    return *this_cache;
2190
 
2191
  cache = amd64_alloc_frame_cache ();
2192
  *this_cache = cache;
2193
 
2194
  /* Cache base will be %rsp plus cache->sp_offset (-8).  */
2195
  get_frame_register (this_frame, AMD64_RSP_REGNUM, buf);
2196
  cache->base = extract_unsigned_integer (buf, 8,
2197
                                          byte_order) + cache->sp_offset;
2198
 
2199
  /* Cache pc will be the frame func.  */
2200
  cache->pc = get_frame_pc (this_frame);
2201
 
2202
  /* The saved %rsp will be at cache->base plus 16.  */
2203
  cache->saved_sp = cache->base + 16;
2204
 
2205
  /* The saved %rip will be at cache->base plus 8.  */
2206
  cache->saved_regs[AMD64_RIP_REGNUM] = cache->base + 8;
2207
 
2208
  return cache;
2209
}
2210
 
2211
static void
2212
amd64_epilogue_frame_this_id (struct frame_info *this_frame,
2213
                              void **this_cache,
2214
                              struct frame_id *this_id)
2215
{
2216
  struct amd64_frame_cache *cache = amd64_epilogue_frame_cache (this_frame,
2217
                                                               this_cache);
2218
 
2219
  (*this_id) = frame_id_build (cache->base + 8, cache->pc);
2220
}
2221
 
2222
static const struct frame_unwind amd64_epilogue_frame_unwind =
2223
{
2224
  NORMAL_FRAME,
2225
  amd64_epilogue_frame_this_id,
2226
  amd64_frame_prev_register,
2227
  NULL,
2228
  amd64_epilogue_frame_sniffer
2229
};
2230
 
2231
static struct frame_id
2232
amd64_dummy_id (struct gdbarch *gdbarch, struct frame_info *this_frame)
2233
{
2234
  CORE_ADDR fp;
2235
 
2236
  fp = get_frame_register_unsigned (this_frame, AMD64_RBP_REGNUM);
2237
 
2238
  return frame_id_build (fp + 16, get_frame_pc (this_frame));
2239
}
2240
 
2241
/* 16-byte align the SP per frame requirements.  */
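/* For example, -(CORE_ADDR) 16 is ...fffffff0, so a stack pointer of
   0x7fffffffe468 would be rounded down to 0x7fffffffe460.  Rounding
   down is safe because the stack grows towards lower addresses.  */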
2242
 
2243
static CORE_ADDR
2244
amd64_frame_align (struct gdbarch *gdbarch, CORE_ADDR sp)
2245
{
2246
  return sp & -(CORE_ADDR)16;
2247
}
2248
 
2249
 
2250
/* Supply register REGNUM from the buffer specified by FPREGS and LEN
2251
   in the floating-point register set REGSET to register cache
2252
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */
2253
 
2254
static void
2255
amd64_supply_fpregset (const struct regset *regset, struct regcache *regcache,
2256
                       int regnum, const void *fpregs, size_t len)
2257
{
2258
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2259
 
2260
  gdb_assert (len == tdep->sizeof_fpregset);
2261
  amd64_supply_fxsave (regcache, regnum, fpregs);
2262
}
2263
 
2264
/* Collect register REGNUM from the register cache REGCACHE and store
2265
   it in the buffer specified by FPREGS and LEN as described by the
2266
   floating-point register set REGSET.  If REGNUM is -1, do this for
2267
   all registers in REGSET.  */
2268
 
2269
static void
2270
amd64_collect_fpregset (const struct regset *regset,
2271
                        const struct regcache *regcache,
2272
                        int regnum, void *fpregs, size_t len)
2273
{
2274
  const struct gdbarch_tdep *tdep = gdbarch_tdep (regset->arch);
2275
 
2276
  gdb_assert (len == tdep->sizeof_fpregset);
2277
  amd64_collect_fxsave (regcache, regnum, fpregs);
2278
}
2279
 
2280
/* Similar to amd64_supply_fpregset, but use XSAVE extended state.  */
2281
 
2282
static void
2283
amd64_supply_xstateregset (const struct regset *regset,
2284
                           struct regcache *regcache, int regnum,
2285
                           const void *xstateregs, size_t len)
2286
{
2287
  amd64_supply_xsave (regcache, regnum, xstateregs);
2288
}
2289
 
2290
/* Similar to amd64_collect_fpregset, but use XSAVE extended state.  */
2291
 
2292
static void
2293
amd64_collect_xstateregset (const struct regset *regset,
2294
                            const struct regcache *regcache,
2295
                            int regnum, void *xstateregs, size_t len)
2296
{
2297
  amd64_collect_xsave (regcache, regnum, xstateregs, 1);
2298
}
2299
 
2300
/* Return the appropriate register set for the core section identified
2301
   by SECT_NAME and SECT_SIZE.  */
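/* In GNU/Linux core files these typically show up as ".reg2" for the
   classic FXSAVE floating-point block and ".reg-xstate" for the XSAVE
   extended state; anything else (".reg" in particular) is deferred to
   the generic i386 handler below.  */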
2302
 
2303
static const struct regset *
2304
amd64_regset_from_core_section (struct gdbarch *gdbarch,
2305
                                const char *sect_name, size_t sect_size)
2306
{
2307
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2308
 
2309
  if (strcmp (sect_name, ".reg2") == 0 && sect_size == tdep->sizeof_fpregset)
2310
    {
2311
      if (tdep->fpregset == NULL)
2312
        tdep->fpregset = regset_alloc (gdbarch, amd64_supply_fpregset,
2313
                                       amd64_collect_fpregset);
2314
 
2315
      return tdep->fpregset;
2316
    }
2317
 
2318
  if (strcmp (sect_name, ".reg-xstate") == 0)
2319
    {
2320
      if (tdep->xstateregset == NULL)
2321
        tdep->xstateregset = regset_alloc (gdbarch,
2322
                                           amd64_supply_xstateregset,
2323
                                           amd64_collect_xstateregset);
2324
 
2325
      return tdep->xstateregset;
2326
    }
2327
 
2328
  return i386_regset_from_core_section (gdbarch, sect_name, sect_size);
2329
}
2330
 
2331
 
2332
/* Figure out where the longjmp will land.  Slurp the jmp_buf out of
2333
   %rdi.  We expect its value to be a pointer to the jmp_buf structure
2334
   from which we extract the address that we will land at.  This
2335
   address is copied into PC.  This routine returns non-zero on
2336
   success.  */
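/* Note that jb_pc_offset below is the byte offset of the saved
   program counter within the jmp_buf layout, which differs between
   OSes and C libraries; it is filled in by the OS-specific tdep code,
   and -1 means the layout is unknown.  */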
2337
 
2338
static int
2339
amd64_get_longjmp_target (struct frame_info *frame, CORE_ADDR *pc)
2340
{
2341
  gdb_byte buf[8];
2342
  CORE_ADDR jb_addr;
2343
  struct gdbarch *gdbarch = get_frame_arch (frame);
2344
  int jb_pc_offset = gdbarch_tdep (gdbarch)->jb_pc_offset;
2345
  int len = TYPE_LENGTH (builtin_type (gdbarch)->builtin_func_ptr);
2346
 
2347
  /* If JB_PC_OFFSET is -1, we have no way to find out where the
2348
     longjmp will land.  */
2349
  if (jb_pc_offset == -1)
2350
    return 0;
2351
 
2352
  get_frame_register (frame, AMD64_RDI_REGNUM, buf);
2353
  jb_addr = extract_typed_address
2354
            (buf, builtin_type (gdbarch)->builtin_data_ptr);
2355
  if (target_read_memory (jb_addr + jb_pc_offset, buf, len))
2356
    return 0;
2357
 
2358
  *pc = extract_typed_address (buf, builtin_type (gdbarch)->builtin_func_ptr);
2359
 
2360
  return 1;
2361
}
2362
 
2363
static const int amd64_record_regmap[] =
2364
{
2365
  AMD64_RAX_REGNUM, AMD64_RCX_REGNUM, AMD64_RDX_REGNUM, AMD64_RBX_REGNUM,
2366
  AMD64_RSP_REGNUM, AMD64_RBP_REGNUM, AMD64_RSI_REGNUM, AMD64_RDI_REGNUM,
2367
  AMD64_R8_REGNUM, AMD64_R9_REGNUM, AMD64_R10_REGNUM, AMD64_R11_REGNUM,
2368
  AMD64_R12_REGNUM, AMD64_R13_REGNUM, AMD64_R14_REGNUM, AMD64_R15_REGNUM,
2369
  AMD64_RIP_REGNUM, AMD64_EFLAGS_REGNUM, AMD64_CS_REGNUM, AMD64_SS_REGNUM,
2370
  AMD64_DS_REGNUM, AMD64_ES_REGNUM, AMD64_FS_REGNUM, AMD64_GS_REGNUM
2371
};
2372
 
2373
void
2374
amd64_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
2375
{
2376
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2377
  const struct target_desc *tdesc = info.target_desc;
2378
 
2379
  /* AMD64 generally uses `fxsave' instead of `fsave' for saving its
2380
     floating-point registers.  */
2381
  tdep->sizeof_fpregset = I387_SIZEOF_FXSAVE;
2382
 
2383
  if (! tdesc_has_registers (tdesc))
2384
    tdesc = tdesc_amd64;
2385
  tdep->tdesc = tdesc;
2386
 
2387
  tdep->num_core_regs = AMD64_NUM_GREGS + I387_NUM_REGS;
2388
  tdep->register_names = amd64_register_names;
2389
 
2390
  if (tdesc_find_feature (tdesc, "org.gnu.gdb.i386.avx") != NULL)
2391
    {
2392
      tdep->ymmh_register_names = amd64_ymmh_names;
2393
      tdep->num_ymm_regs = 16;
2394
      tdep->ymm0h_regnum = AMD64_YMM0H_REGNUM;
2395
    }
2396
 
2397
  tdep->num_byte_regs = 20;
2398
  tdep->num_word_regs = 16;
2399
  tdep->num_dword_regs = 16;
2400
  /* Avoid wiring in the MMX registers for now.  */
2401
  tdep->num_mmx_regs = 0;
2402
 
2403
  set_gdbarch_pseudo_register_read (gdbarch,
2404
                                    amd64_pseudo_register_read);
2405
  set_gdbarch_pseudo_register_write (gdbarch,
2406
                                     amd64_pseudo_register_write);
2407
 
2408
  set_tdesc_pseudo_register_name (gdbarch, amd64_pseudo_register_name);
2409
 
2410
  set_gdbarch_register_name (gdbarch, amd64_register_name);
2411
 
2412
  /* AMD64 has an FPU and 16 SSE registers.  */
2413
  tdep->st0_regnum = AMD64_ST0_REGNUM;
2414
  tdep->num_xmm_regs = 16;
2415
 
2416
  /* This is what all the fuss is about.  */
2417
  set_gdbarch_long_bit (gdbarch, 64);
2418
  set_gdbarch_long_long_bit (gdbarch, 64);
2419
  set_gdbarch_ptr_bit (gdbarch, 64);
2420
 
2421
  /* In contrast to the i386, on AMD64 a `long double' actually takes
2422
     up 128 bits, even though it's still based on the i387 extended
2423
     floating-point format which has only 80 significant bits.  */
2424
  set_gdbarch_long_double_bit (gdbarch, 128);
2425
 
2426
  set_gdbarch_num_regs (gdbarch, AMD64_NUM_REGS);
2427
 
2428
  /* Register numbers of various important registers.  */
2429
  set_gdbarch_sp_regnum (gdbarch, AMD64_RSP_REGNUM); /* %rsp */
2430
  set_gdbarch_pc_regnum (gdbarch, AMD64_RIP_REGNUM); /* %rip */
2431
  set_gdbarch_ps_regnum (gdbarch, AMD64_EFLAGS_REGNUM); /* %eflags */
2432
  set_gdbarch_fp0_regnum (gdbarch, AMD64_ST0_REGNUM); /* %st(0) */
2433
 
2434
  /* The "default" register numbering scheme for AMD64 is referred to
2435
     as the "DWARF Register Number Mapping" in the System V psABI.
2436
     The preferred debugging format for all known AMD64 targets is
2437
     actually DWARF2, and GCC doesn't seem to support DWARF (that is
2438
     DWARF-1), but we provide the same mapping just in case.  This
2439
     mapping is also used for stabs, which GCC does support.  */
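  /* The psABI numbering does not simply follow the hardware encoding;
     for example DWARF registers 1 and 2 are %rdx and %rcx, the
     reverse of their machine register numbers, which is why an
     explicit mapping table is needed.  */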
2440
  set_gdbarch_stab_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2441
  set_gdbarch_dwarf2_reg_to_regnum (gdbarch, amd64_dwarf_reg_to_regnum);
2442
 
2443
  /* We don't override SDB_REG_RO_REGNUM, since COFF doesn't seem to
2444
     be in use on any of the supported AMD64 targets.  */
2445
 
2446
  /* Call dummy code.  */
2447
  set_gdbarch_push_dummy_call (gdbarch, amd64_push_dummy_call);
2448
  set_gdbarch_frame_align (gdbarch, amd64_frame_align);
2449
  set_gdbarch_frame_red_zone_size (gdbarch, 128);
2450
  tdep->call_dummy_num_integer_regs =
2451
    ARRAY_SIZE (amd64_dummy_call_integer_regs);
2452
  tdep->call_dummy_integer_regs = amd64_dummy_call_integer_regs;
2453
  tdep->classify = amd64_classify;
2454
 
2455
  set_gdbarch_convert_register_p (gdbarch, i387_convert_register_p);
2456
  set_gdbarch_register_to_value (gdbarch, i387_register_to_value);
2457
  set_gdbarch_value_to_register (gdbarch, i387_value_to_register);
2458
 
2459
  set_gdbarch_return_value (gdbarch, amd64_return_value);
2460
 
2461
  set_gdbarch_skip_prologue (gdbarch, amd64_skip_prologue);
2462
 
2463
  tdep->record_regmap = amd64_record_regmap;
2464
 
2465
  set_gdbarch_dummy_id (gdbarch, amd64_dummy_id);
2466
 
2467
  /* Hook the function epilogue frame unwinder.  This unwinder is
     prepended to the unwinder list, so that it supersedes the other
     unwinders in function epilogues.  */
2470
  frame_unwind_prepend_unwinder (gdbarch, &amd64_epilogue_frame_unwind);
2471
 
2472
  /* Hook the prologue-based frame unwinders.  */
2473
  frame_unwind_append_unwinder (gdbarch, &amd64_sigtramp_frame_unwind);
2474
  frame_unwind_append_unwinder (gdbarch, &amd64_frame_unwind);
2475
  frame_base_set_default (gdbarch, &amd64_frame_base);
2476
 
2477
  /* If we have a register mapping, enable the generic core file support.  */
2478
  if (tdep->gregset_reg_offset)
2479
    set_gdbarch_regset_from_core_section (gdbarch,
2480
                                          amd64_regset_from_core_section);
2481
 
2482
  set_gdbarch_get_longjmp_target (gdbarch, amd64_get_longjmp_target);
2483
 
2484
  set_gdbarch_relocate_instruction (gdbarch, amd64_relocate_instruction);
2485
}
2486
 
2487
/* Provide a prototype to silence -Wmissing-prototypes.  */
2488
void _initialize_amd64_tdep (void);
2489
 
2490
void
2491
_initialize_amd64_tdep (void)
2492
{
2493
  initialize_tdesc_amd64 ();
2494
  initialize_tdesc_amd64_avx ();
2495
}
2496
 
2497
 
2498
/* The 64-bit FXSAVE format differs from the 32-bit format in the
2499
   sense that the instruction pointer and data pointer are simply
2500
   64-bit offsets into the code segment and the data segment instead
2501
   of a selector offset pair.  The functions below store the upper 32
2502
   bits of these pointers (instead of just the 16 bits of the segment
2503
   selector).  */
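/* Per the FXSAVE area layout, bytes 8-15 hold the 64-bit FPU
   instruction pointer and bytes 16-23 the 64-bit FPU operand pointer,
   so "regs + 12" and "regs + 20" below address the upper 32 bits of
   each, which get supplied to the FISEG and FOSEG slots.  */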
2504
 
2505
/* Fill register REGNUM in REGCACHE with the appropriate
2506
   floating-point or SSE register value from *FXSAVE.  If REGNUM is
2507
   -1, do this for all registers.  This function masks off any of the
2508
   reserved bits in *FXSAVE.  */
2509
 
2510
void
2511
amd64_supply_fxsave (struct regcache *regcache, int regnum,
2512
                     const void *fxsave)
2513
{
2514
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2515
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2516
 
2517
  i387_supply_fxsave (regcache, regnum, fxsave);
2518
 
2519
  if (fxsave && gdbarch_ptr_bit (gdbarch) == 64)
2520
    {
2521
      const gdb_byte *regs = fxsave;
2522
 
2523
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2524
        regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2525
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2526
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2527
    }
2528
}
2529
 
2530
/* Similar to amd64_supply_fxsave, but use XSAVE extended state.  */
2531
 
2532
void
2533
amd64_supply_xsave (struct regcache *regcache, int regnum,
2534
                    const void *xsave)
2535
{
2536
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2537
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2538
 
2539
  i387_supply_xsave (regcache, regnum, xsave);
2540
 
2541
  if (xsave && gdbarch_ptr_bit (gdbarch) == 64)
2542
    {
2543
      const gdb_byte *regs = xsave;
2544
 
2545
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2546
        regcache_raw_supply (regcache, I387_FISEG_REGNUM (tdep),
2547
                             regs + 12);
2548
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2549
        regcache_raw_supply (regcache, I387_FOSEG_REGNUM (tdep),
2550
                             regs + 20);
2551
    }
2552
}
2553
 
2554
/* Fill register REGNUM (if it is a floating-point or SSE register) in
2555
   *FXSAVE with the value from REGCACHE.  If REGNUM is -1, do this for
2556
   all registers.  This function doesn't touch any of the reserved
2557
   bits in *FXSAVE.  */
2558
 
2559
void
2560
amd64_collect_fxsave (const struct regcache *regcache, int regnum,
2561
                      void *fxsave)
2562
{
2563
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2564
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2565
  gdb_byte *regs = fxsave;
2566
 
2567
  i387_collect_fxsave (regcache, regnum, fxsave);
2568
 
2569
  if (gdbarch_ptr_bit (gdbarch) == 64)
2570
    {
2571
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2572
        regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep), regs + 12);
2573
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2574
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep), regs + 20);
2575
    }
2576
}
2577
 
2578
/* Similar to amd64_collect_fxsave, but use XSAVE extended state.  */
2579
 
2580
void
2581
amd64_collect_xsave (const struct regcache *regcache, int regnum,
2582
                     void *xsave, int gcore)
2583
{
2584
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2585
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
2586
  gdb_byte *regs = xsave;
2587
 
2588
  i387_collect_xsave (regcache, regnum, xsave, gcore);
2589
 
2590
  if (gdbarch_ptr_bit (gdbarch) == 64)
2591
    {
2592
      if (regnum == -1 || regnum == I387_FISEG_REGNUM (tdep))
2593
        regcache_raw_collect (regcache, I387_FISEG_REGNUM (tdep),
2594
                              regs + 12);
2595
      if (regnum == -1 || regnum == I387_FOSEG_REGNUM (tdep))
2596
        regcache_raw_collect (regcache, I387_FOSEG_REGNUM (tdep),
2597
                              regs + 20);
2598
    }
2599
}
