OpenCores
URL https://opencores.org/ocsvn/openrisc/openrisc/trunk

Subversion Repositories openrisc

[/] [openrisc/] [trunk/] [gnu-stable/] [gdb-7.2/] [gdb/] [gdbserver/] [linux-x86-low.c] - Blame information for rev 330

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 330 jeremybenn
/* GNU/Linux/x86-64 specific low level interface, for the remote server
2
   for GDB.
3
   Copyright (C) 2002, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4
   Free Software Foundation, Inc.
5
 
6
   This file is part of GDB.
7
 
8
   This program is free software; you can redistribute it and/or modify
9
   it under the terms of the GNU General Public License as published by
10
   the Free Software Foundation; either version 3 of the License, or
11
   (at your option) any later version.
12
 
13
   This program is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
   GNU General Public License for more details.
17
 
18
   You should have received a copy of the GNU General Public License
19
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20
 
21
#include <stddef.h>
22
#include <signal.h>
23
#include <limits.h>
24
#include "server.h"
25
#include "linux-low.h"
26
#include "i387-fp.h"
27
#include "i386-low.h"
28
#include "i386-xstate.h"
29
#include "elf/common.h"
30
 
31
#include "gdb_proc_service.h"
32
 
33
/* Defined in auto-generated file i386-linux.c.  */
34
void init_registers_i386_linux (void);
35
/* Defined in auto-generated file amd64-linux.c.  */
36
void init_registers_amd64_linux (void);
37
/* Defined in auto-generated file i386-avx-linux.c.  */
38
void init_registers_i386_avx_linux (void);
39
/* Defined in auto-generated file amd64-avx-linux.c.  */
40
void init_registers_amd64_avx_linux (void);
41
/* Defined in auto-generated file i386-mmx-linux.c.  */
42
void init_registers_i386_mmx_linux (void);
43
 
44
/* Template for a 5-byte x86 relative jump: opcode 0xe9 followed by a
   32-bit displacement (left zero here; filled in by the user).  */
static unsigned char jump_insn[] = { 0xe9, 0, 0, 0, 0 };
45
 
46
/* Backward compatibility for gdb without XML support.  */
47
 
48
static const char *xmltarget_i386_linux_no_xml = "@<target>\
49
<architecture>i386</architecture>\
50
<osabi>GNU/Linux</osabi>\
51
</target>";
52
 
53
#ifdef __x86_64__
54
static const char *xmltarget_amd64_linux_no_xml = "@<target>\
55
<architecture>i386:x86-64</architecture>\
56
<osabi>GNU/Linux</osabi>\
57
</target>";
58
#endif
59
 
60
#include <sys/reg.h>
61
#include <sys/procfs.h>
62
#include <sys/ptrace.h>
63
#include <sys/uio.h>
64
 
65
#ifndef PTRACE_GETREGSET
66
#define PTRACE_GETREGSET        0x4204
67
#endif
68
 
69
#ifndef PTRACE_SETREGSET
70
#define PTRACE_SETREGSET        0x4205
71
#endif
72
 
73
 
74
#ifndef PTRACE_GET_THREAD_AREA
75
#define PTRACE_GET_THREAD_AREA 25
76
#endif
77
 
78
/* This definition comes from prctl.h, but some kernels may not have it.  */
79
#ifndef PTRACE_ARCH_PRCTL
80
#define PTRACE_ARCH_PRCTL      30
81
#endif
82
 
83
/* The following definitions come from prctl.h, but may be absent
84
   for certain configurations.  */
85
#ifndef ARCH_GET_FS
86
#define ARCH_SET_GS 0x1001
87
#define ARCH_SET_FS 0x1002
88
#define ARCH_GET_FS 0x1003
89
#define ARCH_GET_GS 0x1004
90
#endif
91
 
92
/* Per-process arch-specific data we want to keep.  */
93
 
94
struct arch_process_info
{
  /* Software mirror of the hardware debug registers (addresses,
     control, status) for this process.  Threads copy it to the real
     registers lazily, in x86_linux_prepare_to_resume.  */
  struct i386_debug_reg_state debug_reg_state;
};
98
 
99
/* Per-thread arch-specific data we want to keep.  */
100
 
101
struct arch_lwp_info
{
  /* Non-zero if our copy differs from what's recorded in the thread.
     Set by the i386_dr_low_* routines; cleared once the registers are
     written back in x86_linux_prepare_to_resume.  */
  int debug_registers_changed;
};
106
 
107
#ifdef __x86_64__

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.
   Note that the transfer layout uses 64-bit regs.  */
static /*const*/ int i386_regmap[] =
{
  RAX * 8, RCX * 8, RDX * 8, RBX * 8,
  RSP * 8, RBP * 8, RSI * 8, RDI * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8
};

/* Element count of i386_regmap.  Note the sizeof trick only works on
   the array itself, never on a decayed pointer.  */
#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

/* So code below doesn't have to care, i386 or amd64.  */
#define ORIG_EAX ORIG_RAX

/* Byte offsets of each GDB register in the gregset buffer; -1 marks
   registers with no slot there (skipped by x86_fill_gregset and
   x86_store_gregset).  */
static const int x86_64_regmap[] =
{
  RAX * 8, RBX * 8, RCX * 8, RDX * 8,
  RSI * 8, RDI * 8, RBP * 8, RSP * 8,
  R8 * 8, R9 * 8, R10 * 8, R11 * 8,
  R12 * 8, R13 * 8, R14 * 8, R15 * 8,
  RIP * 8, EFLAGS * 8, CS * 8, SS * 8,
  DS * 8, ES * 8, FS * 8, GS * 8,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1,
  -1, -1, -1, -1, -1, -1, -1, -1, -1,
  ORIG_RAX * 8
};

#define X86_64_NUM_REGS (sizeof (x86_64_regmap) / sizeof (x86_64_regmap[0]))

#else /* ! __x86_64__ */

/* Mapping between the general-purpose registers in `struct user'
   format and GDB's register array layout.  */
static /*const*/ int i386_regmap[] =
{
  EAX * 4, ECX * 4, EDX * 4, EBX * 4,
  UESP * 4, EBP * 4, ESI * 4, EDI * 4,
  EIP * 4, EFL * 4, CS * 4, SS * 4,
  DS * 4, ES * 4, FS * 4, GS * 4
};

#define I386_NUM_REGS (sizeof (i386_regmap) / sizeof (i386_regmap[0]))

#endif
157
 
158
/* Called by libthread_db.  */
159
 
160
ps_err_e
ps_get_thread_area (const struct ps_prochandle *ph,
                    lwpid_t lwpid, int idx, void **base)
{
#ifdef __x86_64__
  /* A 64-bit inferior uses ARCH_PRCTL rather than the GDT; IDX is
     then the segment register constant (FS or GS).  */
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      switch (idx)
        {
        case FS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_FS) == 0)
            return PS_OK;
          break;
        case GS:
          if (ptrace (PTRACE_ARCH_PRCTL, lwpid, base, ARCH_GET_GS) == 0)
            return PS_OK;
          break;
        default:
          return PS_BADADDR;
        }
      return PS_ERR;
    }
#endif

  /* 32-bit path: IDX is a GDT entry index; desc[1] is the base
     address field of the fetched descriptor.  */
  {
    unsigned int desc[4];

    if (ptrace (PTRACE_GET_THREAD_AREA, lwpid,
                (void *) (intptr_t) idx, (unsigned long) &desc) < 0)
      return PS_ERR;

    /* NOTE(review): this stores only 32 bits through BASE and leaves
       the rest of the pointer unwritten on LP64 hosts — looks like it
       relies on the 32-bit inferior case only; confirm against
       upstream, which later writes the full pointer.  */
    *(int *)base = desc[1];
    return PS_OK;
  }
}
197
 
198
/* Get the thread area address.  This is used to recognize which
199
   thread is which when tracing with the in-process agent library.  We
200
   don't read anything from the address, and treat it as opaque; it's
201
   the address itself that we assume is unique per-thread.  */
202
 
203
/* Store the thread-area (TLS base) address of thread LWPID in *ADDR.
   Returns 0 on success, -1 on failure.  See the comment above: the
   value is treated as an opaque per-thread identifier.  */
static int
x86_get_thread_area (int lwpid, CORE_ADDR *addr)
{
#ifdef __x86_64__
  int use_64bit = register_size (0) == 8;

  if (use_64bit)
    {
      /* 64-bit inferior: the FS base is the thread area.  */
      void *base;
      if (ptrace (PTRACE_ARCH_PRCTL, lwpid, &base, ARCH_GET_FS) == 0)
        {
          *addr = (CORE_ADDR) (uintptr_t) base;
          return 0;
        }

      return -1;
    }
#endif

  {
    struct lwp_info *lwp = find_lwp_pid (pid_to_ptid (lwpid));
    struct regcache *regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
    unsigned int desc[4];
    ULONGEST gs = 0;
    const int reg_thread_area = 3; /* bits to scale down register value.  */
    int idx;

    collect_register_by_name (regcache, "gs", &gs);

    /* %gs holds a segment selector; shifting right 3 bits yields the
       GDT entry index used by PTRACE_GET_THREAD_AREA.  */
    idx = gs >> reg_thread_area;

    if (ptrace (PTRACE_GET_THREAD_AREA,
                lwpid_of (lwp), (void *) (long) idx, (unsigned long) &desc) < 0)
      return -1;

    /* desc[1] is the descriptor's base-address field.  */
    *addr = desc[1];
    return 0;
  }
}
242
 
243
 
244
 
245
static int
246
i386_cannot_store_register (int regno)
247
{
248
  return regno >= I386_NUM_REGS;
249
}
250
 
251
static int
252
i386_cannot_fetch_register (int regno)
253
{
254
  return regno >= I386_NUM_REGS;
255
}
256
 
257
/* Copy the general-purpose registers from REGCACHE into BUF (a
   gregset), using the regmap offsets for the inferior's word size.  */
static void
x86_fill_gregset (struct regcache *regcache, void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      /* 64-bit inferior: skip entries with no slot (-1).  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          collect_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    collect_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not in i386_regmap; copy it by name.  */
  collect_register_by_name (regcache, "orig_eax",
                            ((char *) buf) + ORIG_EAX * 4);
}
278
 
279
/* Copy the general-purpose registers from BUF (a gregset) into
   REGCACHE; inverse of x86_fill_gregset.  */
static void
x86_store_gregset (struct regcache *regcache, const void *buf)
{
  int i;

#ifdef __x86_64__
  if (register_size (0) == 8)
    {
      /* 64-bit inferior: skip entries with no slot (-1).  */
      for (i = 0; i < X86_64_NUM_REGS; i++)
        if (x86_64_regmap[i] != -1)
          supply_register (regcache, i, ((char *) buf) + x86_64_regmap[i]);
      return;
    }
#endif

  for (i = 0; i < I386_NUM_REGS; i++)
    supply_register (regcache, i, ((char *) buf) + i386_regmap[i]);

  /* orig_eax is not in i386_regmap; copy it by name.  */
  supply_register_by_name (regcache, "orig_eax",
                           ((char *) buf) + ORIG_EAX * 4);
}
300
 
301
/* Copy the FP registers from REGCACHE into BUF, in FXSAVE layout on
   x86-64 and FSAVE layout on i386.  */
static void
x86_fill_fpregset (struct regcache *regcache, void *buf)
{
#ifdef __x86_64__
  i387_cache_to_fxsave (regcache, buf);
#else
  i387_cache_to_fsave (regcache, buf);
#endif
}
310
 
311
/* Copy the FP registers from BUF into REGCACHE; inverse of
   x86_fill_fpregset.  */
static void
x86_store_fpregset (struct regcache *regcache, const void *buf)
{
#ifdef __x86_64__
  i387_fxsave_to_cache (regcache, buf);
#else
  i387_fsave_to_cache (regcache, buf);
#endif
}
320
 
321
#ifndef __x86_64__
322
 
323
static void
324
x86_fill_fpxregset (struct regcache *regcache, void *buf)
325
{
326
  i387_cache_to_fxsave (regcache, buf);
327
}
328
 
329
static void
330
x86_store_fpxregset (struct regcache *regcache, const void *buf)
331
{
332
  i387_fxsave_to_cache (regcache, buf);
333
}
334
 
335
#endif
336
 
337
/* Copy the XSAVE extended state from REGCACHE into BUF, for
   PTRACE_SETREGSET with NT_X86_XSTATE.  */
static void
x86_fill_xstateregset (struct regcache *regcache, void *buf)
{
  i387_cache_to_xsave (regcache, buf);
}
342
 
343
/* Copy the XSAVE extended state from BUF into REGCACHE, after
   PTRACE_GETREGSET with NT_X86_XSTATE.  */
static void
x86_store_xstateregset (struct regcache *regcache, const void *buf)
{
  i387_xsave_to_cache (regcache, buf);
}
348
 
349
/* ??? The non-biarch i386 case stores all the i387 regs twice.
350
   Once in i387_.*fsave.* and once in i387_.*fxsave.*.
351
   This is, presumably, to handle the case where PTRACE_[GS]ETFPXREGS
352
   doesn't work.  IWBN to avoid the duplication in the case where it
353
   does work.  Maybe the arch_setup routine could check whether it works
354
   and update target_regsets accordingly, maybe by moving target_regsets
355
   to linux_target_ops and set the right one there, rather than having to
356
   modify the target_regsets global.  */
357
 
358
struct regset_info target_regsets[] =
{
#ifdef HAVE_PTRACE_GETREGS
  { PTRACE_GETREGS, PTRACE_SETREGS, 0, sizeof (elf_gregset_t),
    GENERAL_REGS,
    x86_fill_gregset, x86_store_gregset },
  /* Size 0 here: the real XSTATE size is filled in at runtime by
     x86_linux_update_xmltarget once XCR0 is known.  */
  { PTRACE_GETREGSET, PTRACE_SETREGSET, NT_X86_XSTATE, 0,
    EXTENDED_REGS, x86_fill_xstateregset, x86_store_xstateregset },
# ifndef __x86_64__
#  ifdef HAVE_PTRACE_GETFPXREGS
  { PTRACE_GETFPXREGS, PTRACE_SETFPXREGS, 0, sizeof (elf_fpxregset_t),
    EXTENDED_REGS,
    x86_fill_fpxregset, x86_store_fpxregset },
#  endif
# endif
  { PTRACE_GETFPREGS, PTRACE_SETFPREGS, 0, sizeof (elf_fpregset_t),
    FP_REGS,
    x86_fill_fpregset, x86_store_fpregset },
#endif /* HAVE_PTRACE_GETREGS */
  /* Terminator.  */
  { 0, 0, 0, -1, -1, NULL, NULL }
};
379
 
380
static CORE_ADDR
381
x86_get_pc (struct regcache *regcache)
382
{
383
  int use_64bit = register_size (0) == 8;
384
 
385
  if (use_64bit)
386
    {
387
      unsigned long pc;
388
      collect_register_by_name (regcache, "rip", &pc);
389
      return (CORE_ADDR) pc;
390
    }
391
  else
392
    {
393
      unsigned int pc;
394
      collect_register_by_name (regcache, "eip", &pc);
395
      return (CORE_ADDR) pc;
396
    }
397
}
398
 
399
static void
400
x86_set_pc (struct regcache *regcache, CORE_ADDR pc)
401
{
402
  int use_64bit = register_size (0) == 8;
403
 
404
  if (use_64bit)
405
    {
406
      unsigned long newpc = pc;
407
      supply_register_by_name (regcache, "rip", &newpc);
408
    }
409
  else
410
    {
411
      unsigned int newpc = pc;
412
      supply_register_by_name (regcache, "eip", &newpc);
413
    }
414
}
415
 
416
/* The canonical x86 software breakpoint: a single INT3 (0xCC) byte.  */
static const unsigned char x86_breakpoint[] = { 0xCC };
#define x86_breakpoint_len 1
418
 
419
static int
420
x86_breakpoint_at (CORE_ADDR pc)
421
{
422
  unsigned char c;
423
 
424
  (*the_target->read_memory) (pc, &c, 1);
425
  if (c == 0xCC)
426
    return 1;
427
 
428
  return 0;
429
}
430
 
431
/* Support for debug registers.  */
432
 
433
static unsigned long
434
x86_linux_dr_get (ptid_t ptid, int regnum)
435
{
436
  int tid;
437
  unsigned long value;
438
 
439
  tid = ptid_get_lwp (ptid);
440
 
441
  errno = 0;
442
  value = ptrace (PTRACE_PEEKUSER, tid,
443
                  offsetof (struct user, u_debugreg[regnum]), 0);
444
  if (errno != 0)
445
    error ("Couldn't read debug register");
446
 
447
  return value;
448
}
449
 
450
static void
451
x86_linux_dr_set (ptid_t ptid, int regnum, unsigned long value)
452
{
453
  int tid;
454
 
455
  tid = ptid_get_lwp (ptid);
456
 
457
  errno = 0;
458
  ptrace (PTRACE_POKEUSER, tid,
459
          offsetof (struct user, u_debugreg[regnum]), value);
460
  if (errno != 0)
461
    error ("Couldn't write debug register");
462
}
463
 
464
/* Update the inferior's debug register REGNUM from STATE.  */
465
 
466
void
467
i386_dr_low_set_addr (const struct i386_debug_reg_state *state, int regnum)
468
{
469
  struct inferior_list_entry *lp;
470
  CORE_ADDR addr;
471
  /* Only need to update the threads of this process.  */
472
  int pid = pid_of (get_thread_lwp (current_inferior));
473
 
474
  if (! (regnum >= 0 && regnum <= DR_LASTADDR - DR_FIRSTADDR))
475
    fatal ("Invalid debug register %d", regnum);
476
 
477
  addr = state->dr_mirror[regnum];
478
 
479
  for (lp = all_lwps.head; lp; lp = lp->next)
480
    {
481
      struct lwp_info *lwp = (struct lwp_info *) lp;
482
 
483
      /* The actual update is done later, we just mark that the register
484
         needs updating.  */
485
      if (pid_of (lwp) == pid)
486
        lwp->arch_private->debug_registers_changed = 1;
487
    }
488
}
489
 
490
/* Update the inferior's DR7 debug control register from STATE.  */
491
 
492
void
i386_dr_low_set_control (const struct i386_debug_reg_state *state)
{
  struct inferior_list_entry *lp;
  /* Only need to update the threads of this process.  */
  int pid = pid_of (get_thread_lwp (current_inferior));

  /* Walk every LWP; those belonging to this process get flagged so
     that x86_linux_prepare_to_resume rewrites DR7 (and the address
     registers) before the thread next runs.  */
  for (lp = all_lwps.head; lp; lp = lp->next)
    {
      struct lwp_info *lwp = (struct lwp_info *) lp;

      /* The actual update is done later, we just mark that the register
         needs updating.  */
      if (pid_of (lwp) == pid)
        lwp->arch_private->debug_registers_changed = 1;
    }
}
509
 
510
/* Get the value of the DR6 debug status register from the inferior
511
   and record it in STATE.  */
512
 
513
void
514
i386_dr_low_get_status (struct i386_debug_reg_state *state)
515
{
516
  struct lwp_info *lwp = get_thread_lwp (current_inferior);
517
  ptid_t ptid = ptid_of (lwp);
518
 
519
  state->dr_status_mirror = x86_linux_dr_get (ptid, DR_STATUS);
520
}
521
 
522
/* Watchpoint support.  */
523
 
524
static int
525
x86_insert_point (char type, CORE_ADDR addr, int len)
526
{
527
  struct process_info *proc = current_process ();
528
  switch (type)
529
    {
530
    case '0':
531
      return set_gdb_breakpoint_at (addr);
532
    case '2':
533
    case '3':
534
    case '4':
535
      return i386_low_insert_watchpoint (&proc->private->arch_private->debug_reg_state,
536
                                         type, addr, len);
537
    default:
538
      /* Unsupported.  */
539
      return 1;
540
    }
541
}
542
 
543
static int
544
x86_remove_point (char type, CORE_ADDR addr, int len)
545
{
546
  struct process_info *proc = current_process ();
547
  switch (type)
548
    {
549
    case '0':
550
      return delete_gdb_breakpoint_at (addr);
551
    case '2':
552
    case '3':
553
    case '4':
554
      return i386_low_remove_watchpoint (&proc->private->arch_private->debug_reg_state,
555
                                         type, addr, len);
556
    default:
557
      /* Unsupported.  */
558
      return 1;
559
    }
560
}
561
 
562
static int
563
x86_stopped_by_watchpoint (void)
564
{
565
  struct process_info *proc = current_process ();
566
  return i386_low_stopped_by_watchpoint (&proc->private->arch_private->debug_reg_state);
567
}
568
 
569
static CORE_ADDR
570
x86_stopped_data_address (void)
571
{
572
  struct process_info *proc = current_process ();
573
  CORE_ADDR addr;
574
  if (i386_low_stopped_data_address (&proc->private->arch_private->debug_reg_state,
575
                                     &addr))
576
    return addr;
577
  return 0;
578
}
579
 
580
/* Called when a new process is created.  */
581
 
582
static struct arch_process_info *
583
x86_linux_new_process (void)
584
{
585
  struct arch_process_info *info = xcalloc (1, sizeof (*info));
586
 
587
  i386_low_init_dregs (&info->debug_reg_state);
588
 
589
  return info;
590
}
591
 
592
/* Called when a new thread is detected.  */
593
 
594
static struct arch_lwp_info *
595
x86_linux_new_thread (void)
596
{
597
  struct arch_lwp_info *info = xcalloc (1, sizeof (*info));
598
 
599
  info->debug_registers_changed = 1;
600
 
601
  return info;
602
}
603
 
604
/* Called when resuming a thread.
605
   If the debug regs have changed, update the thread's copies.  */
606
 
607
static void
x86_linux_prepare_to_resume (struct lwp_info *lwp)
{
  ptid_t ptid = ptid_of (lwp);

  if (lwp->arch_private->debug_registers_changed)
    {
      int i;
      int pid = ptid_get_pid (ptid);
      struct process_info *proc = find_process_pid (pid);
      struct i386_debug_reg_state *state = &proc->private->arch_private->debug_reg_state;

      /* Write the address registers first, then DR7; DR7 enables the
         watchpoints, so the addresses must already be in place.  */
      for (i = DR_FIRSTADDR; i <= DR_LASTADDR; i++)
        x86_linux_dr_set (ptid, i, state->dr_mirror[i]);

      x86_linux_dr_set (ptid, DR_CONTROL, state->dr_control_mirror);

      lwp->arch_private->debug_registers_changed = 0;
    }

  /* Clear DR6 so a stale watchpoint hit isn't reported again.  */
  if (lwp->stopped_by_watchpoint)
    x86_linux_dr_set (ptid, DR_STATUS, 0);
}
630
 
631
/* When GDBSERVER is built as a 64-bit application on linux, the
632
   PTRACE_GETSIGINFO data is always presented in 64-bit layout.  Since
633
   debugging a 32-bit inferior with a 64-bit GDBSERVER should look the same
634
   as debugging it with a 32-bit GDBSERVER, we do the 32-bit <-> 64-bit
635
   conversion in-place ourselves.  */
636
 
637
/* These types below (compat_*) define a siginfo type that is layout
638
   compatible with the siginfo type exported by the 32-bit userspace
639
   support.  */
640
 
641
#ifdef __x86_64__

/* 32-bit-wide replacements for the types whose width differs between
   the 32-bit and 64-bit ABIs.  */
typedef int compat_int_t;
typedef unsigned int compat_uptr_t;

typedef int compat_time_t;
typedef int compat_timer_t;
typedef int compat_clock_t;

struct compat_timeval
{
  compat_time_t tv_sec;
  int tv_usec;
};

typedef union compat_sigval
{
  compat_int_t sival_int;
  compat_uptr_t sival_ptr;
} compat_sigval_t;

typedef struct compat_siginfo
{
  int si_signo;
  int si_errno;
  int si_code;

  union
  {
    /* Pad the union to the 128-byte overall siginfo size, minus the
       three leading ints.  */
    int _pad[((128 / sizeof (int)) - 3)];

    /* kill() */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
    } _kill;

    /* POSIX.1b timers */
    struct
    {
      compat_timer_t _tid;
      int _overrun;
      compat_sigval_t _sigval;
    } _timer;

    /* POSIX.1b signals */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      compat_sigval_t _sigval;
    } _rt;

    /* SIGCHLD */
    struct
    {
      unsigned int _pid;
      unsigned int _uid;
      int _status;
      compat_clock_t _utime;
      compat_clock_t _stime;
    } _sigchld;

    /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
    struct
    {
      unsigned int _addr;
    } _sigfault;

    /* SIGPOLL */
    struct
    {
      int _band;
      int _fd;
    } _sigpoll;
  } _sifields;
} compat_siginfo_t;

/* Accessor shorthands for the union members above.  */
#define cpt_si_pid _sifields._kill._pid
#define cpt_si_uid _sifields._kill._uid
#define cpt_si_timerid _sifields._timer._tid
#define cpt_si_overrun _sifields._timer._overrun
#define cpt_si_status _sifields._sigchld._status
#define cpt_si_utime _sifields._sigchld._utime
#define cpt_si_stime _sifields._sigchld._stime
#define cpt_si_ptr _sifields._rt._sigval.sival_ptr
#define cpt_si_addr _sifields._sigfault._addr
#define cpt_si_band _sifields._sigpoll._band
#define cpt_si_fd _sifields._sigpoll._fd

/* glibc at least up to 2.3.2 doesn't have si_timerid, si_overrun.
   In their place is si_timer1,si_timer2.  */
#ifndef si_timerid
#define si_timerid si_timer1
#endif
#ifndef si_overrun
#define si_overrun si_timer2
#endif
740
 
741
/* Convert a native 64-bit siginfo FROM into the 32-bit compat layout
   TO.  Which fields are meaningful depends on si_code and si_signo,
   so only those are copied; everything else stays zero.  */
static void
compat_siginfo_from_siginfo (compat_siginfo_t *to, siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  /* Negative si_code: copy only the queued payload pointer.  */
  if (to->si_code < 0)
    {
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->cpt_si_pid = from->si_pid;
      to->cpt_si_uid = from->si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->cpt_si_timerid = from->si_timerid;
      to->cpt_si_overrun = from->si_overrun;
      to->cpt_si_ptr = (intptr_t) from->si_ptr;
    }
  else
    {
      /* Otherwise the signal number decides the active union member.  */
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_status = from->si_status;
          to->cpt_si_utime = from->si_utime;
          to->cpt_si_stime = from->si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          /* Pointer truncated to 32 bits for the compat layout.  */
          to->cpt_si_addr = (intptr_t) from->si_addr;
          break;
        case SIGPOLL:
          to->cpt_si_band = from->si_band;
          to->cpt_si_fd = from->si_fd;
          break;
        default:
          to->cpt_si_pid = from->si_pid;
          to->cpt_si_uid = from->si_uid;
          to->cpt_si_ptr = (intptr_t) from->si_ptr;
          break;
        }
    }
}
794
 
795
/* Convert a 32-bit compat siginfo FROM into the native 64-bit layout
   TO; inverse of compat_siginfo_from_siginfo, with the same
   si_code/si_signo-driven field selection.  */
static void
siginfo_from_compat_siginfo (siginfo_t *to, compat_siginfo_t *from)
{
  memset (to, 0, sizeof (*to));

  to->si_signo = from->si_signo;
  to->si_errno = from->si_errno;
  to->si_code = from->si_code;

  /* Negative si_code: copy only the queued payload pointer.  */
  if (to->si_code < 0)
    {
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else if (to->si_code == SI_USER)
    {
      to->si_pid = from->cpt_si_pid;
      to->si_uid = from->cpt_si_uid;
    }
  else if (to->si_code == SI_TIMER)
    {
      to->si_timerid = from->cpt_si_timerid;
      to->si_overrun = from->cpt_si_overrun;
      to->si_ptr = (void *) (intptr_t) from->cpt_si_ptr;
    }
  else
    {
      /* Otherwise the signal number decides the active union member.  */
      switch (to->si_signo)
        {
        case SIGCHLD:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_status = from->cpt_si_status;
          to->si_utime = from->cpt_si_utime;
          to->si_stime = from->cpt_si_stime;
          break;
        case SIGILL:
        case SIGFPE:
        case SIGSEGV:
        case SIGBUS:
          to->si_addr = (void *) (intptr_t) from->cpt_si_addr;
          break;
        case SIGPOLL:
          to->si_band = from->cpt_si_band;
          to->si_fd = from->cpt_si_fd;
          break;
        default:
          to->si_pid = from->cpt_si_pid;
          to->si_uid = from->cpt_si_uid;
          to->si_ptr = (void* ) (intptr_t) from->cpt_si_ptr;
          break;
        }
    }
}
848
 
849
#endif /* __x86_64__ */
850
 
851
/* Convert a native/host siginfo object, into/from the siginfo in the
852
   layout of the inferiors' architecture.  Returns true if any
853
   conversion was done; false otherwise.  If DIRECTION is 1, then copy
854
   from INF to NATIVE.  If DIRECTION is 0, copy from NATIVE to
855
   INF.  */
856
 
857
static int
x86_siginfo_fixup (struct siginfo *native, void *inf, int direction)
{
#ifdef __x86_64__
  /* Is the inferior 32-bit?  If so, then fixup the siginfo object.  */
  if (register_size (0) == 4)
    {
      /* The conversion below overlays the two layouts in place, so
         they must have identical sizes.  */
      if (sizeof (struct siginfo) != sizeof (compat_siginfo_t))
        fatal ("unexpected difference in siginfo");

      /* DIRECTION == 0: native -> inferior; otherwise the reverse.  */
      if (direction == 0)
        compat_siginfo_from_siginfo ((struct compat_siginfo *) inf, native);
      else
        siginfo_from_compat_siginfo (native, (struct compat_siginfo *) inf);

      return 1;
    }
#endif

  /* No conversion needed (64-bit inferior, or non-x86-64 build).  */
  return 0;
}
878
 
879
/* Non-zero once the connected GDB has announced XML target
   description support via qSupported "xmlRegisters=...i386...".  */
static int use_xml;
880
 
881
/* Update gdbserver_xmltarget.  */
882
 
883
/* Recompute gdbserver_xmltarget, the register-cache layout, x86_xcr0
   and the target_regsets sizes for the current inferior.  Probes the
   kernel once (results cached in function-static flags) for
   PTRACE_GETFPXREGS and PTRACE_GETREGSET support, reads XCR0 from the
   XSAVE area, and installs the matching auto-generated register
   description.  */
static void
x86_linux_update_xmltarget (void)
{
  int pid;
  struct regset_info *regset;
  static unsigned long long xcr0;
  static int have_ptrace_getregset = -1;
#if !defined(__x86_64__) && defined(HAVE_PTRACE_GETFPXREGS)
  static int have_ptrace_getfpxregs = -1;
#endif

  if (!current_inferior)
    return;

  /* Before changing the register cache internal layout or the target
     regsets, flush the contents of the current valid caches back to
     the threads.  */
  regcache_invalidate ();

  pid = pid_of (get_thread_lwp (current_inferior));
#ifdef __x86_64__
  /* 8 xmm registers means a 32-bit inferior under a 64-bit server.  */
  if (num_xmm_registers == 8)
    init_registers_i386_linux ();
  else
    init_registers_amd64_linux ();
#else
    {
# ifdef HAVE_PTRACE_GETFPXREGS
      /* Probe once whether PTRACE_GETFPXREGS works on this kernel.  */
      if (have_ptrace_getfpxregs == -1)
        {
          elf_fpxregset_t fpxregs;

          if (ptrace (PTRACE_GETFPXREGS, pid, 0, (int) &fpxregs) < 0)
            {
              have_ptrace_getfpxregs = 0;
              x86_xcr0 = I386_XSTATE_X87_MASK;

              /* Disable PTRACE_GETFPXREGS.  */
              for (regset = target_regsets;
                   regset->fill_function != NULL; regset++)
                if (regset->get_request == PTRACE_GETFPXREGS)
                  {
                    regset->size = 0;
                    break;
                  }
            }
          else
            have_ptrace_getfpxregs = 1;
        }

      /* Without FPX regs, fall back to the x87-only description.  */
      if (!have_ptrace_getfpxregs)
        {
          init_registers_i386_mmx_linux ();
          return;
        }
# endif
      init_registers_i386_linux ();
    }
#endif

  if (!use_xml)
    {
      /* Don't use XML.  */
#ifdef __x86_64__
      if (num_xmm_registers == 8)
        gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
      else
        gdbserver_xmltarget = xmltarget_amd64_linux_no_xml;
#else
      gdbserver_xmltarget = xmltarget_i386_linux_no_xml;
#endif

      x86_xcr0 = I386_XSTATE_SSE_MASK;

      return;
    }

  /* Check if XSAVE extended state is supported.  */
  if (have_ptrace_getregset == -1)
    {
      unsigned long long xstateregs[I386_XSTATE_SSE_SIZE / sizeof (long long)];
      struct iovec iov;

      iov.iov_base = xstateregs;
      iov.iov_len = sizeof (xstateregs);

      /* Check if PTRACE_GETREGSET works.  */
      if (ptrace (PTRACE_GETREGSET, pid, (unsigned int) NT_X86_XSTATE,
                  &iov) < 0)
        {
          have_ptrace_getregset = 0;
          return;
        }
      else
        have_ptrace_getregset = 1;

      /* Get XCR0 from XSAVE extended state at byte 464.  */
      xcr0 = xstateregs[464 / sizeof (long long)];

      /* Use PTRACE_GETREGSET if it is available.  */
      for (regset = target_regsets;
           regset->fill_function != NULL; regset++)
        if (regset->get_request == PTRACE_GETREGSET)
          regset->size = I386_XSTATE_SIZE (xcr0);
        else if (regset->type != GENERAL_REGS)
          regset->size = 0;
    }

  if (have_ptrace_getregset)
    {
      /* AVX is the highest feature we support.  */
      if ((xcr0 & I386_XSTATE_AVX_MASK) == I386_XSTATE_AVX_MASK)
        {
          x86_xcr0 = xcr0;

#ifdef __x86_64__
          /* I386 has 8 xmm regs.  */
          if (num_xmm_registers == 8)
            init_registers_i386_avx_linux ();
          else
            init_registers_amd64_avx_linux ();
#else
          init_registers_i386_avx_linux ();
#endif
        }
    }
}
1010
 
1011
/* Process qSupported query, "xmlRegisters=".  Update the buffer size for
1012
   PTRACE_GETREGSET.  */
1013
 
1014
static void
1015
x86_linux_process_qsupported (const char *query)
1016
{
1017
  /* Return if gdb doesn't support XML.  If gdb sends "xmlRegisters="
1018
     with "i386" in qSupported query, it supports x86 XML target
1019
     descriptions.  */
1020
  use_xml = 0;
1021
  if (query != NULL && strncmp (query, "xmlRegisters=", 13) == 0)
1022
    {
1023
      char *copy = xstrdup (query + 13);
1024
      char *p;
1025
 
1026
      for (p = strtok (copy, ","); p != NULL; p = strtok (NULL, ","))
1027
        {
1028
          if (strcmp (p, "i386") == 0)
1029
            {
1030
              use_xml = 1;
1031
              break;
1032
            }
1033
        }
1034
 
1035
      free (copy);
1036
    }
1037
 
1038
  x86_linux_update_xmltarget ();
1039
}
1040
 
1041
/* Initialize gdbserver for the architecture of the inferior.  */
1042
 
1043
static void
x86_arch_setup (void)
{
#ifdef __x86_64__
  /* Decide 32- vs 64-bit by inspecting the inferior's executable.  */
  int pid = pid_of (get_thread_lwp (current_inferior));
  char *file = linux_child_pid_to_exec_file (pid);
  int use_64bit = elf_64_file_p (file);

  free (file);

  if (use_64bit < 0)
    {
      /* This can only happen if /proc/<pid>/exe is unreadable,
         but "that can't happen" if we've gotten this far.
         Fall through and assume this is a 32-bit program.  */
    }
  else if (use_64bit)
    {
      /* Amd64 doesn't have HAVE_LINUX_USRREGS.  */
      the_low_target.num_regs = -1;
      the_low_target.regmap = NULL;
      the_low_target.cannot_fetch_register = NULL;
      the_low_target.cannot_store_register = NULL;

      /* Amd64 has 16 xmm regs.  */
      num_xmm_registers = 16;

      x86_linux_update_xmltarget ();
      return;
    }
#endif

  /* Ok we have a 32-bit inferior.  */

  the_low_target.num_regs = I386_NUM_REGS;
  the_low_target.regmap = i386_regmap;
  the_low_target.cannot_fetch_register = i386_cannot_fetch_register;
  the_low_target.cannot_store_register = i386_cannot_store_register;

  /* I386 has 8 xmm regs.  */
  num_xmm_registers = 8;

  x86_linux_update_xmltarget ();
}
1087
 
1088
/* Target hook: this backend supports tracepoints (unconditionally).  */

static int
x86_supports_tracepoints (void)
{
  return 1;
}
1093
 
1094
/* Write the LEN bytes at BUF into inferior memory at *TO, then
   advance *TO past them.  Helper used to build up jump pads and
   compiled agent-expression code incrementally.  */

static void
append_insns (CORE_ADDR *to, size_t len, const unsigned char *buf)
{
  write_inferior_memory (*to, buf, len);
  *to += len;
}
1100
 
1101
/* Decode OP, a string of space-separated hexadecimal byte values
   (e.g. "48 83 ec 18"), into raw bytes stored at BUF.  Parsing stops
   at the first token that is not a hex number.  Returns the number of
   bytes written to BUF.  */

static int
push_opcode (unsigned char *buf, char *op)
{
  unsigned char *out = buf;
  char *cursor = op;

  for (;;)
    {
      char *next;
      unsigned long byte = strtoul (cursor, &next, 16);

      /* No characters consumed means there are no more hex tokens.  */
      if (next == cursor)
        break;

      *out++ = byte;
      cursor = next;
    }

  return out - buf;
}
1120
 
1121
#ifdef __x86_64__
1122
 
1123
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   Layout of the generated pad, in order:
     1. push all GPRs + flags + the tracepoint PC (the saved-register
        block the collector will read);
     2. build a collecting_t object {tpoint, thread-area} on the stack;
     3. spin on a cmpxchg loop to take the lock at LOCKADDR;
     4. call COLLECTOR (tpoint, regs);
     5. release the lock, unwind, and restore all registers;
     6. re-emit the displaced original instruction, then jump back to
        TPADDR + ORIG_SIZE.
   On return, *JUMP_ENTRY is advanced to the end of the pad, and
   ADJUSTED_INSN_ADDR/_END bracket the relocated instruction.  */

static int
amd64_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[40];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  /* Need to ensure stack pointer saved first.  */
  buf[i++] = 0x54; /* push %rsp */
  buf[i++] = 0x55; /* push %rbp */
  buf[i++] = 0x57; /* push %rdi */
  buf[i++] = 0x56; /* push %rsi */
  buf[i++] = 0x52; /* push %rdx */
  buf[i++] = 0x51; /* push %rcx */
  buf[i++] = 0x53; /* push %rbx */
  buf[i++] = 0x50; /* push %rax */
  buf[i++] = 0x41; buf[i++] = 0x57; /* push %r15 */
  buf[i++] = 0x41; buf[i++] = 0x56; /* push %r14 */
  buf[i++] = 0x41; buf[i++] = 0x55; /* push %r13 */
  buf[i++] = 0x41; buf[i++] = 0x54; /* push %r12 */
  buf[i++] = 0x41; buf[i++] = 0x53; /* push %r11 */
  buf[i++] = 0x41; buf[i++] = 0x52; /* push %r10 */
  buf[i++] = 0x41; buf[i++] = 0x51; /* push %r9 */
  buf[i++] = 0x41; buf[i++] = 0x50; /* push %r8 */
  buf[i++] = 0x9c; /* pushfq */
  buf[i++] = 0x48; /* movl <addr>,%rdi */
  buf[i++] = 0xbf;
  /* NOTE(review): type-punned store into the byte buffer; works on
     x86 but memcpy (as used below) would be cleaner.  */
  *((unsigned long *)(buf + i)) = (unsigned long) tpaddr;
  i += sizeof (unsigned long);
  buf[i++] = 0x57; /* push %rdi */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 ec 18");    /* sub $0x18,%rsp */
  i += push_opcode (&buf[i], "48 b8");          /* mov <tpoint>,%rax */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 04 24");    /* mov %rax,(%rsp) */
  i += push_opcode (&buf[i],
                    "64 48 8b 04 25 00 00 00 00"); /* mov %fs:0x0,%rax */
  i += push_opcode (&buf[i], "48 89 44 24 08"); /* mov %rax,0x8(%rsp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Loop until we (this thread) own LOCKADDR.  */
  i = 0;
  i += push_opcode (&buf[i], "48 be");          /* movl <lockaddr>,%rsi */
  memcpy (&buf[i], (void *) &lockaddr, 8);
  i += 8;
  i += push_opcode (&buf[i], "48 89 e1");       /* mov %rsp,%rcx */
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 48 0f b1 0e"); /* lock cmpxchg %rcx,(%rsi) */
  i += push_opcode (&buf[i], "48 85 c0");       /* test %rax,%rax */
  i += push_opcode (&buf[i], "75 f4");          /* jne <again> */
  append_insns (&buildaddr, i, buf);

  /* Set up the gdb_collect call.  */
  /* At this point, (stack pointer + 0x18) is the base of our saved
     register block.  */

  i = 0;
  i += push_opcode (&buf[i], "48 89 e6");       /* mov %rsp,%rsi */
  i += push_opcode (&buf[i], "48 83 c6 18");    /* add $0x18,%rsi */

  /* tpoint address may be 64-bit wide.  */
  i += push_opcode (&buf[i], "48 bf");          /* movl <addr>,%rdi */
  memcpy (buf + i, &tpoint, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* The collector function being in the shared library, may be
     >31-bits away off the jump pad.  */
  i = 0;
  i += push_opcode (&buf[i], "48 b8");          /* mov $collector,%rax */
  memcpy (buf + i, &collector, 8);
  i += 8;
  i += push_opcode (&buf[i], "ff d0");          /* callq *%rax */
  append_insns (&buildaddr, i, buf);

  /* Clear the spin-lock.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "48 a3");          /* mov %rax, lockaddr */
  memcpy (buf + i, &lockaddr, 8);
  i += 8;
  append_insns (&buildaddr, i, buf);

  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "48 83 c4 18");    /* add $0x18,%rsp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state: discard the saved PC slot, then pop
     everything in the reverse of the save order above.  */
  i = 0;
  buf[i++] = 0x48; /* add $0x8,%rsp */
  buf[i++] = 0x83;
  buf[i++] = 0xc4;
  buf[i++] = 0x08;
  buf[i++] = 0x9d; /* popfq */
  buf[i++] = 0x41; buf[i++] = 0x58; /* pop %r8 */
  buf[i++] = 0x41; buf[i++] = 0x59; /* pop %r9 */
  buf[i++] = 0x41; buf[i++] = 0x5a; /* pop %r10 */
  buf[i++] = 0x41; buf[i++] = 0x5b; /* pop %r11 */
  buf[i++] = 0x41; buf[i++] = 0x5c; /* pop %r12 */
  buf[i++] = 0x41; buf[i++] = 0x5d; /* pop %r13 */
  buf[i++] = 0x41; buf[i++] = 0x5e; /* pop %r14 */
  buf[i++] = 0x41; buf[i++] = 0x5f; /* pop %r15 */
  buf[i++] = 0x58; /* pop %rax */
  buf[i++] = 0x5b; /* pop %rbx */
  buf[i++] = 0x59; /* pop %rcx */
  buf[i++] = 0x5a; /* pop %rdx */
  buf[i++] = 0x5e; /* pop %rsi */
  buf[i++] = 0x5f; /* pop %rdi */
  buf[i++] = 0x5d; /* pop %rbp */
  buf[i++] = 0x5c; /* pop %rsp */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Finally, write a jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1284
 
1285
#endif /* __x86_64__ */
1286
 
1287
/* Build a jump pad that saves registers and calls a collection
   function.  Writes a jump instruction to the jump pad to
   JJUMPAD_INSN.  The caller is responsible to write it in at the
   tracepoint address.

   32-bit counterpart of amd64_install_fast_tracepoint_jump_pad:
   saves registers (pushad + flags + segment registers + the
   tracepoint PC), builds a collecting_t on the stack, takes the
   spin-lock at LOCKADDR, calls COLLECTOR, releases the lock,
   restores state, re-emits the displaced instruction, and jumps
   back to TPADDR + ORIG_SIZE.  */

static int
i386_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                       CORE_ADDR collector,
                                       CORE_ADDR lockaddr,
                                       ULONGEST orig_size,
                                       CORE_ADDR *jump_entry,
                                       unsigned char *jjump_pad_insn,
                                       ULONGEST *jjump_pad_insn_size,
                                       CORE_ADDR *adjusted_insn_addr,
                                       CORE_ADDR *adjusted_insn_addr_end)
{
  unsigned char buf[0x100];
  int i, offset;
  CORE_ADDR buildaddr = *jump_entry;

  /* Build the jump pad.  */

  /* First, do tracepoint data collection.  Save registers.  */
  i = 0;
  buf[i++] = 0x60; /* pushad */
  buf[i++] = 0x68; /* push tpaddr aka $pc */
  /* NOTE(review): type-punned store into the byte buffer; works on
     x86 but memcpy (as used below) would be cleaner.  */
  *((int *)(buf + i)) = (int) tpaddr;
  i += 4;
  buf[i++] = 0x9c; /* pushf */
  buf[i++] = 0x1e; /* push %ds */
  buf[i++] = 0x06; /* push %es */
  buf[i++] = 0x0f; /* push %fs */
  buf[i++] = 0xa0;
  buf[i++] = 0x0f; /* push %gs */
  buf[i++] = 0xa8;
  buf[i++] = 0x16; /* push %ss */
  buf[i++] = 0x0e; /* push %cs */
  append_insns (&buildaddr, i, buf);

  /* Stack space for the collecting_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub    $0x8,%esp */

  /* Build the object.  */
  i += push_opcode (&buf[i], "b8");             /* mov    <tpoint>,%eax */
  memcpy (buf + i, &tpoint, 4);
  i += 4;
  i += push_opcode (&buf[i], "89 04 24");          /* mov %eax,(%esp) */

  i += push_opcode (&buf[i], "65 a1 00 00 00 00"); /* mov %gs:0x0,%eax */
  i += push_opcode (&buf[i], "89 44 24 04");       /* mov %eax,0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  /* spin-lock.  Note this is using cmpxchg, which leaves i386 behind.
     If we cared for it, this could be using xchg alternatively.  */

  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "f0 0f b1 25");    /* lock cmpxchg
                                                   %esp,<lockaddr> */
  memcpy (&buf[i], (void *) &lockaddr, 4);
  i += 4;
  i += push_opcode (&buf[i], "85 c0");          /* test %eax,%eax */
  i += push_opcode (&buf[i], "75 f2");          /* jne <again> */
  append_insns (&buildaddr, i, buf);


  /* Set up arguments to the gdb_collect call.  */
  i = 0;
  i += push_opcode (&buf[i], "89 e0");          /* mov %esp,%eax */
  i += push_opcode (&buf[i], "83 c0 08");       /* add $0x08,%eax */
  i += push_opcode (&buf[i], "89 44 24 fc");    /* mov %eax,-0x4(%esp) */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "83 ec 08");       /* sub $0x8,%esp */
  append_insns (&buildaddr, i, buf);

  i = 0;
  i += push_opcode (&buf[i], "c7 04 24");       /* movl <addr>,(%esp) */
  memcpy (&buf[i], (void *) &tpoint, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);

  /* Call the collector; a rel32 call always reaches on 32-bit.  */
  buf[0] = 0xe8; /* call <reladdr> */
  offset = collector - (buildaddr + sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  /* Clean up after the call.  */
  buf[0] = 0x83; /* add $0x8,%esp */
  buf[1] = 0xc4;
  buf[2] = 0x08;
  append_insns (&buildaddr, 3, buf);


  /* Clear the spin-lock.  This would need the LOCK prefix on older
     broken archs.  */
  i = 0;
  i += push_opcode (&buf[i], "31 c0");          /* xor %eax,%eax */
  i += push_opcode (&buf[i], "a3");             /* mov %eax, lockaddr */
  memcpy (buf + i, &lockaddr, 4);
  i += 4;
  append_insns (&buildaddr, i, buf);


  /* Remove stack that had been used for the collect_t object.  */
  i = 0;
  i += push_opcode (&buf[i], "83 c4 08");       /* add $0x08,%esp */
  append_insns (&buildaddr, i, buf);

  /* Restore register state in reverse of the save order.  */
  i = 0;
  buf[i++] = 0x83; /* add $0x4,%esp (no pop of %cs, assume unchanged) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x17; /* pop %ss */
  buf[i++] = 0x0f; /* pop %gs */
  buf[i++] = 0xa9;
  buf[i++] = 0x0f; /* pop %fs */
  buf[i++] = 0xa1;
  buf[i++] = 0x07; /* pop %es */
  buf[i++] = 0x1f; /* pop %ds */
  buf[i++] = 0x9d; /* popf */
  buf[i++] = 0x83; /* add $0x4,%esp (pop of tpaddr aka $pc) */
  buf[i++] = 0xc4;
  buf[i++] = 0x04;
  buf[i++] = 0x61; /* popad */
  append_insns (&buildaddr, i, buf);

  /* Now, adjust the original instruction to execute in the jump
     pad.  */
  *adjusted_insn_addr = buildaddr;
  relocate_instruction (&buildaddr, tpaddr);
  *adjusted_insn_addr_end = buildaddr;

  /* Write the jump back to the program.  */
  offset = (tpaddr + orig_size) - (buildaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, sizeof (jump_insn), buf);

  /* The jump pad is now built.  Wire in a jump to our jump pad.  This
     is always done last (by our caller actually), so that we can
     install fast tracepoints with threads running.  This relies on
     the agent's atomic write support.  */
  offset = *jump_entry - (tpaddr + sizeof (jump_insn));
  memcpy (buf, jump_insn, sizeof (jump_insn));
  memcpy (buf + 1, &offset, 4);
  memcpy (jjump_pad_insn, buf, sizeof (jump_insn));
  *jjump_pad_insn_size = sizeof (jump_insn);

  /* Return the end address of our pad.  */
  *jump_entry = buildaddr;

  return 0;
}
1442
 
1443
/* Target hook: install a fast tracepoint jump pad.  Dispatches to the
   amd64 or i386 builder depending on the register size of the current
   target description (8 bytes means a 64-bit inferior).  */

static int
x86_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                      CORE_ADDR collector,
                                      CORE_ADDR lockaddr,
                                      ULONGEST orig_size,
                                      CORE_ADDR *jump_entry,
                                      unsigned char *jjump_pad_insn,
                                      ULONGEST *jjump_pad_insn_size,
                                      CORE_ADDR *adjusted_insn_addr,
                                      CORE_ADDR *adjusted_insn_addr_end)
{
#ifdef __x86_64__
  if (register_size (0) == 8)
    return amd64_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                   collector, lockaddr,
                                                   orig_size, jump_entry,
                                                   jjump_pad_insn,
                                                   jjump_pad_insn_size,
                                                   adjusted_insn_addr,
                                                   adjusted_insn_addr_end);
#endif

  return i386_install_fast_tracepoint_jump_pad (tpoint, tpaddr,
                                                collector, lockaddr,
                                                orig_size, jump_entry,
                                                jjump_pad_insn,
                                                jjump_pad_insn_size,
                                                adjusted_insn_addr,
                                                adjusted_insn_addr_end);
}
1473
 
1474
/* Append LEN bytes of machine code at START to the compiled
   agent-expression code pad, advancing the global current_insn_ptr.
   This is the sink for the EMIT_ASM templates below.  */

static void
add_insns (unsigned char *start, int len)
{
  CORE_ADDR buildaddr = current_insn_ptr;

  if (debug_threads)
    fprintf (stderr, "Adding %d bytes of insn at %s\n",
             len, paddress (buildaddr));

  append_insns (&buildaddr, len, start);
  current_insn_ptr = buildaddr;
}
1486
 
1487
/* Our general strategy for emitting code is to avoid specifying raw
   bytes whenever possible, and instead copy a block of inline asm
   that is embedded in the function.  This is a little messy, because
   we need to keep the compiler from discarding what looks like dead
   code, plus suppress various warnings.  */

/* EMIT_ASM copies the machine code of the inline-asm snippet INSNS
   into the code pad at current_insn_ptr.  The snippet is bracketed by
   the labels start_NAME/end_NAME; add_insns copies the bytes between
   them, and the leading "jmp end_NAME" ensures the host never falls
   into the template itself.  */

#define EMIT_ASM(NAME, INSNS)                                           \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ ("jmp end_" #NAME "\n"                            \
               "\t" "start_" #NAME ":"                                  \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":");                                  \
    } while (0)

#ifdef __x86_64__

/* Same as EMIT_ASM, but assemble INSNS as 32-bit code, for compiling
   agent expressions for a 32-bit inferior from a 64-bit gdbserver.  */

#define EMIT_ASM32(NAME,INSNS)                                          \
  do                                                                    \
    {                                                                   \
      extern unsigned char start_ ## NAME, end_ ## NAME;                \
      add_insns (&start_ ## NAME, &end_ ## NAME - &start_ ## NAME);     \
      __asm__ (".code32\n"                                              \
               "\t" "jmp end_" #NAME "\n"                               \
               "\t" "start_" #NAME ":\n"                                \
               "\t" INSNS "\n"                                          \
               "\t" "end_" #NAME ":\n"                                  \
               ".code64\n");                                            \
    } while (0)

#else

/* On a 32-bit host, 32-bit emission is just the plain emitter.  */
#define EMIT_ASM32(NAME,INSNS) EMIT_ASM(NAME,INSNS)

#endif
1524
 
1525
#ifdef __x86_64__
1526
 
1527
/* Emit the prologue of a compiled agent expression.  The generated
   function receives the raw register block in %rdi and the result
   slot pointer in %rsi; both are spilled into the new frame
   (-8(%rbp) and -16(%rbp) respectively) for later use.  */

static void
amd64_emit_prologue (void)
{
  EMIT_ASM (amd64_prologue,
            "pushq %rbp\n\t"
            "movq %rsp,%rbp\n\t"
            "sub $0x20,%rsp\n\t"
            "movq %rdi,-8(%rbp)\n\t"
            "movq %rsi,-16(%rbp)");
}


/* Emit the epilogue: store the final top-of-stack value (%rax)
   through the result pointer saved at -16(%rbp), return 0.  */

static void
amd64_emit_epilogue (void)
{
  EMIT_ASM (amd64_epilogue,
            "movq -16(%rbp),%rdi\n\t"
            "movq %rax,(%rdi)\n\t"
            "xor %rax,%rax\n\t"
            "leave\n\t"
            "ret");
}

/* Emit TOP = NEXT + TOP, popping NEXT off the expression stack.  */

static void
amd64_emit_add (void)
{
  EMIT_ASM (amd64_add,
            "add (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

/* Emit TOP = NEXT - TOP; the result is popped back into %rax.  */

static void
amd64_emit_sub (void)
{
  EMIT_ASM (amd64_sub,
            "sub %rax,(%rsp)\n\t"
            "pop %rax");
}
1565
 
1566
/* Unimplemented agent-expression operations: set emit_error so the
   caller falls back to interpreting the bytecode instead of running
   compiled code.  */

static void
amd64_emit_mul (void)
{
  emit_error = 1;
}

static void
amd64_emit_lsh (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
amd64_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
1589
 
1590
/* Emit a sign-extension of the low ARG bits of the top-of-stack
   value (%rax) to the full 64 bits.  Unsupported widths flag
   emit_error.  */

static void
amd64_emit_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_ext_8,
                "cbtw\n\t"
                "cwtl\n\t"
                "cltq");
      break;
    case 16:
      EMIT_ASM (amd64_ext_16,
                "cwtl\n\t"
                "cltq");
      break;
    case 32:
      EMIT_ASM (amd64_ext_32,
                "cltq");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit logical negation: TOP = (TOP == 0) ? 1 : 0.  */

static void
amd64_emit_log_not (void)
{
  EMIT_ASM (amd64_log_not,
            "test %rax,%rax\n\t"
            "sete %cl\n\t"
            "movzbq %cl,%rax");
}

/* Emit TOP = NEXT & TOP, popping NEXT.  */

static void
amd64_emit_bit_and (void)
{
  EMIT_ASM (amd64_and,
            "and (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

/* Emit TOP = NEXT | TOP, popping NEXT.  */

static void
amd64_emit_bit_or (void)
{
  EMIT_ASM (amd64_or,
            "or (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

/* Emit TOP = NEXT ^ TOP, popping NEXT.  */

static void
amd64_emit_bit_xor (void)
{
  EMIT_ASM (amd64_xor,
            "xor (%rsp),%rax\n\t"
            "lea 0x8(%rsp),%rsp");
}

/* Emit bitwise complement: TOP = ~TOP (xor with all-ones).  */

static void
amd64_emit_bit_not (void)
{
  EMIT_ASM (amd64_bit_not,
            "xorq $0xffffffffffffffff,%rax");
}
1654
 
1655
/* Emit TOP = (NEXT == TOP) ? 1 : 0, popping NEXT.  */

static void
amd64_emit_equal (void)
{
  EMIT_ASM (amd64_equal,
            "cmp %rax,(%rsp)\n\t"
            "je .Lamd64_equal_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_equal_end\n\t"
            ".Lamd64_equal_true:\n\t"
            "mov $0x1,%rax\n\t"
            ".Lamd64_equal_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

/* Emit TOP = (NEXT < TOP), signed comparison, popping NEXT.  */

static void
amd64_emit_less_signed (void)
{
  EMIT_ASM (amd64_less_signed,
            "cmp %rax,(%rsp)\n\t"
            "jl .Lamd64_less_signed_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_signed_end\n\t"
            ".Lamd64_less_signed_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_signed_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

/* Emit TOP = (NEXT < TOP), unsigned comparison, popping NEXT.  */

static void
amd64_emit_less_unsigned (void)
{
  EMIT_ASM (amd64_less_unsigned,
            "cmp %rax,(%rsp)\n\t"
            "jb .Lamd64_less_unsigned_true\n\t"
            "xor %rax,%rax\n\t"
            "jmp .Lamd64_less_unsigned_end\n\t"
            ".Lamd64_less_unsigned_true:\n\t"
            "mov $1,%rax\n\t"
            ".Lamd64_less_unsigned_end:\n\t"
            "lea 0x8(%rsp),%rsp");
}

/* Emit a SIZE-byte memory fetch: replace the address in %rax with the
   value it points to, zero-extended for sub-register widths except
   SIZE == 1/2 which only overwrite the low bits (callers follow with
   zero/sign extension as needed).  */

static void
amd64_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM (amd64_ref1,
                "movb (%rax),%al");
      break;
    case 2:
      EMIT_ASM (amd64_ref2,
                "movw (%rax),%ax");
      break;
    case 4:
      EMIT_ASM (amd64_ref4,
                "movl (%rax),%eax");
      break;
    case 8:
      EMIT_ASM (amd64_ref8,
                "movq (%rax),%rax");
      break;
    }
}
1720
 
1721
/* Emit a conditional branch: pop the stack top and emit a "jne"
   whose 4-byte displacement is left zero, to be patched later by
   amd64_write_goto_address.  *OFFSET_P/*SIZE_P report where in the
   emitted sequence the displacement lives (byte 10, 4 bytes).  */

static void
amd64_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_if_goto,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "cmp $0,%rcx\n\t"
            ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 10;
  if (size_p)
    *size_p = 4;
}

/* Emit an unconditional "jmp rel32" with a zero displacement to be
   patched later; the displacement is at byte 1, 4 bytes wide.  */

static void
amd64_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM (amd64_goto,
            ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
1745
 
1746
static void
1747
amd64_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
1748
{
1749
  int diff = (to - (from + size));
1750
  unsigned char buf[sizeof (int)];
1751
 
1752
  if (size != 4)
1753
    {
1754
      emit_error = 1;
1755
      return;
1756
    }
1757
 
1758
  memcpy (buf, &diff, sizeof (int));
1759
  write_inferior_memory (from, buf, sizeof (int));
1760
}
1761
 
1762
static void
1763
amd64_emit_const (LONGEST num)
1764
{
1765
  unsigned char buf[16];
1766
  int i;
1767
  CORE_ADDR buildaddr = current_insn_ptr;
1768
 
1769
  i = 0;
1770
  buf[i++] = 0x48;  buf[i++] = 0xb8; /* mov $<n>,%rax */
1771
  *((LONGEST *) (&buf[i])) = num;
1772
  i += 8;
1773
  append_insns (&buildaddr, i, buf);
1774
  current_insn_ptr = buildaddr;
1775
}
1776
 
1777
static void
1778
amd64_emit_call (CORE_ADDR fn)
1779
{
1780
  unsigned char buf[16];
1781
  int i;
1782
  CORE_ADDR buildaddr;
1783
  LONGEST offset64;
1784
 
1785
  /* The destination function being in the shared library, may be
1786
     >31-bits away off the compiled code pad.  */
1787
 
1788
  buildaddr = current_insn_ptr;
1789
 
1790
  offset64 = fn - (buildaddr + 1 /* call op */ + 4 /* 32-bit offset */);
1791
 
1792
  i = 0;
1793
 
1794
  if (offset64 > INT_MAX || offset64 < INT_MIN)
1795
    {
1796
      /* Offset is too large for a call.  Use callq, but that requires
1797
         a register, so avoid it if possible.  Use r10, since it is
1798
         call-clobbered, we don't have to push/pop it.  */
1799
      buf[i++] = 0x48; /* mov $fn,%r10 */
1800
      buf[i++] = 0xba;
1801
      memcpy (buf + i, &fn, 8);
1802
      i += 8;
1803
      buf[i++] = 0xff; /* callq *%r10 */
1804
      buf[i++] = 0xd2;
1805
    }
1806
  else
1807
    {
1808
      int offset32 = offset64; /* we know we can't overflow here.  */
1809
      memcpy (buf + i, &offset32, 4);
1810
      i += 4;
1811
    }
1812
 
1813
  append_insns (&buildaddr, i, buf);
1814
  current_insn_ptr = buildaddr;
1815
}
1816
 
1817
static void
1818
amd64_emit_reg (int reg)
1819
{
1820
  unsigned char buf[16];
1821
  int i;
1822
  CORE_ADDR buildaddr;
1823
 
1824
  /* Assume raw_regs is still in %rdi.  */
1825
  buildaddr = current_insn_ptr;
1826
  i = 0;
1827
  buf[i++] = 0xbe; /* mov $<n>,%esi */
1828
  *((int *) (&buf[i])) = reg;
1829
  i += 4;
1830
  append_insns (&buildaddr, i, buf);
1831
  current_insn_ptr = buildaddr;
1832
  amd64_emit_call (get_raw_reg_func_addr ());
1833
}
1834
 
1835
/* Emit a pop of the expression stack into %rax (the stack top).  */

static void
amd64_emit_pop (void)
{
  EMIT_ASM (amd64_pop,
            "pop %rax");
}

/* Emit a push of %rax, spilling the cached stack top to memory.  */

static void
amd64_emit_stack_flush (void)
{
  EMIT_ASM (amd64_stack_flush,
            "push %rax");
}

/* Emit zero-extension of the low ARG bits of the stack top (%rax);
   unsupported widths flag emit_error.  */

static void
amd64_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM (amd64_zero_ext_8,
                "and $0xff,%rax");
      break;
    case 16:
      EMIT_ASM (amd64_zero_ext_16,
                "and $0xffff,%rax");
      break;
    case 32:
      EMIT_ASM (amd64_zero_ext_32,
                "mov $0xffffffff,%rcx\n\t"
                "and %rcx,%rax");
      break;
    default:
      emit_error = 1;
    }
}

/* Emit a swap of the two top stack entries (%rax and the in-memory
   slot below it).  */

static void
amd64_emit_swap (void)
{
  EMIT_ASM (amd64_swap,
            "mov %rax,%rcx\n\t"
            "pop %rax\n\t"
            "push %rcx");
}
1880
 
1881
/* Emit code to discard N 8-byte entries from the expression stack
   ("lea N*8(%rsp),%rsp").  */

static void
amd64_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x48; /* lea $<n>(%rsp),%rsp */
  buf[i++] = 0x8d;
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  /* This only handles adjustments up to 16, but we don't expect any more.  */
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
1898
 
1899
/* FN's prototype is `LONGEST(*fn)(int)'.  */
1900
 
1901
static void
1902
amd64_emit_int_call_1 (CORE_ADDR fn, int arg1)
1903
{
1904
  unsigned char buf[16];
1905
  int i;
1906
  CORE_ADDR buildaddr;
1907
 
1908
  buildaddr = current_insn_ptr;
1909
  i = 0;
1910
  buf[i++] = 0xbf; /* movl $<n>,%edi */
1911
  *((int *) (&buf[i])) = arg1;
1912
  i += 4;
1913
  append_insns (&buildaddr, i, buf);
1914
  current_insn_ptr = buildaddr;
1915
  amd64_emit_call (fn);
1916
}
1917
 
1918
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */
1919
 
1920
static void
1921
amd64_emit_void_call_2 (CORE_ADDR fn, int arg1)
1922
{
1923
  unsigned char buf[16];
1924
  int i;
1925
  CORE_ADDR buildaddr;
1926
 
1927
  buildaddr = current_insn_ptr;
1928
  i = 0;
1929
  buf[i++] = 0xbf; /* movl $<n>,%edi */
1930
  *((int *) (&buf[i])) = arg1;
1931
  i += 4;
1932
  append_insns (&buildaddr, i, buf);
1933
  current_insn_ptr = buildaddr;
1934
  EMIT_ASM (amd64_void_call_2_a,
1935
            /* Save away a copy of the stack top.  */
1936
            "push %rax\n\t"
1937
            /* Also pass top as the second argument.  */
1938
            "mov %rax,%rsi");
1939
  amd64_emit_call (fn);
1940
  EMIT_ASM (amd64_void_call_2_b,
1941
            /* Restore the stack top, %rax may have been trashed.  */
1942
            "pop %rax");
1943
}
1944
 
1945
/* The emit_ops vector used to compile agent expressions to native
   amd64 code.  Operations implemented as emit_error stubs (mul and
   the shifts) make the caller fall back to bytecode
   interpretation.  */

struct emit_ops amd64_emit_ops =
  {
    amd64_emit_prologue,
    amd64_emit_epilogue,
    amd64_emit_add,
    amd64_emit_sub,
    amd64_emit_mul,
    amd64_emit_lsh,
    amd64_emit_rsh_signed,
    amd64_emit_rsh_unsigned,
    amd64_emit_ext,
    amd64_emit_log_not,
    amd64_emit_bit_and,
    amd64_emit_bit_or,
    amd64_emit_bit_xor,
    amd64_emit_bit_not,
    amd64_emit_equal,
    amd64_emit_less_signed,
    amd64_emit_less_unsigned,
    amd64_emit_ref,
    amd64_emit_if_goto,
    amd64_emit_goto,
    amd64_write_goto_address,
    amd64_emit_const,
    amd64_emit_call,
    amd64_emit_reg,
    amd64_emit_pop,
    amd64_emit_stack_flush,
    amd64_emit_zero_ext,
    amd64_emit_swap,
    amd64_emit_stack_adjust,
    amd64_emit_int_call_1,
    amd64_emit_void_call_2
  };
1979
 
1980
#endif /* __x86_64__ */
1981
 
1982
/* Emit the i386 compiled-expression prologue.  The 64-bit stack-top
   value is kept in the %eax (low) / %ebx (high) register pair
   throughout.  */

static void
i386_emit_prologue (void)
{
  EMIT_ASM32 (i386_prologue,
            "push %ebp\n\t"
            "mov %esp,%ebp");
  /* At this point, the raw regs base address is at 8(%ebp), and the
     value pointer is at 12(%ebp).  */
}

/* Emit the epilogue: store the 64-bit result (%eax:%ebx) through the
   value pointer at 12(%ebp) and return 0.  */

static void
i386_emit_epilogue (void)
{
  EMIT_ASM32 (i386_epilogue,
            "mov 12(%ebp),%ecx\n\t"
            "mov %eax,(%ecx)\n\t"
            "mov %ebx,0x4(%ecx)\n\t"
            "xor %eax,%eax\n\t"
            "pop %ebp\n\t"
            "ret");
}

/* Emit 64-bit TOP = NEXT + TOP using add/adc on the register pair,
   popping NEXT.  */

static void
i386_emit_add (void)
{
  EMIT_ASM32 (i386_add,
            "add (%esp),%eax\n\t"
            "adc 0x4(%esp),%ebx\n\t"
            "lea 0x8(%esp),%esp");
}

/* Emit 64-bit TOP = NEXT - TOP using sub/sbb, popping the result
   into the %eax:%ebx pair.  */

static void
i386_emit_sub (void)
{
  EMIT_ASM32 (i386_sub,
            "subl %eax,(%esp)\n\t"
            "sbbl %ebx,4(%esp)\n\t"
            "pop %eax\n\t"
            "pop %ebx\n\t");
}
2022
 
2023
/* Unimplemented i386 agent-expression operations: set emit_error so
   the caller falls back to bytecode interpretation.  */

static void
i386_emit_mul (void)
{
  emit_error = 1;
}

static void
i386_emit_lsh (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_signed (void)
{
  emit_error = 1;
}

static void
i386_emit_rsh_unsigned (void)
{
  emit_error = 1;
}
2046
 
2047
static void
2048
i386_emit_ext (int arg)
2049
{
2050
  switch (arg)
2051
    {
2052
    case 8:
2053
      EMIT_ASM32 (i386_ext_8,
2054
                "cbtw\n\t"
2055
                "cwtl\n\t"
2056
                "movl %eax,%ebx\n\t"
2057
                "sarl $31,%ebx");
2058
      break;
2059
    case 16:
2060
      EMIT_ASM32 (i386_ext_16,
2061
                "cwtl\n\t"
2062
                "movl %eax,%ebx\n\t"
2063
                "sarl $31,%ebx");
2064
      break;
2065
    case 32:
2066
      EMIT_ASM32 (i386_ext_32,
2067
                "movl %eax,%ebx\n\t"
2068
                "sarl $31,%ebx");
2069
      break;
2070
    default:
2071
      emit_error = 1;
2072
    }
2073
}
2074
 
2075
/* Emit code for logical NOT: replace the 64-bit top of stack with 1
   if it is zero, 0 otherwise.  Both halves are OR'd together to test
   the full 64-bit value; the boolean lands in %eax with %ebx
   cleared.  */

static void
i386_emit_log_not (void)
{
  EMIT_ASM32 (i386_log_not,
	      "or %ebx,%eax\n\t"
	      "test %eax,%eax\n\t"
	      "sete %cl\n\t"
	      "xor %ebx,%ebx\n\t"
	      "movzbl %cl,%eax");
}
2085
 
2086
/* Emit code to bitwise-AND the top of stack (%eax/%ebx) with the
   next entry on the CPU stack, then drop that entry.  */

static void
i386_emit_bit_and (void)
{
  EMIT_ASM32 (i386_and,
	      "and (%esp),%eax\n\t"
	      "and 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2094
 
2095
/* Emit code to bitwise-OR the top of stack (%eax/%ebx) with the
   next entry on the CPU stack, then drop that entry.  */

static void
i386_emit_bit_or (void)
{
  EMIT_ASM32 (i386_or,
	      "or (%esp),%eax\n\t"
	      "or 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2103
 
2104
/* Emit code to bitwise-XOR the top of stack (%eax/%ebx) with the
   next entry on the CPU stack, then drop that entry.  */

static void
i386_emit_bit_xor (void)
{
  EMIT_ASM32 (i386_xor,
	      "xor (%esp),%eax\n\t"
	      "xor 0x4(%esp),%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2112
 
2113
/* Emit code for bitwise NOT of the 64-bit top of stack, done by
   XOR'ing each half of the %eax/%ebx pair with all-ones.  */

static void
i386_emit_bit_not (void)
{
  EMIT_ASM32 (i386_bit_not,
	      "xor $0xffffffff,%eax\n\t"
	      "xor $0xffffffff,%ebx\n\t");
}
2120
 
2121
/* Emit code for 64-bit equality: compare the top of stack
   (%eax/%ebx) with the next entry on the CPU stack (high words
   first, then low words), leave 1 in %eax if equal and 0 otherwise
   (%ebx cleared), and drop the consumed entry.  */

static void
i386_emit_equal (void)
{
  EMIT_ASM32 (i386_equal,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jne .Li386_equal_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "je .Li386_equal_true\n\t"
	      ".Li386_equal_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_equal_end\n\t"
	      ".Li386_equal_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_equal_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2138
 
2139
/* Emit code for 64-bit signed less-than: test whether the second
   stack entry (on the CPU stack) is less than the top of stack
   (%eax/%ebx).  High words are compared signed (jl); on a tie the
   low words decide.  Result is 1/0 in %eax, %ebx cleared, and the
   consumed entry is dropped.  */

static void
i386_emit_less_signed (void)
{
  EMIT_ASM32 (i386_less_signed,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      "jne .Li386_less_signed_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jl .Li386_less_signed_true\n\t"
	      ".Li386_less_signed_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_signed_end\n\t"
	      ".Li386_less_signed_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_signed_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2157
 
2158
/* Emit code for 64-bit unsigned less-than: same structure as the
   signed variant, but using jb (unsigned below) on both the high and
   low word comparisons.  Result is 1/0 in %eax, %ebx cleared, and
   the consumed entry is dropped.  Note the low-word comparison must
   be unsigned even for the signed operator -- here both are jb.  */

static void
i386_emit_less_unsigned (void)
{
  EMIT_ASM32 (i386_less_unsigned,
	      "cmpl %ebx,4(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      "jne .Li386_less_unsigned_false\n\t"
	      "cmpl %eax,(%esp)\n\t"
	      "jb .Li386_less_unsigned_true\n\t"
	      ".Li386_less_unsigned_false:\n\t"
	      "xor %eax,%eax\n\t"
	      "jmp .Li386_less_unsigned_end\n\t"
	      ".Li386_less_unsigned_true:\n\t"
	      "mov $1,%eax\n\t"
	      ".Li386_less_unsigned_end:\n\t"
	      "xor %ebx,%ebx\n\t"
	      "lea 0x8(%esp),%esp");
}
2176
 
2177
/* Emit code to replace the top of stack -- treated as an address in
   %eax -- with the SIZE-byte value it points to.  For SIZE 8 the
   high word goes to %ebx.  For SIZE 1 and 2 only %al/%ax are
   written; the remaining bits of %eax and all of %ebx are left
   untouched (presumably a following ext/zero_ext cleans them up --
   confirm against the bytecode compiler).  Unhandled sizes emit
   nothing.  */

static void
i386_emit_ref (int size)
{
  switch (size)
    {
    case 1:
      EMIT_ASM32 (i386_ref1,
		  "movb (%eax),%al");
      break;
    case 2:
      EMIT_ASM32 (i386_ref2,
		  "movw (%eax),%ax");
      break;
    case 4:
      EMIT_ASM32 (i386_ref4,
		  "movl (%eax),%eax");
      break;
    case 8:
      EMIT_ASM32 (i386_ref8,
		  "movl 4(%eax),%ebx\n\t"
		  "movl (%eax),%eax");
      break;
    }
}
2201
 
2202
/* Emit code for a conditional goto: if the 64-bit top of stack is
   non-zero, jump to a destination patched in later; either way the
   next entry is popped into %eax/%ebx first.  The jne is hand-coded
   as raw bytes (0f 85 rel32) so the operand layout is fixed
   regardless of assembler choices.  Report through *OFFSET_P and
   *SIZE_P where the 4-byte displacement lives (11 bytes in: 2+2 for
   the movs/or, 1+1 for the pops, 3 for cmpl, 2 for the 0f 85
   opcode), for i386_write_goto_address to patch.  */

static void
i386_emit_if_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_if_goto,
	      "mov %eax,%ecx\n\t"
	      "or %ebx,%ecx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "cmpl $0,%ecx\n\t"
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0x0f, 0x85, 0x0, 0x0, 0x0, 0x0");

  if (offset_p)
    *offset_p = 11; /* be sure that this matches the sequence above */
  if (size_p)
    *size_p = 4;
}
2219
 
2220
/* Emit an unconditional goto as a hand-coded jmp rel32 (e9 + 4-byte
   displacement, initially zero).  Report via *OFFSET_P/*SIZE_P that
   the displacement starts 1 byte in and is 4 bytes wide, so
   i386_write_goto_address can patch it once the target is known.  */

static void
i386_emit_goto (int *offset_p, int *size_p)
{
  EMIT_ASM32 (i386_goto,
	      /* Don't trust the assembler to choose the right jump */
	      ".byte 0xe9, 0x0, 0x0, 0x0, 0x0");
  if (offset_p)
    *offset_p = 1;
  if (size_p)
    *size_p = 4;
}
2231
 
2232
/* Patch a previously emitted jump: write at FROM the PC-relative
   displacement that reaches TO, given that the displacement field is
   SIZE bytes and the CPU computes the target relative to the end of
   the field (hence TO - (FROM + SIZE)).  Only 4-byte displacements
   are supported; anything else flags emit_error.  */

static void
i386_write_goto_address (CORE_ADDR from, CORE_ADDR to, int size)
{
  int diff = (to - (from + size));
  unsigned char buf[sizeof (int)];

  /* We're only doing 4-byte sizes at the moment.  */
  if (size != 4)
    {
      emit_error = 1;
      return;
    }

  memcpy (buf, &diff, sizeof (int));
  write_inferior_memory (from, buf, sizeof (int));
}
2248
 
2249
static void
2250
i386_emit_const (LONGEST num)
2251
{
2252
  unsigned char buf[16];
2253
  int i, hi;
2254
  CORE_ADDR buildaddr = current_insn_ptr;
2255
 
2256
  i = 0;
2257
  buf[i++] = 0xb8; /* mov $<n>,%eax */
2258
  *((int *) (&buf[i])) = (num & 0xffffffff);
2259
  i += 4;
2260
  hi = ((num >> 32) & 0xffffffff);
2261
  if (hi)
2262
    {
2263
      buf[i++] = 0xbb; /* mov $<n>,%ebx */
2264
      *((int *) (&buf[i])) = hi;
2265
      i += 4;
2266
    }
2267
  else
2268
    {
2269
      buf[i++] = 0x31; buf[i++] = 0xdb; /* xor %ebx,%ebx */
2270
    }
2271
  append_insns (&buildaddr, i, buf);
2272
  current_insn_ptr = buildaddr;
2273
}
2274
 
2275
/* Emit a 5-byte PC-relative call (e8 rel32) to the function at FN,
   appended at current_insn_ptr, which is advanced past it.  The
   displacement is relative to the end of the instruction, hence the
   "+ 5".  NOTE(review): FN is truncated to `int'; fine for a 32-bit
   inferior where code addresses fit in 32 bits.  */

static void
i386_emit_call (CORE_ADDR fn)
{
  unsigned char buf[16];
  int i, offset;
  CORE_ADDR buildaddr;

  buildaddr = current_insn_ptr;
  i = 0;
  buf[i++] = 0xe8; /* call <reladdr> */
  offset = ((int) fn) - (buildaddr + 5);
  memcpy (buf + 1, &offset, 4);
  append_insns (&buildaddr, 5, buf);
  current_insn_ptr = buildaddr;
}
2290
 
2291
static void
2292
i386_emit_reg (int reg)
2293
{
2294
  unsigned char buf[16];
2295
  int i;
2296
  CORE_ADDR buildaddr;
2297
 
2298
  EMIT_ASM32 (i386_reg_a,
2299
            "sub $0x8,%esp");
2300
  buildaddr = current_insn_ptr;
2301
  i = 0;
2302
  buf[i++] = 0xb8; /* mov $<n>,%eax */
2303
  *((int *) (&buf[i])) = reg;
2304
  i += 4;
2305
  append_insns (&buildaddr, i, buf);
2306
  current_insn_ptr = buildaddr;
2307
  EMIT_ASM32 (i386_reg_b,
2308
            "mov %eax,4(%esp)\n\t"
2309
            "mov 8(%ebp),%eax\n\t"
2310
            "mov %eax,(%esp)");
2311
  i386_emit_call (get_raw_reg_func_addr ());
2312
  EMIT_ASM32 (i386_reg_c,
2313
            "xor %ebx,%ebx\n\t"
2314
            "lea 0x8(%esp),%esp");
2315
}
2316
 
2317
/* Emit code to discard the current top of stack by popping the next
   CPU-stack entry into the %eax/%ebx pair.  */

static void
i386_emit_pop (void)
{
  EMIT_ASM32 (i386_pop,
	      "pop %eax\n\t"
	      "pop %ebx");
}
2324
 
2325
/* Emit code to spill the top-of-stack pair onto the CPU stack (high
   word first so the low word ends up at the lower address), making
   %eax/%ebx free to receive a new top of stack.  */

static void
i386_emit_stack_flush (void)
{
  EMIT_ASM32 (i386_stack_flush,
	      "push %ebx\n\t"
	      "push %eax");
}
2332
 
2333
/* Emit code to zero-extend the top of stack from ARG bits (8, 16 or
   32) to the full 64-bit %eax/%ebx pair by masking %eax and clearing
   %ebx.  Any other width flags emit_error.  */

static void
i386_emit_zero_ext (int arg)
{
  switch (arg)
    {
    case 8:
      EMIT_ASM32 (i386_zero_ext_8,
		  "and $0xff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 16:
      EMIT_ASM32 (i386_zero_ext_16,
		  "and $0xffff,%eax\n\t"
		  "xor %ebx,%ebx");
      break;
    case 32:
      EMIT_ASM32 (i386_zero_ext_32,
		  "xor %ebx,%ebx");
      break;
    default:
      emit_error = 1;
    }
}
2356
 
2357
/* Emit code to exchange the top two stack entries: stash the
   %eax/%ebx pair in %ecx/%edx, pop the second entry into %eax/%ebx,
   then push the stashed pair back (high word first).  */

static void
i386_emit_swap (void)
{
  EMIT_ASM32 (i386_swap,
	      "mov %eax,%ecx\n\t"
	      "mov %ebx,%edx\n\t"
	      "pop %eax\n\t"
	      "pop %ebx\n\t"
	      "push %edx\n\t"
	      "push %ecx");
}
2368
 
2369
/* Emit code to drop N 8-byte entries from the CPU stack, encoded as
   `lea n*8(%esp),%esp' with an 8-bit displacement (lea does not
   clobber flags, unlike add).  NOTE(review): the single displacement
   byte assumes n*8 fits in [-128, 127]; callers appear to pass only
   small counts -- confirm.  */

static void
i386_emit_stack_adjust (int n)
{
  unsigned char buf[16];
  int i;
  CORE_ADDR buildaddr = current_insn_ptr;

  i = 0;
  buf[i++] = 0x8d; /* lea $<n>(%esp),%esp */
  buf[i++] = 0x64;
  buf[i++] = 0x24;
  buf[i++] = n * 8;
  append_insns (&buildaddr, i, buf);
  current_insn_ptr = buildaddr;
}
2384
 
2385
/* FN's prototype is `LONGEST(*fn)(int)'.  */
2386
 
2387
static void
2388
i386_emit_int_call_1 (CORE_ADDR fn, int arg1)
2389
{
2390
  unsigned char buf[16];
2391
  int i;
2392
  CORE_ADDR buildaddr;
2393
 
2394
  EMIT_ASM32 (i386_int_call_1_a,
2395
            /* Reserve a bit of stack space.  */
2396
            "sub $0x8,%esp");
2397
  /* Put the one argument on the stack.  */
2398
  buildaddr = current_insn_ptr;
2399
  i = 0;
2400
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2401
  buf[i++] = 0x04;
2402
  buf[i++] = 0x24;
2403
  *((int *) (&buf[i])) = arg1;
2404
  i += 4;
2405
  append_insns (&buildaddr, i, buf);
2406
  current_insn_ptr = buildaddr;
2407
  i386_emit_call (fn);
2408
  EMIT_ASM32 (i386_int_call_1_c,
2409
            "mov %edx,%ebx\n\t"
2410
            "lea 0x8(%esp),%esp");
2411
}
2412
 
2413
/* FN's prototype is `void(*fn)(int,LONGEST)'.  */
2414
 
2415
static void
2416
i386_emit_void_call_2 (CORE_ADDR fn, int arg1)
2417
{
2418
  unsigned char buf[16];
2419
  int i;
2420
  CORE_ADDR buildaddr;
2421
 
2422
  EMIT_ASM32 (i386_void_call_2_a,
2423
            /* Preserve %eax only; we don't have to worry about %ebx.  */
2424
            "push %eax\n\t"
2425
            /* Reserve a bit of stack space for arguments.  */
2426
            "sub $0x10,%esp\n\t"
2427
            /* Copy "top" to the second argument position.  (Note that
2428
               we can't assume function won't scribble on its
2429
               arguments, so don't try to restore from this.)  */
2430
            "mov %eax,4(%esp)\n\t"
2431
            "mov %ebx,8(%esp)");
2432
  /* Put the first argument on the stack.  */
2433
  buildaddr = current_insn_ptr;
2434
  i = 0;
2435
  buf[i++] = 0xc7;  /* movl $<arg1>,(%esp) */
2436
  buf[i++] = 0x04;
2437
  buf[i++] = 0x24;
2438
  *((int *) (&buf[i])) = arg1;
2439
  i += 4;
2440
  append_insns (&buildaddr, i, buf);
2441
  current_insn_ptr = buildaddr;
2442
  i386_emit_call (fn);
2443
  EMIT_ASM32 (i386_void_call_2_b,
2444
            "lea 0x10(%esp),%esp\n\t"
2445
            /* Restore original stack top.  */
2446
            "pop %eax");
2447
}
2448
 
2449
/* Table of the i386 code emitters defined above, one per bytecode
   operation -- presumably consumed by the expression-to-native
   compiler; selected by x86_emit_ops below.  */

struct emit_ops i386_emit_ops =
  {
    i386_emit_prologue,
    i386_emit_epilogue,
    i386_emit_add,
    i386_emit_sub,
    i386_emit_mul,
    i386_emit_lsh,
    i386_emit_rsh_signed,
    i386_emit_rsh_unsigned,
    i386_emit_ext,
    i386_emit_log_not,
    i386_emit_bit_and,
    i386_emit_bit_or,
    i386_emit_bit_xor,
    i386_emit_bit_not,
    i386_emit_equal,
    i386_emit_less_signed,
    i386_emit_less_unsigned,
    i386_emit_ref,
    i386_emit_if_goto,
    i386_emit_goto,
    i386_write_goto_address,
    i386_emit_const,
    i386_emit_call,
    i386_emit_reg,
    i386_emit_pop,
    i386_emit_stack_flush,
    i386_emit_zero_ext,
    i386_emit_swap,
    i386_emit_stack_adjust,
    i386_emit_int_call_1,
    i386_emit_void_call_2
  };
2483
 
2484
 
2485
static struct emit_ops *
2486
x86_emit_ops (void)
2487
{
2488
#ifdef __x86_64__
2489
  int use_64bit = register_size (0) == 8;
2490
 
2491
  if (use_64bit)
2492
    return &amd64_emit_ops;
2493
  else
2494
#endif
2495
    return &i386_emit_ops;
2496
}
2497
 
2498
/* This is initialized assuming an amd64 target.
2499
   x86_arch_setup will correct it for i386 or amd64 targets.  */
2500
 
2501
/* Target-specific hooks consumed by the generic linux-low layer;
   NULL entries fall back to generic behavior.  Field meanings are
   defined by struct linux_target_ops in linux-low.h -- the
   positional notes below are best-effort, confirm against that
   header.  */
struct linux_target_ops the_low_target =
{
  x86_arch_setup,
  -1,	/* presumably the register count, chosen at arch-setup time -- confirm */
  NULL,
  NULL,
  NULL,
  x86_get_pc,
  x86_set_pc,
  x86_breakpoint,
  x86_breakpoint_len,
  NULL,
  1,	/* presumably decr_pc_after_break: int3 leaves PC past the trap -- confirm */
  x86_breakpoint_at,
  /* Hardware breakpoint/watchpoint hooks.  */
  x86_insert_point,
  x86_remove_point,
  x86_stopped_by_watchpoint,
  x86_stopped_data_address,
  /* collect_ptrace_register/supply_ptrace_register are not needed in the
     native i386 case (no registers smaller than an xfer unit), and are not
     used in the biarch case (HAVE_LINUX_USRREGS is not defined).  */
  NULL,
  NULL,
  /* need to fix up i386 siginfo if host is amd64 */
  x86_siginfo_fixup,
  x86_linux_new_process,
  x86_linux_new_thread,
  x86_linux_prepare_to_resume,
  x86_linux_process_qsupported,
  /* Fast tracepoint support.  */
  x86_supports_tracepoints,
  x86_get_thread_area,
  x86_install_fast_tracepoint_jump_pad,
  x86_emit_ops
};

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.