/* Internal interfaces for the GNU/Linux specific target code for gdbserver.
   Copyright (C) 2002, 2004, 2005, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifdef HAVE_THREAD_DB_H
#include <thread_db.h>
#endif

#include "gdb_proc_service.h"

#ifdef HAVE_LINUX_REGSETS
typedef void (*regset_fill_func) (struct regcache *, void *);
typedef void (*regset_store_func) (struct regcache *, const void *);
enum regset_type {
  GENERAL_REGS,
  FP_REGS,
  EXTENDED_REGS,
};

struct regset_info
{
  int get_request, set_request;
  /* If NT_TYPE isn't 0, it will be passed to ptrace as the 3rd
     argument and the 4th argument should be "const struct iovec *".  */
  int nt_type;
  int size;
  enum regset_type type;
  regset_fill_func fill_function;
  regset_store_func store_function;
};
extern struct regset_info target_regsets[];
#endif
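
/* Illustrative sketch only, not part of this interface: an architecture
   that enables HAVE_LINUX_REGSETS provides the target_regsets table from
   its own linux-*-low.c file.  An x86-style entry might look roughly like
   the following; the callback names, the PTRACE_GETREGS/PTRACE_SETREGS
   requests and the size -1 terminator entry are assumptions made for the
   example, not requirements stated by this header.

     static void arch_fill_gregset (struct regcache *regcache, void *buf);
     static void arch_store_gregset (struct regcache *regcache,
                                     const void *buf);

     struct regset_info target_regsets[] = {
       { PTRACE_GETREGS, PTRACE_SETREGS, 0,
         sizeof (struct user_regs_struct), GENERAL_REGS,
         arch_fill_gregset, arch_store_gregset },
       { 0, 0, 0, -1, GENERAL_REGS, NULL, NULL }
     };  */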

struct siginfo;

struct process_info_private
{
  /* Arch-specific additions.  */
  struct arch_process_info *arch_private;

  /* libthread_db-specific additions.  Not NULL if this process has loaded
     thread_db, and it is active.  */
  struct thread_db *thread_db;
};

struct lwp_info;

struct linux_target_ops
{
  /* Architecture-specific setup.  */
  void (*arch_setup) (void);

  int num_regs;
  int *regmap;
  int (*cannot_fetch_register) (int);

  /* Returns 0 if we can store the register, 1 if we can not
     store the register, and 2 if failure to store the register
     is acceptable.  */
  int (*cannot_store_register) (int);
  CORE_ADDR (*get_pc) (struct regcache *regcache);
  void (*set_pc) (struct regcache *regcache, CORE_ADDR newpc);
  const unsigned char *breakpoint;
  int breakpoint_len;
  CORE_ADDR (*breakpoint_reinsert_addr) (void);

  int decr_pc_after_break;
  int (*breakpoint_at) (CORE_ADDR pc);

  /* Breakpoint and watchpoint related functions.  See target.h for
     comments.  */
  int (*insert_point) (char type, CORE_ADDR addr, int len);
  int (*remove_point) (char type, CORE_ADDR addr, int len);
  int (*stopped_by_watchpoint) (void);
  CORE_ADDR (*stopped_data_address) (void);

  /* Hooks to reformat register data for PEEKUSR/POKEUSR (in particular
     for registers smaller than an xfer unit).  */
  void (*collect_ptrace_register) (struct regcache *regcache,
                                   int regno, char *buf);
  void (*supply_ptrace_register) (struct regcache *regcache,
                                  int regno, const char *buf);

  /* Hook to convert from target format to ptrace format and back.
     Returns true if any conversion was done; false otherwise.
     If DIRECTION is 1, then copy from INF to NATIVE.
     If DIRECTION is 0, copy from NATIVE to INF.  */
  int (*siginfo_fixup) (struct siginfo *native, void *inf, int direction);

  /* Hook to call when a new process is created or attached to.
     If extra per-process architecture-specific data is needed,
     allocate it here.  */
  struct arch_process_info * (*new_process) (void);

  /* Hook to call when a new thread is detected.
     If extra per-thread architecture-specific data is needed,
     allocate it here.  */
  struct arch_lwp_info * (*new_thread) (void);

  /* Hook to call prior to resuming a thread.  */
  void (*prepare_to_resume) (struct lwp_info *);

  /* Hook to support target specific qSupported.  */
  void (*process_qsupported) (const char *);

  /* Returns true if the low target supports tracepoints.  */
  int (*supports_tracepoints) (void);

  /* Fill ADDRP with the thread area address of LWPID.  Returns 0 on
     success, -1 on failure.  */
  int (*get_thread_area) (int lwpid, CORE_ADDR *addrp);

  /* Install a fast tracepoint jump pad.  See target.h for
     comments.  */
  int (*install_fast_tracepoint_jump_pad) (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                           CORE_ADDR collector,
                                           CORE_ADDR lockaddr,
                                           ULONGEST orig_size,
                                           CORE_ADDR *jump_entry,
                                           unsigned char *jjump_pad_insn,
                                           ULONGEST *jjump_pad_insn_size,
                                           CORE_ADDR *adjusted_insn_addr,
                                           CORE_ADDR *adjusted_insn_addr_end);

  /* Return the bytecode operations vector for the current inferior.
     Returns NULL if bytecode compilation is not supported.  */
  struct emit_ops *(*emit_ops) (void);
};

extern struct linux_target_ops the_low_target;
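
/* Illustrative sketch only: each architecture defines the_low_target in
   its own linux-*-low.c.  The identifiers below (the arch_* names and
   ARCH_NUM_REGS) are hypothetical, and designated initializers are used
   purely for readability; hooks a port does not implement are assumed to
   be left NULL and skipped by the generic code.

     struct linux_target_ops the_low_target = {
       .arch_setup = arch_arch_setup,
       .num_regs = ARCH_NUM_REGS,
       .regmap = arch_regmap,
       .cannot_fetch_register = arch_cannot_fetch_register,
       .cannot_store_register = arch_cannot_store_register,
       .get_pc = arch_get_pc,
       .set_pc = arch_set_pc,
       .breakpoint = arch_breakpoint,
       .breakpoint_len = sizeof (arch_breakpoint),
       .breakpoint_at = arch_breakpoint_at,
     };  */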

#define ptid_of(proc) ((proc)->head.id)
#define pid_of(proc) ptid_get_pid ((proc)->head.id)
#define lwpid_of(proc) ptid_get_lwp ((proc)->head.id)

#define get_lwp(inf) ((struct lwp_info *)(inf))
#define get_thread_lwp(thr) (get_lwp (inferior_target_data (thr)))
#define get_lwp_thread(proc) ((struct thread_info *)                     \
                              find_inferior_id (&all_threads,            \
                                                get_lwp (proc)->head.id))
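
/* Usage sketch, for illustration only: given a "struct thread_info
   *thread" from the generic inferior lists, the accessors above can be
   combined like this (the variable names are hypothetical):

     struct lwp_info *lwp = get_thread_lwp (thread);
     int pid = pid_of (lwp);
     long lwpid = lwpid_of (lwp);
     struct thread_info *same_thread = get_lwp_thread (lwp);  */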

struct lwp_info
{
  struct inferior_list_entry head;

  /* If this flag is set, the next SIGSTOP will be ignored (the
     process will be immediately resumed).  This means that either we
     sent the SIGSTOP to it ourselves and got some other pending event
     (so the SIGSTOP is still pending), or that we stopped the
     inferior implicitly via PTRACE_ATTACH and have not waited for it
     yet.  */
  int stop_expected;

  /* When this is true, we shall not try to resume this thread, even
     if last_resume_kind isn't resume_stop.  */
  int suspended;

  /* If this flag is set, the lwp is known to be stopped right now (stop
     event already received in a wait()).  */
  int stopped;

  /* If this flag is set, the lwp is known to be dead already (exit
     event already received in a wait(), and is cached in
     status_pending).  */
  int dead;

  /* When stopped is set, the last wait status recorded for this lwp.  */
  int last_status;

  /* When stopped is set, this is where the lwp stopped, with
     decr_pc_after_break already accounted for.  */
  CORE_ADDR stop_pc;

  /* If this flag is set, STATUS_PENDING is a waitstatus that has not yet
     been reported.  */
  int status_pending_p;
  int status_pending;

  /* STOPPED_BY_WATCHPOINT is non-zero if this LWP stopped with a data
     watchpoint trap.  */
  int stopped_by_watchpoint;

  /* On architectures where it is possible to know the data address of
     a triggered watchpoint, STOPPED_DATA_ADDRESS is non-zero, and
     contains such data address.  Only valid if STOPPED_BY_WATCHPOINT
     is true.  */
  CORE_ADDR stopped_data_address;

  /* If this is non-zero, it is a breakpoint to be reinserted at our next
     stop (SIGTRAP stops only).  */
  CORE_ADDR bp_reinsert;

  /* If this flag is set, the last continue operation at the ptrace
     level on this process was a single-step.  */
  int stepping;

  /* If this flag is set, we need to set the event request flags the
     next time we see this LWP stop.  */
  int must_set_ptrace_flags;

  /* If this is non-zero, it points to a chain of signals which need to
     be delivered to this process.  */
  struct pending_signals *pending_signals;

  /* A link used when resuming.  It is initialized from the resume request,
     and then processed and cleared in linux_resume_one_lwp.  */
  struct thread_resume *resume;

  /* True if it is known that this lwp is presently collecting a fast
     tracepoint (it is in the jump pad or in some code that will
     return to the jump pad).  Normally, we won't care about this, but
     we will if a signal arrives to this lwp while it is
     collecting.  */
  int collecting_fast_tracepoint;

  /* If this is non-zero, it points to a chain of signals which need
     to be reported to GDB.  These were deferred because the thread
     was doing a fast tracepoint collect when they arrived.  */
  struct pending_signals *pending_signals_to_report;

  /* When collecting_fast_tracepoint is first found to be 1, we insert
     an exit-jump-pad-quickly breakpoint.  This is it.  */
  struct breakpoint *exit_jump_pad_bkpt;

  /* True if the LWP was seen to stop at an internal breakpoint and needs
     stepping over later when it is resumed.  */
  int need_step_over;

  int thread_known;
#ifdef HAVE_THREAD_DB_H
  /* The thread handle, used for e.g. TLS access.  Only valid if
     THREAD_KNOWN is set.  */
  td_thrhandle_t th;
#endif

  /* Arch-specific additions.  */
  struct arch_lwp_info *arch_private;
};
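
/* Illustrative sketch only (the real bookkeeping lives in the generic
   linux-low.c code, and the helper name here is hypothetical): when a
   wait status has been collected for an lwp but cannot be reported to
   GDB yet, the fields above are used together roughly like this:

     static void
     record_pending_status (struct lwp_info *lwp, int wstat)
     {
       lwp->stopped = 1;
       lwp->last_status = wstat;
       lwp->status_pending = wstat;
       lwp->status_pending_p = 1;
     }  */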

extern struct inferior_list all_lwps;

char *linux_child_pid_to_exec_file (int pid);
int elf_64_file_p (const char *file);

void linux_attach_lwp (unsigned long pid);
struct lwp_info *find_lwp_pid (ptid_t ptid);
int linux_get_thread_area (int lwpid, CORE_ADDR *area);

/* From thread-db.c */
int thread_db_init (int use_events);
void thread_db_detach (struct process_info *);
void thread_db_mourn (struct process_info *);
int thread_db_handle_monitor_command (char *);
int thread_db_get_tls_address (struct thread_info *thread, CORE_ADDR offset,
                               CORE_ADDR load_module, CORE_ADDR *address);
int thread_db_look_up_one_symbol (const char *name, CORE_ADDR *addrp);
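
/* Usage sketch, illustrative only: a caller answering GDB's qGetTLSAddr
   request might resolve a thread-local variable roughly like this (the
   zero-on-success return convention is assumed here, as this header does
   not document it):

     CORE_ADDR address;

     if (thread_db_get_tls_address (thread, offset, load_module,
                                    &address) == 0)
       {
         ... ADDRESS now holds the variable's location in the inferior ...
       }  */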