/* Native support code for HPUX PA-RISC.
   Copyright 1986, 1987, 1989, 1990, 1991, 1992, 1993, 1998, 1999
   Free Software Foundation, Inc.

   Contributed by the Center for Software Science at the
   University of Utah (pa-gdb-bugs@cs.utah.edu).

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */


#include "defs.h"
#include "inferior.h"
#include "target.h"
#include <sys/ptrace.h>
#include "gdbcore.h"
#include "gdb_wait.h"
#include <signal.h>

extern CORE_ADDR text_end;

static void fetch_register PARAMS ((int));

void
fetch_inferior_registers (regno)
     int regno;
{
  if (regno == -1)
    for (regno = 0; regno < NUM_REGS; regno++)
      fetch_register (regno);
  else
    fetch_register (regno);
}

/* Our own version of the offsetof macro, since we can't assume ANSI C.  */
#define HPPAH_OFFSETOF(type, member) ((int) (&((type *) 0)->member))
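/* For example, HPPAH_OFFSETOF (save_state_t, ss_flags) yields the byte
   offset of the ss_flags member within save_state_t, just as the ANSI
   offsetof macro would.  */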

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).  */

void
store_inferior_registers (regno)
     int regno;
{
  register unsigned int regaddr;
  char buf[80];
  register int i;
  unsigned int offset = U_REGS_OFFSET;
  int scratch;

  if (regno >= 0)
    {
      unsigned int addr, len, offset;

      if (CANNOT_STORE_REGISTER (regno))
        return;

      offset = 0;
      len = REGISTER_RAW_SIZE (regno);

      /* Requests for register zero actually want the save_state's
         ss_flags member.  As RM says: "Oh, what a hack!"  */
      if (regno == 0)
        {
          save_state_t ss;
          addr = HPPAH_OFFSETOF (save_state_t, ss_flags);
          len = sizeof (ss.ss_flags);

          /* Note that ss_flags is always an int, no matter what
             REGISTER_RAW_SIZE(0) says.  Assuming all HP-UX PA machines
             are big-endian, put it at the least significant end of the
             value, and zap the rest of the buffer.  */
          offset = REGISTER_RAW_SIZE (0) - len;
        }

      /* Floating-point registers come from the ss_fpblock area.  */
      else if (regno >= FP0_REGNUM)
        addr = (HPPAH_OFFSETOF (save_state_t, ss_fpblock)
                + (REGISTER_BYTE (regno) - REGISTER_BYTE (FP0_REGNUM)));

      /* Wide registers come from the ss_wide area.
         I think it's more PC to test (ss_flags & SS_WIDEREGS) to select
         between ss_wide and ss_narrow than to use the raw register size.
         But checking ss_flags would require an extra ptrace call for
         every register reference.  Bleah.  */
      else if (len == 8)
        addr = (HPPAH_OFFSETOF (save_state_t, ss_wide)
                + REGISTER_BYTE (regno));

      /* Narrow registers come from the ss_narrow area.  Note that
         ss_narrow starts with gr1, not gr0.  */
      else if (len == 4)
        addr = (HPPAH_OFFSETOF (save_state_t, ss_narrow)
                + (REGISTER_BYTE (regno) - REGISTER_BYTE (1)));
      else
        internal_error ("hppah-nat.c (write_register): unexpected register size");

#ifdef GDB_TARGET_IS_HPPA_20W
      /* Unbelievable.  The PC head and tail must be written in 64bit hunks
         or we will get an error.  Worse yet, the oddball ptrace/ttrace
         layering will not allow us to perform a 64bit register store.

         What a crock.  */
      if ((regno == PCOQ_HEAD_REGNUM || regno == PCOQ_TAIL_REGNUM) && len == 8)
        {
          CORE_ADDR temp;

          temp = *(CORE_ADDR *) &registers[REGISTER_BYTE (regno)];

          /* Set the priv level (stored in the low two bits of the PC).  */
          temp |= 0x3;

          ttrace_write_reg_64 (inferior_pid, (CORE_ADDR) addr, (CORE_ADDR) &temp);

          /* If we fail to write the PC, give a true error instead of
             just a warning.  */
          if (errno != 0)
            {
              char *err = safe_strerror (errno);
              char *msg = alloca (strlen (err) + 128);
              sprintf (msg, "writing `%s' register: %s",
                       REGISTER_NAME (regno), err);
              perror_with_name (msg);
            }
          return;
        }

      /* Another crock.  HPUX complains if you write a nonzero value to
         the high part of IPSW.  What will it take for HP to catch a
         clue about building sensible interfaces?  */
      if (regno == IPSW_REGNUM && len == 8)
        *(int *) &registers[REGISTER_BYTE (regno)] = 0;
#endif

      for (i = 0; i < len; i += sizeof (int))
        {
          errno = 0;
          call_ptrace (PT_WUREGS, inferior_pid, (PTRACE_ARG3_TYPE) addr + i,
                       *(int *) &registers[REGISTER_BYTE (regno) + i]);
          if (errno != 0)
            {
              /* Warning, not error, in case we are attached; sometimes
                 the kernel doesn't let us at the registers.  */
              char *err = safe_strerror (errno);
              char *msg = alloca (strlen (err) + 128);
              sprintf (msg, "writing `%s' register: %s",
                       REGISTER_NAME (regno), err);
              /* If we fail to write the PC, give a true error instead of
                 just a warning.  */
              if (regno == PCOQ_HEAD_REGNUM || regno == PCOQ_TAIL_REGNUM)
                perror_with_name (msg);
              else
                warning (msg);
              return;
            }
        }
    }
  else
    for (regno = 0; regno < NUM_REGS; regno++)
      store_inferior_registers (regno);
}


/* Fetch a register's value from the process's U area.  */
static void
fetch_register (regno)
     int regno;
{
  char buf[MAX_REGISTER_RAW_SIZE];
  unsigned int addr, len, offset;
  int i;

  offset = 0;
  len = REGISTER_RAW_SIZE (regno);

  /* Requests for register zero actually want the save_state's
     ss_flags member.  As RM says: "Oh, what a hack!"  */
  if (regno == 0)
    {
      save_state_t ss;
      addr = HPPAH_OFFSETOF (save_state_t, ss_flags);
      len = sizeof (ss.ss_flags);

      /* Note that ss_flags is always an int, no matter what
         REGISTER_RAW_SIZE(0) says.  Assuming all HP-UX PA machines
         are big-endian, put it at the least significant end of the
         value, and zap the rest of the buffer.  */
      offset = REGISTER_RAW_SIZE (0) - len;
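      /* If REGISTER_RAW_SIZE (0) is 8 and ss_flags is a 4-byte int, for
         example, OFFSET is 4: the flags land in the last four bytes of
         BUF, the least significant end of a big-endian value, and the
         memset just below clears the buffer so the leading bytes read
         as zero.  */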
      memset (buf, 0, sizeof (buf));
    }

  /* Floating-point registers come from the ss_fpblock area.  */
  else if (regno >= FP0_REGNUM)
    addr = (HPPAH_OFFSETOF (save_state_t, ss_fpblock)
            + (REGISTER_BYTE (regno) - REGISTER_BYTE (FP0_REGNUM)));

  /* Wide registers come from the ss_wide area.
     I think it's more PC to test (ss_flags & SS_WIDEREGS) to select
     between ss_wide and ss_narrow than to use the raw register size.
     But checking ss_flags would require an extra ptrace call for
     every register reference.  Bleah.  */
  else if (len == 8)
    addr = (HPPAH_OFFSETOF (save_state_t, ss_wide)
            + REGISTER_BYTE (regno));

  /* Narrow registers come from the ss_narrow area.  Note that
     ss_narrow starts with gr1, not gr0.  */
  else if (len == 4)
    addr = (HPPAH_OFFSETOF (save_state_t, ss_narrow)
            + (REGISTER_BYTE (regno) - REGISTER_BYTE (1)));

  else
    internal_error ("hppa-nat.c (fetch_register): unexpected register size");

  for (i = 0; i < len; i += sizeof (int))
    {
      errno = 0;
      /* Copy an int from the U area to buf.  Fill the least
         significant end if len != raw_size.  */
      * (int *) &buf[offset + i] =
        call_ptrace (PT_RUREGS, inferior_pid,
                     (PTRACE_ARG3_TYPE) addr + i, 0);
      if (errno != 0)
        {
          /* Warning, not error, in case we are attached; sometimes
             the kernel doesn't let us at the registers.  */
          char *err = safe_strerror (errno);
          char *msg = alloca (strlen (err) + 128);
          sprintf (msg, "reading `%s' register: %s",
                   REGISTER_NAME (regno), err);
          warning (msg);
          return;
        }
    }

  /* If we're reading an address from the instruction address queue,
     mask out the bottom two bits --- they contain the privilege
     level.  */
  if (regno == PCOQ_HEAD_REGNUM || regno == PCOQ_TAIL_REGNUM)
    buf[len - 1] &= ~0x3;
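  /* Since the value in BUF is big-endian, the two privilege-level bits
     sit in its final byte, so clearing them in buf[len - 1] is enough.  */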

  supply_register (regno, buf);
}


/* Copy LEN bytes to or from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  Copy to inferior if
   WRITE is nonzero.

   Returns the length copied, which is either the LEN argument or zero.
   This xfer function does not do partial moves, since child_ops
   doesn't allow memory operations to cross below us in the target stack
   anyway.  */

int
child_xfer_memory (memaddr, myaddr, len, write, target)
     CORE_ADDR memaddr;
     char *myaddr;
     int len;
     int write;
     struct target_ops *target;        /* ignored */
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & - (CORE_ADDR) (sizeof (int));
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (int) - 1) / sizeof (int);
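  /* Example: with 4-byte ints, MEMADDR 0x1003 and LEN 6 give ADDR 0x1000
     and COUNT 3, so the three longwords at 0x1000, 0x1004 and 0x1008
     cover every requested byte.  */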

  /* Allocate buffer of that many longwords.
     Note -- do not use alloca to allocate this buffer since there is no
     guarantee of when the buffer will actually be deallocated.

     This routine can be called over and over with the same call chain;
     this (in effect) would pile up all those alloca requests until a call
     to alloca was made from a point higher than this routine in the
     call chain.  */
  register int *buffer = (int *) xmalloc (count * sizeof (int));

  if (write)
    {
      /* Fill start and end extra bytes of buffer with existing memory data.  */
      if (addr != memaddr || len < (int) sizeof (int))
        {
          /* Need part of initial word -- fetch it.  */
          buffer[0] = call_ptrace (addr < text_end ? PT_RIUSER : PT_RDUSER,
                                   inferior_pid, (PTRACE_ARG3_TYPE) addr, 0);
        }

      if (count > 1)            /* FIXME, avoid if even boundary */
        {
          buffer[count - 1]
            = call_ptrace (addr < text_end ? PT_RIUSER : PT_RDUSER,
                           inferior_pid,
                           (PTRACE_ARG3_TYPE) (addr
                                               + (count - 1) * sizeof (int)),
                           0);
        }

      /* Copy data to be written over corresponding part of buffer */
      memcpy ((char *) buffer + (memaddr & (sizeof (int) - 1)), myaddr, len);

      /* Write the entire buffer.  */
      for (i = 0; i < count; i++, addr += sizeof (int))
        {
          int pt_status;
          int pt_request;
          /* The HP-UX kernel crashes if you use PT_WDUSER to write into the
             text segment.  FIXME -- does it work to write into the data
             segment using WIUSER, or do these idiots really expect us to
             figure out which segment the address is in, so we can use a
             separate system call for it??!  */
          errno = 0;
          pt_request = (addr < text_end) ? PT_WIUSER : PT_WDUSER;
          pt_status = call_ptrace (pt_request,
                                   inferior_pid,
                                   (PTRACE_ARG3_TYPE) addr,
                                   buffer[i]);

          /* Did we fail?  Might we have guessed wrong about which
             segment this address resides in?  Try the other request,
             and see if that works...  */
          if ((pt_status == -1) && errno)
            {
              errno = 0;
              pt_request = (pt_request == PT_WIUSER) ? PT_WDUSER : PT_WIUSER;
              pt_status = call_ptrace (pt_request,
                                       inferior_pid,
                                       (PTRACE_ARG3_TYPE) addr,
                                       buffer[i]);

              /* No, we still fail.  Okay, time to punt.  */
              if ((pt_status == -1) && errno)
                {
                  free (buffer);
                  return 0;
                }
            }
        }
    }
  else
    {
      /* Read all the longwords */
      for (i = 0; i < count; i++, addr += sizeof (int))
        {
          errno = 0;
          buffer[i] = call_ptrace (addr < text_end ? PT_RIUSER : PT_RDUSER,
                                   inferior_pid, (PTRACE_ARG3_TYPE) addr, 0);
          if (errno)
            {
              free (buffer);
              return 0;
            }
          QUIT;
        }

      /* Copy appropriate bytes out of the buffer.  */
      memcpy (myaddr, (char *) buffer + (memaddr & (sizeof (int) - 1)), len);
    }
  free (buffer);
  return len;
}


void
child_post_follow_inferior_by_clone ()
{
  int status;

  /* This function is used when following both the parent and child
     of a fork.  In this case, the debugger clones itself.  The original
     debugger follows the parent, the clone follows the child.  The
     original detaches from the child, delivering a SIGSTOP to it to
     keep it from running away until the clone can attach itself.

     At this point, the clone has attached to the child.  Because of
     the SIGSTOP, we must now deliver a SIGCONT to the child, or it
     won't behave properly.  */
  status = kill (inferior_pid, SIGCONT);
}


void
child_post_follow_vfork (parent_pid, followed_parent, child_pid, followed_child)
     int parent_pid;
     int followed_parent;
     int child_pid;
     int followed_child;
{
  /* Are we a debugger that followed the parent of a vfork?  If so,
     then recall that the child's vfork event was delivered to us
     first.  And, that the parent was suspended by the OS until the
     child's exec or exit events were received.

     Upon receiving that child vfork, then, we were forced to remove
     all breakpoints in the child and continue it so that it could
     reach the exec or exit point.

     But also recall that the parent and child of a vfork share the
     same address space.  Thus, removing bp's in the child also
     removed them from the parent.

     Now that the child has safely exec'd or exited, we must restore
     the parent's breakpoints before we continue it.  Else, we may
     cause it to run past expected stopping points.  */
  if (followed_parent)
    {
      reattach_breakpoints (parent_pid);
    }

  /* Are we a debugger that followed the child of a vfork?  If so,
     then recall that we don't actually acquire control of the child
     until after it has exec'd or exited.  */
  if (followed_child)
    {
      /* If the child has exited, then there's nothing for us to do.
         In the case of an exec event, we'll let that be handled by
         the normal mechanism that notices and handles exec events, in
         resume().  */
    }
}

/* Format a process id, given PID.  Be sure to terminate
   this with a null--it's going to be printed via a "%s".  */
char *
child_pid_to_str (pid)
     pid_t pid;
{
  /* Static because address returned */
  static char buf[30];

  /* Extra NULLs for paranoia's sake */
  sprintf (buf, "process %d\0\0\0\0", pid);

  return buf;
}

/* Format a thread id, given TID.  Be sure to terminate
   this with a null--it's going to be printed via a "%s".

   Note: This is a core-gdb tid, not the actual system tid.
   See infttrace.c for details.  */
char *
hppa_tid_to_str (tid)
     pid_t tid;
{
  /* Static because address returned */
  static char buf[30];

  /* Extra NULLs for paranoia's sake */
  sprintf (buf, "system thread %d\0\0\0\0", tid);

  return buf;
}

#if !defined (GDB_NATIVE_HPUX_11)

/* The following code is a substitute for the infttrace.c versions used
   with ttrace() in HPUX 11.  */

/* This value is an arbitrary integer.  */
#define PT_VERSION 123456

/* This semaphore is used to coordinate the child and parent processes
   after a fork(), and before an exec() by the child.  See
   parent_attach_all for details.  */

typedef struct
{
  int parent_channel[2];        /* Parent "talks" to [1], child "listens" to [0] */
  int child_channel[2];         /* Child "talks" to [1], parent "listens" to [0] */
}
startup_semaphore_t;
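/* The handshake runs in both directions: after PT_SETTRC'ing itself the
   child writes a magic number down child_channel and blocks reading
   parent_channel; the parent (in child_acknowledge_created_inferior)
   reads that magic and writes its own back, releasing the child to
   exec.  See parent_attach_all below for the details.  */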

#define SEM_TALK (1)
#define SEM_LISTEN (0)

static startup_semaphore_t startup_semaphore;

extern int parent_attach_all PARAMS ((int, PTRACE_ARG3_TYPE, int));

#ifdef PT_SETTRC
/* This function causes the caller's process to be traced by its
   parent.  This is intended to be called after GDB forks itself,
   and before the child execs the target.

   Note that HP-UX ptrace is rather funky in how this is done.
   If the parent wants to get the initial exec event of a child,
   it must set the ptrace event mask of the child to include execs.
   (The child cannot do this itself.)  This must be done after the
   child is forked, but before it execs.

   To coordinate the parent and child, we implement a semaphore using
   pipes.  After SETTRC'ing itself, the child tells the parent that
   it is now traceable by the parent, and waits for the parent's
   acknowledgement.  The parent can then set the child's event mask,
   and notify the child that it can now exec.

   (The acknowledgement by parent happens as a result of a call to
   child_acknowledge_created_inferior.)  */

int
parent_attach_all (pid, addr, data)
     int pid;
     PTRACE_ARG3_TYPE addr;
     int data;
{
  int pt_status = 0;

  /* We need a memory home for a constant.  */
  int tc_magic_child = PT_VERSION;
  int tc_magic_parent = 0;

  /* The remainder of this function is only useful for HPUX 10.0 and
     later, as it depends upon the ability to request notification
     of specific kinds of events by the kernel.  */
#if defined(PT_SET_EVENT_MASK)

  /* Notify the parent that we're potentially ready to exec().  */
  write (startup_semaphore.child_channel[SEM_TALK],
         &tc_magic_child,
         sizeof (tc_magic_child));

  /* Wait for acknowledgement from the parent.  */
  read (startup_semaphore.parent_channel[SEM_LISTEN],
        &tc_magic_parent,
        sizeof (tc_magic_parent));
  if (tc_magic_child != tc_magic_parent)
    warning ("mismatched semaphore magic");

  /* Discard our copy of the semaphore.  */
  (void) close (startup_semaphore.parent_channel[SEM_LISTEN]);
  (void) close (startup_semaphore.parent_channel[SEM_TALK]);
  (void) close (startup_semaphore.child_channel[SEM_LISTEN]);
  (void) close (startup_semaphore.child_channel[SEM_TALK]);
#endif

  return 0;
}
#endif

int
hppa_require_attach (pid)
     int pid;
{
  int pt_status;
  CORE_ADDR pc;
  CORE_ADDR pc_addr;
  unsigned int regs_offset;

  /* Are we already attached?  There appears to be no explicit way to
     answer this via ptrace, so we try something which should be
     innocuous if we are attached.  If that fails, then we assume
     we're not attached, and so attempt to make it so.  */

  errno = 0;
  regs_offset = U_REGS_OFFSET;
  pc_addr = register_addr (PC_REGNUM, regs_offset);
  pc = call_ptrace (PT_READ_U, pid, (PTRACE_ARG3_TYPE) pc_addr, 0);

  if (errno)
    {
      errno = 0;
      pt_status = call_ptrace (PT_ATTACH, pid, (PTRACE_ARG3_TYPE) 0, 0);

      if (errno)
        return -1;

      /* Now we really are attached.  */
      errno = 0;
    }
  attach_flag = 1;
  return pid;
}

int
hppa_require_detach (pid, signal)
     int pid;
     int signal;
{
  errno = 0;
  call_ptrace (PT_DETACH, pid, (PTRACE_ARG3_TYPE) 1, signal);
  errno = 0;                    /* Ignore any errors.  */
  return pid;
}

/* Since ptrace doesn't support memory page-protection events, which
   are used to implement "hardware" watchpoints on HP-UX, these are
   dummy versions, which perform no useful work.  */

void
hppa_enable_page_protection_events (pid)
     int pid;
{
}

void
hppa_disable_page_protection_events (pid)
     int pid;
{
}

int
hppa_insert_hw_watchpoint (pid, start, len, type)
     int pid;
     CORE_ADDR start;
     LONGEST len;
     int type;
{
  error ("Hardware watchpoints not implemented on this platform.");
}

int
hppa_remove_hw_watchpoint (pid, start, len, type)
     int pid;
     CORE_ADDR start;
     LONGEST len;
     enum bptype type;
{
  error ("Hardware watchpoints not implemented on this platform.");
}

int
hppa_can_use_hw_watchpoint (type, cnt, ot)
     enum bptype type;
     int cnt;
     enum bptype ot;
{
  return 0;
}

int
hppa_range_profitable_for_hw_watchpoint (pid, start, len)
     int pid;
     CORE_ADDR start;
     LONGEST len;
{
  error ("Hardware watchpoints not implemented on this platform.");
}

char *
hppa_pid_or_tid_to_str (id)
     pid_t id;
{
  /* In the ptrace world, there are only processes.  */
  return child_pid_to_str (id);
}

/* This function has no meaning in a non-threaded world.  Thus, we
   return 0 (FALSE).  See the use of "hppa_prepare_to_proceed" in
   hppa-tdep.c.  */

pid_t
hppa_switched_threads (pid)
     pid_t pid;
{
  return (pid_t) 0;
}

void
hppa_ensure_vforking_parent_remains_stopped (pid)
     int pid;
{
  /* This assumes that the vforked parent is presently stopped, and
     that the vforked child has just delivered its first exec event.
     Calling kill() this way will cause the SIGTRAP to be delivered as
     soon as the parent is resumed, which happens as soon as the
     vforked child is resumed.  See wait_for_inferior for the use of
     this function.  */
  kill (pid, SIGTRAP);
}

int
hppa_resume_execd_vforking_child_to_get_parent_vfork ()
{
  return 1;                     /* Yes, the child must be resumed.  */
}

void
require_notification_of_events (pid)
     int pid;
{
#if defined(PT_SET_EVENT_MASK)
  int pt_status;
  ptrace_event_t ptrace_events;
  int nsigs;
  int signum;

  /* Instruct the kernel as to the set of events we wish to be
     informed of.  (This support does not exist before HPUX 10.0.
     We'll assume if PT_SET_EVENT_MASK has not been defined by
     <sys/ptrace.h>, then we're being built on pre-10.0.)  */
  memset (&ptrace_events, 0, sizeof (ptrace_events));

  /* Note: By default, all signals are visible to us.  If we wish
     the kernel to keep certain signals hidden from us, we do it
     by calling sigdelset (ptrace_events.pe_signals, signal) for
     each such signal here, before doing PT_SET_EVENT_MASK.  */
  /* RM: The above comment is no longer true.  We start by ignoring
     all signals, and then add the ones we are interested in.  We could
     do it the other way: start by looking at all signals and then
     deleting the ones that we aren't interested in, except that
     multiple gdb signals may be mapped to the same host signal
     (eg. TARGET_SIGNAL_IO and TARGET_SIGNAL_POLL both get mapped to
     signal 22 on HPUX 10.20).  We want to be notified if we are
     interested in either signal.  */
  sigfillset (&ptrace_events.pe_signals);
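  /* Judging from the RM note above, a signal left in this set is one
     the kernel keeps from us: sigfillset hides everything, and the
     loop below deletes from the set exactly the signals GDB wants to
     see.  */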

  /* RM: Let's not bother with signals we don't care about */
  nsigs = (int) TARGET_SIGNAL_LAST;
  for (signum = nsigs; signum > 0; signum--)
    {
      if ((signal_stop_state (signum)) ||
          (signal_print_state (signum)) ||
          (!signal_pass_state (signum)))
        {
          if (target_signal_to_host_p (signum))
            sigdelset (&ptrace_events.pe_signals,
                       target_signal_to_host (signum));
        }
    }

  ptrace_events.pe_set_event = 0;

  ptrace_events.pe_set_event |= PTRACE_SIGNAL;
  ptrace_events.pe_set_event |= PTRACE_EXEC;
  ptrace_events.pe_set_event |= PTRACE_FORK;
  ptrace_events.pe_set_event |= PTRACE_VFORK;
  /* ??rehrauer: Add this one when we're prepared to catch it...
     ptrace_events.pe_set_event |= PTRACE_EXIT;
   */

  errno = 0;
  pt_status = call_ptrace (PT_SET_EVENT_MASK,
                           pid,
                           (PTRACE_ARG3_TYPE) & ptrace_events,
                           sizeof (ptrace_events));
  if (errno)
    perror_with_name ("ptrace");
  if (pt_status < 0)
    return;
#endif
}

void
require_notification_of_exec_events (pid)
     int pid;
{
#if defined(PT_SET_EVENT_MASK)
  int pt_status;
  ptrace_event_t ptrace_events;

  /* Instruct the kernel as to the set of events we wish to be
     informed of.  (This support does not exist before HPUX 10.0.
     We'll assume if PT_SET_EVENT_MASK has not been defined by
     <sys/ptrace.h>, then we're being built on pre-10.0.)  */
  memset (&ptrace_events, 0, sizeof (ptrace_events));

  /* Note: By default, all signals are visible to us.  If we wish
     the kernel to keep certain signals hidden from us, we do it
     by calling sigdelset (ptrace_events.pe_signals, signal) for
     each such signal here, before doing PT_SET_EVENT_MASK.  */
  sigemptyset (&ptrace_events.pe_signals);

  ptrace_events.pe_set_event = 0;

  ptrace_events.pe_set_event |= PTRACE_EXEC;
  /* ??rehrauer: Add this one when we're prepared to catch it...
     ptrace_events.pe_set_event |= PTRACE_EXIT;
   */

  errno = 0;
  pt_status = call_ptrace (PT_SET_EVENT_MASK,
                           pid,
                           (PTRACE_ARG3_TYPE) & ptrace_events,
                           sizeof (ptrace_events));
  if (errno)
    perror_with_name ("ptrace");
  if (pt_status < 0)
    return;
#endif
}

/* This function is called by the parent process, with pid being the
   ID of the child process, after the debugger has forked.  */

void
child_acknowledge_created_inferior (pid)
     int pid;
{
  /* We need a memory home for a constant.  */
  int tc_magic_parent = PT_VERSION;
  int tc_magic_child = 0;

  /* The remainder of this function is only useful for HPUX 10.0 and
     later, as it depends upon the ability to request notification
     of specific kinds of events by the kernel.  */
#if defined(PT_SET_EVENT_MASK)
  /* Wait for the child to tell us that it has forked.  */
  read (startup_semaphore.child_channel[SEM_LISTEN],
        &tc_magic_child,
        sizeof (tc_magic_child));

  /* Notify the child that it can exec.

     In the infttrace.c variant of this function, we set the child's
     event mask after the fork but before the exec.  In the ptrace
     world, it seems we can't set the event mask until after the exec.  */
  write (startup_semaphore.parent_channel[SEM_TALK],
         &tc_magic_parent,
         sizeof (tc_magic_parent));

  /* We'd better pause a bit before trying to set the event mask,
     though, to ensure that the exec has happened.  We don't want to
     wait() on the child, because that'll screw up the upper layers
     of gdb's execution control that expect to see the exec event.

     After an exec, the child is no longer executing gdb code.  Hence,
     we can't have yet another synchronization via the pipes.  We'll
     just sleep for a second, and hope that's enough delay...  */
  sleep (1);

  /* Instruct the kernel as to the set of events we wish to be
     informed of.  */
  require_notification_of_exec_events (pid);

  /* Discard our copy of the semaphore.  */
  (void) close (startup_semaphore.parent_channel[SEM_LISTEN]);
  (void) close (startup_semaphore.parent_channel[SEM_TALK]);
  (void) close (startup_semaphore.child_channel[SEM_LISTEN]);
  (void) close (startup_semaphore.child_channel[SEM_TALK]);
#endif
}

void
child_post_startup_inferior (pid)
     int pid;
{
  require_notification_of_events (pid);
}

void
child_post_attach (pid)
     int pid;
{
  require_notification_of_events (pid);
}

int
child_insert_fork_catchpoint (pid)
     int pid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_SET_EVENT_MASK)
  error ("Unable to catch forks prior to HPUX 10.0");
#else
  /* Enable reporting of fork events from the kernel.  */
  /* ??rehrauer: For the moment, we're always enabling these events,
     and just ignoring them if there's no catchpoint to catch them.  */
  return 0;
#endif
}

int
child_remove_fork_catchpoint (pid)
     int pid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_SET_EVENT_MASK)
  error ("Unable to catch forks prior to HPUX 10.0");
#else
  /* Disable reporting of fork events from the kernel.  */
  /* ??rehrauer: For the moment, we're always enabling these events,
     and just ignoring them if there's no catchpoint to catch them.  */
  return 0;
#endif
}

int
child_insert_vfork_catchpoint (pid)
     int pid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_SET_EVENT_MASK)
  error ("Unable to catch vforks prior to HPUX 10.0");
#else
  /* Enable reporting of vfork events from the kernel.  */
  /* ??rehrauer: For the moment, we're always enabling these events,
     and just ignoring them if there's no catchpoint to catch them.  */
  return 0;
#endif
}

int
child_remove_vfork_catchpoint (pid)
     int pid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_SET_EVENT_MASK)
  error ("Unable to catch vforks prior to HPUX 10.0");
#else
  /* Disable reporting of vfork events from the kernel.  */
  /* ??rehrauer: For the moment, we're always enabling these events,
     and just ignoring them if there's no catchpoint to catch them.  */
  return 0;
#endif
}

int
child_has_forked (pid, childpid)
     int pid;
     int *childpid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_GET_PROCESS_STATE)
  *childpid = 0;
  return 0;
#else
  int pt_status;
  ptrace_state_t ptrace_state;

  errno = 0;
  pt_status = call_ptrace (PT_GET_PROCESS_STATE,
                           pid,
                           (PTRACE_ARG3_TYPE) & ptrace_state,
                           sizeof (ptrace_state));
  if (errno)
    perror_with_name ("ptrace");
  if (pt_status < 0)
    return 0;

  if (ptrace_state.pe_report_event & PTRACE_FORK)
    {
      *childpid = ptrace_state.pe_other_pid;
      return 1;
    }

  return 0;
#endif
}

int
child_has_vforked (pid, childpid)
     int pid;
     int *childpid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_GET_PROCESS_STATE)
  *childpid = 0;
  return 0;

#else
  int pt_status;
  ptrace_state_t ptrace_state;

  errno = 0;
  pt_status = call_ptrace (PT_GET_PROCESS_STATE,
                           pid,
                           (PTRACE_ARG3_TYPE) & ptrace_state,
                           sizeof (ptrace_state));
  if (errno)
    perror_with_name ("ptrace");
  if (pt_status < 0)
    return 0;

  if (ptrace_state.pe_report_event & PTRACE_VFORK)
    {
      *childpid = ptrace_state.pe_other_pid;
      return 1;
    }

  return 0;
#endif
}

int
child_can_follow_vfork_prior_to_exec ()
{
  /* ptrace doesn't allow this.  */
  return 0;
}

int
child_insert_exec_catchpoint (pid)
     int pid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_SET_EVENT_MASK)
  error ("Unable to catch execs prior to HPUX 10.0");

#else
  /* Enable reporting of exec events from the kernel.  */
  /* ??rehrauer: For the moment, we're always enabling these events,
     and just ignoring them if there's no catchpoint to catch them.  */
  return 0;
#endif
}

int
child_remove_exec_catchpoint (pid)
     int pid;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_SET_EVENT_MASK)
  error ("Unable to catch execs prior to HPUX 10.0");

#else
  /* Disable reporting of exec events from the kernel.  */
  /* ??rehrauer: For the moment, we're always enabling these events,
     and just ignoring them if there's no catchpoint to catch them.  */
  return 0;
#endif
}

int
child_has_execd (pid, execd_pathname)
     int pid;
     char **execd_pathname;
{
  /* This request is only available on HPUX 10.0 and later.  */
#if !defined(PT_GET_PROCESS_STATE)
  *execd_pathname = NULL;
  return 0;

#else
  int pt_status;
  ptrace_state_t ptrace_state;

  errno = 0;
  pt_status = call_ptrace (PT_GET_PROCESS_STATE,
                           pid,
                           (PTRACE_ARG3_TYPE) & ptrace_state,
                           sizeof (ptrace_state));
  if (errno)
    perror_with_name ("ptrace");
  if (pt_status < 0)
    return 0;

  if (ptrace_state.pe_report_event & PTRACE_EXEC)
    {
      char *exec_file = target_pid_to_exec_file (pid);
      *execd_pathname = savestring (exec_file, strlen (exec_file));
      return 1;
    }

  return 0;
#endif
}

int
child_reported_exec_events_per_exec_call ()
{
  return 2;                     /* ptrace reports the event twice per call.  */
}

int
child_has_syscall_event (pid, kind, syscall_id)
     int pid;
     enum target_waitkind *kind;
     int *syscall_id;
{
  /* This request is only available on HPUX 10.30 and later, via
     the ttrace interface.  */

  *kind = TARGET_WAITKIND_SPURIOUS;
  *syscall_id = -1;
  return 0;
}

char *
child_pid_to_exec_file (pid)
     int pid;
{
  static char exec_file_buffer[1024];
  int pt_status;
  CORE_ADDR top_of_stack;
  char four_chars[4];
  int name_index;
  int i;
  int saved_inferior_pid;
  boolean done;

#ifdef PT_GET_PROCESS_PATHNAME
  /* As of 10.x HP-UX, there's an explicit request to get the pathname.  */
  pt_status = call_ptrace (PT_GET_PROCESS_PATHNAME,
                           pid,
                           (PTRACE_ARG3_TYPE) exec_file_buffer,
                           sizeof (exec_file_buffer) - 1);
  if (pt_status == 0)
    return exec_file_buffer;
#endif

  /* It appears that this request is broken prior to 10.30.
     If it fails, try a really, truly amazingly gross hack
     that DDE uses, of pawing through the process' data
     segment to find the pathname.  */

  top_of_stack = 0x7b03a000;
  name_index = 0;
  done = 0;

  /* On the chance that pid != inferior_pid, set inferior_pid
     to pid, so that (grrrr!) implicit uses of inferior_pid get
     the right id.  */

  saved_inferior_pid = inferior_pid;
  inferior_pid = pid;

  /* Try to grab a null-terminated string.  */
  while (!done)
    {
      if (target_read_memory (top_of_stack, four_chars, 4) != 0)
        {
          inferior_pid = saved_inferior_pid;
          return NULL;
        }
      for (i = 0; i < 4; i++)
        {
          exec_file_buffer[name_index++] = four_chars[i];
          done = (four_chars[i] == '\0');
          if (done)
            break;
        }
      top_of_stack += 4;
    }

  if (exec_file_buffer[0] == '\0')
    {
      inferior_pid = saved_inferior_pid;
      return NULL;
    }

  inferior_pid = saved_inferior_pid;
  return exec_file_buffer;
}

void
pre_fork_inferior ()
{
  int status;

  status = pipe (startup_semaphore.parent_channel);
  if (status < 0)
    {
      warning ("error getting parent pipe for startup semaphore");
      return;
    }

  status = pipe (startup_semaphore.child_channel);
  if (status < 0)
    {
      warning ("error getting child pipe for startup semaphore");
      return;
    }
}


/* Check to see if the given thread is alive.

   This is a no-op, as ptrace doesn't support threads, so we just
   return "TRUE".  */

int
child_thread_alive (pid)
     int pid;
{
  return 1;
}

#endif /* ! GDB_NATIVE_HPUX_11 */