/* Target-dependent code for HP-UX on PA-RISC.

   Copyright (C) 2002, 2003, 2004, 2005, 2007, 2008, 2009, 2010
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "gdbcore.h"
#include "osabi.h"
#include "frame.h"
#include "frame-unwind.h"
#include "trad-frame.h"
#include "symtab.h"
#include "objfiles.h"
#include "inferior.h"
#include "infcall.h"
#include "observer.h"
#include "hppa-tdep.h"
#include "solib-som.h"
#include "solib-pa64.h"
#include "regset.h"
#include "regcache.h"
#include "exceptions.h"

#include "gdb_string.h"

#define IS_32BIT_TARGET(_gdbarch) \
  ((gdbarch_tdep (_gdbarch))->bytes_per_address == 4)

/* Bit in the `ss_flag' member of `struct save_state' that indicates
   that the 64-bit register values are live.  From
   <machine/save_state.h>.  */
#define HPPA_HPUX_SS_WIDEREGS		0x40

/* Offsets of various parts of `struct save_state'.  From
   <machine/save_state.h>.  */
#define HPPA_HPUX_SS_FLAGS_OFFSET	0
#define HPPA_HPUX_SS_NARROW_OFFSET	4
#define HPPA_HPUX_SS_FPBLOCK_OFFSET	256
#define HPPA_HPUX_SS_WIDE_OFFSET	640

/* The size of `struct save_state'.  */
#define HPPA_HPUX_SAVE_STATE_SIZE	1152

/* The size of `struct pa89_save_state', which corresponds to PA-RISC
   1.1, the lowest common denominator that we support.  */
#define HPPA_HPUX_PA89_SAVE_STATE_SIZE	512


/* Forward declarations.  */
extern void _initialize_hppa_hpux_tdep (void);
extern initialize_file_ftype _initialize_hppa_hpux_tdep;

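/* Return non-zero if PC falls within an `.opd' section.  On the 64-bit
   ABI this section holds function descriptors;
   hppa64_hpux_find_global_pointer reads the global pointer out of the
   descriptor found there.  */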
static int
in_opd_section (CORE_ADDR pc)
{
  struct obj_section *s;
  int retval = 0;

  s = find_pc_section (pc);

  retval = (s != NULL
            && s->the_bfd_section->name != NULL
            && strcmp (s->the_bfd_section->name, ".opd") == 0);
  return (retval);
}

/* Return one if PC is in the call path of a trampoline, else return zero.

   Note we return one for *any* call trampoline (long-call, arg-reloc), not
   just shared library trampolines (import, export).  */

static int
hppa32_hpux_in_solib_call_trampoline (struct gdbarch *gdbarch,
                                      CORE_ADDR pc, char *name)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct minimal_symbol *minsym;
  struct unwind_table_entry *u;

  /* First see if PC is in one of the two C-library trampolines.  */
  if (pc == hppa_symbol_address("$$dyncall")
      || pc == hppa_symbol_address("_sr4export"))
    return 1;

  minsym = lookup_minimal_symbol_by_pc (pc);
  if (minsym && strcmp (SYMBOL_LINKAGE_NAME (minsym), ".stub") == 0)
    return 1;

  /* Get the unwind descriptor corresponding to PC, return zero
     if no unwind was found.  */
  u = find_unwind_entry (pc);
  if (!u)
    return 0;

  /* If this isn't a linker stub, then return now.  */
  if (u->stub_unwind.stub_type == 0)
    return 0;

  /* By definition a long-branch stub is a call stub.  */
  if (u->stub_unwind.stub_type == LONG_BRANCH)
    return 1;

  /* The call and return path execute the same instructions within
     an IMPORT stub!  So an IMPORT stub is both a call and return
     trampoline.  */
  if (u->stub_unwind.stub_type == IMPORT)
    return 1;

  /* Parameter relocation stubs always have a call path and may have a
     return path.  */
  if (u->stub_unwind.stub_type == PARAMETER_RELOCATION
      || u->stub_unwind.stub_type == EXPORT)
    {
      CORE_ADDR addr;

      /* Search forward from the current PC until we hit a branch
         or the end of the stub.  */
      for (addr = pc; addr <= u->region_end; addr += 4)
        {
          unsigned long insn;

          insn = read_memory_integer (addr, 4, byte_order);

          /* Does it look like a bl?  If so then it's the call path, if
             we find a bv or be first, then we're on the return path.  */
          if ((insn & 0xfc00e000) == 0xe8000000)
            return 1;
          else if ((insn & 0xfc00e001) == 0xe800c000
                   || (insn & 0xfc000000) == 0xe0000000)
            return 0;
        }

      /* Should never happen.  */
      warning (_("Unable to find branch in parameter relocation stub."));
      return 0;
    }

  /* Unknown stub type.  For now, just return zero.  */
  return 0;
}

static int
hppa64_hpux_in_solib_call_trampoline (struct gdbarch *gdbarch,
                                      CORE_ADDR pc, char *name)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);

  /* PA64 has a completely different stub/trampoline scheme.  Is it
     better?  Maybe.  It's certainly harder to determine with any
     certainty that we are in a stub because we can not refer to the
     unwinders to help.

     The heuristic is simple.  Try to lookup the current PC value in the
     minimal symbol table.  If that fails, then assume we are not in a
     stub and return.

     Then see if the PC value falls within the section bounds for the
     section containing the minimal symbol we found in the first
     step.  If it does, then assume we are not in a stub and return.

     Finally peek at the instructions to see if they look like a stub.  */
  struct minimal_symbol *minsym;
  asection *sec;
  CORE_ADDR addr;
  int insn, i;

  minsym = lookup_minimal_symbol_by_pc (pc);
  if (! minsym)
    return 0;

  sec = SYMBOL_OBJ_SECTION (minsym)->the_bfd_section;

  if (bfd_get_section_vma (sec->owner, sec) <= pc
      && pc < (bfd_get_section_vma (sec->owner, sec)
               + bfd_section_size (sec->owner, sec)))
    return 0;

  /* We might be in a stub.  Peek at the instructions.  Stubs are 3
     instructions long.  */
  insn = read_memory_integer (pc, 4, byte_order);

  /* Find out where we think we are within the stub.  */
  if ((insn & 0xffffc00e) == 0x53610000)
    addr = pc;
  else if ((insn & 0xffffffff) == 0xe820d000)
    addr = pc - 4;
  else if ((insn & 0xffffc00e) == 0x537b0000)
    addr = pc - 8;
  else
    return 0;

  /* Now verify each insn in the range looks like a stub instruction.  */
  insn = read_memory_integer (addr, 4, byte_order);
  if ((insn & 0xffffc00e) != 0x53610000)
    return 0;

  /* Now verify each insn in the range looks like a stub instruction.  */
  insn = read_memory_integer (addr + 4, 4, byte_order);
  if ((insn & 0xffffffff) != 0xe820d000)
    return 0;

  /* Now verify each insn in the range looks like a stub instruction.  */
  insn = read_memory_integer (addr + 8, 4, byte_order);
  if ((insn & 0xffffc00e) != 0x537b0000)
    return 0;

  /* Looks like a stub.  */
  return 1;
}

/* Return one if PC is in the return path of a trampoline, else return zero.

   Note we return one for *any* call trampoline (long-call, arg-reloc), not
   just shared library trampolines (import, export).  */

static int
hppa_hpux_in_solib_return_trampoline (struct gdbarch *gdbarch,
                                      CORE_ADDR pc, char *name)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct unwind_table_entry *u;

  /* Get the unwind descriptor corresponding to PC, return zero
     if no unwind was found.  */
  u = find_unwind_entry (pc);
  if (!u)
    return 0;

  /* If this isn't a linker stub or it's just a long branch stub, then
     return zero.  */
  if (u->stub_unwind.stub_type == 0 || u->stub_unwind.stub_type == LONG_BRANCH)
    return 0;

  /* The call and return path execute the same instructions within
     an IMPORT stub!  So an IMPORT stub is both a call and return
     trampoline.  */
  if (u->stub_unwind.stub_type == IMPORT)
    return 1;

  /* Parameter relocation stubs always have a call path and may have a
     return path.  */
  if (u->stub_unwind.stub_type == PARAMETER_RELOCATION
      || u->stub_unwind.stub_type == EXPORT)
    {
      CORE_ADDR addr;

      /* Search forward from the current PC until we hit a branch
         or the end of the stub.  */
      for (addr = pc; addr <= u->region_end; addr += 4)
        {
          unsigned long insn;

          insn = read_memory_integer (addr, 4, byte_order);

          /* Does it look like a bl?  If so then it's the call path, if
             we find a bv or be first, then we're on the return path.  */
          if ((insn & 0xfc00e000) == 0xe8000000)
            return 0;
          else if ((insn & 0xfc00e001) == 0xe800c000
                   || (insn & 0xfc000000) == 0xe0000000)
            return 1;
        }

      /* Should never happen.  */
      warning (_("Unable to find branch in parameter relocation stub."));
      return 0;
    }

  /* Unknown stub type.  For now, just return zero.  */
  return 0;
}

/* Figure out if PC is in a trampoline, and if so find out where
   the trampoline will jump to.  If not in a trampoline, return zero.

   Simple code examination probably is not a good idea since the code
   sequences in trampolines can also appear in user code.

   We use unwinds and information from the minimal symbol table to
   determine when we're in a trampoline.  This won't work for ELF
   (yet) since it doesn't create stub unwind entries.  Whether or
   not ELF will create stub unwinds or normal unwinds for linker
   stubs is still being debated.

   This should handle simple calls through dyncall or sr4export,
   long calls, argument relocation stubs, and dyncall/sr4export
   calling an argument relocation stub.  It even handles some stubs
   used in dynamic executables.  */

static CORE_ADDR
hppa_hpux_skip_trampoline_code (struct frame_info *frame, CORE_ADDR pc)
{
  struct gdbarch *gdbarch = get_frame_arch (frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int word_size = gdbarch_ptr_bit (gdbarch) / 8;
  long orig_pc = pc;
  long prev_inst, curr_inst, loc;
  struct minimal_symbol *msym;
  struct unwind_table_entry *u;

  /* Addresses passed to dyncall may *NOT* be the actual address
     of the function.  So we may have to do something special.  */
  if (pc == hppa_symbol_address("$$dyncall"))
    {
      pc = (CORE_ADDR) get_frame_register_unsigned (frame, 22);

      /* If bit 30 (counting from the left) is on, then pc is the address of
         the PLT entry for this function, not the address of the function
         itself.  Bit 31 has meaning too, but only for MPE.  */
      if (pc & 0x2)
        pc = (CORE_ADDR) read_memory_integer (pc & ~0x3, word_size,
                                              byte_order);
    }
  if (pc == hppa_symbol_address("$$dyncall_external"))
    {
      pc = (CORE_ADDR) get_frame_register_unsigned (frame, 22);
      pc = (CORE_ADDR) read_memory_integer (pc & ~0x3, word_size, byte_order);
    }
  else if (pc == hppa_symbol_address("_sr4export"))
    pc = (CORE_ADDR) get_frame_register_unsigned (frame, 22);

  /* Get the unwind descriptor corresponding to PC, return zero
     if no unwind was found.  */
  u = find_unwind_entry (pc);
  if (!u)
    return 0;

  /* If this isn't a linker stub, then return now.  */
  /* elz: attention here! (FIXME) because of a compiler/linker
     error, some stubs which should have a non zero stub_unwind.stub_type
     have unfortunately a value of zero.  So this function would return here
     as if we were not in a trampoline.  To fix this, we go look at the
     partial symbol information, which reports this guy as a stub.
     (FIXME): Unfortunately, we are not that lucky: it turns out that the
     partial symbol information is also wrong sometimes.  This is because
     when it is entered (somread.c::som_symtab_read()) it can happen that
     if the type of the symbol (from the som) is Entry, and the symbol is
     in a shared library, then it can also be a trampoline.  This would
     be OK, except that I believe the way they decide if we are in a shared
     library does not work.  SOOOO..., even if we have a regular function
     w/o trampolines its minimal symbol can be assigned type
     mst_solib_trampoline.
     Also, if we find that the symbol is a real stub, then we fix the unwind
     descriptor, and define the stub type to be EXPORT.
     Hopefully this is correct most of the time.  */
  if (u->stub_unwind.stub_type == 0)
    {

      /* elz: NOTE (FIXME!) once the problem with the unwind information is
         fixed we can delete all the code which appears between the lines.  */
      /*----------------------------------------------------------------------*/
      msym = lookup_minimal_symbol_by_pc (pc);

      if (msym == NULL || MSYMBOL_TYPE (msym) != mst_solib_trampoline)
        return orig_pc == pc ? 0 : pc & ~0x3;

      else if (msym != NULL && MSYMBOL_TYPE (msym) == mst_solib_trampoline)
        {
          struct objfile *objfile;
          struct minimal_symbol *msymbol;
          int function_found = 0;

          /* Go look if there is another minimal symbol with the same name as
             this one, but with type mst_text.  This would happen if the msym
             is an actual trampoline, in which case there would be another
             symbol with the same name corresponding to the real function.  */

          ALL_MSYMBOLS (objfile, msymbol)
          {
            if (MSYMBOL_TYPE (msymbol) == mst_text
                && strcmp (SYMBOL_LINKAGE_NAME (msymbol),
                           SYMBOL_LINKAGE_NAME (msym)) == 0)
              {
                function_found = 1;
                break;
              }
          }

          if (function_found)
            /* The type of msym is correct (mst_solib_trampoline), but
               the unwind info is wrong, so set it to the correct value.  */
            u->stub_unwind.stub_type = EXPORT;
          else
            /* The stub type info in the unwind is correct (this is not a
               trampoline), but the msym type information is wrong, it
               should be mst_text.  So we need to fix the msym, and also
               get out of this function.  */
            {
              MSYMBOL_TYPE (msym) = mst_text;
              return orig_pc == pc ? 0 : pc & ~0x3;
            }
        }

      /*----------------------------------------------------------------------*/
    }

  /* It's a stub.  Search for a branch and figure out where it goes.
     Note we have to handle multi insn branch sequences like ldil;ble.
     Most (all?) other branches can be determined by examining the contents
     of certain registers and the stack.  */

  loc = pc;
  curr_inst = 0;
  prev_inst = 0;
  while (1)
    {
      /* Make sure we haven't walked outside the range of this stub.  */
      if (u != find_unwind_entry (loc))
        {
          warning (_("Unable to find branch in linker stub"));
          return orig_pc == pc ? 0 : pc & ~0x3;
        }

      prev_inst = curr_inst;
      curr_inst = read_memory_integer (loc, 4, byte_order);

      /* Does it look like a branch external using %r1?  Then it's the
         branch from the stub to the actual function.  */
      if ((curr_inst & 0xffe0e000) == 0xe0202000)
        {
          /* Yup.  See if the previous instruction loaded
             a value into %r1.  If so compute and return the jump address.  */
          if ((prev_inst & 0xffe00000) == 0x20200000)
            return (hppa_extract_21 (prev_inst)
                    + hppa_extract_17 (curr_inst)) & ~0x3;
          else
            {
              warning (_("Unable to find ldil X,%%r1 before ble Y(%%sr4,%%r1)."));
              return orig_pc == pc ? 0 : pc & ~0x3;
            }
        }

      /* Does it look like a be 0(sr0,%r21)? OR
         Does it look like a be, n 0(sr0,%r21)? OR
         Does it look like a bve (r21)? (this is on PA2.0)
         Does it look like a bve, n(r21)? (this is also on PA2.0)
         That's the branch from an
         import stub to an export stub.

         It is impossible to determine the target of the branch via
         simple examination of instructions and/or data (consider
         that the address in the plabel may be the address of the
         bind-on-reference routine in the dynamic loader).

         So we have to try an alternative approach.

         Get the name of the symbol at our current location; it should
         be a stub symbol with the same name as the symbol in the
         shared library.

         Then lookup a minimal symbol with the same name; we should
         get the minimal symbol for the target routine in the shared
         library as those take precedence over import/export stubs.  */
      if ((curr_inst == 0xe2a00000) ||
          (curr_inst == 0xe2a00002) ||
          (curr_inst == 0xeaa0d000) ||
          (curr_inst == 0xeaa0d002))
        {
          struct minimal_symbol *stubsym, *libsym;

          stubsym = lookup_minimal_symbol_by_pc (loc);
          if (stubsym == NULL)
            {
              warning (_("Unable to find symbol for 0x%lx"), loc);
              return orig_pc == pc ? 0 : pc & ~0x3;
            }

          libsym = lookup_minimal_symbol (SYMBOL_LINKAGE_NAME (stubsym),
                                          NULL, NULL);
          if (libsym == NULL)
            {
              warning (_("Unable to find library symbol for %s."),
                       SYMBOL_PRINT_NAME (stubsym));
              return orig_pc == pc ? 0 : pc & ~0x3;
            }

          return SYMBOL_VALUE (libsym);
        }

      /* Does it look like bl X,%rp or bl X,%r0?  Another way to do a
         branch from the stub to the actual function.  */
      /*elz */
      else if ((curr_inst & 0xffe0e000) == 0xe8400000
               || (curr_inst & 0xffe0e000) == 0xe8000000
               || (curr_inst & 0xffe0e000) == 0xe800A000)
        return (loc + hppa_extract_17 (curr_inst) + 8) & ~0x3;

      /* Does it look like bv (rp)?   Note this depends on the
         current stack pointer being the same as the stack
         pointer in the stub itself!  This is a branch on from the
         stub back to the original caller.  */
      /*else if ((curr_inst & 0xffe0e000) == 0xe840c000) */
      else if ((curr_inst & 0xffe0f000) == 0xe840c000)
        {
          /* Yup.  See if the previous instruction loaded
             rp from sp - 8.  */
          if (prev_inst == 0x4bc23ff1)
            {
              CORE_ADDR sp;
              sp = get_frame_register_unsigned (frame, HPPA_SP_REGNUM);
              return read_memory_integer (sp - 8, 4, byte_order) & ~0x3;
            }
          else
            {
              warning (_("Unable to find restore of %%rp before bv (%%rp)."));
              return orig_pc == pc ? 0 : pc & ~0x3;
            }
        }

      /* elz: added this case to capture the new instruction
         at the end of the return part of an export stub used by
         the PA2.0: BVE, n (rp) */
      else if ((curr_inst & 0xffe0f000) == 0xe840d000)
        {
          return (read_memory_integer
                  (get_frame_register_unsigned (frame, HPPA_SP_REGNUM) - 24,
                   word_size, byte_order)) & ~0x3;
        }

      /* What about be,n 0(sr0,%rp)?  It's just another way we return to
         the original caller from the stub.  Used in dynamic executables.  */
      else if (curr_inst == 0xe0400002)
        {
          /* The value we jump to is sitting in sp - 24.  But that's
             loaded several instructions before the be instruction.
             I guess we could check for the previous instruction being
             mtsp %r1,%sr0 if we want to do sanity checking.  */
          return (read_memory_integer
                  (get_frame_register_unsigned (frame, HPPA_SP_REGNUM) - 24,
                   word_size, byte_order)) & ~0x3;
        }

      /* Haven't found the branch yet, but we're still in the stub.
         Keep looking.  */
      loc += 4;
    }
}

static void
hppa_skip_permanent_breakpoint (struct regcache *regcache)
{
  /* To step over a breakpoint instruction on the PA takes some
     fiddling with the instruction address queue.

     When we stop at a breakpoint, the IA queue front (the instruction
     we're executing now) points at the breakpoint instruction, and
     the IA queue back (the next instruction to execute) points to
     whatever instruction we would execute after the breakpoint, if it
     were an ordinary instruction.  This is the case even if the
     breakpoint is in the delay slot of a branch instruction.

     Clearly, to step past the breakpoint, we need to set the queue
     front to the back.  But what do we put in the back?  What
     instruction comes after that one?  Because of the branch delay
     slot, the next insn is always at the back + 4.  */

  ULONGEST pcoq_tail, pcsq_tail;
  regcache_cooked_read_unsigned (regcache, HPPA_PCOQ_TAIL_REGNUM, &pcoq_tail);
  regcache_cooked_read_unsigned (regcache, HPPA_PCSQ_TAIL_REGNUM, &pcsq_tail);

  regcache_cooked_write_unsigned (regcache, HPPA_PCOQ_HEAD_REGNUM, pcoq_tail);
  regcache_cooked_write_unsigned (regcache, HPPA_PCSQ_HEAD_REGNUM, pcsq_tail);

  regcache_cooked_write_unsigned (regcache, HPPA_PCOQ_TAIL_REGNUM,
                                  pcoq_tail + 4);
  /* We can leave the tail's space the same, since there's no jump.  */
}


/* Signal frames.  */
struct hppa_hpux_sigtramp_unwind_cache
{
  CORE_ADDR base;
  struct trad_frame_saved_reg *saved_regs;
};

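/* Registers saved in the signal frame's save_state area, listed in the
   order their slots appear after the general registers.  A -1 entry
   marks a slot that has no corresponding GDB register; the unwinder
   below skips it but still advances past its slot.  */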
static int hppa_hpux_tramp_reg[] = {
  HPPA_SAR_REGNUM,
  HPPA_PCOQ_HEAD_REGNUM,
  HPPA_PCSQ_HEAD_REGNUM,
  HPPA_PCOQ_TAIL_REGNUM,
  HPPA_PCSQ_TAIL_REGNUM,
  HPPA_EIEM_REGNUM,
  HPPA_IIR_REGNUM,
  HPPA_ISR_REGNUM,
  HPPA_IOR_REGNUM,
  HPPA_IPSW_REGNUM,
  -1,
  HPPA_SR4_REGNUM,
  HPPA_SR4_REGNUM + 1,
  HPPA_SR4_REGNUM + 2,
  HPPA_SR4_REGNUM + 3,
  HPPA_SR4_REGNUM + 4,
  HPPA_SR4_REGNUM + 5,
  HPPA_SR4_REGNUM + 6,
  HPPA_SR4_REGNUM + 7,
  HPPA_RCR_REGNUM,
  HPPA_PID0_REGNUM,
  HPPA_PID1_REGNUM,
  HPPA_CCR_REGNUM,
  HPPA_PID2_REGNUM,
  HPPA_PID3_REGNUM,
  HPPA_TR0_REGNUM,
  HPPA_TR0_REGNUM + 1,
  HPPA_TR0_REGNUM + 2,
  HPPA_CR27_REGNUM
};

static struct hppa_hpux_sigtramp_unwind_cache *
hppa_hpux_sigtramp_frame_unwind_cache (struct frame_info *this_frame,
                                       void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct hppa_hpux_sigtramp_unwind_cache *info;
  unsigned int flag;
  CORE_ADDR sp, scptr, off;
  int i, incr, szoff;

  if (*this_cache)
    return *this_cache;

  info = FRAME_OBSTACK_ZALLOC (struct hppa_hpux_sigtramp_unwind_cache);
  *this_cache = info;
  info->saved_regs = trad_frame_alloc_saved_regs (this_frame);

  sp = get_frame_register_unsigned (this_frame, HPPA_SP_REGNUM);

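  /* The saved machine state (a `struct save_state') sits at a fixed
     distance below this frame's stack pointer; the distance differs
     between the 32-bit and 64-bit ABIs.  */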
  if (IS_32BIT_TARGET (gdbarch))
    scptr = sp - 1352;
  else
    scptr = sp - 1520;

  off = scptr;

  /* See /usr/include/machine/save_state.h for the layout of the
     save_state_t structure.  */

  flag = read_memory_unsigned_integer (scptr + HPPA_HPUX_SS_FLAGS_OFFSET,
                                       4, byte_order);

  if (!(flag & HPPA_HPUX_SS_WIDEREGS))
    {
      /* Narrow registers.  */
      off = scptr + HPPA_HPUX_SS_NARROW_OFFSET;
      incr = 4;
      szoff = 0;
    }
  else
    {
      /* Wide registers.  */
      off = scptr + HPPA_HPUX_SS_WIDE_OFFSET + 8;
      incr = 8;
      szoff = (tdep->bytes_per_address == 4 ? 4 : 0);
    }

  for (i = 1; i < 32; i++)
    {
      info->saved_regs[HPPA_R0_REGNUM + i].addr = off + szoff;
      off += incr;
    }

  for (i = 0; i < ARRAY_SIZE (hppa_hpux_tramp_reg); i++)
    {
      if (hppa_hpux_tramp_reg[i] > 0)
        info->saved_regs[hppa_hpux_tramp_reg[i]].addr = off + szoff;

      off += incr;
    }

  /* TODO: fp regs */

  info->base = get_frame_register_unsigned (this_frame, HPPA_SP_REGNUM);

  return info;
}

static void
hppa_hpux_sigtramp_frame_this_id (struct frame_info *this_frame,
                                  void **this_prologue_cache,
                                  struct frame_id *this_id)
{
  struct hppa_hpux_sigtramp_unwind_cache *info
    = hppa_hpux_sigtramp_frame_unwind_cache (this_frame, this_prologue_cache);

  *this_id = frame_id_build (info->base, get_frame_pc (this_frame));
}

static struct value *
hppa_hpux_sigtramp_frame_prev_register (struct frame_info *this_frame,
                                        void **this_prologue_cache,
                                        int regnum)
{
  struct hppa_hpux_sigtramp_unwind_cache *info
    = hppa_hpux_sigtramp_frame_unwind_cache (this_frame, this_prologue_cache);

  return hppa_frame_prev_register_helper (this_frame, info->saved_regs,
                                          regnum);
}

static int
hppa_hpux_sigtramp_unwind_sniffer (const struct frame_unwind *self,
                                   struct frame_info *this_frame,
                                   void **this_cache)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct unwind_table_entry *u;
  CORE_ADDR pc = get_frame_pc (this_frame);

  u = find_unwind_entry (pc);

  /* If this is an export stub, try to get the unwind descriptor for
     the actual function itself.  */
  if (u && u->stub_unwind.stub_type == EXPORT)
    {
      gdb_byte buf[HPPA_INSN_SIZE];
      unsigned long insn;

      if (!safe_frame_unwind_memory (this_frame, u->region_start,
                                     buf, sizeof buf))
        return 0;

      insn = extract_unsigned_integer (buf, sizeof buf, byte_order);
      if ((insn & 0xffe0e000) == 0xe8400000)
        u = find_unwind_entry (u->region_start + hppa_extract_17 (insn) + 8);
    }

  if (u && u->HP_UX_interrupt_marker)
    return 1;

  return 0;
}

static const struct frame_unwind hppa_hpux_sigtramp_frame_unwind = {
  SIGTRAMP_FRAME,
  hppa_hpux_sigtramp_frame_this_id,
  hppa_hpux_sigtramp_frame_prev_register,
  NULL,
  hppa_hpux_sigtramp_unwind_sniffer
};

static CORE_ADDR
hppa32_hpux_find_global_pointer (struct gdbarch *gdbarch,
                                 struct value *function)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR faddr;

  faddr = value_as_address (function);

  /* Is this a plabel?  If so, dereference it to get the gp value.  */
  if (faddr & 2)
    {
      int status;
      char buf[4];

      faddr &= ~3;

      status = target_read_memory (faddr + 4, buf, sizeof (buf));
      if (status == 0)
        return extract_unsigned_integer (buf, sizeof (buf), byte_order);
    }

  return gdbarch_tdep (gdbarch)->solib_get_got_by_pc (faddr);
}

static CORE_ADDR
hppa64_hpux_find_global_pointer (struct gdbarch *gdbarch,
                                 struct value *function)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  CORE_ADDR faddr;
  char buf[32];

  faddr = value_as_address (function);

  if (in_opd_section (faddr))
    {
      target_read_memory (faddr, buf, sizeof (buf));
      return extract_unsigned_integer (&buf[24], 8, byte_order);
    }
  else
    {
      return gdbarch_tdep (gdbarch)->solib_get_got_by_pc (faddr);
    }
}

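/* Instruction patterns for the interspace branch sequence we look for.
   Each word is a required-bit mask: a candidate instruction matches
   entry P when (insn & P) == P (see hppa_hpux_search_pattern).  */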
static unsigned int ldsid_pattern[] = {
  0x000010a0,		/* ldsid (rX),rY */
  0x00001820,		/* mtsp rY,sr0 */
  0xe0000000		/* be,n (sr0,rX) */
};

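/* Scan the instructions between START and END for a sequence matching
   PATTERNS (COUNT mask words).  Return the address of the first match,
   or 0 if none is found.  */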
static CORE_ADDR
hppa_hpux_search_pattern (struct gdbarch *gdbarch,
                          CORE_ADDR start, CORE_ADDR end,
                          unsigned int *patterns, int count)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int num_insns = (end - start + HPPA_INSN_SIZE) / HPPA_INSN_SIZE;
  unsigned int *insns;
  gdb_byte *buf;
  int offset, i;

  buf = alloca (num_insns * HPPA_INSN_SIZE);
  insns = alloca (num_insns * sizeof (unsigned int));

  read_memory (start, buf, num_insns * HPPA_INSN_SIZE);
  for (i = 0; i < num_insns; i++, buf += HPPA_INSN_SIZE)
    insns[i] = extract_unsigned_integer (buf, HPPA_INSN_SIZE, byte_order);

  for (offset = 0; offset <= num_insns - count; offset++)
    {
      for (i = 0; i < count; i++)
        {
          if ((insns[offset + i] & patterns[i]) != patterns[i])
            break;
        }
      if (i == count)
        break;
    }

  if (offset <= num_insns - count)
    return start + offset * HPPA_INSN_SIZE;
  else
    return 0;
}

static CORE_ADDR
hppa32_hpux_search_dummy_call_sequence (struct gdbarch *gdbarch, CORE_ADDR pc,
                                        int *argreg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct objfile *obj;
  struct obj_section *sec;
  struct hppa_objfile_private *priv;
  struct frame_info *frame;
  struct unwind_table_entry *u;
  CORE_ADDR addr, rp;
  char buf[4];
  unsigned int insn;

  sec = find_pc_section (pc);
  obj = sec->objfile;
  priv = objfile_data (obj, hppa_objfile_priv_data);

  if (!priv)
    priv = hppa_init_objfile_priv_data (obj);
  if (!priv)
    error (_("Internal error creating objfile private data."));

  /* Use the cached value if we have one.  */
  if (priv->dummy_call_sequence_addr != 0)
    {
      *argreg = priv->dummy_call_sequence_reg;
      return priv->dummy_call_sequence_addr;
    }

  /* First try a heuristic; if we are in a shared library call, our return
     pointer is likely to point at an export stub.  */
  frame = get_current_frame ();
  rp = frame_unwind_register_unsigned (frame, 2);
  u = find_unwind_entry (rp);
  if (u && u->stub_unwind.stub_type == EXPORT)
    {
      addr = hppa_hpux_search_pattern (gdbarch,
                                       u->region_start, u->region_end,
                                       ldsid_pattern,
                                       ARRAY_SIZE (ldsid_pattern));
      if (addr)
        goto found_pattern;
    }

  /* Next thing to try is to look for an export stub.  */
  if (priv->unwind_info)
    {
      int i;

      for (i = 0; i < priv->unwind_info->last; i++)
        {
          struct unwind_table_entry *u;
          u = &priv->unwind_info->table[i];
          if (u->stub_unwind.stub_type == EXPORT)
            {
              addr = hppa_hpux_search_pattern (gdbarch,
                                               u->region_start, u->region_end,
                                               ldsid_pattern,
                                               ARRAY_SIZE (ldsid_pattern));
              if (addr)
                {
                  goto found_pattern;
                }
            }
        }
    }

  /* Finally, if this is the main executable, try to locate a sequence
     from noshlibs */
  addr = hppa_symbol_address ("noshlibs");
  sec = find_pc_section (addr);

  if (sec && sec->objfile == obj)
    {
      CORE_ADDR start, end;

      find_pc_partial_function (addr, NULL, &start, &end);
      if (start != 0 && end != 0)
        {
          addr = hppa_hpux_search_pattern (gdbarch, start, end, ldsid_pattern,
                                           ARRAY_SIZE (ldsid_pattern));
          if (addr)
            goto found_pattern;
        }
    }

  /* Can't find a suitable sequence.  */
  return 0;

found_pattern:
  target_read_memory (addr, buf, sizeof (buf));
  insn = extract_unsigned_integer (buf, sizeof (buf), byte_order);
  priv->dummy_call_sequence_addr = addr;
  priv->dummy_call_sequence_reg = (insn >> 21) & 0x1f;

  *argreg = priv->dummy_call_sequence_reg;
  return priv->dummy_call_sequence_addr;
}

static CORE_ADDR
hppa64_hpux_search_dummy_call_sequence (struct gdbarch *gdbarch, CORE_ADDR pc,
                                        int *argreg)
{
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  struct objfile *obj;
  struct obj_section *sec;
  struct hppa_objfile_private *priv;
  CORE_ADDR addr;
  struct minimal_symbol *msym;
  int i;

  sec = find_pc_section (pc);
  obj = sec->objfile;
  priv = objfile_data (obj, hppa_objfile_priv_data);

  if (!priv)
    priv = hppa_init_objfile_priv_data (obj);
  if (!priv)
    error (_("Internal error creating objfile private data."));

  /* Use the cached value if we have one.  */
  if (priv->dummy_call_sequence_addr != 0)
    {
      *argreg = priv->dummy_call_sequence_reg;
      return priv->dummy_call_sequence_addr;
    }

  /* FIXME: Without stub unwind information, locating a suitable sequence is
     fairly difficult.  For now, we implement a very naive and inefficient
     scheme; try to read in blocks of code, and look for a "bve,n (rp)"
     instruction.  These are likely to occur at the end of functions, so
     we only look at the last two instructions of each function.  */
  for (i = 0, msym = obj->msymbols; i < obj->minimal_symbol_count; i++, msym++)
    {
      CORE_ADDR begin, end;
      char *name;
      gdb_byte buf[2 * HPPA_INSN_SIZE];
      int offset;

      find_pc_partial_function (SYMBOL_VALUE_ADDRESS (msym), &name,
                                &begin, &end);

      if (name == NULL || begin == 0 || end == 0)
        continue;

      if (target_read_memory (end - sizeof (buf), buf, sizeof (buf)) == 0)
        {
          for (offset = 0; offset < sizeof (buf); offset++)
            {
              unsigned int insn;

              insn = extract_unsigned_integer (buf + offset,
                                               HPPA_INSN_SIZE, byte_order);
              if (insn == 0xe840d002) /* bve,n (rp) */
                {
                  addr = (end - sizeof (buf)) + offset;
                  goto found_pattern;
                }
            }
        }
    }

  /* Can't find a suitable sequence.  */
  return 0;

found_pattern:
  priv->dummy_call_sequence_addr = addr;
  /* Right now we only look for a "bve,n (rp)" sequence, so the register is
     always HPPA_RP_REGNUM.  */
  priv->dummy_call_sequence_reg = HPPA_RP_REGNUM;

  *argreg = priv->dummy_call_sequence_reg;
  return priv->dummy_call_sequence_addr;
}

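/* Find an import stub that can be used to reach FUNCADDR.  An IMPORT
   stub is preferred over an IMPORT_SHLIB stub.  Return 0 if no stub
   can be found.  */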
static CORE_ADDR
hppa_hpux_find_import_stub_for_addr (CORE_ADDR funcaddr)
{
  struct objfile *objfile;
  struct minimal_symbol *funsym, *stubsym;
  CORE_ADDR stubaddr;

  funsym = lookup_minimal_symbol_by_pc (funcaddr);
  stubaddr = 0;

  ALL_OBJFILES (objfile)
    {
      stubsym = lookup_minimal_symbol_solib_trampoline
        (SYMBOL_LINKAGE_NAME (funsym), objfile);

      if (stubsym)
        {
          struct unwind_table_entry *u;

          u = find_unwind_entry (SYMBOL_VALUE (stubsym));
          if (u == NULL
              || (u->stub_unwind.stub_type != IMPORT
                  && u->stub_unwind.stub_type != IMPORT_SHLIB))
            continue;

          stubaddr = SYMBOL_VALUE (stubsym);

          /* If we found an IMPORT stub, then we can stop searching;
             if we found an IMPORT_SHLIB, we want to continue the search
             in the hopes that we will find an IMPORT stub.  */
          if (u->stub_unwind.stub_type == IMPORT)
            break;
        }
    }

  return stubaddr;
}

static int
hppa_hpux_sr_for_addr (struct gdbarch *gdbarch, CORE_ADDR addr)
{
  int sr;
  /* The space register to use is encoded in the top 2 bits of the address.  */
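  /* For example, with 32-bit addresses (bytes_per_address == 4) the
     shift below is 30, so an address such as 0x80000000 yields
     2 + 4, i.e. %sr6.  */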
  sr = addr >> (gdbarch_tdep (gdbarch)->bytes_per_address * 8 - 2);
  return sr + 4;
}

static CORE_ADDR
hppa_hpux_find_dummy_bpaddr (CORE_ADDR addr)
{
  /* In order for us to restore the space register to its starting state,
     we need the dummy trampoline to return to an instruction address in
     the same space as where we started the call.  We used to place the
     breakpoint near the current pc, however, this breaks nested dummy calls
     as the nested call will hit the breakpoint address and terminate
     prematurely.  Instead, we try to look for an address in the same space to
     put the breakpoint.

     This is similar in spirit to putting the breakpoint at the "entry point"
     of an executable.  */

  struct obj_section *sec;
  struct unwind_table_entry *u;
  struct minimal_symbol *msym;
  CORE_ADDR func;
  int i;

  sec = find_pc_section (addr);
  if (sec)
    {
      /* First try the lowest address in the section; we can use it as long
         as it is "regular" code (i.e. not a stub).  */
      u = find_unwind_entry (obj_section_addr (sec));
      if (!u || u->stub_unwind.stub_type == 0)
        return obj_section_addr (sec);

      /* Otherwise, we need to find a symbol for a regular function.  We
         do this by walking the list of msymbols in the objfile.  The symbol
         we find should not be the same as the function that was passed in.  */

      /* FIXME: this is broken, because we can find a function that will be
         called by the dummy call target function, which will still not
         work.  */

      find_pc_partial_function (addr, NULL, &func, NULL);
      for (i = 0, msym = sec->objfile->msymbols;
           i < sec->objfile->minimal_symbol_count;
           i++, msym++)
        {
          u = find_unwind_entry (SYMBOL_VALUE_ADDRESS (msym));
          if (func != SYMBOL_VALUE_ADDRESS (msym)
              && (!u || u->stub_unwind.stub_type == 0))
            return SYMBOL_VALUE_ADDRESS (msym);
        }
    }

  warning (_("Cannot find suitable address to place dummy breakpoint; nested "
             "calls may fail."));
  return addr - 4;
}

static CORE_ADDR
hppa_hpux_push_dummy_code (struct gdbarch *gdbarch, CORE_ADDR sp,
                           CORE_ADDR funcaddr,
                           struct value **args, int nargs,
                           struct type *value_type,
                           CORE_ADDR *real_pc, CORE_ADDR *bp_addr,
                           struct regcache *regcache)
{
  CORE_ADDR pc, stubaddr;
  int argreg = 0;

  pc = regcache_read_pc (regcache);

  /* Note: we don't want to pass a function descriptor here; push_dummy_call
     fills in the PIC register for us.  */
  funcaddr = gdbarch_convert_from_func_ptr_addr (gdbarch, funcaddr, NULL);

  /* The simple case is where we call a function in the same space that we are
     currently in; in that case we don't really need to do anything.  */
  if (hppa_hpux_sr_for_addr (gdbarch, pc)
      == hppa_hpux_sr_for_addr (gdbarch, funcaddr))
    {
      /* Intraspace call.  */
      *bp_addr = hppa_hpux_find_dummy_bpaddr (pc);
      *real_pc = funcaddr;
      regcache_cooked_write_unsigned (regcache, HPPA_RP_REGNUM, *bp_addr);

      return sp;
    }

  /* In order to make an interspace call, we need to go through a stub.
     gcc supplies an appropriate stub called "__gcc_plt_call", however, if
     an application is compiled with HP compilers then this stub is not
     available.  We used to fallback to "__d_plt_call", however that stub
     is not entirely useful for us because it doesn't do an interspace
     return back to the caller.  Also, on hppa64-hpux, there is no
     __gcc_plt_call available.  In order to keep the code uniform, we
     instead don't use either of these stubs, but instead write our own
     onto the stack.

     A problem arises since the stack is located in a different space than
     code, so in order to branch to a stack stub, we will need to do an
     interspace branch.  Previous versions of gdb did this by modifying code
     at the current pc and doing single-stepping to set the pcsq.  Since this
     is highly undesirable, we use a different scheme:

     All we really need to do the branch to the stub is a short instruction
     sequence like this:

     PA1.1:
		ldsid (rX),r1
		mtsp r1,sr0
		be,n (sr0,rX)

     PA2.0:
		bve,n (sr0,rX)

     Instead of writing these sequences ourselves, we can find it in
     the instruction stream that belongs to the current space.  While this
     seems difficult at first, we are actually guaranteed to find the sequences
     in several places:

     For 32-bit code:
     - in export stubs for shared libraries
     - in the "noshlibs" routine in the main module

     For 64-bit code:
     - at the end of each "regular" function

     We cache the address of these sequences in the objfile's private data
     since these operations can potentially be quite expensive.

     So, what we do is:
     - write a stack trampoline
     - look for a suitable instruction sequence in the current space
     - point the sequence at the trampoline
     - set the return address of the trampoline to the current space
       (see hppa_hpux_find_dummy_call_bpaddr)
     - set the continuing address of the "dummy code" as the sequence.
*/

  if (IS_32BIT_TARGET (gdbarch))
    {
      static unsigned int hppa32_tramp[] = {
        0x0fdf1291,		/* stw r31,-8(,sp) */
        0x02c010a1,		/* ldsid (,r22),r1 */
        0x00011820,		/* mtsp r1,sr0 */
        0xe6c00000,		/* be,l 0(sr0,r22),%sr0,%r31 */
        0x081f0242,		/* copy r31,rp */
        0x0fd11082,		/* ldw -8(,sp),rp */
        0x004010a1,		/* ldsid (,rp),r1 */
        0x00011820,		/* mtsp r1,sr0 */
        0xe0400000,		/* be 0(sr0,rp) */
        0x08000240		/* nop */
      };

      /* for hppa32, we must call the function through a stub so that on
         return it can return to the space of our trampoline.  */
      stubaddr = hppa_hpux_find_import_stub_for_addr (funcaddr);
      if (stubaddr == 0)
        error (_("Cannot call external function not referenced by application "
                 "(no import stub).\n"));
      regcache_cooked_write_unsigned (regcache, 22, stubaddr);

      write_memory (sp, (char *)&hppa32_tramp, sizeof (hppa32_tramp));

      *bp_addr = hppa_hpux_find_dummy_bpaddr (pc);
      regcache_cooked_write_unsigned (regcache, 31, *bp_addr);

      *real_pc = hppa32_hpux_search_dummy_call_sequence (gdbarch, pc, &argreg);
      if (*real_pc == 0)
        error (_("Cannot make interspace call from here."));

      regcache_cooked_write_unsigned (regcache, argreg, sp);

      sp += sizeof (hppa32_tramp);
    }
  else
    {
      static unsigned int hppa64_tramp[] = {
        0xeac0f000,		/* bve,l (r22),%r2 */
        0x0fdf12d1,		/* std r31,-8(,sp) */
        0x0fd110c2,		/* ldd -8(,sp),rp */
        0xe840d002,		/* bve,n (rp) */
        0x08000240		/* nop */
      };

      /* for hppa64, we don't need to call through a stub; all functions
         return via a bve.  */
      regcache_cooked_write_unsigned (regcache, 22, funcaddr);
      write_memory (sp, (char *)&hppa64_tramp, sizeof (hppa64_tramp));

      *bp_addr = pc - 4;
      regcache_cooked_write_unsigned (regcache, 31, *bp_addr);

      *real_pc = hppa64_hpux_search_dummy_call_sequence (gdbarch, pc, &argreg);
      if (*real_pc == 0)
        error (_("Cannot make interspace call from here."));

      regcache_cooked_write_unsigned (regcache, argreg, sp);

      sp += sizeof (hppa64_tramp);
    }

  sp = gdbarch_frame_align (gdbarch, sp);

  return sp;
}


static void
hppa_hpux_supply_ss_narrow (struct regcache *regcache,
                            int regnum, const char *save_state)
{
  const char *ss_narrow = save_state + HPPA_HPUX_SS_NARROW_OFFSET;
  int i, offset = 0;

  for (i = HPPA_R1_REGNUM; i < HPPA_FP0_REGNUM; i++)
    {
      if (regnum == i || regnum == -1)
        regcache_raw_supply (regcache, i, ss_narrow + offset);

      offset += 4;
    }
}

static void
hppa_hpux_supply_ss_fpblock (struct regcache *regcache,
                             int regnum, const char *save_state)
{
  const char *ss_fpblock = save_state + HPPA_HPUX_SS_FPBLOCK_OFFSET;
  int i, offset = 0;

  /* FIXME: We view the floating-point state as 64 single-precision
     registers for 32-bit code, and 32 double-precision registers for
     64-bit code.  This distinction is artificial and should be
     eliminated.  If that ever happens, we should remove the if-clause
     below.  */

  if (register_size (get_regcache_arch (regcache), HPPA_FP0_REGNUM) == 4)
    {
      for (i = HPPA_FP0_REGNUM; i < HPPA_FP0_REGNUM + 64; i++)
        {
          if (regnum == i || regnum == -1)
            regcache_raw_supply (regcache, i, ss_fpblock + offset);

          offset += 4;
        }
    }
  else
    {
      for (i = HPPA_FP0_REGNUM; i < HPPA_FP0_REGNUM + 32; i++)
        {
          if (regnum == i || regnum == -1)
            regcache_raw_supply (regcache, i, ss_fpblock + offset);

          offset += 8;
        }
    }
}

static void
hppa_hpux_supply_ss_wide (struct regcache *regcache,
                          int regnum, const char *save_state)
{
  const char *ss_wide = save_state + HPPA_HPUX_SS_WIDE_OFFSET;
  int i, offset = 8;

  if (register_size (get_regcache_arch (regcache), HPPA_R1_REGNUM) == 4)
    offset += 4;

  for (i = HPPA_R1_REGNUM; i < HPPA_FP0_REGNUM; i++)
    {
      if (regnum == i || regnum == -1)
        regcache_raw_supply (regcache, i, ss_wide + offset);

      offset += 8;
    }
}

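/* Supply register values to REGCACHE from the HP-UX `save_state' data
   in a core file's register section.  The section contents start with
   8 bytes of proc_info; the save_state structure follows.  */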
static void
|
1343 |
|
|
hppa_hpux_supply_save_state (const struct regset *regset,
|
1344 |
|
|
struct regcache *regcache,
|
1345 |
|
|
int regnum, const void *regs, size_t len)
|
1346 |
|
|
{
|
1347 |
|
|
struct gdbarch *gdbarch = get_regcache_arch (regcache);
|
1348 |
|
|
enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
|
1349 |
|
|
const char *proc_info = regs;
|
1350 |
|
|
const char *save_state = proc_info + 8;
|
1351 |
|
|
ULONGEST flags;
|
1352 |
|
|
|
1353 |
|
|
flags = extract_unsigned_integer (save_state + HPPA_HPUX_SS_FLAGS_OFFSET,
|
1354 |
|
|
4, byte_order);
|
1355 |
|
|
if (regnum == -1 || regnum == HPPA_FLAGS_REGNUM)
|
1356 |
|
|
{
|
1357 |
|
|
size_t size = register_size (gdbarch, HPPA_FLAGS_REGNUM);
|
1358 |
|
|
char buf[8];
|
1359 |
|
|
|
1360 |
|
|
store_unsigned_integer (buf, size, byte_order, flags);
|
1361 |
|
|
regcache_raw_supply (regcache, HPPA_FLAGS_REGNUM, buf);
|
1362 |
|
|
}
|
1363 |
|
|
|
1364 |
|
|
/* If the SS_WIDEREGS flag is set, we really do need the full
|
1365 |
|
|
`struct save_state'. */
|
1366 |
|
|
if (flags & HPPA_HPUX_SS_WIDEREGS && len < HPPA_HPUX_SAVE_STATE_SIZE)
|
1367 |
|
|
error (_("Register set contents too small"));
|
1368 |
|
|
|
1369 |
|
|
if (flags & HPPA_HPUX_SS_WIDEREGS)
|
1370 |
|
|
hppa_hpux_supply_ss_wide (regcache, regnum, save_state);
|
1371 |
|
|
else
|
1372 |
|
|
hppa_hpux_supply_ss_narrow (regcache, regnum, save_state);
|
1373 |
|
|
|
1374 |
|
|
hppa_hpux_supply_ss_fpblock (regcache, regnum, save_state);
|
1375 |
|
|
}
|
1376 |
|
|
|
1377 |
|
|
/* HP-UX register set. */
|
1378 |
|
|
|
1379 |
|
|
static struct regset hppa_hpux_regset =
|
1380 |
|
|
{
|
1381 |
|
|
NULL,
|
1382 |
|
|
hppa_hpux_supply_save_state
|
1383 |
|
|
};
|
1384 |
|
|
|
1385 |
|
|
static const struct regset *
|
1386 |
|
|
hppa_hpux_regset_from_core_section (struct gdbarch *gdbarch,
|
1387 |
|
|
const char *sect_name, size_t sect_size)
|
1388 |
|
|
{
|
1389 |
|
|
if (strcmp (sect_name, ".reg") == 0
|
1390 |
|
|
&& sect_size >= HPPA_HPUX_PA89_SAVE_STATE_SIZE + 8)
|
1391 |
|
|
return &hppa_hpux_regset;
|
1392 |
|
|
|
1393 |
|
|
return NULL;
|
1394 |
|
|
}
|
1395 |
|
|
|
1396 |
|
|
|
1397 |
|
|
/* Bit in the `ss_flag' member of `struct save_state' that indicates
|
1398 |
|
|
the state was saved from a system call. From
|
1399 |
|
|
<machine/save_state.h>. */
|
1400 |
|
|
#define HPPA_HPUX_SS_INSYSCALL 0x02
|
1401 |
|
|
|
1402 |
|
|
static CORE_ADDR
hppa_hpux_read_pc (struct regcache *regcache)
{
  ULONGEST flags;

  /* If we're currently in a system call, return the contents of %r31.  */
  regcache_cooked_read_unsigned (regcache, HPPA_FLAGS_REGNUM, &flags);
  if (flags & HPPA_HPUX_SS_INSYSCALL)
    {
      ULONGEST pc;
      regcache_cooked_read_unsigned (regcache, HPPA_R31_REGNUM, &pc);
      return pc & ~0x3;
    }

  return hppa_read_pc (regcache);
}

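/* Write PC to REGCACHE.  While stopped inside a system call, also update
   the resume address in %r31, with the two low-order bits set again (see
   hppa_hpux_read_pc), before handing over to the generic hppa_write_pc.  */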
static void
hppa_hpux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  ULONGEST flags;

  /* If we're currently in a system call, also write PC into %r31.  */
  regcache_cooked_read_unsigned (regcache, HPPA_FLAGS_REGNUM, &flags);
  if (flags & HPPA_HPUX_SS_INSYSCALL)
    regcache_cooked_write_unsigned (regcache, HPPA_R31_REGNUM, pc | 0x3);

  hppa_write_pc (regcache, pc);
}

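/* Unwind the PC from NEXT_FRAME, applying the same system-call special
   case as hppa_hpux_read_pc but using the unwound flags and %r31
   values.  */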
static CORE_ADDR
hppa_hpux_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  ULONGEST flags;

  /* If we're currently in a system call, return the contents of %r31.  */
  flags = frame_unwind_register_unsigned (next_frame, HPPA_FLAGS_REGNUM);
  if (flags & HPPA_HPUX_SS_INSYSCALL)
    return frame_unwind_register_unsigned (next_frame, HPPA_R31_REGNUM) & ~0x3;

  return hppa_unwind_pc (gdbarch, next_frame);
}


/* Given the current value of the pc, check to see if it is inside a stub, and
   if so, change the value of the pc to point to the caller of the stub.
   THIS_FRAME is the current frame in the current list of frames.
   BASE contains the stack frame base of the current frame.
   SAVED_REGS is the register file stored in the frame cache.  */
static void
hppa_hpux_unwind_adjust_stub (struct frame_info *this_frame, CORE_ADDR base,
                              struct trad_frame_saved_reg *saved_regs)
{
  struct gdbarch *gdbarch = get_frame_arch (this_frame);
  enum bfd_endian byte_order = gdbarch_byte_order (gdbarch);
  int word_size = gdbarch_ptr_bit (gdbarch) / 8;
  struct value *pcoq_head_val;
  ULONGEST pcoq_head;
  CORE_ADDR stubpc;
  struct unwind_table_entry *u;

  pcoq_head_val = trad_frame_get_prev_register (this_frame, saved_regs,
                                                HPPA_PCOQ_HEAD_REGNUM);
  pcoq_head =
    extract_unsigned_integer (value_contents_all (pcoq_head_val),
                              register_size (gdbarch, HPPA_PCOQ_HEAD_REGNUM),
                              byte_order);

  u = find_unwind_entry (pcoq_head);
  if (u && u->stub_unwind.stub_type == EXPORT)
    {
      stubpc = read_memory_integer (base - 24, word_size, byte_order);
      trad_frame_set_value (saved_regs, HPPA_PCOQ_HEAD_REGNUM, stubpc);
    }
  else if (hppa_symbol_address ("__gcc_plt_call")
           == get_pc_function_start (pcoq_head))
    {
      stubpc = read_memory_integer (base - 8, word_size, byte_order);
      trad_frame_set_value (saved_regs, HPPA_PCOQ_HEAD_REGNUM, stubpc);
    }
}

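/* Architecture initialization shared by the 32-bit (SOM) and 64-bit
   (ELF) HP-UX OS ABIs: hook up trampoline and stub handling, dummy-call
   setup, the PC accessors above, the core file register set, and the
   sigtramp frame unwinder.  */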
static void
hppa_hpux_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  if (IS_32BIT_TARGET (gdbarch))
    tdep->in_solib_call_trampoline = hppa32_hpux_in_solib_call_trampoline;
  else
    tdep->in_solib_call_trampoline = hppa64_hpux_in_solib_call_trampoline;

  tdep->unwind_adjust_stub = hppa_hpux_unwind_adjust_stub;

  set_gdbarch_in_solib_return_trampoline
    (gdbarch, hppa_hpux_in_solib_return_trampoline);
  set_gdbarch_skip_trampoline_code (gdbarch, hppa_hpux_skip_trampoline_code);

  set_gdbarch_push_dummy_code (gdbarch, hppa_hpux_push_dummy_code);
  set_gdbarch_call_dummy_location (gdbarch, ON_STACK);

  set_gdbarch_read_pc (gdbarch, hppa_hpux_read_pc);
  set_gdbarch_write_pc (gdbarch, hppa_hpux_write_pc);
  set_gdbarch_unwind_pc (gdbarch, hppa_hpux_unwind_pc);
  set_gdbarch_skip_permanent_breakpoint
    (gdbarch, hppa_skip_permanent_breakpoint);

  set_gdbarch_regset_from_core_section
    (gdbarch, hppa_hpux_regset_from_core_section);

  frame_unwind_append_unwinder (gdbarch, &hppa_hpux_sigtramp_frame_unwind);
}

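/* OS ABI initialization for 32-bit HP-UX, which uses SOM executables
   and shared libraries.  */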
static void
hppa_hpux_som_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  tdep->is_elf = 0;

  tdep->find_global_pointer = hppa32_hpux_find_global_pointer;

  hppa_hpux_init_abi (info, gdbarch);
  som_solib_select (gdbarch);
}

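/* OS ABI initialization for 64-bit HP-UX, which uses ELF executables
   and PA64 shared libraries.  */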
static void
hppa_hpux_elf_init_abi (struct gdbarch_info info, struct gdbarch *gdbarch)
{
  struct gdbarch_tdep *tdep = gdbarch_tdep (gdbarch);

  tdep->is_elf = 1;
  tdep->find_global_pointer = hppa64_hpux_find_global_pointer;

  hppa_hpux_init_abi (info, gdbarch);
  pa64_solib_select (gdbarch);
}

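/* Sniff the OS ABI of the core file ABFD: "hpux-core" BFDs are SOM
   HP-UX cores, while "elf64-hppa" BFDs are recognized as ELF HP-UX
   cores when they contain a ".kernel" section holding the string
   "HP-UX".  */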
static enum gdb_osabi
hppa_hpux_core_osabi_sniffer (bfd *abfd)
{
  if (strcmp (bfd_get_target (abfd), "hpux-core") == 0)
    return GDB_OSABI_HPUX_SOM;
  else if (strcmp (bfd_get_target (abfd), "elf64-hppa") == 0)
    {
      asection *section;

      section = bfd_get_section_by_name (abfd, ".kernel");
      if (section)
        {
          bfd_size_type size;
          char *contents;

          size = bfd_section_size (abfd, section);
          contents = alloca (size);
          if (bfd_get_section_contents (abfd, section, contents,
                                        (file_ptr) 0, size)
              && strcmp (contents, "HP-UX") == 0)
            return GDB_OSABI_HPUX_ELF;
        }
    }

  return GDB_OSABI_UNKNOWN;
}

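/* Register the HP-UX core file sniffer and the SOM and ELF OS ABI
   initializers with the gdbarch framework.  */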
void
_initialize_hppa_hpux_tdep (void)
{
  /* BFD doesn't set a flavour for HP-UX style core files.  It doesn't
     set the architecture either.  */
  gdbarch_register_osabi_sniffer (bfd_arch_unknown,
                                  bfd_target_unknown_flavour,
                                  hppa_hpux_core_osabi_sniffer);
  gdbarch_register_osabi_sniffer (bfd_arch_hppa,
                                  bfd_target_elf_flavour,
                                  hppa_hpux_core_osabi_sniffer);

  gdbarch_register_osabi (bfd_arch_hppa, 0, GDB_OSABI_HPUX_SOM,
                          hppa_hpux_som_init_abi);
  gdbarch_register_osabi (bfd_arch_hppa, bfd_mach_hppa20w, GDB_OSABI_HPUX_ELF,
                          hppa_hpux_elf_init_abi);
}