OpenCores Subversion repository: openrisc (https://opencores.org/ocsvn/openrisc/openrisc/trunk)
File: trunk/gnu-stable/gdb-7.2/gdb/infrun.c (rev 865)
/* Target-struct-independent code to start (run) and stop an inferior
   process.

   Copyright (C) 1986, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995,
   1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
   2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdb_string.h"
#include <ctype.h>
#include "symtab.h"
#include "frame.h"
#include "inferior.h"
#include "exceptions.h"
#include "breakpoint.h"
#include "gdb_wait.h"
#include "gdbcore.h"
#include "gdbcmd.h"
#include "cli/cli-script.h"
#include "target.h"
#include "gdbthread.h"
#include "annotate.h"
#include "symfile.h"
#include "top.h"
#include <signal.h>
#include "inf-loop.h"
#include "regcache.h"
#include "value.h"
#include "observer.h"
#include "language.h"
#include "solib.h"
#include "main.h"
#include "gdb_assert.h"
#include "mi/mi-common.h"
#include "event-top.h"
#include "record.h"
#include "inline-frame.h"
#include "jit.h"
#include "tracepoint.h"

/* Prototypes for local functions */

static void signals_info (char *, int);

static void handle_command (char *, int);

static void sig_print_info (enum target_signal);

static void sig_print_header (void);

static void resume_cleanups (void *);

static int hook_stop_stub (void *);

static int restore_selected_frame (void *);

static int follow_fork (void);

static void set_schedlock_func (char *args, int from_tty,
                                struct cmd_list_element *c);

static int currently_stepping (struct thread_info *tp);

static int currently_stepping_or_nexting_callback (struct thread_info *tp,
                                                   void *data);

static void xdb_handle_command (char *args, int from_tty);

static int prepare_to_proceed (int);

void _initialize_infrun (void);

void nullify_last_target_wait_ptid (void);

/* When set, stop the 'step' command if we enter a function which has
   no line number information.  The normal behavior is that we step
   over such a function.  */
int step_stop_if_no_debug = 0;
static void
show_step_stop_if_no_debug (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Mode of the step operation is %s.\n"), value);
}

/* In asynchronous mode, but simulating synchronous execution. */

int sync_execution = 0;

/* wait_for_inferior and normal_stop use this to notify the user
   when the inferior stopped in a different thread than it had been
   running in.  */

static ptid_t previous_inferior_ptid;

/* Default behavior is to detach newly forked processes (legacy).  */
int detach_fork = 1;

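/* When non-zero, print debugging output about displaced stepping
   (the "displaced:" messages below).  */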
int debug_displaced = 0;
static void
show_debug_displaced (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Displaced stepping debugging is %s.\n"), value);
}

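/* When non-zero, print debugging output about inferior run control
   (the "infrun:" messages below).  */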
int debug_infrun = 0;
static void
show_debug_infrun (struct ui_file *file, int from_tty,
                   struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Inferior debugging is %s.\n"), value);
}

/* If the program uses ELF-style shared libraries, then calls to
   functions in shared libraries go through stubs, which live in a
   table called the PLT (Procedure Linkage Table).  The first time the
   function is called, the stub sends control to the dynamic linker,
   which looks up the function's real address, patches the stub so
   that future calls will go directly to the function, and then passes
   control to the function.

   If we are stepping at the source level, we don't want to see any of
   this --- we just want to skip over the stub and the dynamic linker.
   The simple approach is to single-step until control leaves the
   dynamic linker.

   However, on some systems (e.g., Red Hat's 5.2 distribution) the
   dynamic linker calls functions in the shared C library, so you
   can't tell from the PC alone whether the dynamic linker is still
   running.  In this case, we use a step-resume breakpoint to get us
   past the dynamic linker, as if we were using "next" to step over a
   function call.

   in_solib_dynsym_resolve_code() says whether we're in the dynamic
   linker code or not.  Normally, this means we single-step.  However,
   if SKIP_SOLIB_RESOLVER then returns non-zero, then its value is an
   address where we can place a step-resume breakpoint to get past the
   linker's symbol resolution function.

   in_solib_dynsym_resolve_code() can generally be implemented in a
   pretty portable way, by comparing the PC against the address ranges
   of the dynamic linker's sections.

   SKIP_SOLIB_RESOLVER is generally going to be system-specific, since
   it depends on internal details of the dynamic linker.  It's usually
   not too hard to figure out where to put a breakpoint, but it
   certainly isn't portable.  SKIP_SOLIB_RESOLVER should do plenty of
   sanity checking.  If it can't figure things out, returning zero and
   getting the (possibly confusing) stepping behavior is better than
   signalling an error, which will obscure the change in the
   inferior's state.  */

/* This function returns TRUE if pc is the address of an instruction
   that lies within the dynamic linker (such as the event hook, or the
   dld itself).

   This function must be used only when a dynamic linker event has
   been caught, and the inferior is being stepped out of the hook, or
   undefined results are guaranteed.  */

#ifndef SOLIB_IN_DYNAMIC_LINKER
#define SOLIB_IN_DYNAMIC_LINKER(pid,pc) 0
#endif

/* "Observer mode" is somewhat like a more extreme version of
   non-stop, in which all GDB operations that might affect the
   target's execution have been disabled.  */

static int non_stop_1 = 0;

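/* Non-zero when observer mode is in effect.  OBSERVER_MODE_1 holds the
   value most recently entered by the user; set_observer_mode below copies
   it into OBSERVER_MODE, or reverts it if the inferior is running.  */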
int observer_mode = 0;
static int observer_mode_1 = 0;

static void
set_observer_mode (char *args, int from_tty,
                   struct cmd_list_element *c)
{
  extern int pagination_enabled;

  if (target_has_execution)
    {
      observer_mode_1 = observer_mode;
      error (_("Cannot change this setting while the inferior is running."));
    }

  observer_mode = observer_mode_1;

  may_write_registers = !observer_mode;
  may_write_memory = !observer_mode;
  may_insert_breakpoints = !observer_mode;
  may_insert_tracepoints = !observer_mode;
  /* We can insert fast tracepoints in or out of observer mode,
     but enable them if we're going into this mode.  */
  if (observer_mode)
    may_insert_fast_tracepoints = 1;
  may_stop = !observer_mode;
  update_target_permissions ();

  /* Going *into* observer mode we must force non-stop, then
     going out we leave it that way.  */
  if (observer_mode)
    {
      target_async_permitted = 1;
      pagination_enabled = 0;
      non_stop = non_stop_1 = 1;
    }

  if (from_tty)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (observer_mode ? "on" : "off"));
}

static void
show_observer_mode (struct ui_file *file, int from_tty,
                    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Observer mode is %s.\n"), value);
}

/* This updates the value of observer mode based on changes in
   permissions.  Note that we are deliberately ignoring the values of
   may-write-registers and may-write-memory, since the user may have
   reason to enable these during a session, for instance to turn on a
   debugging-related global.  */

void
update_observer_mode (void)
{
  int newval;

  newval = (!may_insert_breakpoints
            && !may_insert_tracepoints
            && may_insert_fast_tracepoints
            && !may_stop
            && non_stop);

  /* Let the user know if things change.  */
  if (newval != observer_mode)
    printf_filtered (_("Observer mode is now %s.\n"),
                     (newval ? "on" : "off"));

  observer_mode = observer_mode_1 = newval;
}

/* Tables of how to react to signals; the user sets them.  */

static unsigned char *signal_stop;
static unsigned char *signal_print;
static unsigned char *signal_program;

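/* For every signal number selected in SIGS, set (SET_SIGS) or clear
   (UNSET_SIGS) the corresponding entry in FLAGS.  */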
#define SET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 1; \
  } while (0)

#define UNSET_SIGS(nsigs,sigs,flags) \
  do { \
    int signum = (nsigs); \
    while (signum-- > 0) \
      if ((sigs)[signum]) \
        (flags)[signum] = 0; \
  } while (0)

/* Value to pass to target_resume() to cause all threads to resume */

#define RESUME_ALL minus_one_ptid

/* Command list pointer for the "stop" placeholder.  */

static struct cmd_list_element *stop_command;

/* Function inferior was in as of last step command.  */

static struct symbol *step_start_function;

/* Nonzero if we want to give control to the user when we're notified
   of shared library events by the dynamic linker.  */
int stop_on_solib_events;
static void
show_stop_on_solib_events (struct ui_file *file, int from_tty,
                           struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Stopping for shared library events is %s.\n"),
                    value);
}

/* Nonzero means we are expecting a trace trap
   and should stop the inferior and return silently when it happens.  */

int stop_after_trap;

/* Save register contents here when executing a "finish" command or when
   about to pop a stack dummy frame, if-and-only-if proceed_to_finish is set.
   Thus this contains the return value from the called function (assuming
   values are returned in a register).  */

struct regcache *stop_registers;

/* Nonzero after stop if current stack frame should be printed.  */

static int stop_print_frame;

/* This is a cached copy of the pid/waitstatus of the last event
   returned by target_wait()/deprecated_target_wait_hook().  This
   information is returned by get_last_target_status().  */
static ptid_t target_last_wait_ptid;
static struct target_waitstatus target_last_waitstatus;

static void context_switch (ptid_t ptid);

void init_thread_stepping_state (struct thread_info *tss);

void init_infwait_state (void);

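/* The possible responses to a fork or vfork, and the mode currently in
   effect (reported by show_follow_fork_mode_string below).  */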
static const char follow_fork_mode_child[] = "child";
static const char follow_fork_mode_parent[] = "parent";

static const char *follow_fork_mode_kind_names[] = {
  follow_fork_mode_child,
  follow_fork_mode_parent,
  NULL
};

static const char *follow_fork_mode_string = follow_fork_mode_parent;
static void
show_follow_fork_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("\
Debugger response to a program call of fork or vfork is \"%s\".\n"),
                    value);
}


/* Tell the target to follow the fork we're stopped at.  Returns true
   if the inferior should be resumed; false, if the target for some
   reason decided it's best not to resume.  */

static int
follow_fork (void)
{
  int follow_child = (follow_fork_mode_string == follow_fork_mode_child);
  int should_resume = 1;
  struct thread_info *tp;

  /* Copy user stepping state to the new inferior thread.  FIXME: the
     followed fork child thread should have a copy of most of the
     parent thread structure's run control related fields, not just these.
     Initialized to avoid "may be used uninitialized" warnings from gcc.  */
  struct breakpoint *step_resume_breakpoint = NULL;
  CORE_ADDR step_range_start = 0;
  CORE_ADDR step_range_end = 0;
  struct frame_id step_frame_id = { 0 };

  if (!non_stop)
    {
      ptid_t wait_ptid;
      struct target_waitstatus wait_status;

      /* Get the last target status returned by target_wait().  */
      get_last_target_status (&wait_ptid, &wait_status);

      /* If not stopped at a fork event, then there's nothing else to
         do.  */
      if (wait_status.kind != TARGET_WAITKIND_FORKED
          && wait_status.kind != TARGET_WAITKIND_VFORKED)
        return 1;

      /* Check if we switched over from WAIT_PTID, since the event was
         reported.  */
      if (!ptid_equal (wait_ptid, minus_one_ptid)
          && !ptid_equal (inferior_ptid, wait_ptid))
        {
          /* We did.  Switch back to WAIT_PTID thread, to tell the
             target to follow it (in either direction).  We'll
             afterwards refuse to resume, and inform the user what
             happened.  */
          switch_to_thread (wait_ptid);
          should_resume = 0;
        }
    }

  tp = inferior_thread ();

  /* If there were any forks/vforks that were caught and are now to be
     followed, then do so now.  */
  switch (tp->pending_follow.kind)
    {
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      {
        ptid_t parent, child;

        /* If the user did a next/step, etc, over a fork call,
           preserve the stepping state in the fork child.  */
        if (follow_child && should_resume)
          {
            step_resume_breakpoint
              = clone_momentary_breakpoint (tp->step_resume_breakpoint);
            step_range_start = tp->step_range_start;
            step_range_end = tp->step_range_end;
            step_frame_id = tp->step_frame_id;

            /* For now, delete the parent's sr breakpoint, otherwise,
               parent/child sr breakpoints are considered duplicates,
               and the child version will not be installed.  Remove
               this when the breakpoints module becomes aware of
               inferiors and address spaces.  */
            delete_step_resume_breakpoint (tp);
            tp->step_range_start = 0;
            tp->step_range_end = 0;
            tp->step_frame_id = null_frame_id;
          }

        parent = inferior_ptid;
        child = tp->pending_follow.value.related_pid;

        /* Tell the target to do whatever is necessary to follow
           either parent or child.  */
        if (target_follow_fork (follow_child))
          {
            /* Target refused to follow, or there's some other reason
               we shouldn't resume.  */
            should_resume = 0;
          }
        else
          {
            /* This pending follow fork event is now handled, one way
               or another.  The previously selected thread may be gone
               from the lists by now, but if it is still around, we need
               to clear the pending follow request.  */
            tp = find_thread_ptid (parent);
            if (tp)
              tp->pending_follow.kind = TARGET_WAITKIND_SPURIOUS;

            /* This makes sure we don't try to apply the "Switched
               over from WAIT_PTID" logic above.  */
            nullify_last_target_wait_ptid ();

            /* If we followed the child, switch to it... */
            if (follow_child)
              {
                switch_to_thread (child);

                /* ... and preserve the stepping state, in case the
                   user was stepping over the fork call.  */
                if (should_resume)
                  {
                    tp = inferior_thread ();
                    tp->step_resume_breakpoint = step_resume_breakpoint;
                    tp->step_range_start = step_range_start;
                    tp->step_range_end = step_range_end;
                    tp->step_frame_id = step_frame_id;
                  }
                else
                  {
                    /* If we get here, it was because we're trying to
                       resume from a fork catchpoint, but, the user
                       has switched threads away from the thread that
                       forked.  In that case, the resume command
                       issued is most likely not applicable to the
                       child, so just warn, and refuse to resume.  */
                    warning (_("\
Not resuming: switched threads before following fork child.\n"));
                  }

                /* Reset breakpoints in the child as appropriate.  */
                follow_inferior_reset_breakpoints ();
              }
            else
              switch_to_thread (parent);
          }
      }
      break;
    case TARGET_WAITKIND_SPURIOUS:
      /* Nothing to follow.  */
      break;
    default:
      internal_error (__FILE__, __LINE__,
                      "Unexpected pending_follow.kind %d\n",
                      tp->pending_follow.kind);
      break;
    }

  return should_resume;
}

void
follow_inferior_reset_breakpoints (void)
{
  struct thread_info *tp = inferior_thread ();

  /* Was there a step_resume breakpoint?  (There was if the user
     did a "next" at the fork() call.)  If so, explicitly reset its
     thread number.

     step_resumes are a form of bp that are made to be per-thread.
     Since we created the step_resume bp when the parent process
     was being debugged, and now are switching to the child process,
     from the breakpoint package's viewpoint, that's a switch of
     "threads".  We must update the bp's notion of which thread
     it is for, or it'll be ignored when it triggers.  */

  if (tp->step_resume_breakpoint)
    breakpoint_re_set_thread (tp->step_resume_breakpoint);

  /* Reinsert all breakpoints in the child.  The user may have set
     breakpoints after catching the fork, in which case those
     were never set in the child, but only in the parent.  This makes
     sure the inserted breakpoints match the breakpoint list.  */

  breakpoint_re_set ();
  insert_breakpoints ();
}

/* The child has exited or execed: resume threads of the parent the
   user wanted to be executing.  */

static int
proceed_after_vfork_done (struct thread_info *thread,
                          void *arg)
{
  int pid = * (int *) arg;

  if (ptid_get_pid (thread->ptid) == pid
      && is_running (thread->ptid)
      && !is_executing (thread->ptid)
      && !thread->stop_requested
      && thread->stop_signal == TARGET_SIGNAL_0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: resuming vfork parent thread %s\n",
                            target_pid_to_str (thread->ptid));

      switch_to_thread (thread->ptid);
      clear_proceed_status ();
      proceed ((CORE_ADDR) -1, TARGET_SIGNAL_DEFAULT, 0);
    }

  return 0;
}

/* Called whenever we notice an exec or exit event, to handle
   detaching or resuming a vfork parent.  */

static void
handle_vfork_child_exec_or_exit (int exec)
{
  struct inferior *inf = current_inferior ();

  if (inf->vfork_parent)
    {
      int resume_parent = -1;

      /* This exec or exit marks the end of the shared memory region
         between the parent and the child.  If the user wanted to
         detach from the parent, now is the time.  */

      if (inf->vfork_parent->pending_detach)
        {
          struct thread_info *tp;
          struct cleanup *old_chain;
          struct program_space *pspace;
          struct address_space *aspace;

          /* follow-fork child, detach-on-fork on */

          old_chain = make_cleanup_restore_current_thread ();

          /* We're letting go of the parent.  */
          tp = any_live_thread_of_process (inf->vfork_parent->pid);
          switch_to_thread (tp->ptid);

          /* We're about to detach from the parent, which implicitly
             removes breakpoints from its address space.  There's a
             catch here: we want to reuse the spaces for the child,
             but, parent/child are still sharing the pspace at this
             point, although the exec in reality makes the kernel give
             the child a fresh set of new pages.  The problem here is
             that the breakpoints module, being unaware of this, would
             likely choose the child process to write to the parent
             address space.  Swapping the child temporarily away from
             the spaces has the desired effect.  Yes, this is "sort
             of" a hack.  */

          pspace = inf->pspace;
          aspace = inf->aspace;
          inf->aspace = NULL;
          inf->pspace = NULL;

          if (debug_infrun || info_verbose)
            {
              target_terminal_ours ();

              if (exec)
                fprintf_filtered (gdb_stdlog,
                                  "Detaching vfork parent process %d after child exec.\n",
                                  inf->vfork_parent->pid);
              else
                fprintf_filtered (gdb_stdlog,
                                  "Detaching vfork parent process %d after child exit.\n",
                                  inf->vfork_parent->pid);
            }

          target_detach (NULL, 0);

          /* Put it back.  */
          inf->pspace = pspace;
          inf->aspace = aspace;

          do_cleanups (old_chain);
        }
      else if (exec)
        {
          /* We're staying attached to the parent, so, really give the
             child a new address space.  */
          inf->pspace = add_program_space (maybe_new_address_space ());
          inf->aspace = inf->pspace->aspace;
          inf->removable = 1;
          set_current_program_space (inf->pspace);

          resume_parent = inf->vfork_parent->pid;

          /* Break the bonds.  */
          inf->vfork_parent->vfork_child = NULL;
        }
      else
        {
          struct cleanup *old_chain;
          struct program_space *pspace;

          /* If this is a vfork child exiting, then the pspace and
             aspaces were shared with the parent.  Since we're
             reporting the process exit, we'll be mourning all that is
             found in the address space, and switching to null_ptid,
             preparing to start a new inferior.  But, since we don't
             want to clobber the parent's address/program spaces, we
             go ahead and create a new one for this exiting
             inferior.  */

          /* Switch to null_ptid, so that clone_program_space doesn't want
             to read the selected frame of a dead process.  */
          old_chain = save_inferior_ptid ();
          inferior_ptid = null_ptid;

          /* This inferior is dead, so avoid giving the breakpoints
             module the option to write through to it (cloning a
             program space resets breakpoints).  */
          inf->aspace = NULL;
          inf->pspace = NULL;
          pspace = add_program_space (maybe_new_address_space ());
          set_current_program_space (pspace);
          inf->removable = 1;
          clone_program_space (pspace, inf->vfork_parent->pspace);
          inf->pspace = pspace;
          inf->aspace = pspace->aspace;

          /* Put back inferior_ptid.  We'll continue mourning this
             inferior. */
          do_cleanups (old_chain);

          resume_parent = inf->vfork_parent->pid;
          /* Break the bonds.  */
          inf->vfork_parent->vfork_child = NULL;
        }

      inf->vfork_parent = NULL;

      gdb_assert (current_program_space == inf->pspace);

      if (non_stop && resume_parent != -1)
        {
          /* If the user wanted the parent to be running, let it go
             free now.  */
          struct cleanup *old_chain = make_cleanup_restore_current_thread ();

          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog, "infrun: resuming vfork parent process %d\n",
                                resume_parent);

          iterate_over_threads (proceed_after_vfork_done, &resume_parent);

          do_cleanups (old_chain);
        }
    }
}

/* Enum strings for "set|show follow-exec-mode".  */

static const char follow_exec_mode_new[] = "new";
static const char follow_exec_mode_same[] = "same";
static const char *follow_exec_mode_names[] =
{
  follow_exec_mode_new,
  follow_exec_mode_same,
  NULL,
};

static const char *follow_exec_mode_string = follow_exec_mode_same;
static void
show_follow_exec_mode_string (struct ui_file *file, int from_tty,
                              struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Follow exec mode is \"%s\".\n"),  value);
}

/* EXECD_PATHNAME is assumed to be non-NULL. */

static void
follow_exec (ptid_t pid, char *execd_pathname)
{
  struct thread_info *th = inferior_thread ();
  struct inferior *inf = current_inferior ();

  /* This is an exec event that we actually wish to pay attention to.
     Refresh our symbol table to the newly exec'd program, remove any
     momentary bp's, etc.

     If there are breakpoints, they aren't really inserted now,
     since the exec() transformed our inferior into a fresh set
     of instructions.

     We want to preserve symbolic breakpoints on the list, since
     we have hopes that they can be reset after the new a.out's
     symbol table is read.

     However, any "raw" breakpoints must be removed from the list
     (e.g., the solib bp's), since their address is probably invalid
     now.

     And, we DON'T want to call delete_breakpoints() here, since
     that may write the bp's "shadow contents" (the instruction
     value that was overwritten with a TRAP instruction).  Since
     we now have a new a.out, those shadow contents aren't valid. */

  mark_breakpoints_out ();

  update_breakpoints_after_exec ();

  /* If there was one, it's gone now.  We cannot truly step-to-next
     statement through an exec(). */
  th->step_resume_breakpoint = NULL;
  th->step_range_start = 0;
  th->step_range_end = 0;

  /* The target reports the exec event to the main thread, even if
     some other thread does the exec, and even if the main thread was
     already stopped --- if debugging in non-stop mode, it's possible
     the user had the main thread held stopped in the previous image
     --- release it now.  This is the same behavior as step-over-exec
     with scheduler-locking on in all-stop mode.  */
  th->stop_requested = 0;

  /* What is this a.out's name? */
  printf_unfiltered (_("%s is executing new program: %s\n"),
                     target_pid_to_str (inferior_ptid),
                     execd_pathname);

  /* We've followed the inferior through an exec.  Therefore, the
     inferior has essentially been killed & reborn. */

  gdb_flush (gdb_stdout);

  breakpoint_init_inferior (inf_execd);

  if (gdb_sysroot && *gdb_sysroot)
    {
      char *name = alloca (strlen (gdb_sysroot)
                            + strlen (execd_pathname)
                            + 1);

      strcpy (name, gdb_sysroot);
      strcat (name, execd_pathname);
      execd_pathname = name;
    }

  /* Reset the shared library package.  This ensures that we get a
     shlib event when the child reaches "_start", at which point the
     dld will have had a chance to initialize the child.  */
  /* Also, loading a symbol file below may trigger symbol lookups, and
     we don't want those to be satisfied by the libraries of the
     previous incarnation of this process.  */
  no_shared_libraries (NULL, 0);

  if (follow_exec_mode_string == follow_exec_mode_new)
    {
      struct program_space *pspace;

      /* The user wants to keep the old inferior and program spaces
         around.  Create a new fresh one, and switch to it.  */

      inf = add_inferior (current_inferior ()->pid);
      pspace = add_program_space (maybe_new_address_space ());
      inf->pspace = pspace;
      inf->aspace = pspace->aspace;

      exit_inferior_num_silent (current_inferior ()->num);

      set_current_inferior (inf);
      set_current_program_space (pspace);
    }

  gdb_assert (current_program_space == inf->pspace);

  /* That a.out is now the one to use. */
  exec_file_attach (execd_pathname, 0);

  /* Load the main file's symbols.  */
  symbol_file_add_main (execd_pathname, 0);

#ifdef SOLIB_CREATE_INFERIOR_HOOK
  SOLIB_CREATE_INFERIOR_HOOK (PIDGET (inferior_ptid));
#else
  solib_create_inferior_hook (0);
#endif

  jit_inferior_created_hook ();

  /* Reinsert all breakpoints.  (Those which were symbolic have
     been reset to the proper address in the new a.out, thanks
     to symbol_file_command...) */
  insert_breakpoints ();

  /* The next resume of this inferior should bring it to the shlib
     startup breakpoints.  (If the user had also set bp's on
     "main" from the old (parent) process, then they'll auto-
     matically get reset there in the new process.) */
}

/* Non-zero if we are just simulating a single-step.  This is needed
   because we cannot remove the breakpoints in the inferior process
   until after the `wait' in `wait_for_inferior'.  */
static int singlestep_breakpoints_inserted_p = 0;

/* The thread we inserted single-step breakpoints for.  */
static ptid_t singlestep_ptid;

/* PC when we started this single-step.  */
static CORE_ADDR singlestep_pc;

/* If another thread hit the singlestep breakpoint, we save the original
   thread here so that we can resume single-stepping it later.  */
static ptid_t saved_singlestep_ptid;
static int stepping_past_singlestep_breakpoint;

/* If not equal to null_ptid, this means that after stepping over a breakpoint
   is finished, we need to switch to deferred_step_ptid, and step it.

   The use case is when one thread has hit a breakpoint, and then the user
   has switched to another thread and issued 'step'.  We need to step over
   the breakpoint in the thread which hit the breakpoint, but then continue
   stepping the thread the user has selected.  */
static ptid_t deferred_step_ptid;

/* Displaced stepping.  */

/* In non-stop debugging mode, we must take special care to manage
   breakpoints properly; in particular, the traditional strategy for
   stepping a thread past a breakpoint it has hit is unsuitable.
   'Displaced stepping' is a tactic for stepping one thread past a
   breakpoint it has hit while ensuring that other threads running
   concurrently will hit the breakpoint as they should.

   The traditional way to step a thread T off a breakpoint in a
   multi-threaded program in all-stop mode is as follows:

   a0) Initially, all threads are stopped, and breakpoints are not
       inserted.
   a1) We single-step T, leaving breakpoints uninserted.
   a2) We insert breakpoints, and resume all threads.

   In non-stop debugging, however, this strategy is unsuitable: we
   don't want to have to stop all threads in the system in order to
   continue or step T past a breakpoint.  Instead, we use displaced
   stepping:

   n0) Initially, T is stopped, other threads are running, and
       breakpoints are inserted.
   n1) We copy the instruction "under" the breakpoint to a separate
       location, outside the main code stream, making any adjustments
       to the instruction, register, and memory state as directed by
       T's architecture.
   n2) We single-step T over the instruction at its new location.
   n3) We adjust the resulting register and memory state as directed
       by T's architecture.  This includes resetting T's PC to point
       back into the main instruction stream.
   n4) We resume T.

   This approach depends on the following gdbarch methods:

   - gdbarch_max_insn_length and gdbarch_displaced_step_location
     indicate where to copy the instruction, and how much space must
     be reserved there.  We use these in step n1.

   - gdbarch_displaced_step_copy_insn copies an instruction to a new
     address, and makes any necessary adjustments to the instruction,
     register contents, and memory.  We use this in step n1.

   - gdbarch_displaced_step_fixup adjusts registers and memory after
     we have successfully single-stepped the instruction, to yield the
     same effect the instruction would have had if we had executed it
     at its original address.  We use this in step n3.

   - gdbarch_displaced_step_free_closure provides cleanup.

   The gdbarch_displaced_step_copy_insn and
   gdbarch_displaced_step_fixup functions must be written so that
   copying an instruction with gdbarch_displaced_step_copy_insn,
   single-stepping across the copied instruction, and then applying
   gdbarch_displaced_step_fixup should have the same effects on the
   thread's memory and registers as stepping the instruction in place
   would have.  Exactly which responsibilities fall to the copy and
   which fall to the fixup is up to the author of those functions.

   See the comments in gdbarch.sh for details.

   Note that displaced stepping and software single-step cannot
   currently be used in combination, although with some care I think
   they could be made to.  Software single-step works by placing
   breakpoints on all possible subsequent instructions; if the
   displaced instruction is a PC-relative jump, those breakpoints
   could fall in very strange places --- on pages that aren't
   executable, or at addresses that are not proper instruction
   boundaries.  (We do generally let other threads run while we wait
   to hit the software single-step breakpoint, and they might
   encounter such a corrupted instruction.)  One way to work around
   this would be to have gdbarch_displaced_step_copy_insn fully
   simulate the effect of PC-relative instructions (and return NULL)
   on architectures that use software single-stepping.

   In non-stop mode, we can have independent and simultaneous step
   requests, so more than one thread may need to simultaneously step
   over a breakpoint.  The current implementation assumes there is
   only one scratch space per process.  In this case, we have to
   serialize access to the scratch space.  If thread A wants to step
   over a breakpoint, but we are currently waiting for some other
   thread to complete a displaced step, we leave thread A stopped and
   place it in the displaced_step_request_queue.  Whenever a displaced
   step finishes, we pick the next thread in the queue and start a new
   displaced step operation on it.  See displaced_step_prepare and
   displaced_step_fixup for details.  */

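/* The data structures below implement the bookkeeping described above:
   one displaced_step_inferior_state per process, each holding a queue of
   displaced_step_request entries for threads waiting their turn to use
   the scratch space.  */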
struct displaced_step_request
{
  ptid_t ptid;
  struct displaced_step_request *next;
};

/* Per-inferior displaced stepping state.  */
struct displaced_step_inferior_state
{
  /* Pointer to next in linked list.  */
  struct displaced_step_inferior_state *next;

  /* The process this displaced step state refers to.  */
  int pid;

  /* A queue of pending displaced stepping requests.  One entry per
     thread that needs to do a displaced step.  */
  struct displaced_step_request *step_request_queue;

  /* If this is not null_ptid, this is the thread carrying out a
     displaced single-step in process PID.  This thread's state will
     require fixing up once it has completed its step.  */
  ptid_t step_ptid;

  /* The architecture the thread had when we stepped it.  */
  struct gdbarch *step_gdbarch;

  /* The closure provided by gdbarch_displaced_step_copy_insn, to be used
     for post-step cleanup.  */
  struct displaced_step_closure *step_closure;

  /* The address of the original instruction, and the copy we
     made.  */
  CORE_ADDR step_original, step_copy;

  /* Saved contents of copy area.  */
  gdb_byte *step_saved_copy;
};

/* The list of states of processes involved in displaced stepping
   presently.  */
static struct displaced_step_inferior_state *displaced_step_inferior_states;

/* Get the displaced stepping state of process PID.  */

static struct displaced_step_inferior_state *
get_displaced_stepping_state (int pid)
{
  struct displaced_step_inferior_state *state;

  for (state = displaced_step_inferior_states;
       state != NULL;
       state = state->next)
    if (state->pid == pid)
      return state;

  return NULL;
}

/* Add a new displaced stepping state for process PID to the displaced
   stepping state list, or return a pointer to an already existing
   entry, if it already exists.  Never returns NULL.  */

static struct displaced_step_inferior_state *
add_displaced_stepping_state (int pid)
{
  struct displaced_step_inferior_state *state;

  for (state = displaced_step_inferior_states;
       state != NULL;
       state = state->next)
    if (state->pid == pid)
      return state;

  state = xcalloc (1, sizeof (*state));
  state->pid = pid;
  state->next = displaced_step_inferior_states;
  displaced_step_inferior_states = state;

  return state;
}

/* Remove the displaced stepping state of process PID.  */

static void
remove_displaced_stepping_state (int pid)
{
  struct displaced_step_inferior_state *it, **prev_next_p;

  gdb_assert (pid != 0);

  it = displaced_step_inferior_states;
  prev_next_p = &displaced_step_inferior_states;
  while (it)
    {
      if (it->pid == pid)
        {
          *prev_next_p = it->next;
          xfree (it);
          return;
        }

      prev_next_p = &it->next;
      it = *prev_next_p;
    }
}

static void
infrun_inferior_exit (struct inferior *inf)
{
  remove_displaced_stepping_state (inf->pid);
}

/* Enum strings for "set|show displaced-stepping".  */

static const char can_use_displaced_stepping_auto[] = "auto";
static const char can_use_displaced_stepping_on[] = "on";
static const char can_use_displaced_stepping_off[] = "off";
static const char *can_use_displaced_stepping_enum[] =
{
  can_use_displaced_stepping_auto,
  can_use_displaced_stepping_on,
  can_use_displaced_stepping_off,
  NULL,
};

/* If ON, and the architecture supports it, GDB will use displaced
   stepping to step over breakpoints.  If OFF, or if the architecture
   doesn't support it, GDB will instead use the traditional
   hold-and-step approach.  If AUTO (which is the default), GDB will
   decide which technique to use to step over breakpoints depending on
   which of all-stop or non-stop mode is active --- displaced stepping
   in non-stop mode; hold-and-step in all-stop mode.  */

static const char *can_use_displaced_stepping =
  can_use_displaced_stepping_auto;

static void
show_can_use_displaced_stepping (struct ui_file *file, int from_tty,
                                 struct cmd_list_element *c,
                                 const char *value)
{
  if (can_use_displaced_stepping == can_use_displaced_stepping_auto)
    fprintf_filtered (file, _("\
Debugger's willingness to use displaced stepping to step over \
breakpoints is %s (currently %s).\n"),
                      value, non_stop ? "on" : "off");
  else
    fprintf_filtered (file, _("\
Debugger's willingness to use displaced stepping to step over \
breakpoints is %s.\n"), value);
}

/* Return non-zero if displaced stepping can/should be used to step
   over breakpoints.  */

static int
use_displaced_stepping (struct gdbarch *gdbarch)
{
  return (((can_use_displaced_stepping == can_use_displaced_stepping_auto
            && non_stop)
           || can_use_displaced_stepping == can_use_displaced_stepping_on)
          && gdbarch_displaced_step_copy_insn_p (gdbarch)
          && !RECORD_IS_USED);
}

/* Clean out any stray displaced stepping state.  */
static void
displaced_step_clear (struct displaced_step_inferior_state *displaced)
{
  /* Indicate that there is no cleanup pending.  */
  displaced->step_ptid = null_ptid;

  if (displaced->step_closure)
    {
      gdbarch_displaced_step_free_closure (displaced->step_gdbarch,
                                           displaced->step_closure);
      displaced->step_closure = NULL;
    }
}

static void
displaced_step_clear_cleanup (void *arg)
{
  struct displaced_step_inferior_state *state = arg;

  displaced_step_clear (state);
}

/* Dump LEN bytes at BUF in hex to FILE, followed by a newline.  */
void
displaced_step_dump_bytes (struct ui_file *file,
                           const gdb_byte *buf,
                           size_t len)
{
  int i;

  for (i = 0; i < len; i++)
    fprintf_unfiltered (file, "%02x ", buf[i]);
  fputs_unfiltered ("\n", file);
}

/* Prepare to single-step, using displaced stepping.

   Note that we cannot use displaced stepping when we have a signal to
   deliver.  If we have a signal to deliver and an instruction to step
   over, then after the step, there will be no indication from the
   target whether the thread entered a signal handler or ignored the
   signal and stepped over the instruction successfully --- both cases
   result in a simple SIGTRAP.  In the first case we mustn't do a
   fixup, and in the second case we must --- but we can't tell which.
   Comments in the code for 'random signals' in handle_inferior_event
   explain how we handle this case instead.

   Returns 1 if preparing was successful -- this thread is going to be
   stepped now; or 0 if displaced stepping this thread got queued.  */
static int
displaced_step_prepare (ptid_t ptid)
{
  struct cleanup *old_cleanups, *ignore_cleanups;
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  CORE_ADDR original, copy;
  ULONGEST len;
  struct displaced_step_closure *closure;
  struct displaced_step_inferior_state *displaced;

  /* We should never reach this function if the architecture does not
     support displaced stepping.  */
  gdb_assert (gdbarch_displaced_step_copy_insn_p (gdbarch));

  /* We have to displaced step one thread at a time, as we only have
     access to a single scratch space per inferior.  */

  displaced = add_displaced_stepping_state (ptid_get_pid (ptid));

  if (!ptid_equal (displaced->step_ptid, null_ptid))
    {
      /* Already waiting for a displaced step to finish.  Defer this
         request and place it in the queue.  */
      struct displaced_step_request *req, *new_req;

      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog,
                            "displaced: deferring step of %s\n",
                            target_pid_to_str (ptid));

      new_req = xmalloc (sizeof (*new_req));
      new_req->ptid = ptid;
      new_req->next = NULL;

      if (displaced->step_request_queue)
        {
          for (req = displaced->step_request_queue;
               req && req->next;
               req = req->next)
            ;
          req->next = new_req;
        }
      else
        displaced->step_request_queue = new_req;

      return 0;
    }
  else
    {
      if (debug_displaced)
        fprintf_unfiltered (gdb_stdlog,
                            "displaced: stepping %s now\n",
                            target_pid_to_str (ptid));
    }

  displaced_step_clear (displaced);

  old_cleanups = save_inferior_ptid ();
  inferior_ptid = ptid;

  original = regcache_read_pc (regcache);

  copy = gdbarch_displaced_step_location (gdbarch);
  len = gdbarch_max_insn_length (gdbarch);

  /* Save the original contents of the copy area.  */
  displaced->step_saved_copy = xmalloc (len);
  ignore_cleanups = make_cleanup (free_current_contents,
                                  &displaced->step_saved_copy);
  read_memory (copy, displaced->step_saved_copy, len);
  if (debug_displaced)
    {
      fprintf_unfiltered (gdb_stdlog, "displaced: saved %s: ",
                          paddress (gdbarch, copy));
      displaced_step_dump_bytes (gdb_stdlog,
                                 displaced->step_saved_copy,
                                 len);
    }

  closure = gdbarch_displaced_step_copy_insn (gdbarch,
                                              original, copy, regcache);

  /* We don't support the fully-simulated case at present.  */
  gdb_assert (closure);

  /* Save the information we need to fix things up if the step
     succeeds.  */
  displaced->step_ptid = ptid;
  displaced->step_gdbarch = gdbarch;
  displaced->step_closure = closure;
  displaced->step_original = original;
  displaced->step_copy = copy;

  make_cleanup (displaced_step_clear_cleanup, displaced);

  /* Resume execution at the copy.  */
  regcache_write_pc (regcache, copy);

  discard_cleanups (ignore_cleanups);

  do_cleanups (old_cleanups);

  if (debug_displaced)
    fprintf_unfiltered (gdb_stdlog, "displaced: displaced pc to %s\n",
                        paddress (gdbarch, copy));

  return 1;
}

static void
write_memory_ptid (ptid_t ptid, CORE_ADDR memaddr, const gdb_byte *myaddr, int len)
{
  struct cleanup *ptid_cleanup = save_inferior_ptid ();

  inferior_ptid = ptid;
  write_memory (memaddr, myaddr, len);
  do_cleanups (ptid_cleanup);
}

static void
displaced_step_fixup (ptid_t event_ptid, enum target_signal signal)
{
  struct cleanup *old_cleanups;
  struct displaced_step_inferior_state *displaced
    = get_displaced_stepping_state (ptid_get_pid (event_ptid));

  /* Was any thread of this process doing a displaced step?  */
  if (displaced == NULL)
    return;

  /* Was this event for the pid we displaced?  */
  if (ptid_equal (displaced->step_ptid, null_ptid)
      || ! ptid_equal (displaced->step_ptid, event_ptid))
    return;

  old_cleanups = make_cleanup (displaced_step_clear_cleanup, displaced);

  /* Restore the contents of the copy area.  */
  {
    ULONGEST len = gdbarch_max_insn_length (displaced->step_gdbarch);

    write_memory_ptid (displaced->step_ptid, displaced->step_copy,
                       displaced->step_saved_copy, len);
    if (debug_displaced)
      fprintf_unfiltered (gdb_stdlog, "displaced: restored %s\n",
                          paddress (displaced->step_gdbarch,
                                    displaced->step_copy));
  }

  /* Did the instruction complete successfully?  */
  if (signal == TARGET_SIGNAL_TRAP)
    {
      /* Fix up the resulting state.  */
      gdbarch_displaced_step_fixup (displaced->step_gdbarch,
                                    displaced->step_closure,
                                    displaced->step_original,
                                    displaced->step_copy,
                                    get_thread_regcache (displaced->step_ptid));
    }
  else
    {
      /* Since the instruction didn't complete, all we can do is
         relocate the PC.  */
      struct regcache *regcache = get_thread_regcache (event_ptid);
      CORE_ADDR pc = regcache_read_pc (regcache);

      pc = displaced->step_original + (pc - displaced->step_copy);
      regcache_write_pc (regcache, pc);
    }

  do_cleanups (old_cleanups);

  displaced->step_ptid = null_ptid;

  /* Are there any pending displaced stepping requests?  If so, run
     one now.  Leave the state object around, since we're likely to
     need it again soon.  */
  while (displaced->step_request_queue)
    {
      struct displaced_step_request *head;
      ptid_t ptid;
      struct regcache *regcache;
      struct gdbarch *gdbarch;
      CORE_ADDR actual_pc;
      struct address_space *aspace;

      head = displaced->step_request_queue;
      ptid = head->ptid;
      displaced->step_request_queue = head->next;
      xfree (head);

      context_switch (ptid);

      regcache = get_thread_regcache (ptid);
      actual_pc = regcache_read_pc (regcache);
      aspace = get_regcache_aspace (regcache);

      if (breakpoint_here_p (aspace, actual_pc))
        {
          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog,
                                "displaced: stepping queued %s now\n",
                                target_pid_to_str (ptid));

          displaced_step_prepare (ptid);

          gdbarch = get_regcache_arch (regcache);

          if (debug_displaced)
            {
              CORE_ADDR actual_pc = regcache_read_pc (regcache);
              gdb_byte buf[4];

              fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
                                  paddress (gdbarch, actual_pc));
              read_memory (actual_pc, buf, sizeof (buf));
              displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
            }

          if (gdbarch_displaced_step_hw_singlestep (gdbarch,
                                                    displaced->step_closure))
            target_resume (ptid, 1, TARGET_SIGNAL_0);
          else
            target_resume (ptid, 0, TARGET_SIGNAL_0);

          /* Done, we're stepping a thread.  */
          break;
        }
      else
        {
          int step;
          struct thread_info *tp = inferior_thread ();

          /* The breakpoint we were sitting under has since been
             removed.  */
          tp->trap_expected = 0;

          /* Go back to what we were trying to do.  */
          step = currently_stepping (tp);

          if (debug_displaced)
            fprintf_unfiltered (gdb_stdlog, "breakpoint is gone %s: step(%d)\n",
                                target_pid_to_str (tp->ptid), step);

          target_resume (ptid, step, TARGET_SIGNAL_0);
          tp->stop_signal = TARGET_SIGNAL_0;

          /* This request was discarded.  See if there's any other
             thread waiting for its turn.  */
        }
    }
}

/* Update global variables holding ptids to hold NEW_PTID if they were
   holding OLD_PTID.  */
static void
infrun_thread_ptid_changed (ptid_t old_ptid, ptid_t new_ptid)
{
  struct displaced_step_request *it;
  struct displaced_step_inferior_state *displaced;

  if (ptid_equal (inferior_ptid, old_ptid))
    inferior_ptid = new_ptid;

  if (ptid_equal (singlestep_ptid, old_ptid))
    singlestep_ptid = new_ptid;

  if (ptid_equal (deferred_step_ptid, old_ptid))
    deferred_step_ptid = new_ptid;

  for (displaced = displaced_step_inferior_states;
       displaced;
       displaced = displaced->next)
    {
      if (ptid_equal (displaced->step_ptid, old_ptid))
        displaced->step_ptid = new_ptid;

      for (it = displaced->step_request_queue; it; it = it->next)
        if (ptid_equal (it->ptid, old_ptid))
          it->ptid = new_ptid;
    }
}


/* Resuming.  */

/* Things to clean up if we QUIT out of resume ().  */
static void
resume_cleanups (void *ignore)
{
  normal_stop ();
}

static const char schedlock_off[] = "off";
1474
static const char schedlock_on[] = "on";
1475
static const char schedlock_step[] = "step";
1476
static const char *scheduler_enums[] = {
1477
  schedlock_off,
1478
  schedlock_on,
1479
  schedlock_step,
1480
  NULL
1481
};
1482
static const char *scheduler_mode = schedlock_off;
1483
static void
1484
show_scheduler_mode (struct ui_file *file, int from_tty,
1485
                     struct cmd_list_element *c, const char *value)
1486
{
1487
  fprintf_filtered (file, _("\
1488
Mode for locking scheduler during execution is \"%s\".\n"),
1489
                    value);
1490
}
1491
 
1492
static void
1493
set_schedlock_func (char *args, int from_tty, struct cmd_list_element *c)
1494
{
1495
  if (!target_can_lock_scheduler)
1496
    {
1497
      scheduler_mode = schedlock_off;
1498
      error (_("Target '%s' cannot support this command."), target_shortname);
1499
    }
1500
}
1501
 
1502
/* True if execution commands resume all threads of all processes by
1503
   default; otherwise, resume only threads of the current inferior
1504
   process.  */
1505
int sched_multi = 0;
1506
 
1507
/* Try to set up for software single stepping over the specified location.
1508
   Return 1 if target_resume() should use hardware single step.
1509
 
1510
   GDBARCH the current gdbarch.
1511
   PC the location to step over.  */
1512
 
1513
static int
1514
maybe_software_singlestep (struct gdbarch *gdbarch, CORE_ADDR pc)
1515
{
1516
  int hw_step = 1;
1517
 
1518
  if (gdbarch_software_single_step_p (gdbarch)
1519
      && gdbarch_software_single_step (gdbarch, get_current_frame ()))
1520
    {
1521
      hw_step = 0;
1522
      /* Do not pull these breakpoints until after a `wait' in
1523
         `wait_for_inferior'.  */
1524
      singlestep_breakpoints_inserted_p = 1;
1525
      singlestep_ptid = inferior_ptid;
1526
      singlestep_pc = pc;
1527
    }
1528
  return hw_step;
1529
}
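
/* Illustrative sketch, not part of infrun.c: the decision made by
   maybe_software_singlestep above, reduced to a pure function.
   CAN_SW_STEP and SW_STEP_BPS_PLACED are hypothetical stand-ins for
   "the architecture provides software single-step" and "the software
   single-step breakpoints were successfully planted".  Returns nonzero
   when the resume should still request a hardware single step.  */

static int
example_use_hardware_step (int can_sw_step, int sw_step_bps_placed)
{
  /* Prefer software single-step when the architecture supports it and
     the breakpoints went in; otherwise fall back to hardware.  */
  if (can_sw_step && sw_step_bps_placed)
    return 0;
  return 1;
}
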
1530
 
1531
/* Resume the inferior, but allow a QUIT.  This is useful if the user
1532
   wants to interrupt some lengthy single-stepping operation
1533
   (for child processes, the SIGINT goes to the inferior, and so
1534
   we get a SIGINT random_signal, but for remote debugging and perhaps
1535
   other targets, that's not true).
1536
 
1537
   STEP nonzero if we should step (zero to continue instead).
1538
   SIG is the signal to give the inferior (zero for none).  */
1539
void
1540
resume (int step, enum target_signal sig)
1541
{
1542
  int should_resume = 1;
1543
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
1544
  struct regcache *regcache = get_current_regcache ();
1545
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
1546
  struct thread_info *tp = inferior_thread ();
1547
  CORE_ADDR pc = regcache_read_pc (regcache);
1548
  struct address_space *aspace = get_regcache_aspace (regcache);
1549
 
1550
  QUIT;
1551
 
1552
  if (debug_infrun)
1553
    fprintf_unfiltered (gdb_stdlog,
1554
                        "infrun: resume (step=%d, signal=%d), "
1555
                        "trap_expected=%d\n",
1556
                        step, sig, tp->trap_expected);
1557
 
1558
  /* Normally, by the time we reach `resume', the breakpoints are either
1559
     removed or inserted, as appropriate.  The exception is if we're sitting
1560
     at a permanent breakpoint; we need to step over it, but permanent
1561
     breakpoints can't be removed.  So we have to test for it here.  */
1562
  if (breakpoint_here_p (aspace, pc) == permanent_breakpoint_here)
1563
    {
1564
      if (gdbarch_skip_permanent_breakpoint_p (gdbarch))
1565
        gdbarch_skip_permanent_breakpoint (gdbarch, regcache);
1566
      else
1567
        error (_("\
1568
The program is stopped at a permanent breakpoint, but GDB does not know\n\
1569
how to step past a permanent breakpoint on this architecture.  Try using\n\
1570
a command like `return' or `jump' to continue execution."));
1571
    }
1572
 
1573
  /* If enabled, step over breakpoints by executing a copy of the
1574
     instruction at a different address.
1575
 
1576
     We can't use displaced stepping when we have a signal to deliver;
1577
     the comments for displaced_step_prepare explain why.  The
1578
     comments in handle_inferior_event for dealing with 'random
1579
     signals' explain what we do instead.  */
1580
  if (use_displaced_stepping (gdbarch)
1581
      && (tp->trap_expected
1582
          || (step && gdbarch_software_single_step_p (gdbarch)))
1583
      && sig == TARGET_SIGNAL_0)
1584
    {
1585
      struct displaced_step_inferior_state *displaced;
1586
 
1587
      if (!displaced_step_prepare (inferior_ptid))
1588
        {
1589
          /* Got placed in displaced stepping queue.  Will be resumed
1590
             later when all the currently queued displaced stepping
1591
             requests finish.  The thread is not executing at this point,
1592
             and the call to set_executing will be made later.  But we
1593
             need to call set_running here, since from the frontend's point of view,
1594
             the thread is running.  */
1595
          set_running (inferior_ptid, 1);
1596
          discard_cleanups (old_cleanups);
1597
          return;
1598
        }
1599
 
1600
      displaced = get_displaced_stepping_state (ptid_get_pid (inferior_ptid));
1601
      step = gdbarch_displaced_step_hw_singlestep (gdbarch,
1602
                                                   displaced->step_closure);
1603
    }
1604
 
1605
  /* Do we need to do it the hard way, w/temp breakpoints?  */
1606
  else if (step)
1607
    step = maybe_software_singlestep (gdbarch, pc);
1608
 
1609
  if (should_resume)
1610
    {
1611
      ptid_t resume_ptid;
1612
 
1613
      /* If STEP is set, it's a request to use hardware stepping
1614
         facilities.  But in that case, we should never
1615
         use singlestep breakpoint.  */
1616
      gdb_assert (!(singlestep_breakpoints_inserted_p && step));
1617
 
1618
      /* Decide the set of threads to ask the target to resume.  Start
1619
         by assuming everything will be resumed, then narrow the set
1620
         by applying increasingly restrictive conditions.  */
1621
 
1622
      /* By default, resume all threads of all processes.  */
1623
      resume_ptid = RESUME_ALL;
1624
 
1625
      /* Maybe resume only all threads of the current process.  */
1626
      if (!sched_multi && target_supports_multi_process ())
1627
        {
1628
          resume_ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
1629
        }
1630
 
1631
      /* Maybe resume a single thread after all.  */
1632
      if (singlestep_breakpoints_inserted_p
1633
          && stepping_past_singlestep_breakpoint)
1634
        {
1635
          /* The situation here is as follows.  In thread T1 we wanted to
1636
             single-step.  Lacking hardware single-stepping, we set a
1637
             breakpoint at the PC of the next instruction -- call it P.
1638
             After resuming, we hit that breakpoint in thread T2.  We have
1639
             now removed the original breakpoint, inserted a breakpoint at
1640
             P+1, and are trying to step to advance T2 past the breakpoint.
1641
             We need to step only T2; if T1 is allowed to run freely, it
1642
             can run past P, and if other threads are allowed to run, they
1643
             can hit the breakpoint at P+1.  Nested hits of single-step
1644
             breakpoints are not something we want -- that's complicated
1645
             to support, and has no value.  */
1646
          resume_ptid = inferior_ptid;
1647
        }
1648
      else if ((step || singlestep_breakpoints_inserted_p)
1649
               && tp->trap_expected)
1650
        {
1651
          /* We're allowing a thread to run past a breakpoint it has
1652
             hit, by single-stepping the thread with the breakpoint
1653
             removed.  In that case, we need to single-step only this
1654
             thread, and keep the others stopped, as they could miss
1655
             this breakpoint if allowed to run.
1656
 
1657
             The current code actually removes all breakpoints when
1658
             doing this, not just the one being stepped over, so if we
1659
             let other threads run, we can actually miss any
1660
             breakpoint, not just the one at PC.  */
1661
          resume_ptid = inferior_ptid;
1662
        }
1663
      else if (non_stop)
1664
        {
1665
          /* With non-stop mode on, threads are always handled
1666
             individually.  */
1667
          resume_ptid = inferior_ptid;
1668
        }
1669
      else if ((scheduler_mode == schedlock_on)
1670
               || (scheduler_mode == schedlock_step
1671
                   && (step || singlestep_breakpoints_inserted_p)))
1672
        {
1673
          /* User-settable 'scheduler' mode requires solo thread resume. */
1674
          resume_ptid = inferior_ptid;
1675
        }
1676
 
1677
      if (gdbarch_cannot_step_breakpoint (gdbarch))
1678
        {
1679
          /* Most targets can step a breakpoint instruction, thus
1680
             executing it normally.  But if this one cannot, just
1681
             continue and we will hit it anyway.  */
1682
          if (step && breakpoint_inserted_here_p (aspace, pc))
1683
            step = 0;
1684
        }
1685
 
1686
      if (debug_displaced
1687
          && use_displaced_stepping (gdbarch)
1688
          && tp->trap_expected)
1689
        {
1690
          struct regcache *resume_regcache = get_thread_regcache (resume_ptid);
1691
          struct gdbarch *resume_gdbarch = get_regcache_arch (resume_regcache);
1692
          CORE_ADDR actual_pc = regcache_read_pc (resume_regcache);
1693
          gdb_byte buf[4];
1694
 
1695
          fprintf_unfiltered (gdb_stdlog, "displaced: run %s: ",
1696
                              paddress (resume_gdbarch, actual_pc));
1697
          read_memory (actual_pc, buf, sizeof (buf));
1698
          displaced_step_dump_bytes (gdb_stdlog, buf, sizeof (buf));
1699
        }
1700
 
1701
      /* Install inferior's terminal modes.  */
1702
      target_terminal_inferior ();
1703
 
1704
      /* Avoid confusing the next resume, if the next stop/resume
1705
         happens to apply to another thread.  */
1706
      tp->stop_signal = TARGET_SIGNAL_0;
1707
 
1708
      target_resume (resume_ptid, step, sig);
1709
    }
1710
 
1711
  discard_cleanups (old_cleanups);
1712
}
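
/* Illustrative sketch, not part of infrun.c: the thread-set narrowing
   that resume above performs when picking RESUME_PTID, reduced to a
   pure function over hypothetical flags.  The several
   stepping-past-a-breakpoint cases of the real code are collapsed here
   into a single STEPPING_PAST_BREAKPOINT flag.  Start from "resume
   everything" and progressively restrict to one process or one
   thread.  */

enum example_resume_set
{
  EXAMPLE_RESUME_ALL_PROCESSES,
  EXAMPLE_RESUME_CURRENT_PROCESS,
  EXAMPLE_RESUME_SINGLE_THREAD
};

static enum example_resume_set
example_choose_resume_set (int sched_multiple, int multi_process_ok,
                           int stepping_past_breakpoint, int non_stop_mode,
                           int scheduler_locked)
{
  enum example_resume_set set = EXAMPLE_RESUME_ALL_PROCESSES;

  /* Maybe resume only the threads of the current process.  */
  if (!sched_multiple && multi_process_ok)
    set = EXAMPLE_RESUME_CURRENT_PROCESS;

  /* Maybe resume a single thread after all.  */
  if (stepping_past_breakpoint || non_stop_mode || scheduler_locked)
    set = EXAMPLE_RESUME_SINGLE_THREAD;

  return set;
}
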
1713
 
1714
/* Proceeding.  */
1715
 
1716
/* Clear out all variables saying what to do when the inferior is continued.
1717
   First do this, then set the ones you want, then call `proceed'.  */
1718
 
1719
static void
1720
clear_proceed_status_thread (struct thread_info *tp)
1721
{
1722
  if (debug_infrun)
1723
    fprintf_unfiltered (gdb_stdlog,
1724
                        "infrun: clear_proceed_status_thread (%s)\n",
1725
                        target_pid_to_str (tp->ptid));
1726
 
1727
  tp->trap_expected = 0;
1728
  tp->step_range_start = 0;
1729
  tp->step_range_end = 0;
1730
  tp->step_frame_id = null_frame_id;
1731
  tp->step_stack_frame_id = null_frame_id;
1732
  tp->step_over_calls = STEP_OVER_UNDEBUGGABLE;
1733
  tp->stop_requested = 0;
1734
 
1735
  tp->stop_step = 0;
1736
 
1737
  tp->proceed_to_finish = 0;
1738
 
1739
  /* Discard any remaining commands or status from previous stop.  */
1740
  bpstat_clear (&tp->stop_bpstat);
1741
}
1742
 
1743
static int
1744
clear_proceed_status_callback (struct thread_info *tp, void *data)
1745
{
1746
  if (is_exited (tp->ptid))
1747
    return 0;
1748
 
1749
  clear_proceed_status_thread (tp);
1750
  return 0;
1751
}
1752
 
1753
void
1754
clear_proceed_status (void)
1755
{
1756
  if (!non_stop)
1757
    {
1758
      /* In all-stop mode, delete the per-thread status of all
1759
         threads.  Even if inferior_ptid is null_ptid, there may be
1760
         threads on the list.  E.g., we may be launching a new
1761
         process while selecting the executable.  */
1762
      iterate_over_threads (clear_proceed_status_callback, NULL);
1763
    }
1764
 
1765
  if (!ptid_equal (inferior_ptid, null_ptid))
1766
    {
1767
      struct inferior *inferior;
1768
 
1769
      if (non_stop)
1770
        {
1771
          /* If in non-stop mode, only delete the per-thread status of
1772
             the current thread.  */
1773
          clear_proceed_status_thread (inferior_thread ());
1774
        }
1775
 
1776
      inferior = current_inferior ();
1777
      inferior->stop_soon = NO_STOP_QUIETLY;
1778
    }
1779
 
1780
  stop_after_trap = 0;
1781
 
1782
  observer_notify_about_to_proceed ();
1783
 
1784
  if (stop_registers)
1785
    {
1786
      regcache_xfree (stop_registers);
1787
      stop_registers = NULL;
1788
    }
1789
}
1790
 
1791
/* Check the current thread against the thread that reported the most recent
1792
   event.  If a step-over is required return TRUE and set the current thread
1793
   to the old thread.  Otherwise return FALSE.
1794
 
1795
   This should be suitable for any targets that support threads. */
1796
 
1797
static int
1798
prepare_to_proceed (int step)
1799
{
1800
  ptid_t wait_ptid;
1801
  struct target_waitstatus wait_status;
1802
  int schedlock_enabled;
1803
 
1804
  /* With non-stop mode on, threads are always handled individually.  */
1805
  gdb_assert (! non_stop);
1806
 
1807
  /* Get the last target status returned by target_wait().  */
1808
  get_last_target_status (&wait_ptid, &wait_status);
1809
 
1810
  /* Make sure we were stopped at a breakpoint.  */
1811
  if (wait_status.kind != TARGET_WAITKIND_STOPPED
1812
      || (wait_status.value.sig != TARGET_SIGNAL_TRAP
1813
          && wait_status.value.sig != TARGET_SIGNAL_ILL
1814
          && wait_status.value.sig != TARGET_SIGNAL_SEGV
1815
          && wait_status.value.sig != TARGET_SIGNAL_EMT))
1816
    {
1817
      return 0;
1818
    }
1819
 
1820
  schedlock_enabled = (scheduler_mode == schedlock_on
1821
                       || (scheduler_mode == schedlock_step
1822
                           && step));
1823
 
1824
  /* Don't switch over to WAIT_PTID if scheduler locking is on.  */
1825
  if (schedlock_enabled)
1826
    return 0;
1827
 
1828
  /* Don't switch over if we're about to resume some process other
1830
     than WAIT_PTID's, and schedule-multiple is off.  */
1830
  if (!sched_multi
1831
      && ptid_get_pid (wait_ptid) != ptid_get_pid (inferior_ptid))
1832
    return 0;
1833
 
1834
  /* Switched over from WAIT_PTID.  */
1835
  if (!ptid_equal (wait_ptid, minus_one_ptid)
1836
      && !ptid_equal (inferior_ptid, wait_ptid))
1837
    {
1838
      struct regcache *regcache = get_thread_regcache (wait_ptid);
1839
 
1840
      if (breakpoint_here_p (get_regcache_aspace (regcache),
1841
                             regcache_read_pc (regcache)))
1842
        {
1843
          /* If stepping, remember current thread to switch back to.  */
1844
          if (step)
1845
            deferred_step_ptid = inferior_ptid;
1846
 
1847
          /* Switch back to the WAIT_PTID thread.  */
1848
          switch_to_thread (wait_ptid);
1849
 
1850
          /* We return 1 to indicate that there is a breakpoint here,
1851
             so we need to step over it before continuing to avoid
1852
             hitting it straight away. */
1853
          return 1;
1854
        }
1855
    }
1856
 
1857
  return 0;
1858
}
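
/* Illustrative sketch, not part of infrun.c: the gist of
   prepare_to_proceed above as a pure predicate.  Every parameter is a
   hypothetical stand-in: STOPPED_BY_TRAP says the last event was a
   breakpoint-like stop, SCHED_LOCKED and SAME_PROCESS describe the
   scheduling constraints, EVENT_THREAD_IS_CURRENT says the thread that
   reported the event is already selected, and
   EVENT_THREAD_AT_BREAKPOINT says that thread still sits on an
   inserted breakpoint.  Returns nonzero when that breakpoint must be
   stepped over before proceeding.  */

static int
example_needs_step_over_before_proceed (int stopped_by_trap,
                                        int sched_locked,
                                        int same_process,
                                        int event_thread_is_current,
                                        int event_thread_at_breakpoint)
{
  /* Only a breakpoint-like stop can leave a thread sitting on a
     breakpoint it is about to re-hit.  */
  if (!stopped_by_trap)
    return 0;

  /* Don't switch threads behind the user's back when the scheduler is
     locked, or when the event came from another process and
     schedule-multiple is off.  */
  if (sched_locked || !same_process)
    return 0;

  /* If the event thread is already the current one, the normal path
     in proceed deals with the breakpoint.  */
  if (event_thread_is_current)
    return 0;

  return event_thread_at_breakpoint;
}
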
1859
 
1860
/* Basic routine for continuing the program in various fashions.
1861
 
1862
   ADDR is the address to resume at, or -1 for resume where stopped.
1863
   SIGGNAL is the signal to give it, or 0 for none,
1864
   or -1 for act according to how it stopped.
1865
   STEP is nonzero if we should trap after one instruction.
1866
   -1 means return after that and print nothing.
1867
   You should probably set various step_... variables
1868
   before calling here, if you are stepping.
1869
 
1870
   You should call clear_proceed_status before calling proceed.  */
1871
 
1872
void
1873
proceed (CORE_ADDR addr, enum target_signal siggnal, int step)
1874
{
1875
  struct regcache *regcache;
1876
  struct gdbarch *gdbarch;
1877
  struct thread_info *tp;
1878
  CORE_ADDR pc;
1879
  struct address_space *aspace;
1880
  int oneproc = 0;
1881
 
1882
  /* If we're stopped at a fork/vfork, follow the branch set by the
1883
     "set follow-fork-mode" command; otherwise, we'll just proceed
1884
     resuming the current thread.  */
1885
  if (!follow_fork ())
1886
    {
1887
      /* The target for some reason decided not to resume.  */
1888
      normal_stop ();
1889
      return;
1890
    }
1891
 
1892
  regcache = get_current_regcache ();
1893
  gdbarch = get_regcache_arch (regcache);
1894
  aspace = get_regcache_aspace (regcache);
1895
  pc = regcache_read_pc (regcache);
1896
 
1897
  if (step > 0)
1898
    step_start_function = find_pc_function (pc);
1899
  if (step < 0)
1900
    stop_after_trap = 1;
1901
 
1902
  if (addr == (CORE_ADDR) -1)
1903
    {
1904
      if (pc == stop_pc && breakpoint_here_p (aspace, pc)
1905
          && execution_direction != EXEC_REVERSE)
1906
        /* There is a breakpoint at the address we will resume at,
1907
           step one instruction before inserting breakpoints so that
1908
           we do not stop right away (and report a second hit at this
1909
           breakpoint).
1910
 
1911
           Note, we don't do this in reverse, because we won't
1912
           actually be executing the breakpoint insn anyway.
1913
           We'll be (un-)executing the previous instruction.  */
1914
 
1915
        oneproc = 1;
1916
      else if (gdbarch_single_step_through_delay_p (gdbarch)
1917
               && gdbarch_single_step_through_delay (gdbarch,
1918
                                                     get_current_frame ()))
1919
        /* We stepped onto an instruction that needs to be stepped
1920
           again before re-inserting the breakpoint, do so.  */
1921
        oneproc = 1;
1922
    }
1923
  else
1924
    {
1925
      regcache_write_pc (regcache, addr);
1926
    }
1927
 
1928
  if (debug_infrun)
1929
    fprintf_unfiltered (gdb_stdlog,
1930
                        "infrun: proceed (addr=%s, signal=%d, step=%d)\n",
1931
                        paddress (gdbarch, addr), siggnal, step);
1932
 
1933
  /* We're handling a live event, so make sure we're doing live
1934
     debugging.  If we're looking at traceframes while the target is
1935
     running, we're going to need to get back to that mode after
1936
     handling the event.  */
1937
  if (non_stop)
1938
    {
1939
      make_cleanup_restore_current_traceframe ();
1940
      set_traceframe_number (-1);
1941
    }
1942
 
1943
  if (non_stop)
1944
    /* In non-stop, each thread is handled individually.  The context
1945
       must already be set to the right thread here.  */
1946
    ;
1947
  else
1948
    {
1949
      /* In a multi-threaded task we may select another thread and
1950
         then continue or step.
1951
 
1952
         But if the old thread was stopped at a breakpoint, it will
1953
         immediately cause another breakpoint stop without any
1954
         execution (i.e. it will report a breakpoint hit incorrectly).
1955
         So we must step over it first.
1956
 
1957
         prepare_to_proceed checks the current thread against the
1958
         thread that reported the most recent event.  If a step-over
1959
         is required it returns TRUE and sets the current thread to
1960
         the old thread. */
1961
      if (prepare_to_proceed (step))
1962
        oneproc = 1;
1963
    }
1964
 
1965
  /* prepare_to_proceed may change the current thread.  */
1966
  tp = inferior_thread ();
1967
 
1968
  if (oneproc)
1969
    {
1970
      tp->trap_expected = 1;
1971
      /* If displaced stepping is enabled, we can step over the
1972
         breakpoint without hitting it, so leave all breakpoints
1973
         inserted.  Otherwise we need to disable all breakpoints, step
1974
         one instruction, and then re-add them when that step is
1975
         finished.  */
1976
      if (!use_displaced_stepping (gdbarch))
1977
        remove_breakpoints ();
1978
    }
1979
 
1980
  /* We can insert breakpoints if we're not trying to step over one,
1981
     or if we are stepping over one but we're using displaced stepping
1982
     to do so.  */
1983
  if (! tp->trap_expected || use_displaced_stepping (gdbarch))
1984
    insert_breakpoints ();
1985
 
1986
  if (!non_stop)
1987
    {
1988
      /* Pass the last stop signal to the thread we're resuming,
1989
         irrespective of whether the current thread is the thread that
1990
         got the last event or not.  This was historically GDB's
1991
         behaviour before keeping a stop_signal per thread.  */
1992
 
1993
      struct thread_info *last_thread;
1994
      ptid_t last_ptid;
1995
      struct target_waitstatus last_status;
1996
 
1997
      get_last_target_status (&last_ptid, &last_status);
1998
      if (!ptid_equal (inferior_ptid, last_ptid)
1999
          && !ptid_equal (last_ptid, null_ptid)
2000
          && !ptid_equal (last_ptid, minus_one_ptid))
2001
        {
2002
          last_thread = find_thread_ptid (last_ptid);
2003
          if (last_thread)
2004
            {
2005
              tp->stop_signal = last_thread->stop_signal;
2006
              last_thread->stop_signal = TARGET_SIGNAL_0;
2007
            }
2008
        }
2009
    }
2010
 
2011
  if (siggnal != TARGET_SIGNAL_DEFAULT)
2012
    tp->stop_signal = siggnal;
2013
  /* If this signal should not be seen by program,
2014
     give it zero.  Used for debugging signals.  */
2015
  else if (!signal_program[tp->stop_signal])
2016
    tp->stop_signal = TARGET_SIGNAL_0;
2017
 
2018
  annotate_starting ();
2019
 
2020
  /* Make sure that output from GDB appears before output from the
2021
     inferior.  */
2022
  gdb_flush (gdb_stdout);
2023
 
2024
  /* Refresh prev_pc value just prior to resuming.  This used to be
2025
     done in stop_stepping; however, setting prev_pc there did not handle
2026
     scenarios such as inferior function calls or returning from
2027
     a function via the return command.  In those cases, the prev_pc
2028
     value was not set properly for subsequent commands.  The prev_pc value
2029
     is used to initialize the starting line number in the ecs.  With an
2030
     invalid value, the gdb next command ends up stopping at the position
2031
     represented by the next line table entry past our start position.
2032
     On platforms that generate one line table entry per line, this
2033
     is not a problem.  However, on the ia64, the compiler generates
2034
     extraneous line table entries that do not increase the line number.
2035
     When we issue the gdb next command on the ia64 after an inferior call
2036
     or a return command, we often end up a few instructions forward, still
2037
     within the original line we started.
2038
 
2039
     An attempt was made to refresh the prev_pc at the same time the
2040
     execution_control_state is initialized (for instance, just before
2041
     waiting for an inferior event).  But this approach did not work
2042
     because of platforms that use ptrace, where the pc register cannot
2043
     be read unless the inferior is stopped.  At that point, we are not
2044
     guaranteed the inferior is stopped and so the regcache_read_pc() call
2045
     can fail.  Setting the prev_pc value here ensures the value is updated
2046
     correctly when the inferior is stopped.  */
2047
  tp->prev_pc = regcache_read_pc (get_current_regcache ());
2048
 
2049
  /* Fill in with reasonable starting values.  */
2050
  init_thread_stepping_state (tp);
2051
 
2052
  /* Reset to normal state.  */
2053
  init_infwait_state ();
2054
 
2055
  /* Resume inferior.  */
2056
  resume (oneproc || step || bpstat_should_step (), tp->stop_signal);
2057
 
2058
  /* Wait for it to stop (if not standalone)
2059
     and in any case decode why it stopped, and act accordingly.  */
2060
  /* Do this only if we are not using the event loop, or if the target
2061
     does not support asynchronous execution. */
2062
  if (!target_can_async_p ())
2063
    {
2064
      wait_for_inferior (0);
2065
      normal_stop ();
2066
    }
2067
}
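
/* Illustrative sketch, not part of infrun.c: the "step one instruction
   first" decision made near the top of proceed above, reduced to a
   pure function and covering only the breakpoint-at-the-resume-address
   case (the delay-slot case is omitted).  The parameters are
   hypothetical stand-ins: RESUME_PC is where execution will resume,
   STOP_PC is where the last stop was reported, BREAKPOINT_AT_PC says
   whether a breakpoint is inserted there, and REVERSE says whether we
   are executing backwards.  */

static int
example_needs_initial_step_over (unsigned long resume_pc,
                                 unsigned long stop_pc,
                                 int breakpoint_at_pc, int reverse)
{
  /* Resuming exactly where we stopped, on top of a breakpoint, would
     immediately report a second, spurious hit, so step over it first.
     In reverse execution the breakpoint insn is not about to be
     executed, so no step-over is needed.  */
  return resume_pc == stop_pc && breakpoint_at_pc && !reverse;
}
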
2068
 
2069
 
2070
/* Start remote-debugging of a machine over a serial link.  */
2071
 
2072
void
2073
start_remote (int from_tty)
2074
{
2075
  struct inferior *inferior;
2076
 
2077
  init_wait_for_inferior ();
2078
  inferior = current_inferior ();
2079
  inferior->stop_soon = STOP_QUIETLY_REMOTE;
2080
 
2081
  /* Always go on waiting for the target, regardless of the mode. */
2082
  /* FIXME: cagney/1999-09-23: At present it isn't possible to
2083
     indicate to wait_for_inferior that a target should timeout if
2084
     nothing is returned (instead of just blocking).  Because of this,
2085
     targets expecting an immediate response need to, internally, set
2086
     things up so that the target_wait() is forced to eventually
2087
     timeout. */
2088
  /* FIXME: cagney/1999-09-24: It isn't possible for target_open() to
2089
     differentiate to its caller what the state of the target is after
2090
     the initial open has been performed.  Here we're assuming that
2091
     the target has stopped.  It should be possible to eventually have
2092
     target_open() return to the caller an indication that the target
2093
     is currently running and GDB state should be set to the same as
2094
     for an async run. */
2095
  wait_for_inferior (0);
2096
 
2097
  /* Now that the inferior has stopped, do any bookkeeping like
2098
     loading shared libraries.  We want to do this before normal_stop,
2099
     so that the displayed frame is up to date.  */
2100
  post_create_inferior (&current_target, from_tty);
2101
 
2102
  normal_stop ();
2103
}
2104
 
2105
/* Initialize static vars when a new inferior begins.  */
2106
 
2107
void
2108
init_wait_for_inferior (void)
2109
{
2110
  /* These are meaningless until the first time through wait_for_inferior.  */
2111
 
2112
  breakpoint_init_inferior (inf_starting);
2113
 
2114
  clear_proceed_status ();
2115
 
2116
  stepping_past_singlestep_breakpoint = 0;
2117
  deferred_step_ptid = null_ptid;
2118
 
2119
  target_last_wait_ptid = minus_one_ptid;
2120
 
2121
  previous_inferior_ptid = null_ptid;
2122
  init_infwait_state ();
2123
 
2124
  /* Discard any skipped inlined frames.  */
2125
  clear_inline_frame_state (minus_one_ptid);
2126
}
2127
 
2128
 
2129
/* This enum encodes possible reasons for doing a target_wait, so that
2130
   wfi can call target_wait in one place.  (Ultimately the call will be
2131
   moved out of the infinite loop entirely.) */
2132
 
2133
enum infwait_states
2134
{
2135
  infwait_normal_state,
2136
  infwait_thread_hop_state,
2137
  infwait_step_watch_state,
2138
  infwait_nonstep_watch_state
2139
};
2140
 
2141
/* Why did the inferior stop? Used to print the appropriate messages
2142
   to the interface from within handle_inferior_event(). */
2143
enum inferior_stop_reason
2144
{
2145
  /* Step, next, nexti, stepi finished. */
2146
  END_STEPPING_RANGE,
2147
  /* Inferior terminated by signal. */
2148
  SIGNAL_EXITED,
2149
  /* Inferior exited. */
2150
  EXITED,
2151
  /* Inferior received signal, and user asked to be notified. */
2152
  SIGNAL_RECEIVED,
2153
  /* Reverse execution -- target ran out of history info.  */
2154
  NO_HISTORY
2155
};
2156
 
2157
/* The PTID we'll do a target_wait on.  */
2158
ptid_t waiton_ptid;
2159
 
2160
/* Current inferior wait state.  */
2161
enum infwait_states infwait_state;
2162
 
2163
/* Data to be passed around while handling an event.  This data is
2164
   discarded between events.  */
2165
struct execution_control_state
2166
{
2167
  ptid_t ptid;
2168
  /* The thread that got the event, if this was a thread event; NULL
2169
     otherwise.  */
2170
  struct thread_info *event_thread;
2171
 
2172
  struct target_waitstatus ws;
2173
  int random_signal;
2174
  CORE_ADDR stop_func_start;
2175
  CORE_ADDR stop_func_end;
2176
  char *stop_func_name;
2177
  int new_thread_event;
2178
  int wait_some_more;
2179
};
2180
 
2181
static void handle_inferior_event (struct execution_control_state *ecs);
2182
 
2183
static void handle_step_into_function (struct gdbarch *gdbarch,
2184
                                       struct execution_control_state *ecs);
2185
static void handle_step_into_function_backward (struct gdbarch *gdbarch,
2186
                                                struct execution_control_state *ecs);
2187
static void insert_step_resume_breakpoint_at_frame (struct frame_info *step_frame);
2188
static void insert_step_resume_breakpoint_at_caller (struct frame_info *);
2189
static void insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
2190
                                                  struct symtab_and_line sr_sal,
2191
                                                  struct frame_id sr_id);
2192
static void insert_longjmp_resume_breakpoint (struct gdbarch *, CORE_ADDR);
2193
 
2194
static void stop_stepping (struct execution_control_state *ecs);
2195
static void prepare_to_wait (struct execution_control_state *ecs);
2196
static void keep_going (struct execution_control_state *ecs);
2197
static void print_stop_reason (enum inferior_stop_reason stop_reason,
2198
                               int stop_info);
2199
 
2200
/* Callback for iterate over threads.  If the thread is stopped, but
2201
   the user/frontend doesn't know about that yet, go through
2202
   normal_stop, as if the thread had just stopped now.  ARG points at
2203
   a ptid.  If PTID is MINUS_ONE_PTID, applies to all threads.  If
2204
   ptid_is_pid(PTID) is true, applies to all threads of the process
2205
   pointed at by PTID.  Otherwise, apply only to the thread pointed by
2206
   PTID.  */
2207
 
2208
static int
2209
infrun_thread_stop_requested_callback (struct thread_info *info, void *arg)
2210
{
2211
  ptid_t ptid = * (ptid_t *) arg;
2212
 
2213
  if ((ptid_equal (info->ptid, ptid)
2214
       || ptid_equal (minus_one_ptid, ptid)
2215
       || (ptid_is_pid (ptid)
2216
           && ptid_get_pid (ptid) == ptid_get_pid (info->ptid)))
2217
      && is_running (info->ptid)
2218
      && !is_executing (info->ptid))
2219
    {
2220
      struct cleanup *old_chain;
2221
      struct execution_control_state ecss;
2222
      struct execution_control_state *ecs = &ecss;
2223
 
2224
      memset (ecs, 0, sizeof (*ecs));
2225
 
2226
      old_chain = make_cleanup_restore_current_thread ();
2227
 
2228
      switch_to_thread (info->ptid);
2229
 
2230
      /* Go through handle_inferior_event/normal_stop, so we always
2231
         have consistent output as if the stop event had been
2232
         reported.  */
2233
      ecs->ptid = info->ptid;
2234
      ecs->event_thread = find_thread_ptid (info->ptid);
2235
      ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2236
      ecs->ws.value.sig = TARGET_SIGNAL_0;
2237
 
2238
      handle_inferior_event (ecs);
2239
 
2240
      if (!ecs->wait_some_more)
2241
        {
2242
          struct thread_info *tp;
2243
 
2244
          normal_stop ();
2245
 
2246
          /* Finish off the continuations.  The continuations
2247
             themselves are responsible for realising the thread
2248
             didn't finish what it was supposed to do.  */
2249
          tp = inferior_thread ();
2250
          do_all_intermediate_continuations_thread (tp);
2251
          do_all_continuations_thread (tp);
2252
        }
2253
 
2254
      do_cleanups (old_chain);
2255
    }
2256
 
2257
  return 0;
2258
}
2259
 
2260
/* This function is attached as a "thread_stop_requested" observer.
2261
   Cleanup local state that assumed the PTID was to be resumed, and
2262
   report the stop to the frontend.  */
2263
 
2264
static void
2265
infrun_thread_stop_requested (ptid_t ptid)
2266
{
2267
  struct displaced_step_inferior_state *displaced;
2268
 
2269
  /* PTID was requested to stop.  Remove it from the displaced
2270
     stepping queue, so we don't try to resume it automatically.  */
2271
 
2272
  for (displaced = displaced_step_inferior_states;
2273
       displaced;
2274
       displaced = displaced->next)
2275
    {
2276
      struct displaced_step_request *it, **prev_next_p;
2277
 
2278
      it = displaced->step_request_queue;
2279
      prev_next_p = &displaced->step_request_queue;
2280
      while (it)
2281
        {
2282
          if (ptid_match (it->ptid, ptid))
2283
            {
2284
              *prev_next_p = it->next;
2285
              it->next = NULL;
2286
              xfree (it);
2287
            }
2288
          else
2289
            {
2290
              prev_next_p = &it->next;
2291
            }
2292
 
2293
          it = *prev_next_p;
2294
        }
2295
    }
2296
 
2297
  iterate_over_threads (infrun_thread_stop_requested_callback, &ptid);
2298
}
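
/* Illustrative sketch, not part of infrun.c: the unlink-while-walking
   pattern used above to drop queued requests, written against a
   hypothetical example_node type.  PREV_NEXT_P always points at the
   link that refers to the node being examined, so a matching node can
   be spliced out without a special case for the list head.  Freeing
   the removed node is elided here; the real code frees it.  */

struct example_node
{
  int id;
  struct example_node *next;
};

static void
example_remove_matching (struct example_node **queue, int id)
{
  struct example_node **prev_next_p = queue;
  struct example_node *it = *prev_next_p;

  while (it != NULL)
    {
      if (it->id == id)
        {
          /* Splice IT out; PREV_NEXT_P stays put and now refers to
             whatever followed IT.  */
          *prev_next_p = it->next;
        }
      else
        prev_next_p = &it->next;

      it = *prev_next_p;
    }
}
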
2299
 
2300
static void
2301
infrun_thread_thread_exit (struct thread_info *tp, int silent)
2302
{
2303
  if (ptid_equal (target_last_wait_ptid, tp->ptid))
2304
    nullify_last_target_wait_ptid ();
2305
}
2306
 
2307
/* Callback for iterate_over_threads.  */
2308
 
2309
static int
2310
delete_step_resume_breakpoint_callback (struct thread_info *info, void *data)
2311
{
2312
  if (is_exited (info->ptid))
2313
    return 0;
2314
 
2315
  delete_step_resume_breakpoint (info);
2316
  return 0;
2317
}
2318
 
2319
/* In all-stop, delete the step resume breakpoint of any thread that
2320
   had one.  In non-stop, delete the step resume breakpoint of the
2321
   thread that just stopped.  */
2322
 
2323
static void
2324
delete_step_thread_step_resume_breakpoint (void)
2325
{
2326
  if (!target_has_execution
2327
      || ptid_equal (inferior_ptid, null_ptid))
2328
    /* If the inferior has exited, we have already deleted the step
2329
       resume breakpoints out of GDB's lists.  */
2330
    return;
2331
 
2332
  if (non_stop)
2333
    {
2334
      /* If in non-stop mode, only delete the step-resume or
2335
         longjmp-resume breakpoint of the thread that just stopped
2336
         stepping.  */
2337
      struct thread_info *tp = inferior_thread ();
2338
 
2339
      delete_step_resume_breakpoint (tp);
2340
    }
2341
  else
2342
    /* In all-stop mode, delete all step-resume and longjmp-resume
2343
       breakpoints of any thread that had them.  */
2344
    iterate_over_threads (delete_step_resume_breakpoint_callback, NULL);
2345
}
2346
 
2347
/* A cleanup wrapper. */
2348
 
2349
static void
2350
delete_step_thread_step_resume_breakpoint_cleanup (void *arg)
2351
{
2352
  delete_step_thread_step_resume_breakpoint ();
2353
}
2354
 
2355
/* Pretty print the results of target_wait, for debugging purposes.  */
2356
 
2357
static void
2358
print_target_wait_results (ptid_t waiton_ptid, ptid_t result_ptid,
2359
                           const struct target_waitstatus *ws)
2360
{
2361
  char *status_string = target_waitstatus_to_string (ws);
2362
  struct ui_file *tmp_stream = mem_fileopen ();
2363
  char *text;
2364
 
2365
  /* The text is split over several lines because it was getting too long.
2366
     Call fprintf_unfiltered (gdb_stdlog) once so that the text is still
2367
     output as a unit; we want only one timestamp printed if debug_timestamp
2368
     is set.  */
2369
 
2370
  fprintf_unfiltered (tmp_stream,
2371
                      "infrun: target_wait (%d", PIDGET (waiton_ptid));
2372
  if (PIDGET (waiton_ptid) != -1)
2373
    fprintf_unfiltered (tmp_stream,
2374
                        " [%s]", target_pid_to_str (waiton_ptid));
2375
  fprintf_unfiltered (tmp_stream, ", status) =\n");
2376
  fprintf_unfiltered (tmp_stream,
2377
                      "infrun:   %d [%s],\n",
2378
                      PIDGET (result_ptid), target_pid_to_str (result_ptid));
2379
  fprintf_unfiltered (tmp_stream,
2380
                      "infrun:   %s\n",
2381
                      status_string);
2382
 
2383
  text = ui_file_xstrdup (tmp_stream, NULL);
2384
 
2385
  /* This uses %s in part to handle %'s in the text, but also to avoid
2386
     a gcc error: the format attribute requires a string literal.  */
2387
  fprintf_unfiltered (gdb_stdlog, "%s", text);
2388
 
2389
  xfree (status_string);
2390
  xfree (text);
2391
  ui_file_delete (tmp_stream);
2392
}
2393
 
2394
/* Prepare and stabilize the inferior for detaching it.  E.g.,
2395
   detaching while a thread is displaced stepping is a recipe for
2396
   crashing it, as nothing would readjust the PC out of the scratch
2397
   pad.  */
2398
 
2399
void
2400
prepare_for_detach (void)
2401
{
2402
  struct inferior *inf = current_inferior ();
2403
  ptid_t pid_ptid = pid_to_ptid (inf->pid);
2404
  struct cleanup *old_chain_1;
2405
  struct displaced_step_inferior_state *displaced;
2406
 
2407
  displaced = get_displaced_stepping_state (inf->pid);
2408
 
2409
  /* Is any thread of this process displaced stepping?  If not,
2410
     there's nothing else to do.  */
2411
  if (displaced == NULL || ptid_equal (displaced->step_ptid, null_ptid))
2412
    return;
2413
 
2414
  if (debug_infrun)
2415
    fprintf_unfiltered (gdb_stdlog,
2416
                        "displaced-stepping in-process while detaching");
2417
 
2418
  old_chain_1 = make_cleanup_restore_integer (&inf->detaching);
2419
  inf->detaching = 1;
2420
 
2421
  while (!ptid_equal (displaced->step_ptid, null_ptid))
2422
    {
2423
      struct cleanup *old_chain_2;
2424
      struct execution_control_state ecss;
2425
      struct execution_control_state *ecs;
2426
 
2427
      ecs = &ecss;
2428
      memset (ecs, 0, sizeof (*ecs));
2429
 
2430
      overlay_cache_invalid = 1;
2431
 
2432
      /* We have to invalidate the registers BEFORE calling
2433
         target_wait because they can be loaded from the target while
2434
         in target_wait.  This makes remote debugging a bit more
2435
         efficient for those targets that provide critical registers
2436
         as part of their normal status mechanism. */
2437
 
2438
      registers_changed ();
2439
 
2440
      if (deprecated_target_wait_hook)
2441
        ecs->ptid = deprecated_target_wait_hook (pid_ptid, &ecs->ws, 0);
2442
      else
2443
        ecs->ptid = target_wait (pid_ptid, &ecs->ws, 0);
2444
 
2445
      if (debug_infrun)
2446
        print_target_wait_results (pid_ptid, ecs->ptid, &ecs->ws);
2447
 
2448
      /* If an error happens while handling the event, propagate GDB's
2449
         knowledge of the executing state to the frontend/user running
2450
         state.  */
2451
      old_chain_2 = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2452
 
2453
      /* In non-stop mode, each thread is handled individually.
2454
         Switch early, so the global state is set correctly for this
2455
         thread.  */
2456
      if (non_stop
2457
          && ecs->ws.kind != TARGET_WAITKIND_EXITED
2458
          && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2459
        context_switch (ecs->ptid);
2460
 
2461
      /* Now figure out what to do with the result of the wait.  */
2462
      handle_inferior_event (ecs);
2463
 
2464
      /* No error, don't finish the state yet.  */
2465
      discard_cleanups (old_chain_2);
2466
 
2467
      /* Breakpoints and watchpoints are not installed on the target
2468
         at this point, and signals are passed directly to the
2469
         inferior, so this must mean the process is gone.  */
2470
      if (!ecs->wait_some_more)
2471
        {
2472
          discard_cleanups (old_chain_1);
2473
          error (_("Program exited while detaching"));
2474
        }
2475
    }
2476
 
2477
  discard_cleanups (old_chain_1);
2478
}
2479
 
2480
/* Wait for control to return from inferior to debugger.
2481
 
2482
   If TREAT_EXEC_AS_SIGTRAP is non-zero, then handle EXEC signals
2483
   as if they were SIGTRAP signals.  This can be useful during
2484
   the startup sequence on some targets such as HP/UX, where
2485
   we receive an EXEC event instead of the expected SIGTRAP.
2486
 
2487
   If inferior gets a signal, we may decide to start it up again
2488
   instead of returning.  That is why there is a loop in this function.
2489
   When this function actually returns it means the inferior
2490
   should be left stopped and GDB should read more commands.  */
2491
 
2492
void
2493
wait_for_inferior (int treat_exec_as_sigtrap)
2494
{
2495
  struct cleanup *old_cleanups;
2496
  struct execution_control_state ecss;
2497
  struct execution_control_state *ecs;
2498
 
2499
  if (debug_infrun)
2500
    fprintf_unfiltered
2501
      (gdb_stdlog, "infrun: wait_for_inferior (treat_exec_as_sigtrap=%d)\n",
2502
       treat_exec_as_sigtrap);
2503
 
2504
  old_cleanups =
2505
    make_cleanup (delete_step_thread_step_resume_breakpoint_cleanup, NULL);
2506
 
2507
  ecs = &ecss;
2508
  memset (ecs, 0, sizeof (*ecs));
2509
 
2510
  /* We'll update this if & when we switch to a new thread.  */
2511
  previous_inferior_ptid = inferior_ptid;
2512
 
2513
  while (1)
2514
    {
2515
      struct cleanup *old_chain;
2516
 
2517
      /* We have to invalidate the registers BEFORE calling target_wait
2518
         because they can be loaded from the target while in target_wait.
2519
         This makes remote debugging a bit more efficient for those
2520
         targets that provide critical registers as part of their normal
2521
         status mechanism. */
2522
 
2523
      overlay_cache_invalid = 1;
2524
      registers_changed ();
2525
 
2526
      if (deprecated_target_wait_hook)
2527
        ecs->ptid = deprecated_target_wait_hook (waiton_ptid, &ecs->ws, 0);
2528
      else
2529
        ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
2530
 
2531
      if (debug_infrun)
2532
        print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2533
 
2534
      if (treat_exec_as_sigtrap && ecs->ws.kind == TARGET_WAITKIND_EXECD)
2535
        {
2536
          xfree (ecs->ws.value.execd_pathname);
2537
          ecs->ws.kind = TARGET_WAITKIND_STOPPED;
2538
          ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
2539
        }
2540
 
2541
      /* If an error happens while handling the event, propagate GDB's
2542
         knowledge of the executing state to the frontend/user running
2543
         state.  */
2544
      old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2545
 
2546
      if (ecs->ws.kind == TARGET_WAITKIND_SYSCALL_ENTRY
2547
          || ecs->ws.kind == TARGET_WAITKIND_SYSCALL_RETURN)
2548
        ecs->ws.value.syscall_number = UNKNOWN_SYSCALL;
2549
 
2550
      /* Now figure out what to do with the result of the wait.  */
2551
      handle_inferior_event (ecs);
2552
 
2553
      /* No error, don't finish the state yet.  */
2554
      discard_cleanups (old_chain);
2555
 
2556
      if (!ecs->wait_some_more)
2557
        break;
2558
    }
2559
 
2560
  do_cleanups (old_cleanups);
2561
}
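
/* Illustrative sketch, not part of infrun.c: the shape of the
   synchronous loop in wait_for_inferior above.  WAIT_ONE_EVENT and
   HANDLE_EVENT are hypothetical callbacks; HANDLE_EVENT returns
   nonzero while more events are needed to finish the current command.
   The register invalidation and cleanup bookkeeping of the real loop
   are omitted.  */

static void
example_wait_loop (void (*wait_one_event) (void *),
                   int (*handle_event) (void *), void *event)
{
  for (;;)
    {
      /* Block until the target reports something.  */
      wait_one_event (event);

      /* Process it; a zero return means the command is complete and
         control should go back to the user.  */
      if (!handle_event (event))
        break;
    }
}
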
2562
 
2563
/* Asynchronous version of wait_for_inferior. It is called by the
2564
   event loop whenever a change of state is detected on the file
2565
   descriptor corresponding to the target. It can be called more than
2566
   once to complete a single execution command. In such cases we need
2567
   to keep the state in a global variable ECSS. If it is the last time
2568
   that this function is called for a single execution command, then
2569
   report to the user that the inferior has stopped, and do the
2570
   necessary cleanups. */
2571
 
2572
void
2573
fetch_inferior_event (void *client_data)
2574
{
2575
  struct execution_control_state ecss;
2576
  struct execution_control_state *ecs = &ecss;
2577
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
2578
  struct cleanup *ts_old_chain;
2579
  int was_sync = sync_execution;
2580
 
2581
  memset (ecs, 0, sizeof (*ecs));
2582
 
2583
  /* We'll update this if & when we switch to a new thread.  */
2584
  previous_inferior_ptid = inferior_ptid;
2585
 
2586
  if (non_stop)
2587
    /* In non-stop mode, the user/frontend should not notice a thread
2588
       switch due to internal events.  Make sure we revert to the
2589
       user-selected thread and frame after handling the event and
2590
       running any breakpoint commands.  */
2591
    make_cleanup_restore_current_thread ();
2592
 
2593
  /* We have to invalidate the registers BEFORE calling target_wait
2594
     because they can be loaded from the target while in target_wait.
2595
     This makes remote debugging a bit more efficient for those
2596
     targets that provide critical registers as part of their normal
2597
     status mechanism. */
2598
 
2599
  overlay_cache_invalid = 1;
2600
  registers_changed ();
2601
 
2602
  if (deprecated_target_wait_hook)
2603
    ecs->ptid =
2604
      deprecated_target_wait_hook (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2605
  else
2606
    ecs->ptid = target_wait (waiton_ptid, &ecs->ws, TARGET_WNOHANG);
2607
 
2608
  if (debug_infrun)
2609
    print_target_wait_results (waiton_ptid, ecs->ptid, &ecs->ws);
2610
 
2611
  if (non_stop
2612
      && ecs->ws.kind != TARGET_WAITKIND_IGNORE
2613
      && ecs->ws.kind != TARGET_WAITKIND_EXITED
2614
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
2615
    /* In non-stop mode, each thread is handled individually.  Switch
2616
       early, so the global state is set correctly for this
2617
       thread.  */
2618
    context_switch (ecs->ptid);
2619
 
2620
  /* If an error happens while handling the event, propagate GDB's
2621
     knowledge of the executing state to the frontend/user running
2622
     state.  */
2623
  if (!non_stop)
2624
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
2625
  else
2626
    ts_old_chain = make_cleanup (finish_thread_state_cleanup, &ecs->ptid);
2627
 
2628
  /* Now figure out what to do with the result of the wait.  */
2629
  handle_inferior_event (ecs);
2630
 
2631
  if (!ecs->wait_some_more)
2632
    {
2633
      struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));
2634
 
2635
      delete_step_thread_step_resume_breakpoint ();
2636
 
2637
      /* We may not find an inferior if this was a process exit.  */
2638
      if (inf == NULL || inf->stop_soon == NO_STOP_QUIETLY)
2639
        normal_stop ();
2640
 
2641
      if (target_has_execution
2642
          && ecs->ws.kind != TARGET_WAITKIND_EXITED
2643
          && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
2644
          && ecs->event_thread->step_multi
2645
          && ecs->event_thread->stop_step)
2646
        inferior_event_handler (INF_EXEC_CONTINUE, NULL);
2647
      else
2648
        inferior_event_handler (INF_EXEC_COMPLETE, NULL);
2649
    }
2650
 
2651
  /* No error, don't finish the thread states yet.  */
2652
  discard_cleanups (ts_old_chain);
2653
 
2654
  /* Revert thread and frame.  */
2655
  do_cleanups (old_chain);
2656
 
2657
  /* If the inferior was in sync execution mode, and now isn't,
2658
     restore the prompt.  */
2659
  if (was_sync && !sync_execution)
2660
    display_gdb_prompt (0);
2661
}
2662
 
2663
/* Record the frame and location we're currently stepping through.  */
2664
void
2665
set_step_info (struct frame_info *frame, struct symtab_and_line sal)
2666
{
2667
  struct thread_info *tp = inferior_thread ();
2668
 
2669
  tp->step_frame_id = get_frame_id (frame);
2670
  tp->step_stack_frame_id = get_stack_frame_id (frame);
2671
 
2672
  tp->current_symtab = sal.symtab;
2673
  tp->current_line = sal.line;
2674
}
2675
 
2676
/* Clear context switchable stepping state.  */
2677
 
2678
void
2679
init_thread_stepping_state (struct thread_info *tss)
2680
{
2681
  tss->stepping_over_breakpoint = 0;
2682
  tss->step_after_step_resume_breakpoint = 0;
2683
  tss->stepping_through_solib_after_catch = 0;
2684
  tss->stepping_through_solib_catchpoints = NULL;
2685
}
2686
 
2687
/* Return the cached copy of the last pid/waitstatus returned by
2688
   target_wait()/deprecated_target_wait_hook().  The data is actually
2689
   cached by handle_inferior_event(), which gets called immediately
2690
   after target_wait()/deprecated_target_wait_hook().  */
2691
 
2692
void
2693
get_last_target_status (ptid_t *ptidp, struct target_waitstatus *status)
2694
{
2695
  *ptidp = target_last_wait_ptid;
2696
  *status = target_last_waitstatus;
2697
}
2698
 
2699
void
2700
nullify_last_target_wait_ptid (void)
2701
{
2702
  target_last_wait_ptid = minus_one_ptid;
2703
}
2704
 
2705
/* Switch thread contexts.  */
2706
 
2707
static void
2708
context_switch (ptid_t ptid)
2709
{
2710
  if (debug_infrun)
2711
    {
2712
      fprintf_unfiltered (gdb_stdlog, "infrun: Switching context from %s ",
2713
                          target_pid_to_str (inferior_ptid));
2714
      fprintf_unfiltered (gdb_stdlog, "to %s\n",
2715
                          target_pid_to_str (ptid));
2716
    }
2717
 
2718
  switch_to_thread (ptid);
2719
}
2720
 
2721
static void
2722
adjust_pc_after_break (struct execution_control_state *ecs)
2723
{
2724
  struct regcache *regcache;
2725
  struct gdbarch *gdbarch;
2726
  struct address_space *aspace;
2727
  CORE_ADDR breakpoint_pc;
2728
 
2729
  /* If we've hit a breakpoint, we'll normally be stopped with SIGTRAP.  If
2730
     we aren't, just return.
2731
 
2732
     We assume that waitkinds other than TARGET_WAITKIND_STOPPED are not
2733
     affected by gdbarch_decr_pc_after_break.  Other waitkinds which are
2734
     implemented by software breakpoints should be handled through the normal
2735
     breakpoint layer.
2736
 
2737
     NOTE drow/2004-01-31: On some targets, breakpoints may generate
2738
     different signals (SIGILL or SIGEMT for instance), but it is less
2739
     clear where the PC is pointing afterwards.  It may not match
2740
     gdbarch_decr_pc_after_break.  I don't know any specific target that
2741
     generates these signals at breakpoints (the code has been in GDB since at
2742
     least 1992) so I can not guess how to handle them here.
2743
 
2744
     In earlier versions of GDB, a target with
2745
     gdbarch_have_nonsteppable_watchpoint would have the PC after hitting a
2746
     watchpoint affected by gdbarch_decr_pc_after_break.  I haven't found any
2747
     target with both of these set in GDB history, and it seems unlikely to be
2748
     correct, so gdbarch_have_nonsteppable_watchpoint is not checked here.  */
2749
 
2750
  if (ecs->ws.kind != TARGET_WAITKIND_STOPPED)
2751
    return;
2752
 
2753
  if (ecs->ws.value.sig != TARGET_SIGNAL_TRAP)
2754
    return;
2755
 
2756
  /* In reverse execution, when a breakpoint is hit, the instruction
2757
     under it has already been de-executed.  The reported PC always
2758
     points at the breakpoint address, so adjusting it further would
2759
     be wrong.  E.g., consider this case on a decr_pc_after_break == 1
2760
     architecture:
2761
 
2762
       B1         0x08000000 :   INSN1
2763
       B2         0x08000001 :   INSN2
2764
                  0x08000002 :   INSN3
2765
            PC -> 0x08000003 :   INSN4
2766
 
2767
     Say you're stopped at 0x08000003 as above.  Reverse continuing
2768
     from that point should hit B2 as below.  Reading the PC when the
2769
     SIGTRAP is reported should read 0x08000001 and INSN2 should have
2770
     been de-executed already.
2771
 
2772
       B1         0x08000000 :   INSN1
2773
       B2   PC -> 0x08000001 :   INSN2
2774
                  0x08000002 :   INSN3
2775
                  0x08000003 :   INSN4
2776
 
2777
     We can't apply the same logic as for forward execution, because
2778
     we would wrongly adjust the PC to 0x08000000, since there's a
2779
     breakpoint at PC - 1.  We'd then report a hit on B1, although
2780
     INSN1 hadn't been de-executed yet.  Doing nothing is the correct
2781
     behaviour.  */
2782
  if (execution_direction == EXEC_REVERSE)
2783
    return;
2784
 
2785
  /* If this target does not decrement the PC after breakpoints, then
2786
     we have nothing to do.  */
2787
  regcache = get_thread_regcache (ecs->ptid);
2788
  gdbarch = get_regcache_arch (regcache);
2789
  if (gdbarch_decr_pc_after_break (gdbarch) == 0)
2790
    return;
2791
 
2792
  aspace = get_regcache_aspace (regcache);
2793
 
2794
  /* Find the location where (if we've hit a breakpoint) the
2795
     breakpoint would be.  */
2796
  breakpoint_pc = regcache_read_pc (regcache)
2797
                  - gdbarch_decr_pc_after_break (gdbarch);
2798
 
2799
  /* Check whether there actually is a software breakpoint inserted at
2800
     that location.
2801
 
2802
     If in non-stop mode, a race condition is possible where we've
2803
     removed a breakpoint, but stop events for that breakpoint were
2804
     already queued and arrive later.  To suppress those spurious
2805
     SIGTRAPs, we keep a list of such breakpoint locations for a bit,
2806
     and retire them after a number of stop events are reported.  */
2807
  if (software_breakpoint_inserted_here_p (aspace, breakpoint_pc)
2808
      || (non_stop && moribund_breakpoint_here_p (aspace, breakpoint_pc)))
2809
    {
2810
      struct cleanup *old_cleanups = NULL;
2811
 
2812
      if (RECORD_IS_USED)
2813
        old_cleanups = record_gdb_operation_disable_set ();
2814
 
2815
      /* When using hardware single-step, a SIGTRAP is reported for both
2816
         a completed single-step and a software breakpoint.  Need to
2817
         differentiate between the two, as the latter needs adjusting
2818
         but the former does not.
2819
 
2820
         The SIGTRAP can be due to a completed hardware single-step only if
2821
          - we didn't insert software single-step breakpoints
2822
          - the thread to be examined is still the current thread
2823
          - this thread is currently being stepped
2824
 
2825
         If any of these events did not occur, we must have stopped due
2826
         to hitting a software breakpoint, and have to back up to the
2827
         breakpoint address.
2828
 
2829
         As a special case, we could have hardware single-stepped a
2830
         software breakpoint.  In this case (prev_pc == breakpoint_pc),
2831
         we also need to back up to the breakpoint address.  */
2832
 
2833
      if (singlestep_breakpoints_inserted_p
2834
          || !ptid_equal (ecs->ptid, inferior_ptid)
2835
          || !currently_stepping (ecs->event_thread)
2836
          || ecs->event_thread->prev_pc == breakpoint_pc)
2837
        regcache_write_pc (regcache, breakpoint_pc);
2838
 
2839
      if (RECORD_IS_USED)
2840
        do_cleanups (old_cleanups);
2841
    }
2842
}
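
/* Illustrative sketch, not part of infrun.c: the core arithmetic of
   adjust_pc_after_break above, as a pure function.  DECR is a
   hypothetical stand-in for the architecture's decr_pc_after_break
   value, REPORTED_PC is what the target reported with the SIGTRAP, and
   BREAKPOINT_AT_ADJUSTED_PC says whether a software breakpoint is
   actually inserted at REPORTED_PC - DECR.  The real code adds further
   checks to tell a completed hardware single-step from a breakpoint
   hit; those are condensed into the single flag here.  Returns the PC
   value GDB should use.  */

static unsigned long
example_adjust_pc_after_break (unsigned long reported_pc, unsigned long decr,
                               int breakpoint_at_adjusted_pc)
{
  unsigned long breakpoint_pc = reported_pc - decr;

  /* Only back up when the trap really came from a software breakpoint;
     otherwise the reported PC is already correct.  */
  if (decr != 0 && breakpoint_at_adjusted_pc)
    return breakpoint_pc;

  return reported_pc;
}
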
2843
 
2844
void
2845
init_infwait_state (void)
2846
{
2847
  waiton_ptid = pid_to_ptid (-1);
2848
  infwait_state = infwait_normal_state;
2849
}
2850
 
2851
void
2852
error_is_running (void)
2853
{
2854
  error (_("\
2855
Cannot execute this command while the selected thread is running."));
2856
}
2857
 
2858
void
2859
ensure_not_running (void)
2860
{
2861
  if (is_running (inferior_ptid))
2862
    error_is_running ();
2863
}
2864
 
2865
static int
2866
stepped_in_from (struct frame_info *frame, struct frame_id step_frame_id)
2867
{
2868
  for (frame = get_prev_frame (frame);
2869
       frame != NULL;
2870
       frame = get_prev_frame (frame))
2871
    {
2872
      if (frame_id_eq (get_frame_id (frame), step_frame_id))
2873
        return 1;
2874
      if (get_frame_type (frame) != INLINE_FRAME)
2875
        break;
2876
    }
2877
 
2878
  return 0;
2879
}

/* Auxiliary function that handles syscall entry/return events.
   It returns 1 if the inferior should keep going (and GDB
   should ignore the event), or 0 if the event deserves to be
   processed.  */

static int
handle_syscall_event (struct execution_control_state *ecs)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  int syscall_number;

  if (!ptid_equal (ecs->ptid, inferior_ptid))
    context_switch (ecs->ptid);

  regcache = get_thread_regcache (ecs->ptid);
  gdbarch = get_regcache_arch (regcache);
  syscall_number = gdbarch_get_syscall_number (gdbarch, ecs->ptid);
  stop_pc = regcache_read_pc (regcache);

  target_last_waitstatus.value.syscall_number = syscall_number;

  if (catch_syscall_enabled () > 0
      && catching_syscall_number (syscall_number) > 0)
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: syscall number = '%d'\n",
                            syscall_number);

      ecs->event_thread->stop_bpstat
        = bpstat_stop_status (get_regcache_aspace (regcache),
                              stop_pc, ecs->ptid);
      ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);

      if (!ecs->random_signal)
        {
          /* Catchpoint hit.  */
          ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
          return 0;
        }
    }

  /* If no catchpoint triggered for this, then keep going.  */
  ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
  keep_going (ecs);
  return 1;
}
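
/* Illustrative sketch (not part of infrun.c): per the comment above,
   callers treat a non-zero result as "event already consumed and the
   inferior resumed", and zero as "a syscall catchpoint fired, fall
   through to the common stop tests".  The waitkind switch further
   below uses exactly this shape; schematically:  */
#if 0
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      if (handle_syscall_event (ecs) != 0)
        return;                     /* No catchpoint: already resumed.  */
      goto process_event_stop_test; /* Catchpoint hit: decide whether to stop.  */
#endif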

/* Given an execution control state that has been freshly filled in
   by an event from the inferior, figure out what it means and take
   appropriate action.  */

static void
handle_inferior_event (struct execution_control_state *ecs)
{
  struct frame_info *frame;
  struct gdbarch *gdbarch;
  int sw_single_step_trap_p = 0;
  int stopped_by_watchpoint;
  int stepped_after_stopped_by_watchpoint = 0;
  struct symtab_and_line stop_pc_sal;
  enum stop_kind stop_soon;

  if (ecs->ws.kind == TARGET_WAITKIND_IGNORE)
    {
      /* We had an event in the inferior, but we are not interested in
         handling it at this level.  The lower layers have already
         done what needs to be done, if anything.

         One of the possible circumstances for this is when the
         inferior produces output for the console.  The inferior has
         not stopped, and we are ignoring the event.  Another possible
         circumstance is any event which the lower level knows will be
         reported multiple times without an intervening resume.  */
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_IGNORE\n");
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind != TARGET_WAITKIND_EXITED
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED)
    {
      struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));

      gdb_assert (inf);
      stop_soon = inf->stop_soon;
    }
  else
    stop_soon = NO_STOP_QUIETLY;

  /* Cache the last pid/waitstatus. */
  target_last_wait_ptid = ecs->ptid;
  target_last_waitstatus = ecs->ws;

  /* Always clear state belonging to the previous time we stopped.  */
  stop_stack_dummy = STOP_NONE;

  /* If it's a new process, add it to the thread database */

  ecs->new_thread_event = (!ptid_equal (ecs->ptid, inferior_ptid)
                           && !ptid_equal (ecs->ptid, minus_one_ptid)
                           && !in_thread_list (ecs->ptid));

  if (ecs->ws.kind != TARGET_WAITKIND_EXITED
      && ecs->ws.kind != TARGET_WAITKIND_SIGNALLED && ecs->new_thread_event)
    add_thread (ecs->ptid);

  ecs->event_thread = find_thread_ptid (ecs->ptid);

  /* Dependent on valid ECS->EVENT_THREAD.  */
  adjust_pc_after_break (ecs);

  /* Dependent on the current PC value modified by adjust_pc_after_break.  */
  reinit_frame_cache ();

  breakpoint_retire_moribund ();

  /* First, distinguish signals caused by the debugger from signals
     that have to do with the program's own actions.  Note that
     breakpoint insns may cause SIGTRAP or SIGILL or SIGEMT, depending
     on the operating system version.  Here we detect when a SIGILL or
     SIGEMT is really a breakpoint and change it to SIGTRAP.  We do
     something similar for SIGSEGV, since a SIGSEGV will be generated
     when we're trying to execute a breakpoint instruction on a
     non-executable stack.  This happens for call dummy breakpoints
     for architectures like SPARC that place call dummies on the
     stack.  */
  if (ecs->ws.kind == TARGET_WAITKIND_STOPPED
      && (ecs->ws.value.sig == TARGET_SIGNAL_ILL
          || ecs->ws.value.sig == TARGET_SIGNAL_SEGV
          || ecs->ws.value.sig == TARGET_SIGNAL_EMT))
    {
      struct regcache *regcache = get_thread_regcache (ecs->ptid);

      if (breakpoint_inserted_here_p (get_regcache_aspace (regcache),
                                      regcache_read_pc (regcache)))
        {
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: Treating signal as SIGTRAP\n");
          ecs->ws.value.sig = TARGET_SIGNAL_TRAP;
        }
    }

  /* Mark the non-executing threads accordingly.  In all-stop, all
     threads of all processes are stopped when we get any event
     reported.  In non-stop mode, only the event thread stops.  If
     we're handling a process exit in non-stop mode, there's nothing
     to do, as threads of the dead process are gone, and threads of
     any other process were left running.  */
  if (!non_stop)
    set_executing (minus_one_ptid, 0);
  else if (ecs->ws.kind != TARGET_WAITKIND_SIGNALLED
           && ecs->ws.kind != TARGET_WAITKIND_EXITED)
    set_executing (inferior_ptid, 0);

  switch (infwait_state)
    {
    case infwait_thread_hop_state:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: infwait_thread_hop_state\n");
      break;

    case infwait_normal_state:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: infwait_normal_state\n");
      break;

    case infwait_step_watch_state:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: infwait_step_watch_state\n");

      stepped_after_stopped_by_watchpoint = 1;
      break;

    case infwait_nonstep_watch_state:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog,
                            "infrun: infwait_nonstep_watch_state\n");
      insert_breakpoints ();

      /* FIXME-maybe: is this cleaner than setting a flag?  Does it
         handle things like signals arriving and other things happening
         in combination correctly?  */
      stepped_after_stopped_by_watchpoint = 1;
      break;

    default:
      internal_error (__FILE__, __LINE__, _("bad switch"));
    }

  infwait_state = infwait_normal_state;
  waiton_ptid = pid_to_ptid (-1);

  switch (ecs->ws.kind)
    {
    case TARGET_WAITKIND_LOADED:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_LOADED\n");
      /* Ignore gracefully during startup of the inferior, as it might
         be the shell which has just loaded some objects, otherwise
         add the symbols for the newly loaded objects.  Also ignore at
         the beginning of an attach or remote session; we will query
         the full list of libraries once the connection is
         established.  */
      if (stop_soon == NO_STOP_QUIETLY)
        {
          /* Check for any newly added shared libraries if we're
             supposed to be adding them automatically.  Switch
             terminal for any messages produced by
             breakpoint_re_set.  */
          target_terminal_ours_for_output ();
          /* NOTE: cagney/2003-11-25: Make certain that the target
             stack's section table is kept up-to-date.  Architectures
             (e.g., PPC64) use the section table to perform
             operations such as address => section name and hence
             require the table to contain all sections (including
             those found in shared libraries).  */
#ifdef SOLIB_ADD
          SOLIB_ADD (NULL, 0, &current_target, auto_solib_add);
#else
          solib_add (NULL, 0, &current_target, auto_solib_add);
#endif
          target_terminal_inferior ();

          /* If requested, stop when the dynamic linker notifies
             gdb of events.  This allows the user to get control
             and place breakpoints in initializer routines for
             dynamically loaded objects (among other things).  */
          if (stop_on_solib_events)
            {
              /* Make sure we print "Stopped due to solib-event" in
                 normal_stop.  */
              stop_print_frame = 1;

              stop_stepping (ecs);
              return;
            }

          /* NOTE drow/2007-05-11: This might be a good place to check
             for "catch load".  */
        }

      /* If we are skipping through a shell, or through shared library
         loading that we aren't interested in, resume the program.  If
         we're running the program normally, also resume.  But stop if
         we're attaching or setting up a remote connection.  */
      if (stop_soon == STOP_QUIETLY || stop_soon == NO_STOP_QUIETLY)
        {
          /* Loading of shared libraries might have changed breakpoint
             addresses.  Make sure new breakpoints are inserted.  */
          if (stop_soon == NO_STOP_QUIETLY
              && !breakpoints_always_inserted_mode ())
            insert_breakpoints ();
          resume (0, TARGET_SIGNAL_0);
          prepare_to_wait (ecs);
          return;
        }

      break;

    case TARGET_WAITKIND_SPURIOUS:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SPURIOUS\n");
      resume (0, TARGET_SIGNAL_0);
      prepare_to_wait (ecs);
      return;

    case TARGET_WAITKIND_EXITED:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXITED\n");
      inferior_ptid = ecs->ptid;
      set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
      set_current_program_space (current_inferior ()->pspace);
      handle_vfork_child_exec_or_exit (0);
      target_terminal_ours ();  /* Must do this before mourn anyway */
      print_stop_reason (EXITED, ecs->ws.value.integer);

      /* Record the exit code in the convenience variable $_exitcode, so
         that the user can inspect this again later.  */
      set_internalvar_integer (lookup_internalvar ("_exitcode"),
                               (LONGEST) ecs->ws.value.integer);
      gdb_flush (gdb_stdout);
      target_mourn_inferior ();
      singlestep_breakpoints_inserted_p = 0;
      cancel_single_step_breakpoints ();
      stop_print_frame = 0;
      stop_stepping (ecs);
      return;

    case TARGET_WAITKIND_SIGNALLED:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SIGNALLED\n");
      inferior_ptid = ecs->ptid;
      set_current_inferior (find_inferior_pid (ptid_get_pid (ecs->ptid)));
      set_current_program_space (current_inferior ()->pspace);
      handle_vfork_child_exec_or_exit (0);
      stop_print_frame = 0;
      target_terminal_ours ();  /* Must do this before mourn anyway */

      /* Note: By definition of TARGET_WAITKIND_SIGNALLED, we shouldn't
         reach here unless the inferior is dead.  However, for years
         target_kill() was called here, which hints that fatal signals aren't
         really fatal on some systems.  If that's true, then some changes
         may be needed. */
      target_mourn_inferior ();

      print_stop_reason (SIGNAL_EXITED, ecs->ws.value.sig);
      singlestep_breakpoints_inserted_p = 0;
      cancel_single_step_breakpoints ();
      stop_stepping (ecs);
      return;

      /* The following are the only cases in which we keep going;
         the above cases end in a continue or goto.  */
    case TARGET_WAITKIND_FORKED:
    case TARGET_WAITKIND_VFORKED:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_FORKED\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
        {
          context_switch (ecs->ptid);
          reinit_frame_cache ();
        }

      /* Immediately detach breakpoints from the child before there's
         any chance of letting the user delete breakpoints from the
         breakpoint lists.  If we don't do this early, it's easy to
         leave left over traps in the child, viz.: "break foo; catch
         fork; c; <fork>; del; c; <child calls foo>".  We only follow
         the fork on the last `continue', and by that time the
         breakpoint at "foo" is long gone from the breakpoint table.
         If we vforked, then we don't need to unpatch here, since both
         parent and child are sharing the same memory pages; we'll
         need to unpatch at follow/detach time instead to be certain
         that new breakpoints added between catchpoint hit time and
         vfork follow are detached.  */
      if (ecs->ws.kind != TARGET_WAITKIND_VFORKED)
        {
          int child_pid = ptid_get_pid (ecs->ws.value.related_pid);

          /* This won't actually modify the breakpoint list, but will
             physically remove the breakpoints from the child.  */
          detach_breakpoints (child_pid);
        }

      if (singlestep_breakpoints_inserted_p)
        {
          /* Pull the single step breakpoints out of the target.  */
          remove_single_step_breakpoints ();
          singlestep_breakpoints_inserted_p = 0;
        }

      /* In case the event is caught by a catchpoint, remember that
         the event is to be followed at the next resume of the thread,
         and not immediately.  */
      ecs->event_thread->pending_follow = ecs->ws;

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      ecs->event_thread->stop_bpstat
        = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
                              stop_pc, ecs->ptid);

      /* Note that we're interested in knowing whether the bpstat
         actually causes a stop, not just whether it may explain the
         signal.  Software watchpoints, for example, always appear in
         the bpstat.  */
      ecs->random_signal = !bpstat_causes_stop (ecs->event_thread->stop_bpstat);

      /* If no catchpoint triggered for this, then keep going.  */
      if (ecs->random_signal)
        {
          ptid_t parent;
          ptid_t child;
          int should_resume;
          int follow_child = (follow_fork_mode_string == follow_fork_mode_child);

          ecs->event_thread->stop_signal = TARGET_SIGNAL_0;

          should_resume = follow_fork ();

          parent = ecs->ptid;
          child = ecs->ws.value.related_pid;

          /* In non-stop mode, also resume the other branch.  */
          if (non_stop && !detach_fork)
            {
              if (follow_child)
                switch_to_thread (parent);
              else
                switch_to_thread (child);

              ecs->event_thread = inferior_thread ();
              ecs->ptid = inferior_ptid;
              keep_going (ecs);
            }

          if (follow_child)
            switch_to_thread (child);
          else
            switch_to_thread (parent);

          ecs->event_thread = inferior_thread ();
          ecs->ptid = inferior_ptid;

          if (should_resume)
            keep_going (ecs);
          else
            stop_stepping (ecs);
          return;
        }
      ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
      goto process_event_stop_test;

    case TARGET_WAITKIND_VFORK_DONE:
      /* Done with the shared memory region.  Re-insert breakpoints in
         the parent, and keep going.  */

      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_VFORK_DONE\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
        context_switch (ecs->ptid);

      current_inferior ()->waiting_for_vfork_done = 0;
      current_inferior ()->pspace->breakpoints_not_allowed = 0;
      /* This also takes care of reinserting breakpoints in the
         previously locked inferior.  */
      keep_going (ecs);
      return;

    case TARGET_WAITKIND_EXECD:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_EXECD\n");

      if (!ptid_equal (ecs->ptid, inferior_ptid))
        {
          context_switch (ecs->ptid);
          reinit_frame_cache ();
        }

      singlestep_breakpoints_inserted_p = 0;
      cancel_single_step_breakpoints ();

      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

      /* Do whatever is necessary to the parent branch of the vfork.  */
      handle_vfork_child_exec_or_exit (1);

      /* This causes the eventpoints and symbol table to be reset.
         Must do this now, before trying to determine whether to
         stop.  */
      follow_exec (inferior_ptid, ecs->ws.value.execd_pathname);

      ecs->event_thread->stop_bpstat
        = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
                              stop_pc, ecs->ptid);
      ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);

      /* Note that this may be referenced from inside
         bpstat_stop_status above, through inferior_has_execd.  */
      xfree (ecs->ws.value.execd_pathname);
      ecs->ws.value.execd_pathname = NULL;

      /* If no catchpoint triggered for this, then keep going.  */
      if (ecs->random_signal)
        {
          ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
          keep_going (ecs);
          return;
        }
      ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
      goto process_event_stop_test;

      /* Be careful not to try to gather much state about a thread
         that's in a syscall.  It's frequently a losing proposition.  */
    case TARGET_WAITKIND_SYSCALL_ENTRY:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_ENTRY\n");
      /* Getting the current syscall number.  */
      if (handle_syscall_event (ecs) != 0)
        return;
      goto process_event_stop_test;

      /* Before examining the threads further, step this thread to
         get it entirely out of the syscall.  (We get notice of the
         event when the thread is just on the verge of exiting a
         syscall.  Stepping one instruction seems to get it back
         into user code.)  */
    case TARGET_WAITKIND_SYSCALL_RETURN:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_SYSCALL_RETURN\n");
      if (handle_syscall_event (ecs) != 0)
        return;
      goto process_event_stop_test;

    case TARGET_WAITKIND_STOPPED:
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: TARGET_WAITKIND_STOPPED\n");
      ecs->event_thread->stop_signal = ecs->ws.value.sig;
      break;

    case TARGET_WAITKIND_NO_HISTORY:
      /* Reverse execution: target ran out of history info.  */
      stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));
      print_stop_reason (NO_HISTORY, 0);
      stop_stepping (ecs);
      return;
    }
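
  /* Summary of the switch above (commentary, not original to
     infrun.c): SPURIOUS, EXITED, SIGNALLED, VFORK_DONE and NO_HISTORY
     always resume or stop and return; FORKED/VFORKED, EXECD and the
     two SYSCALL kinds either keep going (and return) or jump to
     process_event_stop_test; only STOPPED, and LOADED while we are
     attaching or setting up a remote connection, break out of the
     switch and reach the common handling below.  */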

  if (ecs->new_thread_event)
    {
      if (non_stop)
        /* Non-stop assumes that the target handles adding new threads
           to the thread list.  */
        internal_error (__FILE__, __LINE__, "\
targets should add new threads to the thread list themselves in non-stop mode.");

      /* We may want to consider not doing a resume here in order to
         give the user a chance to play with the new thread.  It might
         be good to make that a user-settable option.  */

      /* At this point, all threads are stopped (happens automatically
         in either the OS or the native code).  Therefore we need to
         continue all threads in order to make progress.  */

      if (!ptid_equal (ecs->ptid, inferior_ptid))
        context_switch (ecs->ptid);
      target_resume (RESUME_ALL, 0, TARGET_SIGNAL_0);
      prepare_to_wait (ecs);
      return;
    }

  if (ecs->ws.kind == TARGET_WAITKIND_STOPPED)
    {
      /* Do we need to clean up the state of a thread that has
         completed a displaced single-step?  (Doing so usually affects
         the PC, so do it here, before we set stop_pc.)  */
      displaced_step_fixup (ecs->ptid, ecs->event_thread->stop_signal);

      /* If we either finished a single-step or hit a breakpoint, but
         the user wanted this thread to be stopped, pretend we got a
         SIG0 (generic unsignaled stop).  */

      if (ecs->event_thread->stop_requested
          && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
        ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
    }

  stop_pc = regcache_read_pc (get_thread_regcache (ecs->ptid));

  if (debug_infrun)
    {
      struct regcache *regcache = get_thread_regcache (ecs->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      struct cleanup *old_chain = save_inferior_ptid ();

      inferior_ptid = ecs->ptid;

      fprintf_unfiltered (gdb_stdlog, "infrun: stop_pc = %s\n",
                          paddress (gdbarch, stop_pc));
      if (target_stopped_by_watchpoint ())
        {
          CORE_ADDR addr;

          fprintf_unfiltered (gdb_stdlog, "infrun: stopped by watchpoint\n");

          if (target_stopped_data_address (&current_target, &addr))
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: stopped data address = %s\n",
                                paddress (gdbarch, addr));
          else
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: (no data address available)\n");
        }

      do_cleanups (old_chain);
    }

  if (stepping_past_singlestep_breakpoint)
    {
      gdb_assert (singlestep_breakpoints_inserted_p);
      gdb_assert (ptid_equal (singlestep_ptid, ecs->ptid));
      gdb_assert (!ptid_equal (singlestep_ptid, saved_singlestep_ptid));

      stepping_past_singlestep_breakpoint = 0;

      /* We've either finished single-stepping past the single-step
         breakpoint, or stopped for some other reason.  It would be nice if
         we could tell, but we can't reliably.  */
      if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
        {
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog, "infrun: stepping_past_singlestep_breakpoint\n");
          /* Pull the single step breakpoints out of the target.  */
          remove_single_step_breakpoints ();
          singlestep_breakpoints_inserted_p = 0;

          ecs->random_signal = 0;
          ecs->event_thread->trap_expected = 0;

          context_switch (saved_singlestep_ptid);
          if (deprecated_context_hook)
            deprecated_context_hook (pid_to_thread_id (ecs->ptid));

          resume (1, TARGET_SIGNAL_0);
          prepare_to_wait (ecs);
          return;
        }
    }

  if (!ptid_equal (deferred_step_ptid, null_ptid))
    {
      /* In non-stop mode, there's never a deferred_step_ptid set.  */
      gdb_assert (!non_stop);

      /* If we stopped for some other reason than single-stepping, ignore
         the fact that we were supposed to switch back.  */
      if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
        {
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: handling deferred step\n");

          /* Pull the single step breakpoints out of the target.  */
          if (singlestep_breakpoints_inserted_p)
            {
              remove_single_step_breakpoints ();
              singlestep_breakpoints_inserted_p = 0;
            }

          /* Note: We do not call context_switch at this point, as the
             context is already set up for stepping the original thread.  */
          switch_to_thread (deferred_step_ptid);
          deferred_step_ptid = null_ptid;
          /* Suppress spurious "Switching to ..." message.  */
          previous_inferior_ptid = inferior_ptid;

          resume (1, TARGET_SIGNAL_0);
          prepare_to_wait (ecs);
          return;
        }

      deferred_step_ptid = null_ptid;
    }

  /* See if a thread hit a thread-specific breakpoint that was meant for
     another thread.  If so, then step that thread past the breakpoint,
     and continue it.  */

  if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
    {
      int thread_hop_needed = 0;
      struct address_space *aspace =
        get_regcache_aspace (get_thread_regcache (ecs->ptid));

      /* Check if a regular breakpoint has been hit before checking
         for a potential single step breakpoint.  Otherwise, GDB will
         not see this breakpoint hit when stepping onto breakpoints.  */
      if (regular_breakpoint_inserted_here_p (aspace, stop_pc))
        {
          ecs->random_signal = 0;
          if (!breakpoint_thread_match (aspace, stop_pc, ecs->ptid))
            thread_hop_needed = 1;
        }
      else if (singlestep_breakpoints_inserted_p)
        {
          /* We have not context switched yet, so this should be true
             no matter which thread hit the singlestep breakpoint.  */
          gdb_assert (ptid_equal (inferior_ptid, singlestep_ptid));
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog, "infrun: software single step "
                                "trap for %s\n",
                                target_pid_to_str (ecs->ptid));

          ecs->random_signal = 0;
          /* The call to in_thread_list is necessary because PTIDs sometimes
             change when we go from single-threaded to multi-threaded.  If
             the singlestep_ptid is still in the list, assume that it is
             really different from ecs->ptid.  */
          if (!ptid_equal (singlestep_ptid, ecs->ptid)
              && in_thread_list (singlestep_ptid))
            {
              /* If the PC of the thread we were trying to single-step
                 has changed, discard this event (which we were going
                 to ignore anyway), and pretend we saw that thread
                 trap.  This prevents us continuously moving the
                 single-step breakpoint forward, one instruction at a
                 time.  If the PC has changed, then the thread we were
                 trying to single-step has trapped or been signalled,
                 but the event has not been reported to GDB yet.

                 There might be some cases where this loses signal
                 information, if a signal has arrived at exactly the
                 same time that the PC changed, but this is the best
                 we can do with the information available.  Perhaps we
                 should arrange to report all events for all threads
                 when they stop, or to re-poll the remote looking for
                 this particular thread (i.e. temporarily enable
                 schedlock).  */

             CORE_ADDR new_singlestep_pc
               = regcache_read_pc (get_thread_regcache (singlestep_ptid));

             if (new_singlestep_pc != singlestep_pc)
               {
                 enum target_signal stop_signal;

                 if (debug_infrun)
                   fprintf_unfiltered (gdb_stdlog, "infrun: unexpected thread,"
                                       " but expected thread advanced also\n");

                 /* The current context still belongs to
                    singlestep_ptid.  Don't swap here, since that's
                    the context we want to use.  Just fudge our
                    state and continue.  */
                 stop_signal = ecs->event_thread->stop_signal;
                 ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
                 ecs->ptid = singlestep_ptid;
                 ecs->event_thread = find_thread_ptid (ecs->ptid);
                 ecs->event_thread->stop_signal = stop_signal;
                 stop_pc = new_singlestep_pc;
               }
             else
               {
                 if (debug_infrun)
                   fprintf_unfiltered (gdb_stdlog,
                                       "infrun: unexpected thread\n");

                 thread_hop_needed = 1;
                 stepping_past_singlestep_breakpoint = 1;
                 saved_singlestep_ptid = singlestep_ptid;
               }
            }
        }

      if (thread_hop_needed)
        {
          struct regcache *thread_regcache;
          int remove_status = 0;

          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog, "infrun: thread_hop_needed\n");

          /* Switch context before touching inferior memory, the
             previous thread may have exited.  */
          if (!ptid_equal (inferior_ptid, ecs->ptid))
            context_switch (ecs->ptid);

          /* Saw a breakpoint, but it was hit by the wrong thread.
             Just continue.  */

          if (singlestep_breakpoints_inserted_p)
            {
              /* Pull the single step breakpoints out of the target.  */
              remove_single_step_breakpoints ();
              singlestep_breakpoints_inserted_p = 0;
            }

          /* If the arch can displace step, don't remove the
             breakpoints.  */
          thread_regcache = get_thread_regcache (ecs->ptid);
          if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
            remove_status = remove_breakpoints ();

          /* Did we fail to remove breakpoints?  If so, try
             to set the PC past the bp.  (There's at least
             one situation in which we can fail to remove
             the bp's: On HP-UX's that use ttrace, we can't
             change the address space of a vforking child
             process until the child exits (well, okay, not
             then either :-) or execs. */
          if (remove_status != 0)
            error (_("Cannot step over breakpoint hit in wrong thread"));
          else
            {                   /* Single step */
              if (!non_stop)
                {
                  /* Only need to require the next event from this
                     thread in all-stop mode.  */
                  waiton_ptid = ecs->ptid;
                  infwait_state = infwait_thread_hop_state;
                }

              ecs->event_thread->stepping_over_breakpoint = 1;
              keep_going (ecs);
              return;
            }
        }
      else if (singlestep_breakpoints_inserted_p)
        {
          sw_single_step_trap_p = 1;
          ecs->random_signal = 0;
        }
    }
  else
    ecs->random_signal = 1;

  /* See if something interesting happened to the non-current thread.  If
     so, then switch to that thread.  */
  if (!ptid_equal (ecs->ptid, inferior_ptid))
    {
      if (debug_infrun)
        fprintf_unfiltered (gdb_stdlog, "infrun: context switch\n");

      context_switch (ecs->ptid);

      if (deprecated_context_hook)
        deprecated_context_hook (pid_to_thread_id (ecs->ptid));
    }

  /* At this point, get hold of the now-current thread's frame.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  if (singlestep_breakpoints_inserted_p)
    {
      /* Pull the single step breakpoints out of the target.  */
      remove_single_step_breakpoints ();
      singlestep_breakpoints_inserted_p = 0;
    }

  if (stepped_after_stopped_by_watchpoint)
    stopped_by_watchpoint = 0;
  else
    stopped_by_watchpoint = watchpoints_triggered (&ecs->ws);

  /* If necessary, step over this watchpoint.  We'll be back to display
     it in a moment.  */
  if (stopped_by_watchpoint
      && (target_have_steppable_watchpoint
          || gdbarch_have_nonsteppable_watchpoint (gdbarch)))
    {
      /* At this point, we are stopped at an instruction which has
         attempted to write to a piece of memory under control of
         a watchpoint.  The instruction hasn't actually executed
         yet.  If we were to evaluate the watchpoint expression
         now, we would get the old value, and therefore no change
         would seem to have occurred.

         In order to make watchpoints work `right', we really need
         to complete the memory write, and then evaluate the
         watchpoint expression.  We do this by single-stepping the
         target.

         It may not be necessary to disable the watchpoint to stop over
         it.  For example, the PA can (with some kernel cooperation)
         single step over a watchpoint without disabling the watchpoint.

         It is far more common to need to disable a watchpoint to step
         the inferior over it.  If we have non-steppable watchpoints,
         we must disable the current watchpoint; it's simplest to
         disable all watchpoints and breakpoints.  */
      int hw_step = 1;

      if (!target_have_steppable_watchpoint)
        remove_breakpoints ();
        /* Single step */
      hw_step = maybe_software_singlestep (gdbarch, stop_pc);
      target_resume (ecs->ptid, hw_step, TARGET_SIGNAL_0);
      waiton_ptid = ecs->ptid;
      if (target_have_steppable_watchpoint)
        infwait_state = infwait_step_watch_state;
      else
        infwait_state = infwait_nonstep_watch_state;
      prepare_to_wait (ecs);
      return;
    }
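
  /* The watchpoint step-over above in brief (commentary, not original
     to infrun.c): with steppable watchpoints the instruction is simply
     single-stepped with everything left inserted
     (infwait_step_watch_state); with non-steppable watchpoints all
     breakpoints and watchpoints are pulled first and re-inserted once
     the step finishes (infwait_nonstep_watch_state), so that the write
     completes and the watchpoint expression is evaluated against the
     new value.  */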

  ecs->stop_func_start = 0;
  ecs->stop_func_end = 0;
  ecs->stop_func_name = 0;
  /* Don't care about return value; stop_func_start and stop_func_name
     will both be 0 if it doesn't work.  */
  find_pc_partial_function (stop_pc, &ecs->stop_func_name,
                            &ecs->stop_func_start, &ecs->stop_func_end);
  ecs->stop_func_start
    += gdbarch_deprecated_function_start_offset (gdbarch);
  ecs->event_thread->stepping_over_breakpoint = 0;
  bpstat_clear (&ecs->event_thread->stop_bpstat);
  ecs->event_thread->stop_step = 0;
  stop_print_frame = 1;
  ecs->random_signal = 0;
  stopped_by_random_signal = 0;

  /* Hide inlined functions starting here, unless we just performed stepi or
     nexti.  After stepi and nexti, always show the innermost frame (not any
     inline function call sites).  */
  if (ecs->event_thread->step_range_end != 1)
    skip_inline_frames (ecs->ptid);

  if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
      && ecs->event_thread->trap_expected
      && gdbarch_single_step_through_delay_p (gdbarch)
      && currently_stepping (ecs->event_thread))
    {
      /* We're trying to step off a breakpoint.  Turns out that we're
         also on an instruction that needs to be stepped multiple
         times before it has been fully executed.  E.g., architectures
         with a delay slot.  It needs to be stepped twice, once for
         the instruction and once for the delay slot.  */
      int step_through_delay
        = gdbarch_single_step_through_delay (gdbarch, frame);

      if (debug_infrun && step_through_delay)
        fprintf_unfiltered (gdb_stdlog, "infrun: step through delay\n");
      if (ecs->event_thread->step_range_end == 0 && step_through_delay)
        {
          /* The user issued a continue when stopped at a breakpoint.
             Set up for another trap and get out of here.  */
          ecs->event_thread->stepping_over_breakpoint = 1;
          keep_going (ecs);
          return;
        }
      else if (step_through_delay)
        {
          /* The user issued a step when stopped at a breakpoint.
             Maybe we should stop, maybe we should not - the delay
             slot *might* correspond to a line of source.  In any
             case, don't decide that here, just set
             ecs->stepping_over_breakpoint, making sure we
             single-step again before breakpoints are re-inserted.  */
          ecs->event_thread->stepping_over_breakpoint = 1;
        }
    }

  /* Look at the cause of the stop, and decide what to do.
     The alternatives are:
     1) stop_stepping and return; to really stop and return to the debugger,
     2) keep_going and return to start up again
     (set ecs->event_thread->stepping_over_breakpoint to 1 to single step once)
     3) set ecs->random_signal to 1, and the decision between 1 and 2
     will be made according to the signal handling tables.  */

  if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
      || stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_NO_SIGSTOP
      || stop_soon == STOP_QUIETLY_REMOTE)
    {
      if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP && stop_after_trap)
        {
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog, "infrun: stopped\n");
          stop_print_frame = 0;
          stop_stepping (ecs);
          return;
        }

      /* This originates from start_remote(), start_inferior() and
         shared libraries hook functions.  */
      if (stop_soon == STOP_QUIETLY || stop_soon == STOP_QUIETLY_REMOTE)
        {
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog, "infrun: quietly stopped\n");
          stop_stepping (ecs);
          return;
        }

      /* This originates from attach_command().  We need to overwrite
         the stop_signal here, because some kernels don't ignore a
         SIGSTOP in a subsequent ptrace(PTRACE_CONT,SIGSTOP) call.
         See more comments in inferior.h.  On the other hand, if we
         get a non-SIGSTOP, report it to the user - assume the backend
         will handle the SIGSTOP if it should show up later.

         Also consider that the attach is complete when we see a
         SIGTRAP.  Some systems (e.g. Windows), and stubs supporting
         target extended-remote report it instead of a SIGSTOP
         (e.g. gdbserver).  We already rely on SIGTRAP being our
         signal, so this is no exception.

         Also consider that the attach is complete when we see a
         TARGET_SIGNAL_0.  In non-stop mode, GDB will explicitly tell
         the target to stop all threads of the inferior, in case the
         low level attach operation doesn't stop them implicitly.  If
         they weren't stopped implicitly, then the stub will report a
         TARGET_SIGNAL_0, meaning: stopped for no particular reason
         other than GDB's request.  */
      if (stop_soon == STOP_QUIETLY_NO_SIGSTOP
          && (ecs->event_thread->stop_signal == TARGET_SIGNAL_STOP
              || ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
              || ecs->event_thread->stop_signal == TARGET_SIGNAL_0))
        {
          stop_stepping (ecs);
          ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
          return;
        }

      /* See if there is a breakpoint at the current PC.  */
      ecs->event_thread->stop_bpstat
        = bpstat_stop_status (get_regcache_aspace (get_current_regcache ()),
                              stop_pc, ecs->ptid);

      /* Following in case break condition called a
         function.  */
      stop_print_frame = 1;

      /* This is where we handle "moribund" watchpoints.  Unlike
         software breakpoint traps, hardware watchpoint traps are
         always distinguishable from random traps.  If no high-level
         watchpoint is associated with the reported stop data address
         anymore, then the bpstat does not explain the signal ---
         simply make sure to ignore it if `stopped_by_watchpoint' is
         set.  */

      if (debug_infrun
          && ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
          && !bpstat_explains_signal (ecs->event_thread->stop_bpstat)
          && stopped_by_watchpoint)
        fprintf_unfiltered (gdb_stdlog, "\
infrun: no user watchpoint explains watchpoint SIGTRAP, ignoring\n");

      /* NOTE: cagney/2003-03-29: These two checks for a random signal
         at one stage in the past included checks for an inferior
         function call's call dummy's return breakpoint.  The original
         comment, that went with the test, read:

         ``End of a stack dummy.  Some systems (e.g. Sony news) give
         another signal besides SIGTRAP, so check here as well as
         above.''

         If someone ever tries to get call dummies on a
         non-executable stack to work (where the target would stop
         with something like a SIGSEGV), then those tests might need
         to be re-instated.  Given, however, that the tests were only
         enabled when momentary breakpoints were not being used, I
         suspect that it won't be the case.

         NOTE: kettenis/2004-02-05: Indeed such checks don't seem to
         be necessary for call dummies on a non-executable stack on
         SPARC.  */

      if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP)
        ecs->random_signal
          = !(bpstat_explains_signal (ecs->event_thread->stop_bpstat)
              || stopped_by_watchpoint
              || ecs->event_thread->trap_expected
              || (ecs->event_thread->step_range_end
                  && ecs->event_thread->step_resume_breakpoint == NULL));
      else
        {
          ecs->random_signal = !bpstat_explains_signal (ecs->event_thread->stop_bpstat);
          if (!ecs->random_signal)
            ecs->event_thread->stop_signal = TARGET_SIGNAL_TRAP;
        }
    }

  /* When we reach this point, we've pretty much decided
     that the reason for stopping must've been a random
     (unexpected) signal. */

  else
    ecs->random_signal = 1;

process_event_stop_test:

  /* Re-fetch current thread's frame in case we did a
     "goto process_event_stop_test" above.  */
  frame = get_current_frame ();
  gdbarch = get_frame_arch (frame);

  /* For the program's own signals, act according to
     the signal handling tables.  */

  if (ecs->random_signal)
    {
      /* Signal not for debugging purposes.  */
      int printed = 0;
      struct inferior *inf = find_inferior_pid (ptid_get_pid (ecs->ptid));

      if (debug_infrun)
         fprintf_unfiltered (gdb_stdlog, "infrun: random signal %d\n",
                             ecs->event_thread->stop_signal);

      stopped_by_random_signal = 1;

      if (signal_print[ecs->event_thread->stop_signal])
        {
          printed = 1;
          target_terminal_ours_for_output ();
          print_stop_reason (SIGNAL_RECEIVED, ecs->event_thread->stop_signal);
        }
      /* Always stop on signals if we're either just gaining control
         of the program, or the user explicitly requested this thread
         to remain stopped.  */
      if (stop_soon != NO_STOP_QUIETLY
          || ecs->event_thread->stop_requested
          || (!inf->detaching
              && signal_stop_state (ecs->event_thread->stop_signal)))
        {
          stop_stepping (ecs);
          return;
        }
      /* If not going to stop, give terminal back
         if we took it away.  */
      else if (printed)
        target_terminal_inferior ();

      /* Clear the signal if it should not be passed.  */
      if (signal_program[ecs->event_thread->stop_signal] == 0)
        ecs->event_thread->stop_signal = TARGET_SIGNAL_0;

      if (ecs->event_thread->prev_pc == stop_pc
          && ecs->event_thread->trap_expected
          && ecs->event_thread->step_resume_breakpoint == NULL)
        {
          /* We were just starting a new sequence, attempting to
             single-step off of a breakpoint and expecting a SIGTRAP.
             Instead this signal arrives.  This signal will take us out
             of the stepping range so GDB needs to remember to, when
             the signal handler returns, resume stepping off that
             breakpoint.  */
          /* To simplify things, "continue" is forced to use the same
             code paths as single-step - set a breakpoint at the
             signal return address and then, once hit, step off that
             breakpoint.  */
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: signal arrived while stepping over "
                                "breakpoint\n");

          insert_step_resume_breakpoint_at_frame (frame);
          ecs->event_thread->step_after_step_resume_breakpoint = 1;
          keep_going (ecs);
          return;
        }

      if (ecs->event_thread->step_range_end != 0
          && ecs->event_thread->stop_signal != TARGET_SIGNAL_0
          && (ecs->event_thread->step_range_start <= stop_pc
              && stop_pc < ecs->event_thread->step_range_end)
          && frame_id_eq (get_stack_frame_id (frame),
                          ecs->event_thread->step_stack_frame_id)
          && ecs->event_thread->step_resume_breakpoint == NULL)
        {
          /* The inferior is about to take a signal that will take it
             out of the single step range.  Set a breakpoint at the
             current PC (which is presumably where the signal handler
             will eventually return) and then allow the inferior to
             run free.

             Note that this is only needed for a signal delivered
             while in the single-step range.  Nested signals aren't a
             problem as they eventually all return.  */
          if (debug_infrun)
            fprintf_unfiltered (gdb_stdlog,
                                "infrun: signal may take us out of "
                                "single-step range\n");

          insert_step_resume_breakpoint_at_frame (frame);
          keep_going (ecs);
          return;
        }

      /* Note: step_resume_breakpoint may be non-NULL.  This occurs
         when either there's a nested signal, or when there's a
         pending signal enabled just as the signal handler returns
         (leaving the inferior at the step-resume-breakpoint without
         actually executing it).  Either way continue until the
         breakpoint is really hit.  */
      keep_going (ecs);
      return;
    }
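
  /* The random-signal path above is driven by the per-signal tables
     maintained by the "handle" command (commentary, not original to
     infrun.c): signal_print[] decides whether the signal is announced,
     signal_stop (queried via signal_stop_state) decides whether
     control returns to the user, and signal_program[] decides whether
     the signal is passed back to the inferior on resume.  A signal
     that is neither stopped on nor passed is effectively swallowed,
     and any in-progress step is preserved by planting a step-resume
     breakpoint before letting the handler run.  */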

  /* Handle cases caused by hitting a breakpoint.  */
  {
    CORE_ADDR jmp_buf_pc;
    struct bpstat_what what;

    what = bpstat_what (ecs->event_thread->stop_bpstat);

    if (what.call_dummy)
      {
        stop_stack_dummy = what.call_dummy;
      }

    /* If we hit an internal event that triggers symbol changes, the
       current frame will be invalidated within bpstat_what (e.g., if
       we hit an internal solib event).  Re-fetch it.  */
    frame = get_current_frame ();
    gdbarch = get_frame_arch (frame);

    switch (what.main_action)
      {
      case BPSTAT_WHAT_SET_LONGJMP_RESUME:
        /* If we hit the breakpoint at longjmp while stepping, we
           install a momentary breakpoint at the target of the
           jmp_buf.  */

        if (debug_infrun)
          fprintf_unfiltered (gdb_stdlog,
                              "infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME\n");

        ecs->event_thread->stepping_over_breakpoint = 1;

        if (!gdbarch_get_longjmp_target_p (gdbarch)
            || !gdbarch_get_longjmp_target (gdbarch, frame, &jmp_buf_pc))
          {
            if (debug_infrun)
              fprintf_unfiltered (gdb_stdlog, "\
infrun: BPSTAT_WHAT_SET_LONGJMP_RESUME (!gdbarch_get_longjmp_target)\n");
            keep_going (ecs);
            return;
          }

        /* We're going to replace the current step-resume breakpoint
           with a longjmp-resume breakpoint.  */
        delete_step_resume_breakpoint (ecs->event_thread);

        /* Insert a breakpoint at resume address.  */
        insert_longjmp_resume_breakpoint (gdbarch, jmp_buf_pc);

        keep_going (ecs);
        return;

      case BPSTAT_WHAT_CLEAR_LONGJMP_RESUME:
        if (debug_infrun)
          fprintf_unfiltered (gdb_stdlog,
                              "infrun: BPSTAT_WHAT_CLEAR_LONGJMP_RESUME\n");

        gdb_assert (ecs->event_thread->step_resume_breakpoint != NULL);
        delete_step_resume_breakpoint (ecs->event_thread);

        ecs->event_thread->stop_step = 1;
        print_stop_reason (END_STEPPING_RANGE, 0);
        stop_stepping (ecs);
        return;

      case BPSTAT_WHAT_SINGLE:
        if (debug_infrun)
          fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_SINGLE\n");
        ecs->event_thread->stepping_over_breakpoint = 1;
        /* Still need to check other stuff, at least the case
           where we are stepping and step out of the right range.  */
        break;

      case BPSTAT_WHAT_STOP_NOISY:
        if (debug_infrun)
          fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_NOISY\n");
        stop_print_frame = 1;

        /* We are about to nuke the step_resume_breakpoint via the
           cleanup chain, so no need to worry about it here.  */

        stop_stepping (ecs);
        return;

      case BPSTAT_WHAT_STOP_SILENT:
        if (debug_infrun)
          fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STOP_SILENT\n");
        stop_print_frame = 0;

        /* We are about to nuke the step_resume_breakpoint via the
           cleanup chain, so no need to worry about it here.  */

        stop_stepping (ecs);
        return;

      case BPSTAT_WHAT_STEP_RESUME:
        if (debug_infrun)
          fprintf_unfiltered (gdb_stdlog, "infrun: BPSTAT_WHAT_STEP_RESUME\n");

        delete_step_resume_breakpoint (ecs->event_thread);
        if (ecs->event_thread->step_after_step_resume_breakpoint)
          {
            /* Back when the step-resume breakpoint was inserted, we
               were trying to single-step off a breakpoint.  Go back
               to doing that.  */
            ecs->event_thread->step_after_step_resume_breakpoint = 0;
            ecs->event_thread->stepping_over_breakpoint = 1;
            keep_going (ecs);
            return;
          }
        if (stop_pc == ecs->stop_func_start
            && execution_direction == EXEC_REVERSE)
          {
            /* We are stepping over a function call in reverse, and
               just hit the step-resume breakpoint at the start
               address of the function.  Go back to single-stepping,
               which should take us back to the function call.  */
            ecs->event_thread->stepping_over_breakpoint = 1;
            keep_going (ecs);
            return;
          }
        break;

      case BPSTAT_WHAT_KEEP_CHECKING:
        break;
      }
  }
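
  /* Quick reference for the main_action cases above (commentary, not
     original to infrun.c):
     SET_LONGJMP_RESUME   - swap the step-resume breakpoint for a
                            longjmp-resume breakpoint at the jmp_buf PC
                            and keep going;
     CLEAR_LONGJMP_RESUME - the longjmp target was reached; end the
                            stepping range and stop;
     SINGLE               - step over the breakpoint, then keep running
                            the stepping tests below;
     STOP_NOISY / SILENT  - stop, with or without printing the frame;
     STEP_RESUME          - the step-resume breakpoint fired; resume the
                            interrupted step-off-breakpoint or reverse
                            step-over, otherwise keep checking;
     KEEP_CHECKING        - nothing to do here; fall through.  */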
4174
 
4175
  /* We come here if we hit a breakpoint but should not
4176
     stop for it.  Possibly we also were stepping
4177
     and should stop for that.  So fall through and
4178
     test for stepping.  But, if not stepping,
4179
     do not stop.  */
4180
 
4181
  /* In all-stop mode, if we're currently stepping but have stopped in
4182
     some other thread, we need to switch back to the stepped thread.  */
4183
  if (!non_stop)
4184
    {
4185
      struct thread_info *tp;
4186
 
4187
      tp = iterate_over_threads (currently_stepping_or_nexting_callback,
4188
                                 ecs->event_thread);
4189
      if (tp)
4190
        {
4191
          /* However, if the current thread is blocked on some internal
4192
             breakpoint, and we simply need to step over that breakpoint
4193
             to get it going again, do that first.  */
4194
          if ((ecs->event_thread->trap_expected
4195
               && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
4196
              || ecs->event_thread->stepping_over_breakpoint)
4197
            {
4198
              keep_going (ecs);
4199
              return;
4200
            }
4201
 
4202
          /* If the stepping thread exited, then don't try to switch
4203
             back and resume it, which could fail in several different
4204
             ways depending on the target.  Instead, just keep going.
4205
 
4206
             We can find a stepping dead thread in the thread list in
4207
             two cases:
4208
 
4209
             - The target supports thread exit events, and when the
4210
             target tries to delete the thread from the thread list,
4211
             inferior_ptid pointed at the exiting thread.  In such a
4212
             case, calling delete_thread does not really remove the
4213
             thread from the list; instead, the thread is left listed,
4214
             with 'exited' state.
4215
 
4216
             - The target's debug interface does not support thread
4217
             exit events, and so we have no idea whatsoever if the
4218
             previously stepping thread is still alive.  For that
4219
             reason, we need to synchronously query the target
4220
             now.  */
4221
          if (is_exited (tp->ptid)
4222
              || !target_thread_alive (tp->ptid))
4223
            {
4224
              if (debug_infrun)
4225
                fprintf_unfiltered (gdb_stdlog, "\
4226
infrun: not switching back to stepped thread, it has vanished\n");
4227
 
4228
              delete_thread (tp->ptid);
4229
              keep_going (ecs);
4230
              return;
4231
            }
4232
 
4233
          /* Otherwise, we no longer expect a trap in the current thread.
4234
             Clear the trap_expected flag before switching back -- this is
4235
             what keep_going would do as well, if we called it.  */
4236
          ecs->event_thread->trap_expected = 0;
4237
 
4238
          if (debug_infrun)
4239
            fprintf_unfiltered (gdb_stdlog,
4240
                                "infrun: switching back to stepped thread\n");
4241
 
4242
          ecs->event_thread = tp;
4243
          ecs->ptid = tp->ptid;
4244
          context_switch (ecs->ptid);
4245
          keep_going (ecs);
4246
          return;
4247
        }
4248
    }
4249
 
4250
  /* Are we stepping to get the inferior out of the dynamic linker's
4251
     hook (and possibly the dld itself) after catching a shlib
4252
     event?  */
4253
  if (ecs->event_thread->stepping_through_solib_after_catch)
4254
    {
4255
#if defined(SOLIB_ADD)
4256
      /* Have we reached our destination?  If not, keep going. */
4257
      if (SOLIB_IN_DYNAMIC_LINKER (PIDGET (ecs->ptid), stop_pc))
4258
        {
4259
          if (debug_infrun)
4260
            fprintf_unfiltered (gdb_stdlog, "infrun: stepping in dynamic linker\n");
4261
          ecs->event_thread->stepping_over_breakpoint = 1;
4262
          keep_going (ecs);
4263
          return;
4264
        }
4265
#endif
4266
      if (debug_infrun)
4267
         fprintf_unfiltered (gdb_stdlog, "infrun: step past dynamic linker\n");
4268
      /* Else, stop and report the catchpoint(s) whose triggering
4269
         caused us to begin stepping. */
4270
      ecs->event_thread->stepping_through_solib_after_catch = 0;
4271
      bpstat_clear (&ecs->event_thread->stop_bpstat);
4272
      ecs->event_thread->stop_bpstat
4273
        = bpstat_copy (ecs->event_thread->stepping_through_solib_catchpoints);
4274
      bpstat_clear (&ecs->event_thread->stepping_through_solib_catchpoints);
4275
      stop_print_frame = 1;
4276
      stop_stepping (ecs);
4277
      return;
4278
    }
4279
 
4280
  if (ecs->event_thread->step_resume_breakpoint)
4281
    {
4282
      if (debug_infrun)
4283
         fprintf_unfiltered (gdb_stdlog,
4284
                             "infrun: step-resume breakpoint is inserted\n");
4285
 
4286
      /* Having a step-resume breakpoint overrides anything
4287
         else having to do with stepping commands until
4288
         that breakpoint is reached.  */
4289
      keep_going (ecs);
4290
      return;
4291
    }
4292
 
4293
  if (ecs->event_thread->step_range_end == 0)
4294
    {
4295
      if (debug_infrun)
4296
         fprintf_unfiltered (gdb_stdlog, "infrun: no stepping, continue\n");
4297
      /* Likewise if we aren't even stepping.  */
4298
      keep_going (ecs);
4299
      return;
4300
    }
4301
 
4302
  /* Re-fetch current thread's frame in case the code above caused
4303
     the frame cache to be re-initialized, making our FRAME variable
4304
     a dangling pointer.  */
4305
  frame = get_current_frame ();
4306
  gdbarch = get_frame_arch (frame);
4307
 
4308
  /* If stepping through a line, keep going if still within it.
4309
 
4310
     Note that step_range_end is the address of the first instruction
4311
     beyond the step range, and NOT the address of the last instruction
4312
     within it!
4313
 
4314
     Note also that during reverse execution, we may be stepping
4315
     through a function epilogue and therefore must detect when
4316
     the current-frame changes in the middle of a line.  */
4317
 
4318
  if (stop_pc >= ecs->event_thread->step_range_start
4319
      && stop_pc < ecs->event_thread->step_range_end
4320
      && (execution_direction != EXEC_REVERSE
4321
          || frame_id_eq (get_frame_id (frame),
4322
                          ecs->event_thread->step_frame_id)))
4323
    {
4324
      if (debug_infrun)
4325
        fprintf_unfiltered
4326
          (gdb_stdlog, "infrun: stepping inside range [%s-%s]\n",
4327
           paddress (gdbarch, ecs->event_thread->step_range_start),
4328
           paddress (gdbarch, ecs->event_thread->step_range_end));
4329
 
4330
      /* When stepping backward, stop at beginning of line range
4331
         (unless it's the function entry point, in which case
4332
         keep going back to the call point).  */
4333
      if (stop_pc == ecs->event_thread->step_range_start
4334
          && stop_pc != ecs->stop_func_start
4335
          && execution_direction == EXEC_REVERSE)
4336
        {
4337
          ecs->event_thread->stop_step = 1;
4338
          print_stop_reason (END_STEPPING_RANGE, 0);
4339
          stop_stepping (ecs);
4340
        }
4341
      else
4342
        keep_going (ecs);
4343
 
4344
      return;
4345
    }
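
  /* Worked example of the range test above (hypothetical addresses):
     if the current source line spans [0x1000, 0x100c), then
     step_range_start == 0x1000 and step_range_end == 0x100c, and a
     stop at 0x100c already lies outside the line, because
     step_range_end is the first instruction *beyond* the range, as
     noted above.  Illustration only; the real values come from the
     line table via find_pc_line.  */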
4346
 
4347
  /* We stepped out of the stepping range.  */
4348
 
4349
  /* If we are stepping at the source level and entered the runtime
4350
     loader dynamic symbol resolution code...
4351
 
4352
     EXEC_FORWARD: we keep on single stepping until we exit the run
4353
     time loader code and reach the callee's address.
4354
 
4355
     EXEC_REVERSE: we've already executed the callee (backward), and
4356
     the runtime loader code is handled just like any other
4357
     undebuggable function call.  Now we need only keep stepping
4358
     backward through the trampoline code, and that's handled further
4359
     down, so there is nothing for us to do here.  */
4360
 
4361
  if (execution_direction != EXEC_REVERSE
4362
      && ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4363
      && in_solib_dynsym_resolve_code (stop_pc))
4364
    {
4365
      CORE_ADDR pc_after_resolver =
4366
        gdbarch_skip_solib_resolver (gdbarch, stop_pc);
4367
 
4368
      if (debug_infrun)
4369
         fprintf_unfiltered (gdb_stdlog, "infrun: stepped into dynsym resolve code\n");
4370
 
4371
      if (pc_after_resolver)
4372
        {
4373
          /* Set up a step-resume breakpoint at the address
4374
             indicated by SKIP_SOLIB_RESOLVER.  */
4375
          struct symtab_and_line sr_sal;
4376
 
4377
          init_sal (&sr_sal);
4378
          sr_sal.pc = pc_after_resolver;
4379
          sr_sal.pspace = get_frame_program_space (frame);
4380
 
4381
          insert_step_resume_breakpoint_at_sal (gdbarch,
4382
                                                sr_sal, null_frame_id);
4383
        }
4384
 
4385
      keep_going (ecs);
4386
      return;
4387
    }
4388
 
4389
  if (ecs->event_thread->step_range_end != 1
4390
      && (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4391
          || ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4392
      && get_frame_type (frame) == SIGTRAMP_FRAME)
4393
    {
4394
      if (debug_infrun)
4395
         fprintf_unfiltered (gdb_stdlog, "infrun: stepped into signal trampoline\n");
4396
      /* The inferior, while doing a "step" or "next", has ended up in
4397
         a signal trampoline (either by a signal being delivered or by
4398
         the signal handler returning).  Just single-step until the
4399
         inferior leaves the trampoline (either by calling the handler
4400
         or returning).  */
4401
      keep_going (ecs);
4402
      return;
4403
    }
4404
 
4405
  /* Check for subroutine calls.  The check for the current frame
4406
     equalling the step ID is not necessary - the check of the
4407
     previous frame's ID is sufficient - but it is a common case and
4408
     cheaper than checking the previous frame's ID.
4409
 
4410
     NOTE: frame_id_eq will never report two invalid frame IDs as
4411
     being equal, so to get into this block, both the current and
4412
     previous frame must have valid frame IDs.  */
4413
  /* The outer_frame_id check is a heuristic to detect stepping
4414
     through startup code.  If we step over an instruction which
4415
     sets the stack pointer from an invalid value to a valid value,
4416
     we may detect that as a subroutine call from the mythical
4417
     "outermost" function.  This could be fixed by marking
4418
     outermost frames as !stack_p,code_p,special_p.  Then the
4419
     initial outermost frame, before sp was valid, would
4420
     have code_addr == &_start.  See the comment in frame_id_eq
4421
     for more.  */
4422
  if (!frame_id_eq (get_stack_frame_id (frame),
4423
                    ecs->event_thread->step_stack_frame_id)
4424
      && (frame_id_eq (frame_unwind_caller_id (get_current_frame ()),
4425
                       ecs->event_thread->step_stack_frame_id)
4426
          && (!frame_id_eq (ecs->event_thread->step_stack_frame_id,
4427
                            outer_frame_id)
4428
              || step_start_function != find_pc_function (stop_pc))))
4429
    {
4430
      CORE_ADDR real_stop_pc;
4431
 
4432
      if (debug_infrun)
4433
         fprintf_unfiltered (gdb_stdlog, "infrun: stepped into subroutine\n");
4434
 
4435
      if ((ecs->event_thread->step_over_calls == STEP_OVER_NONE)
4436
          || ((ecs->event_thread->step_range_end == 1)
4437
              && in_prologue (gdbarch, ecs->event_thread->prev_pc,
4438
                              ecs->stop_func_start)))
4439
        {
4440
          /* I presume that step_over_calls is only 0 when we're
4441
             supposed to be stepping at the assembly language level
4442
             ("stepi").  Just stop.  */
4443
          /* Also, maybe we just did a "nexti" inside a prolog, so we
4444
             thought it was a subroutine call but it was not.  Stop as
4445
             well.  FENN */
4446
          /* And this works the same backward as frontward.  MVS */
4447
          ecs->event_thread->stop_step = 1;
4448
          print_stop_reason (END_STEPPING_RANGE, 0);
4449
          stop_stepping (ecs);
4450
          return;
4451
        }
4452
 
4453
      /* Reverse stepping through solib trampolines.  */
4454
 
4455
      if (execution_direction == EXEC_REVERSE
4456
          && ecs->event_thread->step_over_calls != STEP_OVER_NONE
4457
          && (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4458
              || (ecs->stop_func_start == 0
4459
                  && in_solib_dynsym_resolve_code (stop_pc))))
4460
        {
4461
          /* Any solib trampoline code can be handled in reverse
4462
             by simply continuing to single-step.  We have already
4463
             executed the solib function (backwards), and a few
4464
             steps will take us back through the trampoline to the
4465
             caller.  */
4466
          keep_going (ecs);
4467
          return;
4468
        }
4469
 
4470
      if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4471
        {
4472
          /* We're doing a "next".
4473
 
4474
             Normal (forward) execution: set a breakpoint at the
4475
             callee's return address (the address at which the caller
4476
             will resume).
4477
 
4478
             Reverse (backward) execution: set the step-resume
4479
             breakpoint at the start of the function that we just
4480
             stepped into (backwards), and continue to there.  When we
4481
             get there, we'll need to single-step back to the caller.  */
4482
 
4483
          if (execution_direction == EXEC_REVERSE)
4484
            {
4485
              struct symtab_and_line sr_sal;
4486
 
4487
              /* Normal function call return (static or dynamic).  */
4488
              init_sal (&sr_sal);
4489
              sr_sal.pc = ecs->stop_func_start;
4490
              sr_sal.pspace = get_frame_program_space (frame);
4491
              insert_step_resume_breakpoint_at_sal (gdbarch,
4492
                                                    sr_sal, null_frame_id);
4493
            }
4494
          else
4495
            insert_step_resume_breakpoint_at_caller (frame);
4496
 
4497
          keep_going (ecs);
4498
          return;
4499
        }
4500
 
4501
      /* If we are in a function call trampoline (a stub between the
4502
         calling routine and the real function), locate the real
4503
         function.  That's what tells us (a) whether we want to step
4504
         into it at all, and (b) what prologue we want to run to the
4505
         end of, if we do step into it.  */
4506
      real_stop_pc = skip_language_trampoline (frame, stop_pc);
4507
      if (real_stop_pc == 0)
4508
        real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4509
      if (real_stop_pc != 0)
4510
        ecs->stop_func_start = real_stop_pc;
4511
 
4512
      if (real_stop_pc != 0 && in_solib_dynsym_resolve_code (real_stop_pc))
4513
        {
4514
          struct symtab_and_line sr_sal;
4515
 
4516
          init_sal (&sr_sal);
4517
          sr_sal.pc = ecs->stop_func_start;
4518
          sr_sal.pspace = get_frame_program_space (frame);
4519
 
4520
          insert_step_resume_breakpoint_at_sal (gdbarch,
4521
                                                sr_sal, null_frame_id);
4522
          keep_going (ecs);
4523
          return;
4524
        }
4525
 
4526
      /* If we have line number information for the function we are
4527
         thinking of stepping into, step into it.
4528
 
4529
         If there are several symtabs at that PC (e.g. with include
4530
         files), we just want to know whether *any* of them have line
4531
         numbers.  find_pc_line handles this.  */
4532
      {
4533
        struct symtab_and_line tmp_sal;
4534
 
4535
        tmp_sal = find_pc_line (ecs->stop_func_start, 0);
4536
        tmp_sal.pspace = get_frame_program_space (frame);
4537
        if (tmp_sal.line != 0)
4538
          {
4539
            if (execution_direction == EXEC_REVERSE)
4540
              handle_step_into_function_backward (gdbarch, ecs);
4541
            else
4542
              handle_step_into_function (gdbarch, ecs);
4543
            return;
4544
          }
4545
      }
4546
 
4547
      /* If we have no line number and the step-stop-if-no-debug is
4548
         set, we stop the step so that the user has a chance to switch
4549
         to assembly mode.  */
4550
      if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4551
          && step_stop_if_no_debug)
4552
        {
4553
          ecs->event_thread->stop_step = 1;
4554
          print_stop_reason (END_STEPPING_RANGE, 0);
4555
          stop_stepping (ecs);
4556
          return;
4557
        }
4558
 
4559
      if (execution_direction == EXEC_REVERSE)
4560
        {
4561
          /* Set a breakpoint at callee's start address.
4562
             From there we can step once and be back in the caller.  */
4563
          struct symtab_and_line sr_sal;
4564
 
4565
          init_sal (&sr_sal);
4566
          sr_sal.pc = ecs->stop_func_start;
4567
          sr_sal.pspace = get_frame_program_space (frame);
4568
          insert_step_resume_breakpoint_at_sal (gdbarch,
4569
                                                sr_sal, null_frame_id);
4570
        }
4571
      else
4572
        /* Set a breakpoint at callee's return address (the address
4573
           at which the caller will resume).  */
4574
        insert_step_resume_breakpoint_at_caller (frame);
4575
 
4576
      keep_going (ecs);
4577
      return;
4578
    }
4579
 
4580
  /* Reverse stepping through solib trampolines.  */
4581
 
4582
  if (execution_direction == EXEC_REVERSE
4583
      && ecs->event_thread->step_over_calls != STEP_OVER_NONE)
4584
    {
4585
      if (gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc)
4586
          || (ecs->stop_func_start == 0
4587
              && in_solib_dynsym_resolve_code (stop_pc)))
4588
        {
4589
          /* Any solib trampoline code can be handled in reverse
4590
             by simply continuing to single-step.  We have already
4591
             executed the solib function (backwards), and a few
4592
             steps will take us back through the trampoline to the
4593
             caller.  */
4594
          keep_going (ecs);
4595
          return;
4596
        }
4597
      else if (in_solib_dynsym_resolve_code (stop_pc))
4598
        {
4599
          /* Stepped backward into the solib dynsym resolver.
4600
             Set a breakpoint at its start and continue, then
4601
             one more step will take us out.  */
4602
          struct symtab_and_line sr_sal;
4603
 
4604
          init_sal (&sr_sal);
4605
          sr_sal.pc = ecs->stop_func_start;
4606
          sr_sal.pspace = get_frame_program_space (frame);
4607
          insert_step_resume_breakpoint_at_sal (gdbarch,
4608
                                                sr_sal, null_frame_id);
4609
          keep_going (ecs);
4610
          return;
4611
        }
4612
    }
4613
 
4614
  /* If we're in the return path from a shared library trampoline,
4615
     we want to proceed through the trampoline when stepping.  */
4616
  if (gdbarch_in_solib_return_trampoline (gdbarch,
4617
                                          stop_pc, ecs->stop_func_name))
4618
    {
4619
      /* Determine where this trampoline returns.  */
4620
      CORE_ADDR real_stop_pc;
4621
 
4622
      real_stop_pc = gdbarch_skip_trampoline_code (gdbarch, frame, stop_pc);
4623
 
4624
      if (debug_infrun)
4625
         fprintf_unfiltered (gdb_stdlog, "infrun: stepped into solib return tramp\n");
4626
 
4627
      /* Only proceed through if we know where it's going.  */
4628
      if (real_stop_pc)
4629
        {
4630
          /* And put the step-breakpoint there and go until there. */
4631
          struct symtab_and_line sr_sal;
4632
 
4633
          init_sal (&sr_sal);   /* initialize to zeroes */
4634
          sr_sal.pc = real_stop_pc;
4635
          sr_sal.section = find_pc_overlay (sr_sal.pc);
4636
          sr_sal.pspace = get_frame_program_space (frame);
4637
 
4638
          /* Do not specify what the fp should be when we stop since
4639
             on some machines the prologue is where the new fp value
4640
             is established.  */
4641
          insert_step_resume_breakpoint_at_sal (gdbarch,
4642
                                                sr_sal, null_frame_id);
4643
 
4644
          /* Restart without fiddling with the step ranges or
4645
             other state.  */
4646
          keep_going (ecs);
4647
          return;
4648
        }
4649
    }
4650
 
4651
  stop_pc_sal = find_pc_line (stop_pc, 0);
4652
 
4653
  /* NOTE: tausq/2004-05-24: This if block used to be done before all
4654
      the trampoline processing logic; however, there are some trampolines
4655
     that have no names, so we should do trampoline handling first.  */
4656
  if (ecs->event_thread->step_over_calls == STEP_OVER_UNDEBUGGABLE
4657
      && ecs->stop_func_name == NULL
4658
      && stop_pc_sal.line == 0)
4659
    {
4660
      if (debug_infrun)
4661
         fprintf_unfiltered (gdb_stdlog, "infrun: stepped into undebuggable function\n");
4662
 
4663
      /* The inferior just stepped into, or returned to, an
4664
         undebuggable function (where there is no debugging information
4665
         and no line number corresponding to the address where the
4666
         inferior stopped).  Since we want to skip this kind of code,
4667
         we keep going until the inferior returns from this
4668
         function - unless the user has asked us not to (via
4669
         set step-mode) or we no longer know how to get back
4670
         to the call site.  */
4671
      if (step_stop_if_no_debug
4672
          || !frame_id_p (frame_unwind_caller_id (frame)))
4673
        {
4674
          /* If we have no line number and the step-stop-if-no-debug
4675
             is set, we stop the step so that the user has a chance to
4676
             switch to assembly mode.  */
4677
          ecs->event_thread->stop_step = 1;
4678
          print_stop_reason (END_STEPPING_RANGE, 0);
4679
          stop_stepping (ecs);
4680
          return;
4681
        }
4682
      else
4683
        {
4684
          /* Set a breakpoint at callee's return address (the address
4685
             at which the caller will resume).  */
4686
          insert_step_resume_breakpoint_at_caller (frame);
4687
          keep_going (ecs);
4688
          return;
4689
        }
4690
    }
4691
 
4692
  if (ecs->event_thread->step_range_end == 1)
4693
    {
4694
      /* It is stepi or nexti.  We always want to stop stepping after
4695
         one instruction.  */
4696
      if (debug_infrun)
4697
         fprintf_unfiltered (gdb_stdlog, "infrun: stepi/nexti\n");
4698
      ecs->event_thread->stop_step = 1;
4699
      print_stop_reason (END_STEPPING_RANGE, 0);
4700
      stop_stepping (ecs);
4701
      return;
4702
    }
4703
 
4704
  if (stop_pc_sal.line == 0)
4705
    {
4706
      /* We have no line number information.  That means to stop
4707
         stepping (does this always happen right after one instruction,
4708
         when we do "s" in a function with no line numbers,
4709
         or can this happen as a result of a return or longjmp?).  */
4710
      if (debug_infrun)
4711
         fprintf_unfiltered (gdb_stdlog, "infrun: no line number info\n");
4712
      ecs->event_thread->stop_step = 1;
4713
      print_stop_reason (END_STEPPING_RANGE, 0);
4714
      stop_stepping (ecs);
4715
      return;
4716
    }
4717
 
4718
  /* Look for "calls" to inlined functions, part one.  If the inline
4719
     frame machinery detected some skipped call sites, we have entered
4720
     a new inline function.  */
4721
 
4722
  if (frame_id_eq (get_frame_id (get_current_frame ()),
4723
                   ecs->event_thread->step_frame_id)
4724
      && inline_skipped_frames (ecs->ptid))
4725
    {
4726
      struct symtab_and_line call_sal;
4727
 
4728
      if (debug_infrun)
4729
        fprintf_unfiltered (gdb_stdlog,
4730
                            "infrun: stepped into inlined function\n");
4731
 
4732
      find_frame_sal (get_current_frame (), &call_sal);
4733
 
4734
      if (ecs->event_thread->step_over_calls != STEP_OVER_ALL)
4735
        {
4736
          /* For "step", we're going to stop.  But if the call site
4737
             for this inlined function is on the same source line as
4738
             we were previously stepping, go down into the function
4739
             first.  Otherwise stop at the call site.  */
4740
 
4741
          if (call_sal.line == ecs->event_thread->current_line
4742
              && call_sal.symtab == ecs->event_thread->current_symtab)
4743
            step_into_inline_frame (ecs->ptid);
4744
 
4745
          ecs->event_thread->stop_step = 1;
4746
          print_stop_reason (END_STEPPING_RANGE, 0);
4747
          stop_stepping (ecs);
4748
          return;
4749
        }
4750
      else
4751
        {
4752
          /* For "next", we should stop at the call site if it is on a
4753
             different source line.  Otherwise continue through the
4754
             inlined function.  */
4755
          if (call_sal.line == ecs->event_thread->current_line
4756
              && call_sal.symtab == ecs->event_thread->current_symtab)
4757
            keep_going (ecs);
4758
          else
4759
            {
4760
              ecs->event_thread->stop_step = 1;
4761
              print_stop_reason (END_STEPPING_RANGE, 0);
4762
              stop_stepping (ecs);
4763
            }
4764
          return;
4765
        }
4766
    }
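
  /* A concrete (purely hypothetical) source-level scenario for the
     branch above:

         static inline int bump (int x) { return x + 1; }
         ...
         y = bump (y);        /- line 42 -/

     If the user was stepping on line 42 when bump was inlined here,
     "step" descends into bump via step_into_inline_frame and then
     stops; otherwise it stops at the call site.  For "next", the
     same-line case keeps going through the inlined body instead.
     This only restates the checks on call_sal.line/symtab above.  */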
4767
 
4768
  /* Look for "calls" to inlined functions, part two.  If we are still
4769
     in the same real function we were stepping through, but we have
4770
     to go further up to find the exact frame ID, we are stepping
4771
     through a more inlined call beyond its call site.  */
4772
 
4773
  if (get_frame_type (get_current_frame ()) == INLINE_FRAME
4774
      && !frame_id_eq (get_frame_id (get_current_frame ()),
4775
                       ecs->event_thread->step_frame_id)
4776
      && stepped_in_from (get_current_frame (),
4777
                          ecs->event_thread->step_frame_id))
4778
    {
4779
      if (debug_infrun)
4780
        fprintf_unfiltered (gdb_stdlog,
4781
                            "infrun: stepping through inlined function\n");
4782
 
4783
      if (ecs->event_thread->step_over_calls == STEP_OVER_ALL)
4784
        keep_going (ecs);
4785
      else
4786
        {
4787
          ecs->event_thread->stop_step = 1;
4788
          print_stop_reason (END_STEPPING_RANGE, 0);
4789
          stop_stepping (ecs);
4790
        }
4791
      return;
4792
    }
4793
 
4794
  if ((stop_pc == stop_pc_sal.pc)
4795
      && (ecs->event_thread->current_line != stop_pc_sal.line
4796
          || ecs->event_thread->current_symtab != stop_pc_sal.symtab))
4797
    {
4798
      /* We are at the start of a different line.  So stop.  Note that
4799
         we don't stop if we step into the middle of a different line.
4800
         That is said to make things like for (;;) statements work
4801
         better.  */
4802
      if (debug_infrun)
4803
         fprintf_unfiltered (gdb_stdlog, "infrun: stepped to a different line\n");
4804
      ecs->event_thread->stop_step = 1;
4805
      print_stop_reason (END_STEPPING_RANGE, 0);
4806
      stop_stepping (ecs);
4807
      return;
4808
    }
4809
 
4810
  /* We aren't done stepping.
4811
 
4812
     Optimize by setting the stepping range to the line.
4813
     (We might not be in the original line, but if we entered a
4814
     new line in mid-statement, we continue stepping.  This makes
4815
     things like for(;;) statements work better.)  */
4816
 
4817
  ecs->event_thread->step_range_start = stop_pc_sal.pc;
4818
  ecs->event_thread->step_range_end = stop_pc_sal.end;
4819
  set_step_info (frame, stop_pc_sal);
4820
 
4821
  if (debug_infrun)
4822
     fprintf_unfiltered (gdb_stdlog, "infrun: keep going\n");
4823
  keep_going (ecs);
4824
}
4825
 
4826
/* Is thread TP in the middle of single-stepping?  */
4827
 
4828
static int
4829
currently_stepping (struct thread_info *tp)
4830
{
4831
  return ((tp->step_range_end && tp->step_resume_breakpoint == NULL)
4832
          || tp->trap_expected
4833
          || tp->stepping_through_solib_after_catch
4834
          || bpstat_should_step ());
4835
}
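
/* For illustration: the value of this predicate becomes the "step"
   argument of resume (), as in keep_going () further below:

       resume (currently_stepping (ecs->event_thread),
               ecs->event_thread->stop_signal);

   i.e. a nonzero result requests a single-step resume rather than a
   plain continue.  (Sketch of existing usage, not a new code path.)  */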
4836
 
4837
/* Returns true if any thread *but* the one passed in "data" is in the
4838
   middle of stepping or of handling a "next".  */
4839
 
4840
static int
4841
currently_stepping_or_nexting_callback (struct thread_info *tp, void *data)
4842
{
4843
  if (tp == data)
4844
    return 0;
4845
 
4846
  return (tp->step_range_end
4847
          || tp->trap_expected
4848
          || tp->stepping_through_solib_after_catch);
4849
}
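
/* Typical use, copied from the all-stop logic earlier in
   handle_inferior_event (existing usage, shown only for context):

       tp = iterate_over_threads (currently_stepping_or_nexting_callback,
                                  ecs->event_thread);

   which yields some *other* thread that still has a step/next in
   progress, or NULL if there is none.  */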
4850
 
4851
/* Inferior has stepped into a subroutine call with source code that
4852
   we should not step over.  Do step to the first line of code in
4853
   it.  */
4854
 
4855
static void
4856
handle_step_into_function (struct gdbarch *gdbarch,
4857
                           struct execution_control_state *ecs)
4858
{
4859
  struct symtab *s;
4860
  struct symtab_and_line stop_func_sal, sr_sal;
4861
 
4862
  s = find_pc_symtab (stop_pc);
4863
  if (s && s->language != language_asm)
4864
    ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4865
                                                  ecs->stop_func_start);
4866
 
4867
  stop_func_sal = find_pc_line (ecs->stop_func_start, 0);
4868
  /* Use the step_resume_break to step until the end of the prologue,
4869
     even if that involves jumps (as it seems to on the vax under
4870
     4.2).  */
4871
  /* If the prologue ends in the middle of a source line, continue to
4872
     the end of that source line (if it is still within the function).
4873
     Otherwise, just go to end of prologue.  */
4874
  if (stop_func_sal.end
4875
      && stop_func_sal.pc != ecs->stop_func_start
4876
      && stop_func_sal.end < ecs->stop_func_end)
4877
    ecs->stop_func_start = stop_func_sal.end;
4878
 
4879
  /* Architectures which require breakpoint adjustment might not be able
4880
     to place a breakpoint at the computed address.  If so, the test
4881
     ``ecs->stop_func_start == stop_pc'' will never succeed.  Adjust
4882
     ecs->stop_func_start to an address at which a breakpoint may be
4883
     legitimately placed.
4884
 
4885
     Note:  kevinb/2004-01-19:  On FR-V, if this adjustment is not
4886
     made, GDB will enter an infinite loop when stepping through
4887
     optimized code consisting of VLIW instructions which contain
4888
     subinstructions corresponding to different source lines.  On
4889
     FR-V, it's not permitted to place a breakpoint on any but the
4890
     first subinstruction of a VLIW instruction.  When a breakpoint is
4891
     set, GDB will adjust the breakpoint address to the beginning of
4892
     the VLIW instruction.  Thus, we need to make the corresponding
4893
     adjustment here when computing the stop address.  */
4894
 
4895
  if (gdbarch_adjust_breakpoint_address_p (gdbarch))
4896
    {
4897
      ecs->stop_func_start
4898
        = gdbarch_adjust_breakpoint_address (gdbarch,
4899
                                             ecs->stop_func_start);
4900
    }
4901
 
4902
  if (ecs->stop_func_start == stop_pc)
4903
    {
4904
      /* We are already there: stop now.  */
4905
      ecs->event_thread->stop_step = 1;
4906
      print_stop_reason (END_STEPPING_RANGE, 0);
4907
      stop_stepping (ecs);
4908
      return;
4909
    }
4910
  else
4911
    {
4912
      /* Put the step-breakpoint there and go until there.  */
4913
      init_sal (&sr_sal);       /* initialize to zeroes */
4914
      sr_sal.pc = ecs->stop_func_start;
4915
      sr_sal.section = find_pc_overlay (ecs->stop_func_start);
4916
      sr_sal.pspace = get_frame_program_space (get_current_frame ());
4917
 
4918
      /* Do not specify what the fp should be when we stop since on
4919
         some machines the prologue is where the new fp value is
4920
         established.  */
4921
      insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal, null_frame_id);
4922
 
4923
      /* And make sure stepping stops right away then.  */
4924
      ecs->event_thread->step_range_end = ecs->event_thread->step_range_start;
4925
    }
4926
  keep_going (ecs);
4927
}
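
/* Rough walk-through with invented addresses: suppose the callee's
   prologue ends at 0x1010, the source line containing 0x1010 spans
   [0x100c, 0x1018), and the function ends at 0x1040.  Then
   stop_func_start is advanced to 0x1018, a step-resume breakpoint is
   planted there, and the thread is resumed until that breakpoint is
   hit, at which point stepping stops.  Addresses are hypothetical;
   only the control flow above is real.  */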
4928
 
4929
/* Inferior has stepped backward into a subroutine call with source
4930
   code that we should not step over.  Do step to the beginning of the
4931
   last line of code in it.  */
4932
 
4933
static void
4934
handle_step_into_function_backward (struct gdbarch *gdbarch,
4935
                                    struct execution_control_state *ecs)
4936
{
4937
  struct symtab *s;
4938
  struct symtab_and_line stop_func_sal;
4939
 
4940
  s = find_pc_symtab (stop_pc);
4941
  if (s && s->language != language_asm)
4942
    ecs->stop_func_start = gdbarch_skip_prologue (gdbarch,
4943
                                                  ecs->stop_func_start);
4944
 
4945
  stop_func_sal = find_pc_line (stop_pc, 0);
4946
 
4947
  /* OK, we're just going to keep stepping here.  */
4948
  if (stop_func_sal.pc == stop_pc)
4949
    {
4950
      /* We're there already.  Just stop stepping now.  */
4951
      ecs->event_thread->stop_step = 1;
4952
      print_stop_reason (END_STEPPING_RANGE, 0);
4953
      stop_stepping (ecs);
4954
    }
4955
  else
4956
    {
4957
      /* Else just reset the step range and keep going.
4958
         No step-resume breakpoint; they don't work for
4959
         epilogues, which can have multiple entry paths.  */
4960
      ecs->event_thread->step_range_start = stop_func_sal.pc;
4961
      ecs->event_thread->step_range_end = stop_func_sal.end;
4962
      keep_going (ecs);
4963
    }
4964
  return;
4965
}
4966
 
4967
/* Insert a "step-resume breakpoint" at SR_SAL with frame ID SR_ID.
4968
   This is used both to step over functions and to skip over code.  */
4969
 
4970
static void
4971
insert_step_resume_breakpoint_at_sal (struct gdbarch *gdbarch,
4972
                                      struct symtab_and_line sr_sal,
4973
                                      struct frame_id sr_id)
4974
{
4975
  /* There should never be more than one step-resume or longjmp-resume
4976
     breakpoint per thread, so we should never be setting a new
4977
     step_resume_breakpoint when one is already active.  */
4978
  gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
4979
 
4980
  if (debug_infrun)
4981
    fprintf_unfiltered (gdb_stdlog,
4982
                        "infrun: inserting step-resume breakpoint at %s\n",
4983
                        paddress (gdbarch, sr_sal.pc));
4984
 
4985
  inferior_thread ()->step_resume_breakpoint
4986
    = set_momentary_breakpoint (gdbarch, sr_sal, sr_id, bp_step_resume);
4987
}
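
/* Typical call sequence, as it appears several times in
   handle_inferior_event above (copied for illustration, not a new
   code path):

       struct symtab_and_line sr_sal;

       init_sal (&sr_sal);
       sr_sal.pc = ecs->stop_func_start;
       sr_sal.pspace = get_frame_program_space (frame);
       insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
                                             null_frame_id);
       keep_going (ecs);
*/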
4988
 
4989
/* Insert a "step-resume breakpoint" at RETURN_FRAME.pc.  This is used
4990
   to skip a potential signal handler.
4991
 
4992
   This is called with the interrupted function's frame.  The signal
4993
   handler, when it returns, will resume the interrupted function at
4994
   RETURN_FRAME.pc.  */
4995
 
4996
static void
4997
insert_step_resume_breakpoint_at_frame (struct frame_info *return_frame)
4998
{
4999
  struct symtab_and_line sr_sal;
5000
  struct gdbarch *gdbarch;
5001
 
5002
  gdb_assert (return_frame != NULL);
5003
  init_sal (&sr_sal);           /* initialize to zeros */
5004
 
5005
  gdbarch = get_frame_arch (return_frame);
5006
  sr_sal.pc = gdbarch_addr_bits_remove (gdbarch, get_frame_pc (return_frame));
5007
  sr_sal.section = find_pc_overlay (sr_sal.pc);
5008
  sr_sal.pspace = get_frame_program_space (return_frame);
5009
 
5010
  insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5011
                                        get_stack_frame_id (return_frame));
5012
}
5013
 
5014
/* Similar to insert_step_resume_breakpoint_at_frame, except that it
5015
   puts the breakpoint at the previous frame's PC.  This is used to
5016
   skip a function after stepping into it (for "next" or if the called
5017
   function has no debugging information).
5018
 
5019
   The current function has almost always been reached by single
5020
   stepping a call or return instruction.  NEXT_FRAME belongs to the
5021
   current function, and the breakpoint will be set at the caller's
5022
   resume address.
5023
 
5024
   This is a separate function rather than reusing
5025
   insert_step_resume_breakpoint_at_frame in order to avoid
5026
   get_prev_frame, which may stop prematurely (see the implementation
5027
   of frame_unwind_caller_id for an example).  */
5028
 
5029
static void
5030
insert_step_resume_breakpoint_at_caller (struct frame_info *next_frame)
5031
{
5032
  struct symtab_and_line sr_sal;
5033
  struct gdbarch *gdbarch;
5034
 
5035
  /* We shouldn't have gotten here if we don't know where the call site
5036
     is.  */
5037
  gdb_assert (frame_id_p (frame_unwind_caller_id (next_frame)));
5038
 
5039
  init_sal (&sr_sal);           /* initialize to zeros */
5040
 
5041
  gdbarch = frame_unwind_caller_arch (next_frame);
5042
  sr_sal.pc = gdbarch_addr_bits_remove (gdbarch,
5043
                                        frame_unwind_caller_pc (next_frame));
5044
  sr_sal.section = find_pc_overlay (sr_sal.pc);
5045
  sr_sal.pspace = frame_unwind_program_space (next_frame);
5046
 
5047
  insert_step_resume_breakpoint_at_sal (gdbarch, sr_sal,
5048
                                        frame_unwind_caller_id (next_frame));
5049
}
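
/* Illustrative use, taken from the "next over a subroutine" path in
   handle_inferior_event above (existing usage, shown here only for
   context):

       insert_step_resume_breakpoint_at_caller (frame);
       keep_going (ecs);

   i.e. run the callee to completion and regain control at the
   caller's resume address.  */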
5050
 
5051
/* Insert a "longjmp-resume" breakpoint at PC.  This is used to set a
5052
   new breakpoint at the target of a jmp_buf.  The handling of
5053
   longjmp-resume uses the same mechanisms used for handling
5054
   "step-resume" breakpoints.  */
5055
 
5056
static void
5057
insert_longjmp_resume_breakpoint (struct gdbarch *gdbarch, CORE_ADDR pc)
5058
{
5059
  /* There should never be more than one step-resume or longjmp-resume
5060
     breakpoint per thread, so we should never be setting a new
5061
     longjmp_resume_breakpoint when one is already active.  */
5062
  gdb_assert (inferior_thread ()->step_resume_breakpoint == NULL);
5063
 
5064
  if (debug_infrun)
5065
    fprintf_unfiltered (gdb_stdlog,
5066
                        "infrun: inserting longjmp-resume breakpoint at %s\n",
5067
                        paddress (gdbarch, pc));
5068
 
5069
  inferior_thread ()->step_resume_breakpoint =
5070
    set_momentary_breakpoint_at_pc (gdbarch, pc, bp_longjmp_resume);
5071
}
5072
 
5073
static void
5074
stop_stepping (struct execution_control_state *ecs)
5075
{
5076
  if (debug_infrun)
5077
    fprintf_unfiltered (gdb_stdlog, "infrun: stop_stepping\n");
5078
 
5079
  /* Let callers know we don't want to wait for the inferior anymore.  */
5080
  ecs->wait_some_more = 0;
5081
}
5082
 
5083
/* This function handles various cases where we need to continue
5084
   waiting for the inferior.  */
5085
/* (Used to be the keep_going: label in the old wait_for_inferior) */
5086
 
5087
static void
5088
keep_going (struct execution_control_state *ecs)
5089
{
5090
  /* Make sure normal_stop is called if we get a QUIT handled before
5091
     reaching resume.  */
5092
  struct cleanup *old_cleanups = make_cleanup (resume_cleanups, 0);
5093
 
5094
  /* Save the pc before execution, to compare with pc after stop.  */
5095
  ecs->event_thread->prev_pc
5096
    = regcache_read_pc (get_thread_regcache (ecs->ptid));
5097
 
5098
  /* If we did not do break;, it means we should keep running the
5099
     inferior and not return to the debugger.  */
5100
 
5101
  if (ecs->event_thread->trap_expected
5102
      && ecs->event_thread->stop_signal != TARGET_SIGNAL_TRAP)
5103
    {
5104
      /* We took a signal (which we are supposed to pass through to
5105
         the inferior, else we'd not get here) and we haven't yet
5106
         gotten our trap.  Simply continue.  */
5107
 
5108
      discard_cleanups (old_cleanups);
5109
      resume (currently_stepping (ecs->event_thread),
5110
              ecs->event_thread->stop_signal);
5111
    }
5112
  else
5113
    {
5114
      /* Either the trap was not expected, but we are continuing
5115
         anyway (the user asked that this signal be passed to the
5116
         child)
5117
         -- or --
5118
         The signal was SIGTRAP, i.e. it was our signal, but we
5119
         decided we should resume from it.
5120
 
5121
         We're going to run this baby now!
5122
 
5123
         Note that insert_breakpoints won't try to re-insert
5124
         already inserted breakpoints.  Therefore, we don't
5125
         care if breakpoints were already inserted, or not.  */
5126
 
5127
      if (ecs->event_thread->stepping_over_breakpoint)
5128
        {
5129
          struct regcache *thread_regcache = get_thread_regcache (ecs->ptid);
5130
 
5131
          if (!use_displaced_stepping (get_regcache_arch (thread_regcache)))
5132
            /* Since we can't do a displaced step, we have to remove
5133
               the breakpoint while we step it.  To keep things
5134
               simple, we remove them all.  */
5135
            remove_breakpoints ();
5136
        }
5137
      else
5138
        {
5139
          struct gdb_exception e;
5140
 
5141
          /* Stop stepping when inserting breakpoints
5142
             has failed.  */
5143
          TRY_CATCH (e, RETURN_MASK_ERROR)
5144
            {
5145
              insert_breakpoints ();
5146
            }
5147
          if (e.reason < 0)
5148
            {
5149
              exception_print (gdb_stderr, e);
5150
              stop_stepping (ecs);
5151
              return;
5152
            }
5153
        }
5154
 
5155
      ecs->event_thread->trap_expected = ecs->event_thread->stepping_over_breakpoint;
5156
 
5157
      /* Do not deliver SIGNAL_TRAP (except when the user explicitly
5158
         specifies that such a signal should be delivered to the
5159
         target program).
5160
 
5161
         Typically, this would occur when a user is debugging a
5162
         target monitor on a simulator: the target monitor sets a
5163
         breakpoint; the simulator encounters this break-point and
5164
         halts the simulation, handing control to GDB; GDB, noting
5165
         that the break-point isn't valid, returns control back to the
5166
         simulator; the simulator then delivers the hardware
5167
         equivalent of a SIGNAL_TRAP to the program being debugged. */
5168
 
5169
      if (ecs->event_thread->stop_signal == TARGET_SIGNAL_TRAP
5170
          && !signal_program[ecs->event_thread->stop_signal])
5171
        ecs->event_thread->stop_signal = TARGET_SIGNAL_0;
5172
 
5173
      discard_cleanups (old_cleanups);
5174
      resume (currently_stepping (ecs->event_thread),
5175
              ecs->event_thread->stop_signal);
5176
    }
5177
 
5178
  prepare_to_wait (ecs);
5179
}
5180
 
5181
/* This function normally comes after a resume, before
5182
   handle_inferior_event exits.  It takes care of any last bits of
5183
   housekeeping, and sets the all-important wait_some_more flag.  */
5184
 
5185
static void
5186
prepare_to_wait (struct execution_control_state *ecs)
5187
{
5188
  if (debug_infrun)
5189
    fprintf_unfiltered (gdb_stdlog, "infrun: prepare_to_wait\n");
5190
 
5191
  /* This is the old end of the while loop.  Let everybody know we
5192
     want to wait for the inferior some more and get called again
5193
     soon.  */
5194
  ecs->wait_some_more = 1;
5195
}
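
/* The wait_some_more flag is the whole interface back to the event
   loop.  Approximate sketch of how the callers (wait_for_inferior and
   fetch_inferior_event, defined earlier in this file) consume it --
   details simplified:

       do
         {
           ecs->ptid = target_wait (waiton_ptid, &ecs->ws, 0);
           handle_inferior_event (ecs);
         }
       while (ecs->wait_some_more);

   keep_going () and prepare_to_wait () set the flag; stop_stepping ()
   clears it, which ends the loop, and normal_stop () then reports the
   stop to the user.  */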
5196
 
5197
/* Print why the inferior has stopped. We always print something when
5198
   the inferior exits, or receives a signal. The rest of the cases are
5199
   dealt with later on in normal_stop() and print_it_typical().  Ideally
5200
   there should be a call to this function from handle_inferior_event()
5201
   each time stop_stepping() is called.  */
5202
static void
5203
print_stop_reason (enum inferior_stop_reason stop_reason, int stop_info)
5204
{
5205
  switch (stop_reason)
5206
    {
5207
    case END_STEPPING_RANGE:
5208
      /* We are done with a step/next/si/ni command. */
5209
      /* For now print nothing. */
5210
      /* Print a message only if not in the middle of doing a "step n"
5211
         operation for n > 1 */
5212
      if (!inferior_thread ()->step_multi
5213
          || !inferior_thread ()->stop_step)
5214
        if (ui_out_is_mi_like_p (uiout))
5215
          ui_out_field_string
5216
            (uiout, "reason",
5217
             async_reason_lookup (EXEC_ASYNC_END_STEPPING_RANGE));
5218
      break;
5219
    case SIGNAL_EXITED:
5220
      /* The inferior was terminated by a signal. */
5221
      annotate_signalled ();
5222
      if (ui_out_is_mi_like_p (uiout))
5223
        ui_out_field_string
5224
          (uiout, "reason",
5225
           async_reason_lookup (EXEC_ASYNC_EXITED_SIGNALLED));
5226
      ui_out_text (uiout, "\nProgram terminated with signal ");
5227
      annotate_signal_name ();
5228
      ui_out_field_string (uiout, "signal-name",
5229
                           target_signal_to_name (stop_info));
5230
      annotate_signal_name_end ();
5231
      ui_out_text (uiout, ", ");
5232
      annotate_signal_string ();
5233
      ui_out_field_string (uiout, "signal-meaning",
5234
                           target_signal_to_string (stop_info));
5235
      annotate_signal_string_end ();
5236
      ui_out_text (uiout, ".\n");
5237
      ui_out_text (uiout, "The program no longer exists.\n");
5238
      break;
5239
    case EXITED:
5240
      /* The inferior program is finished. */
5241
      annotate_exited (stop_info);
5242
      if (stop_info)
5243
        {
5244
          if (ui_out_is_mi_like_p (uiout))
5245
            ui_out_field_string (uiout, "reason",
5246
                                 async_reason_lookup (EXEC_ASYNC_EXITED));
5247
          ui_out_text (uiout, "\nProgram exited with code ");
5248
          ui_out_field_fmt (uiout, "exit-code", "0%o",
5249
                            (unsigned int) stop_info);
5250
          ui_out_text (uiout, ".\n");
5251
        }
5252
      else
5253
        {
5254
          if (ui_out_is_mi_like_p (uiout))
5255
            ui_out_field_string
5256
              (uiout, "reason",
5257
               async_reason_lookup (EXEC_ASYNC_EXITED_NORMALLY));
5258
          ui_out_text (uiout, "\nProgram exited normally.\n");
5259
        }
5260
      /* Support the --return-child-result option.  */
5261
      return_child_result_value = stop_info;
5262
      break;
5263
    case SIGNAL_RECEIVED:
5264
      /* Signal received.  The signal table tells us to print about
5265
         it. */
5266
      annotate_signal ();
5267
 
5268
      if (stop_info == TARGET_SIGNAL_0 && !ui_out_is_mi_like_p (uiout))
5269
        {
5270
          struct thread_info *t = inferior_thread ();
5271
 
5272
          ui_out_text (uiout, "\n[");
5273
          ui_out_field_string (uiout, "thread-name",
5274
                               target_pid_to_str (t->ptid));
5275
          ui_out_field_fmt (uiout, "thread-id", "] #%d", t->num);
5276
          ui_out_text (uiout, " stopped");
5277
        }
5278
      else
5279
        {
5280
          ui_out_text (uiout, "\nProgram received signal ");
5281
          annotate_signal_name ();
5282
          if (ui_out_is_mi_like_p (uiout))
5283
            ui_out_field_string
5284
              (uiout, "reason", async_reason_lookup (EXEC_ASYNC_SIGNAL_RECEIVED));
5285
          ui_out_field_string (uiout, "signal-name",
5286
                               target_signal_to_name (stop_info));
5287
          annotate_signal_name_end ();
5288
          ui_out_text (uiout, ", ");
5289
          annotate_signal_string ();
5290
          ui_out_field_string (uiout, "signal-meaning",
5291
                               target_signal_to_string (stop_info));
5292
          annotate_signal_string_end ();
5293
        }
5294
      ui_out_text (uiout, ".\n");
5295
      break;
5296
    case NO_HISTORY:
5297
      /* Reverse execution: target ran out of history info.  */
5298
      ui_out_text (uiout, "\nNo more reverse-execution history.\n");
5299
      break;
5300
    default:
5301
      internal_error (__FILE__, __LINE__,
5302
                      _("print_stop_reason: unrecognized enum value"));
5303
      break;
5304
    }
5305
}
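
/* Example of the CLI text assembled above for the SIGNAL_EXITED case
   (signal name chosen arbitrarily for illustration):

       Program terminated with signal SIGSEGV, Segmentation fault.
       The program no longer exists.

   An MI front end instead receives the "reason" field emitted via
   ui_out_field_string.  */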
5306
 
5307
 
5308
/* Here to return control to GDB when the inferior stops for real.
5309
   Print appropriate messages, remove breakpoints, give terminal our modes.
5310
 
5311
   STOP_PRINT_FRAME nonzero means print the executing frame
5312
   (pc, function, args, file, line number and line text).
5313
   BREAKPOINTS_FAILED nonzero means stop was due to error
5314
   attempting to insert breakpoints.  */
5315
 
5316
void
5317
normal_stop (void)
5318
{
5319
  struct target_waitstatus last;
5320
  ptid_t last_ptid;
5321
  struct cleanup *old_chain = make_cleanup (null_cleanup, NULL);
5322
 
5323
  get_last_target_status (&last_ptid, &last);
5324
 
5325
  /* If an exception is thrown from this point on, make sure to
5326
     propagate GDB's knowledge of the executing state to the
5327
     frontend/user running state.  A QUIT is an easy exception to see
5328
     here, so do this before any filtered output.  */
5329
  if (!non_stop)
5330
    make_cleanup (finish_thread_state_cleanup, &minus_one_ptid);
5331
  else if (last.kind != TARGET_WAITKIND_SIGNALLED
5332
           && last.kind != TARGET_WAITKIND_EXITED)
5333
    make_cleanup (finish_thread_state_cleanup, &inferior_ptid);
5334
 
5335
  /* In non-stop mode, we don't want GDB to switch threads behind the
5336
     user's back, to avoid races where the user is typing a command to
5337
     apply to thread x, but GDB switches to thread y before the user
5338
     finishes entering the command.  */
5339
 
5340
  /* As with the notification of thread events, we want to delay
5341
     notifying the user that we've switched thread context until
5342
     the inferior actually stops.
5343
 
5344
     There's no point in saying anything if the inferior has exited.
5345
     Note that SIGNALLED here means "exited with a signal", not
5346
     "received a signal".  */
5347
  if (!non_stop
5348
      && !ptid_equal (previous_inferior_ptid, inferior_ptid)
5349
      && target_has_execution
5350
      && last.kind != TARGET_WAITKIND_SIGNALLED
5351
      && last.kind != TARGET_WAITKIND_EXITED)
5352
    {
5353
      target_terminal_ours_for_output ();
5354
      printf_filtered (_("[Switching to %s]\n"),
5355
                       target_pid_to_str (inferior_ptid));
5356
      annotate_thread_changed ();
5357
      previous_inferior_ptid = inferior_ptid;
5358
    }
5359
 
5360
  if (!breakpoints_always_inserted_mode () && target_has_execution)
5361
    {
5362
      if (remove_breakpoints ())
5363
        {
5364
          target_terminal_ours_for_output ();
5365
          printf_filtered (_("\
5366
Cannot remove breakpoints because program is no longer writable.\n\
5367
Further execution is probably impossible.\n"));
5368
        }
5369
    }
5370
 
5371
  /* If an auto-display called a function and that got a signal,
5372
     delete that auto-display to avoid an infinite recursion.  */
5373
 
5374
  if (stopped_by_random_signal)
5375
    disable_current_display ();
5376
 
5377
  /* Don't print a message if in the middle of doing a "step n"
5378
     operation for n > 1 */
5379
  if (target_has_execution
5380
      && last.kind != TARGET_WAITKIND_SIGNALLED
5381
      && last.kind != TARGET_WAITKIND_EXITED
5382
      && inferior_thread ()->step_multi
5383
      && inferior_thread ()->stop_step)
5384
    goto done;
5385
 
5386
  target_terminal_ours ();
5387
 
5388
  /* Set the current source location.  This will also happen if we
5389
     display the frame below, but the current SAL will be incorrect
5390
     during a user hook-stop function.  */
5391
  if (has_stack_frames () && !stop_stack_dummy)
5392
    set_current_sal_from_frame (get_current_frame (), 1);
5393
 
5394
  /* Let the user/frontend see the threads as stopped.  */
5395
  do_cleanups (old_chain);
5396
 
5397
  /* Look up the hook_stop and run it (CLI internally handles problem
5398
     of stop_command's pre-hook not existing).  */
5399
  if (stop_command)
5400
    catch_errors (hook_stop_stub, stop_command,
5401
                  "Error while running hook_stop:\n", RETURN_MASK_ALL);
5402
 
5403
  if (!has_stack_frames ())
5404
    goto done;
5405
 
5406
  if (last.kind == TARGET_WAITKIND_SIGNALLED
5407
      || last.kind == TARGET_WAITKIND_EXITED)
5408
    goto done;
5409
 
5410
  /* Select innermost stack frame - i.e., current frame is frame 0,
5411
     and current location is based on that.
5412
     Don't do this on return from a stack dummy routine,
5413
     or if the program has exited. */
5414
 
5415
  if (!stop_stack_dummy)
5416
    {
5417
      select_frame (get_current_frame ());
5418
 
5419
      /* Print current location without a level number, if
5420
         we have changed functions or hit a breakpoint.
5421
         Print source line if we have one.
5422
         bpstat_print() contains the logic deciding in detail
5423
         what to print, based on the event(s) that just occurred. */
5424
 
5425
      /* If --batch-silent is enabled then there's no need to print the current
5426
         source location, and attempting to do so risks an error message about
5427
         missing source files.  */
5428
      if (stop_print_frame && !batch_silent)
5429
        {
5430
          int bpstat_ret;
5431
          int source_flag;
5432
          int do_frame_printing = 1;
5433
          struct thread_info *tp = inferior_thread ();
5434
 
5435
          bpstat_ret = bpstat_print (tp->stop_bpstat);
5436
          switch (bpstat_ret)
5437
            {
5438
            case PRINT_UNKNOWN:
5439
              /* If we had hit a shared library event breakpoint,
5440
                 bpstat_print would print out this message.  If we hit
5441
                 an OS-level shared library event, do the same
5442
                 thing.  */
5443
              if (last.kind == TARGET_WAITKIND_LOADED)
5444
                {
5445
                  printf_filtered (_("Stopped due to shared library event\n"));
5446
                  source_flag = SRC_LINE;       /* something bogus */
5447
                  do_frame_printing = 0;
5448
                  break;
5449
                }
5450
 
5451
              /* FIXME: cagney/2002-12-01: Given that a frame ID does
5452
                 (or should) carry around the function and does (or
5453
                 should) use that when doing a frame comparison.  */
5454
              if (tp->stop_step
5455
                  && frame_id_eq (tp->step_frame_id,
5456
                                  get_frame_id (get_current_frame ()))
5457
                  && step_start_function == find_pc_function (stop_pc))
5458
                source_flag = SRC_LINE; /* finished step, just print source line */
5459
              else
5460
                source_flag = SRC_AND_LOC;      /* print location and source line */
5461
              break;
5462
            case PRINT_SRC_AND_LOC:
5463
              source_flag = SRC_AND_LOC;        /* print location and source line */
5464
              break;
5465
            case PRINT_SRC_ONLY:
5466
              source_flag = SRC_LINE;
5467
              break;
5468
            case PRINT_NOTHING:
5469
              source_flag = SRC_LINE;   /* something bogus */
5470
              do_frame_printing = 0;
5471
              break;
5472
            default:
5473
              internal_error (__FILE__, __LINE__, _("Unknown value."));
5474
            }
5475
 
5476
          /* The behavior of this routine with respect to the source
5477
             flag is:
5478
             SRC_LINE: Print only source line
5479
             LOCATION: Print only location
5480
             SRC_AND_LOC: Print location and source line */
5481
          if (do_frame_printing)
5482
            print_stack_frame (get_selected_frame (NULL), 0, source_flag);
5483
 
5484
          /* Display the auto-display expressions.  */
5485
          do_displays ();
5486
        }
5487
    }
5488
 
5489
  /* Save the function value return registers, if we care.
5490
     We might be about to restore their previous contents.  */
5491
  if (inferior_thread ()->proceed_to_finish)
5492
    {
5493
      /* This should not be necessary.  */
5494
      if (stop_registers)
5495
        regcache_xfree (stop_registers);
5496
 
5497
      /* NB: The copy goes through to the target picking up the value of
5498
         all the registers.  */
5499
      stop_registers = regcache_dup (get_current_regcache ());
5500
    }
5501
 
5502
  if (stop_stack_dummy == STOP_STACK_DUMMY)
5503
    {
5504
      /* Pop the empty frame that contains the stack dummy.
5505
         This also restores inferior state prior to the call
5506
         (struct inferior_thread_state).  */
5507
      struct frame_info *frame = get_current_frame ();
5508
 
5509
      gdb_assert (get_frame_type (frame) == DUMMY_FRAME);
5510
      frame_pop (frame);
5511
      /* frame_pop() calls reinit_frame_cache as the last thing it does
5512
         which means there's currently no selected frame.  We don't need
5513
         to re-establish a selected frame if the dummy call returns normally,
5514
         that will be done by restore_inferior_status.  However, we do have
5515
         to handle the case where the dummy call is returning after being
5516
         stopped (e.g. the dummy call previously hit a breakpoint).  We
5517
         can't know which case we have so just always re-establish a
5518
         selected frame here.  */
5519
      select_frame (get_current_frame ());
5520
    }
5521
 
5522
done:
5523
  annotate_stopped ();
5524
 
5525
  /* Suppress the stop observer if we're in the middle of:
5526
 
5527
     - a step n (n > 1), as there are still more steps to be done.
5528
 
5529
     - a "finish" command, as the observer will be called in
5530
       finish_command_continuation, so it can include the inferior
5531
       function's return value.
5532
 
5533
     - calling an inferior function, as we pretend the inferior didn't
5534
       run at all.  The return value of the call is handled by the
5535
       expression evaluator, through call_function_by_hand.  */
5536
 
5537
  if (!target_has_execution
5538
      || last.kind == TARGET_WAITKIND_SIGNALLED
5539
      || last.kind == TARGET_WAITKIND_EXITED
5540
      || (!inferior_thread ()->step_multi
5541
          && !(inferior_thread ()->stop_bpstat
5542
               && inferior_thread ()->proceed_to_finish)
5543
          && !inferior_thread ()->in_infcall))
5544
    {
5545
      if (!ptid_equal (inferior_ptid, null_ptid))
5546
        observer_notify_normal_stop (inferior_thread ()->stop_bpstat,
5547
                                     stop_print_frame);
5548
      else
5549
        observer_notify_normal_stop (NULL, stop_print_frame);
5550
    }
5551
 
5552
  if (target_has_execution)
5553
    {
5554
      if (last.kind != TARGET_WAITKIND_SIGNALLED
5555
          && last.kind != TARGET_WAITKIND_EXITED)
5556
        /* Delete the breakpoint we stopped at, if it wants to be deleted.
5557
           Delete any breakpoint that is to be deleted at the next stop.  */
5558
        breakpoint_auto_delete (inferior_thread ()->stop_bpstat);
5559
    }
5560
 
5561
  /* Try to get rid of automatically added inferiors that are no
5562
     longer needed.  Keeping those around slows down things linearly.
5563
     Note that this never removes the current inferior.  */
5564
  prune_inferiors ();
5565
}
5566
 
5567
static int
5568
hook_stop_stub (void *cmd)
5569
{
5570
  execute_cmd_pre_hook ((struct cmd_list_element *) cmd);
5571
  return (0);
5572
}
5573
 
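/* Illustrative usage sketch: the command list run through hook_stop_stub
   above is the user's "hook-stop" hook on the "stop" pseudo-command
   defined in _initialize_infrun below, e.g.:

       (gdb) define hook-stop
       > info registers
       > end

   Those commands then run each time the inferior stops, before control
   returns to the user.  */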
5574
int
5575
signal_stop_state (int signo)
5576
{
5577
  return signal_stop[signo];
5578
}
5579
 
5580
int
5581
signal_print_state (int signo)
5582
{
5583
  return signal_print[signo];
5584
}
5585
 
5586
int
5587
signal_pass_state (int signo)
5588
{
5589
  return signal_program[signo];
5590
}
5591
 
5592
int
5593
signal_stop_update (int signo, int state)
5594
{
5595
  int ret = signal_stop[signo];
5596
 
5597
  signal_stop[signo] = state;
5598
  return ret;
5599
}
5600
 
5601
int
5602
signal_print_update (int signo, int state)
5603
{
5604
  int ret = signal_print[signo];
5605
 
5606
  signal_print[signo] = state;
5607
  return ret;
5608
}
5609
 
5610
int
5611
signal_pass_update (int signo, int state)
5612
{
5613
  int ret = signal_program[signo];
5614
 
5615
  signal_program[signo] = state;
5616
  return ret;
5617
}
5618
 
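/* Illustrative sketch (hypothetical helper, for illustration only): the
   *_update functions above return the previous setting, so a caller can
   temporarily override how one signal is treated and later put the old
   values back.  */

static void
example_with_sigchld_stopped (void (*body) (void))
{
  int old_stop = signal_stop_update (TARGET_SIGNAL_CHLD, 1);
  int old_print = signal_print_update (TARGET_SIGNAL_CHLD, 1);

  body ();

  signal_print_update (TARGET_SIGNAL_CHLD, old_print);
  signal_stop_update (TARGET_SIGNAL_CHLD, old_stop);
}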
5619
static void
5620
sig_print_header (void)
5621
{
5622
  printf_filtered (_("\
5623
Signal        Stop\tPrint\tPass to program\tDescription\n"));
5624
}
5625
 
5626
static void
5627
sig_print_info (enum target_signal oursig)
5628
{
5629
  const char *name = target_signal_to_name (oursig);
5630
  int name_padding = 13 - strlen (name);
5631
 
5632
  if (name_padding <= 0)
5633
    name_padding = 0;
5634
 
5635
  printf_filtered ("%s", name);
5636
  printf_filtered ("%*.*s ", name_padding, name_padding, "                 ");
5637
  printf_filtered ("%s\t", signal_stop[oursig] ? "Yes" : "No");
5638
  printf_filtered ("%s\t", signal_print[oursig] ? "Yes" : "No");
5639
  printf_filtered ("%s\t\t", signal_program[oursig] ? "Yes" : "No");
5640
  printf_filtered ("%s\n", target_signal_to_string (oursig));
5641
}
5642
 
5643
/* Specify how various signals in the inferior should be handled.  */
5644
 
5645
static void
5646
handle_command (char *args, int from_tty)
5647
{
5648
  char **argv;
5649
  int digits, wordlen;
5650
  int sigfirst, signum, siglast;
5651
  enum target_signal oursig;
5652
  int allsigs;
5653
  int nsigs;
5654
  unsigned char *sigs;
5655
  struct cleanup *old_chain;
5656
 
5657
  if (args == NULL)
5658
    {
5659
      error_no_arg (_("signal to handle"));
5660
    }
5661
 
5662
  /* Allocate and zero an array of flags for which signals to handle. */
5663
 
5664
  nsigs = (int) TARGET_SIGNAL_LAST;
5665
  sigs = (unsigned char *) alloca (nsigs);
5666
  memset (sigs, 0, nsigs);
5667
 
5668
  /* Break the command line up into args. */
5669
 
5670
  argv = gdb_buildargv (args);
5671
  old_chain = make_cleanup_freeargv (argv);
5672
 
5673
  /* Walk through the args, looking for signal numbers, signal names, and
5674
     actions.  Signal numbers and signal names may be interspersed with
5675
     actions, with the actions being performed for all signals cumulatively
5676
     specified.  Signal ranges can be specified as <LOW>-<HIGH>. */
5677
 
5678
  while (*argv != NULL)
5679
    {
5680
      wordlen = strlen (*argv);
5681
      for (digits = 0; isdigit ((*argv)[digits]); digits++)
5682
        {;
5683
        }
5684
      allsigs = 0;
5685
      sigfirst = siglast = -1;
5686
 
5687
      if (wordlen >= 1 && !strncmp (*argv, "all", wordlen))
5688
        {
5689
          /* Apply action to all signals except those used by the
5690
             debugger.  Silently skip those. */
5691
          allsigs = 1;
5692
          sigfirst = 0;
5693
          siglast = nsigs - 1;
5694
        }
5695
      else if (wordlen >= 1 && !strncmp (*argv, "stop", wordlen))
5696
        {
5697
          SET_SIGS (nsigs, sigs, signal_stop);
5698
          SET_SIGS (nsigs, sigs, signal_print);
5699
        }
5700
      else if (wordlen >= 1 && !strncmp (*argv, "ignore", wordlen))
5701
        {
5702
          UNSET_SIGS (nsigs, sigs, signal_program);
5703
        }
5704
      else if (wordlen >= 2 && !strncmp (*argv, "print", wordlen))
5705
        {
5706
          SET_SIGS (nsigs, sigs, signal_print);
5707
        }
5708
      else if (wordlen >= 2 && !strncmp (*argv, "pass", wordlen))
5709
        {
5710
          SET_SIGS (nsigs, sigs, signal_program);
5711
        }
5712
      else if (wordlen >= 3 && !strncmp (*argv, "nostop", wordlen))
5713
        {
5714
          UNSET_SIGS (nsigs, sigs, signal_stop);
5715
        }
5716
      else if (wordlen >= 3 && !strncmp (*argv, "noignore", wordlen))
5717
        {
5718
          SET_SIGS (nsigs, sigs, signal_program);
5719
        }
5720
      else if (wordlen >= 4 && !strncmp (*argv, "noprint", wordlen))
5721
        {
5722
          UNSET_SIGS (nsigs, sigs, signal_print);
5723
          UNSET_SIGS (nsigs, sigs, signal_stop);
5724
        }
5725
      else if (wordlen >= 4 && !strncmp (*argv, "nopass", wordlen))
5726
        {
5727
          UNSET_SIGS (nsigs, sigs, signal_program);
5728
        }
5729
      else if (digits > 0)
5730
        {
5731
          /* It is numeric.  The numeric signal refers to our own
5732
             internal signal numbering from target.h, not to host/target
5733
             signal  number.  This is a feature; users really should be
5734
             using symbolic names anyway, and the common ones like
5735
             SIGHUP, SIGINT, SIGALRM, etc. will work right anyway.  */
5736
 
5737
          sigfirst = siglast = (int)
5738
            target_signal_from_command (atoi (*argv));
5739
          if ((*argv)[digits] == '-')
5740
            {
5741
              siglast = (int)
5742
                target_signal_from_command (atoi ((*argv) + digits + 1));
5743
            }
5744
          if (sigfirst > siglast)
5745
            {
5746
              /* Bet he didn't figure we'd think of this case... */
5747
              signum = sigfirst;
5748
              sigfirst = siglast;
5749
              siglast = signum;
5750
            }
5751
        }
5752
      else
5753
        {
5754
          oursig = target_signal_from_name (*argv);
5755
          if (oursig != TARGET_SIGNAL_UNKNOWN)
5756
            {
5757
              sigfirst = siglast = (int) oursig;
5758
            }
5759
          else
5760
            {
5761
              /* Not a number and not a recognized flag word => complain.  */
5762
              error (_("Unrecognized or ambiguous flag word: \"%s\"."), *argv);
5763
            }
5764
        }
5765
 
5766
      /* If any signal numbers or symbol names were found, set flags for
5767
         which signals to apply actions to. */
5768
 
5769
      for (signum = sigfirst; signum >= 0 && signum <= siglast; signum++)
5770
        {
5771
          switch ((enum target_signal) signum)
5772
            {
5773
            case TARGET_SIGNAL_TRAP:
5774
            case TARGET_SIGNAL_INT:
5775
              if (!allsigs && !sigs[signum])
5776
                {
5777
                  if (query (_("%s is used by the debugger.\n\
5778
Are you sure you want to change it? "), target_signal_to_name ((enum target_signal) signum)))
5779
                    {
5780
                      sigs[signum] = 1;
5781
                    }
5782
                  else
5783
                    {
5784
                      printf_unfiltered (_("Not confirmed, unchanged.\n"));
5785
                      gdb_flush (gdb_stdout);
5786
                    }
5787
                }
5788
              break;
5789
            case TARGET_SIGNAL_0:
5790
            case TARGET_SIGNAL_DEFAULT:
5791
            case TARGET_SIGNAL_UNKNOWN:
5792
              /* Make sure that "all" doesn't print these.  */
5793
              break;
5794
            default:
5795
              sigs[signum] = 1;
5796
              break;
5797
            }
5798
        }
5799
 
5800
      argv++;
5801
    }
5802
 
5803
  for (signum = 0; signum < nsigs; signum++)
5804
    if (sigs[signum])
5805
      {
5806
        target_notice_signals (inferior_ptid);
5807
 
5808
        if (from_tty)
5809
          {
5810
            /* Show the results.  */
5811
            sig_print_header ();
5812
            for (; signum < nsigs; signum++)
5813
              if (sigs[signum])
5814
                sig_print_info (signum);
5815
          }
5816
 
5817
        break;
5818
      }
5819
 
5820
  do_cleanups (old_chain);
5821
}
5822
 
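/* Illustrative usage sketch for handle_command above.  Arguments are
   parsed word by word, signals and actions may be mixed, and the
   accumulated actions apply to all of the signals specified, e.g.:

       (gdb) handle SIGUSR1 nostop noprint pass
       (gdb) handle SIGINT stop print
       (gdb) handle 1-5 print pass

   Numeric arguments use GDB's own target_signal numbering from
   target.h, not host signal numbers, as noted in the loop above.  */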
5823
static void
5824
xdb_handle_command (char *args, int from_tty)
5825
{
5826
  char **argv;
5827
  struct cleanup *old_chain;
5828
 
5829
  if (args == NULL)
5830
    error_no_arg (_("xdb command"));
5831
 
5832
  /* Break the command line up into args. */
5833
 
5834
  argv = gdb_buildargv (args);
5835
  old_chain = make_cleanup_freeargv (argv);
5836
  if (argv[1] != (char *) NULL)
5837
    {
5838
      char *argBuf;
5839
      int bufLen;
5840
 
5841
      bufLen = strlen (argv[0]) + 20;
5842
      argBuf = (char *) xmalloc (bufLen);
5843
      if (argBuf)
5844
        {
5845
          int validFlag = 1;
5846
          enum target_signal oursig;
5847
 
5848
          oursig = target_signal_from_name (argv[0]);
5849
          memset (argBuf, 0, bufLen);
5850
          if (strcmp (argv[1], "Q") == 0)
5851
            sprintf (argBuf, "%s %s", argv[0], "noprint");
5852
          else
5853
            {
5854
              if (strcmp (argv[1], "s") == 0)
5855
                {
5856
                  if (!signal_stop[oursig])
5857
                    sprintf (argBuf, "%s %s", argv[0], "stop");
5858
                  else
5859
                    sprintf (argBuf, "%s %s", argv[0], "nostop");
5860
                }
5861
              else if (strcmp (argv[1], "i") == 0)
5862
                {
5863
                  if (!signal_program[oursig])
5864
                    sprintf (argBuf, "%s %s", argv[0], "pass");
5865
                  else
5866
                    sprintf (argBuf, "%s %s", argv[0], "nopass");
5867
                }
5868
              else if (strcmp (argv[1], "r") == 0)
5869
                {
5870
                  if (!signal_print[oursig])
5871
                    sprintf (argBuf, "%s %s", argv[0], "print");
5872
                  else
5873
                    sprintf (argBuf, "%s %s", argv[0], "noprint");
5874
                }
5875
              else
5876
                validFlag = 0;
5877
            }
5878
          if (validFlag)
5879
            handle_command (argBuf, from_tty);
5880
          else
5881
            printf_filtered (_("Invalid signal handling flag.\n"));
5882
          if (argBuf)
5883
            xfree (argBuf);
5884
        }
5885
    }
5886
  do_cleanups (old_chain);
5887
}
5888
 
5889
/* Print current contents of the tables set by the handle command.
5890
   It is possible we should just be printing signals actually used
5891
   by the current target (but for things to work right when switching
5892
   targets, all signals should be in the signal tables).  */
5893
 
5894
static void
5895
signals_info (char *signum_exp, int from_tty)
5896
{
5897
  enum target_signal oursig;
5898
 
5899
  sig_print_header ();
5900
 
5901
  if (signum_exp)
5902
    {
5903
      /* First see if this is a symbol name.  */
5904
      oursig = target_signal_from_name (signum_exp);
5905
      if (oursig == TARGET_SIGNAL_UNKNOWN)
5906
        {
5907
          /* No, try numeric.  */
5908
          oursig =
5909
            target_signal_from_command (parse_and_eval_long (signum_exp));
5910
        }
5911
      sig_print_info (oursig);
5912
      return;
5913
    }
5914
 
5915
  printf_filtered ("\n");
5916
  /* These ugly casts brought to you by the native VAX compiler.  */
5917
  for (oursig = TARGET_SIGNAL_FIRST;
5918
       (int) oursig < (int) TARGET_SIGNAL_LAST;
5919
       oursig = (enum target_signal) ((int) oursig + 1))
5920
    {
5921
      QUIT;
5922
 
5923
      if (oursig != TARGET_SIGNAL_UNKNOWN
5924
          && oursig != TARGET_SIGNAL_DEFAULT && oursig != TARGET_SIGNAL_0)
5925
        sig_print_info (oursig);
5926
    }
5927
 
5928
  printf_filtered (_("\nUse the \"handle\" command to change these tables.\n"));
5929
}
5930
 
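/* Illustrative usage sketch for signals_info above ("info signals",
   also reachable as "info handle").  With an argument, only that signal
   is shown; the Stop/Print/Pass columns depend on the current "handle"
   settings, so the output below is only an example:

       (gdb) info signals SIGINT
       Signal        Stop    Print   Pass to program Description
       SIGINT        Yes     Yes     No              Interrupt

   With no argument, every signal except TARGET_SIGNAL_0, _DEFAULT and
   _UNKNOWN is listed.  */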
5931
/* The $_siginfo convenience variable is a bit special.  We don't know
5932
   for sure the type of the value until we actually have a chance to
5933
   fetch the data.  The type can change depending on gdbarch, so it is
5934
   also dependent on which thread you have selected.  This is handled by:
5935
 
5936
     1. making $_siginfo be an internalvar that creates a new value on
5937
     access.
5938
 
5939
     2. making the value of $_siginfo be an lval_computed value.  */
5940
 
5941
/* This function implements the lval_computed support for reading a
5942
   $_siginfo value.  */
5943
 
5944
static void
5945
siginfo_value_read (struct value *v)
5946
{
5947
  LONGEST transferred;
5948
 
5949
  transferred =
5950
    target_read (&current_target, TARGET_OBJECT_SIGNAL_INFO,
5951
                 NULL,
5952
                 value_contents_all_raw (v),
5953
                 value_offset (v),
5954
                 TYPE_LENGTH (value_type (v)));
5955
 
5956
  if (transferred != TYPE_LENGTH (value_type (v)))
5957
    error (_("Unable to read siginfo"));
5958
}
5959
 
5960
/* This function implements the lval_computed support for writing a
5961
   $_siginfo value.  */
5962
 
5963
static void
5964
siginfo_value_write (struct value *v, struct value *fromval)
5965
{
5966
  LONGEST transferred;
5967
 
5968
  transferred = target_write (&current_target,
5969
                              TARGET_OBJECT_SIGNAL_INFO,
5970
                              NULL,
5971
                              value_contents_all_raw (fromval),
5972
                              value_offset (v),
5973
                              TYPE_LENGTH (value_type (fromval)));
5974
 
5975
  if (transferred != TYPE_LENGTH (value_type (fromval)))
5976
    error (_("Unable to write siginfo"));
5977
}
5978
 
5979
static struct lval_funcs siginfo_value_funcs =
5980
  {
5981
    siginfo_value_read,
5982
    siginfo_value_write
5983
  };
5984
 
5985
/* Return a new value with the correct type for the siginfo object of
5986
   the current thread using architecture GDBARCH.  Return a void value
5987
   if there's no object available.  */
5988
 
5989
static struct value *
5990
siginfo_make_value (struct gdbarch *gdbarch, struct internalvar *var)
5991
{
5992
  if (target_has_stack
5993
      && !ptid_equal (inferior_ptid, null_ptid)
5994
      && gdbarch_get_siginfo_type_p (gdbarch))
5995
    {
5996
      struct type *type = gdbarch_get_siginfo_type (gdbarch);
5997
 
5998
      return allocate_computed_value (type, &siginfo_value_funcs, NULL);
5999
    }
6000
 
6001
  return allocate_value (builtin_type (gdbarch)->builtin_void);
6002
}
6003
 
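/* Illustrative usage sketch: because $_siginfo is backed by the
   lval_computed functions above, reading or writing it goes through
   target_read/target_write of TARGET_OBJECT_SIGNAL_INFO for the
   currently selected thread, e.g.:

       (gdb) print $_siginfo
       (gdb) print $_siginfo.si_signo
       (gdb) set variable $_siginfo.si_signo = 11

   The member names are only examples; the actual layout comes from
   gdbarch_get_siginfo_type and varies by target.  */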
6004
 
6005
/* Inferior thread state.
6006
   These are details related to the inferior itself, and don't include
6007
   things like what frame the user had selected or what gdb was doing
6008
   with the target at the time.
6009
   For inferior function calls these are things we want to restore
6010
   regardless of whether the function call successfully completes
6011
   or the dummy frame has to be manually popped.  */
6012
 
6013
struct inferior_thread_state
6014
{
6015
  enum target_signal stop_signal;
6016
  CORE_ADDR stop_pc;
6017
  struct regcache *registers;
6018
};
6019
 
6020
struct inferior_thread_state *
6021
save_inferior_thread_state (void)
6022
{
6023
  struct inferior_thread_state *inf_state = XMALLOC (struct inferior_thread_state);
6024
  struct thread_info *tp = inferior_thread ();
6025
 
6026
  inf_state->stop_signal = tp->stop_signal;
6027
  inf_state->stop_pc = stop_pc;
6028
 
6029
  inf_state->registers = regcache_dup (get_current_regcache ());
6030
 
6031
  return inf_state;
6032
}
6033
 
6034
/* Restore inferior session state to INF_STATE.  */
6035
 
6036
void
6037
restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6038
{
6039
  struct thread_info *tp = inferior_thread ();
6040
 
6041
  tp->stop_signal = inf_state->stop_signal;
6042
  stop_pc = inf_state->stop_pc;
6043
 
6044
  /* The inferior can be gone if the user types "print exit(0)"
6045
     (and perhaps other times).  */
6046
  if (target_has_execution)
6047
    /* NB: The register write goes through to the target.  */
6048
    regcache_cpy (get_current_regcache (), inf_state->registers);
6049
  regcache_xfree (inf_state->registers);
6050
  xfree (inf_state);
6051
}
6052
 
6053
static void
6054
do_restore_inferior_thread_state_cleanup (void *state)
6055
{
6056
  restore_inferior_thread_state (state);
6057
}
6058
 
6059
struct cleanup *
6060
make_cleanup_restore_inferior_thread_state (struct inferior_thread_state *inf_state)
6061
{
6062
  return make_cleanup (do_restore_inferior_thread_state_cleanup, inf_state);
6063
}
6064
 
6065
void
6066
discard_inferior_thread_state (struct inferior_thread_state *inf_state)
6067
{
6068
  regcache_xfree (inf_state->registers);
6069
  xfree (inf_state);
6070
}
6071
 
6072
struct regcache *
6073
get_inferior_thread_state_regcache (struct inferior_thread_state *inf_state)
6074
{
6075
  return inf_state->registers;
6076
}
6077
 
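/* Illustrative sketch (hypothetical caller, for illustration only) of
   the intended discipline for the state-saving functions above:
   snapshot the thread state, arrange for it to be restored if anything
   throws, and discard both the cleanup and the snapshot only once the
   operation has fully succeeded.  */

static void
example_with_saved_thread_state (void (*operation) (void))
{
  struct inferior_thread_state *saved = save_inferior_thread_state ();
  struct cleanup *old_chain
    = make_cleanup_restore_inferior_thread_state (saved);

  operation ();         /* May throw; the cleanup then restores SAVED.  */

  /* Success: keep the new state and just free the snapshot.  */
  discard_cleanups (old_chain);
  discard_inferior_thread_state (saved);
}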
6078
/* Session related state for inferior function calls.
6079
   These are the additional bits of state that need to be restored
6080
   when an inferior function call successfully completes.  */
6081
 
6082
struct inferior_status
6083
{
6084
  bpstat stop_bpstat;
6085
  int stop_step;
6086
  enum stop_stack_kind stop_stack_dummy;
6087
  int stopped_by_random_signal;
6088
  int stepping_over_breakpoint;
6089
  CORE_ADDR step_range_start;
6090
  CORE_ADDR step_range_end;
6091
  struct frame_id step_frame_id;
6092
  struct frame_id step_stack_frame_id;
6093
  enum step_over_calls_kind step_over_calls;
6094
  CORE_ADDR step_resume_break_address;
6095
  int stop_after_trap;
6096
  int stop_soon;
6097
 
6098
  /* ID of the selected frame when the inferior function call was made.  */
6099
  struct frame_id selected_frame_id;
6100
 
6101
  int proceed_to_finish;
6102
  int in_infcall;
6103
};
6104
 
6105
/* Save all of the information associated with the inferior<==>gdb
6106
   connection.  */
6107
 
6108
struct inferior_status *
6109
save_inferior_status (void)
6110
{
6111
  struct inferior_status *inf_status = XMALLOC (struct inferior_status);
6112
  struct thread_info *tp = inferior_thread ();
6113
  struct inferior *inf = current_inferior ();
6114
 
6115
  inf_status->stop_step = tp->stop_step;
6116
  inf_status->stop_stack_dummy = stop_stack_dummy;
6117
  inf_status->stopped_by_random_signal = stopped_by_random_signal;
6118
  inf_status->stepping_over_breakpoint = tp->trap_expected;
6119
  inf_status->step_range_start = tp->step_range_start;
6120
  inf_status->step_range_end = tp->step_range_end;
6121
  inf_status->step_frame_id = tp->step_frame_id;
6122
  inf_status->step_stack_frame_id = tp->step_stack_frame_id;
6123
  inf_status->step_over_calls = tp->step_over_calls;
6124
  inf_status->stop_after_trap = stop_after_trap;
6125
  inf_status->stop_soon = inf->stop_soon;
6126
  /* Save original bpstat chain here; replace it with copy of chain.
6127
     If caller's caller is walking the chain, they'll be happier if we
6128
     hand them back the original chain when restore_inferior_status is
6129
     called.  */
6130
  inf_status->stop_bpstat = tp->stop_bpstat;
6131
  tp->stop_bpstat = bpstat_copy (tp->stop_bpstat);
6132
  inf_status->proceed_to_finish = tp->proceed_to_finish;
6133
  inf_status->in_infcall = tp->in_infcall;
6134
 
6135
  inf_status->selected_frame_id = get_frame_id (get_selected_frame (NULL));
6136
 
6137
  return inf_status;
6138
}
6139
 
6140
static int
6141
restore_selected_frame (void *args)
6142
{
6143
  struct frame_id *fid = (struct frame_id *) args;
6144
  struct frame_info *frame;
6145
 
6146
  frame = frame_find_by_id (*fid);
6147
 
6148
  /* If FRAME is NULL, the frame identified by *FID could not be
6149
     found; there is no previously selected frame to restore.  */
6150
  if (frame == NULL)
6151
    {
6152
      warning (_("Unable to restore previously selected frame."));
6153
      return 0;
6154
    }
6155
 
6156
  select_frame (frame);
6157
 
6158
  return (1);
6159
}
6160
 
6161
/* Restore inferior session state to INF_STATUS.  */
6162
 
6163
void
6164
restore_inferior_status (struct inferior_status *inf_status)
6165
{
6166
  struct thread_info *tp = inferior_thread ();
6167
  struct inferior *inf = current_inferior ();
6168
 
6169
  tp->stop_step = inf_status->stop_step;
6170
  stop_stack_dummy = inf_status->stop_stack_dummy;
6171
  stopped_by_random_signal = inf_status->stopped_by_random_signal;
6172
  tp->trap_expected = inf_status->stepping_over_breakpoint;
6173
  tp->step_range_start = inf_status->step_range_start;
6174
  tp->step_range_end = inf_status->step_range_end;
6175
  tp->step_frame_id = inf_status->step_frame_id;
6176
  tp->step_stack_frame_id = inf_status->step_stack_frame_id;
6177
  tp->step_over_calls = inf_status->step_over_calls;
6178
  stop_after_trap = inf_status->stop_after_trap;
6179
  inf->stop_soon = inf_status->stop_soon;
6180
  bpstat_clear (&tp->stop_bpstat);
6181
  tp->stop_bpstat = inf_status->stop_bpstat;
6182
  inf_status->stop_bpstat = NULL;
6183
  tp->proceed_to_finish = inf_status->proceed_to_finish;
6184
  tp->in_infcall = inf_status->in_infcall;
6185
 
6186
  if (target_has_stack)
6187
    {
6188
      /* The point of catch_errors is that if the stack is clobbered,
6189
         walking the stack might encounter a garbage pointer and
6190
         error() trying to dereference it.  */
6191
      if (catch_errors
6192
          (restore_selected_frame, &inf_status->selected_frame_id,
6193
           "Unable to restore previously selected frame:\n",
6194
           RETURN_MASK_ERROR) == 0)
6195
        /* Error in restoring the selected frame.  Select the innermost
6196
           frame.  */
6197
        select_frame (get_current_frame ());
6198
    }
6199
 
6200
  xfree (inf_status);
6201
}
6202
 
6203
static void
6204
do_restore_inferior_status_cleanup (void *sts)
6205
{
6206
  restore_inferior_status (sts);
6207
}
6208
 
6209
struct cleanup *
6210
make_cleanup_restore_inferior_status (struct inferior_status *inf_status)
6211
{
6212
  return make_cleanup (do_restore_inferior_status_cleanup, inf_status);
6213
}
6214
 
6215
void
6216
discard_inferior_status (struct inferior_status *inf_status)
6217
{
6218
  /* See save_inferior_status for info on stop_bpstat. */
6219
  bpstat_clear (&inf_status->stop_bpstat);
6220
  xfree (inf_status);
6221
}
6222
 
6223
int
6224
inferior_has_forked (ptid_t pid, ptid_t *child_pid)
6225
{
6226
  struct target_waitstatus last;
6227
  ptid_t last_ptid;
6228
 
6229
  get_last_target_status (&last_ptid, &last);
6230
 
6231
  if (last.kind != TARGET_WAITKIND_FORKED)
6232
    return 0;
6233
 
6234
  if (!ptid_equal (last_ptid, pid))
6235
    return 0;
6236
 
6237
  *child_pid = last.value.related_pid;
6238
  return 1;
6239
}
6240
 
6241
int
6242
inferior_has_vforked (ptid_t pid, ptid_t *child_pid)
6243
{
6244
  struct target_waitstatus last;
6245
  ptid_t last_ptid;
6246
 
6247
  get_last_target_status (&last_ptid, &last);
6248
 
6249
  if (last.kind != TARGET_WAITKIND_VFORKED)
6250
    return 0;
6251
 
6252
  if (!ptid_equal (last_ptid, pid))
6253
    return 0;
6254
 
6255
  *child_pid = last.value.related_pid;
6256
  return 1;
6257
}
6258
 
6259
int
6260
inferior_has_execd (ptid_t pid, char **execd_pathname)
6261
{
6262
  struct target_waitstatus last;
6263
  ptid_t last_ptid;
6264
 
6265
  get_last_target_status (&last_ptid, &last);
6266
 
6267
  if (last.kind != TARGET_WAITKIND_EXECD)
6268
    return 0;
6269
 
6270
  if (!ptid_equal (last_ptid, pid))
6271
    return 0;
6272
 
6273
  *execd_pathname = xstrdup (last.value.execd_pathname);
6274
  return 1;
6275
}
6276
 
6277
int
6278
inferior_has_called_syscall (ptid_t pid, int *syscall_number)
6279
{
6280
  struct target_waitstatus last;
6281
  ptid_t last_ptid;
6282
 
6283
  get_last_target_status (&last_ptid, &last);
6284
 
6285
  if (last.kind != TARGET_WAITKIND_SYSCALL_ENTRY &&
6286
      last.kind != TARGET_WAITKIND_SYSCALL_RETURN)
6287
    return 0;
6288
 
6289
  if (!ptid_equal (last_ptid, pid))
6290
    return 0;
6291
 
6292
  *syscall_number = last.value.syscall_number;
6293
  return 1;
6294
}
6295
 
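/* Illustrative sketch (hypothetical caller, for illustration only): the
   inferior_has_* predicates above all compare the last recorded target
   status against an expected event kind and ptid, and hand back the
   event's payload on a match.  */

static void
example_report_fork (ptid_t ptid)
{
  ptid_t child;

  if (inferior_has_forked (ptid, &child))
    printf_filtered (_("Process %d forked child %d.\n"),
                     ptid_get_pid (ptid), ptid_get_pid (child));
}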
6296
/* Oft used ptids */
6297
ptid_t null_ptid;
6298
ptid_t minus_one_ptid;
6299
 
6300
/* Create a ptid given the necessary PID, LWP, and TID components.  */
6301
 
6302
ptid_t
6303
ptid_build (int pid, long lwp, long tid)
6304
{
6305
  ptid_t ptid;
6306
 
6307
  ptid.pid = pid;
6308
  ptid.lwp = lwp;
6309
  ptid.tid = tid;
6310
  return ptid;
6311
}
6312
 
6313
/* Create a ptid from just a pid.  */
6314
 
6315
ptid_t
6316
pid_to_ptid (int pid)
6317
{
6318
  return ptid_build (pid, 0, 0);
6319
}
6320
 
6321
/* Fetch the pid (process id) component from a ptid.  */
6322
 
6323
int
6324
ptid_get_pid (ptid_t ptid)
6325
{
6326
  return ptid.pid;
6327
}
6328
 
6329
/* Fetch the lwp (lightweight process) component from a ptid.  */
6330
 
6331
long
6332
ptid_get_lwp (ptid_t ptid)
6333
{
6334
  return ptid.lwp;
6335
}
6336
 
6337
/* Fetch the tid (thread id) component from a ptid.  */
6338
 
6339
long
6340
ptid_get_tid (ptid_t ptid)
6341
{
6342
  return ptid.tid;
6343
}
6344
 
6345
/* ptid_equal() is used to test equality of two ptids.  */
6346
 
6347
int
6348
ptid_equal (ptid_t ptid1, ptid_t ptid2)
6349
{
6350
  return (ptid1.pid == ptid2.pid && ptid1.lwp == ptid2.lwp
6351
          && ptid1.tid == ptid2.tid);
6352
}
6353
 
6354
/* Returns true if PTID represents a process.  */
6355
 
6356
int
6357
ptid_is_pid (ptid_t ptid)
6358
{
6359
  if (ptid_equal (minus_one_ptid, ptid))
6360
    return 0;
6361
  if (ptid_equal (null_ptid, ptid))
6362
    return 0;
6363
 
6364
  return (ptid_get_lwp (ptid) == 0 && ptid_get_tid (ptid) == 0);
6365
}
6366
 
6367
int
6368
ptid_match (ptid_t ptid, ptid_t filter)
6369
{
6370
  /* Since both parameters have the same type, prevent easy mistakes
6371
     from happening.  */
6372
  gdb_assert (!ptid_equal (ptid, minus_one_ptid)
6373
              && !ptid_equal (ptid, null_ptid));
6374
 
6375
  if (ptid_equal (filter, minus_one_ptid))
6376
    return 1;
6377
  if (ptid_is_pid (filter)
6378
      && ptid_get_pid (ptid) == ptid_get_pid (filter))
6379
    return 1;
6380
  else if (ptid_equal (ptid, filter))
6381
    return 1;
6382
 
6383
  return 0;
6384
}
6385
 
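/* Illustrative sketch (hypothetical, for illustration only): how the
   ptid constructors, accessors and ptid_match above compose.
   minus_one_ptid acts as a wildcard filter, and a bare process ptid
   (lwp == 0 and tid == 0) matches any thread of that process.  */

static void
example_ptid_matching (void)
{
  ptid_t thread = ptid_build (1234, 5678, 0);   /* thread of pid 1234 */
  ptid_t process = pid_to_ptid (1234);          /* the whole process  */

  gdb_assert (ptid_match (thread, minus_one_ptid));     /* wildcard    */
  gdb_assert (ptid_match (thread, process));            /* same pid    */
  gdb_assert (!ptid_match (thread, pid_to_ptid (99)));  /* other pid   */
}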
6386
/* restore_inferior_ptid() will be used by the cleanup machinery
6387
   to restore the inferior_ptid value saved in a call to
6388
   save_inferior_ptid().  */
6389
 
6390
static void
6391
restore_inferior_ptid (void *arg)
6392
{
6393
  ptid_t *saved_ptid_ptr = arg;
6394
 
6395
  inferior_ptid = *saved_ptid_ptr;
6396
  xfree (arg);
6397
}
6398
 
6399
/* Save the value of inferior_ptid so that it may be restored by a
6400
   later call to do_cleanups().  Returns the struct cleanup pointer
6401
   needed for later doing the cleanup.  */
6402
 
6403
struct cleanup *
6404
save_inferior_ptid (void)
6405
{
6406
  ptid_t *saved_ptid_ptr;
6407
 
6408
  saved_ptid_ptr = xmalloc (sizeof (ptid_t));
6409
  *saved_ptid_ptr = inferior_ptid;
6410
  return make_cleanup (restore_inferior_ptid, saved_ptid_ptr);
6411
}
6412
 
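/* Illustrative sketch (hypothetical caller, for illustration only):
   save_inferior_ptid above is the usual way to switch inferior_ptid
   temporarily and have the cleanup machinery put the old value back,
   including when an error is thrown.  */

static void
example_with_other_thread (ptid_t other, void (*body) (void))
{
  struct cleanup *old_chain = save_inferior_ptid ();

  inferior_ptid = other;
  body ();                   /* Runs with OTHER temporarily selected.  */

  do_cleanups (old_chain);   /* Restores the saved inferior_ptid.  */
}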
6413
 
6414
/* User interface for reverse debugging:
6415
   Set exec-direction / show exec-direction commands
6416
   (setting the direction is an error unless the target can execute in reverse).  */
6417
 
6418
enum exec_direction_kind execution_direction = EXEC_FORWARD;
6419
static const char exec_forward[] = "forward";
6420
static const char exec_reverse[] = "reverse";
6421
static const char *exec_direction = exec_forward;
6422
static const char *exec_direction_names[] = {
6423
  exec_forward,
6424
  exec_reverse,
6425
  NULL
6426
};
6427
 
6428
static void
6429
set_exec_direction_func (char *args, int from_tty,
6430
                         struct cmd_list_element *cmd)
6431
{
6432
  if (target_can_execute_reverse)
6433
    {
6434
      if (!strcmp (exec_direction, exec_forward))
6435
        execution_direction = EXEC_FORWARD;
6436
      else if (!strcmp (exec_direction, exec_reverse))
6437
        execution_direction = EXEC_REVERSE;
6438
    }
6439
  else
6440
    {
6441
      exec_direction = exec_forward;
6442
      error (_("Target does not support this operation."));
6443
    }
6444
}
6445
 
6446
static void
6447
show_exec_direction_func (struct ui_file *out, int from_tty,
6448
                          struct cmd_list_element *cmd, const char *value)
6449
{
6450
  switch (execution_direction) {
6451
  case EXEC_FORWARD:
6452
    fprintf_filtered (out, _("Forward.\n"));
6453
    break;
6454
  case EXEC_REVERSE:
6455
    fprintf_filtered (out, _("Reverse.\n"));
6456
    break;
6457
  case EXEC_ERROR:
6458
  default:
6459
    fprintf_filtered (out,
6460
                      _("Forward (target `%s' does not support exec-direction).\n"),
6461
                      target_shortname);
6462
    break;
6463
  }
6464
}
6465
 
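/* Illustrative usage sketch for the exec-direction setting handled by
   the two functions above:

       (gdb) set exec-direction reverse
       (gdb) step                      <- steps backward, if supported
       (gdb) set exec-direction forward

   On a target without reverse-execution support, set_exec_direction_func
   resets the variable to "forward" and reports an error.  */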
6466
/* User interface for non-stop mode.  */
6467
 
6468
int non_stop = 0;
6469
 
6470
static void
6471
set_non_stop (char *args, int from_tty,
6472
              struct cmd_list_element *c)
6473
{
6474
  if (target_has_execution)
6475
    {
6476
      non_stop_1 = non_stop;
6477
      error (_("Cannot change this setting while the inferior is running."));
6478
    }
6479
 
6480
  non_stop = non_stop_1;
6481
}
6482
 
6483
static void
6484
show_non_stop (struct ui_file *file, int from_tty,
6485
               struct cmd_list_element *c, const char *value)
6486
{
6487
  fprintf_filtered (file,
6488
                    _("Controlling the inferior in non-stop mode is %s.\n"),
6489
                    value);
6490
}
6491
 
6492
static void
6493
show_schedule_multiple (struct ui_file *file, int from_tty,
6494
                        struct cmd_list_element *c, const char *value)
6495
{
6496
  fprintf_filtered (file, _("\
6497
Resuming the execution of threads of all processes is %s.\n"), value);
6498
}
6499
 
6500
void
6501
_initialize_infrun (void)
6502
{
6503
  int i;
6504
  int numsigs;
6505
 
6506
  add_info ("signals", signals_info, _("\
6507
What debugger does when program gets various signals.\n\
6508
Specify a signal as argument to print info on that signal only."));
6509
  add_info_alias ("handle", "signals", 0);
6510
 
6511
  add_com ("handle", class_run, handle_command, _("\
6512
Specify how to handle a signal.\n\
6513
Args are signals and actions to apply to those signals.\n\
6514
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6515
from 1-15 are allowed for compatibility with old versions of GDB.\n\
6516
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6517
The special arg \"all\" is recognized to mean all signals except those\n\
6518
used by the debugger, typically SIGTRAP and SIGINT.\n\
6519
Recognized actions include \"stop\", \"nostop\", \"print\", \"noprint\",\n\
6520
\"pass\", \"nopass\", \"ignore\", or \"noignore\".\n\
6521
Stop means reenter debugger if this signal happens (implies print).\n\
6522
Print means print a message if this signal happens.\n\
6523
Pass means let program see this signal; otherwise program doesn't know.\n\
6524
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6525
Pass and Stop may be combined."));
6526
  if (xdb_commands)
6527
    {
6528
      add_com ("lz", class_info, signals_info, _("\
6529
What debugger does when program gets various signals.\n\
6530
Specify a signal as argument to print info on that signal only."));
6531
      add_com ("z", class_run, xdb_handle_command, _("\
6532
Specify how to handle a signal.\n\
6533
Args are signals and actions to apply to those signals.\n\
6534
Symbolic signals (e.g. SIGSEGV) are recommended but numeric signals\n\
6535
from 1-15 are allowed for compatibility with old versions of GDB.\n\
6536
Numeric ranges may be specified with the form LOW-HIGH (e.g. 1-5).\n\
6537
The special arg \"all\" is recognized to mean all signals except those\n\
6538
used by the debugger, typically SIGTRAP and SIGINT.\n\
6539
Recognized actions include \"s\" (toggles between stop and nostop),\n\
6540
\"r\" (toggles between print and noprint), \"i\" (toggles between pass and \
6541
nopass), \"Q\" (noprint)\n\
6542
Stop means reenter debugger if this signal happens (implies print).\n\
6543
Print means print a message if this signal happens.\n\
6544
Pass means let program see this signal; otherwise program doesn't know.\n\
6545
Ignore is a synonym for nopass and noignore is a synonym for pass.\n\
6546
Pass and Stop may be combined."));
6547
    }
6548
 
6549
  if (!dbx_commands)
6550
    stop_command = add_cmd ("stop", class_obscure,
6551
                            not_just_help_class_command, _("\
6552
There is no `stop' command, but you can set a hook on `stop'.\n\
6553
This allows you to set a list of commands to be run each time execution\n\
6554
of the program stops."), &cmdlist);
6555
 
6556
  add_setshow_zinteger_cmd ("infrun", class_maintenance, &debug_infrun, _("\
6557
Set inferior debugging."), _("\
6558
Show inferior debugging."), _("\
6559
When non-zero, inferior specific debugging is enabled."),
6560
                            NULL,
6561
                            show_debug_infrun,
6562
                            &setdebuglist, &showdebuglist);
6563
 
6564
  add_setshow_boolean_cmd ("displaced", class_maintenance, &debug_displaced, _("\
6565
Set displaced stepping debugging."), _("\
6566
Show displaced stepping debugging."), _("\
6567
When non-zero, displaced stepping specific debugging is enabled."),
6568
                            NULL,
6569
                            show_debug_displaced,
6570
                            &setdebuglist, &showdebuglist);
6571
 
6572
  add_setshow_boolean_cmd ("non-stop", no_class,
6573
                           &non_stop_1, _("\
6574
Set whether gdb controls the inferior in non-stop mode."), _("\
6575
Show whether gdb controls the inferior in non-stop mode."), _("\
6576
When debugging a multi-threaded program and this setting is\n\
6577
off (the default, also called all-stop mode), when one thread stops\n\
6578
(for a breakpoint, watchpoint, exception, or similar events), GDB stops\n\
6579
all other threads in the program while you interact with the thread of\n\
6580
interest.  When you continue or step a thread, you can allow the other\n\
6581
threads to run, or have them remain stopped, but while you inspect any\n\
6582
thread's state, all threads stop.\n\
6583
\n\
6584
In non-stop mode, when one thread stops, other threads can continue\n\
6585
to run freely.  You'll be able to step each thread independently,\n\
6586
leave it stopped or free to run as needed."),
6587
                           set_non_stop,
6588
                           show_non_stop,
6589
                           &setlist,
6590
                           &showlist);
6591
 
6592
  numsigs = (int) TARGET_SIGNAL_LAST;
6593
  signal_stop = (unsigned char *) xmalloc (sizeof (signal_stop[0]) * numsigs);
6594
  signal_print = (unsigned char *)
6595
    xmalloc (sizeof (signal_print[0]) * numsigs);
6596
  signal_program = (unsigned char *)
6597
    xmalloc (sizeof (signal_program[0]) * numsigs);
6598
  for (i = 0; i < numsigs; i++)
6599
    {
6600
      signal_stop[i] = 1;
6601
      signal_print[i] = 1;
6602
      signal_program[i] = 1;
6603
    }
6604
 
6605
  /* Signals caused by debugger's own actions
6606
     should not be given to the program afterwards.  */
6607
  signal_program[TARGET_SIGNAL_TRAP] = 0;
6608
  signal_program[TARGET_SIGNAL_INT] = 0;
6609
 
6610
  /* Signals that are not errors should not normally enter the debugger.  */
6611
  signal_stop[TARGET_SIGNAL_ALRM] = 0;
6612
  signal_print[TARGET_SIGNAL_ALRM] = 0;
6613
  signal_stop[TARGET_SIGNAL_VTALRM] = 0;
6614
  signal_print[TARGET_SIGNAL_VTALRM] = 0;
6615
  signal_stop[TARGET_SIGNAL_PROF] = 0;
6616
  signal_print[TARGET_SIGNAL_PROF] = 0;
6617
  signal_stop[TARGET_SIGNAL_CHLD] = 0;
6618
  signal_print[TARGET_SIGNAL_CHLD] = 0;
6619
  signal_stop[TARGET_SIGNAL_IO] = 0;
6620
  signal_print[TARGET_SIGNAL_IO] = 0;
6621
  signal_stop[TARGET_SIGNAL_POLL] = 0;
6622
  signal_print[TARGET_SIGNAL_POLL] = 0;
6623
  signal_stop[TARGET_SIGNAL_URG] = 0;
6624
  signal_print[TARGET_SIGNAL_URG] = 0;
6625
  signal_stop[TARGET_SIGNAL_WINCH] = 0;
6626
  signal_print[TARGET_SIGNAL_WINCH] = 0;
6627
 
6628
  /* These signals are used internally by user-level thread
6629
     implementations.  (See signal(5) on Solaris.)  Like the above
6630
     signals, a healthy program receives and handles them as part of
6631
     its normal operation.  */
6632
  signal_stop[TARGET_SIGNAL_LWP] = 0;
6633
  signal_print[TARGET_SIGNAL_LWP] = 0;
6634
  signal_stop[TARGET_SIGNAL_WAITING] = 0;
6635
  signal_print[TARGET_SIGNAL_WAITING] = 0;
6636
  signal_stop[TARGET_SIGNAL_CANCEL] = 0;
6637
  signal_print[TARGET_SIGNAL_CANCEL] = 0;
6638
 
6639
  add_setshow_zinteger_cmd ("stop-on-solib-events", class_support,
6640
                            &stop_on_solib_events, _("\
6641
Set stopping for shared library events."), _("\
6642
Show stopping for shared library events."), _("\
6643
If nonzero, gdb will give control to the user when the dynamic linker\n\
6644
notifies gdb of shared library events.  The most common event of interest\n\
6645
to the user would be loading/unloading of a new library."),
6646
                            NULL,
6647
                            show_stop_on_solib_events,
6648
                            &setlist, &showlist);
6649
 
6650
  add_setshow_enum_cmd ("follow-fork-mode", class_run,
6651
                        follow_fork_mode_kind_names,
6652
                        &follow_fork_mode_string, _("\
6653
Set debugger response to a program call of fork or vfork."), _("\
6654
Show debugger response to a program call of fork or vfork."), _("\
6655
A fork or vfork creates a new process.  follow-fork-mode can be:\n\
6656
  parent  - the original process is debugged after a fork\n\
6657
  child   - the new process is debugged after a fork\n\
6658
The unfollowed process will continue to run.\n\
6659
By default, the debugger will follow the parent process."),
6660
                        NULL,
6661
                        show_follow_fork_mode_string,
6662
                        &setlist, &showlist);
6663
 
6664
  add_setshow_enum_cmd ("follow-exec-mode", class_run,
6665
                        follow_exec_mode_names,
6666
                        &follow_exec_mode_string, _("\
6667
Set debugger response to a program call of exec."), _("\
6668
Show debugger response to a program call of exec."), _("\
6669
An exec call replaces the program image of a process.\n\
6670
\n\
6671
follow-exec-mode can be:\n\
6672
\n\
6673
  new - the debugger creates a new inferior and rebinds the process\n\
6674
to this new inferior.  The program the process was running before\n\
6675
the exec call can be restarted afterwards by restarting the original\n\
6676
inferior.\n\
6677
\n\
6678
  same - the debugger keeps the process bound to the same inferior.\n\
6679
The new executable image replaces the previous executable loaded in\n\
6680
the inferior.  Restarting the inferior after the exec call restarts\n\
6681
the executable the process was running after the exec call.\n\
6682
\n\
6683
By default, the debugger will use the same inferior."),
6684
                        NULL,
6685
                        show_follow_exec_mode_string,
6686
                        &setlist, &showlist);
6687
 
6688
  add_setshow_enum_cmd ("scheduler-locking", class_run,
6689
                        scheduler_enums, &scheduler_mode, _("\
6690
Set mode for locking scheduler during execution."), _("\
6691
Show mode for locking scheduler during execution."), _("\
6692
off  == no locking (threads may preempt at any time)\n\
6693
on   == full locking (no thread except the current thread may run)\n\
6694
step == scheduler locked during every single-step operation.\n\
6695
        In this mode, no other thread may run during a step command.\n\
6696
        Other threads may run while stepping over a function call ('next')."),
6697
                        set_schedlock_func,     /* traps on target vector */
6698
                        show_scheduler_mode,
6699
                        &setlist, &showlist);
6700
 
6701
  add_setshow_boolean_cmd ("schedule-multiple", class_run, &sched_multi, _("\
6702
Set mode for resuming threads of all processes."), _("\
6703
Show mode for resuming threads of all processes."), _("\
6704
When on, execution commands (such as 'continue' or 'next') resume all\n\
6705
threads of all processes.  When off (which is the default), execution\n\
6706
commands only resume the threads of the current process.  The set of\n\
6707
threads that are resumed is further refined by the scheduler-locking\n\
6708
mode (see help set scheduler-locking)."),
6709
                           NULL,
6710
                           show_schedule_multiple,
6711
                           &setlist, &showlist);
6712
 
6713
  add_setshow_boolean_cmd ("step-mode", class_run, &step_stop_if_no_debug, _("\
6714
Set mode of the step operation."), _("\
6715
Show mode of the step operation."), _("\
6716
When set, doing a step over a function without debug line information\n\
6717
will stop at the first instruction of that function. Otherwise, the\n\
6718
function is skipped and the step command stops at a different source line."),
6719
                           NULL,
6720
                           show_step_stop_if_no_debug,
6721
                           &setlist, &showlist);
6722
 
6723
  add_setshow_enum_cmd ("displaced-stepping", class_run,
6724
                        can_use_displaced_stepping_enum,
6725
                        &can_use_displaced_stepping, _("\
6726
Set debugger's willingness to use displaced stepping."), _("\
6727
Show debugger's willingness to use displaced stepping."), _("\
6728
If on, gdb will use displaced stepping to step over breakpoints if it is\n\
6729
supported by the target architecture.  If off, gdb will not use displaced\n\
6730
stepping to step over breakpoints, even if such is supported by the target\n\
6731
architecture.  If auto (which is the default), gdb will use displaced stepping\n\
6732
if the target architecture supports it and non-stop mode is active, but will not\n\
6733
use it in all-stop mode (see help set non-stop)."),
6734
                        NULL,
6735
                        show_can_use_displaced_stepping,
6736
                        &setlist, &showlist);
6737
 
6738
  add_setshow_enum_cmd ("exec-direction", class_run, exec_direction_names,
6739
                        &exec_direction, _("Set direction of execution.\n\
6740
Options are 'forward' or 'reverse'."),
6741
                        _("Show direction of execution (forward/reverse)."),
6742
                        _("Tells gdb whether to execute forward or backward."),
6743
                        set_exec_direction_func, show_exec_direction_func,
6744
                        &setlist, &showlist);
6745
 
6746
  /* Set/show detach-on-fork: user-settable mode.  */
6747
 
6748
  add_setshow_boolean_cmd ("detach-on-fork", class_run, &detach_fork, _("\
6749
Set whether gdb will detach the child of a fork."), _("\
6750
Show whether gdb will detach the child of a fork."), _("\
6751
Tells gdb whether to detach the child of a fork."),
6752
                           NULL, NULL, &setlist, &showlist);
6753
 
6754
  /* ptid initializations */
6755
  null_ptid = ptid_build (0, 0, 0);
6756
  minus_one_ptid = ptid_build (-1, 0, 0);
6757
  inferior_ptid = null_ptid;
6758
  target_last_wait_ptid = minus_one_ptid;
6759
 
6760
  observer_attach_thread_ptid_changed (infrun_thread_ptid_changed);
6761
  observer_attach_thread_stop_requested (infrun_thread_stop_requested);
6762
  observer_attach_thread_exit (infrun_thread_thread_exit);
6763
  observer_attach_inferior_exit (infrun_inferior_exit);
6764
 
6765
  /* Explicitly create without lookup, since that tries to create a
6766
     value with a void typed value, and when we get here, gdbarch
6767
     isn't initialized yet.  At this point, we're quite sure there
6768
     isn't another convenience variable of the same name.  */
6769
  create_internalvar_type_lazy ("_siginfo", siginfo_make_value);
6770
 
6771
  add_setshow_boolean_cmd ("observer", no_class,
6772
                           &observer_mode_1, _("\
6773
Set whether gdb controls the inferior in observer mode."), _("\
6774
Show whether gdb controls the inferior in observer mode."), _("\
6775
In observer mode, GDB can get data from the inferior, but not\n\
6776
affect its execution.  Registers and memory may not be changed,\n\
6777
breakpoints may not be set, and the program cannot be interrupted\n\
6778
or signalled."),
6779
                           set_observer_mode,
6780
                           show_observer_mode,
6781
                           &setlist,
6782
                           &showlist);
6783
}
