/* Low level interface to ptrace, for the remote server for GDB.
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "server.h"
#include "linux-low.h"

#include <sys/wait.h>
#include <stdio.h>
#include <sys/param.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <sys/syscall.h>
#include <sched.h>
#include <ctype.h>
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/uio.h>
#ifndef ELFMAG0
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
   then ELFMAG0 will have been defined.  If it didn't get included by
   gdb_proc_service.h then including it will likely introduce a duplicate
   definition of elf_fpregset_t.  */
#include <elf.h>
#endif

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO 0x4202
# define PTRACE_SETSIGINFO 0x4203
#endif

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS       0x4200
#define PTRACE_GETEVENTMSG      0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD   0x00000001
#define PTRACE_O_TRACEFORK      0x00000002
#define PTRACE_O_TRACEVFORK     0x00000004
#define PTRACE_O_TRACECLONE     0x00000008
#define PTRACE_O_TRACEEXEC      0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT      0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK       1
#define PTRACE_EVENT_VFORK      2
#define PTRACE_EVENT_CLONE      3
#define PTRACE_EVENT_EXEC       4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT       6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   in some contexts.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

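/* Illustration of the encodings above: W_STOPCODE (SIGTRAP) yields
   0x57f, so WIFSTOPPED () is true and WSTOPSIG () == SIGTRAP, while
   the PTRACE_EVENT_* codes arrive shifted past the stop code (see
   the "wstat >> 16" test in handle_extended_wait below).  */
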
#ifdef __UCLIBC__
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
#define HAS_NOMMU
#endif
#endif

/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
   representation of the thread ID.

   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
   the same as the LWP ID.

   ``all_processes'' is keyed by the "overall process ID", which
   GNU/Linux calls tgid, "thread group ID".  */

struct inferior_list all_lwps;

/* A list of all unknown processes which receive stop signals.  Some other
   process will presumably claim each of these as forked children
   momentarily.  */

struct inferior_list stopped_pids;

/* FIXME this is a bit of a hack, and could be removed.  */
int stopping_threads;

/* FIXME make into a target method?  */
int using_threads = 1;

/* True if we're presently stabilizing threads (moving them out of
   jump pads).  */
static int stabilizing_threads;

/* This flag is true iff we've just created or attached to our first
   inferior but it has not stopped yet.  As soon as it does, we need
   to call the low target's arch_setup callback.  Doing this only on
   the first inferior avoids reinitializing the architecture on every
   inferior, and avoids messing with the register caches of the
   already running inferiors.  NOTE: this assumes all inferiors under
   control of gdbserver have the same architecture.  */
static int new_inferior;

static void linux_resume_one_lwp (struct lwp_info *lwp,
                                  int step, int signal, siginfo_t *info);
static void linux_resume (struct thread_resume *resume_info, size_t n);
static void stop_all_lwps (int suspend, struct lwp_info *except);
static void unstop_all_lwps (int unsuspend, struct lwp_info *except);
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
static void *add_lwp (ptid_t ptid);
static int linux_stopped_by_watchpoint (void);
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
static int linux_core_of_thread (ptid_t ptid);
static void proceed_all_lwps (void);
static int finish_step_over (struct lwp_info *lwp);
static CORE_ADDR get_stop_pc (struct lwp_info *lwp);
static int kill_lwp (unsigned long lwpid, int signo);
static void linux_enable_event_reporting (int pid);

/* True if the low target can hardware single-step.  Such targets
   don't need a BREAKPOINT_REINSERT_ADDR callback.  */

static int
can_hardware_single_step (void)
{
  return (the_low_target.breakpoint_reinsert_addr == NULL);
}

/* True if the low target supports memory breakpoints.  If so, we'll
   have a GET_PC implementation.  */

static int
supports_breakpoints (void)
{
  return (the_low_target.get_pc != NULL);
}

/* Returns true if this target can support fast tracepoints.  This
   does not mean that the in-process agent has been loaded in the
   inferior.  */

static int
supports_fast_tracepoints (void)
{
  return the_low_target.install_fast_tracepoint_jump_pad != NULL;
}

struct pending_signals
{
  int signal;
  siginfo_t info;
  struct pending_signals *prev;
};

#define PTRACE_ARG3_TYPE void *
#define PTRACE_ARG4_TYPE void *
#define PTRACE_XFER_TYPE long

#ifdef HAVE_LINUX_REGSETS
static char *disabled_regsets;
static int num_regsets;
#endif

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define target_is_async_p() (linux_event_pipe[0] != -1)

static void send_sigstop (struct lwp_info *lwp);
static void wait_for_sigstop (struct inferior_list_entry *entry);

/* Accepts an integer PID; returns a string representing a file that
   can be opened to get info for the child process.
   Space for the result is malloc'd, caller must free.  */

char *
linux_child_pid_to_exec_file (int pid)
{
  char *name1, *name2;

  name1 = xmalloc (MAXPATHLEN);
  name2 = xmalloc (MAXPATHLEN);
  memset (name2, 0, MAXPATHLEN);

  sprintf (name1, "/proc/%d/exe", pid);
  if (readlink (name1, name2, MAXPATHLEN) > 0)
    {
      free (name1);
      return name2;
    }
  else
    {
      free (name2);
      return name1;
    }
}

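/* Usage sketch (hypothetical caller, for illustration only):

     char *path = linux_child_pid_to_exec_file (pid);
     ...
     free (path);

   On readlink failure the function returns the "/proc/PID/exe" path
   itself, so callers must still be prepared for open () to fail.  */
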
/* Return non-zero if HEADER is a 64-bit ELF file.  */

static int
elf_64_header_p (const Elf64_Ehdr *header)
{
  return (header->e_ident[EI_MAG0] == ELFMAG0
          && header->e_ident[EI_MAG1] == ELFMAG1
          && header->e_ident[EI_MAG2] == ELFMAG2
          && header->e_ident[EI_MAG3] == ELFMAG3
          && header->e_ident[EI_CLASS] == ELFCLASS64);
}

/* Return non-zero if FILE is a 64-bit ELF file,
   zero if the file is not a 64-bit ELF file,
   and -1 if the file is not accessible or doesn't exist.  */

int
elf_64_file_p (const char *file)
{
  Elf64_Ehdr header;
  int fd;

  fd = open (file, O_RDONLY);
  if (fd < 0)
    return -1;

  if (read (fd, &header, sizeof (header)) != sizeof (header))
    {
      close (fd);
      return 0;
    }
  close (fd);

  return elf_64_header_p (&header);
}

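/* Example (hypothetical caller): combining the two helpers above to
   decide the inferior's ELF class:

     char *exe = linux_child_pid_to_exec_file (pid);
     int is64 = elf_64_file_p (exe);
     free (exe);

   is64 > 0 means a 64-bit ELF, 0 means not a 64-bit ELF (e.g. a
   32-bit executable), and -1 means the file could not be read.  */
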
static void
delete_lwp (struct lwp_info *lwp)
{
  remove_thread (get_lwp_thread (lwp));
  remove_inferior (&all_lwps, &lwp->head);
  free (lwp->arch_private);
  free (lwp);
}

/* Add a process to the common process list, and set its private
   data.  */

static struct process_info *
linux_add_process (int pid, int attached)
{
  struct process_info *proc;

  /* Is this the first process?  If so, then set the arch.  */
  if (all_processes.head == NULL)
    new_inferior = 1;

  proc = add_process (pid, attached);
  proc->private = xcalloc (1, sizeof (*proc->private));

  if (the_low_target.new_process != NULL)
    proc->private->arch_private = the_low_target.new_process ();

  return proc;
}

/* Wrapper function for waitpid which handles EINTR, and emulates
   __WALL for systems where that is not available.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret, out_errno;

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);

  if (flags & __WALL)
    {
      sigset_t block_mask, org_mask, wake_mask;
      int wnohang;

      wnohang = (flags & WNOHANG) != 0;
      flags &= ~(__WALL | __WCLONE);
      flags |= WNOHANG;

      /* Block all signals while here.  This avoids knowing about
         LinuxThread's signals.  */
      sigfillset (&block_mask);
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);

      /* ... except during the sigsuspend below.  */
      sigemptyset (&wake_mask);

      while (1)
        {
          /* Since all signals are blocked, there's no need to check
             for EINTR here.  */
          ret = waitpid (pid, status, flags);
          out_errno = errno;

          if (ret == -1 && out_errno != ECHILD)
            break;
          else if (ret > 0)
            break;

          if (flags & __WCLONE)
            {
              /* We've tried both flavors now.  If WNOHANG is set,
                 there's nothing else to do, just bail out.  */
              if (wnohang)
                break;

              if (debug_threads)
                fprintf (stderr, "blocking\n");

              /* Block waiting for signals.  */
              sigsuspend (&wake_mask);
            }

          flags ^= __WCLONE;
        }

      sigprocmask (SIG_SETMASK, &org_mask, NULL);
    }
  else
    {
      do
        ret = waitpid (pid, status, flags);
      while (ret == -1 && errno == EINTR);
      out_errno = errno;
    }

  if (debug_threads)
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
             pid, flags, status ? *status : -1, ret);

  errno = out_errno;
  return ret;
}

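/* Note on the emulation above: when __WALL must be emulated, the loop
   alternates waitpid (pid, status, WNOHANG) with
   waitpid (pid, status, WNOHANG | __WCLONE), sleeping in sigsuspend
   between rounds, so that both plain children and clone (thread)
   children are eventually reaped.  */
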
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  */

static void
handle_extended_wait (struct lwp_info *event_child, int wstat)
{
  int event = wstat >> 16;
  struct lwp_info *new_lwp;

  if (event == PTRACE_EVENT_CLONE)
    {
      ptid_t ptid;
      unsigned long new_pid;
      int ret, status = W_STOPCODE (SIGSTOP);

      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid))
        {
          /* The new child has a pending SIGSTOP.  We can't affect it until it
             hits the SIGSTOP, but we're already attached.  */

          ret = my_waitpid (new_pid, &status, __WALL);

          if (ret == -1)
            perror_with_name ("waiting for new child");
          else if (ret != new_pid)
            warning ("wait returned unexpected PID %d", ret);
          else if (!WIFSTOPPED (status))
            warning ("wait returned unexpected status 0x%x", status);
        }

      linux_enable_event_reporting (new_pid);

      ptid = ptid_build (pid_of (event_child), new_pid, 0);
      new_lwp = (struct lwp_info *) add_lwp (ptid);
      add_thread (ptid, new_lwp);

      /* Either we're going to immediately resume the new thread
         or leave it stopped.  linux_resume_one_lwp is a nop if it
         thinks the thread is currently running, so set this first
         before calling linux_resume_one_lwp.  */
      new_lwp->stopped = 1;

      /* Normally we will get the pending SIGSTOP.  But in some cases
         we might get another signal delivered to the group first.
         If we do get another signal, be sure not to lose it.  */
      if (WSTOPSIG (status) == SIGSTOP)
        {
          if (stopping_threads)
            new_lwp->stop_pc = get_stop_pc (new_lwp);
          else
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
        }
      else
        {
          new_lwp->stop_expected = 1;

          if (stopping_threads)
            {
              new_lwp->stop_pc = get_stop_pc (new_lwp);
              new_lwp->status_pending_p = 1;
              new_lwp->status_pending = status;
            }
          else
            /* Pass the signal on.  This is what GDB does - except
               shouldn't we really report it instead?  */
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
        }

      /* Always resume the current thread.  If we are stopping
         threads, it will have a pending SIGSTOP; we may as well
         collect it now.  */
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
    }
}

/* Return the PC as read from the regcache of LWP, without any
   adjustment.  */

static CORE_ADDR
get_pc (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  struct regcache *regcache;
  CORE_ADDR pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  regcache = get_thread_regcache (current_inferior, 1);
  pc = (*the_low_target.get_pc) (regcache);

  if (debug_threads)
    fprintf (stderr, "pc is 0x%lx\n", (long) pc);

  current_inferior = saved_inferior;
  return pc;
}

/* This function should only be called if LWP got a SIGTRAP.
   The SIGTRAP could mean several things.

   On i386, where decr_pc_after_break is non-zero:
   If we were single-stepping this process using PTRACE_SINGLESTEP,
   we will get only the one SIGTRAP (even if the instruction we
   stepped over was a breakpoint).  The value of $eip will be the
   next instruction.
   If we continue the process using PTRACE_CONT, we will get a
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
   the instruction after the breakpoint (i.e. needs to be
   decremented).  If we report the SIGTRAP to GDB, we must also
   report the undecremented PC.  If we cancel the SIGTRAP, we
   must resume at the decremented PC.

   (Presumably, not yet tested) On a non-decr_pc_after_break machine
   with hardware or kernel single-step:
   If we single-step over a breakpoint instruction, our PC will
   point at the following instruction.  If we continue and hit a
   breakpoint instruction, our PC will point at the breakpoint
   instruction.  */

static CORE_ADDR
get_stop_pc (struct lwp_info *lwp)
{
  CORE_ADDR stop_pc;

  if (the_low_target.get_pc == NULL)
    return 0;

  stop_pc = get_pc (lwp);

  if (WSTOPSIG (lwp->last_status) == SIGTRAP
      && !lwp->stepping
      && !lwp->stopped_by_watchpoint
      && lwp->last_status >> 16 == 0)
    stop_pc -= the_low_target.decr_pc_after_break;

  if (debug_threads)
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);

  return stop_pc;
}

static void *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lwp;

  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
  memset (lwp, 0, sizeof (*lwp));

  lwp->head.id = ptid;

  if (the_low_target.new_thread != NULL)
    lwp->arch_private = the_low_target.new_thread ();

  add_inferior_to_list (&all_lwps, &lwp->head);

  return lwp;
}

/* Start an inferior process and return its pid.
   ALLARGS is a vector of program-name and args.  */

static int
linux_create_inferior (char *program, char **allargs)
{
  struct lwp_info *new_lwp;
  int pid;
  ptid_t ptid;

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  pid = vfork ();
#else
  pid = fork ();
#endif
  if (pid < 0)
    perror_with_name ("fork");

  if (pid == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);

#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
      signal (__SIGRTMIN + 1, SIG_DFL);
#endif

      setpgid (0, 0);

      execv (program, allargs);
      if (errno == ENOENT)
        execvp (program, allargs);

      fprintf (stderr, "Cannot exec %s: %s.\n", program,
               strerror (errno));
      fflush (stderr);
      _exit (0177);
    }

  linux_add_process (pid, 0);

  ptid = ptid_build (pid, pid, 0);
  new_lwp = add_lwp (ptid);
  add_thread (ptid, new_lwp);
  new_lwp->must_set_ptrace_flags = 1;

  return pid;
}

/* Attach to an inferior process.  */

static void
linux_attach_lwp_1 (unsigned long lwpid, int initial)
{
  ptid_t ptid;
  struct lwp_info *new_lwp;

  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
    {
      if (!initial)
        {
          /* If we fail to attach to an LWP, just warn.  */
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
                   strerror (errno), errno);
          fflush (stderr);
          return;
        }
      else
        /* If we fail to attach to a process, report an error.  */
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
               strerror (errno), errno);
    }

  if (initial)
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
    ptid = ptid_build (lwpid, lwpid, 0);
  else
    {
      /* Note that extracting the pid from the current inferior is
         safe, since we're always called in the context of the same
         process as this new thread.  */
      int pid = pid_of (get_thread_lwp (current_inferior));
      ptid = ptid_build (pid, lwpid, 0);
    }

  new_lwp = (struct lwp_info *) add_lwp (ptid);
  add_thread (ptid, new_lwp);

  /* We need to wait for SIGSTOP before being able to make the next
     ptrace call on this LWP.  */
  new_lwp->must_set_ptrace_flags = 1;

  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
     brings it to a halt.

     There are several cases to consider here:

     1) gdbserver has already attached to the process and is being notified
        of a new thread that is being created.
        In this case we should ignore that SIGSTOP and resume the
        process.  This is handled below by setting stop_expected = 1,
        and the fact that add_thread sets last_resume_kind ==
        resume_continue.

     2) This is the first thread (the process thread), and we're attaching
        to it via attach_inferior.
        In this case we want the process thread to stop.
        This is handled by having linux_attach set last_resume_kind ==
        resume_stop after we return.
        ??? If the process already has several threads we leave the other
        threads running.

     3) GDB is connecting to gdbserver and is requesting an enumeration of all
        existing threads.
        In this case we want the thread to stop.
        FIXME: This case is currently not properly handled.
        We should wait for the SIGSTOP but don't.  Things work apparently
        because enough time passes between when we ptrace (ATTACH) and when
        gdb makes the next ptrace call on the thread.

     On the other hand, if we are currently trying to stop all threads, we
     should treat the new thread as if we had sent it a SIGSTOP.  This works
     because we are guaranteed that the add_lwp call above added us to the
     end of the list, and so the new thread has not yet reached
     wait_for_sigstop (but will).  */
  new_lwp->stop_expected = 1;
}

void
linux_attach_lwp (unsigned long lwpid)
{
  linux_attach_lwp_1 (lwpid, 0);
}

int
linux_attach (unsigned long pid)
{
  linux_attach_lwp_1 (pid, 1);
  linux_add_process (pid, 1);

  if (!non_stop)
    {
      struct thread_info *thread;

     /* Don't ignore the initial SIGSTOP if we just attached to this
        process.  It will be collected by wait shortly.  */
      thread = find_thread_ptid (ptid_build (pid, pid, 0));
      thread->last_resume_kind = resume_stop;
    }

  return 0;
}

struct counter
{
  int pid;
  int count;
};

static int
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
{
  struct counter *counter = args;

  if (ptid_get_pid (entry->id) == counter->pid)
    {
      if (++counter->count > 1)
        return 1;
    }

  return 0;
}

static int
last_thread_of_process_p (struct thread_info *thread)
{
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
  int pid = ptid_get_pid (ptid);
  struct counter counter = { pid , 0 };

  return (find_inferior (&all_threads,
                         second_thread_of_pid_p, &counter) == NULL);
}

/* Kill the inferior lwp.  */

static int
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int wstat;
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* We avoid killing the first thread here, because of a Linux kernel (at
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
     the children get a chance to be reaped, it will remain a zombie
     forever.  */

  if (lwpid_of (lwp) == pid)
    {
      if (debug_threads)
        fprintf (stderr, "lkop: is last of process %s\n",
                 target_pid_to_str (entry->id));
      return 0;
    }

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (pid > 0 && WIFSTOPPED (wstat));

  return 0;
}

static int
linux_kill (int pid)
{
  struct process_info *process;
  struct lwp_info *lwp;
  struct thread_info *thread;
  int wstat;
  int lwpid;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* If we're killing a running inferior, make sure it is stopped
     first, as PTRACE_KILL will not work otherwise.  */
  stop_all_lwps (0, NULL);

  find_inferior (&all_threads, linux_kill_one_lwp, &pid);

  /* See the comment in linux_kill_one_lwp.  We did not kill the first
     thread in the list, so do so now.  */
  lwp = find_lwp_pid (pid_to_ptid (pid));
  thread = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
             lwpid_of (lwp), pid);

  do
    {
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);

      /* Make sure it died.  The loop is most likely unnecessary.  */
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    } while (lwpid > 0 && WIFSTOPPED (wstat));

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

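/* Note on the kill loops above: PTRACE_KILL only acts on a stopped
   tracee (hence the stop_all_lwps call), and a stop event may still
   be delivered before the exit report, which is why both paths loop
   until the wait status is no longer WIFSTOPPED.  */
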
static int
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
{
  struct thread_info *thread = (struct thread_info *) entry;
  struct lwp_info *lwp = get_thread_lwp (thread);
  int pid = * (int *) args;

  if (ptid_get_pid (entry->id) != pid)
    return 0;

  /* If this process is stopped but is expecting a SIGSTOP, then make
     sure we take care of that now.  This isn't absolutely guaranteed
     to collect the SIGSTOP, but is fairly likely to.  */
  if (lwp->stop_expected)
    {
      int wstat;
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
      lwp->stop_expected = 0;
      linux_resume_one_lwp (lwp, 0, 0, NULL);
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
    }

  /* Flush any pending changes to the process's registers.  */
  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));

  /* Finally, let it resume.  */
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);

  delete_lwp (lwp);
  return 0;
}

static int
linux_detach (int pid)
{
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return -1;

  /* Stop all threads before detaching.  First, ptrace requires that
     the thread is stopped to successfully detach.  Second, thread_db
     may need to uninstall thread event breakpoints from memory, which
     only works with a stopped process anyway.  */
  stop_all_lwps (0, NULL);

#ifdef USE_THREAD_DB
  thread_db_detach (process);
#endif

  /* Stabilize threads (move out of jump pads).  */
  stabilize_threads ();

  find_inferior (&all_threads, linux_detach_one_lwp, &pid);

  the_target->mourn (process);

  /* Since we presently can only stop all lwps of all processes, we
     need to unstop lwps of other processes.  */
  unstop_all_lwps (0, NULL);
  return 0;
}

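/* The detach ordering above matters: stop everything, let thread_db
   remove its event breakpoints, stabilize threads out of any jump
   pads, and only then PTRACE_DETACH each LWP, so no thread is cut
   loose while still executing gdbserver-owned scratch code.  */
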
/* Remove all LWPs that belong to process PROC from the lwp list.  */

static int
delete_lwp_callback (struct inferior_list_entry *entry, void *proc)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct process_info *process = proc;

  if (pid_of (lwp) == pid_of (process))
    delete_lwp (lwp);

  return 0;
}

static void
linux_mourn (struct process_info *process)
{
  struct process_info_private *priv;

#ifdef USE_THREAD_DB
  thread_db_mourn (process);
#endif

  find_inferior (&all_lwps, delete_lwp_callback, process);

  /* Free all private data.  */
  priv = process->private;
  free (priv->arch_private);
  free (priv);
  process->private = NULL;

  remove_process (process);
}

static void
linux_join (int pid)
{
  int status, ret;
  struct process_info *process;

  process = find_process_pid (pid);
  if (process == NULL)
    return;

  do {
    ret = my_waitpid (pid, &status, 0);
    if (WIFEXITED (status) || WIFSIGNALED (status))
      break;
  } while (ret != -1 || errno != ECHILD);
}

/* Return nonzero if the given thread is still alive.  */
static int
linux_thread_alive (ptid_t ptid)
{
  struct lwp_info *lwp = find_lwp_pid (ptid);

  /* We assume we always know if a thread exits.  If a whole process
     exited but we still haven't been able to report it to GDB, we'll
     hold on to the last lwp of the dead process.  */
  if (lwp != NULL)
    return !lwp->dead;
  else
    return 0;
}

/* Return 1 if this lwp has an interesting status pending.  */
static int
status_pending_p_callback (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  ptid_t ptid = * (ptid_t *) arg;
  struct thread_info *thread;

  /* Check if we're only interested in events from a specific process
     or its lwps.  */
  if (!ptid_equal (minus_one_ptid, ptid)
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
    return 0;

  thread = get_lwp_thread (lwp);

  /* If we got a `vCont;t', but we haven't reported a stop yet, do
     report any status pending the LWP may have.  */
  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    return 0;

  return lwp->status_pending_p;
}

static int
same_lwp (struct inferior_list_entry *entry, void *data)
{
  ptid_t ptid = *(ptid_t *) data;
  int lwp;

  if (ptid_get_lwp (ptid) != 0)
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  if (ptid_get_lwp (entry->id) == lwp)
    return 1;

  return 0;
}

struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
}

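/* Example of the matching rule above: find_lwp_pid accepts either a
   bare pid or a full ptid, so (with illustrative numbers)
   find_lwp_pid (pid_to_ptid (1234)) and
   find_lwp_pid (ptid_build (1234, 1234, 0)) both locate the LWP
   whose kernel LWP ID is 1234.  */
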
static struct lwp_info *
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
{
  int ret;
  int to_wait_for = -1;
  struct lwp_info *child = NULL;

  if (debug_threads)
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));

  if (ptid_equal (ptid, minus_one_ptid))
    to_wait_for = -1;                   /* any child */
  else
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */

  options |= __WALL;

retry:

  ret = my_waitpid (to_wait_for, wstatp, options);
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
    return NULL;
  else if (ret == -1)
    perror_with_name ("waitpid");

  if (debug_threads
      && (!WIFSTOPPED (*wstatp)
          || (WSTOPSIG (*wstatp) != 32
              && WSTOPSIG (*wstatp) != 33)))
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);

  child = find_lwp_pid (pid_to_ptid (ret));

  /* If we didn't find a process, one of two things presumably happened:
     - A process we started and then detached from has exited.  Ignore it.
     - A process we are controlling has forked and the new child's stop
     was reported to us by the kernel.  Save its PID.  */
  if (child == NULL && WIFSTOPPED (*wstatp))
    {
      add_pid_to_list (&stopped_pids, ret);
      goto retry;
    }
  else if (child == NULL)
    goto retry;

  child->stopped = 1;

  child->last_status = *wstatp;

  /* Architecture-specific setup after inferior is running.
     This needs to happen after we have attached to the inferior
     and it is stopped for the first time, but before we access
     any inferior registers.  */
  if (new_inferior)
    {
      the_low_target.arch_setup ();
#ifdef HAVE_LINUX_REGSETS
      memset (disabled_regsets, 0, num_regsets);
#endif
      new_inferior = 0;
    }

  /* Fetch the possibly triggered data watchpoint info and store it in
     CHILD.

     On some archs, like x86, that use debug registers to set
     watchpoints, it's possible that the way to know which watched
     address trapped, is to check the register that is used to select
     which address to watch.  Problem is, between setting the
     watchpoint and reading back which data address trapped, the user
     may change the set of watchpoints, and, as a consequence, GDB
     changes the debug registers in the inferior.  To avoid reading
     back a stale stopped-data-address when that happens, we cache in
     LP the fact that a watchpoint trapped, and the corresponding data
     address, as soon as we see CHILD stop with a SIGTRAP.  If GDB
     changes the debug registers meanwhile, we have the cached data we
     can rely on.  */

  if (WIFSTOPPED (*wstatp) && WSTOPSIG (*wstatp) == SIGTRAP)
    {
      if (the_low_target.stopped_by_watchpoint == NULL)
        {
          child->stopped_by_watchpoint = 0;
        }
      else
        {
          struct thread_info *saved_inferior;

          saved_inferior = current_inferior;
          current_inferior = get_lwp_thread (child);

          child->stopped_by_watchpoint
            = the_low_target.stopped_by_watchpoint ();

          if (child->stopped_by_watchpoint)
            {
              if (the_low_target.stopped_data_address != NULL)
                child->stopped_data_address
                  = the_low_target.stopped_data_address ();
              else
                child->stopped_data_address = 0;
            }

          current_inferior = saved_inferior;
        }
    }

  /* Store the STOP_PC, with adjustment applied.  This depends on the
     architecture being defined already (so that CHILD has a valid
     regcache), and on LAST_STATUS being set (to check for SIGTRAP or
     not).  */
  if (WIFSTOPPED (*wstatp))
    child->stop_pc = get_stop_pc (child);

  if (debug_threads
      && WIFSTOPPED (*wstatp)
      && the_low_target.get_pc != NULL)
    {
      struct thread_info *saved_inferior = current_inferior;
      struct regcache *regcache;
      CORE_ADDR pc;

      current_inferior = get_lwp_thread (child);
      regcache = get_thread_regcache (current_inferior, 1);
      pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
      current_inferior = saved_inferior;
    }

  return child;
}

/* This function should only be called if the LWP got a SIGTRAP.

   Handle any tracepoint steps or hits.  Return 1 if a tracepoint
   event was handled, 0 otherwise.  */

static int
handle_tracepoints (struct lwp_info *lwp)
{
  struct thread_info *tinfo = get_lwp_thread (lwp);
  int tpoint_related_event = 0;

  /* If this tracepoint hit causes a tracing stop, we'll immediately
     uninsert tracepoints.  To do this, we temporarily pause all
     threads, unpatch away, and then unpause threads.  We need to make
     sure the unpausing doesn't resume LWP too.  */
  lwp->suspended++;

  /* And we need to be sure that any all-threads-stopping doesn't try
     to move threads out of the jump pads, as it could deadlock the
     inferior (LWP could be in the jump pad, maybe even holding the
     lock.)  */

  /* Do any necessary step collect actions.  */
  tpoint_related_event |= tracepoint_finished_step (tinfo, lwp->stop_pc);

  tpoint_related_event |= handle_tracepoint_bkpts (tinfo, lwp->stop_pc);

  /* See if we just hit a tracepoint and do its main collect
     actions.  */
  tpoint_related_event |= tracepoint_was_hit (tinfo, lwp->stop_pc);

  lwp->suspended--;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (!stabilizing_threads || lwp->collecting_fast_tracepoint);

  if (tpoint_related_event)
    {
      if (debug_threads)
        fprintf (stderr, "got a tracepoint event\n");
      return 1;
    }

  return 0;
}

/* Convenience wrapper.  Returns true if LWP is presently collecting a
   fast tracepoint.  */

static int
linux_fast_tracepoint_collecting (struct lwp_info *lwp,
                                  struct fast_tpoint_collect_status *status)
{
  CORE_ADDR thread_area;

  if (the_low_target.get_thread_area == NULL)
    return 0;

  /* Get the thread area address.  This is used to recognize which
     thread is which when tracing with the in-process agent library.
     We don't read anything from the address, and treat it as opaque;
     it's the address itself that we assume is unique per-thread.  */
  if ((*the_low_target.get_thread_area) (lwpid_of (lwp), &thread_area) == -1)
    return 0;

  return fast_tracepoint_collecting (thread_area, lwp->stop_pc, status);
}

/* The reason we resume in the caller is that we want to be able
   to pass lwp->status_pending as WSTAT, and we need to clear
   status_pending_p before resuming, otherwise, linux_resume_one_lwp
   refuses to resume.  */

static int
maybe_move_out_of_jump_pad (struct lwp_info *lwp, int *wstat)
{
  struct thread_info *saved_inferior;

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((wstat == NULL
       || (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) != SIGTRAP))
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      struct fast_tpoint_collect_status status;
      int r;

      if (debug_threads)
        fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad.\n",
                 lwpid_of (lwp));

      r = linux_fast_tracepoint_collecting (lwp, &status);

      if (wstat == NULL
          || (WSTOPSIG (*wstat) != SIGILL
              && WSTOPSIG (*wstat) != SIGFPE
              && WSTOPSIG (*wstat) != SIGSEGV
              && WSTOPSIG (*wstat) != SIGBUS))
        {
          lwp->collecting_fast_tracepoint = r;

          if (r != 0)
            {
              if (r == 1 && lwp->exit_jump_pad_bkpt == NULL)
                {
                  /* Haven't executed the original instruction yet.
                     Set breakpoint there, and wait till it's hit,
                     then single-step until exiting the jump pad.  */
                  lwp->exit_jump_pad_bkpt
                    = set_breakpoint_at (status.adjusted_insn_addr, NULL);
                }

              if (debug_threads)
                fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...it does\n",
                 lwpid_of (lwp));

              return 1;
            }
        }
      else
        {
          /* If we get a synchronous signal while collecting, *and*
             while executing the (relocated) original instruction,
             reset the PC to point at the tpoint address, before
             reporting to GDB.  Otherwise, it's an IPA lib bug: just
             report the signal to GDB, and pray for the best.  */

          lwp->collecting_fast_tracepoint = 0;

          if (r != 0
              && (status.adjusted_insn_addr <= lwp->stop_pc
                  && lwp->stop_pc < status.adjusted_insn_addr_end))
            {
              siginfo_t info;
              struct regcache *regcache;

              /* The si_addr on a few signals references the address
                 of the faulting instruction.  Adjust that as
                 well.  */
              if ((WSTOPSIG (*wstat) == SIGILL
                   || WSTOPSIG (*wstat) == SIGFPE
                   || WSTOPSIG (*wstat) == SIGBUS
                   || WSTOPSIG (*wstat) == SIGSEGV)
                  && ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &info) == 0
                  /* Final check just to make sure we don't clobber
                     the siginfo of non-kernel-sent signals.  */
                  && (uintptr_t) info.si_addr == lwp->stop_pc)
                {
                  info.si_addr = (void *) (uintptr_t) status.tpoint_addr;
                  ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &info);
                }

              regcache = get_thread_regcache (get_lwp_thread (lwp), 1);
              (*the_low_target.set_pc) (regcache, status.tpoint_addr);
              lwp->stop_pc = status.tpoint_addr;

              /* Cancel any fast tracepoint lock this thread was
                 holding.  */
              force_unlock_trace_buffer ();
            }

          if (lwp->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "Cancelling fast exit-jump-pad: removing bkpt. "
                         "stopping all threads momentarily.\n");

              stop_all_lwps (1, lwp);
              cancel_breakpoints ();

              delete_breakpoint (lwp->exit_jump_pad_bkpt);
              lwp->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, lwp);

              gdb_assert (lwp->suspended >= 0);
            }
        }
    }

  if (debug_threads)
    fprintf (stderr, "\
Checking whether LWP %ld needs to move out of the jump pad...no\n",
             lwpid_of (lwp));
  return 0;
}

/* Enqueue one signal in the "signals to report later when out of the
   jump pad" list.  */

static void
enqueue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  struct pending_signals *p_sig;

  if (debug_threads)
    fprintf (stderr, "\
Deferring signal %d for LWP %ld.\n", WSTOPSIG (*wstat), lwpid_of (lwp));

  if (debug_threads)
    {
      struct pending_signals *sig;

      for (sig = lwp->pending_signals_to_report;
           sig != NULL;
           sig = sig->prev)
        fprintf (stderr,
                 "   Already queued %d\n",
                 sig->signal);

      fprintf (stderr, "   (no more currently queued signals)\n");
    }

  p_sig = xmalloc (sizeof (*p_sig));
  p_sig->prev = lwp->pending_signals_to_report;
  p_sig->signal = WSTOPSIG (*wstat);
  memset (&p_sig->info, 0, sizeof (siginfo_t));
  ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

  lwp->pending_signals_to_report = p_sig;
}

/* Dequeue one signal from the "signals to report later when out of
   the jump pad" list.  */

static int
dequeue_one_deferred_signal (struct lwp_info *lwp, int *wstat)
{
  if (lwp->pending_signals_to_report != NULL)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals_to_report;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      *wstat = W_STOPCODE ((*p_sig)->signal);
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
      free (*p_sig);
      *p_sig = NULL;

      if (debug_threads)
        fprintf (stderr, "Reporting deferred signal %d for LWP %ld.\n",
                 WSTOPSIG (*wstat), lwpid_of (lwp));

      if (debug_threads)
        {
          struct pending_signals *sig;

          for (sig = lwp->pending_signals_to_report;
               sig != NULL;
               sig = sig->prev)
            fprintf (stderr,
                     "   Still queued %d\n",
                     sig->signal);

          fprintf (stderr, "   (no more queued signals)\n");
        }

      return 1;
    }

  return 0;
}

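/* Queue shape: pending_signals_to_report is a singly linked list
   threaded through ->prev with the newest entry at the head;
   enqueue_one_deferred_signal pushes at the head, while the dequeue
   above walks to the tail, so deferred signals are re-reported in
   their original order.  */
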
/* Arrange for a breakpoint to be hit again later.  We don't keep the
   SIGTRAP status and don't forward the SIGTRAP signal to the LWP.  We
   will handle the current event, eventually we will resume this LWP,
   and this breakpoint will trap again.  */

static int
cancel_breakpoint (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;

  /* There's nothing to do if we don't support breakpoints.  */
  if (!supports_breakpoints ())
    return 0;

  /* breakpoint_at reads from current inferior.  */
  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if ((*the_low_target.breakpoint_at) (lwp->stop_pc))
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: Push back breakpoint for %s\n",
                 target_pid_to_str (ptid_of (lwp)));

      /* Back up the PC if necessary.  */
      if (the_low_target.decr_pc_after_break)
        {
          struct regcache *regcache
            = get_thread_regcache (current_inferior, 1);
          (*the_low_target.set_pc) (regcache, lwp->stop_pc);
        }

      current_inferior = saved_inferior;
      return 1;
    }
  else
    {
      if (debug_threads)
        fprintf (stderr,
                 "CB: No breakpoint found at %s for [%s]\n",
                 paddress (lwp->stop_pc),
                 target_pid_to_str (ptid_of (lwp)));
    }

  current_inferior = saved_inferior;
  return 0;
}

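/* Scenario for cancel_breakpoint: if two LWPs trip a breakpoint at
   the same time but only one SIGTRAP can be reported, the losing
   LWP's PC is rewound to the breakpoint address and its SIGTRAP
   dropped; once resumed it simply hits the breakpoint again, so no
   event is lost.  */
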
/* When the event-loop is doing a step-over, this points at the thread
   being stepped.  */
ptid_t step_over_bkpt;

/* Wait for an event from child PID.  If PID is -1, wait for any
   child.  Store the stop status through the status pointer WSTAT.
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
   event was found and OPTIONS contains WNOHANG.  Return the PID of
   the stopped child otherwise.  */

static int
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
{
  struct lwp_info *event_child, *requested_child;

  event_child = NULL;
  requested_child = NULL;

  /* Check for a lwp with a pending status.  */

  if (ptid_equal (ptid, minus_one_ptid)
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
    {
      event_child = (struct lwp_info *)
        find_inferior (&all_lwps, status_pending_p_callback, &ptid);
      if (debug_threads && event_child)
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
    }
  else
    {
      requested_child = find_lwp_pid (ptid);

      if (!stopping_threads
          && requested_child->status_pending_p
          && requested_child->collecting_fast_tracepoint)
        {
          enqueue_one_deferred_signal (requested_child,
                                       &requested_child->status_pending);
          requested_child->status_pending_p = 0;
          requested_child->status_pending = 0;
          linux_resume_one_lwp (requested_child, 0, 0, NULL);
        }

      if (requested_child->suspended
          && requested_child->status_pending_p)
        fatal ("requesting an event out of a suspended child?");

      if (requested_child->status_pending_p)
        event_child = requested_child;
    }

  if (event_child != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
                 lwpid_of (event_child), event_child->status_pending);
      *wstat = event_child->status_pending;
      event_child->status_pending_p = 0;
      event_child->status_pending = 0;
      current_inferior = get_lwp_thread (event_child);
      return lwpid_of (event_child);
    }

  /* We only enter this loop if no process has a pending wait status.  Thus
     any action taken in response to a wait status inside this loop is
     responding as soon as we detect the status, not after any pending
     events.  */
  while (1)
    {
      event_child = linux_wait_for_lwp (ptid, wstat, options);

      if ((options & WNOHANG) && event_child == NULL)
        {
          if (debug_threads)
            fprintf (stderr, "WNOHANG set, no event found\n");
          return 0;
        }

      if (event_child == NULL)
        error ("event from unknown child");

      current_inferior = get_lwp_thread (event_child);

      /* Check for thread exit.  */
      if (! WIFSTOPPED (*wstat))
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));

          /* If the last thread is exiting, just return.  */
          if (last_thread_of_process_p (current_inferior))
            {
              if (debug_threads)
                fprintf (stderr, "LWP %ld is last lwp of process\n",
                         lwpid_of (event_child));
              return lwpid_of (event_child);
            }

          if (!non_stop)
            {
              current_inferior = (struct thread_info *) all_threads.head;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now %ld\n",
                         lwpid_of (get_thread_lwp (current_inferior)));
            }
          else
            {
              current_inferior = NULL;
              if (debug_threads)
                fprintf (stderr, "Current inferior is now <NULL>\n");
            }

          /* If we were waiting for this particular child to do something...
             well, it did something.  */
          if (requested_child != NULL)
            {
              int lwpid = lwpid_of (event_child);

              /* Cancel the step-over operation --- the thread that
                 started it is gone.  */
              if (finish_step_over (event_child))
                unstop_all_lwps (1, event_child);
              delete_lwp (event_child);
              return lwpid;
            }

          delete_lwp (event_child);

          /* Wait for a more interesting event.  */
          continue;
        }

      if (event_child->must_set_ptrace_flags)
        {
          linux_enable_event_reporting (lwpid_of (event_child));
          event_child->must_set_ptrace_flags = 0;
        }

      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
          && *wstat >> 16 != 0)
        {
          handle_extended_wait (event_child, *wstat);
          continue;
        }

      if (WIFSTOPPED (*wstat)
          && WSTOPSIG (*wstat) == SIGSTOP
          && event_child->stop_expected)
        {
          int should_stop;

          if (debug_threads)
            fprintf (stderr, "Expected stop.\n");
          event_child->stop_expected = 0;

          should_stop = (current_inferior->last_resume_kind == resume_stop
                         || stopping_threads);

          if (!should_stop)
            {
              linux_resume_one_lwp (event_child,
                                    event_child->stepping, 0, NULL);
              continue;
            }
        }

      return lwpid_of (event_child);
    }

  /* NOTREACHED */
  return 0;
}

static int
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
{
  ptid_t wait_ptid;

  if (ptid_is_pid (ptid))
    {
      /* A request to wait for a specific tgid.  This is not possible
         with waitpid, so instead, we wait for any child, and leave
         children we're not interested in right now with a pending
         status to report later.  */
      wait_ptid = minus_one_ptid;
    }
  else
    wait_ptid = ptid;

  while (1)
    {
      int event_pid;

      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);

      if (event_pid > 0
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
        {
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));

          if (! WIFSTOPPED (*wstat))
            mark_lwp_dead (event_child, *wstat);
          else
            {
              event_child->status_pending_p = 1;
              event_child->status_pending = *wstat;
            }
        }
      else
        return event_pid;
    }
}


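/* Example of the filtering above: a wait on tgid 1234 (illustrative
   number) may first reap an event from an unrelated process; that
   status is parked in the other LWP's status_pending slot (or the
   LWP is marked dead), and the loop waits again until an event for
   1234 itself arrives.  */
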
/* Count the LWPs that have had events.  */

static int
count_events_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *count = data;

  gdb_assert (count != NULL);

  /* Count only resumed LWPs that have a SIGTRAP event pending that
     should be reported to GDB.  */
  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind != resume_stop
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);

  if (thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && thread->last_resume_kind == resume_step
      && lp->status_pending_p)
    return 1;
  else
    return 0;
}

/* Select the Nth LWP that has had a SIGTRAP event that should be
   reported to GDB.  */

static int
select_event_lwp_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  int *selector = data;

  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have a SIGTRAP event pending.  */
  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !breakpoint_inserted_here (lp->stop_pc))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

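/* Callback for find_inferior.  DATA is the LWP whose event is being
   reported to GDB; arrange for every other LWP that has hit a GDB
   breakpoint to hit it again later.  */
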
static int
cancel_breakpoints_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lp);
  struct lwp_info *event_lp = data;

  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
  if (lp == event_lp)
    return 0;

  /* If a LWP other than the LWP that we're reporting an event for has
     hit a GDB breakpoint (as opposed to some random trap signal),
     then just arrange for it to hit it again later.  We don't keep
     the SIGTRAP status and don't forward the SIGTRAP signal to the
     LWP.  We will handle the current event, eventually we will resume
     all LWPs, and this one will get its breakpoint trap again.

     If we do not do this, then we run the risk that the user will
     delete or disable the breakpoint, but the LWP will have already
     tripped on it.  */

  if (thread->last_resume_kind != resume_stop
      && thread->last_status.kind == TARGET_WAITKIND_IGNORE
      && lp->status_pending_p
      && WIFSTOPPED (lp->status_pending)
      && WSTOPSIG (lp->status_pending) == SIGTRAP
      && !lp->stepping
      && !lp->stopped_by_watchpoint
      && cancel_breakpoint (lp))
    /* Throw away the SIGTRAP.  */
    lp->status_pending_p = 0;

  return 0;
}

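/* Cancel pending GDB breakpoint hits in all LWPs; see
   cancel_breakpoints_callback.  */
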
static void
linux_cancel_breakpoints (void)
{
  find_inferior (&all_lwps, cancel_breakpoints_callback, NULL);
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (struct lwp_info **orig_lp)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_threads)
        fprintf (stderr,
                 "SEL: Select single-step %s\n",
                 target_pid_to_str (ptid_of (event_lp)));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
         which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      find_inferior (&all_lwps, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_threads && num_events > 1)
        fprintf (stderr,
                 "SEL: Found %d SIGTRAP events, selecting #%d\n",
                 num_events, random_selector);

      event_lp = (struct lwp_info *) find_inferior (&all_lwps,
                                                    select_event_lwp_callback,
                                                    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
    }
}

/* Set this inferior LWP's state as "want-stopped".  We won't resume
   this LWP until the client gives us another action for it.  */

static void
gdb_wants_lwp_stopped (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  /* Most threads are stopped implicitly (all-stop); tag that with
     signal 0.  The thread being explicitly reported stopped to the
     client gets its status fixed up afterwards.  */
  thread->last_status.kind = TARGET_WAITKIND_STOPPED;
  thread->last_status.value.sig = TARGET_SIGNAL_0;

  thread->last_resume_kind = resume_stop;
}

/* Decrement the suspend count of an LWP.  */

static int
unsuspend_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended--;

  gdb_assert (lwp->suspended >= 0);
  return 0;
}

/* Decrement the suspend count of all LWPs, except EXCEPT, if
   non-NULL.  */

static void
unsuspend_all_lwps (struct lwp_info *except)
{
  find_inferior (&all_lwps, unsuspend_one_lwp, except);
}

/* Set all LWPs' states as "want-stopped".  */

static void
gdb_wants_all_stopped (void)
{
  for_each_inferior (&all_lwps, gdb_wants_lwp_stopped);
}

static void move_out_of_jump_pad_callback (struct inferior_list_entry *entry);
static int stuck_in_jump_pad_callback (struct inferior_list_entry *entry,
                                       void *data);
static int lwp_running (struct inferior_list_entry *entry, void *data);
static ptid_t linux_wait_1 (ptid_t ptid,
                            struct target_waitstatus *ourstatus,
                            int target_options);

/* Stabilize threads (move out of jump pads).

   If a thread is midway through collecting a fast tracepoint, we
   need to finish the collection and move it out of the jump pad
   before reporting the signal.

   This avoids recursion while collecting (when a signal arrives
   midway, and the signal handler itself collects), which would trash
   the trace buffer.  In case the user set a breakpoint in a signal
   handler, this avoids the backtrace showing the jump pad, etc.
   Most importantly, there are certain things we can't do safely if
   threads are stopped in a jump pad (or in its callees).  For
   example:

     - starting a new trace run.  A thread still collecting the
   previous run could trash the trace buffer when resumed.  The trace
   buffer control structures would have been reset but the thread
   would have no way to tell.  The thread could even be midway
   through memcpy'ing to the buffer, which would mean that when
   resumed, it would clobber the trace buffer that had been set up
   for the new run.

     - we can't rewrite/reuse the jump pads for new tracepoints
   safely.  Say you do tstart while a thread is stopped midway
   through collecting.  When the thread is later resumed, it finishes
   the collection, and returns to the jump pad, to execute the
   original instruction that was under the tracepoint jump at the
   time the older run had been started.  If the jump pad had since
   been rewritten for something else in the new run, the thread would
   now execute the wrong / random instructions.  */

static void
linux_stabilize_threads (void)
{
  struct thread_info *save_inferior;
  struct lwp_info *lwp_stuck;

  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      fprintf (stderr, "can't stabilize, LWP %ld is stuck in jump pad\n",
               lwpid_of (lwp_stuck));
      return;
    }

  save_inferior = current_inferior;

  stabilizing_threads = 1;

  /* Kick 'em all.  */
  for_each_inferior (&all_lwps, move_out_of_jump_pad_callback);

  /* Loop until all are stopped out of the jump pads.  */
  while (find_inferior (&all_lwps, lwp_running, NULL) != NULL)
    {
      struct target_waitstatus ourstatus;
      struct lwp_info *lwp;
      ptid_t ptid;
      int wstat;

      /* Note that we go through the full wait event loop.  While
         moving threads out of jump pad, we need to be able to step
         over internal breakpoints and such.  */
      ptid = linux_wait_1 (minus_one_ptid, &ourstatus, 0);

      if (ourstatus.kind == TARGET_WAITKIND_STOPPED)
        {
          lwp = get_thread_lwp (current_inferior);

          /* Lock it.  */
          lwp->suspended++;

          if (ourstatus.value.sig != TARGET_SIGNAL_0
              || current_inferior->last_resume_kind == resume_stop)
            {
              wstat = W_STOPCODE (target_signal_to_host (ourstatus.value.sig));
              enqueue_one_deferred_signal (lwp, &wstat);
            }
        }
    }

  find_inferior (&all_lwps, unsuspend_one_lwp, NULL);

  stabilizing_threads = 0;

  current_inferior = save_inferior;

  lwp_stuck
    = (struct lwp_info *) find_inferior (&all_lwps,
                                         stuck_in_jump_pad_callback, NULL);
  if (lwp_stuck != NULL)
    {
      if (debug_threads)
        fprintf (stderr, "couldn't stabilize, LWP %ld got stuck in jump pad\n",
                 lwpid_of (lwp_stuck));
    }
}

/* Wait for process, returns status.  */

static ptid_t
linux_wait_1 (ptid_t ptid,
              struct target_waitstatus *ourstatus, int target_options)
{
  int w;
  struct lwp_info *event_child;
  int options;
  int pid;
  int step_over_finished;
  int bp_explains_trap;
  int maybe_internal_trap;
  int report_to_gdb;
  int trace_event;

  /* Translate generic target options into linux options.  */
  options = __WALL;
  if (target_options & TARGET_WNOHANG)
    options |= WNOHANG;

retry:
  bp_explains_trap = 0;
  trace_event = 0;
  ourstatus->kind = TARGET_WAITKIND_IGNORE;

  /* If we were only supposed to resume one thread, only wait for
     that thread - if it's still alive.  If it died, however - which
     can happen if we're coming from the thread death case below -
     then we need to make sure we restart the other threads.  We could
     pick a thread at random or restart all; restarting all is less
     arbitrary.  */
  if (!non_stop
      && !ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct thread_info *thread;

      thread = (struct thread_info *) find_inferior_id (&all_threads,
                                                        cont_thread);

      /* No stepping, no signal - unless one is pending already, of course.  */
      if (thread == NULL)
        {
          struct thread_resume resume_info;
          resume_info.thread = minus_one_ptid;
          resume_info.kind = resume_continue;
          resume_info.sig = 0;
          linux_resume (&resume_info, 1);
        }
      else
        ptid = cont_thread;
    }

  if (ptid_equal (step_over_bkpt, null_ptid))
    pid = linux_wait_for_event (ptid, &w, options);
  else
    {
      if (debug_threads)
        fprintf (stderr, "step_over_bkpt set [%s], doing a blocking wait\n",
                 target_pid_to_str (step_over_bkpt));
      pid = linux_wait_for_event (step_over_bkpt, &w, options & ~WNOHANG);
    }

  if (pid == 0) /* only if TARGET_WNOHANG */
    return null_ptid;

  event_child = get_thread_lwp (current_inferior);

  /* If we are waiting for a particular child, and it exited,
     linux_wait_for_event will return its exit status.  Similarly if
     the last child exited.  If this is not the last child, however,
     do not report it as exited until there is a 'thread exited' response
     available in the remote protocol.  Instead, just wait for another event.
     This should be safe, because if the thread crashed we will already
     have reported the termination signal to GDB; that should stop any
     in-progress stepping operations, etc.

     Report the exit status of the last thread to exit.  This matches
     LinuxThreads' behavior.  */

  if (last_thread_of_process_p (current_inferior))
    {
      if (WIFEXITED (w) || WIFSIGNALED (w))
        {
          if (WIFEXITED (w))
            {
              ourstatus->kind = TARGET_WAITKIND_EXITED;
              ourstatus->value.integer = WEXITSTATUS (w);

              if (debug_threads)
                fprintf (stderr, "\nChild exited with retcode = %x \n",
                         WEXITSTATUS (w));
            }
          else
            {
              ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
              ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));

              if (debug_threads)
                fprintf (stderr, "\nChild terminated with signal = %x \n",
                         WTERMSIG (w));
            }

          return ptid_of (event_child);
        }
    }
  else
    {
      if (!WIFSTOPPED (w))
        goto retry;
    }

  /* If this event was not handled before, and is not a SIGTRAP, we
     report it.  SIGILL and SIGSEGV are also treated as traps in case
     a breakpoint is inserted at the current PC.  If this target does
     not support internal breakpoints at all, we also report the
     SIGTRAP without further processing; it's of no concern to us.  */
  maybe_internal_trap
    = (supports_breakpoints ()
       && (WSTOPSIG (w) == SIGTRAP
           || ((WSTOPSIG (w) == SIGILL
                || WSTOPSIG (w) == SIGSEGV)
               && (*the_low_target.breakpoint_at) (event_child->stop_pc))));

  if (maybe_internal_trap)
    {
      /* Handle anything that requires bookkeeping before deciding to
         report the event or continue waiting.  */

      /* First check if we can explain the SIGTRAP with an internal
         breakpoint, or if we should possibly report the event to GDB.
         Do this before anything that may remove or insert a
         breakpoint.  */
      bp_explains_trap = breakpoint_inserted_here (event_child->stop_pc);

      /* We have a SIGTRAP, possibly a step-over dance has just
         finished.  If so, tweak the state machine accordingly,
         reinsert breakpoints and delete any reinsert (software
         single-step) breakpoints.  */
      step_over_finished = finish_step_over (event_child);

      /* Now invoke the callbacks of any internal breakpoints there.  */
      check_breakpoints (event_child->stop_pc);

      /* Handle tracepoint data collecting.  This may overflow the
         trace buffer, and cause a tracing stop, removing
         breakpoints.  */
      trace_event = handle_tracepoints (event_child);

      if (bp_explains_trap)
        {
          /* If we stepped or ran into an internal breakpoint, we've
             already handled it.  So next time we resume (from this
             PC), we should step over it.  */
          if (debug_threads)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");

          if (breakpoint_here (event_child->stop_pc))
            event_child->need_step_over = 1;
        }
    }
  else
    {
      /* We have some other signal, possibly a step-over dance was in
         progress, and it should be cancelled too.  */
      step_over_finished = finish_step_over (event_child);
    }

  /* We have all the data we need.  Either report the event to GDB, or
     resume threads and keep waiting for more.  */

  /* If we're collecting a fast tracepoint, finish the collection and
     move out of the jump pad before delivering a signal.  See
     linux_stabilize_threads.  */

  if (WIFSTOPPED (w)
      && WSTOPSIG (w) != SIGTRAP
      && supports_fast_tracepoints ()
      && in_process_agent_loaded ())
    {
      if (debug_threads)
        fprintf (stderr,
                 "Got signal %d for LWP %ld.  Check if we need "
                 "to defer or adjust it.\n",
                 WSTOPSIG (w), lwpid_of (event_child));

      /* Allow debugging the jump pad itself.  */
      if (current_inferior->last_resume_kind != resume_step
          && maybe_move_out_of_jump_pad (event_child, &w))
        {
          enqueue_one_deferred_signal (event_child, &w);

          if (debug_threads)
            fprintf (stderr,
                     "Signal %d for LWP %ld deferred (in jump pad)\n",
                     WSTOPSIG (w), lwpid_of (event_child));

          linux_resume_one_lwp (event_child, 0, 0, NULL);
          goto retry;
        }
    }

  if (event_child->collecting_fast_tracepoint)
    {
      if (debug_threads)
        fprintf (stderr, "\
LWP %ld was trying to move out of the jump pad (%d).  \
Check if we're already there.\n",
                 lwpid_of (event_child),
                 event_child->collecting_fast_tracepoint);

      trace_event = 1;

      event_child->collecting_fast_tracepoint
        = linux_fast_tracepoint_collecting (event_child, NULL);

      if (event_child->collecting_fast_tracepoint != 1)
        {
          /* No longer need this breakpoint.  */
          if (event_child->exit_jump_pad_bkpt != NULL)
            {
              if (debug_threads)
                fprintf (stderr,
                         "No longer need exit-jump-pad bkpt; removing it, "
                         "stopping all threads momentarily.\n");

              /* Other running threads could hit this breakpoint.
                 We don't handle moribund locations like GDB does,
                 instead we always pause all threads when removing
                 breakpoints, so that any step-over or
                 decr_pc_after_break adjustment is always taken
                 care of while the breakpoint is still
                 inserted.  */
              stop_all_lwps (1, event_child);
              cancel_breakpoints ();

              delete_breakpoint (event_child->exit_jump_pad_bkpt);
              event_child->exit_jump_pad_bkpt = NULL;

              unstop_all_lwps (1, event_child);

              gdb_assert (event_child->suspended >= 0);
            }
        }

      if (event_child->collecting_fast_tracepoint == 0)
        {
          if (debug_threads)
            fprintf (stderr,
                     "fast tracepoint finished "
                     "collecting successfully.\n");

          /* We may have a deferred signal to report.  */
          if (dequeue_one_deferred_signal (event_child, &w))
            {
              if (debug_threads)
                fprintf (stderr, "dequeued one signal.\n");
            }
          else if (debug_threads)
            {
              fprintf (stderr, "no deferred signals.\n");

              if (stabilizing_threads)
                {
                  ourstatus->kind = TARGET_WAITKIND_STOPPED;
                  ourstatus->value.sig = TARGET_SIGNAL_0;
                  return ptid_of (event_child);
                }
            }
        }
    }

  /* Check whether GDB would be interested in this event.  */

  /* If GDB is not interested in this signal, don't stop other
     threads, and don't report it to GDB.  Just resume the inferior
     right away.  We do this for threading-related signals as well as
     any that GDB specifically requested we ignore.  But never ignore
     SIGSTOP if we sent it ourselves, and do not ignore signals when
     stepping - they may require special handling to skip the signal
     handler.  */
  /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
     thread library?  */
  if (WIFSTOPPED (w)
      && current_inferior->last_resume_kind != resume_step
      && (
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
          (current_process ()->private->thread_db != NULL
           && (WSTOPSIG (w) == __SIGRTMIN
               || WSTOPSIG (w) == __SIGRTMIN + 1))
          ||
#endif
          (pass_signals[target_signal_from_host (WSTOPSIG (w))]
           && !(WSTOPSIG (w) == SIGSTOP
                && current_inferior->last_resume_kind == resume_stop))))
    {
      siginfo_t info, *info_p;

      if (debug_threads)
        fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
                 WSTOPSIG (w), lwpid_of (event_child));

      if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
        info_p = &info;
      else
        info_p = NULL;
      linux_resume_one_lwp (event_child, event_child->stepping,
                            WSTOPSIG (w), info_p);
      goto retry;
    }

  /* If GDB wanted this thread to single step, we always want to
     report the SIGTRAP, and let GDB handle it.  Watchpoints should
     always be reported.  So should signals we can't explain.  A
     SIGTRAP we can't explain could be a GDB breakpoint --- we may or
     may not support Z0 breakpoints.  If we do, we'd be able to handle
     GDB breakpoints on top of internal breakpoints, by handling the
     internal breakpoint and still reporting the event to GDB.  If we
     don't, we're out of luck, GDB won't see the breakpoint hit.  */
  report_to_gdb = (!maybe_internal_trap
                   || current_inferior->last_resume_kind == resume_step
                   || event_child->stopped_by_watchpoint
                   || (!step_over_finished && !bp_explains_trap && !trace_event)
                   || gdb_breakpoint_here (event_child->stop_pc));

  /* We found no reason GDB would want us to stop.  We either hit one
     of our own breakpoints, or finished an internal step GDB
     shouldn't know about.  */
  if (!report_to_gdb)
    {
      if (debug_threads)
        {
          if (bp_explains_trap)
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");
          if (step_over_finished)
            fprintf (stderr, "Step-over finished.\n");
          if (trace_event)
            fprintf (stderr, "Tracepoint event.\n");
        }

      /* We're not reporting this breakpoint to GDB, so apply the
         decr_pc_after_break adjustment to the inferior's regcache
         ourselves.  */

      if (the_low_target.set_pc != NULL)
        {
          struct regcache *regcache
            = get_thread_regcache (get_lwp_thread (event_child), 1);
          (*the_low_target.set_pc) (regcache, event_child->stop_pc);
        }

      /* We may have finished stepping over a breakpoint.  If so,
         we've stopped and suspended all LWPs momentarily except the
         stepping one.  This is where we resume them all again.  We're
         going to keep waiting, so use proceed, which handles stepping
         over the next breakpoint.  */
      if (debug_threads)
        fprintf (stderr, "proceeding all threads.\n");

      if (step_over_finished)
        unsuspend_all_lwps (event_child);

      proceed_all_lwps ();
      goto retry;
    }

  if (debug_threads)
    {
      if (current_inferior->last_resume_kind == resume_step)
        fprintf (stderr, "GDB wanted to single-step, reporting event.\n");
      if (event_child->stopped_by_watchpoint)
        fprintf (stderr, "Stopped by watchpoint.\n");
      if (gdb_breakpoint_here (event_child->stop_pc))
        fprintf (stderr, "Stopped by GDB breakpoint.\n");
      fprintf (stderr, "Hit a non-gdbserver trap event.\n");
    }

  /* Alright, we're going to report a stop.  */

  if (!non_stop && !stabilizing_threads)
    {
      /* In all-stop, stop all threads.  */
      stop_all_lwps (0, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
         from among those that have had events.  Giving equal priority
         to all LWPs that have had events helps prevent
         starvation.  */
      if (ptid_equal (ptid, minus_one_ptid))
        {
          event_child->status_pending_p = 1;
          event_child->status_pending = w;

          select_event_lwp (&event_child);

          event_child->status_pending_p = 0;
          w = event_child->status_pending;
        }

      /* Now that we've selected our final event LWP, cancel any
         breakpoints in other LWPs that have hit a GDB breakpoint.
         See the comment in cancel_breakpoints_callback to find out
         why.  */
      find_inferior (&all_lwps, cancel_breakpoints_callback, event_child);

      /* Stabilize threads (move out of jump pads).  */
      stabilize_threads ();
    }
  else
    {
      /* If we just finished a step-over, then all threads had been
         momentarily paused.  In all-stop, that's fine, we want
         threads stopped by now anyway.  In non-stop, we need to
         re-resume threads that GDB wanted to be running.  */
      if (step_over_finished)
        unstop_all_lwps (1, event_child);
    }

  ourstatus->kind = TARGET_WAITKIND_STOPPED;

  /* Do this before the gdb_wants_all_stopped calls below, since they
     always set last_resume_kind to resume_stop.  */
  if (current_inferior->last_resume_kind == resume_stop
      && WSTOPSIG (w) == SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
         and it stopped cleanly, so report it as stopped with SIG0.
         The use of SIGSTOP is an implementation detail.  */
      ourstatus->value.sig = TARGET_SIGNAL_0;
    }
  else if (current_inferior->last_resume_kind == resume_stop
           && WSTOPSIG (w) != SIGSTOP)
    {
      /* A thread that has been requested to stop by GDB with vCont;t,
         but it stopped for some other reason.  */
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }
  else
    {
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
    }

  gdb_assert (ptid_equal (step_over_bkpt, null_ptid));

  if (stabilizing_threads)
    return ptid_of (event_child);

  if (!non_stop)
    {
      /* From GDB's perspective, all-stop mode always stops all
         threads implicitly.  Tag all threads as "want-stopped".  */
      gdb_wants_all_stopped ();
    }
  else
    {
      /* We're reporting this LWP as stopped.  Update its
         "want-stopped" state to what the client wants, until it gets
         a new resume action.  */
      gdb_wants_lwp_stopped (&event_child->head);
    }

  if (debug_threads)
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
             target_pid_to_str (ptid_of (event_child)),
             ourstatus->kind,
             ourstatus->value.sig);

  current_inferior->last_status = *ourstatus;

  return ptid_of (event_child);
}

/* Get rid of any pending event in the pipe.  */
static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    ret = read (linux_event_pipe[0], &buf, 1);
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something in the pipe, so the event loop wakes up.  */
static void
async_file_mark (void)
{
  int ret;

  async_file_flush ();

  do
    ret = write (linux_event_pipe[1], "+", 1);
  while (ret == 0 || (ret == -1 && errno == EINTR));

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

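/* The Linux target's wait entry point.  Wraps linux_wait_1, flushing
   the async event pipe before waiting and marking it again afterwards
   if a stop was reported, since a single SIGCHLD can indicate more
   than one child stop.  */
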
static ptid_t
linux_wait (ptid_t ptid,
            struct target_waitstatus *ourstatus, int target_options)
{
  ptid_t event_ptid;

  if (debug_threads)
    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));

  /* Flush the async file first.  */
  if (target_is_async_p ())
    async_file_flush ();

  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);

  /* If at least one stop was reported, there may be more.  A single
     SIGCHLD can signal more than one child stop.  */
  if (target_is_async_p ()
      && (target_options & TARGET_WNOHANG) != 0
      && !ptid_equal (event_ptid, null_ptid))
    async_file_mark ();

  return event_ptid;
}

/* Send a signal to an LWP.  */

static int
kill_lwp (unsigned long lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef __NR_tkill
  {
    static int tkill_failed;

    if (!tkill_failed)
      {
        int ret;

        errno = 0;
        ret = syscall (__NR_tkill, lwpid, signo);
        if (errno != ENOSYS)
          return ret;
        tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}

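/* Send a SIGSTOP to LWP, unless one is already pending for it.  */
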
static void
send_sigstop (struct lwp_info *lwp)
{
  int pid;

  pid = lwpid_of (lwp);

  /* If we already have a pending stop signal for this process, don't
     send another.  */
  if (lwp->stop_expected)
    {
      if (debug_threads)
        fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);

      return;
    }

  if (debug_threads)
    fprintf (stderr, "Sending sigstop to lwp %d\n", pid);

  lwp->stop_expected = 1;
  kill_lwp (pid, SIGSTOP);
}

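/* Callback for find_inferior: send a SIGSTOP to every LWP that is
   neither EXCEPT nor already stopped.  */
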
static int
send_sigstop_callback (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  if (lwp->stopped)
    return 0;

  send_sigstop (lwp);
  return 0;
}

/* Increment the suspend count of an LWP, and stop it, if not stopped
   yet.  */
static int
suspend_and_send_sigstop_callback (struct inferior_list_entry *entry,
                                   void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* Ignore EXCEPT.  */
  if (lwp == except)
    return 0;

  lwp->suspended++;

  return send_sigstop_callback (entry, except);
}

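/* Mark LWP dead, recording its exit status as a pending status so it
   can be reported later, and flagging it stopped so we don't try to
   stop it again.  */
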
static void
mark_lwp_dead (struct lwp_info *lwp, int wstat)
{
  /* It's dead, really.  */
  lwp->dead = 1;

  /* Store the exit status for later.  */
  lwp->status_pending_p = 1;
  lwp->status_pending = wstat;

  /* Prevent trying to stop it.  */
  lwp->stopped = 1;

  /* No further stops are expected from a dead lwp.  */
  lwp->stop_expected = 0;
}

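/* Wait for the LWP described by ENTRY to stop.  If it stops with a
   signal other than the SIGSTOP we sent, or if it exits, record that
   as a pending status instead.  */
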
static void
wait_for_sigstop (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *saved_inferior;
  int wstat;
  ptid_t saved_tid;
  ptid_t ptid;
  int pid;

  if (lwp->stopped)
    {
      if (debug_threads)
        fprintf (stderr, "wait_for_sigstop: LWP %ld already stopped\n",
                 lwpid_of (lwp));
      return;
    }

  saved_inferior = current_inferior;
  if (saved_inferior != NULL)
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
  else
    saved_tid = null_ptid; /* avoid bogus unused warning */

  ptid = lwp->head.id;

  if (debug_threads)
    fprintf (stderr, "wait_for_sigstop: pulling one event\n");

  pid = linux_wait_for_event (ptid, &wstat, __WALL);

  /* If we stopped with a non-SIGSTOP signal, save it for later
     and record the pending SIGSTOP.  If the process exited, just
     return.  */
  if (WIFSTOPPED (wstat))
    {
      if (debug_threads)
        fprintf (stderr, "LWP %ld stopped with signal %d\n",
                 lwpid_of (lwp), WSTOPSIG (wstat));

      if (WSTOPSIG (wstat) != SIGSTOP)
        {
          if (debug_threads)
            fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
                     lwpid_of (lwp), wstat);

          lwp->status_pending_p = 1;
          lwp->status_pending = wstat;
        }
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "Process %d exited while stopping LWPs\n", pid);

      lwp = find_lwp_pid (pid_to_ptid (pid));
      if (lwp)
        {
          /* Leave this status pending for the next time we're able to
             report it.  In the meantime, we'll report this lwp as
             dead to GDB, so GDB doesn't try to read registers and
             memory from it.  This can only happen if this was the
             last thread of the process; otherwise, PID is removed
             from the thread tables before linux_wait_for_event
             returns.  */
          mark_lwp_dead (lwp, wstat);
        }
    }

  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
    current_inferior = saved_inferior;
  else
    {
      if (debug_threads)
        fprintf (stderr, "Previously current thread died.\n");

      if (non_stop)
        {
          /* We can't change the current inferior behind GDB's back,
             otherwise, a subsequent command may apply to the wrong
             process.  */
          current_inferior = NULL;
        }
      else
        {
          /* Set a valid thread as current.  */
          set_desired_inferior (0);
        }
    }
}

/* Returns true if LWP ENTRY is stopped in a jump pad, and we can't
   move it out, because we need to report the stop event to GDB.  For
   example, if the user puts a breakpoint in the jump pad, it's
   because she wants to debug it.  */

static int
stuck_in_jump_pad_callback (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  return (supports_fast_tracepoints ()
          && in_process_agent_loaded ()
          && (gdb_breakpoint_here (lwp->stop_pc)
              || lwp->stopped_by_watchpoint
              || thread->last_resume_kind == resume_step)
          && linux_fast_tracepoint_collecting (lwp, NULL));
}

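/* If the LWP described by ENTRY is stopped in a jump pad and we may
   move it out, defer any pending signal and resume it; otherwise
   count it as suspended, for linux_stabilize_threads to wait on.  */
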
static void
move_out_of_jump_pad_callback (struct inferior_list_entry *entry)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread = get_lwp_thread (lwp);
  int *wstat;

  gdb_assert (lwp->suspended == 0);
  gdb_assert (lwp->stopped);

  wstat = lwp->status_pending_p ? &lwp->status_pending : NULL;

  /* Allow debugging the jump pad, gdb_collect, etc.  */
  if (!gdb_breakpoint_here (lwp->stop_pc)
      && !lwp->stopped_by_watchpoint
      && thread->last_resume_kind != resume_step
      && maybe_move_out_of_jump_pad (lwp, wstat))
    {
      if (debug_threads)
        fprintf (stderr,
                 "LWP %ld needs stabilizing (in jump pad)\n",
                 lwpid_of (lwp));

      if (wstat)
        {
          lwp->status_pending_p = 0;
          enqueue_one_deferred_signal (lwp, wstat);

          if (debug_threads)
            fprintf (stderr,
                     "Signal %d for LWP %ld deferred "
                     "(in jump pad)\n",
                     WSTOPSIG (*wstat), lwpid_of (lwp));
        }

      linux_resume_one_lwp (lwp, 0, 0, NULL);
    }
  else
    lwp->suspended++;
}

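/* Callback for find_inferior: return nonzero if the LWP described by
   ENTRY is alive and running, i.e. neither dead nor stopped.  */
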
static int
lwp_running (struct inferior_list_entry *entry, void *data)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  if (lwp->dead)
    return 0;
  if (lwp->stopped)
    return 0;
  return 1;
}

/* Stop all lwps that aren't stopped yet, except EXCEPT, if not NULL.
   If SUSPEND, then also increase the suspend count of every LWP,
   except EXCEPT.  */

static void
stop_all_lwps (int suspend, struct lwp_info *except)
{
  stopping_threads = 1;

  if (suspend)
    find_inferior (&all_lwps, suspend_and_send_sigstop_callback, except);
  else
    find_inferior (&all_lwps, send_sigstop_callback, except);
  for_each_inferior (&all_lwps, wait_for_sigstop);
  stopping_threads = 0;
}

/* Resume execution of the inferior process.
   If STEP is nonzero, single-step it.
   If SIGNAL is nonzero, give it that signal.  */

static void
linux_resume_one_lwp (struct lwp_info *lwp,
                      int step, int signal, siginfo_t *info)
{
  struct thread_info *saved_inferior;
  int fast_tp_collecting;

  if (lwp->stopped == 0)
    return;

  fast_tp_collecting = lwp->collecting_fast_tracepoint;

  gdb_assert (!stabilizing_threads || fast_tp_collecting);

  /* Cancel actions that rely on GDB not changing the PC (e.g., the
     user used the "jump" command, or "set $pc = foo").  */
  if (lwp->stop_pc != get_pc (lwp))
    {
      /* Collecting 'while-stepping' actions doesn't make sense
         anymore.  */
      release_while_stepping_state_list (get_lwp_thread (lwp));
    }

  /* If we have pending signals or status, and a new signal, enqueue the
     signal.  Also enqueue the signal if we are waiting to reinsert a
     breakpoint; it will be picked up again below.  */
  if (signal != 0
      && (lwp->status_pending_p
          || lwp->pending_signals != NULL
          || lwp->bp_reinsert != 0
          || fast_tp_collecting))
    {
      struct pending_signals *p_sig;
      p_sig = xmalloc (sizeof (*p_sig));
      p_sig->prev = lwp->pending_signals;
      p_sig->signal = signal;
      if (info == NULL)
        memset (&p_sig->info, 0, sizeof (siginfo_t));
      else
        memcpy (&p_sig->info, info, sizeof (siginfo_t));
      lwp->pending_signals = p_sig;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
        fprintf (stderr, "Not resuming lwp %ld (%s, signal %d, stop %s);"
                 " has pending status\n",
                 lwpid_of (lwp), step ? "step" : "continue", signal,
                 lwp->stop_expected ? "expected" : "not expected");
      return;
    }

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  if (debug_threads)
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
             lwpid_of (lwp), step ? "step" : "continue", signal,
             lwp->stop_expected ? "expected" : "not expected");

  /* This bit needs some thinking about.  If we get a signal that
     we must report while a single-step reinsert is still pending,
     we often end up resuming the thread.  It might be better to
     (ew) allow a stack of pending events; then we could be sure that
     the reinsert happened right away and not lose any signals.

     Making this stack would also shrink the window in which breakpoints are
     uninserted (see comment in linux_wait_for_lwp) but not enough for
     complete correctness, so it won't solve that problem.  It may be
     worthwhile just to solve this one, however.  */
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
        fprintf (stderr, "  pending reinsert at 0x%s\n",
                 paddress (lwp->bp_reinsert));

      if (can_hardware_single_step ())
        {
          if (fast_tp_collecting == 0)
            {
              if (step == 0)
                fprintf (stderr, "BAD - reinserting but not stepping.\n");
              if (lwp->suspended)
                fprintf (stderr, "BAD - reinserting and suspended(%d).\n",
                         lwp->suspended);
            }

          step = 1;
        }

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  if (fast_tp_collecting == 1)
    {
      if (debug_threads)
        fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad (exit-jump-pad-bkpt)\n",
                 lwpid_of (lwp));

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }
  else if (fast_tp_collecting == 2)
    {
      if (debug_threads)
        fprintf (stderr, "\
lwp %ld wants to get out of fast tracepoint jump pad single-stepping\n",
                 lwpid_of (lwp));

      if (can_hardware_single_step ())
        step = 1;
      else
        fatal ("moving out of jump pad single-stepping"
               " not implemented on this target");

      /* Postpone any pending signal.  It was enqueued above.  */
      signal = 0;
    }

  /* If we have while-stepping actions in this thread, set it stepping.
     If we have a signal to deliver, it may or may not be set to
     SIG_IGN; we don't know.  Assume so, and allow collecting
     while-stepping into a signal handler.  A possible smart thing to
     do would be to set an internal breakpoint at the signal return
     address, continue, and carry on catching this while-stepping
     action only when that breakpoint is hit.  A future
     enhancement.  */
  if (get_lwp_thread (lwp)->while_stepping != NULL
      && can_hardware_single_step ())
    {
      if (debug_threads)
        fprintf (stderr,
                 "lwp %ld has a while-stepping action -> forcing step.\n",
                 lwpid_of (lwp));
      step = 1;
    }

  if (debug_threads && the_low_target.get_pc != NULL)
    {
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
      fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
    }

  /* If we have pending signals, consume one unless we are trying to
     reinsert a breakpoint or we're trying to finish a fast tracepoint
     collect.  */
  if (lwp->pending_signals != NULL
      && lwp->bp_reinsert == 0
      && fast_tp_collecting == 0)
    {
      struct pending_signals **p_sig;

      p_sig = &lwp->pending_signals;
      while ((*p_sig)->prev != NULL)
        p_sig = &(*p_sig)->prev;

      signal = (*p_sig)->signal;
      if ((*p_sig)->info.si_signo != 0)
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);

      free (*p_sig);
      *p_sig = NULL;
    }

  if (the_low_target.prepare_to_resume != NULL)
    the_low_target.prepare_to_resume (lwp);

  regcache_invalidate_one ((struct inferior_list_entry *)
                           get_lwp_thread (lwp));
  errno = 0;
  lwp->stopped = 0;
  lwp->stopped_by_watchpoint = 0;
  lwp->stepping = step;
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
          /* Coerce to a uintptr_t first to avoid potential gcc warning
             of coercing an 8 byte integer to a 4 byte pointer.  */
          (PTRACE_ARG4_TYPE) (uintptr_t) signal);

  current_inferior = saved_inferior;
  if (errno)
    {
      /* ESRCH from ptrace either means that the thread was already
         running (an error) or that it is gone (a race condition).  If
         it's gone, we will get a notification the next time we wait,
         so we can ignore the error.  We could differentiate these
         two, but it's tricky without waiting; the thread still exists
         as a zombie, so sending it signal 0 would succeed.  So just
         ignore ESRCH.  */
      if (errno == ESRCH)
        return;

      perror_with_name ("ptrace");
    }
}

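/* The array of thread_resume requests passed to linux_resume, bundled
   with its element count for linux_set_resume_request.  */
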
struct thread_resume_array
{
  struct thread_resume *resume;
  size_t n;
};

/* This function is called once per thread.  We look up the thread
   in RESUME_PTR, and mark the thread with a pointer to the appropriate
   resume request.

   This algorithm is O(threads * resume elements), but the number of
   resume elements is small (and will remain small at least until GDB
   supports thread suspension).  */
static int
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int ndx;
  struct thread_resume_array *r;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);
  r = arg;

  for (ndx = 0; ndx < r->n; ndx++)
    {
      ptid_t ptid = r->resume[ndx].thread;
      if (ptid_equal (ptid, minus_one_ptid)
          || ptid_equal (ptid, entry->id)
          || (ptid_is_pid (ptid)
              && (ptid_get_pid (ptid) == pid_of (lwp)))
          || (ptid_get_lwp (ptid) == -1
              && (ptid_get_pid (ptid) == pid_of (lwp))))
        {
          if (r->resume[ndx].kind == resume_stop
              && thread->last_resume_kind == resume_stop)
            {
              if (debug_threads)
                fprintf (stderr, "already %s LWP %ld at GDB's request\n",
                         thread->last_status.kind == TARGET_WAITKIND_STOPPED
                         ? "stopped"
                         : "stopping",
                         lwpid_of (lwp));

              continue;
            }

          lwp->resume = &r->resume[ndx];
          thread->last_resume_kind = lwp->resume->kind;

          /* If we had a deferred signal to report, dequeue one now.
             This can happen if LWP gets more than one signal while
             trying to get out of a jump pad.  */
          if (lwp->stopped
              && !lwp->status_pending_p
              && dequeue_one_deferred_signal (lwp, &lwp->status_pending))
            {
              lwp->status_pending_p = 1;

              if (debug_threads)
                fprintf (stderr,
                         "Dequeueing deferred signal %d for LWP %ld, "
                         "leaving status pending.\n",
                         WSTOPSIG (lwp->status_pending), lwpid_of (lwp));
            }

          return 0;
        }
    }

  /* No resume action for this thread.  */
  lwp->resume = NULL;

  return 0;
}


/* Set *FLAG_P if this lwp has an interesting status pending.  */
static int
resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  /* LWPs which will not be resumed are not interesting, because
     we might not wait for them next time through linux_wait.  */
  if (lwp->resume == NULL)
    return 0;

  if (lwp->status_pending_p)
    * (int *) flag_p = 1;

  return 0;
}

/* Return 1 if this lwp that GDB wants running is stopped at an
   internal breakpoint that we need to step over.  It assumes that any
   required STOP_PC adjustment has already been propagated to the
   inferior's regcache.  */

static int
need_step_over_p (struct inferior_list_entry *entry, void *dummy)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  struct thread_info *saved_inferior;
  CORE_ADDR pc;

  /* LWPs which will not be resumed are not interesting, because we
     might not wait for them next time through linux_wait.  */

  if (!lwp->stopped)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Need step over [LWP %ld]? Ignoring, not stopped\n",
                 lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind == resume_stop)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Need step over [LWP %ld]? Ignoring, should remain stopped\n",
                 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Need step over [LWP %ld]? Ignoring, suspended\n",
                 lwpid_of (lwp));
      return 0;
    }

  if (!lwp->need_step_over)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Need step over [LWP %ld]? No\n", lwpid_of (lwp));
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Need step over [LWP %ld]? Ignoring, has pending status.\n",
                 lwpid_of (lwp));
      return 0;
    }

  /* Note: PC, not STOP_PC.  Either GDB has adjusted the PC already,
     or we have.  */
  pc = get_pc (lwp);

  /* If the PC has changed since we stopped, then don't do anything,
     and let the breakpoint/tracepoint be hit.  This happens if, for
     instance, GDB handled the decr_pc_after_break subtraction itself,
     GDB is OOL stepping this thread, or the user has issued a "jump"
     command, or poked thread's registers herself.  */
  if (pc != lwp->stop_pc)
    {
      if (debug_threads)
        fprintf (stderr,
                 "Need step over [LWP %ld]? Cancelling, PC was changed.  "
                 "Old stop_pc was 0x%s, PC is now 0x%s\n",
                 lwpid_of (lwp), paddress (lwp->stop_pc), paddress (pc));

      lwp->need_step_over = 0;
      return 0;
    }

  saved_inferior = current_inferior;
  current_inferior = thread;

  /* We can only step over breakpoints we know about.  */
  if (breakpoint_here (pc) || fast_tracepoint_jump_here (pc))
    {
      /* Don't step over a breakpoint that GDB expects to hit
         though.  */
      if (gdb_breakpoint_here (pc))
        {
          if (debug_threads)
            fprintf (stderr,
                     "Need step over [LWP %ld]? yes, but found"
                     " GDB breakpoint at 0x%s; skipping step over\n",
                     lwpid_of (lwp), paddress (pc));

          current_inferior = saved_inferior;
          return 0;
        }
      else
        {
          if (debug_threads)
            fprintf (stderr,
                     "Need step over [LWP %ld]? yes, found breakpoint at 0x%s\n",
                     lwpid_of (lwp), paddress (pc));

          /* We've found an lwp that needs stepping over --- return 1 so
             that find_inferior stops looking.  */
          current_inferior = saved_inferior;

          /* If the step over is cancelled, this is set again.  */
          lwp->need_step_over = 0;
          return 1;
        }
    }

  current_inferior = saved_inferior;

  if (debug_threads)
    fprintf (stderr,
             "Need step over [LWP %ld]? No, no breakpoint found at 0x%s\n",
             lwpid_of (lwp), paddress (pc));

  return 0;
}

/* Start a step-over operation on LWP.  When LWP stopped at a
3212
   breakpoint, to make progress, we need to remove the breakpoint out
3213
   of the way.  If we let other threads run while we do that, they may
3214
   pass by the breakpoint location and miss hitting it.  To avoid
3215
   that, a step-over momentarily stops all threads while LWP is
3216
   single-stepped while the breakpoint is temporarily uninserted from
3217
   the inferior.  When the single-step finishes, we reinsert the
3218
   breakpoint, and let all threads that are supposed to be running,
3219
   run again.
3220
 
3221
   On targets that don't support hardware single-step, we don't
3222
   currently support full software single-stepping.  Instead, we only
3223
   support stepping over the thread event breakpoint, by asking the
3224
   low target where to place a reinsert breakpoint.  Since this
3225
   routine assumes the breakpoint being stepped over is a thread event
3226
   breakpoint, it usually assumes the return address of the current
3227
   function is a good enough place to set the reinsert breakpoint.  */
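
/* In outline, a step-over (see start_step_over and finish_step_over
   below) proceeds like this:

     1. stop_all_lwps        -- momentarily stop the other threads
     2. uninsert breakpoint  -- at LWP's PC
     3. resume LWP           -- hardware single-step, or run to a
                                reinsert breakpoint
     4. reinsert breakpoint  -- once the single-step finishes
     5. let the stopped threads run again (see proceed_all_lwps and
        unstop_all_lwps further below)  */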

static int
start_step_over (struct lwp_info *lwp)
{
  struct thread_info *saved_inferior;
  CORE_ADDR pc;
  int step;

  if (debug_threads)
    fprintf (stderr,
             "Starting step-over on LWP %ld.  Stopping all threads\n",
             lwpid_of (lwp));

  stop_all_lwps (1, lwp);
  gdb_assert (lwp->suspended == 0);

  if (debug_threads)
    fprintf (stderr, "Done stopping all threads for step-over.\n");

  /* Note, we should always reach here with an already adjusted PC,
     either by GDB (if we're resuming due to GDB's request), or by our
     caller, if we just finished handling an internal breakpoint GDB
     shouldn't care about.  */
  pc = get_pc (lwp);

  saved_inferior = current_inferior;
  current_inferior = get_lwp_thread (lwp);

  lwp->bp_reinsert = pc;
  uninsert_breakpoints_at (pc);
  uninsert_fast_tracepoint_jumps_at (pc);

  if (can_hardware_single_step ())
    {
      step = 1;
    }
  else
    {
      CORE_ADDR raddr = (*the_low_target.breakpoint_reinsert_addr) ();
      set_reinsert_breakpoint (raddr);
      step = 0;
    }

  current_inferior = saved_inferior;

  linux_resume_one_lwp (lwp, step, 0, NULL);

  /* Require next event from this LWP.  */
  step_over_bkpt = lwp->head.id;
  return 1;
}

/* Finish a step-over.  Reinsert the breakpoint we had uninserted in
   start_step_over, if still there, and delete any reinsert
   breakpoints we've set, on non-hardware single-step targets.  */

static int
finish_step_over (struct lwp_info *lwp)
{
  if (lwp->bp_reinsert != 0)
    {
      if (debug_threads)
        fprintf (stderr, "Finished step over.\n");

      /* Reinsert any breakpoint at LWP->BP_REINSERT.  Note that there
         may be no breakpoint to reinsert there by now.  */
      reinsert_breakpoints_at (lwp->bp_reinsert);
      reinsert_fast_tracepoint_jumps_at (lwp->bp_reinsert);

      lwp->bp_reinsert = 0;

      /* Delete any software-single-step reinsert breakpoints.  No
         longer needed.  We don't have to worry about other threads
         hitting this trap, and later not being able to explain it,
         because we were stepping over a breakpoint, and we hold all
         threads but LWP stopped while doing that.  */
      if (!can_hardware_single_step ())
        delete_reinsert_breakpoints ();

      step_over_bkpt = null_ptid;
      return 1;
    }
  else
    return 0;
}

/* This function is called once per thread.  We check the thread's resume
   request, which will tell us whether to resume, step, or leave the thread
   stopped; and what signal, if any, it should be sent.

   For threads which we aren't explicitly told otherwise, we preserve
   the stepping flag; this is used for stepping over gdbserver-placed
   breakpoints.

   If pending_flags was set in any thread, we queue any needed
   signals, since we won't actually resume.  We already have a pending
   event to report, so we don't need to preserve any step requests;
   they should be re-issued if necessary.  */

static int
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
{
  struct lwp_info *lwp;
  struct thread_info *thread;
  int step;
  int leave_all_stopped = * (int *) arg;
  int leave_pending;

  thread = (struct thread_info *) entry;
  lwp = get_thread_lwp (thread);

  if (lwp->resume == NULL)
    return 0;

  if (lwp->resume->kind == resume_stop)
    {
      if (debug_threads)
        fprintf (stderr, "resume_stop request for LWP %ld\n", lwpid_of (lwp));

      if (!lwp->stopped)
        {
          if (debug_threads)
            fprintf (stderr, "stopping LWP %ld\n", lwpid_of (lwp));

          /* Stop the thread, and wait for the event asynchronously,
             through the event loop.  */
          send_sigstop (lwp);
        }
      else
        {
          if (debug_threads)
            fprintf (stderr, "already stopped LWP %ld\n",
                     lwpid_of (lwp));

          /* The LWP may have been stopped in an internal event that
             was not meant to be notified back to GDB (e.g., gdbserver
             breakpoint), so we should be reporting a stop event in
             this case too.  */

          /* If the thread already has a pending SIGSTOP, this is a
             no-op.  Otherwise, something later will presumably resume
             the thread and this will cause it to cancel any pending
             operation, due to last_resume_kind == resume_stop.  If
             the thread already has a pending status to report, we
             will still report it the next time we wait - see
             status_pending_p_callback.  */
          send_sigstop (lwp);
        }

      /* For stop requests, we're done.  */
      lwp->resume = NULL;
      thread->last_status.kind = TARGET_WAITKIND_IGNORE;
      return 0;
    }

  /* If this thread, which is about to be resumed, has a pending
     status, then don't resume any threads - we can just report the
     pending status.  Make sure to queue any signals that would
     otherwise be sent.  In all-stop mode, we base this decision on
     whether *any* thread has a pending status.  If there's a thread
     that needs the step-over-breakpoint dance, then don't resume any
     other thread but that particular one.  */
  leave_pending = (lwp->status_pending_p || leave_all_stopped);

  if (!leave_pending)
    {
      if (debug_threads)
        fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));

      step = (lwp->resume->kind == resume_step);
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
    }
  else
    {
      if (debug_threads)
        fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));

      /* If we have a new signal, enqueue the signal.  */
      if (lwp->resume->sig != 0)
        {
          struct pending_signals *p_sig;
          p_sig = xmalloc (sizeof (*p_sig));
          p_sig->prev = lwp->pending_signals;
          p_sig->signal = lwp->resume->sig;
          memset (&p_sig->info, 0, sizeof (siginfo_t));

          /* If this is the same signal we were previously stopped by,
             make sure to queue its siginfo.  We can ignore the return
             value of ptrace; if it fails, we'll skip
             PTRACE_SETSIGINFO.  */
          if (WIFSTOPPED (lwp->last_status)
              && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
            ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);

          lwp->pending_signals = p_sig;
        }
    }

  thread->last_status.kind = TARGET_WAITKIND_IGNORE;
  lwp->resume = NULL;
  return 0;
}

static void
linux_resume (struct thread_resume *resume_info, size_t n)
{
  struct thread_resume_array array = { resume_info, n };
  struct lwp_info *need_step_over = NULL;
  int any_pending;
  int leave_all_stopped;

  find_inferior (&all_threads, linux_set_resume_request, &array);

  /* If there is a thread which would otherwise be resumed, which has
     a pending status, then don't resume any threads - we can just
     report the pending status.  Make sure to queue any signals that
     would otherwise be sent.  In non-stop mode, we'll apply this
     logic to each thread individually.  We consume all pending events
     before considering starting a step-over (in all-stop).  */
  any_pending = 0;
  if (!non_stop)
    find_inferior (&all_lwps, resume_status_pending_p, &any_pending);

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  Make sure
     to queue any signals that would otherwise be delivered or
     queued.  */
  if (!any_pending && supports_breakpoints ())
    need_step_over
      = (struct lwp_info *) find_inferior (&all_lwps,
                                           need_step_over_p, NULL);

  leave_all_stopped = (need_step_over != NULL || any_pending);

  if (debug_threads)
    {
      if (need_step_over != NULL)
        fprintf (stderr, "Not resuming all, need step over\n");
      else if (any_pending)
        fprintf (stderr,
                 "Not resuming, all-stop and found "
                 "an LWP with pending status\n");
      else
        fprintf (stderr, "Resuming, no pending status or step over needed\n");
    }

  /* Even if we're leaving threads stopped, queue all signals we'd
     otherwise deliver.  */
  find_inferior (&all_threads, linux_resume_one_thread, &leave_all_stopped);

  if (need_step_over)
    start_step_over (need_step_over);
}

/* This function is called once per thread.  We check the thread's
   last resume request, which will tell us whether to resume, step, or
   leave the thread stopped.  Any signal the client requested to be
   delivered has already been enqueued at this point.

   If any thread that GDB wants running is stopped at an internal
   breakpoint that needs stepping over, we start a step-over operation
   on that particular thread, and leave all others stopped.  */

static int
proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;
  struct thread_info *thread;
  int step;

  if (lwp == except)
    return 0;

  if (debug_threads)
    fprintf (stderr,
             "proceed_one_lwp: lwp %ld\n", lwpid_of (lwp));

  if (!lwp->stopped)
    {
      if (debug_threads)
        fprintf (stderr, "   LWP %ld already running\n", lwpid_of (lwp));
      return 0;
    }

  thread = get_lwp_thread (lwp);

  if (thread->last_resume_kind == resume_stop
      && thread->last_status.kind != TARGET_WAITKIND_IGNORE)
    {
      if (debug_threads)
        fprintf (stderr, "   client wants LWP %ld to remain stopped\n",
                 lwpid_of (lwp));
      return 0;
    }

  if (lwp->status_pending_p)
    {
      if (debug_threads)
        fprintf (stderr, "   LWP %ld has pending status, leaving stopped\n",
                 lwpid_of (lwp));
      return 0;
    }

  gdb_assert (lwp->suspended >= 0);

  if (lwp->suspended)
    {
      if (debug_threads)
        fprintf (stderr, "   LWP %ld is suspended\n", lwpid_of (lwp));
      return 0;
    }

  if (thread->last_resume_kind == resume_stop)
    {
      /* We haven't reported this LWP as stopped yet (otherwise, the
         last_status.kind check above would catch it, and we wouldn't
         reach here).  This LWP may have been momentarily paused by a
         stop_all_lwps call while handling, for example, another LWP's
         step-over.  In that case, the pending expected SIGSTOP signal
         that was queued at vCont;t handling time will have already
         been consumed by wait_for_sigstop, and so we need to requeue
         another one here.  Note that if the LWP already has a SIGSTOP
         pending, this is a no-op.  */

      if (debug_threads)
        fprintf (stderr,
                 "Client wants LWP %ld to stop. "
                 "Making sure it has a SIGSTOP pending\n",
                 lwpid_of (lwp));

      send_sigstop (lwp);
    }

  step = thread->last_resume_kind == resume_step;
  linux_resume_one_lwp (lwp, step, 0, NULL);
  return 0;
}

static int
unsuspend_and_proceed_one_lwp (struct inferior_list_entry *entry, void *except)
{
  struct lwp_info *lwp = (struct lwp_info *) entry;

  if (lwp == except)
    return 0;

  lwp->suspended--;
  gdb_assert (lwp->suspended >= 0);

  return proceed_one_lwp (entry, except);
}

/* When we finish a step-over, set threads running again.  If there's
   another thread that may need a step-over, now's the time to start
   it.  Eventually, we'll move all threads past their breakpoints.  */

static void
proceed_all_lwps (void)
{
  struct lwp_info *need_step_over;

  /* If there is a thread which would otherwise be resumed, which is
     stopped at a breakpoint that needs stepping over, then don't
     resume any threads - have it step over the breakpoint with all
     other threads stopped, then resume all threads again.  */

  if (supports_breakpoints ())
    {
      need_step_over
        = (struct lwp_info *) find_inferior (&all_lwps,
                                             need_step_over_p, NULL);

      if (need_step_over != NULL)
        {
          if (debug_threads)
            fprintf (stderr, "proceed_all_lwps: found "
                     "thread %ld needing a step-over\n",
                     lwpid_of (need_step_over));

          start_step_over (need_step_over);
          return;
        }
    }

  if (debug_threads)
    fprintf (stderr, "Proceeding, no step-over needed\n");

  find_inferior (&all_lwps, proceed_one_lwp, NULL);
}

/* Stopped LWPs that the client wanted to be running, that don't have
   pending statuses, are set to run again, except for EXCEPT, if not
   NULL.  This undoes a stop_all_lwps call.  */

static void
unstop_all_lwps (int unsuspend, struct lwp_info *except)
{
  if (debug_threads)
    {
      if (except)
        fprintf (stderr,
                 "unstopping all lwps, except=(LWP %ld)\n", lwpid_of (except));
      else
        fprintf (stderr,
                 "unstopping all lwps\n");
    }

  if (unsuspend)
    find_inferior (&all_lwps, unsuspend_and_proceed_one_lwp, except);
  else
    find_inferior (&all_lwps, proceed_one_lwp, except);
}

#ifdef HAVE_LINUX_USRREGS

int
register_addr (int regnum)
{
  int addr;

  if (regnum < 0 || regnum >= the_low_target.num_regs)
    error ("Invalid register number %d.", regnum);

  addr = the_low_target.regmap[regnum];

  return addr;
}

/* Fetch one register.  */
static void
fetch_register (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= the_low_target.num_regs)
    return;
  if ((*the_low_target.cannot_fetch_register) (regno))
    return;

  regaddr = register_addr (regno);
  if (regaddr == -1)
    return;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
          & - sizeof (PTRACE_XFER_TYPE));
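  /* A worked example of the round-up above (illustrative values, not
     from any particular target): with a 4-byte PTRACE_XFER_TYPE and a
     10-byte register, (10 + 4 - 1) & -4 == 13 & ~3 == 12, so we
     transfer three full words.  */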
  buf = alloca (size);
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      *(PTRACE_XFER_TYPE *) (buf + i) =
        ptrace (PTRACE_PEEKUSER, pid,
                /* Coerce to a uintptr_t first to avoid potential gcc warning
                   of coercing an 8 byte integer to a 4 byte pointer.  */
                (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
      regaddr += sizeof (PTRACE_XFER_TYPE);
      if (errno != 0)
        error ("reading register %d: %s", regno, strerror (errno));
    }

  if (the_low_target.supply_ptrace_register)
    the_low_target.supply_ptrace_register (regcache, regno, buf);
  else
    supply_register (regcache, regno, buf);
}

/* Fetch all registers, or just one, from the child process.  */
static void
usr_fetch_inferior_registers (struct regcache *regcache, int regno)
{
  if (regno == -1)
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      fetch_register (regcache, regno);
  else
    fetch_register (regcache, regno);
}

/* Store our register values back into the inferior.
   If REGNO is -1, do this for all registers.
   Otherwise, REGNO specifies which register (so we can save time).  */
static void
usr_store_inferior_registers (struct regcache *regcache, int regno)
{
  CORE_ADDR regaddr;
  int i, size;
  char *buf;
  int pid;

  if (regno >= 0)
    {
      if (regno >= the_low_target.num_regs)
        return;

      if ((*the_low_target.cannot_store_register) (regno) == 1)
        return;

      regaddr = register_addr (regno);
      if (regaddr == -1)
        return;
      errno = 0;
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
             & - sizeof (PTRACE_XFER_TYPE);
      buf = alloca (size);
      memset (buf, 0, size);

      if (the_low_target.collect_ptrace_register)
        the_low_target.collect_ptrace_register (regcache, regno, buf);
      else
        collect_register (regcache, regno, buf);

      pid = lwpid_of (get_thread_lwp (current_inferior));
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
        {
          errno = 0;
          ptrace (PTRACE_POKEUSER, pid,
                  /* Coerce to a uintptr_t first to avoid potential gcc warning
                     about coercing an 8 byte integer to a 4 byte pointer.  */
                  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
                  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
          if (errno != 0)
            {
              /* At this point, ESRCH should mean the process is
                 already gone, in which case we simply ignore attempts
                 to change its registers.  See also the related
                 comment in linux_resume_one_lwp.  */
              if (errno == ESRCH)
                return;

              if ((*the_low_target.cannot_store_register) (regno) == 0)
                error ("writing register %d: %s", regno, strerror (errno));
            }
          regaddr += sizeof (PTRACE_XFER_TYPE);
        }
    }
  else
    for (regno = 0; regno < the_low_target.num_regs; regno++)
      usr_store_inferior_registers (regcache, regno);
}
#endif /* HAVE_LINUX_USRREGS */



#ifdef HAVE_LINUX_REGSETS

static int
regsets_fetch_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
        {
          regset ++;
          continue;
        }

      buf = xmalloc (regset->size);

      nt_type = regset->nt_type;
      if (nt_type)
        {
          iov.iov_base = buf;
          iov.iov_len = regset->size;
          data = (void *) &iov;
        }
      else
        data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid, nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif
      if (res < 0)
        {
          if (errno == EIO)
            {
              /* If we get EIO on a regset, do not try it again for
                 this process.  */
              disabled_regsets[regset - target_regsets] = 1;
              free (buf);
              continue;
            }
          else
            {
              char s[256];
              sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
                       pid);
              perror (s);
            }
        }
      else if (regset->type == GENERAL_REGS)
        saw_general_regs = 1;
      regset->store_function (regcache, buf);
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

static int
regsets_store_inferior_registers (struct regcache *regcache)
{
  struct regset_info *regset;
  int saw_general_regs = 0;
  int pid;
  struct iovec iov;

  regset = target_regsets;

  pid = lwpid_of (get_thread_lwp (current_inferior));
  while (regset->size >= 0)
    {
      void *buf, *data;
      int nt_type, res;

      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
        {
          regset ++;
          continue;
        }

      buf = xmalloc (regset->size);

      /* First fill the buffer with the current register set contents,
         in case there are any items in the kernel's regset that are
         not in gdbserver's regcache.  */

      nt_type = regset->nt_type;
      if (nt_type)
        {
          iov.iov_base = buf;
          iov.iov_len = regset->size;
          data = (void *) &iov;
        }
      else
        data = buf;

#ifndef __sparc__
      res = ptrace (regset->get_request, pid, nt_type, data);
#else
      res = ptrace (regset->get_request, pid, data, nt_type);
#endif

      if (res == 0)
        {
          /* Then overlay our cached registers on that.  */
          regset->fill_function (regcache, buf);

          /* Only now do we write the register set.  */
#ifndef __sparc__
          res = ptrace (regset->set_request, pid, nt_type, data);
#else
          res = ptrace (regset->set_request, pid, data, nt_type);
#endif
        }

      if (res < 0)
        {
          if (errno == EIO)
            {
              /* If we get EIO on a regset, do not try it again for
                 this process.  */
              disabled_regsets[regset - target_regsets] = 1;
              free (buf);
              continue;
            }
          else if (errno == ESRCH)
            {
              /* At this point, ESRCH should mean the process is
                 already gone, in which case we simply ignore attempts
                 to change its registers.  See also the related
                 comment in linux_resume_one_lwp.  */
              free (buf);
              return 0;
            }
          else
            {
              perror ("Warning: ptrace(regsets_store_inferior_registers)");
            }
        }
      else if (regset->type == GENERAL_REGS)
        saw_general_regs = 1;
      regset ++;
      free (buf);
    }
  if (saw_general_regs)
    return 0;
  else
    return 1;
}

#endif /* HAVE_LINUX_REGSETS */


void
linux_fetch_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_fetch_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_fetch_inferior_registers (regcache, regno);
#endif
}

void
linux_store_registers (struct regcache *regcache, int regno)
{
#ifdef HAVE_LINUX_REGSETS
  if (regsets_store_inferior_registers (regcache) == 0)
    return;
#endif
#ifdef HAVE_LINUX_USRREGS
  usr_store_inferior_registers (regcache, regno);
#endif
}


/* Copy LEN bytes from inferior's memory starting at MEMADDR
   to debugger memory starting at MYADDR.  */

static int
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
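  /* For example (illustrative values): with a 4-byte PTRACE_XFER_TYPE,
     MEMADDR == 0x1003 and LEN == 5 give ADDR == 0x1000 and
     COUNT == ((0x1008 - 0x1000) + 3) / 4 == 2, i.e. two longwords
     covering 0x1000..0x1007, which contain all the requested bytes.  */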
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int fd;
  char filename[64];
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  /* Try using /proc.  Don't bother for one word.  */
  if (len >= 3 * sizeof (long))
    {
      /* We could keep this file open and cache it - possibly one per
         thread.  That requires some juggling, but is even faster.  */
      sprintf (filename, "/proc/%d/mem", pid);
      fd = open (filename, O_RDONLY | O_LARGEFILE);
      if (fd == -1)
        goto no_proc;

      /* If pread64 is available, use it.  It's faster if the kernel
         supports it (only one syscall), and it's 64-bit safe even on
         32-bit platforms (for instance, SPARC debugging a SPARC64
         application).  */
#ifdef HAVE_PREAD64
      if (pread64 (fd, myaddr, len, memaddr) != len)
#else
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
#endif
        {
          close (fd);
          goto no_proc;
        }

      close (fd);
      return 0;
    }

 no_proc:
  /* Read all the longwords.  */
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
         about coercing an 8 byte integer to a 4 byte pointer.  */
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
                          (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
      if (errno)
        return errno;
    }

  /* Copy appropriate bytes out of the buffer.  */
  memcpy (myaddr,
          (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
          len);

  return 0;
}

/* Copy LEN bytes of data from debugger memory at MYADDR to inferior's
   memory at MEMADDR.  On failure (cannot write to the inferior)
   returns the value of errno.  */

static int
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
{
  register int i;
  /* Round starting address down to longword boundary.  */
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
  /* Round ending address up; get number of longwords that makes.  */
  register int count
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
      / sizeof (PTRACE_XFER_TYPE);
  /* Allocate buffer of that many longwords.  */
  register PTRACE_XFER_TYPE *buffer
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    {
      /* Dump up to four bytes.  */
      unsigned int val = * (unsigned int *) myaddr;
      if (len == 1)
        val = val & 0xff;
      else if (len == 2)
        val = val & 0xffff;
      else if (len == 3)
        val = val & 0xffffff;
      fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
               val, (long) memaddr);
    }

  /* Fill start and end extra bytes of buffer with existing memory data.  */

  errno = 0;
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
     about coercing an 8 byte integer to a 4 byte pointer.  */
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
                      (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
  if (errno)
    return errno;

  if (count > 1)
    {
      errno = 0;
      buffer[count - 1]
        = ptrace (PTRACE_PEEKTEXT, pid,
                  /* Coerce to a uintptr_t first to avoid potential gcc warning
                     about coercing an 8 byte integer to a 4 byte pointer.  */
                  (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
                                                  * sizeof (PTRACE_XFER_TYPE)),
                  0);
      if (errno)
        return errno;
    }

  /* Copy data to be written over corresponding part of buffer.  */

  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
          myaddr, len);
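
  /* E.g. (illustrative): with MEMADDR == 0x1003 and a 4-byte
     PTRACE_XFER_TYPE, the low address bits give offset 3, so the new
     data is laid over the buffer starting at its fourth byte, leaving
     the head of the first word (and any tail of the last) as read
     above.  */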

  /* Write the entire buffer.  */

  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
    {
      errno = 0;
      ptrace (PTRACE_POKETEXT, pid,
              /* Coerce to a uintptr_t first to avoid potential gcc warning
                 about coercing an 8 byte integer to a 4 byte pointer.  */
              (PTRACE_ARG3_TYPE) (uintptr_t) addr,
              (PTRACE_ARG4_TYPE) buffer[i]);
      if (errno)
        return errno;
    }

  return 0;
}

/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  */
static int linux_supports_tracefork_flag;

static void
linux_enable_event_reporting (int pid)
{
  if (!linux_supports_tracefork_flag)
    return;

  ptrace (PTRACE_SETOPTIONS, pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
}

/* Helper functions for linux_test_for_tracefork, called via clone ().  */

static int
linux_tracefork_grandchild (void *arg)
{
  _exit (0);
}

#define STACK_SIZE 4096

static int
linux_tracefork_child (void *arg)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  if (fork () == 0)
    linux_tracefork_grandchild (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

#ifdef __ia64__
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
            CLONE_VM | SIGCHLD, NULL);
#else
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
         CLONE_VM | SIGCHLD, NULL);
#endif

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  _exit (0);
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
   sure that we can enable the option, and that it has the desired
   effect.  */
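
/* The probe below works as follows: fork a child that traces itself,
   stops, and forks again; set PTRACE_O_TRACEFORK on it; resume it; if
   the subsequent stop reports PTRACE_EVENT_FORK and PTRACE_GETEVENTMSG
   yields the grandchild's PID, the kernel supports the option.  Both
   children are killed before returning.  */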

static void
linux_test_for_tracefork (void)
{
  int child_pid, ret, status;
  long second_pid;
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  char *stack = xmalloc (STACK_SIZE * 4);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  linux_supports_tracefork_flag = 0;

#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))

  child_pid = fork ();
  if (child_pid == 0)
    linux_tracefork_child (NULL);

#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
#ifdef __ia64__
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
                        CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#else /* !__ia64__ */
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
                     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
#endif /* !__ia64__ */

#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */

  if (child_pid == -1)
    perror_with_name ("clone");

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name ("waitpid");
  else if (ret != child_pid)
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
  if (! WIFSTOPPED (status))
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning ("linux_test_for_tracefork: failed to kill child");
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning ("linux_test_for_tracefork: failed to wait for killed child");
      else if (!WIFSIGNALED (status))
        warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
                 "killed child", status);

      return;
    }

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning ("linux_test_for_tracefork: failed to resume child");

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning ("linux_test_for_tracefork: failed to kill second child");
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
             "(%d, status 0x%x)", ret, status);

  do
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        warning ("linux_test_for_tracefork: failed to kill child");
      my_waitpid (child_pid, &status, 0);
    }
  while (WIFSTOPPED (status));

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  free (stack);
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
}


static void
linux_look_up_symbols (void)
{
#ifdef USE_THREAD_DB
  struct process_info *proc = current_process ();

  if (proc->private->thread_db != NULL)
    return;

  /* If the kernel supports tracing forks then it also supports tracing
     clones, and then we don't need to use the magic thread event breakpoint
     to learn about threads.  */
  thread_db_init (!linux_supports_tracefork_flag);
#endif
}

static void
linux_request_interrupt (void)
{
  extern unsigned long signal_pid;

  if (!ptid_equal (cont_thread, null_ptid)
      && !ptid_equal (cont_thread, minus_one_ptid))
    {
      struct lwp_info *lwp;
      int lwpid;

      lwp = get_thread_lwp (current_inferior);
      lwpid = lwpid_of (lwp);
      kill_lwp (lwpid, SIGINT);
    }
  else
    kill_lwp (signal_pid, SIGINT);
}

/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
   to debugger memory starting at MYADDR.  */

static int
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
{
  char filename[PATH_MAX];
  int fd, n;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);

  fd = open (filename, O_RDONLY);
  if (fd < 0)
    return -1;

  if (offset != (CORE_ADDR) 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    n = -1;
  else
    n = read (fd, myaddr, len);

  close (fd);

  return n;
}

/* These breakpoint and watchpoint related wrapper functions simply
   pass on the function call if the target has registered a
   corresponding function.  */

static int
linux_insert_point (char type, CORE_ADDR addr, int len)
{
  if (the_low_target.insert_point != NULL)
    return the_low_target.insert_point (type, addr, len);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_remove_point (char type, CORE_ADDR addr, int len)
{
  if (the_low_target.remove_point != NULL)
    return the_low_target.remove_point (type, addr, len);
  else
    /* Unsupported (see target.h).  */
    return 1;
}

static int
linux_stopped_by_watchpoint (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_by_watchpoint;
}

static CORE_ADDR
linux_stopped_data_address (void)
{
  struct lwp_info *lwp = get_thread_lwp (current_inferior);

  return lwp->stopped_data_address;
}

#if defined(__UCLIBC__) && defined(HAS_NOMMU)
#if defined(__mcoldfire__)
/* These should really be defined in the kernel's ptrace.h header.  */
#define PT_TEXT_ADDR 49*4
#define PT_DATA_ADDR 50*4
#define PT_TEXT_END_ADDR  51*4
#endif

/* Under uClinux, programs are loaded at non-zero offsets, which we need
   to tell gdb about.  */

static int
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
{
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
  unsigned long text, text_end, data;
  int pid = lwpid_of (get_thread_lwp (current_inferior));

  errno = 0;

  text = ptrace (PTRACE_PEEKUSER, pid, (long) PT_TEXT_ADDR, 0);
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long) PT_TEXT_END_ADDR, 0);
  data = ptrace (PTRACE_PEEKUSER, pid, (long) PT_DATA_ADDR, 0);

  if (errno == 0)
    {
      /* Both text and data offsets produced at compile-time (and so
         used by gdb) are relative to the beginning of the program,
         with the data segment immediately following the text segment.
         However, the actual runtime layout in memory may put the data
         somewhere else, so when we send gdb a data base-address, we
         use the real data base address and subtract the compile-time
         data base-address from it (which is just the length of the
         text segment).  BSS immediately follows data in both
         cases.  */
      *text_p = text;
      *data_p = data - (text_end - text);

      return 1;
    }
#endif
  return 0;
}
#endif

/* qsort comparison function for ints, used when sorting the list of
   core numbers below.  */

static int
compare_ints (const void *xa, const void *xb)
{
  int a = *(const int *) xa;
  int b = *(const int *) xb;

  return a - b;
}

/* Remove consecutive duplicates from the sorted, non-empty range
   [B, E), in place, and return a pointer one past the end of the
   deduplicated range (the same contract as C++ std::unique).  */

static int *
unique (int *b, int *e)
{
  int *d = b;
  while (++b != e)
    if (*d != *b)
      *++d = *b;
  return ++d;
}

/* Given PID, iterates over all threads in that process.

   Information about each thread, in a format suitable for
   qXfer:osdata:thread, is printed to BUFFER, if it's not NULL.
   BUFFER is assumed to be already initialized, and the caller is
   responsible for finishing and appending '\0' to it.

   The list of cores that threads are running on is assigned to *CORES, if it
   is not NULL.  If no cores are found, *CORES will be set to NULL.  Caller
   should free *CORES.  */

static void
list_threads (int pid, struct buffer *buffer, char **cores)
{
  int count = 0;
  int allocated = 10;
  int *core_numbers = xmalloc (sizeof (int) * allocated);
  char pathname[128];
  DIR *dir;
  struct dirent *dp;
  struct stat statbuf;

  sprintf (pathname, "/proc/%d/task", pid);
  if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
    {
      dir = opendir (pathname);
      if (!dir)
        {
          free (core_numbers);
          return;
        }

      while ((dp = readdir (dir)) != NULL)
        {
          unsigned long lwp = strtoul (dp->d_name, NULL, 10);

          if (lwp != 0)
            {
              unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));

              if (core != -1)
                {
                  char s[sizeof ("4294967295")];
                  sprintf (s, "%u", core);

                  if (count == allocated)
                    {
                      allocated *= 2;
                      core_numbers = realloc (core_numbers,
                                              sizeof (int) * allocated);
                    }
                  core_numbers[count++] = core;
                  if (buffer)
                    buffer_xml_printf (buffer,
                                       "<item>"
                                       "<column name=\"pid\">%d</column>"
                                       "<column name=\"tid\">%s</column>"
                                       "<column name=\"core\">%s</column>"
                                       "</item>", pid, dp->d_name, s);
                }
              else
                {
                  if (buffer)
                    buffer_xml_printf (buffer,
                                       "<item>"
                                       "<column name=\"pid\">%d</column>"
                                       "<column name=\"tid\">%s</column>"
                                       "</item>", pid, dp->d_name);
                }
            }
        }

      closedir (dir);
    }

  if (cores)
    {
      *cores = NULL;
      if (count > 0)
        {
          struct buffer buffer2;
          int *b;
          int *e;
          qsort (core_numbers, count, sizeof (int), compare_ints);

          /* Remove duplicates.  */
          b = core_numbers;
          e = unique (b, core_numbers + count);

          buffer_init (&buffer2);

          for (b = core_numbers; b != e; ++b)
            {
              char number[sizeof ("4294967295")];
              sprintf (number, "%u", *b);
              buffer_xml_printf (&buffer2, "%s%s",
                                 (b == core_numbers) ? "" : ",", number);
            }
          buffer_grow_str0 (&buffer2, "");

          *cores = buffer_finish (&buffer2);
        }
    }
  free (core_numbers);
}

static void
show_process (int pid, const char *username, struct buffer *buffer)
{
  char pathname[128];
  FILE *f;
  char cmd[MAXPATHLEN + 1];

  sprintf (pathname, "/proc/%d/cmdline", pid);

  if ((f = fopen (pathname, "r")) != NULL)
    {
      size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
      if (len > 0)
        {
          char *cores = 0;
          int i;
          for (i = 0; i < len; i++)
            if (cmd[i] == '\0')
              cmd[i] = ' ';
          cmd[len] = '\0';

          buffer_xml_printf (buffer,
                             "<item>"
                             "<column name=\"pid\">%d</column>"
                             "<column name=\"user\">%s</column>"
                             "<column name=\"command\">%s</column>",
                             pid,
                             username,
                             cmd);

          /* This only collects core numbers, and does not print threads.  */
          list_threads (pid, NULL, &cores);

          if (cores)
            {
              buffer_xml_printf (buffer,
                                 "<column name=\"cores\">%s</column>", cores);
              free (cores);
            }

          buffer_xml_printf (buffer, "</item>");
        }
      fclose (f);
    }
}

static int
linux_qxfer_osdata (const char *annex,
                    unsigned char *readbuf, unsigned const char *writebuf,
                    CORE_ADDR offset, int len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;
  static long len_avail = -1;
  static struct buffer buffer;
  int processes = 0;
  int threads = 0;

  DIR *dirp;

  if (strcmp (annex, "processes") == 0)
    processes = 1;
  else if (strcmp (annex, "threads") == 0)
    threads = 1;
  else
    return 0;

  if (!readbuf || writebuf)
    return 0;

  if (offset == 0)
    {
      if (len_avail != -1 && len_avail != 0)
        buffer_free (&buffer);
      len_avail = 0;
      buf = NULL;
      buffer_init (&buffer);
      if (processes)
        buffer_grow_str (&buffer, "<osdata type=\"processes\">");
      else if (threads)
        buffer_grow_str (&buffer, "<osdata type=\"threads\">");

      dirp = opendir ("/proc");
      if (dirp)
        {
          struct dirent *dp;
          while ((dp = readdir (dirp)) != NULL)
            {
              struct stat statbuf;
              char procentry[sizeof ("/proc/4294967295")];

              if (!isdigit (dp->d_name[0])
                  || strlen (dp->d_name) > sizeof ("4294967295") - 1)
                continue;

              sprintf (procentry, "/proc/%s", dp->d_name);
              if (stat (procentry, &statbuf) == 0
                  && S_ISDIR (statbuf.st_mode))
                {
                  int pid = (int) strtoul (dp->d_name, NULL, 10);

                  if (processes)
                    {
                      struct passwd *entry = getpwuid (statbuf.st_uid);
                      show_process (pid, entry ? entry->pw_name : "?",
                                    &buffer);
                    }
                  else if (threads)
                    {
                      list_threads (pid, &buffer, NULL);
                    }
                }
            }

          closedir (dirp);
        }
      buffer_grow_str0 (&buffer, "</osdata>\n");
      buf = buffer_finish (&buffer);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the data.  */
      buffer_free (&buffer);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}

/* Convert a native/host siginfo object, into/from the siginfo in the
   layout of the inferiors' architecture.  */
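
/* DIRECTION == 1 copies from INF_SIGINFO (inferior layout) into
   SIGINFO (native layout); any other value copies the other way, from
   SIGINFO into INF_SIGINFO.  This matches the fallback memcpys in the
   function body.  */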

static void
siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
{
  int done = 0;

  if (the_low_target.siginfo_fixup != NULL)
    done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);

  /* If there was no callback, or the callback didn't do anything,
     then just do a straight memcpy.  */
  if (!done)
    {
      if (direction == 1)
        memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
      else
        memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
    }
}

static int
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
                    unsigned const char *writebuf, CORE_ADDR offset, int len)
{
  int pid;
  struct siginfo siginfo;
  char inf_siginfo[sizeof (struct siginfo)];

  if (current_inferior == NULL)
    return -1;

  pid = lwpid_of (get_thread_lwp (current_inferior));

  if (debug_threads)
    fprintf (stderr, "%s siginfo for lwp %d.\n",
             readbuf != NULL ? "Reading" : "Writing",
             pid);

  if (offset > sizeof (siginfo))
    return -1;

  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
    return -1;

  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDBSERVER should look the same as debugging it
     with a 32-bit GDBSERVER, we need to convert it.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
        return -1;
    }

  return len;
}

/* SIGCHLD handler.  It serves two purposes: in non-stop/async mode it
   lets us notice when children change state, and it acts as the
   handler for the sigsuspend in my_waitpid.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_threads)
    /* fprintf is not async-signal-safe, so call write directly.  */
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);

  if (target_is_async_p ())
    async_file_mark (); /* trigger a linux_wait */

  errno = old_errno;
}

static int
linux_supports_non_stop (void)
{
  return 1;
}

static int
linux_async (int enable)
{
  int previous = (linux_event_pipe[0] != -1);

  if (debug_threads)
    fprintf (stderr, "linux_async (%d), previous=%d\n",
             enable, previous);

  if (previous != enable)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      sigprocmask (SIG_BLOCK, &mask, NULL);

      if (enable)
        {
          if (pipe (linux_event_pipe) == -1)
            fatal ("creating event pipe failed.");

          fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
          fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);

          /* Register the event loop handler.  */
          add_file_handler (linux_event_pipe[0],
                            handle_target_event, NULL);

          /* Always trigger a linux_wait.  */
          async_file_mark ();
        }
      else
        {
          delete_file_handler (linux_event_pipe[0]);

          close (linux_event_pipe[0]);
          close (linux_event_pipe[1]);
          linux_event_pipe[0] = -1;
          linux_event_pipe[1] = -1;
        }

      sigprocmask (SIG_UNBLOCK, &mask, NULL);
    }

  return previous;
}

static int
linux_start_non_stop (int nonstop)
{
  /* Register or unregister from event-loop accordingly.  */
  linux_async (nonstop);
  return 0;
}

static int
linux_supports_multi_process (void)
{
  return 1;
}


/* Enumerate spufs IDs for process PID.  */
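/* The IDs are written to BUF as an array of 32-bit values (the spufs
   context file descriptor numbers), honoring the OFFSET/LEN byte
   window into that array; the return value is the number of bytes
   actually written, or -1 if /proc/PID/fd cannot be read.  */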
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
        continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
        continue;
      if (!S_ISDIR (st.st_mode))
        continue;

      if (statfs (path, &stfs) != 0)
        continue;
      if (stfs.f_type != SPUFS_MAGIC)
        continue;

      if (pos >= offset && pos + 4 <= offset + len)
        {
          *(unsigned int *) (buf + pos - offset) = fd;
          written += 4;
        }
      pos += 4;
    }

  closedir (dir);
  return written;
}

/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
                 unsigned const char *writebuf,
                 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (get_thread_lwp (current_inferior));
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
        return -1;
      else
        return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
4926
 
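/* Return the number of the CPU core that thread PTID last ran on, as
   parsed from /proc/<pid>/task/<lwp>/stat, or -1 on failure.  */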
static int
linux_core_of_thread (ptid_t ptid)
{
  char filename[sizeof ("/proc//task//stat")
                + 2 * 20 /* Two numbers, at most 20 decimal digits each
                            (enough for 2^64).  */
                + 1];
  FILE *f;
  char *content = NULL;
  char *p;
  char *ts = NULL;
  int content_read = 0;
  int i;
  int core;

  sprintf (filename, "/proc/%d/task/%ld/stat",
           ptid_get_pid (ptid), ptid_get_lwp (ptid));
  f = fopen (filename, "r");
  if (!f)
    return -1;

  /* The file does not report its own size, so read it in 1024-byte
     chunks until a short read signals EOF.  */
  for (;;)
    {
      int n;
      content = realloc (content, content_read + 1024);
      n = fread (content + content_read, 1, 1024, f);
      content_read += n;
      if (n < 1024)
        {
          content[content_read] = '\0';
          break;
        }
    }

  p = strchr (content, '(');

  /* Skip up to and past the ")" that closes the command name, so that
     spaces inside the name do not confuse the parse.  */
  if (p != NULL)
    p = strchr (p, ')');
  if (p != NULL)
    p++;

  /* If the first field after the program name has index 0, then the
     core number is the field with index 36.  There's no constant for
     that anywhere.  */
  if (p != NULL)
    p = strtok_r (p, " ", &ts);
  for (i = 0; p != NULL && i != 36; ++i)
    p = strtok_r (NULL, " ", &ts);

  if (p == NULL || sscanf (p, "%d", &core) != 1)
    core = -1;

  free (content);
  fclose (f);

  return core;
}
 
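/* Target hook: give the architecture backend a look at the features
   GDB announced in its qSupported packet.  */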
static void
linux_process_qsupported (const char *query)
{
  if (the_low_target.process_qsupported != NULL)
    the_low_target.process_qsupported (query);
}
 
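/* Target hook: tracepoint support is delegated to the architecture
   backend; without a backend hook, the answer is no.  */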
static int
linux_supports_tracepoints (void)
{
  if (the_low_target.supports_tracepoints == NULL)
    return 0;

  return (*the_low_target.supports_tracepoints) ();
}
 
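/* Target hook: read the program counter out of REGCACHE using the
   architecture's get_pc callback, or return 0 if there is none.  */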
static CORE_ADDR
linux_read_pc (struct regcache *regcache)
{
  if (the_low_target.get_pc == NULL)
    return 0;

  return (*the_low_target.get_pc) (regcache);
}
 
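/* Target hook: write PC into REGCACHE.  Unlike linux_read_pc above,
   there is no graceful fallback; a backend that allows this call must
   provide set_pc.  */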
static void
linux_write_pc (struct regcache *regcache, CORE_ADDR pc)
{
  gdb_assert (the_low_target.set_pc != NULL);

  (*the_low_target.set_pc) (regcache, pc);
}
 
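/* Target hook: report whether THREAD is currently stopped.  */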
static int
linux_thread_stopped (struct thread_info *thread)
{
  return get_thread_lwp (thread)->stopped;
}
 
/* This exposes stop-all-threads functionality to other modules.  */

static void
linux_pause_all (int freeze)
{
  stop_all_lwps (freeze, NULL);
}

/* This exposes unstop-all-threads functionality to other gdbserver
   modules.  */

static void
linux_unpause_all (int unfreeze)
{
  unstop_all_lwps (unfreeze, NULL);
}
 
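/* Target hook: ask the architecture backend to build the jump pad for
   a fast tracepoint at TPADDR.  Going by the parameter names, the
   backend reports the pad's address in JUMP_ENTRY, the jump instruction
   to place at TPADDR, and the range the displaced original instruction
   was relocated to.  */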
static int
linux_install_fast_tracepoint_jump_pad (CORE_ADDR tpoint, CORE_ADDR tpaddr,
                                        CORE_ADDR collector,
                                        CORE_ADDR lockaddr,
                                        ULONGEST orig_size,
                                        CORE_ADDR *jump_entry,
                                        unsigned char *jjump_pad_insn,
                                        ULONGEST *jjump_pad_insn_size,
                                        CORE_ADDR *adjusted_insn_addr,
                                        CORE_ADDR *adjusted_insn_addr_end)
{
  return (*the_low_target.install_fast_tracepoint_jump_pad)
    (tpoint, tpaddr, collector, lockaddr, orig_size,
     jump_entry, jjump_pad_insn, jjump_pad_insn_size,
     adjusted_insn_addr, adjusted_insn_addr_end);
}
 
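/* Target hook: return the architecture's bytecode compilation ops, or
   NULL when agent expressions cannot be compiled for this target.  */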
static struct emit_ops *
linux_emit_ops (void)
{
  if (the_low_target.emit_ops != NULL)
    return (*the_low_target.emit_ops) ();
  else
    return NULL;
}
 
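/* The target_ops vector through which the rest of gdbserver drives
   this target.  NULL entries are operations the Linux target leaves
   unimplemented.  */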
static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_mourn,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread,
  linux_process_qsupported,
  linux_supports_tracepoints,
  linux_read_pc,
  linux_write_pc,
  linux_thread_stopped,
  NULL,  /* Apparently the get_tib_address slot (Windows-specific);
            unused on GNU/Linux.  */
  linux_pause_all,
  linux_unpause_all,
  linux_cancel_breakpoints,
  linux_stabilize_threads,
  linux_install_fast_tracepoint_jump_pad,
  linux_emit_ops
};
 
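/* Arrange gdbserver's own signal dispositions.  */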
static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN + 1, SIG_IGN);
#endif
}
 
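/* One-time setup of the Linux low target: install the target vector
   and breakpoint data, probe the kernel's fork-event support, and
   hook SIGCHLD so child status changes can drive the event loop.  */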
void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  /* Count the generic regsets, and allocate the table that records
     which of them the kernel has rejected at runtime.  */
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}
