OpenCores
URL https://opencores.org/ocsvn/openrisc_2011-10-31/openrisc_2011-10-31/trunk

Subversion Repositories openrisc_2011-10-31

[/] [openrisc/] [trunk/] [gnu-src/] [gdb-7.1/] [gdb/] [linux-nat.c] - Blame information for rev 287

Go to most recent revision | Details | Compare with Previous | View Log

Line No. Rev Author Line
1 227 jeremybenn
/* GNU/Linux native-dependent code common to multiple platforms.
2
 
3
   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
4
   Free Software Foundation, Inc.
5
 
6
   This file is part of GDB.
7
 
8
   This program is free software; you can redistribute it and/or modify
9
   it under the terms of the GNU General Public License as published by
10
   the Free Software Foundation; either version 3 of the License, or
11
   (at your option) any later version.
12
 
13
   This program is distributed in the hope that it will be useful,
14
   but WITHOUT ANY WARRANTY; without even the implied warranty of
15
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16
   GNU General Public License for more details.
17
 
18
   You should have received a copy of the GNU General Public License
19
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
20
 
21
#include "defs.h"
22
#include "inferior.h"
23
#include "target.h"
24
#include "gdb_string.h"
25
#include "gdb_wait.h"
26
#include "gdb_assert.h"
27
#ifdef HAVE_TKILL_SYSCALL
28
#include <unistd.h>
29
#include <sys/syscall.h>
30
#endif
31
#include <sys/ptrace.h>
32
#include "linux-nat.h"
33
#include "linux-fork.h"
34
#include "gdbthread.h"
35
#include "gdbcmd.h"
36
#include "regcache.h"
37
#include "regset.h"
38
#include "inf-ptrace.h"
39
#include "auxv.h"
40
#include <sys/param.h>          /* for MAXPATHLEN */
41
#include <sys/procfs.h>         /* for elf_gregset etc. */
42
#include "elf-bfd.h"            /* for elfcore_write_* */
43
#include "gregset.h"            /* for gregset */
44
#include "gdbcore.h"            /* for get_exec_file */
45
#include <ctype.h>              /* for isdigit */
46
#include "gdbthread.h"          /* for struct thread_info etc. */
47
#include "gdb_stat.h"           /* for struct stat */
48
#include <fcntl.h>              /* for O_RDONLY */
49
#include "inf-loop.h"
50
#include "event-loop.h"
51
#include "event-top.h"
52
#include <pwd.h>
53
#include <sys/types.h>
54
#include "gdb_dirent.h"
55
#include "xml-support.h"
56
#include "terminal.h"
57
#include <sys/vfs.h>
58
#include "solib.h"
59
 
60
#ifndef SPUFS_MAGIC
61
#define SPUFS_MAGIC 0x23c9b64e
62
#endif
63
 
64
#ifdef HAVE_PERSONALITY
65
# include <sys/personality.h>
66
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
67
#  define ADDR_NO_RANDOMIZE 0x0040000
68
# endif
69
#endif /* HAVE_PERSONALITY */
70
 
71
/* This comment documents high-level logic of this file.
72
 
73
Waiting for events in sync mode
74
===============================
75
 
76
When waiting for an event in a specific thread, we just use waitpid, passing
77
the specific pid, and not passing WNOHANG.
78
 
79
When waiting for an event in all threads, waitpid is not quite good. Prior to
80
version 2.4, Linux can either wait for event in main thread, or in secondary
81
threads. (2.4 has the __WALL flag).  So, if we use blocking waitpid, we might
82
miss an event.  The solution is to use non-blocking waitpid, together with
83
sigsuspend.  First, we use non-blocking waitpid to get an event in the main
84
process, if any. Second, we use non-blocking waitpid with the __WCLONE
85
flag to check for events in cloned processes.  If nothing is found, we use
86
sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means something
87
happened to a child process -- and SIGCHLD will be delivered both for events
88
in main debugged process and in cloned processes.  As soon as we know there's
89
an event, we get back to calling nonblocking waitpid with and without __WCLONE.
90
 
91
Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
92
so that we don't miss a signal. If SIGCHLD arrives in between, when it's
93
blocked, the signal becomes pending and sigsuspend immediately
94
notices it and returns.
95
 
96
Waiting for events in async mode
97
================================
98
 
99
In async mode, GDB should always be ready to handle both user input
100
and target events, so neither blocking waitpid nor sigsuspend are
101
viable options.  Instead, we should asynchronously notify the GDB main
102
event loop whenever there's an unprocessed event from the target.  We
103
detect asynchronous target events by handling SIGCHLD signals.  To
104
notify the event loop about target events, the self-pipe trick is used
105
--- a pipe is registered as waitable event source in the event loop,
106
the event loop select/poll's on the read end of this pipe (as well on
107
other event sources, e.g., stdin), and the SIGCHLD handler writes a
108
byte to this pipe.  This is more portable than relying on
109
pselect/ppoll, since on kernels that lack those syscalls, libc
110
emulates them with select/poll+sigprocmask, and that is racy
111
(a.k.a. plain broken).
112
 
113
Obviously, if we fail to notify the event loop if there's a target
114
event, it's bad.  OTOH, if we notify the event loop when there's no
115
event from the target, linux_nat_wait will detect that there's no real
116
event to report, and return event of type TARGET_WAITKIND_IGNORE.
117
This is mostly harmless, but it will waste time and is better avoided.
118
 
119
The main design point is that every time GDB is outside linux-nat.c,
120
we have a SIGCHLD handler installed that is called when something
121
happens to the target and notifies the GDB event loop.  Whenever GDB
122
core decides to handle the event, and calls into linux-nat.c, we
123
process things as in sync mode, except that the we never block in
124
sigsuspend.
125
 
126
While processing an event, we may end up momentarily blocked in
127
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
128
return quickly.  E.g., in all-stop mode, before reporting to the core
129
that an LWP hit a breakpoint, all LWPs are stopped by sending them
130
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
131
Note that this is different from blocking indefinitely waiting for the
132
next event --- here, we're already handling an event.
133
 
134
Use of signals
135
==============
136
 
137
We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
138
signal is not entirely significant; we just need a signal to be delivered,
139
so that we can intercept it.  SIGSTOP's advantage is that it can not be
140
blocked.  A disadvantage is that it is not a real-time signal, so it can only
141
be queued once; we do not keep track of other sources of SIGSTOP.
142
 
143
Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
144
use them, because they have special behavior when the signal is generated -
145
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
146
kills the entire thread group.
147
 
148
A delivered SIGSTOP would stop the entire thread group, not just the thread we
149
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
150
cancel it (by PTRACE_CONT without passing SIGSTOP).
151
 
152
We could use a real-time signal instead.  This would solve those problems; we
153
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
154
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
155
generates it, and there are races with trying to find a signal that is not
156
blocked.  */
157
 
158
#ifndef O_LARGEFILE
159
#define O_LARGEFILE 0
160
#endif
161
 
162
/* If the system headers did not provide the constants, hard-code the normal
163
   values.  */
164
#ifndef PTRACE_EVENT_FORK
165
 
166
#define PTRACE_SETOPTIONS       0x4200
167
#define PTRACE_GETEVENTMSG      0x4201
168
 
169
/* options set using PTRACE_SETOPTIONS */
170
#define PTRACE_O_TRACESYSGOOD   0x00000001
171
#define PTRACE_O_TRACEFORK      0x00000002
172
#define PTRACE_O_TRACEVFORK     0x00000004
173
#define PTRACE_O_TRACECLONE     0x00000008
174
#define PTRACE_O_TRACEEXEC      0x00000010
175
#define PTRACE_O_TRACEVFORKDONE 0x00000020
176
#define PTRACE_O_TRACEEXIT      0x00000040
177
 
178
/* Wait extended result codes for the above trace options.  */
179
#define PTRACE_EVENT_FORK       1
180
#define PTRACE_EVENT_VFORK      2
181
#define PTRACE_EVENT_CLONE      3
182
#define PTRACE_EVENT_EXEC       4
183
#define PTRACE_EVENT_VFORK_DONE 5
184
#define PTRACE_EVENT_EXIT       6
185
 
186
#endif /* PTRACE_EVENT_FORK */
187
 
188
/* Unlike other extended result codes, WSTOPSIG (status) on
189
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
190
   instead SIGTRAP with bit 7 set.  */
191
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
192
 
193
/* We can't always assume that this flag is available, but all systems
194
   with the ptrace event handlers also have __WALL, so it's safe to use
195
   here.  */
196
#ifndef __WALL
197
#define __WALL          0x40000000 /* Wait for any child.  */
198
#endif
199
 
200
#ifndef PTRACE_GETSIGINFO
201
# define PTRACE_GETSIGINFO    0x4202
202
# define PTRACE_SETSIGINFO    0x4203
203
#endif
204
 
205
/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
206
   the use of the multi-threaded target.  */
207
static struct target_ops *linux_ops;
208
static struct target_ops linux_ops_saved;
209
 
210
/* The method to call, if any, when a new thread is attached.  */
211
static void (*linux_nat_new_thread) (ptid_t);
212
 
213
/* The method to call, if any, when the siginfo object needs to be
214
   converted between the layout returned by ptrace, and the layout in
215
   the architecture of the inferior.  */
216
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
217
                                       gdb_byte *,
218
                                       int);
219
 
220
/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
221
   Called by our to_xfer_partial.  */
222
static LONGEST (*super_xfer_partial) (struct target_ops *,
223
                                      enum target_object,
224
                                      const char *, gdb_byte *,
225
                                      const gdb_byte *,
226
                                      ULONGEST, LONGEST);
227
 
228
static int debug_linux_nat;
229
/* "show" callback for the debug_linux_nat setting: report its current
   VALUE (already rendered as a string by the cmd machinery) to FILE.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}
236
 
237
static int debug_linux_nat_async = 0;
238
/* "show" callback for the debug_linux_nat_async setting: report its
   current VALUE to FILE.  */
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
                    value);
}
245
 
246
static int disable_randomization = 1;
247
 
248
/* "show" callback for the disable_randomization setting.  On hosts
   built with <sys/personality.h> support, print the current VALUE;
   otherwise state that the feature is unsupported.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file, _("\
Disabling randomization of debuggee's virtual address space is %s.\n"),
                    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}
262
 
263
/* "set" callback for the disable_randomization setting.  The value
   itself is stored by the cmd machinery; this hook only rejects the
   command outright on hosts without personality() support.  */
static void
set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform."));
#endif /* !HAVE_PERSONALITY */
}
272
 
273
static int linux_parent_pid;
274
 
275
/* A singly linked list node pairing a process id with a pending wait
   status.  Used (via stopped_pids) to park stop events that have been
   collected but not yet consumed.  */
struct simple_pid_list
{
  int pid;                      /* Process id the event belongs to.  */
  int status;                   /* Wait status, as returned by waitpid.  */
  struct simple_pid_list *next; /* Next node, or NULL.  */
};
struct simple_pid_list *stopped_pids;
282
 
283
/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
284
   can not be used, 1 if it can.  */
285
 
286
static int linux_supports_tracefork_flag = -1;
287
 
288
/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACESYSGOOD
289
   can not be used, 1 if it can.  */
290
 
291
static int linux_supports_tracesysgood_flag = -1;
292
 
293
/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
294
   PTRACE_O_TRACEVFORKDONE.  */
295
 
296
static int linux_supports_tracevforkdone_flag = -1;
297
 
298
/* Async mode support */
299
 
300
/* Zero if the async mode, although enabled, is masked, which means
301
   linux_nat_wait should behave as if async mode was off.  */
302
static int linux_nat_async_mask_value = 1;
303
 
304
/* Stores the current used ptrace() options.  */
305
static int current_ptrace_options = 0;
306
 
307
/* The read/write ends of the pipe registered as waitable file in the
308
   event loop.  */
309
static int linux_nat_event_pipe[2] = { -1, -1 };
310
 
311
/* Flush the event pipe: discard every byte currently buffered in the
   read end, so a later async_file_mark starts from an empty pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  /* NOTE(review): this loop keeps reading while read() succeeds and
     only stops on an error return other than EINTR, so it terminates
     via EAGAIN -- it assumes the read end is non-blocking.  Confirm
     against where linux_nat_event_pipe is created.  */
  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}
325
 
326
/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  /* Write a single marker byte, retrying if a signal interrupts the
     write.  */
  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}
349
 
350
static void linux_nat_async (void (*callback)
351
                             (enum inferior_event_type event_type, void *context),
352
                             void *context);
353
static int linux_nat_async_mask (int mask);
354
static int kill_lwp (int lwpid, int signo);
355
 
356
static int stop_callback (struct lwp_info *lp, void *data);
357
 
358
static void block_child_signals (sigset_t *prev_mask);
359
static void restore_child_signals_mask (sigset_t *prev_mask);
360
 
361
struct lwp_info;
362
static struct lwp_info *add_lwp (ptid_t ptid);
363
static void purge_lwp_list (int pid);
364
static struct lwp_info *find_lwp_pid (ptid_t ptid);
365
 
366
 
367
/* Trivial list manipulation functions to keep track of a list of
368
   new stopped processes.  */
369
static void
370
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
371
{
372
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
373
  new_pid->pid = pid;
374
  new_pid->status = status;
375
  new_pid->next = *listp;
376
  *listp = new_pid;
377
}
378
 
379
static int
380
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
381
{
382
  struct simple_pid_list **p;
383
 
384
  for (p = listp; *p != NULL; p = &(*p)->next)
385
    if ((*p)->pid == pid)
386
      {
387
        struct simple_pid_list *next = (*p)->next;
388
        *status = (*p)->status;
389
        xfree (*p);
390
        *p = next;
391
        return 1;
392
      }
393
  return 0;
394
}
395
 
396
/* Record that child PID stopped with wait status STATUS, by pushing
   the pair onto the global stopped_pids list for later consumption.  */
static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}
401
 
402
 
403
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: asks to be traced, stops so the parent can set
   ptrace options, then forks once and exits.  The grandchild's fork
   event (or its absence) is what the parent observes.  Never
   returns.  */

static void
linux_tracefork_child (void)
{
  /* The ptrace return value is deliberately ignored; if PTRACE_TRACEME
     fails the parent's waitpid-based probe will notice.  (The original
     code declared an unused "ret" local here; it has been removed.)  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
415
 
416
/* Wrapper around waitpid that transparently retries when the call is
   interrupted by a signal (EINTR).  Returns the final waitpid result,
   with errno set as by waitpid on failure.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  for (;;)
    {
      int result = waitpid (pid, status, flags);

      if (result != -1 || errno != EINTR)
        return result;
      /* Interrupted by a signal -- try again.  */
    }
}
431
 
432
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.

   Side effects: sets linux_supports_tracefork_flag and
   linux_supports_tracevforkdone_flag (0 or 1).  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  /* Pessimistic defaults; upgraded below as each probe succeeds.  */
  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* Quick rejection test: if the kernel refuses the option outright,
     the feature is definitely absent.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();	/* Does not return.  */

  /* The child traces itself and raises SIGSTOP; wait for that stop.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* Setting the option on the test child failed: clean the child
         up and leave both capability flags at 0.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          restore_child_signals_mask (&prev_mask);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
                 "killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run to its fork () call.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* A PTRACE_EVENT_FORK stop in the upper status bits means the
     option really took effect (and was not silently ignored).  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          /* Reap and dispose of the grandchild too.  */
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
             "(%d, status 0x%x)"), ret, status);

  /* Dispose of the test child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}
541
 
542
/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
543
 
544
   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
545
   we know that the feature is not available.  This may change the tracing
546
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */
547
 
548
static void
549
linux_test_for_tracesysgood (int original_pid)
550
{
551
  int ret;
552
  sigset_t prev_mask;
553
 
554
  /* We don't want those ptrace calls to be interrupted.  */
555
  block_child_signals (&prev_mask);
556
 
557
  linux_supports_tracesysgood_flag = 0;
558
 
559
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
560
  if (ret != 0)
561
    goto out;
562
 
563
  linux_supports_tracesysgood_flag = 1;
564
out:
565
  restore_child_signals_mask (&prev_mask);
566
}
567
 
568
/* Determine wether we support PTRACE_O_TRACESYSGOOD option available.
569
   This function also sets linux_supports_tracesysgood_flag.  */
570
 
571
static int
572
linux_supports_tracesysgood (int pid)
573
{
574
  if (linux_supports_tracesysgood_flag == -1)
575
    linux_test_for_tracesysgood (pid);
576
  return linux_supports_tracesysgood_flag;
577
}
578
 
579
/* Return non-zero iff we have tracefork functionality available.
580
   This function also sets linux_supports_tracefork_flag.  */
581
 
582
static int
583
linux_supports_tracefork (int pid)
584
{
585
  if (linux_supports_tracefork_flag == -1)
586
    linux_test_for_tracefork (pid);
587
  return linux_supports_tracefork_flag;
588
}
589
 
590
/* Return non-zero iff PTRACE_O_TRACEVFORKDONE is available.  Note the
   guard deliberately tests the TRACEFORK flag: linux_test_for_tracefork
   probes and caches both capabilities in a single pass.  */
static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}
597
 
598
static void
599
linux_enable_tracesysgood (ptid_t ptid)
600
{
601
  int pid = ptid_get_lwp (ptid);
602
 
603
  if (pid == 0)
604
    pid = ptid_get_pid (ptid);
605
 
606
  if (linux_supports_tracesysgood (pid) == 0)
607
    return;
608
 
609
  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
610
 
611
  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
612
}
613
 
614
 
615
void
616
linux_enable_event_reporting (ptid_t ptid)
617
{
618
  int pid = ptid_get_lwp (ptid);
619
 
620
  if (pid == 0)
621
    pid = ptid_get_pid (ptid);
622
 
623
  if (! linux_supports_tracefork (pid))
624
    return;
625
 
626
  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
627
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
628
 
629
  if (linux_supports_tracevforkdone (pid))
630
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
631
 
632
  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
633
     read-only process state.  */
634
 
635
  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
636
}
637
 
638
/* target_ops post-attach hook: after attaching to PID, enable fork/
   exec/clone event reporting, probe for libthread_db threading
   support, and enable syscall-stop marking (TRACESYSGOOD).  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
  linux_enable_tracesysgood (pid_to_ptid (pid));
}
645
 
646
/* target_ops hook run after a new inferior starts up: mirror the
   post-attach setup (event reporting, thread_db probe, TRACESYSGOOD)
   for the newly created process PTID.  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
  linux_enable_tracesysgood (ptid);
}
653
 
654
static int
655
linux_child_follow_fork (struct target_ops *ops, int follow_child)
656
{
657
  sigset_t prev_mask;
658
  int has_vforked;
659
  int parent_pid, child_pid;
660
 
661
  block_child_signals (&prev_mask);
662
 
663
  has_vforked = (inferior_thread ()->pending_follow.kind
664
                 == TARGET_WAITKIND_VFORKED);
665
  parent_pid = ptid_get_lwp (inferior_ptid);
666
  if (parent_pid == 0)
667
    parent_pid = ptid_get_pid (inferior_ptid);
668
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
669
 
670
  if (!detach_fork)
671
    linux_enable_event_reporting (pid_to_ptid (child_pid));
672
 
673
  if (has_vforked
674
      && !non_stop /* Non-stop always resumes both branches.  */
675
      && (!target_is_async_p () || sync_execution)
676
      && !(follow_child || detach_fork || sched_multi))
677
    {
678
      /* The parent stays blocked inside the vfork syscall until the
679
         child execs or exits.  If we don't let the child run, then
680
         the parent stays blocked.  If we're telling the parent to run
681
         in the foreground, the user will not be able to ctrl-c to get
682
         back the terminal, effectively hanging the debug session.  */
683
      fprintf_filtered (gdb_stderr, _("\
684
Can not resume the parent process over vfork in the foreground while \n\
685
holding the child stopped.  Try \"set detach-on-fork\" or \
686
\"set schedule-multiple\".\n"));
687
      return 1;
688
    }
689
 
690
  if (! follow_child)
691
    {
692
      struct lwp_info *child_lp = NULL;
693
 
694
      /* We're already attached to the parent, by default. */
695
 
696
      /* Detach new forked process?  */
697
      if (detach_fork)
698
        {
699
          /* Before detaching from the child, remove all breakpoints
700
             from it.  If we forked, then this has already been taken
701
             care of by infrun.c.  If we vforked however, any
702
             breakpoint inserted in the parent is visible in the
703
             child, even those added while stopped in a vfork
704
             catchpoint.  This will remove the breakpoints from the
705
             parent also, but they'll be reinserted below.  */
706
          if (has_vforked)
707
            {
708
              /* keep breakpoints list in sync.  */
709
              remove_breakpoints_pid (GET_PID (inferior_ptid));
710
            }
711
 
712
          if (info_verbose || debug_linux_nat)
713
            {
714
              target_terminal_ours ();
715
              fprintf_filtered (gdb_stdlog,
716
                                "Detaching after fork from child process %d.\n",
717
                                child_pid);
718
            }
719
 
720
          ptrace (PTRACE_DETACH, child_pid, 0, 0);
721
        }
722
      else
723
        {
724
          struct inferior *parent_inf, *child_inf;
725
          struct cleanup *old_chain;
726
 
727
          /* Add process to GDB's tables.  */
728
          child_inf = add_inferior (child_pid);
729
 
730
          parent_inf = current_inferior ();
731
          child_inf->attach_flag = parent_inf->attach_flag;
732
          copy_terminal_info (child_inf, parent_inf);
733
 
734
          old_chain = save_inferior_ptid ();
735
          save_current_program_space ();
736
 
737
          inferior_ptid = ptid_build (child_pid, child_pid, 0);
738
          add_thread (inferior_ptid);
739
          child_lp = add_lwp (inferior_ptid);
740
          child_lp->stopped = 1;
741
          child_lp->resumed = 1;
742
 
743
          /* If this is a vfork child, then the address-space is
744
             shared with the parent.  */
745
          if (has_vforked)
746
            {
747
              child_inf->pspace = parent_inf->pspace;
748
              child_inf->aspace = parent_inf->aspace;
749
 
750
              /* The parent will be frozen until the child is done
751
                 with the shared region.  Keep track of the
752
                 parent.  */
753
              child_inf->vfork_parent = parent_inf;
754
              child_inf->pending_detach = 0;
755
              parent_inf->vfork_child = child_inf;
756
              parent_inf->pending_detach = 0;
757
            }
758
          else
759
            {
760
              child_inf->aspace = new_address_space ();
761
              child_inf->pspace = add_program_space (child_inf->aspace);
762
              child_inf->removable = 1;
763
              set_current_program_space (child_inf->pspace);
764
              clone_program_space (child_inf->pspace, parent_inf->pspace);
765
 
766
              /* Let the shared library layer (solib-svr4) learn about
767
                 this new process, relocate the cloned exec, pull in
768
                 shared libraries, and install the solib event
769
                 breakpoint.  If a "cloned-VM" event was propagated
770
                 better throughout the core, this wouldn't be
771
                 required.  */
772
              solib_create_inferior_hook (0);
773
            }
774
 
775
          /* Let the thread_db layer learn about this new process.  */
776
          check_for_thread_db ();
777
 
778
          do_cleanups (old_chain);
779
        }
780
 
781
      if (has_vforked)
782
        {
783
          struct lwp_info *lp;
784
          struct inferior *parent_inf;
785
 
786
          parent_inf = current_inferior ();
787
 
788
          /* If we detached from the child, then we have to be careful
789
             to not insert breakpoints in the parent until the child
790
             is done with the shared memory region.  However, if we're
791
             staying attached to the child, then we can and should
792
             insert breakpoints, so that we can debug it.  A
793
             subsequent child exec or exit is enough to know when does
794
             the child stops using the parent's address space.  */
795
          parent_inf->waiting_for_vfork_done = detach_fork;
796
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;
797
 
798
          lp = find_lwp_pid (pid_to_ptid (parent_pid));
799
          gdb_assert (linux_supports_tracefork_flag >= 0);
800
          if (linux_supports_tracevforkdone (0))
801
            {
802
              if (debug_linux_nat)
803
                fprintf_unfiltered (gdb_stdlog,
804
                                    "LCFF: waiting for VFORK_DONE on %d\n",
805
                                    parent_pid);
806
 
807
              lp->stopped = 1;
808
              lp->resumed = 1;
809
 
810
              /* We'll handle the VFORK_DONE event like any other
811
                 event, in target_wait.  */
812
            }
813
          else
814
            {
815
              /* We can't insert breakpoints until the child has
816
                 finished with the shared memory region.  We need to
817
                 wait until that happens.  Ideal would be to just
818
                 call:
819
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
820
                 - waitpid (parent_pid, &status, __WALL);
821
                 However, most architectures can't handle a syscall
822
                 being traced on the way out if it wasn't traced on
823
                 the way in.
824
 
825
                 We might also think to loop, continuing the child
826
                 until it exits or gets a SIGTRAP.  One problem is
827
                 that the child might call ptrace with PTRACE_TRACEME.
828
 
829
                 There's no simple and reliable way to figure out when
830
                 the vforked child will be done with its copy of the
831
                 shared memory.  We could step it out of the syscall,
832
                 two instructions, let it go, and then single-step the
833
                 parent once.  When we have hardware single-step, this
834
                 would work; with software single-step it could still
835
                 be made to work but we'd have to be able to insert
836
                 single-step breakpoints in the child, and we'd have
837
                 to insert -just- the single-step breakpoint in the
838
                 parent.  Very awkward.
839
 
840
                 In the end, the best we can do is to make sure it
841
                 runs for a little while.  Hopefully it will be out of
842
                 range of any breakpoints we reinsert.  Usually this
843
                 is only the single-step breakpoint at vfork's return
844
                 point.  */
845
 
846
              if (debug_linux_nat)
847
                fprintf_unfiltered (gdb_stdlog,
848
                                    "LCFF: no VFORK_DONE support, sleeping a bit\n");
849
 
850
              usleep (10000);
851
 
852
              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
853
                 and leave it pending.  The next linux_nat_resume call
854
                 will notice a pending event, and bypasses actually
855
                 resuming the inferior.  */
856
              lp->status = 0;
857
              lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
858
              lp->stopped = 0;
859
              lp->resumed = 1;
860
 
861
              /* If we're in async mode, need to tell the event loop
862
                 there's something here to process.  */
863
              if (target_can_async_p ())
864
                async_file_mark ();
865
            }
866
        }
867
    }
868
  else
869
    {
870
      struct thread_info *tp;
871
      struct inferior *parent_inf, *child_inf;
872
      struct lwp_info *lp;
873
      struct program_space *parent_pspace;
874
 
875
      if (info_verbose || debug_linux_nat)
876
        {
877
          target_terminal_ours ();
878
          if (has_vforked)
879
            fprintf_filtered (gdb_stdlog, _("\
880
Attaching after process %d vfork to child process %d.\n"),
881
                              parent_pid, child_pid);
882
          else
883
            fprintf_filtered (gdb_stdlog, _("\
884
Attaching after process %d fork to child process %d.\n"),
885
                              parent_pid, child_pid);
886
        }
887
 
888
      /* Add the new inferior first, so that the target_detach below
889
         doesn't unpush the target.  */
890
 
891
      child_inf = add_inferior (child_pid);
892
 
893
      parent_inf = current_inferior ();
894
      child_inf->attach_flag = parent_inf->attach_flag;
895
      copy_terminal_info (child_inf, parent_inf);
896
 
897
      parent_pspace = parent_inf->pspace;
898
 
899
      /* If we're vforking, we want to hold on to the parent until the
900
         child exits or execs.  At child exec or exit time we can
901
         remove the old breakpoints from the parent and detach or
902
         resume debugging it.  Otherwise, detach the parent now; we'll
903
         want to reuse it's program/address spaces, but we can't set
904
         them to the child before removing breakpoints from the
905
         parent, otherwise, the breakpoints module could decide to
906
         remove breakpoints from the wrong process (since they'd be
907
         assigned to the same address space).  */
908
 
909
      if (has_vforked)
910
        {
911
          gdb_assert (child_inf->vfork_parent == NULL);
912
          gdb_assert (parent_inf->vfork_child == NULL);
913
          child_inf->vfork_parent = parent_inf;
914
          child_inf->pending_detach = 0;
915
          parent_inf->vfork_child = child_inf;
916
          parent_inf->pending_detach = detach_fork;
917
          parent_inf->waiting_for_vfork_done = 0;
918
        }
919
      else if (detach_fork)
920
        target_detach (NULL, 0);
921
 
922
      /* Note that the detach above makes PARENT_INF dangling.  */
923
 
924
      /* Add the child thread to the appropriate lists, and switch to
925
         this new thread, before cloning the program space, and
926
         informing the solib layer about this new process.  */
927
 
928
      inferior_ptid = ptid_build (child_pid, child_pid, 0);
929
      add_thread (inferior_ptid);
930
      lp = add_lwp (inferior_ptid);
931
      lp->stopped = 1;
932
      lp->resumed = 1;
933
 
934
      /* If this is a vfork child, then the address-space is shared
935
         with the parent.  If we detached from the parent, then we can
936
         reuse the parent's program/address spaces.  */
937
      if (has_vforked || detach_fork)
938
        {
939
          child_inf->pspace = parent_pspace;
940
          child_inf->aspace = child_inf->pspace->aspace;
941
        }
942
      else
943
        {
944
          child_inf->aspace = new_address_space ();
945
          child_inf->pspace = add_program_space (child_inf->aspace);
946
          child_inf->removable = 1;
947
          set_current_program_space (child_inf->pspace);
948
          clone_program_space (child_inf->pspace, parent_pspace);
949
 
950
          /* Let the shared library layer (solib-svr4) learn about
951
             this new process, relocate the cloned exec, pull in
952
             shared libraries, and install the solib event breakpoint.
953
             If a "cloned-VM" event was propagated better throughout
954
             the core, this wouldn't be required.  */
955
          solib_create_inferior_hook (0);
956
        }
957
 
958
      /* Let the thread_db layer learn about this new process.  */
959
      check_for_thread_db ();
960
    }
961
 
962
  restore_child_signals_mask (&prev_mask);
963
  return 0;
964
}
965
 
966
 
967
/* target_ops hook: enable catching fork events for PID.  On
   GNU/Linux this only requires PTRACE_O_TRACEFORK support; error out
   if the running kernel lacks it.  */

static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}
973
 
974
/* target_ops hook: enable catching vfork events for PID.  Errors out
   when the kernel's ptrace lacks the needed event-reporting options.  */

static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}
980
 
981
/* target_ops hook: enable catching exec events for PID.  Errors out
   when the kernel's ptrace lacks the needed event-reporting options.  */

static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}
987
 
988
/* target_ops hook: enable syscall catchpoints for PID.  NEEDED,
   ANY_COUNT, TABLE_SIZE and TABLE describe which syscalls the core is
   interested in, but on GNU/Linux they are deliberately unused: we
   only turn syscall reporting on (never off) and let GDB's core do
   the per-syscall filtering.  Returns 0 on success; errors out if
   PTRACE_O_TRACESYSGOOD is unavailable.  */

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    error (_("Your system does not support syscall catchpoints."));

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
1001
 
1002
/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
1003
   are processes sharing the same VM space.  A multi-threaded process
1004
   is basically a group of such processes.  However, such a grouping
1005
   is almost entirely a user-space issue; the kernel doesn't enforce
1006
   such a grouping at all (this might change in the future).  In
1007
   general, we'll rely on the threads library (i.e. the GNU/Linux
1008
   Threads library) to provide such a grouping.
1009
 
1010
   It is perfectly well possible to write a multi-threaded application
1011
   without the assistance of a threads library, by using the clone
1012
   system call directly.  This module should be able to give some
1013
   rudimentary support for debugging such applications if developers
1014
   specify the CLONE_PTRACE flag in the clone system call, and are
1015
   using the Linux kernel 2.4 or above.
1016
 
1017
   Note that there are some peculiarities in GNU/Linux that affect
1018
   this code:
1019
 
1020
   - In general one should specify the __WCLONE flag to waitpid in
1021
     order to make it report events for any of the cloned processes
1022
     (and leave it out for the initial process).  However, if a cloned
1023
     process has exited the exit status is only reported if the
1024
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
1025
     we cannot use it since GDB must work on older systems too.
1026
 
1027
   - When a traced, cloned process exits and is waited for by the
1028
     debugger, the kernel reassigns it to the original parent and
1029
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
1030
     library doesn't notice this, which leads to the "zombie problem":
1031
     When debugged a multi-threaded process that spawns a lot of
1032
     threads will run out of processes, even if the threads exit,
1033
     because the "zombies" stay around.  */
1034
 
1035
/* List of known LWPs.  */
1036
struct lwp_info *lwp_list;
1037
 
1038
 
1039
/* Original signal mask.  */
1040
static sigset_t normal_mask;
1041
 
1042
/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1043
   _initialize_linux_nat.  */
1044
static sigset_t suspend_mask;
1045
 
1046
/* Signals to block to make that sigsuspend work.  */
1047
static sigset_t blocked_mask;
1048
 
1049
/* SIGCHLD action.  */
1050
struct sigaction sigchld_action;
1051
 
1052
/* Block child signals (SIGCHLD and linux threads signals), and store
1053
   the previous mask in PREV_MASK.  */
1054
 
1055
static void
1056
block_child_signals (sigset_t *prev_mask)
1057
{
1058
  /* Make sure SIGCHLD is blocked.  */
1059
  if (!sigismember (&blocked_mask, SIGCHLD))
1060
    sigaddset (&blocked_mask, SIGCHLD);
1061
 
1062
  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1063
}
1064
 
1065
/* Restore child signals mask, previously returned by
1066
   block_child_signals.  */
1067
 
1068
static void
1069
restore_child_signals_mask (sigset_t *prev_mask)
1070
{
1071
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
1072
}
1073
 
1074
 
1075
/* Prototypes for local functions.  */
1076
static int stop_wait_callback (struct lwp_info *lp, void *data);
1077
static int linux_thread_alive (ptid_t ptid);
1078
static char *linux_child_pid_to_exec_file (int pid);
1079
static int cancel_breakpoint (struct lwp_info *lp);
1080
 
1081
 
1082
/* Convert wait status STATUS to a string.  Used for printing debug
1083
   messages only.  */
1084
 
1085
static char *
1086
status_to_str (int status)
1087
{
1088
  static char buf[64];
1089
 
1090
  if (WIFSTOPPED (status))
1091
    {
1092
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1093
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1094
                  strsignal (SIGTRAP));
1095
      else
1096
        snprintf (buf, sizeof (buf), "%s (stopped)",
1097
                  strsignal (WSTOPSIG (status)));
1098
    }
1099
  else if (WIFSIGNALED (status))
1100
    snprintf (buf, sizeof (buf), "%s (terminated)",
1101
              strsignal (WSTOPSIG (status)));
1102
  else
1103
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1104
 
1105
  return buf;
1106
}
1107
 
1108
/* Remove all LWPs belong to PID from the lwp list.  */
1109
 
1110
static void
1111
purge_lwp_list (int pid)
1112
{
1113
  struct lwp_info *lp, *lpprev, *lpnext;
1114
 
1115
  lpprev = NULL;
1116
 
1117
  for (lp = lwp_list; lp; lp = lpnext)
1118
    {
1119
      lpnext = lp->next;
1120
 
1121
      if (ptid_get_pid (lp->ptid) == pid)
1122
        {
1123
          if (lp == lwp_list)
1124
            lwp_list = lp->next;
1125
          else
1126
            lpprev->next = lp->next;
1127
 
1128
          xfree (lp);
1129
        }
1130
      else
1131
        lpprev = lp;
1132
    }
1133
}
1134
 
1135
/* Return the number of known LWPs in the tgid given by PID.  */
1136
 
1137
static int
1138
num_lwps (int pid)
1139
{
1140
  int count = 0;
1141
  struct lwp_info *lp;
1142
 
1143
  for (lp = lwp_list; lp; lp = lp->next)
1144
    if (ptid_get_pid (lp->ptid) == pid)
1145
      count++;
1146
 
1147
  return count;
1148
}
1149
 
1150
/* Add the LWP specified by PID to the list.  Return a pointer to the
1151
   structure describing the new LWP.  The LWP should already be stopped
1152
   (with an exception for the very first LWP).  */
1153
 
1154
static struct lwp_info *
1155
add_lwp (ptid_t ptid)
1156
{
1157
  struct lwp_info *lp;
1158
 
1159
  gdb_assert (is_lwp (ptid));
1160
 
1161
  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1162
 
1163
  memset (lp, 0, sizeof (struct lwp_info));
1164
 
1165
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1166
 
1167
  lp->ptid = ptid;
1168
  lp->core = -1;
1169
 
1170
  lp->next = lwp_list;
1171
  lwp_list = lp;
1172
 
1173
  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
1174
    linux_nat_new_thread (ptid);
1175
 
1176
  return lp;
1177
}
1178
 
1179
/* Remove the LWP specified by PID from the list.  */
1180
 
1181
static void
1182
delete_lwp (ptid_t ptid)
1183
{
1184
  struct lwp_info *lp, *lpprev;
1185
 
1186
  lpprev = NULL;
1187
 
1188
  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1189
    if (ptid_equal (lp->ptid, ptid))
1190
      break;
1191
 
1192
  if (!lp)
1193
    return;
1194
 
1195
  if (lpprev)
1196
    lpprev->next = lp->next;
1197
  else
1198
    lwp_list = lp->next;
1199
 
1200
  xfree (lp);
1201
}
1202
 
1203
/* Return a pointer to the structure describing the LWP corresponding
1204
   to PID.  If no corresponding LWP could be found, return NULL.  */
1205
 
1206
static struct lwp_info *
1207
find_lwp_pid (ptid_t ptid)
1208
{
1209
  struct lwp_info *lp;
1210
  int lwp;
1211
 
1212
  if (is_lwp (ptid))
1213
    lwp = GET_LWP (ptid);
1214
  else
1215
    lwp = GET_PID (ptid);
1216
 
1217
  for (lp = lwp_list; lp; lp = lp->next)
1218
    if (lwp == GET_LWP (lp->ptid))
1219
      return lp;
1220
 
1221
  return NULL;
1222
}
1223
 
1224
/* Returns true if PTID matches filter FILTER.  FILTER can be the wild
1225
   card MINUS_ONE_PTID (all ptid match it); can be a ptid representing
1226
   a process (ptid_is_pid returns true), in which case, all lwps of
1227
   that give process match, lwps of other process do not; or, it can
1228
   represent a specific thread, in which case, only that thread will
1229
   match true.  PTID must represent an LWP, it can never be a wild
1230
   card.  */
1231
 
1232
static int
1233
ptid_match (ptid_t ptid, ptid_t filter)
1234
{
1235
  /* Since both parameters have the same type, prevent easy mistakes
1236
     from happening.  */
1237
  gdb_assert (!ptid_equal (ptid, minus_one_ptid)
1238
              && !ptid_equal (ptid, null_ptid));
1239
 
1240
  if (ptid_equal (filter, minus_one_ptid))
1241
    return 1;
1242
  if (ptid_is_pid (filter)
1243
      && ptid_get_pid (ptid) == ptid_get_pid (filter))
1244
    return 1;
1245
  else if (ptid_equal (ptid, filter))
1246
    return 1;
1247
 
1248
  return 0;
1249
}
1250
 
1251
/* Call CALLBACK with its second argument set to DATA for every LWP in
1252
   the list.  If CALLBACK returns 1 for a particular LWP, return a
1253
   pointer to the structure describing that LWP immediately.
1254
   Otherwise return NULL.  */
1255
 
1256
struct lwp_info *
1257
iterate_over_lwps (ptid_t filter,
1258
                   int (*callback) (struct lwp_info *, void *),
1259
                   void *data)
1260
{
1261
  struct lwp_info *lp, *lpnext;
1262
 
1263
  for (lp = lwp_list; lp; lp = lpnext)
1264
    {
1265
      lpnext = lp->next;
1266
 
1267
      if (ptid_match (lp->ptid, filter))
1268
        {
1269
          if ((*callback) (lp, data))
1270
            return lp;
1271
        }
1272
    }
1273
 
1274
  return NULL;
1275
}
1276
 
1277
/* Update our internal state when changing from one checkpoint to
1278
   another indicated by NEW_PTID.  We can only switch single-threaded
1279
   applications, so we only create one new LWP, and the previous list
1280
   is discarded.  */
1281
 
1282
void
1283
linux_nat_switch_fork (ptid_t new_ptid)
1284
{
1285
  struct lwp_info *lp;
1286
 
1287
  purge_lwp_list (GET_PID (inferior_ptid));
1288
 
1289
  lp = add_lwp (new_ptid);
1290
  lp->stopped = 1;
1291
 
1292
  /* This changes the thread's ptid while preserving the gdb thread
1293
     num.  Also changes the inferior pid, while preserving the
1294
     inferior num.  */
1295
  thread_change_ptid (inferior_ptid, new_ptid);
1296
 
1297
  /* We've just told GDB core that the thread changed target id, but,
1298
     in fact, it really is a different thread, with different register
1299
     contents.  */
1300
  registers_changed ();
1301
}
1302
 
1303
/* Handle the exit of a single thread LP.  */
1304
 
1305
static void
1306
exit_lwp (struct lwp_info *lp)
1307
{
1308
  struct thread_info *th = find_thread_ptid (lp->ptid);
1309
 
1310
  if (th)
1311
    {
1312
      if (print_thread_events)
1313
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1314
 
1315
      delete_thread (lp->ptid);
1316
    }
1317
 
1318
  delete_lwp (lp->ptid);
1319
}
1320
 
1321
/* Return an lwp's tgid, found in `/proc/PID/status'.  Returns -1 if
   the file cannot be opened or contains no "Tgid:" line.  */

int
linux_proc_get_tgid (int lwpid)
{
  char line[100];
  FILE *status_file;
  int tgid = -1;

  snprintf (line, sizeof (line), "/proc/%d/status", (int) lwpid);
  status_file = fopen (line, "r");
  if (status_file == NULL)
    return tgid;

  /* Scan for the "Tgid:" field; LINE doubles as the read buffer.  */
  while (fgets (line, sizeof (line), status_file) != NULL)
    if (strncmp (line, "Tgid:", 5) == 0)
      {
        tgid = strtoul (line + strlen ("Tgid:"), NULL, 10);
        break;
      }

  fclose (status_file);
  return tgid;
}
1348
 
1349
/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false, as
   is any process whose status file cannot be read.  */

static int
pid_is_stopped (pid_t pid)
{
  char line[100];
  FILE *status_file;
  int stopped = 0;

  snprintf (line, sizeof (line), "/proc/%d/status", (int) pid);
  status_file = fopen (line, "r");
  if (status_file == NULL)
    return 0;

  /* Find the "State:" field and check it for a job-control stop;
     LINE doubles as the read buffer.  */
  while (fgets (line, sizeof (line), status_file) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
        if (strstr (line, "T (stopped)") != NULL)
          stopped = 1;
        break;
      }

  fclose (status_file);
  return stopped;
}
1379
 
1380
/* Wait for the LWP specified by PTID, which we have just attached to.
   FIRST is nonzero for the first (main) LWP of a process.  On return,
   *CLONED is set if the LWP had to be waited for with __WCLONE, and
   *SIGNALLED is set if it stopped with a signal other than SIGSTOP.
   Returns the raw wait status for that LWP, for the caller to
   cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  /* NOTE(review): if the __WCLONE retry above also failed, NEW_PID is
     -1 and STATUS is unspecified; this assert would then fire.  The
     code assumes the retry cannot fail for a live LWP -- confirm.  */
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
                            pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Stopped by some other signal; remember it so the caller can
         report/redeliver it instead of swallowing it.  */
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}
1452
 
1453
/* Attach to the LWP specified by PID.  Return 0 if successful or -1
1454
   if the new LWP could not be attached.  */
1455
 
1456
int
1457
lin_lwp_attach_lwp (ptid_t ptid)
1458
{
1459
  struct lwp_info *lp;
1460
  sigset_t prev_mask;
1461
 
1462
  gdb_assert (is_lwp (ptid));
1463
 
1464
  block_child_signals (&prev_mask);
1465
 
1466
  lp = find_lwp_pid (ptid);
1467
 
1468
  /* We assume that we're already attached to any LWP that has an id
1469
     equal to the overall process id, and to any LWP that is already
1470
     in our list of LWPs.  If we're not seeing exit events from threads
1471
     and we've had PID wraparound since we last tried to stop all threads,
1472
     this assumption might be wrong; fortunately, this is very unlikely
1473
     to happen.  */
1474
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
1475
    {
1476
      int status, cloned = 0, signalled = 0;
1477
 
1478
      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
1479
        {
1480
          /* If we fail to attach to the thread, issue a warning,
1481
             but continue.  One way this can happen is if thread
1482
             creation is interrupted; as of Linux kernel 2.6.19, a
1483
             bug may place threads in the thread list and then fail
1484
             to create them.  */
1485
          warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1486
                   safe_strerror (errno));
1487
          restore_child_signals_mask (&prev_mask);
1488
          return -1;
1489
        }
1490
 
1491
      if (debug_linux_nat)
1492
        fprintf_unfiltered (gdb_stdlog,
1493
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1494
                            target_pid_to_str (ptid));
1495
 
1496
      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1497
      if (!WIFSTOPPED (status))
1498
        return -1;
1499
 
1500
      lp = add_lwp (ptid);
1501
      lp->stopped = 1;
1502
      lp->cloned = cloned;
1503
      lp->signalled = signalled;
1504
      if (WSTOPSIG (status) != SIGSTOP)
1505
        {
1506
          lp->resumed = 1;
1507
          lp->status = status;
1508
        }
1509
 
1510
      target_post_attach (GET_LWP (lp->ptid));
1511
 
1512
      if (debug_linux_nat)
1513
        {
1514
          fprintf_unfiltered (gdb_stdlog,
1515
                              "LLAL: waitpid %s received %s\n",
1516
                              target_pid_to_str (ptid),
1517
                              status_to_str (status));
1518
        }
1519
    }
1520
  else
1521
    {
1522
      /* We assume that the LWP representing the original process is
1523
         already stopped.  Mark it as stopped in the data structure
1524
         that the GNU/linux ptrace layer uses to keep track of
1525
         threads.  Note that this won't have already been done since
1526
         the main thread will have, we assume, been stopped by an
1527
         attach from a different layer.  */
1528
      if (lp == NULL)
1529
        lp = add_lwp (ptid);
1530
      lp->stopped = 1;
1531
    }
1532
 
1533
  restore_child_signals_mask (&prev_mask);
1534
  return 0;
1535
}
1536
 
1537
/* target_ops to_create_inferior implementation: start EXEC_FILE with
   arguments ALLARGS and environment ENV, temporarily disabling
   address space randomization for the child when the user asked for
   it (the "set disable-randomization" knob).  */

static void
linux_nat_create_inferior (struct target_ops *ops,
                           char *exec_file, char *allargs, char **env,
                           int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* personality (0xffffffff) is a pure query: it returns the
         current persona without changing it.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      /* Re-query to confirm the kernel actually accepted the flag;
         warn (but continue) if it did not.  */
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning (_("Error disabling address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Delegate the actual fork/exec to the layer below; the persona set
     above is inherited by the child across fork.  */
  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Restore GDB's own persona: only the inferior should run with
         randomization disabled.  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning (_("Error restoring address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}
1579
 
1580
/* target_ops to_attach implementation: attach to the process named by
   ARGS, then decorate the ptrace-level attach with LWP bookkeeping
   and wait for the initial stop.  Errors out (after mourning) if the
   target process exited or was killed before we could stop it.  */

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  linux_ops->to_attach (ops, args, from_tty);

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  /* Wait for the SIGSTOP generated by the attach (or whatever else
     stopped the process first), caching cloned/signalled state in the
     new LWP entry.  */
  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
                                       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process is gone; report how it died as precisely as the
         wait status allows, after giving back the terminal and
         letting the core clean up.  */
      if (WIFEXITED (status))
        {
          int exit_code = WEXITSTATUS (status);

          target_terminal_ours ();
          target_mourn_inferior ();
          if (exit_code == 0)
            error (_("Unable to attach: program exited normally."));
          else
            error (_("Unable to attach: program exited with code %d."),
                   exit_code);
        }
      else if (WIFSIGNALED (status))
        {
          enum target_signal signo;

          target_terminal_ours ();
          target_mourn_inferior ();

          signo = target_signal_from_host (WTERMSIG (status));
          error (_("Unable to attach: program terminated with signal "
                   "%s, %s."),
                 target_signal_to_name (signo),
                 target_signal_to_string (signo));
        }

      /* Neither stopped, exited, nor signalled: should be impossible
         for a waitpid status.  */
      internal_error (__FILE__, __LINE__,
                      _("unexpected status %d for PID %ld"),
                      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LNA: waitpid %ld, saving status %s\n",
                        (long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1646
 
1647
/* Get pending status of LP: decide which signal, if any, should be
   delivered to LP (e.g. when detaching).  On return, *STATUS is
   either 0 (no signal to pass) or a stopped wait status encoding the
   signal to deliver.  Always returns 0.  */

static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum target_signal signo = TARGET_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    /* A wait status we collected ourselves but haven't reported yet.  */
    signo = target_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      /* Non-stop mode: each stopped thread's last signal is recorded
         on its own thread object.  */
      struct thread_info *tp = find_thread_ptid (lp->ptid);
      signo = tp->stop_signal;
    }
  else if (!non_stop)
    {
      /* All-stop mode: only the thread that last reported an event
         has a meaningful stop_signal.  */
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
        {
          struct thread_info *tp = find_thread_ptid (lp->ptid);
          signo = tp->stop_signal;
        }
    }

  *status = 0;

  if (signo == TARGET_SIGNAL_0)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s has no pending signal\n",
                            target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      /* The user asked (via "handle ... nopass") that this signal not
         be delivered to the inferior.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "\
GPT: lwp %s had signal %s, but it is in no pass state\n",
                            target_pid_to_str (lp->ptid),
                            target_signal_to_string (signo));
    }
  else
    {
      /* Re-encode the signal as a stopped wait status for the caller
         to extract with WSTOPSIG.  */
      *status = W_STOPCODE (target_signal_to_host (signo));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s has pending signal %s\n",
                            target_pid_to_str (lp->ptid),
                            target_signal_to_string (signo));
    }

  return 0;
}
1727
 
1728
/* Iterator callback used when detaching: forward any pending signal
   to LP and PTRACE_DETACH it, except for the leader LWP (the one
   whose lwp id equals the overall process id), which linux_nat_detach
   detaches last via the lower layer.  Always returns 0 so
   iterate_over_lwps visits every LWP.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  /* A pending status, if any, must be a stop — exits are handled
     elsewhere.  */
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC:  Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      /* SIGCONT cancels the SIGSTOP we sent earlier so the thread does
	 not stop spuriously after we detach.  */
      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      /* WSTOPSIG (status) is 0 when there is nothing to forward, so
	 PTRACE_DETACH delivers no signal in that case.  */
      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      /* The LWP is gone from our point of view; drop our bookkeeping
	 for it.  */
      delete_lwp (lp->ptid);
    }

  return 0;
}
1776
 
1777
static void
1778
linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1779
{
1780
  int pid;
1781
  int status;
1782
  enum target_signal sig;
1783
  struct lwp_info *main_lwp;
1784
 
1785
  pid = GET_PID (inferior_ptid);
1786
 
1787
  if (target_can_async_p ())
1788
    linux_nat_async (NULL, 0);
1789
 
1790
  /* Stop all threads before detaching.  ptrace requires that the
1791
     thread is stopped to sucessfully detach.  */
1792
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1793
  /* ... and wait until all of them have reported back that
1794
     they're no longer running.  */
1795
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1796
 
1797
  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1798
 
1799
  /* Only the initial process should be left right now.  */
1800
  gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1801
 
1802
  main_lwp = find_lwp_pid (pid_to_ptid (pid));
1803
 
1804
  /* Pass on any pending signal for the last LWP.  */
1805
  if ((args == NULL || *args == '\0')
1806
      && get_pending_status (main_lwp, &status) != -1
1807
      && WIFSTOPPED (status))
1808
    {
1809
      /* Put the signal number in ARGS so that inf_ptrace_detach will
1810
         pass it along with PTRACE_DETACH.  */
1811
      args = alloca (8);
1812
      sprintf (args, "%d", (int) WSTOPSIG (status));
1813
      if (debug_linux_nat)
1814
        fprintf_unfiltered (gdb_stdlog,
1815
                            "LND: Sending signal %s to %s\n",
1816
                            args,
1817
                            target_pid_to_str (main_lwp->ptid));
1818
    }
1819
 
1820
  delete_lwp (main_lwp->ptid);
1821
 
1822
  if (forks_exist_p ())
1823
    {
1824
      /* Multi-fork case.  The current inferior_ptid is being detached
1825
         from, but there are other viable forks to debug.  Detach from
1826
         the current fork, and context-switch to the first
1827
         available.  */
1828
      linux_fork_detach (args, from_tty);
1829
 
1830
      if (non_stop && target_can_async_p ())
1831
        target_async (inferior_event_handler, 0);
1832
    }
1833
  else
1834
    linux_ops->to_detach (ops, args, from_tty);
1835
}
1836
 
1837
/* Iterator callback: resume LP with PTRACE_CONT if it is stopped and
   has no pending event to report; otherwise leave it alone.  Always
   returns 0 so iterate_over_lwps visits every LWP.  */

static int
resume_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));

  /* A vfork parent must stay stopped until the child is done with the
     shared address space.  */
  if (lp->stopped && inf->vfork_child != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming %s (vfork parent)\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (lp->stopped && lp->status == 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC:  PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
			    target_pid_to_str (lp->ptid));

      /* Resume without stepping and with no signal.  */
      linux_ops->to_resume (linux_ops,
			    pid_to_ptid (GET_LWP (lp->ptid)),
			    0, TARGET_SIGNAL_0);
      /* NOTE(review): this second debug message duplicates the one
	 above (only "resuming"/"resume" differs) — looks like an
	 accidental copy; confirm before removing.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC:  PTRACE_CONT %s, 0, 0 (resume sibling)\n",
			    target_pid_to_str (lp->ptid));
      /* Clear per-stop state now that the LWP is running again.  */
      lp->stopped = 0;
      lp->step = 0;
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
      lp->stopped_by_watchpoint = 0;
    }
  else if (lp->stopped && debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
			target_pid_to_str (lp->ptid));
  else if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
			target_pid_to_str (lp->ptid));

  return 0;
}
1879
 
1880
static int
1881
resume_clear_callback (struct lwp_info *lp, void *data)
1882
{
1883
  lp->resumed = 0;
1884
  return 0;
1885
}
1886
 
1887
static int
1888
resume_set_callback (struct lwp_info *lp, void *data)
1889
{
1890
  lp->resumed = 1;
1891
  return 0;
1892
}
1893
 
1894
/* Target method: resume the inferior.  PTID selects which LWPs to
   resume (minus_one_ptid or a process ptid means "many"); STEP selects
   single-stepping for the event LWP; SIGNO is the signal to deliver to
   it.  If the event LWP already has a pending event, short-circuit:
   report it via the event loop instead of resuming anything.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum target_signal signo)
{
  sigset_t prev_mask;
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			/* NOTE(review): strsignal expects a host signal
			   number but SIGNO is a target_signal enum —
			   debug output only; confirm the mapping.  */
			signo ? strsignal (signo) : "0",
			target_pid_to_str (inferior_ptid));

  /* Keep SIGCHLD from being delivered while we manipulate LWP
     state.  */
  block_child_signals (&prev_mask);

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      int saved_signo;
      struct inferior *inf;

      inf = find_inferior_pid (ptid_get_pid (lp->ptid));
      gdb_assert (inf);
      saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

      /* Defer to common code if we're gaining control of the
	 inferior.  */
      if (inf->stop_soon == NO_STOP_QUIETLY
	  && signal_stop_state (saved_signo) == 0
	  && signal_print_state (saved_signo) == 0
	  && signal_pass_state (saved_signo) == 1)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == TARGET_SIGNAL_0);
	  /* Re-deliver the pending (pass/nostop/noprint) signal and
	     fall through to really resume the LWP.  */
	  signo = saved_signo;
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      /* Don't resume anything; arrange for the pending event to be
	 reported through the event loop instead.  */
      restore_child_signals_mask (&prev_mask);
      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback.  */
  lp->stopped = 0;

  if (resume_many)
    iterate_over_lwps (ptid, resume_callback, NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  linux_ops->to_resume (linux_ops, ptid, step, signo);
  /* Clear cached per-stop state now that the LWP runs again.  */
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));
  lp->stopped_by_watchpoint = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0");

  restore_child_signals_mask (&prev_mask);
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
2013
 
2014
/* Send signal SIGNO to the LWP identified by LWPID.

   Use tkill, if possible, in case we are using nptl threads.  If tkill
   fails, then we are not using nptl threads and we should be using
   kill.  */

static int
kill_lwp (int lwpid, int signo)
{
#ifdef HAVE_TKILL_SYSCALL
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno == ENOSYS)
	tkill_unavailable = 1;	/* Old kernel; fall back to kill.  */
      else
	return result;
    }
#endif

  return kill (lwpid, signo);
}
2041
 
2042
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.

   Returns non-zero if the event was consumed (the LWP was re-resumed
   and the caller should wait again), zero if the event should be
   reported to the core.  STOPPING is non-zero while we are stopping
   all threads.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    GET_LWP (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				GET_LWP (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    GET_LWP (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter.  If we last single-stepped this thread, this
	   has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    GET_LWP (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  /* Note that gdbarch_get_syscall_number may access registers, hence
     fill a regcache.  */
  registers_changed ();
  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			lp->step, TARGET_SIGNAL_0);
  return 1;
}
2168
 
2169
/* Handle a GNU/Linux extended wait response.  If we see a clone
2170
   event, we need to add the new LWP to our list (and not report the
2171
   trap to higher layers).  This function returns non-zero if the
2172
   event should be ignored and we should wait again.  If STOPPING is
2173
   true, the new LWP remains stopped, otherwise it is continued.  */
2174
 
2175
static int
2176
linux_handle_extended_wait (struct lwp_info *lp, int status,
2177
                            int stopping)
2178
{
2179
  int pid = GET_LWP (lp->ptid);
2180
  struct target_waitstatus *ourstatus = &lp->waitstatus;
2181
  struct lwp_info *new_lp = NULL;
2182
  int event = status >> 16;
2183
 
2184
  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2185
      || event == PTRACE_EVENT_CLONE)
2186
    {
2187
      unsigned long new_pid;
2188
      int ret;
2189
 
2190
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2191
 
2192
      /* If we haven't already seen the new PID stop, wait for it now.  */
2193
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2194
        {
2195
          /* The new child has a pending SIGSTOP.  We can't affect it until it
2196
             hits the SIGSTOP, but we're already attached.  */
2197
          ret = my_waitpid (new_pid, &status,
2198
                            (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2199
          if (ret == -1)
2200
            perror_with_name (_("waiting for new child"));
2201
          else if (ret != new_pid)
2202
            internal_error (__FILE__, __LINE__,
2203
                            _("wait returned unexpected PID %d"), ret);
2204
          else if (!WIFSTOPPED (status))
2205
            internal_error (__FILE__, __LINE__,
2206
                            _("wait returned unexpected status 0x%x"), status);
2207
        }
2208
 
2209
      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2210
 
2211
      if (event == PTRACE_EVENT_FORK
2212
          && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2213
        {
2214
          struct fork_info *fp;
2215
 
2216
          /* Handle checkpointing by linux-fork.c here as a special
2217
             case.  We don't want the follow-fork-mode or 'catch fork'
2218
             to interfere with this.  */
2219
 
2220
          /* This won't actually modify the breakpoint list, but will
2221
             physically remove the breakpoints from the child.  */
2222
          detach_breakpoints (new_pid);
2223
 
2224
          /* Retain child fork in ptrace (stopped) state.  */
2225
          fp = find_fork_pid (new_pid);
2226
          if (!fp)
2227
            fp = add_fork (new_pid);
2228
 
2229
          /* Report as spurious, so that infrun doesn't want to follow
2230
             this fork.  We're actually doing an infcall in
2231
             linux-fork.c.  */
2232
          ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2233
          linux_enable_event_reporting (pid_to_ptid (new_pid));
2234
 
2235
          /* Report the stop to the core.  */
2236
          return 0;
2237
        }
2238
 
2239
      if (event == PTRACE_EVENT_FORK)
2240
        ourstatus->kind = TARGET_WAITKIND_FORKED;
2241
      else if (event == PTRACE_EVENT_VFORK)
2242
        ourstatus->kind = TARGET_WAITKIND_VFORKED;
2243
      else
2244
        {
2245
          struct cleanup *old_chain;
2246
 
2247
          ourstatus->kind = TARGET_WAITKIND_IGNORE;
2248
          new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2249
          new_lp->cloned = 1;
2250
          new_lp->stopped = 1;
2251
 
2252
          if (WSTOPSIG (status) != SIGSTOP)
2253
            {
2254
              /* This can happen if someone starts sending signals to
2255
                 the new thread before it gets a chance to run, which
2256
                 have a lower number than SIGSTOP (e.g. SIGUSR1).
2257
                 This is an unlikely case, and harder to handle for
2258
                 fork / vfork than for clone, so we do not try - but
2259
                 we handle it for clone events here.  We'll send
2260
                 the other signal on to the thread below.  */
2261
 
2262
              new_lp->signalled = 1;
2263
            }
2264
          else
2265
            status = 0;
2266
 
2267
          if (non_stop)
2268
            {
2269
              /* Add the new thread to GDB's lists as soon as possible
2270
                 so that:
2271
 
2272
                 1) the frontend doesn't have to wait for a stop to
2273
                 display them, and,
2274
 
2275
                 2) we tag it with the correct running state.  */
2276
 
2277
              /* If the thread_db layer is active, let it know about
2278
                 this new thread, and add it to GDB's list.  */
2279
              if (!thread_db_attach_lwp (new_lp->ptid))
2280
                {
2281
                  /* We're not using thread_db.  Add it to GDB's
2282
                     list.  */
2283
                  target_post_attach (GET_LWP (new_lp->ptid));
2284
                  add_thread (new_lp->ptid);
2285
                }
2286
 
2287
              if (!stopping)
2288
                {
2289
                  set_running (new_lp->ptid, 1);
2290
                  set_executing (new_lp->ptid, 1);
2291
                }
2292
            }
2293
 
2294
          /* Note the need to use the low target ops to resume, to
2295
             handle resuming with PT_SYSCALL if we have syscall
2296
             catchpoints.  */
2297
          if (!stopping)
2298
            {
2299
              int signo;
2300
 
2301
              new_lp->stopped = 0;
2302
              new_lp->resumed = 1;
2303
 
2304
              signo = (status
2305
                       ? target_signal_from_host (WSTOPSIG (status))
2306
                       : TARGET_SIGNAL_0);
2307
 
2308
              linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2309
                                    0, signo);
2310
            }
2311
 
2312
          if (debug_linux_nat)
2313
            fprintf_unfiltered (gdb_stdlog,
2314
                                "LHEW: Got clone event from LWP %ld, resuming\n",
2315
                                GET_LWP (lp->ptid));
2316
          linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2317
                                0, TARGET_SIGNAL_0);
2318
 
2319
          return 1;
2320
        }
2321
 
2322
      return 0;
2323
    }
2324
 
2325
  if (event == PTRACE_EVENT_EXEC)
2326
    {
2327
      if (debug_linux_nat)
2328
        fprintf_unfiltered (gdb_stdlog,
2329
                            "LHEW: Got exec event from LWP %ld\n",
2330
                            GET_LWP (lp->ptid));
2331
 
2332
      ourstatus->kind = TARGET_WAITKIND_EXECD;
2333
      ourstatus->value.execd_pathname
2334
        = xstrdup (linux_child_pid_to_exec_file (pid));
2335
 
2336
      return 0;
2337
    }
2338
 
2339
  if (event == PTRACE_EVENT_VFORK_DONE)
2340
    {
2341
      if (current_inferior ()->waiting_for_vfork_done)
2342
        {
2343
          if (debug_linux_nat)
2344
            fprintf_unfiltered (gdb_stdlog, "\
2345
LHEW: Got expected PTRACE_EVENT_VFORK_DONE from LWP %ld: stopping\n",
2346
                                GET_LWP (lp->ptid));
2347
 
2348
          ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2349
          return 0;
2350
        }
2351
 
2352
      if (debug_linux_nat)
2353
        fprintf_unfiltered (gdb_stdlog, "\
2354
LHEW: Got PTRACE_EVENT_VFORK_DONE from LWP %ld: resuming\n",
2355
                            GET_LWP (lp->ptid));
2356
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2357
      return 1;
2358
    }
2359
 
2360
  internal_error (__FILE__, __LINE__,
2361
                  _("unknown ptrace event %d"), event);
2362
}
2363
 
2364
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  Recurses (via the syscall/extended-event handlers) when an
   event is consumed internally and we must keep waiting.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status;
  int thread_dead = 0;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Try a plain waitpid first, then retry with __WCLONE for cloned
     LWPs (non-leader threads).  */
  pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
  if (pid == -1 && errno == ECHILD)
    {
      pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (!thread_dead)
    {
      gdb_assert (pid == GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}
    }

  /* Check if the thread has exited.  */
  /* NOTE(review): on the "vanished" path above both waitpids failed,
     so STATUS was never written here — this check then reads an
     indeterminate value (harmless since THREAD_DEAD is already set,
     but worth confirming/tightening).  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      thread_dead = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
			    target_pid_to_str (lp->ptid));
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}
2450
 
2451
/* Save the most recent siginfo for LP.  This is currently only called
   for SIGTRAP; some ports use the si_addr field for
   target_stopped_data_address.  In the future, it may also be used to
   restore the siginfo of requeued signals.  */

static void
save_siginfo (struct lwp_info *lp)
{
  /* errno is the only way to tell whether PTRACE_GETSIGINFO failed,
     so clear it first.  */
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
	  (PTRACE_TYPE_ARG3) 0, &lp->siginfo);

  /* On failure, zero the cached siginfo so stale data from a previous
     stop is never consulted.  */
  if (errno != 0)
    memset (&lp->siginfo, 0, sizeof (lp->siginfo));
}
2466
 
2467
/* Iterator callback: send a SIGSTOP to LP unless it is already
   stopped or a SIGSTOP we sent earlier is still in flight.  The stop
   is collected later by stop_wait_callback.  Returns 0 so
   iterate_over_lwps visits every LWP.  */

static int
stop_callback (struct lwp_info *lp, void *data)
{
  if (!lp->stopped && !lp->signalled)
    {
      int ret;

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "SC:  kill %s **<SIGSTOP>**\n",
			      target_pid_to_str (lp->ptid));
	}
      errno = 0;
      ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "SC:  lwp kill %d %s\n",
			      ret,
			      errno ? safe_strerror (errno) : "ERRNO-OK");
	}

      /* Remember the SIGSTOP is on its way so we don't send another
	 one.  */
      lp->signalled = 1;
      gdb_assert (lp->status == 0);
    }

  return 0;
}
2498
 
2499
/* Return non-zero if LWP PID has a pending SIGINT.  */
2500
 
2501
static int
2502
linux_nat_has_pending_sigint (int pid)
2503
{
2504
  sigset_t pending, blocked, ignored;
2505
  int i;
2506
 
2507
  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2508
 
2509
  if (sigismember (&pending, SIGINT)
2510
      && !sigismember (&ignored, SIGINT))
2511
    return 1;
2512
 
2513
  return 0;
2514
}
2515
 
2516
/* Set a flag in LP indicating that we should ignore its next SIGINT.  */
2517
 
2518
static int
2519
set_ignore_sigint (struct lwp_info *lp, void *data)
2520
{
2521
  /* If a thread has a pending SIGINT, consume it; otherwise, set a
2522
     flag to consume the next one.  */
2523
  if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2524
      && WSTOPSIG (lp->status) == SIGINT)
2525
    lp->status = 0;
2526
  else
2527
    lp->ignore_sigint = 1;
2528
 
2529
  return 0;
2530
}
2531
 
2532
/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2533
   This function is called after we know the LWP has stopped; if the LWP
2534
   stopped before the expected SIGINT was delivered, then it will never have
2535
   arrived.  Also, if the signal was delivered to a shared queue and consumed
2536
   by a different thread, it will never be delivered to this LWP.  */
2537
 
2538
static void
2539
maybe_clear_ignore_sigint (struct lwp_info *lp)
2540
{
2541
  if (!lp->ignore_sigint)
2542
    return;
2543
 
2544
  if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2545
    {
2546
      if (debug_linux_nat)
2547
        fprintf_unfiltered (gdb_stdlog,
2548
                            "MCIS: Clearing bogus flag for %s\n",
2549
                            target_pid_to_str (lp->ptid));
2550
      lp->ignore_sigint = 0;
2551
    }
2552
}
2553
 
2554
/* Fetch the possible triggered data watchpoint info and store it in
   LP.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static void
save_sigtrap (struct lwp_info *lp)
{
  struct cleanup *old_chain;

  /* If the low target can't tell, record "not a watchpoint stop".  */
  if (linux_ops->to_stopped_by_watchpoint == NULL)
    {
      lp->stopped_by_watchpoint = 0;
      return;
    }

  /* The low-target queries below operate on inferior_ptid; switch it
     to LP temporarily and restore on exit.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = lp->ptid;

  lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();

  if (lp->stopped_by_watchpoint)
    {
      /* Cache the trapped data address too, if the target can report
	 it.  */
      if (linux_ops->to_stopped_data_address != NULL)
	lp->stopped_data_address_p =
	  linux_ops->to_stopped_data_address (&current_target,
					      &lp->stopped_data_address);
      else
	lp->stopped_data_address_p = 0;
    }

  do_cleanups (old_chain);
}
2597
 
2598
/* See save_sigtrap.  */
2599
 
2600
static int
2601
linux_nat_stopped_by_watchpoint (void)
2602
{
2603
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2604
 
2605
  gdb_assert (lp != NULL);
2606
 
2607
  return lp->stopped_by_watchpoint;
2608
}
2609
 
2610
static int
2611
linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2612
{
2613
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2614
 
2615
  gdb_assert (lp != NULL);
2616
 
2617
  *addr_p = lp->stopped_data_address;
2618
 
2619
  return lp->stopped_data_address_p;
2620
}
2621
 
2622
/* Wait until LP is stopped.  */
2623
 
2624
/* Iterator callback: wait until LP is stopped by the SIGSTOP we sent
   it.  If LP instead reports some other signal first, that event is
   held (in lp->status) or re-queued, and we keep waiting for the
   SIGSTOP.  Always returns 0 so iterate_over_lwps visits every LWP.
   DATA is unused.  */
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      if (status == 0)
        /* The LWP is gone (wait_lwp found it exited); nothing to
           stop.  */
        return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
          && WSTOPSIG (status) == SIGINT)
        {
          /* This SIGINT was already reported for another thread;
             discard it and keep waiting for the SIGSTOP.  */
          lp->ignore_sigint = 0;

          errno = 0;
          ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
                                target_pid_to_str (lp->ptid),
                                errno ? safe_strerror (errno) : "OK");

          return stop_wait_callback (lp, NULL);
        }

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
        {
          if (WSTOPSIG (status) == SIGTRAP)
            {
              /* If a LWP other than the LWP that we're reporting an
                 event for has hit a GDB breakpoint (as opposed to
                 some random trap signal), then just arrange for it to
                 hit it again later.  We don't keep the SIGTRAP status
                 and don't forward the SIGTRAP signal to the LWP.  We
                 will handle the current event, eventually we will
                 resume all LWPs, and this one will get its breakpoint
                 trap again.

                 If we do not do this, then we run the risk that the
                 user will delete or disable the breakpoint, but the
                 thread will have already tripped on it.  */

              /* Save the trap's siginfo in case we need it later.  */
              save_siginfo (lp);

              /* Cache whether this trap was a watchpoint hit, before
                 the debug registers can change under us.  */
              save_sigtrap (lp);

              /* Now resume this LWP and get the SIGSTOP event. */
              errno = 0;
              ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
              if (debug_linux_nat)
                {
                  fprintf_unfiltered (gdb_stdlog,
                                      "PTRACE_CONT %s, 0, 0 (%s)\n",
                                      target_pid_to_str (lp->ptid),
                                      errno ? safe_strerror (errno) : "OK");

                  fprintf_unfiltered (gdb_stdlog,
                                      "SWC: Candidate SIGTRAP event in %s\n",
                                      target_pid_to_str (lp->ptid));
                }
              /* Hold this event/waitstatus while we check to see if
                 there are any more (we still want to get that SIGSTOP). */
              stop_wait_callback (lp, NULL);

              /* Hold the SIGTRAP for handling by linux_nat_wait.  If
                 there's another event, throw it back into the
                 queue. */
              if (lp->status)
                {
                  /* NOTE(review): the debug message prints STATUS
                     (the SIGTRAP being held) while the signal re-sent
                     below comes from lp->status (the extra event
                     gathered by the recursive call) — confirm the
                     message is intentionally about the held event.  */
                  if (debug_linux_nat)
                    fprintf_unfiltered (gdb_stdlog,
                                        "SWC: kill %s, %s\n",
                                        target_pid_to_str (lp->ptid),
                                        status_to_str ((int) status));
                  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
                }

              /* Save the sigtrap event. */
              lp->status = status;
              return 0;
            }
          else
            {
              /* The thread was stopped with a signal other than
                 SIGSTOP, and didn't accidentally trip a breakpoint. */

              if (debug_linux_nat)
                {
                  fprintf_unfiltered (gdb_stdlog,
                                      "SWC: Pending event %s in %s\n",
                                      status_to_str ((int) status),
                                      target_pid_to_str (lp->ptid));
                }
              /* Now resume this LWP and get the SIGSTOP event. */
              errno = 0;
              ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
                                    target_pid_to_str (lp->ptid),
                                    errno ? safe_strerror (errno) : "OK");

              /* Hold this event/waitstatus while we check to see if
                 there are any more (we still want to get that SIGSTOP). */
              stop_wait_callback (lp, NULL);

              /* If the lp->status field is still empty, use it to
                 hold this event.  If not, then this event must be
                 returned to the event queue of the LWP.  */
              if (lp->status)
                {
                  if (debug_linux_nat)
                    {
                      fprintf_unfiltered (gdb_stdlog,
                                          "SWC: kill %s, %s\n",
                                          target_pid_to_str (lp->ptid),
                                          status_to_str ((int) status));
                    }
                  /* Re-send the signal so the LWP reports it again
                     the next time we wait on it.  */
                  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
                }
              else
                lp->status = status;
              return 0;
            }
        }
      else
        {
          /* We caught the SIGSTOP that we intended to catch, so
             there's no SIGSTOP pending.  */
          lp->stopped = 1;
          lp->signalled = 0;
        }
    }

  return 0;
}
2772
 
2773
/* Return non-zero if LP has a wait status pending.  */
2774
 
2775
static int
2776
status_callback (struct lwp_info *lp, void *data)
2777
{
2778
  /* Only report a pending wait status if we pretend that this has
2779
     indeed been resumed.  */
2780
  if (!lp->resumed)
2781
    return 0;
2782
 
2783
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2784
    {
2785
      /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2786
         or a a pending process exit.  Note that `W_EXITCODE(0,0) ==
2787
         0', so a clean process exit can not be stored pending in
2788
         lp->status, it is indistinguishable from
2789
         no-pending-status.  */
2790
      return 1;
2791
    }
2792
 
2793
  if (lp->status != 0)
2794
    return 1;
2795
 
2796
  return 0;
2797
}
2798
 
2799
/* Return non-zero if LP isn't stopped.  */
2800
 
2801
static int
2802
running_callback (struct lwp_info *lp, void *data)
2803
{
2804
  return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2805
}
2806
 
2807
/* Count the LWP's that have had events.  */
2808
 
2809
static int
2810
count_events_callback (struct lwp_info *lp, void *data)
2811
{
2812
  int *count = data;
2813
 
2814
  gdb_assert (count != NULL);
2815
 
2816
  /* Count only resumed LWPs that have a SIGTRAP event pending.  */
2817
  if (lp->status != 0 && lp->resumed
2818
      && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2819
    (*count)++;
2820
 
2821
  return 0;
2822
}
2823
 
2824
/* Select the LWP (if any) that is currently being single-stepped.  */
2825
 
2826
static int
2827
select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2828
{
2829
  if (lp->step && lp->status != 0)
2830
    return 1;
2831
  else
2832
    return 0;
2833
}
2834
 
2835
/* Select the Nth LWP that has had a SIGTRAP event.  */
2836
 
2837
static int
2838
select_event_lwp_callback (struct lwp_info *lp, void *data)
2839
{
2840
  int *selector = data;
2841
 
2842
  gdb_assert (selector != NULL);
2843
 
2844
  /* Select only resumed LWPs that have a SIGTRAP event pending. */
2845
  if (lp->status != 0 && lp->resumed
2846
      && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2847
    if ((*selector)-- == 0)
2848
      return 1;
2849
 
2850
  return 0;
2851
}
2852
 
2853
static int
2854
cancel_breakpoint (struct lwp_info *lp)
2855
{
2856
  /* Arrange for a breakpoint to be hit again later.  We don't keep
2857
     the SIGTRAP status and don't forward the SIGTRAP signal to the
2858
     LWP.  We will handle the current event, eventually we will resume
2859
     this LWP, and this breakpoint will trap again.
2860
 
2861
     If we do not do this, then we run the risk that the user will
2862
     delete or disable the breakpoint, but the LWP will have already
2863
     tripped on it.  */
2864
 
2865
  struct regcache *regcache = get_thread_regcache (lp->ptid);
2866
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
2867
  CORE_ADDR pc;
2868
 
2869
  pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2870
  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2871
    {
2872
      if (debug_linux_nat)
2873
        fprintf_unfiltered (gdb_stdlog,
2874
                            "CB: Push back breakpoint for %s\n",
2875
                            target_pid_to_str (lp->ptid));
2876
 
2877
      /* Back up the PC if necessary.  */
2878
      if (gdbarch_decr_pc_after_break (gdbarch))
2879
        regcache_write_pc (regcache, pc);
2880
 
2881
      return 1;
2882
    }
2883
  return 0;
2884
}
2885
 
2886
static int
2887
cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2888
{
2889
  struct lwp_info *event_lp = data;
2890
 
2891
  /* Leave the LWP that has been elected to receive a SIGTRAP alone.  */
2892
  if (lp == event_lp)
2893
    return 0;
2894
 
2895
  /* If a LWP other than the LWP that we're reporting an event for has
2896
     hit a GDB breakpoint (as opposed to some random trap signal),
2897
     then just arrange for it to hit it again later.  We don't keep
2898
     the SIGTRAP status and don't forward the SIGTRAP signal to the
2899
     LWP.  We will handle the current event, eventually we will resume
2900
     all LWPs, and this one will get its breakpoint trap again.
2901
 
2902
     If we do not do this, then we run the risk that the user will
2903
     delete or disable the breakpoint, but the LWP will have already
2904
     tripped on it.  */
2905
 
2906
  if (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2907
      && lp->status != 0
2908
      && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
2909
      && cancel_breakpoint (lp))
2910
    /* Throw away the SIGTRAP.  */
2911
    lp->status = 0;
2912
 
2913
  return 0;
2914
}
2915
 
2916
/* Select one LWP out of those that have events pending.  */
2917
 
2918
static void
2919
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2920
{
2921
  int num_events = 0;
2922
  int random_selector;
2923
  struct lwp_info *event_lp;
2924
 
2925
  /* Record the wait status for the original LWP.  */
2926
  (*orig_lp)->status = *status;
2927
 
2928
  /* Give preference to any LWP that is being single-stepped.  */
2929
  event_lp = iterate_over_lwps (filter,
2930
                                select_singlestep_lwp_callback, NULL);
2931
  if (event_lp != NULL)
2932
    {
2933
      if (debug_linux_nat)
2934
        fprintf_unfiltered (gdb_stdlog,
2935
                            "SEL: Select single-step %s\n",
2936
                            target_pid_to_str (event_lp->ptid));
2937
    }
2938
  else
2939
    {
2940
      /* No single-stepping LWP.  Select one at random, out of those
2941
         which have had SIGTRAP events.  */
2942
 
2943
      /* First see how many SIGTRAP events we have.  */
2944
      iterate_over_lwps (filter, count_events_callback, &num_events);
2945
 
2946
      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
2947
      random_selector = (int)
2948
        ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2949
 
2950
      if (debug_linux_nat && num_events > 1)
2951
        fprintf_unfiltered (gdb_stdlog,
2952
                            "SEL: Found %d SIGTRAP events, selecting #%d\n",
2953
                            num_events, random_selector);
2954
 
2955
      event_lp = iterate_over_lwps (filter,
2956
                                    select_event_lwp_callback,
2957
                                    &random_selector);
2958
    }
2959
 
2960
  if (event_lp != NULL)
2961
    {
2962
      /* Switch the event LWP.  */
2963
      *orig_lp = event_lp;
2964
      *status = event_lp->status;
2965
    }
2966
 
2967
  /* Flush the wait status for the event LWP.  */
2968
  (*orig_lp)->status = 0;
2969
}
2970
 
2971
/* Return non-zero if LP has been resumed.  */
2972
 
2973
static int
2974
resumed_callback (struct lwp_info *lp, void *data)
2975
{
2976
  return lp->resumed;
2977
}
2978
 
2979
/* Stop an active thread, verify it still exists, then resume it.  */
2980
 
2981
static int
2982
stop_and_resume_callback (struct lwp_info *lp, void *data)
2983
{
2984
  struct lwp_info *ptr;
2985
 
2986
  if (!lp->stopped && !lp->signalled)
2987
    {
2988
      stop_callback (lp, NULL);
2989
      stop_wait_callback (lp, NULL);
2990
      /* Resume if the lwp still exists.  */
2991
      for (ptr = lwp_list; ptr; ptr = ptr->next)
2992
        if (lp == ptr)
2993
          {
2994
            resume_callback (lp, NULL);
2995
            resume_set_callback (lp, NULL);
2996
          }
2997
    }
2998
  return 0;
2999
}
3000
 
3001
/* Check if we should go on and pass this event to common code.
3002
   Return the affected lwp if we are, or NULL otherwise.  */
3003
/* Check if we should go on and pass the event (LWPID, STATUS, as
   returned by waitpid with OPTIONS) to common code.  Return the
   affected lwp if we should, or NULL if the event was consumed here
   (discarded, re-queued, or recorded for later).  */
static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int options)
{
  struct lwp_info *lp;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.  */
  if (WIFSTOPPED (status) && !lp)
    {
      linux_record_stopped_pid (lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e.  not part of the current process.  This can happen
     if we detach from a program we original forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* NOTE drow/2003-06-17: This code seems to be meant for debugging
     CLONE_PTRACE processes which do not use the thread library -
     otherwise we wouldn't find the new LWP this way.  That doesn't
     currently work, and the following code is currently unreachable
     due to the two blocks above.  If it's fixed some day, this code
     should be broken out into a function so that we can also pick up
     LWPs from the new interface.  */
  if (!lp)
    {
      lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
      if (options & __WCLONE)
        lp->cloned = 1;

      gdb_assert (WIFSTOPPED (status)
                  && WSTOPSIG (status) == SIGSTOP);
      lp->signalled = 1;

      if (!in_thread_list (inferior_ptid))
        {
          inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
                                     GET_PID (inferior_ptid));
          add_thread (inferior_ptid);
        }

      add_thread (lp->ptid);
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
         recorded in lp->waitstatus if we care for it.  We can carry
         on handling the event like a regular SIGTRAP from here
         on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
        return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  The
     PTRACE_EVENT_* code is carried in the upper 16 bits of STATUS.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Handling extended status 0x%06x\n",
                            status);
      if (linux_handle_extended_wait (lp, status, 0))
        return NULL;
    }

  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    {
      /* Save the trap's siginfo in case we need it later.  */
      save_siginfo (lp);

      /* Cache whether this was a watchpoint hit (and where), before
         the debug registers can change under us.  */
      save_sigtrap (lp);
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status))
      && num_lwps (GET_PID (lp->ptid)) > 1)
    {
      /* If this is the main thread, we must stop all threads and verify
         if they are still alive.  This is because in the nptl thread model
         on Linux 2.4, there is no signal issued for exiting LWPs
         other than the main thread.  We only get the main thread exit
         signal once all child threads have already exited.  If we
         stop all the threads and use the stop_wait_callback to check
         if they have exited we can determine whether this signal
         should be ignored or whether it means the end of the debugged
         application, regardless of which threading model is being
         used.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
        {
          lp->stopped = 1;
          iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
                             stop_and_resume_callback, NULL);
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s exited.\n",
                            target_pid_to_str (lp->ptid));

      if (num_lwps (GET_PID (lp->ptid)) > 1)
       {
         /* If there is at least one more LWP, then the exit signal
            was not the end of the debugged application and should be
            ignored.  */
         exit_lwp (lp);
         return NULL;
       }
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s exited.\n",
                            target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Delayed SIGSTOP caught for %s.\n",
                            target_pid_to_str (lp->ptid));

      /* This is a delayed SIGSTOP.  */
      lp->signalled = 0;

      /* Re-resume the LWP with the same step/continue mode it had, so
         it keeps going as if the SIGSTOP never happened.  */
      registers_changed ();

      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
                            lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
                            lp->step ?
                            "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                            target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Delayed SIGINT caught for %s.\n",
                            target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      /* Swallow the SIGINT and resume the LWP as before.  */
      registers_changed ();
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
                            lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s %s, 0, 0 (discard SIGINT)\n",
                            lp->step ?
                            "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                            target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  return lp;
}
3213
 
3214
static ptid_t
3215
linux_nat_wait_1 (struct target_ops *ops,
3216
                  ptid_t ptid, struct target_waitstatus *ourstatus,
3217
                  int target_options)
3218
{
3219
  static sigset_t prev_mask;
3220
  struct lwp_info *lp = NULL;
3221
  int options = 0;
3222
  int status = 0;
3223
  pid_t pid;
3224
 
3225
  if (debug_linux_nat_async)
3226
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3227
 
3228
  /* The first time we get here after starting a new inferior, we may
3229
     not have added it to the LWP list yet - this is the earliest
3230
     moment at which we know its PID.  */
3231
  if (ptid_is_pid (inferior_ptid))
3232
    {
3233
      /* Upgrade the main thread's ptid.  */
3234
      thread_change_ptid (inferior_ptid,
3235
                          BUILD_LWP (GET_PID (inferior_ptid),
3236
                                     GET_PID (inferior_ptid)));
3237
 
3238
      lp = add_lwp (inferior_ptid);
3239
      lp->resumed = 1;
3240
    }
3241
 
3242
  /* Make sure SIGCHLD is blocked.  */
3243
  block_child_signals (&prev_mask);
3244
 
3245
  if (ptid_equal (ptid, minus_one_ptid))
3246
    pid = -1;
3247
  else if (ptid_is_pid (ptid))
3248
    /* A request to wait for a specific tgid.  This is not possible
3249
       with waitpid, so instead, we wait for any child, and leave
3250
       children we're not interested in right now with a pending
3251
       status to report later.  */
3252
    pid = -1;
3253
  else
3254
    pid = GET_LWP (ptid);
3255
 
3256
retry:
3257
  lp = NULL;
3258
  status = 0;
3259
 
3260
  /* Make sure that of those LWPs we want to get an event from, there
3261
     is at least one LWP that has been resumed.  If there's none, just
3262
     bail out.  The core may just be flushing asynchronously all
3263
     events.  */
3264
  if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3265
    {
3266
      ourstatus->kind = TARGET_WAITKIND_IGNORE;
3267
 
3268
      if (debug_linux_nat_async)
3269
        fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3270
 
3271
      restore_child_signals_mask (&prev_mask);
3272
      return minus_one_ptid;
3273
    }
3274
 
3275
  /* First check if there is a LWP with a wait status pending.  */
3276
  if (pid == -1)
3277
    {
3278
      /* Any LWP that's been resumed will do.  */
3279
      lp = iterate_over_lwps (ptid, status_callback, NULL);
3280
      if (lp)
3281
        {
3282
          if (debug_linux_nat && lp->status)
3283
            fprintf_unfiltered (gdb_stdlog,
3284
                                "LLW: Using pending wait status %s for %s.\n",
3285
                                status_to_str (lp->status),
3286
                                target_pid_to_str (lp->ptid));
3287
        }
3288
 
3289
      /* But if we don't find one, we'll have to wait, and check both
3290
         cloned and uncloned processes.  We start with the cloned
3291
         processes.  */
3292
      options = __WCLONE | WNOHANG;
3293
    }
3294
  else if (is_lwp (ptid))
3295
    {
3296
      if (debug_linux_nat)
3297
        fprintf_unfiltered (gdb_stdlog,
3298
                            "LLW: Waiting for specific LWP %s.\n",
3299
                            target_pid_to_str (ptid));
3300
 
3301
      /* We have a specific LWP to check.  */
3302
      lp = find_lwp_pid (ptid);
3303
      gdb_assert (lp);
3304
 
3305
      if (debug_linux_nat && lp->status)
3306
        fprintf_unfiltered (gdb_stdlog,
3307
                            "LLW: Using pending wait status %s for %s.\n",
3308
                            status_to_str (lp->status),
3309
                            target_pid_to_str (lp->ptid));
3310
 
3311
      /* If we have to wait, take into account whether PID is a cloned
3312
         process or not.  And we have to convert it to something that
3313
         the layer beneath us can understand.  */
3314
      options = lp->cloned ? __WCLONE : 0;
3315
      pid = GET_LWP (ptid);
3316
 
3317
      /* We check for lp->waitstatus in addition to lp->status,
3318
         because we can have pending process exits recorded in
3319
         lp->status and W_EXITCODE(0,0) == 0.  We should probably have
3320
         an additional lp->status_p flag.  */
3321
      if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3322
        lp = NULL;
3323
    }
3324
 
3325
  if (lp && lp->signalled)
3326
    {
3327
      /* A pending SIGSTOP may interfere with the normal stream of
3328
         events.  In a typical case where interference is a problem,
3329
         we have a SIGSTOP signal pending for LWP A while
3330
         single-stepping it, encounter an event in LWP B, and take the
3331
         pending SIGSTOP while trying to stop LWP A.  After processing
3332
         the event in LWP B, LWP A is continued, and we'll never see
3333
         the SIGTRAP associated with the last time we were
3334
         single-stepping LWP A.  */
3335
 
3336
      /* Resume the thread.  It should halt immediately returning the
3337
         pending SIGSTOP.  */
3338
      registers_changed ();
3339
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3340
                            lp->step, TARGET_SIGNAL_0);
3341
      if (debug_linux_nat)
3342
        fprintf_unfiltered (gdb_stdlog,
3343
                            "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3344
                            lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3345
                            target_pid_to_str (lp->ptid));
3346
      lp->stopped = 0;
3347
      gdb_assert (lp->resumed);
3348
 
3349
      /* Catch the pending SIGSTOP.  */
3350
      status = lp->status;
3351
      lp->status = 0;
3352
 
3353
      stop_wait_callback (lp, NULL);
3354
 
3355
      /* If the lp->status field isn't empty, we caught another signal
3356
         while flushing the SIGSTOP.  Return it back to the event
3357
         queue of the LWP, as we already have an event to handle.  */
3358
      if (lp->status)
3359
        {
3360
          if (debug_linux_nat)
3361
            fprintf_unfiltered (gdb_stdlog,
3362
                                "LLW: kill %s, %s\n",
3363
                                target_pid_to_str (lp->ptid),
3364
                                status_to_str (lp->status));
3365
          kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3366
        }
3367
 
3368
      lp->status = status;
3369
    }
3370
 
3371
  if (!target_can_async_p ())
3372
    {
3373
      /* Causes SIGINT to be passed on to the attached process.  */
3374
      set_sigint_trap ();
3375
    }
3376
 
3377
  /* Translate generic target_wait options into waitpid options.  */
3378
  if (target_options & TARGET_WNOHANG)
3379
    options |= WNOHANG;
3380
 
3381
  while (lp == NULL)
3382
    {
3383
      pid_t lwpid;
3384
 
3385
      lwpid = my_waitpid (pid, &status, options);
3386
 
3387
      if (lwpid > 0)
3388
        {
3389
          gdb_assert (pid == -1 || lwpid == pid);
3390
 
3391
          if (debug_linux_nat)
3392
            {
3393
              fprintf_unfiltered (gdb_stdlog,
3394
                                  "LLW: waitpid %ld received %s\n",
3395
                                  (long) lwpid, status_to_str (status));
3396
            }
3397
 
3398
          lp = linux_nat_filter_event (lwpid, status, options);
3399
 
3400
          if (lp
3401
              && ptid_is_pid (ptid)
3402
              && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
3403
            {
3404
              gdb_assert (lp->resumed);
3405
 
3406
              if (debug_linux_nat)
3407
                fprintf (stderr, "LWP %ld got an event %06x, leaving pending.\n",
3408
                         ptid_get_lwp (lp->ptid), status);
3409
 
3410
              if (WIFSTOPPED (lp->status))
3411
                {
3412
                  if (WSTOPSIG (lp->status) != SIGSTOP)
3413
                    {
3414
                      /* Cancel breakpoint hits.  The breakpoint may
3415
                         be removed before we fetch events from this
3416
                         process to report to the core.  It is best
3417
                         not to assume the moribund breakpoints
3418
                         heuristic always handles these cases --- it
3419
                         could be too many events go through to the
3420
                         core before this one is handled.  All-stop
3421
                         always cancels breakpoint hits in all
3422
                         threads.  */
3423
                      if (non_stop
3424
                          && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
3425
                          && WSTOPSIG (lp->status) == SIGTRAP
3426
                          && cancel_breakpoint (lp))
3427
                        {
3428
                          /* Throw away the SIGTRAP.  */
3429
                          lp->status = 0;
3430
 
3431
                          if (debug_linux_nat)
3432
                            fprintf (stderr,
3433
                                     "LLW: LWP %ld hit a breakpoint while waiting "
3434
                                     "for another process; cancelled it\n",
3435
                                     ptid_get_lwp (lp->ptid));
3436
                        }
3437
                      lp->stopped = 1;
3438
                    }
3439
                  else
3440
                    {
3441
                      lp->stopped = 1;
3442
                      lp->signalled = 0;
3443
                    }
3444
                }
3445
              else if (WIFEXITED (status) || WIFSIGNALED (status))
3446
                {
3447
                  if (debug_linux_nat)
3448
                    fprintf (stderr, "Process %ld exited while stopping LWPs\n",
3449
                             ptid_get_lwp (lp->ptid));
3450
 
3451
                  /* This was the last lwp in the process.  Since
3452
                     events are serialized to GDB core, and we can't
3453
                     report this one right now, but GDB core and the
3454
                     other target layers will want to be notified
3455
                     about the exit code/signal, leave the status
3456
                     pending for the next time we're able to report
3457
                     it.  */
3458
 
3459
                  /* Prevent trying to stop this thread again.  We'll
3460
                     never try to resume it because it has a pending
3461
                     status.  */
3462
                  lp->stopped = 1;
3463
 
3464
                  /* Dead LWP's aren't expected to reported a pending
3465
                     sigstop.  */
3466
                  lp->signalled = 0;
3467
 
3468
                  /* Store the pending event in the waitstatus as
3469
                     well, because W_EXITCODE(0,0) == 0.  */
3470
                  store_waitstatus (&lp->waitstatus, lp->status);
3471
                }
3472
 
3473
              /* Keep looking.  */
3474
              lp = NULL;
3475
              continue;
3476
            }
3477
 
3478
          if (lp)
3479
            break;
3480
          else
3481
            {
3482
              if (pid == -1)
3483
                {
3484
                  /* waitpid did return something.  Restart over.  */
3485
                  options |= __WCLONE;
3486
                }
3487
              continue;
3488
            }
3489
        }
3490
 
3491
      if (pid == -1)
3492
        {
3493
          /* Alternate between checking cloned and uncloned processes.  */
3494
          options ^= __WCLONE;
3495
 
3496
          /* And every time we have checked both:
3497
             In async mode, return to event loop;
3498
             In sync mode, suspend waiting for a SIGCHLD signal.  */
3499
          if (options & __WCLONE)
3500
            {
3501
              if (target_options & TARGET_WNOHANG)
3502
                {
3503
                  /* No interesting event.  */
3504
                  ourstatus->kind = TARGET_WAITKIND_IGNORE;
3505
 
3506
                  if (debug_linux_nat_async)
3507
                    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3508
 
3509
                  restore_child_signals_mask (&prev_mask);
3510
                  return minus_one_ptid;
3511
                }
3512
 
3513
              sigsuspend (&suspend_mask);
3514
            }
3515
        }
3516
      else if (target_options & TARGET_WNOHANG)
3517
        {
3518
          /* No interesting event for PID yet.  */
3519
          ourstatus->kind = TARGET_WAITKIND_IGNORE;
3520
 
3521
          if (debug_linux_nat_async)
3522
            fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3523
 
3524
          restore_child_signals_mask (&prev_mask);
3525
          return minus_one_ptid;
3526
        }
3527
 
3528
      /* We shouldn't end up here unless we want to try again.  */
3529
      gdb_assert (lp == NULL);
3530
    }
3531
 
3532
  if (!target_can_async_p ())
3533
    clear_sigint_trap ();
3534
 
3535
  gdb_assert (lp);
3536
 
3537
  status = lp->status;
3538
  lp->status = 0;
3539
 
3540
  /* Don't report signals that GDB isn't interested in, such as
3541
     signals that are neither printed nor stopped upon.  Stopping all
3542
     threads can be a bit time-consuming so if we want decent
3543
     performance with heavily multi-threaded programs, especially when
3544
     they're using a high frequency timer, we'd better avoid it if we
3545
     can.  */
3546
 
3547
  if (WIFSTOPPED (status))
3548
    {
3549
      int signo = target_signal_from_host (WSTOPSIG (status));
3550
      struct inferior *inf;
3551
 
3552
      inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3553
      gdb_assert (inf);
3554
 
3555
      /* Defer to common code if we get a signal while
3556
         single-stepping, since that may need special care, e.g. to
3557
         skip the signal handler, or, if we're gaining control of the
3558
         inferior.  */
3559
      if (!lp->step
3560
          && inf->stop_soon == NO_STOP_QUIETLY
3561
          && signal_stop_state (signo) == 0
3562
          && signal_print_state (signo) == 0
3563
          && signal_pass_state (signo) == 1)
3564
        {
3565
          /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3566
             here?  It is not clear we should.  GDB may not expect
3567
             other threads to run.  On the other hand, not resuming
3568
             newly attached threads may cause an unwanted delay in
3569
             getting them running.  */
3570
          registers_changed ();
3571
          linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3572
                                lp->step, signo);
3573
          if (debug_linux_nat)
3574
            fprintf_unfiltered (gdb_stdlog,
3575
                                "LLW: %s %s, %s (preempt 'handle')\n",
3576
                                lp->step ?
3577
                                "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3578
                                target_pid_to_str (lp->ptid),
3579
                                signo ? strsignal (signo) : "0");
3580
          lp->stopped = 0;
3581
          goto retry;
3582
        }
3583
 
3584
      if (!non_stop)
3585
        {
3586
          /* Only do the below in all-stop, as we currently use SIGINT
3587
             to implement target_stop (see linux_nat_stop) in
3588
             non-stop.  */
3589
          if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3590
            {
3591
              /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3592
                 forwarded to the entire process group, that is, all LWPs
3593
                 will receive it - unless they're using CLONE_THREAD to
3594
                 share signals.  Since we only want to report it once, we
3595
                 mark it as ignored for all LWPs except this one.  */
3596
              iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3597
                                              set_ignore_sigint, NULL);
3598
              lp->ignore_sigint = 0;
3599
            }
3600
          else
3601
            maybe_clear_ignore_sigint (lp);
3602
        }
3603
    }
3604
 
3605
  /* This LWP is stopped now.  */
3606
  lp->stopped = 1;
3607
 
3608
  if (debug_linux_nat)
3609
    fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3610
                        status_to_str (status), target_pid_to_str (lp->ptid));
3611
 
3612
  if (!non_stop)
3613
    {
3614
      /* Now stop all other LWP's ...  */
3615
      iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3616
 
3617
      /* ... and wait until all of them have reported back that
3618
         they're no longer running.  */
3619
      iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3620
 
3621
      /* If we're not waiting for a specific LWP, choose an event LWP
3622
         from among those that have had events.  Giving equal priority
3623
         to all LWPs that have had events helps prevent
3624
         starvation.  */
3625
      if (pid == -1)
3626
        select_event_lwp (ptid, &lp, &status);
3627
 
3628
      /* Now that we've selected our final event LWP, cancel any
3629
         breakpoints in other LWPs that have hit a GDB breakpoint.
3630
         See the comment in cancel_breakpoints_callback to find out
3631
         why.  */
3632
      iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3633
 
3634
      /* In all-stop, from the core's perspective, all LWPs are now
3635
         stopped until a new resume action is sent over.  */
3636
      iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3637
    }
3638
  else
3639
    lp->resumed = 0;
3640
 
3641
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3642
    {
3643
      if (debug_linux_nat)
3644
        fprintf_unfiltered (gdb_stdlog,
3645
                            "LLW: trap ptid is %s.\n",
3646
                            target_pid_to_str (lp->ptid));
3647
    }
3648
 
3649
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3650
    {
3651
      *ourstatus = lp->waitstatus;
3652
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3653
    }
3654
  else
3655
    store_waitstatus (ourstatus, status);
3656
 
3657
  if (debug_linux_nat_async)
3658
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3659
 
3660
  restore_child_signals_mask (&prev_mask);
3661
  lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3662
  return lp->ptid;
3663
}
3664
 
3665
/* Resume LWPs that are currently stopped without any pending status
3666
   to report, but are resumed from the core's perspective.  */
3667
 
3668
static int
3669
resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3670
{
3671
  ptid_t *wait_ptid_p = data;
3672
 
3673
  if (lp->stopped
3674
      && lp->resumed
3675
      && lp->status == 0
3676
      && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3677
    {
3678
      gdb_assert (is_executing (lp->ptid));
3679
 
3680
      /* Don't bother if there's a breakpoint at PC that we'd hit
3681
         immediately, and we're not waiting for this LWP.  */
3682
      if (!ptid_match (lp->ptid, *wait_ptid_p))
3683
        {
3684
          struct regcache *regcache = get_thread_regcache (lp->ptid);
3685
          CORE_ADDR pc = regcache_read_pc (regcache);
3686
 
3687
          if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3688
            return 0;
3689
        }
3690
 
3691
      if (debug_linux_nat)
3692
        fprintf_unfiltered (gdb_stdlog,
3693
                            "RSRL: resuming stopped-resumed LWP %s\n",
3694
                            target_pid_to_str (lp->ptid));
3695
 
3696
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3697
                            lp->step, TARGET_SIGNAL_0);
3698
      lp->stopped = 0;
3699
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3700
      lp->stopped_by_watchpoint = 0;
3701
    }
3702
 
3703
  return 0;
3704
}
3705
 
3706
static ptid_t
3707
linux_nat_wait (struct target_ops *ops,
3708
                ptid_t ptid, struct target_waitstatus *ourstatus,
3709
                int target_options)
3710
{
3711
  ptid_t event_ptid;
3712
 
3713
  if (debug_linux_nat)
3714
    fprintf_unfiltered (gdb_stdlog, "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
3715
 
3716
  /* Flush the async file first.  */
3717
  if (target_can_async_p ())
3718
    async_file_flush ();
3719
 
3720
  /* Resume LWPs that are currently stopped without any pending status
3721
     to report, but are resumed from the core's perspective.  LWPs get
3722
     in this state if we find them stopping at a time we're not
3723
     interested in reporting the event (target_wait on a
3724
     specific_process, for example, see linux_nat_wait_1), and
3725
     meanwhile the event became uninteresting.  Don't bother resuming
3726
     LWPs we're not going to wait for if they'd stop immediately.  */
3727
  if (non_stop)
3728
    iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3729
 
3730
  event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3731
 
3732
  /* If we requested any event, and something came out, assume there
3733
     may be more.  If we requested a specific lwp or process, also
3734
     assume there may be more.  */
3735
  if (target_can_async_p ()
3736
      && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3737
          || !ptid_equal (ptid, minus_one_ptid)))
3738
    async_file_mark ();
3739
 
3740
  /* Get ready for the next event.  */
3741
  if (target_can_async_p ())
3742
    target_async (inferior_event_handler, 0);
3743
 
3744
  return event_ptid;
3745
}
3746
 
3747
static int
3748
kill_callback (struct lwp_info *lp, void *data)
3749
{
3750
  errno = 0;
3751
  ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3752
  if (debug_linux_nat)
3753
    fprintf_unfiltered (gdb_stdlog,
3754
                        "KC:  PTRACE_KILL %s, 0, 0 (%s)\n",
3755
                        target_pid_to_str (lp->ptid),
3756
                        errno ? safe_strerror (errno) : "OK");
3757
 
3758
  return 0;
3759
}
3760
 
3761
static int
3762
kill_wait_callback (struct lwp_info *lp, void *data)
3763
{
3764
  pid_t pid;
3765
 
3766
  /* We must make sure that there are no pending events (delayed
3767
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3768
     program doesn't interfere with any following debugging session.  */
3769
 
3770
  /* For cloned processes we must check both with __WCLONE and
3771
     without, since the exit status of a cloned process isn't reported
3772
     with __WCLONE.  */
3773
  if (lp->cloned)
3774
    {
3775
      do
3776
        {
3777
          pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
3778
          if (pid != (pid_t) -1)
3779
            {
3780
              if (debug_linux_nat)
3781
                fprintf_unfiltered (gdb_stdlog,
3782
                                    "KWC: wait %s received unknown.\n",
3783
                                    target_pid_to_str (lp->ptid));
3784
              /* The Linux kernel sometimes fails to kill a thread
3785
                 completely after PTRACE_KILL; that goes from the stop
3786
                 point in do_fork out to the one in
3787
                 get_signal_to_deliever and waits again.  So kill it
3788
                 again.  */
3789
              kill_callback (lp, NULL);
3790
            }
3791
        }
3792
      while (pid == GET_LWP (lp->ptid));
3793
 
3794
      gdb_assert (pid == -1 && errno == ECHILD);
3795
    }
3796
 
3797
  do
3798
    {
3799
      pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
3800
      if (pid != (pid_t) -1)
3801
        {
3802
          if (debug_linux_nat)
3803
            fprintf_unfiltered (gdb_stdlog,
3804
                                "KWC: wait %s received unk.\n",
3805
                                target_pid_to_str (lp->ptid));
3806
          /* See the call to kill_callback above.  */
3807
          kill_callback (lp, NULL);
3808
        }
3809
    }
3810
  while (pid == GET_LWP (lp->ptid));
3811
 
3812
  gdb_assert (pid == -1 && errno == ECHILD);
3813
  return 0;
3814
}
3815
 
3816
/* Implementation of the "kill" target operation: terminate the whole
   inferior (including any not-yet-followed fork child), then mourn
   it.  */

static void
linux_nat_kill (struct target_ops *ops)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  int status;

  /* If we're stopped while forking and we haven't followed yet,
     kill the other task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
      /* Reap the child killed above so it doesn't linger as a
         zombie.  */
      wait (&status);
    }

  if (forks_exist_p ())
    /* Multiple forks are being held in the fork list; kill them
       all.  */
    linux_fork_killall ();
  else
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
      /* Stop all threads before killing them, since ptrace requires
         that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (ptid, stop_callback, NULL);
      /* ... and wait until all of them have reported back that
         they're no longer running.  */
      iterate_over_lwps (ptid, stop_wait_callback, NULL);

      /* Kill all LWP's ...  */
      iterate_over_lwps (ptid, kill_callback, NULL);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (ptid, kill_wait_callback, NULL);
    }

  target_mourn_inferior ();
}
3857
 
3858
static void
3859
linux_nat_mourn_inferior (struct target_ops *ops)
3860
{
3861
  purge_lwp_list (ptid_get_pid (inferior_ptid));
3862
 
3863
  if (! forks_exist_p ())
3864
    /* Normal case, no other forks available.  */
3865
    linux_ops->to_mourn_inferior (ops);
3866
  else
3867
    /* Multi-fork case.  The current inferior_ptid has exited, but
3868
       there are other viable forks to debug.  Delete the exiting
3869
       one and context-switch to the first available.  */
3870
    linux_fork_mourn_inferior ();
3871
}
3872
 
3873
/* Convert a native/host siginfo object, into/from the siginfo in the
3874
   layout of the inferiors' architecture.  */
3875
 
3876
static void
3877
siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3878
{
3879
  int done = 0;
3880
 
3881
  if (linux_nat_siginfo_fixup != NULL)
3882
    done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3883
 
3884
  /* If there was no callback, or the callback didn't do anything,
3885
     then just do a straight memcpy.  */
3886
  if (!done)
3887
    {
3888
      if (direction == 1)
3889
        memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3890
      else
3891
        memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3892
    }
3893
}
3894
 
3895
/* Handle TARGET_OBJECT_SIGNAL_INFO transfers: read or write the
   siginfo of the current LWP via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO.  Exactly one of READBUF/WRITEBUF is non-NULL.
   Returns the number of bytes transferred, or -1 on error.  */

static LONGEST
linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
                    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  int pid;
  struct siginfo siginfo;
  gdb_byte inf_siginfo[sizeof (struct siginfo)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  /* Use the LWP id when we have one; fall back to the process id.  */
  pid = GET_LWP (inferior_ptid);
  if (pid == 0)
    pid = GET_PID (inferior_ptid);

  if (offset > sizeof (siginfo))
    return -1;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return -1;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the object's size.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      /* Writes are read-modify-write: splice the new bytes into the
         fetched copy, convert back, and push the whole object.  */
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
        return -1;
    }

  return len;
}
3947
 
3948
static LONGEST
3949
linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3950
                        const char *annex, gdb_byte *readbuf,
3951
                        const gdb_byte *writebuf,
3952
                        ULONGEST offset, LONGEST len)
3953
{
3954
  struct cleanup *old_chain;
3955
  LONGEST xfer;
3956
 
3957
  if (object == TARGET_OBJECT_SIGNAL_INFO)
3958
    return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3959
                               offset, len);
3960
 
3961
  /* The target is connected but no live inferior is selected.  Pass
3962
     this request down to a lower stratum (e.g., the executable
3963
     file).  */
3964
  if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3965
    return 0;
3966
 
3967
  old_chain = save_inferior_ptid ();
3968
 
3969
  if (is_lwp (inferior_ptid))
3970
    inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3971
 
3972
  xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3973
                                     offset, len);
3974
 
3975
  do_cleanups (old_chain);
3976
  return xfer;
3977
}
3978
 
3979
static int
3980
linux_thread_alive (ptid_t ptid)
3981
{
3982
  int err;
3983
 
3984
  gdb_assert (is_lwp (ptid));
3985
 
3986
  /* Send signal 0 instead of anything ptrace, because ptracing a
3987
     running thread errors out claiming that the thread doesn't
3988
     exist.  */
3989
  err = kill_lwp (GET_LWP (ptid), 0);
3990
 
3991
  if (debug_linux_nat)
3992
    fprintf_unfiltered (gdb_stdlog,
3993
                        "LLTA: KILL(SIG0) %s (%s)\n",
3994
                        target_pid_to_str (ptid),
3995
                        err ? safe_strerror (err) : "OK");
3996
 
3997
  if (err != 0)
3998
    return 0;
3999
 
4000
  return 1;
4001
}
4002
 
4003
static int
4004
linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4005
{
4006
  return linux_thread_alive (ptid);
4007
}
4008
 
4009
static char *
4010
linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4011
{
4012
  static char buf[64];
4013
 
4014
  if (is_lwp (ptid)
4015
      && (GET_PID (ptid) != GET_LWP (ptid)
4016
          || num_lwps (GET_PID (ptid)) > 1))
4017
    {
4018
      snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4019
      return buf;
4020
    }
4021
 
4022
  return normal_pid_to_str (ptid);
4023
}
4024
 
4025
/* Accepts an integer PID; Returns a string representing a file that
4026
   can be opened to get the symbols for the child process.  */
4027
 
4028
static char *
4029
linux_child_pid_to_exec_file (int pid)
4030
{
4031
  char *name1, *name2;
4032
 
4033
  name1 = xmalloc (MAXPATHLEN);
4034
  name2 = xmalloc (MAXPATHLEN);
4035
  make_cleanup (xfree, name1);
4036
  make_cleanup (xfree, name2);
4037
  memset (name2, 0, MAXPATHLEN);
4038
 
4039
  sprintf (name1, "/proc/%d/exe", pid);
4040
  if (readlink (name1, name2, MAXPATHLEN) > 0)
4041
    return name2;
4042
  else
4043
    return name1;
4044
}
4045
 
4046
/* Service function for corefiles and info proc.  */
4047
 
4048
static int
read_mapping (FILE *mapfile,
              long long *addr,
              long long *endaddr,
              char *permissions,
              long long *offset,
              char *device, long long *inode, char *filename)
{
  /* Field widths (%7s) bound the string conversions to the callers'
     8-byte PERMISSIONS and DEVICE buffers (7 chars + NUL);
     previously an over-long field could overrun them.  */
  int ret = fscanf (mapfile, "%llx-%llx %7s %llx %7s %llx",
                    addr, endaddr, permissions, offset, device, inode);

  filename[0] = '\0';
  if (ret > 0 && ret != EOF)
    {
      /* Eat everything up to EOL for the filename.  This will prevent
         weird filenames (such as one with embedded whitespace) from
         confusing this code.  It also makes this code more robust in
         respect to annotations the kernel may add after the filename.

         Note the filename is used for informational purposes
         only.  */
      ret += fscanf (mapfile, "%[^\n]\n", filename);
    }

  /* Nonzero iff at least the numeric fields were parsed.  */
  return (ret != 0 && ret != EOF);
}
4074
 
4075
/* Fills the "to_find_memory_regions" target vector.  Lists the memory
4076
   regions in the inferior for a corefile.  */
4077
 
4078
static int
4079
linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
4080
                                            unsigned long,
4081
                                            int, int, int, void *), void *obfd)
4082
{
4083
  int pid = PIDGET (inferior_ptid);
4084
  char mapsfilename[MAXPATHLEN];
4085
  FILE *mapsfile;
4086
  long long addr, endaddr, size, offset, inode;
4087
  char permissions[8], device[8], filename[MAXPATHLEN];
4088
  int read, write, exec;
4089
  int ret;
4090
  struct cleanup *cleanup;
4091
 
4092
  /* Compose the filename for the /proc memory map, and open it.  */
4093
  sprintf (mapsfilename, "/proc/%d/maps", pid);
4094
  if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
4095
    error (_("Could not open %s."), mapsfilename);
4096
  cleanup = make_cleanup_fclose (mapsfile);
4097
 
4098
  if (info_verbose)
4099
    fprintf_filtered (gdb_stdout,
4100
                      "Reading memory regions from %s\n", mapsfilename);
4101
 
4102
  /* Now iterate until end-of-file.  */
4103
  while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4104
                       &offset, &device[0], &inode, &filename[0]))
4105
    {
4106
      size = endaddr - addr;
4107
 
4108
      /* Get the segment's permissions.  */
4109
      read = (strchr (permissions, 'r') != 0);
4110
      write = (strchr (permissions, 'w') != 0);
4111
      exec = (strchr (permissions, 'x') != 0);
4112
 
4113
      if (info_verbose)
4114
        {
4115
          fprintf_filtered (gdb_stdout,
4116
                            "Save segment, %lld bytes at %s (%c%c%c)",
4117
                            size, paddress (target_gdbarch, addr),
4118
                            read ? 'r' : ' ',
4119
                            write ? 'w' : ' ', exec ? 'x' : ' ');
4120
          if (filename[0])
4121
            fprintf_filtered (gdb_stdout, " for %s", filename);
4122
          fprintf_filtered (gdb_stdout, "\n");
4123
        }
4124
 
4125
      /* Invoke the callback function to create the corefile
4126
         segment.  */
4127
      func (addr, size, read, write, exec, obfd);
4128
    }
4129
  do_cleanups (cleanup);
4130
  return 0;
4131
}
4132
 
4133
static int
4134
find_signalled_thread (struct thread_info *info, void *data)
4135
{
4136
  if (info->stop_signal != TARGET_SIGNAL_0
4137
      && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4138
    return 1;
4139
 
4140
  return 0;
4141
}
4142
 
4143
static enum target_signal
4144
find_stop_signal (void)
4145
{
4146
  struct thread_info *info =
4147
    iterate_over_threads (find_signalled_thread, NULL);
4148
 
4149
  if (info)
4150
    return info->stop_signal;
4151
  else
4152
    return TARGET_SIGNAL_0;
4153
}
4154
 
4155
/* Records the thread's register state for the corefile note
4156
   section.  */
4157
 
4158
static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
                               char *note_data, int *note_size,
                               enum target_signal stop_signal)
{
  gdb_gregset_t gregs;
  gdb_fpregset_t fpregs;
  unsigned long lwp = ptid_get_lwp (ptid);
  struct gdbarch *gdbarch = target_gdbarch;
  struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
  const struct regset *regset;
  int core_regset_p;
  struct cleanup *old_chain;
  struct core_regset_section *sect_list;
  char *gdb_regset;

  /* Fetch the thread's registers with inferior_ptid temporarily
     switched to PTID.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = ptid;
  target_fetch_registers (regcache, -1);
  do_cleanups (old_chain);

  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
  sect_list = gdbarch_core_regset_sections (gdbarch);

  /* Collect the general-purpose registers: prefer the gdbarch ".reg"
     regset hook, fall back to fill_gregset.  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
                                                     sizeof (gregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
                            &gregs, sizeof (gregs));
  else
    fill_gregset (regcache, &gregs, -1);

  note_data = (char *) elfcore_write_prstatus (obfd,
                                               note_data,
                                               note_size,
                                               lwp,
                                               stop_signal, &gregs);

  /* The loop below uses the new struct core_regset_section, which stores
     the supported section names and sizes for the core file.  Note that
     note PRSTATUS needs to be treated specially.  But the other notes are
     structurally the same, so they can benefit from the new struct.  */
  if (core_regset_p && sect_list != NULL)
    while (sect_list->sect_name != NULL)
      {
        /* .reg was already handled above.  */
        if (strcmp (sect_list->sect_name, ".reg") == 0)
          {
            sect_list++;
            continue;
          }
        regset = gdbarch_regset_from_core_section (gdbarch,
                                                   sect_list->sect_name,
                                                   sect_list->size);
        gdb_assert (regset && regset->collect_regset);
        gdb_regset = xmalloc (sect_list->size);
        regset->collect_regset (regset, regcache, -1,
                                gdb_regset, sect_list->size);
        note_data = (char *) elfcore_write_register_note (obfd,
                                                          note_data,
                                                          note_size,
                                                          sect_list->sect_name,
                                                          gdb_regset,
                                                          sect_list->size);
        xfree (gdb_regset);
        sect_list++;
      }

  /* For architectures that do not have the struct core_regset_section
     implemented, we use the old method.  When all the architectures have
     the new support, the code below should be deleted.  */
  else
    {
      /* Collect floating-point registers the same two-tier way,
         via the ".reg2" regset or fill_fpregset.  */
      if (core_regset_p
          && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
                                                         sizeof (fpregs))) != NULL
          && regset->collect_regset != NULL)
        regset->collect_regset (regset, regcache, -1,
                                &fpregs, sizeof (fpregs));
      else
        fill_fpregset (regcache, &fpregs, -1);

      note_data = (char *) elfcore_write_prfpreg (obfd,
                                                  note_data,
                                                  note_size,
                                                  &fpregs, sizeof (fpregs));
    }

  return note_data;
}
4249
 
4250
/* Accumulator threaded through linux_nat_corefile_thread_callback while
   iterating over all LWPs to build the corefile note section.  */

struct linux_nat_corefile_thread_data
{
  bfd *obfd;                       /* Output BFD the notes belong to.  */
  char *note_data;                 /* Growing malloc'd note buffer.  */
  int *note_size;                  /* Running size of NOTE_DATA.  */
  int num_notes;                   /* Count of thread notes emitted.  */
  enum target_signal stop_signal;  /* Signal that stopped the inferior.  */
};
4258
 
4259
/* Called by gdbthread.c once per thread.  Records the thread's
4260
   register state for the corefile note section.  */
4261
 
4262
static int
4263
linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4264
{
4265
  struct linux_nat_corefile_thread_data *args = data;
4266
 
4267
  args->note_data = linux_nat_do_thread_registers (args->obfd,
4268
                                                   ti->ptid,
4269
                                                   args->note_data,
4270
                                                   args->note_size,
4271
                                                   args->stop_signal);
4272
  args->num_notes++;
4273
 
4274
  return 0;
4275
}
4276
 
4277
/* Enumerate spufs IDs for process PID.  */
4278
 
4279
static void
4280
iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4281
{
4282
  char path[128];
4283
  DIR *dir;
4284
  struct dirent *entry;
4285
 
4286
  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4287
  dir = opendir (path);
4288
  if (!dir)
4289
    return;
4290
 
4291
  rewinddir (dir);
4292
  while ((entry = readdir (dir)) != NULL)
4293
    {
4294
      struct stat st;
4295
      struct statfs stfs;
4296
      int fd;
4297
 
4298
      fd = atoi (entry->d_name);
4299
      if (!fd)
4300
        continue;
4301
 
4302
      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4303
      if (stat (path, &st) != 0)
4304
        continue;
4305
      if (!S_ISDIR (st.st_mode))
4306
        continue;
4307
 
4308
      if (statfs (path, &stfs) != 0)
4309
        continue;
4310
      if (stfs.f_type != SPUFS_MAGIC)
4311
        continue;
4312
 
4313
      callback (data, fd);
4314
    }
4315
 
4316
  closedir (dir);
4317
}
4318
 
4319
/* Generate corefile notes for SPU contexts.  */

/* Accumulator passed through linux_spu_corefile_callback while
   dumping spufs files into corefile notes.  */

struct linux_spu_corefile_data
{
  bfd *obfd;        /* Output BFD for the core file notes.  */
  char *note_data;  /* Growing note buffer.  */
  int *note_size;   /* Running size of NOTE_DATA.  */
};
4327
 
4328
static void
4329
linux_spu_corefile_callback (void *data, int fd)
4330
{
4331
  struct linux_spu_corefile_data *args = data;
4332
  int i;
4333
 
4334
  static const char *spu_files[] =
4335
    {
4336
      "object-id",
4337
      "mem",
4338
      "regs",
4339
      "fpcr",
4340
      "lslr",
4341
      "decr",
4342
      "decr_status",
4343
      "signal1",
4344
      "signal1_type",
4345
      "signal2",
4346
      "signal2_type",
4347
      "event_mask",
4348
      "event_status",
4349
      "mbox_info",
4350
      "ibox_info",
4351
      "wbox_info",
4352
      "dma_info",
4353
      "proxydma_info",
4354
   };
4355
 
4356
  for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4357
    {
4358
      char annex[32], note_name[32];
4359
      gdb_byte *spu_data;
4360
      LONGEST spu_len;
4361
 
4362
      xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4363
      spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4364
                                   annex, &spu_data);
4365
      if (spu_len > 0)
4366
        {
4367
          xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4368
          args->note_data = elfcore_write_note (args->obfd, args->note_data,
4369
                                                args->note_size, note_name,
4370
                                                NT_SPU, spu_data, spu_len);
4371
          xfree (spu_data);
4372
        }
4373
    }
4374
}
4375
 
4376
static char *
4377
linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4378
{
4379
  struct linux_spu_corefile_data args;
4380
  args.obfd = obfd;
4381
  args.note_data = note_data;
4382
  args.note_size = note_size;
4383
 
4384
  iterate_over_spus (PIDGET (inferior_ptid),
4385
                     linux_spu_corefile_callback, &args);
4386
 
4387
  return args.note_data;
4388
}
4389
 
4390
/* Fills the "to_make_corefile_note" target vector.  Builds the note
4391
   section for a corefile, and returns it in a malloc buffer.  */
4392
 
4393
static char *
4394
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4395
{
4396
  struct linux_nat_corefile_thread_data thread_args;
4397
  struct cleanup *old_chain;
4398
  /* The variable size must be >= sizeof (prpsinfo_t.pr_fname).  */
4399
  char fname[16] = { '\0' };
4400
  /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs).  */
4401
  char psargs[80] = { '\0' };
4402
  char *note_data = NULL;
4403
  ptid_t current_ptid = inferior_ptid;
4404
  ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
4405
  gdb_byte *auxv;
4406
  int auxv_len;
4407
 
4408
  if (get_exec_file (0))
4409
    {
4410
      strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
4411
      strncpy (psargs, get_exec_file (0), sizeof (psargs));
4412
      if (get_inferior_args ())
4413
        {
4414
          char *string_end;
4415
          char *psargs_end = psargs + sizeof (psargs);
4416
 
4417
          /* linux_elfcore_write_prpsinfo () handles zero unterminated
4418
             strings fine.  */
4419
          string_end = memchr (psargs, 0, sizeof (psargs));
4420
          if (string_end != NULL)
4421
            {
4422
              *string_end++ = ' ';
4423
              strncpy (string_end, get_inferior_args (),
4424
                       psargs_end - string_end);
4425
            }
4426
        }
4427
      note_data = (char *) elfcore_write_prpsinfo (obfd,
4428
                                                   note_data,
4429
                                                   note_size, fname, psargs);
4430
    }
4431
 
4432
  /* Dump information for threads.  */
4433
  thread_args.obfd = obfd;
4434
  thread_args.note_data = note_data;
4435
  thread_args.note_size = note_size;
4436
  thread_args.num_notes = 0;
4437
  thread_args.stop_signal = find_stop_signal ();
4438
  iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
4439
  gdb_assert (thread_args.num_notes != 0);
4440
  note_data = thread_args.note_data;
4441
 
4442
  auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4443
                                NULL, &auxv);
4444
  if (auxv_len > 0)
4445
    {
4446
      note_data = elfcore_write_note (obfd, note_data, note_size,
4447
                                      "CORE", NT_AUXV, auxv, auxv_len);
4448
      xfree (auxv);
4449
    }
4450
 
4451
  note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4452
 
4453
  make_cleanup (xfree, note_data);
4454
  return note_data;
4455
}
4456
 
4457
/* Implement the "info proc" command.  Prints assorted information
   about a process read from the /proc filesystem: by default the
   command line, cwd and exe of the current inferior; optional
   arguments select a different pid and/or additional reports
   (mappings, status, stat, all).  */

static void
linux_nat_info_proc_cmd (char *args, int from_tty)
{
  /* A long is used for pid instead of an int to avoid a loss of precision
     compiler warning from the output of strtoul.  */
  long pid = PIDGET (inferior_ptid);
  FILE *procfile;
  char **argv = NULL;
  char buffer[MAXPATHLEN];
  char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
  /* Flags selecting which /proc reports to print.  cmdline/cwd/exe
     are on by default.  */
  int cmdline_f = 1;
  int cwd_f = 1;
  int exe_f = 1;
  int mappings_f = 0;
  int environ_f = 0;   /* NOTE(review): set nowhere below — apparently
                          reserved for a future "environ" option.  */
  int status_f = 0;
  int stat_f = 0;
  int all = 0;
  struct stat dummy;

  if (args)
    {
      /* Break up 'args' into an argv array.  */
      argv = gdb_buildargv (args);
      make_cleanup_freeargv (argv);
    }
  /* Walk the arguments: a leading digit selects a pid, words select
     reports.  Note "mappings"/"exe"/"all" accept any prefix, while
     "status"/"stat"/"cmd"/"cwd" require an exact match ("stat" is a
     prefix of "status", so prefix matching would be ambiguous).  */
  while (argv != NULL && *argv != NULL)
    {
      if (isdigit (argv[0][0]))
        {
          pid = strtoul (argv[0], NULL, 10);
        }
      else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
        {
          mappings_f = 1;
        }
      else if (strcmp (argv[0], "status") == 0)
        {
          status_f = 1;
        }
      else if (strcmp (argv[0], "stat") == 0)
        {
          stat_f = 1;
        }
      else if (strcmp (argv[0], "cmd") == 0)
        {
          cmdline_f = 1;
        }
      else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
        {
          exe_f = 1;
        }
      else if (strcmp (argv[0], "cwd") == 0)
        {
          cwd_f = 1;
        }
      else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
        {
          all = 1;
        }
      else
        {
          /* [...] (future options here) */
        }
      argv++;
    }
  if (pid == 0)
    error (_("No current process: you must name one."));

  /* Verify /proc is mounted and the process exists.  */
  sprintf (fname1, "/proc/%ld", pid);
  if (stat (fname1, &dummy) != 0)
    error (_("No /proc directory: '%s'"), fname1);

  printf_filtered (_("process %ld\n"), pid);
  if (cmdline_f || all)
    {
      sprintf (fname1, "/proc/%ld/cmdline", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          struct cleanup *cleanup = make_cleanup_fclose (procfile);
          if (fgets (buffer, sizeof (buffer), procfile))
            printf_filtered ("cmdline = '%s'\n", buffer);
          else
            warning (_("unable to read '%s'"), fname1);
          do_cleanups (cleanup);
        }
      else
        warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (cwd_f || all)
    {
      sprintf (fname1, "/proc/%ld/cwd", pid);
      /* Zero-fill so the readlink result is NUL-terminated (readlink
         itself does not terminate).  */
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
        printf_filtered ("cwd = '%s'\n", fname2);
      else
        warning (_("unable to read link '%s'"), fname1);
    }
  if (exe_f || all)
    {
      sprintf (fname1, "/proc/%ld/exe", pid);
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
        printf_filtered ("exe = '%s'\n", fname2);
      else
        warning (_("unable to read link '%s'"), fname1);
    }
  if (mappings_f || all)
    {
      sprintf (fname1, "/proc/%ld/maps", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          long long addr, endaddr, size, offset, inode;
          char permissions[8], device[8], filename[MAXPATHLEN];
          struct cleanup *cleanup;

          cleanup = make_cleanup_fclose (procfile);
          printf_filtered (_("Mapped address spaces:\n\n"));
          /* Column widths differ between 32- and 64-bit targets.  */
          if (gdbarch_addr_bit (target_gdbarch) == 32)
            {
              printf_filtered ("\t%10s %10s %10s %10s %7s\n",
                           "Start Addr",
                           "  End Addr",
                           "      Size", "    Offset", "objfile");
            }
          else
            {
              printf_filtered ("  %18s %18s %10s %10s %7s\n",
                           "Start Addr",
                           "  End Addr",
                           "      Size", "    Offset", "objfile");
            }

          while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
                               &offset, &device[0], &inode, &filename[0]))
            {
              size = endaddr - addr;

              /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
                 calls here (and possibly above) should be abstracted
                 out into their own functions?  Andrew suggests using
                 a generic local_address_string instead to print out
                 the addresses; that makes sense to me, too.  */

              if (gdbarch_addr_bit (target_gdbarch) == 32)
                {
                  printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
                               (unsigned long) addr,    /* FIXME: pr_addr */
                               (unsigned long) endaddr,
                               (int) size,
                               (unsigned int) offset,
                               filename[0] ? filename : "");
                }
              else
                {
                  printf_filtered ("  %#18lx %#18lx %#10x %#10x %7s\n",
                               (unsigned long) addr,    /* FIXME: pr_addr */
                               (unsigned long) endaddr,
                               (int) size,
                               (unsigned int) offset,
                               filename[0] ? filename : "");
                }
            }

          do_cleanups (cleanup);
        }
      else
        warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (status_f || all)
    {
      /* /proc/PID/status is human-readable; just echo it.  */
      sprintf (fname1, "/proc/%ld/status", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          struct cleanup *cleanup = make_cleanup_fclose (procfile);
          while (fgets (buffer, sizeof (buffer), procfile) != NULL)
            puts_filtered (buffer);
          do_cleanups (cleanup);
        }
      else
        warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (stat_f || all)
    {
      /* /proc/PID/stat is a single line of space-separated fields;
         parse and label each one in order.  NOTE(review): the "%lu"
         conversions below store into a signed `long' — same
         representation in practice, but strictly a format/type
         mismatch.  */
      sprintf (fname1, "/proc/%ld/stat", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
        {
          int itmp;
          char ctmp;
          long ltmp;
          struct cleanup *cleanup = make_cleanup_fclose (procfile);

          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("Process: %d\n"), itmp);
          if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
            printf_filtered (_("Exec file: %s\n"), buffer);
          if (fscanf (procfile, "%c ", &ctmp) > 0)
            printf_filtered (_("State: %c\n"), ctmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("Parent process: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("Process group: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("Session id: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("TTY: %d\n"), itmp);
          if (fscanf (procfile, "%d ", &itmp) > 0)
            printf_filtered (_("TTY owner process group: %d\n"), itmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Flags: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Minor faults (no memory page): %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Minor faults, children: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Major faults (memory page faults): %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Major faults, children: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("utime: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("stime: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("utime, children: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("stime, children: %ld\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
                             ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("'nice' value: %ld\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("jiffies until next timeout: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("start time (jiffies since system boot): %ld\n"),
                             ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Virtual memory size: %lu\n"),
                             (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("End of text: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)
            printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
#if 0                           /* Don't know how architecture-dependent the rest is...
                                   Anyway the signal bitmap info is available from "status".  */
          if (fscanf (procfile, "%lu ", &ltmp) > 0)      /* FIXME arch? */
            printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)      /* FIXME arch? */
            printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%ld ", &ltmp) > 0)
            printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
          if (fscanf (procfile, "%lu ", &ltmp) > 0)      /* FIXME arch? */
            printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
#endif
          do_cleanups (cleanup);
        }
      else
        warning (_("unable to open /proc file '%s'"), fname1);
    }
}
4739
 
4740
/* Implement the to_xfer_partial interface for memory reads using the /proc
4741
   filesystem.  Because we can use a single read() call for /proc, this
4742
   can be much more efficient than banging away at PTRACE_PEEKTEXT,
4743
   but it doesn't support writes.  */
4744
 
4745
static LONGEST
4746
linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4747
                         const char *annex, gdb_byte *readbuf,
4748
                         const gdb_byte *writebuf,
4749
                         ULONGEST offset, LONGEST len)
4750
{
4751
  LONGEST ret;
4752
  int fd;
4753
  char filename[64];
4754
 
4755
  if (object != TARGET_OBJECT_MEMORY || !readbuf)
4756
    return 0;
4757
 
4758
  /* Don't bother for one word.  */
4759
  if (len < 3 * sizeof (long))
4760
    return 0;
4761
 
4762
  /* We could keep this file open and cache it - possibly one per
4763
     thread.  That requires some juggling, but is even faster.  */
4764
  sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4765
  fd = open (filename, O_RDONLY | O_LARGEFILE);
4766
  if (fd == -1)
4767
    return 0;
4768
 
4769
  /* If pread64 is available, use it.  It's faster if the kernel
4770
     supports it (only one syscall), and it's 64-bit safe even on
4771
     32-bit platforms (for instance, SPARC debugging a SPARC64
4772
     application).  */
4773
#ifdef HAVE_PREAD64
4774
  if (pread64 (fd, readbuf, len, offset) != len)
4775
#else
4776
  if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4777
#endif
4778
    ret = 0;
4779
  else
4780
    ret = len;
4781
 
4782
  close (fd);
4783
  return ret;
4784
}
4785
 
4786
 
4787
/* Enumerate spufs IDs for process PID.  Writes the context IDs, as
   4-byte target-order integers, into BUF, honoring the (OFFSET, LEN)
   window of the overall ID list.  Returns the number of bytes written
   into BUF, or -1 if /proc/PID/fd cannot be opened.  */
static LONGEST
spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
  LONGEST pos = 0;      /* Logical offset into the full ID list.  */
  LONGEST written = 0;  /* Bytes actually stored into BUF.  */
  char path[128];
  DIR *dir;
  struct dirent *entry;

  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip ".", ".." and non-numeric entries (atoi yields 0).  */
      fd = atoi (entry->d_name);
      if (!fd)
        continue;

      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
        continue;
      if (!S_ISDIR (st.st_mode))
        continue;

      /* Only descriptors that are spufs context directories count.  */
      if (statfs (path, &stfs) != 0)
        continue;
      if (stfs.f_type != SPUFS_MAGIC)
        continue;

      /* Copy this ID only if its 4 bytes lie entirely inside the
         requested (OFFSET, LEN) window.  */
      if (pos >= offset && pos + 4 <= offset + len)
        {
          store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
          written += 4;
        }
      pos += 4;
    }

  closedir (dir);
  return written;
}
4836
 
4837
/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4838
   object type, using the /proc file system.  */
4839
static LONGEST
4840
linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4841
                     const char *annex, gdb_byte *readbuf,
4842
                     const gdb_byte *writebuf,
4843
                     ULONGEST offset, LONGEST len)
4844
{
4845
  char buf[128];
4846
  int fd = 0;
4847
  int ret = -1;
4848
  int pid = PIDGET (inferior_ptid);
4849
 
4850
  if (!annex)
4851
    {
4852
      if (!readbuf)
4853
        return -1;
4854
      else
4855
        return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4856
    }
4857
 
4858
  xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4859
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4860
  if (fd <= 0)
4861
    return -1;
4862
 
4863
  if (offset != 0
4864
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4865
    {
4866
      close (fd);
4867
      return 0;
4868
    }
4869
 
4870
  if (writebuf)
4871
    ret = write (fd, writebuf, (size_t) len);
4872
  else if (readbuf)
4873
    ret = read (fd, readbuf, (size_t) len);
4874
 
4875
  close (fd);
4876
  return ret;
4877
}
4878
 
4879
 
4880
/* Parse LINE as a signal set and add its set bits to SIGS.  */
4881
 
4882
static void
4883
add_line_to_sigset (const char *line, sigset_t *sigs)
4884
{
4885
  int len = strlen (line) - 1;
4886
  const char *p;
4887
  int signum;
4888
 
4889
  if (line[len] != '\n')
4890
    error (_("Could not parse signal set: %s"), line);
4891
 
4892
  p = line;
4893
  signum = len * 4;
4894
  while (len-- > 0)
4895
    {
4896
      int digit;
4897
 
4898
      if (*p >= '0' && *p <= '9')
4899
        digit = *p - '0';
4900
      else if (*p >= 'a' && *p <= 'f')
4901
        digit = *p - 'a' + 10;
4902
      else
4903
        error (_("Could not parse signal set: %s"), line);
4904
 
4905
      signum -= 4;
4906
 
4907
      if (digit & 1)
4908
        sigaddset (sigs, signum + 1);
4909
      if (digit & 2)
4910
        sigaddset (sigs, signum + 2);
4911
      if (digit & 4)
4912
        sigaddset (sigs, signum + 3);
4913
      if (digit & 8)
4914
        sigaddset (sigs, signum + 4);
4915
 
4916
      p++;
4917
    }
4918
}
4919
 
4920
/* Find process PID's pending signals from /proc/pid/status and set
4921
   SIGS to match.  */
4922
 
4923
void
4924
linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
4925
{
4926
  FILE *procfile;
4927
  char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4928
  int signum;
4929
  struct cleanup *cleanup;
4930
 
4931
  sigemptyset (pending);
4932
  sigemptyset (blocked);
4933
  sigemptyset (ignored);
4934
  sprintf (fname, "/proc/%d/status", pid);
4935
  procfile = fopen (fname, "r");
4936
  if (procfile == NULL)
4937
    error (_("Could not open %s"), fname);
4938
  cleanup = make_cleanup_fclose (procfile);
4939
 
4940
  while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4941
    {
4942
      /* Normal queued signals are on the SigPnd line in the status
4943
         file.  However, 2.6 kernels also have a "shared" pending
4944
         queue for delivering signals to a thread group, so check for
4945
         a ShdPnd line also.
4946
 
4947
         Unfortunately some Red Hat kernels include the shared pending
4948
         queue but not the ShdPnd status field.  */
4949
 
4950
      if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4951
        add_line_to_sigset (buffer + 8, pending);
4952
      else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4953
        add_line_to_sigset (buffer + 8, pending);
4954
      else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4955
        add_line_to_sigset (buffer + 8, blocked);
4956
      else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4957
        add_line_to_sigset (buffer + 8, ignored);
4958
    }
4959
 
4960
  do_cleanups (cleanup);
4961
}
4962
 
4963
/* Implement TARGET_OBJECT_OSDATA transfers for the "processes" annex:
   build an XML <osdata> document listing every process in /proc (pid,
   owner, command line) and serve it back in (OFFSET, LEN) slices.
   The snapshot is (re)built when OFFSET is 0 and freed once the
   reader runs past the end.  NOTE(review): the static snapshot state
   makes this non-reentrant — only one enumeration may be in progress
   at a time.  */
static LONGEST
linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
                    const char *annex, gdb_byte *readbuf,
                    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  /* We make the process list snapshot when the object starts to be
     read.  */
  static const char *buf;           /* Finished XML document.  */
  static LONGEST len_avail = -1;    /* Its length; -1 = never built.  */
  static struct obstack obstack;    /* Storage backing BUF.  */

  DIR *dirp;

  gdb_assert (object == TARGET_OBJECT_OSDATA);

  if (strcmp (annex, "processes") != 0)
    return 0;

  /* Only reads are supported.  */
  gdb_assert (readbuf && !writebuf);

  if (offset == 0)
    {
      /* New enumeration: discard any leftover snapshot and rebuild.  */
      if (len_avail != -1 && len_avail != 0)
        obstack_free (&obstack, NULL);
      len_avail = 0;
      buf = NULL;
      obstack_init (&obstack);
      obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");

      dirp = opendir ("/proc");
      if (dirp)
        {
          struct dirent *dp;
          while ((dp = readdir (dirp)) != NULL)
            {
              struct stat statbuf;
              char procentry[sizeof ("/proc/4294967295")];

              /* Only numeric directory names of at most 10 digits can
                 be pids; this also bounds the sprintf below.  */
              if (!isdigit (dp->d_name[0])
                  || NAMELEN (dp) > sizeof ("4294967295") - 1)
                continue;

              sprintf (procentry, "/proc/%s", dp->d_name);
              if (stat (procentry, &statbuf) == 0
                  && S_ISDIR (statbuf.st_mode))
                {
                  char *pathname;
                  FILE *f;
                  char cmd[MAXPATHLEN + 1];
                  struct passwd *entry;

                  pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
                  entry = getpwuid (statbuf.st_uid);

                  if ((f = fopen (pathname, "r")) != NULL)
                    {
                      size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
                      if (len > 0)
                        {
                          int i;
                          /* cmdline separates arguments with NULs;
                             replace them with spaces for display.  */
                          for (i = 0; i < len; i++)
                            if (cmd[i] == '\0')
                              cmd[i] = ' ';
                          cmd[len] = '\0';

                          obstack_xml_printf (
                            &obstack,
                            "<item>"
                            "<column name=\"pid\">%s</column>"
                            "<column name=\"user\">%s</column>"
                            "<column name=\"command\">%s</column>"
                            "</item>",
                            dp->d_name,
                            entry ? entry->pw_name : "?",
                            cmd);
                        }
                      fclose (f);
                    }

                  xfree (pathname);
                }
            }

          closedir (dirp);
        }

      obstack_grow_str0 (&obstack, "</osdata>\n");
      buf = obstack_finish (&obstack);
      len_avail = strlen (buf);
    }

  if (offset >= len_avail)
    {
      /* Done.  Get rid of the obstack.  */
      obstack_free (&obstack, NULL);
      buf = NULL;
      len_avail = 0;
      return 0;
    }

  /* Serve the requested slice, clamped to what remains.  */
  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);

  return len;
}
5069
 
5070
static LONGEST
5071
linux_xfer_partial (struct target_ops *ops, enum target_object object,
5072
                    const char *annex, gdb_byte *readbuf,
5073
                    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5074
{
5075
  LONGEST xfer;
5076
 
5077
  if (object == TARGET_OBJECT_AUXV)
5078
    return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
5079
                             offset, len);
5080
 
5081
  if (object == TARGET_OBJECT_OSDATA)
5082
    return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5083
                               offset, len);
5084
 
5085
  if (object == TARGET_OBJECT_SPU)
5086
    return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5087
                                offset, len);
5088
 
5089
  /* GDB calculates all the addresses in possibly larget width of the address.
5090
     Address width needs to be masked before its final use - either by
5091
     linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5092
 
5093
     Compare ADDR_BIT first to avoid a compiler warning on shift overflow.  */
5094
 
5095
  if (object == TARGET_OBJECT_MEMORY)
5096
    {
5097
      int addr_bit = gdbarch_addr_bit (target_gdbarch);
5098
 
5099
      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5100
        offset &= ((ULONGEST) 1 << addr_bit) - 1;
5101
    }
5102
 
5103
  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5104
                                  offset, len);
5105
  if (xfer != 0)
5106
    return xfer;
5107
 
5108
  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5109
                             offset, len);
5110
}
5111
 
5112
/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  */

/* Install the Linux-specific target methods into T.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  /* Fork/vfork/exec/syscall event and catchpoint handling.  */
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  /* Core file support.  */
  t->to_find_memory_regions = linux_nat_find_memory_regions;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Save the original xfer method so linux_xfer_partial can fall back
     to it, then interpose our own.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;
}
5132
 
5133
/* Build the generic GNU/Linux target: start from the plain ptrace
   target and layer the Linux-specific methods on top.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *ops = inf_ptrace_target ();

  linux_target_install_ops (ops);
  return ops;
}
5143
 
5144
struct target_ops *
5145
linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
5146
{
5147
  struct target_ops *t;
5148
 
5149
  t = inf_ptrace_trad_target (register_u_offset);
5150
  linux_target_install_ops (t);
5151
 
5152
  return t;
5153
}
5154
 
5155
/* target_is_async_p implementation.  */
5156
 
5157
static int
5158
linux_nat_is_async_p (void)
5159
{
5160
  /* NOTE: palves 2008-03-21: We're only async when the user requests
5161
     it explicitly with the "set target-async" command.
5162
     Someday, linux will always be async.  */
5163
  if (!target_async_permitted)
5164
    return 0;
5165
 
5166
  /* See target.h/target_async_mask.  */
5167
  return linux_nat_async_mask_value;
5168
}
5169
 
5170
/* target_can_async_p implementation.  */
5171
 
5172
static int
5173
linux_nat_can_async_p (void)
5174
{
5175
  /* NOTE: palves 2008-03-21: We're only async when the user requests
5176
     it explicitly with the "set target-async" command.
5177
     Someday, linux will always be async.  */
5178
  if (!target_async_permitted)
5179
    return 0;
5180
 
5181
  /* See target.h/target_async_mask.  */
5182
  return linux_nat_async_mask_value;
5183
}
5184
 
5185
/* to_supports_non_stop implementation: the GNU/Linux native target
   always supports non-stop mode.  */
static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
5190
 
5191
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  Read by linux_nat_supports_multi_process
   below.  */

int linux_multi_process = 1;
5195
 
5196
/* to_supports_multi_process implementation: simply reports the
   linux_multi_process knob above.  */
static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}
5201
 
5202
/* target_async_mask implementation.  */
5203
 
5204
static int
5205
linux_nat_async_mask (int new_mask)
5206
{
5207
  int curr_mask = linux_nat_async_mask_value;
5208
 
5209
  if (curr_mask != new_mask)
5210
    {
5211
      if (new_mask == 0)
5212
        {
5213
          linux_nat_async (NULL, 0);
5214
          linux_nat_async_mask_value = new_mask;
5215
        }
5216
      else
5217
        {
5218
          linux_nat_async_mask_value = new_mask;
5219
 
5220
          /* If we're going out of async-mask in all-stop, then the
5221
             inferior is stopped.  The next resume will call
5222
             target_async.  In non-stop, the target event source
5223
             should be always registered in the event loop.  Do so
5224
             now.  */
5225
          if (non_stop)
5226
            linux_nat_async (inferior_event_handler, 0);
5227
        }
5228
    }
5229
 
5230
  return curr_mask;
5231
}
5232
 
5233
/* Non-zero while GDB owns the terminal in async mode; cleared by
   linux_nat_terminal_inferior and set by linux_nat_terminal_ours to
   keep those two calls idempotent.  */
static int async_terminal_is_ours = 1;
5234
 
5235
/* target_terminal_inferior implementation.  Hand the terminal to the
   inferior; in async mode also stop listening for stdin events and
   install the SIGINT trap.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_inferior ();
      return;
    }

  terminal_inferior ();

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop watching stdin while the inferior owns the terminal.
     NOTE(review): set_sigint_trap presumably redirects SIGINT toward
     the inferior's handling -- confirm against inflow.c.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
5257
 
5258
/* target_terminal_ours implementation.  Reclaim the terminal for GDB;
   in async mode also restore the stdin event handler and clear the
   SIGINT trap.  */

static void
linux_nat_terminal_ours (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_ours ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  terminal_ours ();

  /* Idempotent: only undo what linux_nat_terminal_inferior did.  */
  if (async_terminal_is_ours)
    return;

  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}
5282
 
5283
/* Client callback (and its opaque context) registered via
   linux_nat_async; invoked from handle_target_event when the event
   pipe becomes readable.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
                                      void *context);
static void *async_client_context;
5286
 
5287
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.  */

static void
sigchld_handler (int signo)
{
  /* Preserve errno across the handler, since the calls below may
     clobber it.  */
  int old_errno = errno;

  if (debug_linux_nat_async)
    /* NOTE(review): fprintf_unfiltered is not async-signal-safe; this
       is debug-only output but could misbehave if the signal lands
       inside stdio -- confirm this is an accepted trade-off.  */
    fprintf_unfiltered (gdb_stdlog, "sigchld\n");

  /* Only mark the pipe when it actually exists (async enabled).  */
  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
                           events to handle.  */

  errno = old_errno;
}
5307
 
5308
/* Callback registered with the target events file descriptor.
   Forwards the wakeup to the client registered via linux_nat_async.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
}
5315
 
5316
/* Create/destroy the target events pipe.  Returns previous state.  */

static int
linux_async_pipe (int enable)
{
  /* The pipe counts as "on" iff its read end is a valid descriptor.  */
  int previous = (linux_nat_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t prev_mask;

      /* Switch states with child signals blocked, presumably so
         sigchld_handler never observes a half-created/half-destroyed
         pipe.  */
      block_child_signals (&prev_mask);

      if (enable)
        {
          if (pipe (linux_nat_event_pipe) == -1)
            internal_error (__FILE__, __LINE__,
                            "creating event pipe failed.");

          /* Non-blocking ends, so async_file_mark from the signal
             handler cannot wedge.  NOTE(review): the fcntl return
             values are unchecked.  */
          fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
          fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
        }
      else
        {
          close (linux_nat_event_pipe[0]);
          close (linux_nat_event_pipe[1]);
          /* -1 marks the pipe as gone (see `previous' above and
             sigchld_handler).  */
          linux_nat_event_pipe[0] = -1;
          linux_nat_event_pipe[1] = -1;
        }

      restore_child_signals_mask (&prev_mask);
    }

  return previous;
}
5351
 
5352
/* target_async implementation.  A non-NULL CALLBACK enables async
   mode (creating the event pipe and registering it with the event
   loop); a NULL CALLBACK disables it and tears the pipe down.  */

static void
linux_nat_async (void (*callback) (enum inferior_event_type event_type,
                                   void *context), void *context)
{
  /* Calling target_async while masked is a programming error.  */
  if (linux_nat_async_mask_value == 0 || !target_async_permitted)
    internal_error (__FILE__, __LINE__,
                    "Calling target_async when async is masked");

  if (callback != NULL)
    {
      async_client_callback = callback;
      async_client_context = context;
      /* linux_async_pipe returns the previous state; only register
         the handler when the pipe was just created.  */
      if (!linux_async_pipe (1))
        {
          add_file_handler (linux_nat_event_pipe[0],
                            handle_target_event, NULL);
          /* There may be pending events to handle.  Tell the event loop
             to poll them.  */
          async_file_mark ();
        }
    }
  else
    {
      async_client_callback = callback;
      async_client_context = context;
      /* Unregister before destroying the pipe.  */
      delete_file_handler (linux_nat_event_pipe[0]);
      linux_async_pipe (0);
    }
  return;
}
5384
 
5385
/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5386
   event came out.  */
5387
 
5388
static int
5389
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
5390
{
5391
  if (!lwp->stopped)
5392
    {
5393
      int pid, status;
5394
      ptid_t ptid = lwp->ptid;
5395
 
5396
      if (debug_linux_nat)
5397
        fprintf_unfiltered (gdb_stdlog,
5398
                            "LNSL: running -> suspending %s\n",
5399
                            target_pid_to_str (lwp->ptid));
5400
 
5401
 
5402
      stop_callback (lwp, NULL);
5403
      stop_wait_callback (lwp, NULL);
5404
 
5405
      /* If the lwp exits while we try to stop it, there's nothing
5406
         else to do.  */
5407
      lwp = find_lwp_pid (ptid);
5408
      if (lwp == NULL)
5409
        return 0;
5410
 
5411
      /* If we didn't collect any signal other than SIGSTOP while
5412
         stopping the LWP, push a SIGNAL_0 event.  In either case, the
5413
         event-loop will end up calling target_wait which will collect
5414
         these.  */
5415
      if (lwp->status == 0)
5416
        lwp->status = W_STOPCODE (0);
5417
      async_file_mark ();
5418
    }
5419
  else
5420
    {
5421
      /* Already known to be stopped; do nothing.  */
5422
 
5423
      if (debug_linux_nat)
5424
        {
5425
          if (find_thread_ptid (lwp->ptid)->stop_requested)
5426
            fprintf_unfiltered (gdb_stdlog, "\
5427
LNSL: already stopped/stop_requested %s\n",
5428
                                target_pid_to_str (lwp->ptid));
5429
          else
5430
            fprintf_unfiltered (gdb_stdlog, "\
5431
LNSL: already stopped/no stop_requested yet %s\n",
5432
                                target_pid_to_str (lwp->ptid));
5433
        }
5434
    }
5435
  return 0;
5436
}
5437
 
5438
static void
5439
linux_nat_stop (ptid_t ptid)
5440
{
5441
  if (non_stop)
5442
    iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5443
  else
5444
    linux_ops->to_stop (ptid);
5445
}
5446
 
5447
/* to_close implementation: shut async down, reset the async mask to
   its default, then chain to the underlying target's close method.  */
static void
linux_nat_close (int quitting)
{
  /* Unregister from the event loop.  */
  if (target_is_async_p ())
    target_async (NULL, 0);

  /* Reset the async_masking.  */
  linux_nat_async_mask_value = 1;

  /* Chain to the saved single-threaded target, if it has a close.  */
  if (linux_ops->to_close)
    linux_ops->to_close (quitting);
}
5460
 
5461
/* When requests are passed down from the linux-nat layer to the
5462
   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5463
   used.  The address space pointer is stored in the inferior object,
5464
   but the common code that is passed such ptid can't tell whether
5465
   lwpid is a "main" process id or not (it assumes so).  We reverse
5466
   look up the "main" process id from the lwp here.  */
5467
 
5468
struct address_space *
5469
linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5470
{
5471
  struct lwp_info *lwp;
5472
  struct inferior *inf;
5473
  int pid;
5474
 
5475
  pid = GET_LWP (ptid);
5476
  if (GET_LWP (ptid) == 0)
5477
    {
5478
      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
5479
         tgid.  */
5480
      lwp = find_lwp_pid (ptid);
5481
      pid = GET_PID (lwp->ptid);
5482
    }
5483
  else
5484
    {
5485
      /* A (pid,lwpid,0) ptid.  */
5486
      pid = GET_PID (ptid);
5487
    }
5488
 
5489
  inf = find_inferior_pid (pid);
5490
  gdb_assert (inf != NULL);
5491
  return inf->aspace;
5492
}
5493
 
5494
int
5495
linux_nat_core_of_thread_1 (ptid_t ptid)
5496
{
5497
  struct cleanup *back_to;
5498
  char *filename;
5499
  FILE *f;
5500
  char *content = NULL;
5501
  char *p;
5502
  char *ts = 0;
5503
  int content_read = 0;
5504
  int i;
5505
  int core;
5506
 
5507
  filename = xstrprintf ("/proc/%d/task/%ld/stat",
5508
                         GET_PID (ptid), GET_LWP (ptid));
5509
  back_to = make_cleanup (xfree, filename);
5510
 
5511
  f = fopen (filename, "r");
5512
  if (!f)
5513
    {
5514
      do_cleanups (back_to);
5515
      return -1;
5516
    }
5517
 
5518
  make_cleanup_fclose (f);
5519
 
5520
  for (;;)
5521
    {
5522
      int n;
5523
      content = xrealloc (content, content_read + 1024);
5524
      n = fread (content + content_read, 1, 1024, f);
5525
      content_read += n;
5526
      if (n < 1024)
5527
        {
5528
          content[content_read] = '\0';
5529
          break;
5530
        }
5531
    }
5532
 
5533
  make_cleanup (xfree, content);
5534
 
5535
  p = strchr (content, '(');
5536
  p = strchr (p, ')') + 2; /* skip ")" and a whitespace. */
5537
 
5538
  /* If the first field after program name has index 0, then core number is
5539
     the field with index 36.  There's no constant for that anywhere.  */
5540
  p = strtok_r (p, " ", &ts);
5541
  for (i = 0; i != 36; ++i)
5542
    p = strtok_r (NULL, " ", &ts);
5543
 
5544
  if (sscanf (p, "%d", &core) == 0)
5545
    core = -1;
5546
 
5547
  do_cleanups (back_to);
5548
 
5549
  return core;
5550
}
5551
 
5552
/* Return the cached value of the processor core for thread PTID.  */
5553
 
5554
int
5555
linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5556
{
5557
  struct lwp_info *info = find_lwp_pid (ptid);
5558
  if (info)
5559
    return info->core;
5560
  return -1;
5561
}
5562
 
5563
/* Register T as the GNU/Linux native target: save the single-threaded
   methods for internal chaining, then override the multi-threaded,
   async, and non-stop entry points.  */
void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Async and terminal handling.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_async_mask = linux_nat_async_mask;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
5613
 
5614
/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}
5623
 
5624
/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
                             int (*siginfo_fixup) (struct siginfo *,
                                                   gdb_byte *,
                                                   int))
{
  /* Save the pointer.  Like linux_nat_set_new_thread, a single
     registered instance is assumed, so T is not consulted.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}
5636
 
5637
/* Return the saved siginfo associated with PTID.  */
5638
struct siginfo *
5639
linux_nat_get_siginfo (ptid_t ptid)
5640
{
5641
  struct lwp_info *lp = find_lwp_pid (ptid);
5642
 
5643
  gdb_assert (lp != NULL);
5644
 
5645
  return &lp->siginfo;
5646
}
5647
 
5648
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;
5650
 
5651
void
5652
_initialize_linux_nat (void)
5653
{
5654
  sigset_t mask;
5655
 
5656
  add_info ("proc", linux_nat_info_proc_cmd, _("\
5657
Show /proc process information about any running process.\n\
5658
Specify any process id, or use the program being debugged by default.\n\
5659
Specify any of the following keywords for detailed info:\n\
5660
  mappings -- list of mapped memory regions.\n\
5661
  stat     -- list a bunch of random process info.\n\
5662
  status   -- list a different bunch of random process info.\n\
5663
  all      -- list all available /proc info."));
5664
 
5665
  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5666
                            &debug_linux_nat, _("\
5667
Set debugging of GNU/Linux lwp module."), _("\
5668
Show debugging of GNU/Linux lwp module."), _("\
5669
Enables printf debugging output."),
5670
                            NULL,
5671
                            show_debug_linux_nat,
5672
                            &setdebuglist, &showdebuglist);
5673
 
5674
  add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
5675
                            &debug_linux_nat_async, _("\
5676
Set debugging of GNU/Linux async lwp module."), _("\
5677
Show debugging of GNU/Linux async lwp module."), _("\
5678
Enables printf debugging output."),
5679
                            NULL,
5680
                            show_debug_linux_nat_async,
5681
                            &setdebuglist, &showdebuglist);
5682
 
5683
  /* Save this mask as the default.  */
5684
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5685
 
5686
  /* Install a SIGCHLD handler.  */
5687
  sigchld_action.sa_handler = sigchld_handler;
5688
  sigemptyset (&sigchld_action.sa_mask);
5689
  sigchld_action.sa_flags = SA_RESTART;
5690
 
5691
  /* Make it the default.  */
5692
  sigaction (SIGCHLD, &sigchld_action, NULL);
5693
 
5694
  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
5695
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5696
  sigdelset (&suspend_mask, SIGCHLD);
5697
 
5698
  sigemptyset (&blocked_mask);
5699
 
5700
  add_setshow_boolean_cmd ("disable-randomization", class_support,
5701
                           &disable_randomization, _("\
5702
Set disabling of debuggee's virtual address space randomization."), _("\
5703
Show disabling of debuggee's virtual address space randomization."), _("\
5704
When this mode is on (which is the default), randomization of the virtual\n\
5705
address space is disabled.  Standalone programs run with the randomization\n\
5706
enabled by default on some platforms."),
5707
                           &set_disable_randomization,
5708
                           &show_disable_randomization,
5709
                           &setlist, &showlist);
5710
}
5711
 
5712
 
5713
/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5714
   the GNU/Linux Threads library and therefore doesn't really belong
5715
   here.  */
5716
 
5717
/* Read variable NAME in the target and return its value if found.
5718
   Otherwise return zero.  It is assumed that the type of the variable
5719
   is `int'.  */
5720
 
5721
static int
5722
get_signo (const char *name)
5723
{
5724
  struct minimal_symbol *ms;
5725
  int signo;
5726
 
5727
  ms = lookup_minimal_symbol (name, NULL, NULL);
5728
  if (ms == NULL)
5729
    return 0;
5730
 
5731
  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5732
                          sizeof (signo)) != 0)
5733
    return 0;
5734
 
5735
  return signo;
5736
}
5737
 
5738
/* Return the set of signals used by the threads library in *SET.
   Also installs the SIGCHLD handler for the "cancel" signal and
   adjusts the blocked/suspend masks accordingly.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* Ask the inferior's threads library which signal numbers it
     uses; zero means the symbol was not found.  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}

powered by: WebSVN 2.1.0

© copyright 1999-2024 OpenCores.org, equivalent to Oliscience, all rights reserved. OpenCores®, registered trademark.