OpenCores openrisc_me Subversion repository
https://opencores.org/ocsvn/openrisc_me/openrisc_me/trunk
openrisc/trunk/gnu-src/gdb-7.1/gdb/gdbserver/linux-low.c (rev 234)

/* Low level interface to ptrace, for the remote server for GDB.
2
   Copyright (C) 1995, 1996, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
3
   2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4
 
5
   This file is part of GDB.
6
 
7
   This program is free software; you can redistribute it and/or modify
8
   it under the terms of the GNU General Public License as published by
9
   the Free Software Foundation; either version 3 of the License, or
10
   (at your option) any later version.
11
 
12
   This program is distributed in the hope that it will be useful,
13
   but WITHOUT ANY WARRANTY; without even the implied warranty of
14
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15
   GNU General Public License for more details.
16
 
17
   You should have received a copy of the GNU General Public License
18
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
19
 
20
#include "server.h"
21
#include "linux-low.h"
22
 
23
#include <sys/wait.h>
24
#include <stdio.h>
25
#include <sys/param.h>
26
#include <sys/ptrace.h>
27
#include <signal.h>
28
#include <sys/ioctl.h>
29
#include <fcntl.h>
30
#include <string.h>
31
#include <stdlib.h>
32
#include <unistd.h>
33
#include <errno.h>
34
#include <sys/syscall.h>
35
#include <sched.h>
36
#include <ctype.h>
37
#include <pwd.h>
38
#include <sys/types.h>
39
#include <dirent.h>
40
#include <sys/stat.h>
41
#include <sys/vfs.h>
42
#ifndef ELFMAG0
43
/* Don't include <linux/elf.h> here.  If it got included by gdb_proc_service.h
44
   then ELFMAG0 will have been defined.  If it didn't get included by
45
   gdb_proc_service.h then including it will likely introduce a duplicate
46
   definition of elf_fpregset_t.  */
47
#include <elf.h>
48
#endif
49
 
50
#ifndef SPUFS_MAGIC
51
#define SPUFS_MAGIC 0x23c9b64e
52
#endif
53
 
54
#ifndef PTRACE_GETSIGINFO
55
# define PTRACE_GETSIGINFO 0x4202
56
# define PTRACE_SETSIGINFO 0x4203
57
#endif
58
 
59
#ifndef O_LARGEFILE
60
#define O_LARGEFILE 0
61
#endif
62
 
63
/* If the system headers did not provide the constants, hard-code the normal
64
   values.  */
65
#ifndef PTRACE_EVENT_FORK
66
 
67
#define PTRACE_SETOPTIONS       0x4200
68
#define PTRACE_GETEVENTMSG      0x4201
69
 
70
/* options set using PTRACE_SETOPTIONS */
71
#define PTRACE_O_TRACESYSGOOD   0x00000001
72
#define PTRACE_O_TRACEFORK      0x00000002
73
#define PTRACE_O_TRACEVFORK     0x00000004
74
#define PTRACE_O_TRACECLONE     0x00000008
75
#define PTRACE_O_TRACEEXEC      0x00000010
76
#define PTRACE_O_TRACEVFORKDONE 0x00000020
77
#define PTRACE_O_TRACEEXIT      0x00000040
78
 
79
/* Wait extended result codes for the above trace options.  */
80
#define PTRACE_EVENT_FORK       1
81
#define PTRACE_EVENT_VFORK      2
82
#define PTRACE_EVENT_CLONE      3
83
#define PTRACE_EVENT_EXEC       4
84
#define PTRACE_EVENT_VFORK_DONE 5
85
#define PTRACE_EVENT_EXIT       6
86
 
87
#endif /* PTRACE_EVENT_FORK */
88
 
89
/* We can't always assume that this flag is available, but all systems
90
   with the ptrace event handlers also have __WALL, so it's safe to use
91
   in some contexts.  */
92
#ifndef __WALL
93
#define __WALL          0x40000000 /* Wait for any child.  */
94
#endif
95
 
96
#ifndef W_STOPCODE
97
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
98
#endif
99
 
100
#ifdef __UCLIBC__
101
#if !(defined(__UCLIBC_HAS_MMU__) || defined(__ARCH_HAS_MMU__))
102
#define HAS_NOMMU
103
#endif
104
#endif
105
 
106
/* ``all_threads'' is keyed by the LWP ID, which we use as the GDB protocol
107
   representation of the thread ID.
108
 
109
   ``all_lwps'' is keyed by the process ID - which on Linux is (presently)
110
   the same as the LWP ID.
111
 
112
   ``all_processes'' is keyed by the "overall process ID", which
113
   GNU/Linux calls tgid, "thread group ID".  */
114
 
115
struct inferior_list all_lwps;
116
 
117
/* A list of all unknown processes which receive stop signals.  Some other
118
   process will presumably claim each of these as forked children
119
   momentarily.  */
120
 
121
struct inferior_list stopped_pids;
122
 
123
/* FIXME this is a bit of a hack, and could be removed.  */
124
int stopping_threads;
125
 
126
/* FIXME make into a target method?  */
127
int using_threads = 1;
128
 
129
/* This flag is true iff we've just created or attached to our first
130
   inferior but it has not stopped yet.  As soon as it does, we need
131
   to call the low target's arch_setup callback.  Doing this only on
132
   the first inferior avoids reinitializing the architecture on every
133
   inferior, and avoids messing with the register caches of the
134
   already running inferiors.  NOTE: this assumes all inferiors under
135
   control of gdbserver have the same architecture.  */
136
static int new_inferior;
137
 
138
static void linux_resume_one_lwp (struct lwp_info *lwp,
139
                                  int step, int signal, siginfo_t *info);
140
static void linux_resume (struct thread_resume *resume_info, size_t n);
141
static void stop_all_lwps (void);
142
static int linux_wait_for_event (ptid_t ptid, int *wstat, int options);
143
static int check_removed_breakpoint (struct lwp_info *event_child);
144
static void *add_lwp (ptid_t ptid);
145
static int linux_stopped_by_watchpoint (void);
146
static void mark_lwp_dead (struct lwp_info *lwp, int wstat);
147
static int linux_core_of_thread (ptid_t ptid);
148
 
149
struct pending_signals
150
{
151
  int signal;
152
  siginfo_t info;
153
  struct pending_signals *prev;
154
};
155
 
156
#define PTRACE_ARG3_TYPE void *
157
#define PTRACE_ARG4_TYPE void *
158
#define PTRACE_XFER_TYPE long
159
 
160
#ifdef HAVE_LINUX_REGSETS
161
static char *disabled_regsets;
162
static int num_regsets;
163
#endif
164
 
165
/* The read/write ends of the pipe registered as waitable file in the
166
   event loop.  */
167
static int linux_event_pipe[2] = { -1, -1 };
168
 
169
/* True if we're currently in async mode.  */
170
#define target_is_async_p() (linux_event_pipe[0] != -1)
171
 
172
static void send_sigstop (struct inferior_list_entry *entry);
173
static void wait_for_sigstop (struct inferior_list_entry *entry);
174
 
175
/* Accepts an integer PID; Returns a string representing a file that
176
   can be opened to get info for the child process.
177
   Space for the result is malloc'd, caller must free.  */
178
 
179
char *
180
linux_child_pid_to_exec_file (int pid)
181
{
182
  char *name1, *name2;
183
 
184
  name1 = xmalloc (MAXPATHLEN);
185
  name2 = xmalloc (MAXPATHLEN);
186
  memset (name2, 0, MAXPATHLEN);
187
 
188
  sprintf (name1, "/proc/%d/exe", pid);
189
  if (readlink (name1, name2, MAXPATHLEN) > 0)
190
    {
191
      free (name1);
192
      return name2;
193
    }
194
  else
195
    {
196
      free (name2);
197
      return name1;
198
    }
199
}
200
 
201
/* Return non-zero if HEADER is a 64-bit ELF file.  */
202
 
203
static int
204
elf_64_header_p (const Elf64_Ehdr *header)
205
{
206
  return (header->e_ident[EI_MAG0] == ELFMAG0
207
          && header->e_ident[EI_MAG1] == ELFMAG1
208
          && header->e_ident[EI_MAG2] == ELFMAG2
209
          && header->e_ident[EI_MAG3] == ELFMAG3
210
          && header->e_ident[EI_CLASS] == ELFCLASS64);
211
}
212
 
213
/* Return non-zero if FILE is a 64-bit ELF file,
214
   zero if the file is not a 64-bit ELF file,
215
   and -1 if the file is not accessible or doesn't exist.  */
216
 
217
int
218
elf_64_file_p (const char *file)
219
{
220
  Elf64_Ehdr header;
221
  int fd;
222
 
223
  fd = open (file, O_RDONLY);
224
  if (fd < 0)
225
    return -1;
226
 
227
  if (read (fd, &header, sizeof (header)) != sizeof (header))
228
    {
229
      close (fd);
230
      return 0;
231
    }
232
  close (fd);
233
 
234
  return elf_64_header_p (&header);
235
}
236
 
237
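/* Remove LWP's thread from the thread list, unlink LWP from
   all_lwps, and free its storage.  */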
static void
238
delete_lwp (struct lwp_info *lwp)
239
{
240
  remove_thread (get_lwp_thread (lwp));
241
  remove_inferior (&all_lwps, &lwp->head);
242
  free (lwp->arch_private);
243
  free (lwp);
244
}
245
 
246
/* Add a process to the common process list, and set its private
247
   data.  */
248
 
249
static struct process_info *
250
linux_add_process (int pid, int attached)
251
{
252
  struct process_info *proc;
253
 
254
  /* Is this the first process?  If so, then set the arch.  */
255
  if (all_processes.head == NULL)
256
    new_inferior = 1;
257
 
258
  proc = add_process (pid, attached);
259
  proc->private = xcalloc (1, sizeof (*proc->private));
260
 
261
  if (the_low_target.new_process != NULL)
262
    proc->private->arch_private = the_low_target.new_process ();
263
 
264
  return proc;
265
}
266
 
267
/* Remove a process from the common process list,
268
   also freeing all private data.  */
269
 
270
static void
271
linux_remove_process (struct process_info *process)
272
{
273
  struct process_info_private *priv = process->private;
274
 
275
  free (priv->arch_private);
276
  free (priv);
277
  remove_process (process);
278
}
279
 
280
/* Wrapper function for waitpid which handles EINTR, and emulates
281
   __WALL for systems where that is not available.  */
282
 
283
static int
284
my_waitpid (int pid, int *status, int flags)
285
{
286
  int ret, out_errno;
287
 
288
  if (debug_threads)
289
    fprintf (stderr, "my_waitpid (%d, 0x%x)\n", pid, flags);
290
 
291
  if (flags & __WALL)
292
    {
293
      sigset_t block_mask, org_mask, wake_mask;
294
      int wnohang;
295
 
296
      wnohang = (flags & WNOHANG) != 0;
297
      flags &= ~(__WALL | __WCLONE);
298
      flags |= WNOHANG;
299
 
300
      /* Block all signals while here.  This avoids knowing about
301
         LinuxThreads' signals.  */
302
      sigfillset (&block_mask);
303
      sigprocmask (SIG_BLOCK, &block_mask, &org_mask);
304
 
305
      /* ... except during the sigsuspend below.  */
306
      sigemptyset (&wake_mask);
307
 
308
      while (1)
309
        {
310
          /* Since all signals are blocked, there's no need to check
311
             for EINTR here.  */
312
          ret = waitpid (pid, status, flags);
313
          out_errno = errno;
314
 
315
          if (ret == -1 && out_errno != ECHILD)
316
            break;
317
          else if (ret > 0)
318
            break;
319
 
320
          if (flags & __WCLONE)
321
            {
322
              /* We've tried both flavors now.  If WNOHANG is set,
323
                 there's nothing else to do, just bail out.  */
324
              if (wnohang)
325
                break;
326
 
327
              if (debug_threads)
328
                fprintf (stderr, "blocking\n");
329
 
330
              /* Block waiting for signals.  */
331
              sigsuspend (&wake_mask);
332
            }
333
 
334
          flags ^= __WCLONE;
335
        }
336
 
337
      sigprocmask (SIG_SETMASK, &org_mask, NULL);
338
    }
339
  else
340
    {
341
      do
342
        ret = waitpid (pid, status, flags);
343
      while (ret == -1 && errno == EINTR);
344
      out_errno = errno;
345
    }
346
 
347
  if (debug_threads)
348
    fprintf (stderr, "my_waitpid (%d, 0x%x): status(%x), %d\n",
349
             pid, flags, status ? *status : -1, ret);
350
 
351
  errno = out_errno;
352
  return ret;
353
}
354
 
355
/* Handle a GNU/Linux extended wait response.  If we see a clone
356
   event, we need to add the new LWP to our list (and not report the
357
   trap to higher layers).  */
358
 
359
static void
360
handle_extended_wait (struct lwp_info *event_child, int wstat)
361
{
362
  int event = wstat >> 16;
363
  struct lwp_info *new_lwp;
364
 
365
  if (event == PTRACE_EVENT_CLONE)
366
    {
367
      ptid_t ptid;
368
      unsigned long new_pid;
369
      int ret, status = W_STOPCODE (SIGSTOP);
370
 
371
      ptrace (PTRACE_GETEVENTMSG, lwpid_of (event_child), 0, &new_pid);
372
 
373
      /* If we haven't already seen the new PID stop, wait for it now.  */
374
      if (! pull_pid_from_list (&stopped_pids, new_pid))
375
        {
376
          /* The new child has a pending SIGSTOP.  We can't affect it until it
377
             hits the SIGSTOP, but we're already attached.  */
378
 
379
          ret = my_waitpid (new_pid, &status, __WALL);
380
 
381
          if (ret == -1)
382
            perror_with_name ("waiting for new child");
383
          else if (ret != new_pid)
384
            warning ("wait returned unexpected PID %d", ret);
385
          else if (!WIFSTOPPED (status))
386
            warning ("wait returned unexpected status 0x%x", status);
387
        }
388
 
389
      ptrace (PTRACE_SETOPTIONS, new_pid, 0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
390
 
391
      ptid = ptid_build (pid_of (event_child), new_pid, 0);
392
      new_lwp = (struct lwp_info *) add_lwp (ptid);
393
      add_thread (ptid, new_lwp);
394
 
395
      /* Either we're going to immediately resume the new thread
396
         or leave it stopped.  linux_resume_one_lwp is a nop if it
397
         thinks the thread is currently running, so set this first
398
         before calling linux_resume_one_lwp.  */
399
      new_lwp->stopped = 1;
400
 
401
      /* Normally we will get the pending SIGSTOP.  But in some cases
402
         we might get another signal delivered to the group first.
403
         If we do get another signal, be sure not to lose it.  */
404
      if (WSTOPSIG (status) == SIGSTOP)
405
        {
406
          if (! stopping_threads)
407
            linux_resume_one_lwp (new_lwp, 0, 0, NULL);
408
        }
409
      else
410
        {
411
          new_lwp->stop_expected = 1;
412
          if (stopping_threads)
413
            {
414
              new_lwp->status_pending_p = 1;
415
              new_lwp->status_pending = status;
416
            }
417
          else
418
            /* Pass the signal on.  This is what GDB does - except
419
               shouldn't we really report it instead?  */
420
            linux_resume_one_lwp (new_lwp, 0, WSTOPSIG (status), NULL);
421
        }
422
 
423
      /* Always resume the current thread.  If we are stopping
424
         threads, it will have a pending SIGSTOP; we may as well
425
         collect it now.  */
426
      linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
427
    }
428
}
429
 
430
/* This function should only be called if the process got a SIGTRAP.
431
   The SIGTRAP could mean several things.
432
 
433
   On i386, where decr_pc_after_break is non-zero:
434
   If we were single-stepping this process using PTRACE_SINGLESTEP,
435
   we will get only the one SIGTRAP (even if the instruction we
436
   stepped over was a breakpoint).  The value of $eip will be the
437
   next instruction.
438
   If we continue the process using PTRACE_CONT, we will get a
439
   SIGTRAP when we hit a breakpoint.  The value of $eip will be
440
   the instruction after the breakpoint (i.e. needs to be
441
   decremented).  If we report the SIGTRAP to GDB, we must also
442
   report the undecremented PC.  If we cancel the SIGTRAP, we
443
   must resume at the decremented PC.
444
 
445
   (Presumably, not yet tested) On a non-decr_pc_after_break machine
446
   with hardware or kernel single-step:
447
   If we single-step over a breakpoint instruction, our PC will
448
   point at the following instruction.  If we continue and hit a
449
   breakpoint instruction, our PC will point at the breakpoint
450
   instruction.  */
451
 
452
static CORE_ADDR
453
get_stop_pc (void)
454
{
455
  struct regcache *regcache = get_thread_regcache (current_inferior, 1);
456
  CORE_ADDR stop_pc = (*the_low_target.get_pc) (regcache);
457
 
458
  if (! get_thread_lwp (current_inferior)->stepping
459
      && WSTOPSIG (get_thread_lwp (current_inferior)->last_status) == SIGTRAP)
460
    stop_pc -= the_low_target.decr_pc_after_break;
461
 
462
  if (debug_threads)
463
    fprintf (stderr, "stop pc is 0x%lx\n", (long) stop_pc);
464
 
465
  return stop_pc;
466
}
467
 
468
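/* Allocate and zero a new lwp_info for PTID, let the low target
   attach its per-thread data, and link it into all_lwps.  */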
static void *
469
add_lwp (ptid_t ptid)
470
{
471
  struct lwp_info *lwp;
472
 
473
  lwp = (struct lwp_info *) xmalloc (sizeof (*lwp));
474
  memset (lwp, 0, sizeof (*lwp));
475
 
476
  lwp->head.id = ptid;
477
 
478
  if (the_low_target.new_thread != NULL)
479
    lwp->arch_private = the_low_target.new_thread ();
480
 
481
  add_inferior_to_list (&all_lwps, &lwp->head);
482
 
483
  return lwp;
484
}
485
 
486
/* Start an inferior process and return its pid.
487
   ALLARGS is a vector of program-name and args. */
488
 
489
static int
490
linux_create_inferior (char *program, char **allargs)
491
{
492
  struct lwp_info *new_lwp;
493
  int pid;
494
  ptid_t ptid;
495
 
496
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
497
  pid = vfork ();
498
#else
499
  pid = fork ();
500
#endif
501
  if (pid < 0)
502
    perror_with_name ("fork");
503
 
504
  if (pid == 0)
505
    {
506
      ptrace (PTRACE_TRACEME, 0, 0, 0);
507
 
508
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
509
      signal (__SIGRTMIN + 1, SIG_DFL);
510
#endif
511
 
512
      setpgid (0, 0);
513
 
514
      execv (program, allargs);
515
      if (errno == ENOENT)
516
        execvp (program, allargs);
517
 
518
      fprintf (stderr, "Cannot exec %s: %s.\n", program,
519
               strerror (errno));
520
      fflush (stderr);
521
      _exit (0177);
522
    }
523
 
524
  linux_add_process (pid, 0);
525
 
526
  ptid = ptid_build (pid, pid, 0);
527
  new_lwp = add_lwp (ptid);
528
  add_thread (ptid, new_lwp);
529
  new_lwp->must_set_ptrace_flags = 1;
530
 
531
  return pid;
532
}
533
 
534
/* Attach to an inferior process.  */
535
 
536
static void
537
linux_attach_lwp_1 (unsigned long lwpid, int initial)
538
{
539
  ptid_t ptid;
540
  struct lwp_info *new_lwp;
541
 
542
  if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) != 0)
543
    {
544
      if (!initial)
545
        {
546
          /* If we fail to attach to an LWP, just warn.  */
547
          fprintf (stderr, "Cannot attach to lwp %ld: %s (%d)\n", lwpid,
548
                   strerror (errno), errno);
549
          fflush (stderr);
550
          return;
551
        }
552
      else
553
        /* If we fail to attach to a process, report an error.  */
554
        error ("Cannot attach to lwp %ld: %s (%d)\n", lwpid,
555
               strerror (errno), errno);
556
    }
557
 
558
  if (initial)
559
    /* NOTE/FIXME: This lwp might not have been the tgid.  */
560
    ptid = ptid_build (lwpid, lwpid, 0);
561
  else
562
    {
563
      /* Note that extracting the pid from the current inferior is
564
         safe, since we're always called in the context of the same
565
         process as this new thread.  */
566
      int pid = pid_of (get_thread_lwp (current_inferior));
567
      ptid = ptid_build (pid, lwpid, 0);
568
    }
569
 
570
  new_lwp = (struct lwp_info *) add_lwp (ptid);
571
  add_thread (ptid, new_lwp);
572
 
573
  /* We need to wait for SIGSTOP before being able to make the next
574
     ptrace call on this LWP.  */
575
  new_lwp->must_set_ptrace_flags = 1;
576
 
577
  /* The next time we wait for this LWP we'll see a SIGSTOP as PTRACE_ATTACH
578
     brings it to a halt.
579
 
580
     There are several cases to consider here:
581
 
582
     1) gdbserver has already attached to the process and is being notified
583
        of a new thread that is being created.
584
        In this case we should ignore that SIGSTOP and resume the process.
585
        This is handled below by setting stop_expected = 1.
586
 
587
     2) This is the first thread (the process thread), and we're attaching
588
        to it via attach_inferior.
589
        In this case we want the process thread to stop.
590
        This is handled by having linux_attach clear stop_expected after
591
        we return.
592
        ??? If the process already has several threads we leave the other
593
        threads running.
594
 
595
     3) GDB is connecting to gdbserver and is requesting an enumeration of all
596
        existing threads.
597
        In this case we want the thread to stop.
598
        FIXME: This case is currently not properly handled.
599
        We should wait for the SIGSTOP but don't.  Things work apparently
600
        because enough time passes between when we ptrace (ATTACH) and when
601
        gdb makes the next ptrace call on the thread.
602
 
603
     On the other hand, if we are currently trying to stop all threads, we
604
     should treat the new thread as if we had sent it a SIGSTOP.  This works
605
     because we are guaranteed that the add_lwp call above added us to the
606
     end of the list, and so the new thread has not yet reached
607
     wait_for_sigstop (but will).  */
608
  if (! stopping_threads)
609
    new_lwp->stop_expected = 1;
610
}
611
 
612
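/* Attach to an additional LWP of a process we are already debugging.  */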
void
613
linux_attach_lwp (unsigned long lwpid)
614
{
615
  linux_attach_lwp_1 (lwpid, 0);
616
}
617
 
618
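/* Attach to the process PID.  Attach to its main LWP, register the
   process, and in all-stop mode leave the initial SIGSTOP to be
   collected by the next wait.  */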
int
619
linux_attach (unsigned long pid)
620
{
621
  struct lwp_info *lwp;
622
 
623
  linux_attach_lwp_1 (pid, 1);
624
 
625
  linux_add_process (pid, 1);
626
 
627
  if (!non_stop)
628
    {
629
      /* Don't ignore the initial SIGSTOP if we just attached to this
630
         process.  It will be collected by wait shortly.  */
631
      lwp = (struct lwp_info *) find_inferior_id (&all_lwps,
632
                                                  ptid_build (pid, pid, 0));
633
      lwp->stop_expected = 0;
634
    }
635
 
636
  return 0;
637
}
638
 
639
struct counter
640
{
641
  int pid;
642
  int count;
643
};
644
 
645
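/* Callback for find_inferior.  Count threads belonging to the pid in
   *ARGS and return nonzero once a second one has been seen.  */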
static int
646
second_thread_of_pid_p (struct inferior_list_entry *entry, void *args)
647
{
648
  struct counter *counter = args;
649
 
650
  if (ptid_get_pid (entry->id) == counter->pid)
651
    {
652
      if (++counter->count > 1)
653
        return 1;
654
    }
655
 
656
  return 0;
657
}
658
 
659
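/* Return nonzero if THREAD is the only thread left in its process.  */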
static int
660
last_thread_of_process_p (struct thread_info *thread)
661
{
662
  ptid_t ptid = ((struct inferior_list_entry *)thread)->id;
663
  int pid = ptid_get_pid (ptid);
664
  struct counter counter = { pid , 0 };
665
 
666
  return (find_inferior (&all_threads,
667
                         second_thread_of_pid_p, &counter) == NULL);
668
}
669
 
670
/* Kill the inferior lwp.  */
671
 
672
static int
673
linux_kill_one_lwp (struct inferior_list_entry *entry, void *args)
674
{
675
  struct thread_info *thread = (struct thread_info *) entry;
676
  struct lwp_info *lwp = get_thread_lwp (thread);
677
  int wstat;
678
  int pid = * (int *) args;
679
 
680
  if (ptid_get_pid (entry->id) != pid)
681
    return 0;
682
 
683
  /* We avoid killing the first thread here, because of a Linux kernel (at
684
     least 2.6.0-test7 through 2.6.8-rc4) bug; if we kill the parent before
685
     the children get a chance to be reaped, it will remain a zombie
686
     forever.  */
687
 
688
  if (lwpid_of (lwp) == pid)
689
    {
690
      if (debug_threads)
691
        fprintf (stderr, "lkop: is last of process %s\n",
692
                 target_pid_to_str (entry->id));
693
      return 0;
694
    }
695
 
696
  /* If we're killing a running inferior, make sure it is stopped
697
     first, as PTRACE_KILL will not work otherwise.  */
698
  if (!lwp->stopped)
699
    send_sigstop (&lwp->head);
700
 
701
  do
702
    {
703
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
704
 
705
      /* Make sure it died.  The loop is most likely unnecessary.  */
706
      pid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
707
    } while (pid > 0 && WIFSTOPPED (wstat));
708
 
709
  return 0;
710
}
711
 
712
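/* Kill process PID: kill all of its LWPs except the leader first
   (see linux_kill_one_lwp), then kill the leader itself and discard
   our state for the process.  */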
static int
713
linux_kill (int pid)
714
{
715
  struct process_info *process;
716
  struct lwp_info *lwp;
717
  struct thread_info *thread;
718
  int wstat;
719
  int lwpid;
720
 
721
  process = find_process_pid (pid);
722
  if (process == NULL)
723
    return -1;
724
 
725
  find_inferior (&all_threads, linux_kill_one_lwp, &pid);
726
 
727
  /* See the comment in linux_kill_one_lwp.  We did not kill the first
728
     thread in the list, so do so now.  */
729
  lwp = find_lwp_pid (pid_to_ptid (pid));
730
  thread = get_lwp_thread (lwp);
731
 
732
  if (debug_threads)
733
    fprintf (stderr, "lk_1: killing lwp %ld, for pid: %d\n",
734
             lwpid_of (lwp), pid);
735
 
736
  /* If we're killing a running inferior, make sure it is stopped
737
     first, as PTRACE_KILL will not work otherwise.  */
738
  if (!lwp->stopped)
739
    send_sigstop (&lwp->head);
740
 
741
  do
742
    {
743
      ptrace (PTRACE_KILL, lwpid_of (lwp), 0, 0);
744
 
745
      /* Make sure it died.  The loop is most likely unnecessary.  */
746
      lwpid = linux_wait_for_event (lwp->head.id, &wstat, __WALL);
747
    } while (lwpid > 0 && WIFSTOPPED (wstat));
748
 
749
#ifdef USE_THREAD_DB
750
  thread_db_free (process, 0);
751
#endif
752
  delete_lwp (lwp);
753
  linux_remove_process (process);
754
  return 0;
755
}
756
 
757
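/* Callback for find_inferior.  Detach from one LWP of process *ARGS:
   stop it if necessary, collect any pending SIGSTOP, flush its
   register cache, and PTRACE_DETACH.  */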
static int
758
linux_detach_one_lwp (struct inferior_list_entry *entry, void *args)
759
{
760
  struct thread_info *thread = (struct thread_info *) entry;
761
  struct lwp_info *lwp = get_thread_lwp (thread);
762
  int pid = * (int *) args;
763
 
764
  if (ptid_get_pid (entry->id) != pid)
765
    return 0;
766
 
767
  /* If we're detaching from a running inferior, make sure it is
768
     stopped first, as PTRACE_DETACH will not work otherwise.  */
769
  if (!lwp->stopped)
770
    {
771
      int lwpid = lwpid_of (lwp);
772
 
773
      stopping_threads = 1;
774
      send_sigstop (&lwp->head);
775
 
776
      /* If this detects a new thread through a clone event, the new
777
         thread is appended to the end of the lwp list, so we'll
778
         eventually detach from it.  */
779
      wait_for_sigstop (&lwp->head);
780
      stopping_threads = 0;
781
 
782
      /* If LWP exits while we're trying to stop it, there's nothing
783
         left to do.  */
784
      lwp = find_lwp_pid (pid_to_ptid (lwpid));
785
      if (lwp == NULL)
786
        return 0;
787
    }
788
 
789
  /* Make sure the process isn't stopped at a breakpoint that's
790
     no longer there.  */
791
  check_removed_breakpoint (lwp);
792
 
793
  /* If this process is stopped but is expecting a SIGSTOP, then make
794
     sure we take care of that now.  This isn't absolutely guaranteed
795
     to collect the SIGSTOP, but is fairly likely to.  */
796
  if (lwp->stop_expected)
797
    {
798
      int wstat;
799
      /* Clear stop_expected, so that the SIGSTOP will be reported.  */
800
      lwp->stop_expected = 0;
801
      if (lwp->stopped)
802
        linux_resume_one_lwp (lwp, 0, 0, NULL);
803
      linux_wait_for_event (lwp->head.id, &wstat, __WALL);
804
    }
805
 
806
  /* Flush any pending changes to the process's registers.  */
807
  regcache_invalidate_one ((struct inferior_list_entry *)
808
                           get_lwp_thread (lwp));
809
 
810
  /* Finally, let it resume.  */
811
  ptrace (PTRACE_DETACH, lwpid_of (lwp), 0, 0);
812
 
813
  delete_lwp (lwp);
814
  return 0;
815
}
816
 
817
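/* Callback for find_inferior.  Return nonzero if ENTRY belongs to the
   process whose pid is pointed to by ARGS.  */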
static int
818
any_thread_of (struct inferior_list_entry *entry, void *args)
819
{
820
  int *pid_p = args;
821
 
822
  if (ptid_get_pid (entry->id) == *pid_p)
823
    return 1;
824
 
825
  return 0;
826
}
827
 
828
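/* Detach from process PID: release any thread_db resources, remove
   our breakpoints, detach every LWP, and forget the process.  */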
static int
829
linux_detach (int pid)
830
{
831
  struct process_info *process;
832
 
833
  process = find_process_pid (pid);
834
  if (process == NULL)
835
    return -1;
836
 
837
#ifdef USE_THREAD_DB
838
  thread_db_free (process, 1);
839
#endif
840
 
841
  current_inferior =
842
    (struct thread_info *) find_inferior (&all_threads, any_thread_of, &pid);
843
 
844
  delete_all_breakpoints ();
845
  find_inferior (&all_threads, linux_detach_one_lwp, &pid);
846
  linux_remove_process (process);
847
  return 0;
848
}
849
 
850
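/* Block until process PID has exited or been killed, or until waitpid
   reports that there are no more children.  */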
static void
851
linux_join (int pid)
852
{
853
  int status, ret;
854
  struct process_info *process;
855
 
856
  process = find_process_pid (pid);
857
  if (process == NULL)
858
    return;
859
 
860
  do {
861
    ret = my_waitpid (pid, &status, 0);
862
    if (WIFEXITED (status) || WIFSIGNALED (status))
863
      break;
864
  } while (ret != -1 || errno != ECHILD);
865
}
866
 
867
/* Return nonzero if the given thread is still alive.  */
868
static int
869
linux_thread_alive (ptid_t ptid)
870
{
871
  struct lwp_info *lwp = find_lwp_pid (ptid);
872
 
873
  /* We assume we always know if a thread exits.  If a whole process
874
     exited but we still haven't been able to report it to GDB, we'll
875
     hold on to the last lwp of the dead process.  */
876
  if (lwp != NULL)
877
    return !lwp->dead;
878
  else
879
    return 0;
880
}
881
 
882
/* Return nonzero if this process stopped at a breakpoint which
883
   no longer appears to be inserted.  Also adjust the PC
884
   appropriately to resume where the breakpoint used to be.  */
885
static int
886
check_removed_breakpoint (struct lwp_info *event_child)
887
{
888
  CORE_ADDR stop_pc;
889
  struct thread_info *saved_inferior;
890
  struct regcache *regcache;
891
 
892
  if (event_child->pending_is_breakpoint == 0)
893
    return 0;
894
 
895
  if (debug_threads)
896
    fprintf (stderr, "Checking for breakpoint in lwp %ld.\n",
897
             lwpid_of (event_child));
898
 
899
  saved_inferior = current_inferior;
900
  current_inferior = get_lwp_thread (event_child);
901
  regcache = get_thread_regcache (current_inferior, 1);
902
  stop_pc = get_stop_pc ();
903
 
904
  /* If the PC has changed since we stopped, then we shouldn't do
905
     anything.  This happens if, for instance, GDB handled the
906
     decr_pc_after_break subtraction itself.  */
907
  if (stop_pc != event_child->pending_stop_pc)
908
    {
909
      if (debug_threads)
910
        fprintf (stderr, "Ignoring, PC was changed.  Old PC was 0x%08llx\n",
911
                 event_child->pending_stop_pc);
912
 
913
      event_child->pending_is_breakpoint = 0;
914
      current_inferior = saved_inferior;
915
      return 0;
916
    }
917
 
918
  /* If the breakpoint is still there, we will report hitting it.  */
919
  if ((*the_low_target.breakpoint_at) (stop_pc))
920
    {
921
      if (debug_threads)
922
        fprintf (stderr, "Ignoring, breakpoint is still present.\n");
923
      current_inferior = saved_inferior;
924
      return 0;
925
    }
926
 
927
  if (debug_threads)
928
    fprintf (stderr, "Removed breakpoint.\n");
929
 
930
  /* For decr_pc_after_break targets, here is where we perform the
931
     decrement.  We go immediately from this function to resuming,
932
     and can not safely call get_stop_pc () again.  */
933
  if (the_low_target.set_pc != NULL)
934
    {
935
      if (debug_threads)
936
        fprintf (stderr, "Set pc to 0x%lx\n", (long) stop_pc);
937
      (*the_low_target.set_pc) (regcache, stop_pc);
938
    }
939
 
940
  /* We consumed the pending SIGTRAP.  */
941
  event_child->pending_is_breakpoint = 0;
942
  event_child->status_pending_p = 0;
943
  event_child->status_pending = 0;
944
 
945
  current_inferior = saved_inferior;
946
  return 1;
947
}
948
 
949
/* Return 1 if this lwp has an interesting status pending.  This
950
   function may silently resume an inferior lwp.  */
951
static int
952
status_pending_p (struct inferior_list_entry *entry, void *arg)
953
{
954
  struct lwp_info *lwp = (struct lwp_info *) entry;
955
  ptid_t ptid = * (ptid_t *) arg;
956
 
957
  /* Check if we're only interested in events from a specific process
958
     or its lwps.  */
959
  if (!ptid_equal (minus_one_ptid, ptid)
960
      && ptid_get_pid (ptid) != ptid_get_pid (lwp->head.id))
961
    return 0;
962
 
963
  if (lwp->status_pending_p && !lwp->suspended)
964
    if (check_removed_breakpoint (lwp))
965
      {
966
        /* This thread was stopped at a breakpoint, and the breakpoint
967
           is now gone.  We were told to continue (or step...) all threads,
968
           so GDB isn't trying to single-step past this breakpoint.
969
           So instead of reporting the old SIGTRAP, pretend we got to
970
           the breakpoint just after it was removed instead of just
971
           before; resume the process.  */
972
        linux_resume_one_lwp (lwp, 0, 0, NULL);
973
        return 0;
974
      }
975
 
976
  return (lwp->status_pending_p && !lwp->suspended);
977
}
978
 
979
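/* Callback for find_inferior.  Return nonzero if ENTRY matches the
   LWP ID encoded in *DATA (falling back to its pid when no lwp field
   is set).  */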
static int
980
same_lwp (struct inferior_list_entry *entry, void *data)
981
{
982
  ptid_t ptid = *(ptid_t *) data;
983
  int lwp;
984
 
985
  if (ptid_get_lwp (ptid) != 0)
986
    lwp = ptid_get_lwp (ptid);
987
  else
988
    lwp = ptid_get_pid (ptid);
989
 
990
  if (ptid_get_lwp (entry->id) == lwp)
991
    return 1;
992
 
993
  return 0;
994
}
995
 
996
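/* Find the lwp_info whose LWP ID matches PTID, or return NULL if
   there is none.  */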
struct lwp_info *
997
find_lwp_pid (ptid_t ptid)
998
{
999
  return (struct lwp_info*) find_inferior (&all_lwps, same_lwp, &ptid);
1000
}
1001
 
1002
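/* Wait (with __WALL) for a stop from PTID, or from any child if PTID
   is minus_one_ptid.  Record the wait status in *WSTATP and in the
   matching lwp_info and return that lwp_info; return NULL if OPTIONS
   contains WNOHANG and nothing was ready.  */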
static struct lwp_info *
1003
linux_wait_for_lwp (ptid_t ptid, int *wstatp, int options)
1004
{
1005
  int ret;
1006
  int to_wait_for = -1;
1007
  struct lwp_info *child = NULL;
1008
 
1009
  if (debug_threads)
1010
    fprintf (stderr, "linux_wait_for_lwp: %s\n", target_pid_to_str (ptid));
1011
 
1012
  if (ptid_equal (ptid, minus_one_ptid))
1013
    to_wait_for = -1;                   /* any child */
1014
  else
1015
    to_wait_for = ptid_get_lwp (ptid);  /* this lwp only */
1016
 
1017
  options |= __WALL;
1018
 
1019
retry:
1020
 
1021
  ret = my_waitpid (to_wait_for, wstatp, options);
1022
  if (ret == 0 || (ret == -1 && errno == ECHILD && (options & WNOHANG)))
1023
    return NULL;
1024
  else if (ret == -1)
1025
    perror_with_name ("waitpid");
1026
 
1027
  if (debug_threads
1028
      && (!WIFSTOPPED (*wstatp)
1029
          || (WSTOPSIG (*wstatp) != 32
1030
              && WSTOPSIG (*wstatp) != 33)))
1031
    fprintf (stderr, "Got an event from %d (%x)\n", ret, *wstatp);
1032
 
1033
  child = find_lwp_pid (pid_to_ptid (ret));
1034
 
1035
  /* If we didn't find a process, one of two things presumably happened:
1036
     - A process we started and then detached from has exited.  Ignore it.
1037
     - A process we are controlling has forked and the new child's stop
1038
     was reported to us by the kernel.  Save its PID.  */
1039
  if (child == NULL && WIFSTOPPED (*wstatp))
1040
    {
1041
      add_pid_to_list (&stopped_pids, ret);
1042
      goto retry;
1043
    }
1044
  else if (child == NULL)
1045
    goto retry;
1046
 
1047
  child->stopped = 1;
1048
  child->pending_is_breakpoint = 0;
1049
 
1050
  child->last_status = *wstatp;
1051
 
1052
  /* Architecture-specific setup after inferior is running.
1053
     This needs to happen after we have attached to the inferior
1054
     and it is stopped for the first time, but before we access
1055
     any inferior registers.  */
1056
  if (new_inferior)
1057
    {
1058
      the_low_target.arch_setup ();
1059
#ifdef HAVE_LINUX_REGSETS
1060
      memset (disabled_regsets, 0, num_regsets);
1061
#endif
1062
      new_inferior = 0;
1063
    }
1064
 
1065
  if (debug_threads
1066
      && WIFSTOPPED (*wstatp)
1067
      && the_low_target.get_pc != NULL)
1068
    {
1069
      struct thread_info *saved_inferior = current_inferior;
1070
      struct regcache *regcache;
1071
      CORE_ADDR pc;
1072
 
1073
      current_inferior = (struct thread_info *)
1074
        find_inferior_id (&all_threads, child->head.id);
1075
      regcache = get_thread_regcache (current_inferior, 1);
1076
      pc = (*the_low_target.get_pc) (regcache);
1077
      fprintf (stderr, "linux_wait_for_lwp: pc is 0x%lx\n", (long) pc);
1078
      current_inferior = saved_inferior;
1079
    }
1080
 
1081
  return child;
1082
}
1083
 
1084
/* Wait for an event from child PID.  If PID is -1, wait for any
1085
   child.  Store the stop status through the status pointer WSTAT.
1086
   OPTIONS is passed to the waitpid call.  Return 0 if no child stop
1087
   event was found and OPTIONS contains WNOHANG.  Return the PID of
1088
   the stopped child otherwise.  */
1089
 
1090
static int
1091
linux_wait_for_event_1 (ptid_t ptid, int *wstat, int options)
1092
{
1093
  CORE_ADDR stop_pc;
1094
  struct lwp_info *event_child = NULL;
1095
  int bp_status;
1096
  struct lwp_info *requested_child = NULL;
1097
 
1098
  /* Check for a lwp with a pending status.  */
1099
  /* It is possible that the user changed the pending task's registers since
1100
     it stopped.  We correctly handle the change of PC if we hit a breakpoint
1101
     (in check_removed_breakpoint); signals should be reported anyway.  */
1102
 
1103
  if (ptid_equal (ptid, minus_one_ptid)
1104
      || ptid_equal (pid_to_ptid (ptid_get_pid (ptid)), ptid))
1105
    {
1106
      event_child = (struct lwp_info *)
1107
        find_inferior (&all_lwps, status_pending_p, &ptid);
1108
      if (debug_threads && event_child)
1109
        fprintf (stderr, "Got a pending child %ld\n", lwpid_of (event_child));
1110
    }
1111
  else
1112
    {
1113
      requested_child = find_lwp_pid (ptid);
1114
      if (requested_child->status_pending_p
1115
          && !check_removed_breakpoint (requested_child))
1116
        event_child = requested_child;
1117
    }
1118
 
1119
  if (event_child != NULL)
1120
    {
1121
      if (debug_threads)
1122
        fprintf (stderr, "Got an event from pending child %ld (%04x)\n",
1123
                 lwpid_of (event_child), event_child->status_pending);
1124
      *wstat = event_child->status_pending;
1125
      event_child->status_pending_p = 0;
1126
      event_child->status_pending = 0;
1127
      current_inferior = get_lwp_thread (event_child);
1128
      return lwpid_of (event_child);
1129
    }
1130
 
1131
  /* We only enter this loop if no process has a pending wait status.  Thus
1132
     any action taken in response to a wait status inside this loop is
1133
     responding as soon as we detect the status, not after any pending
1134
     events.  */
1135
  while (1)
1136
    {
1137
      event_child = linux_wait_for_lwp (ptid, wstat, options);
1138
 
1139
      if ((options & WNOHANG) && event_child == NULL)
1140
        return 0;
1141
 
1142
      if (event_child == NULL)
1143
        error ("event from unknown child");
1144
 
1145
      current_inferior = get_lwp_thread (event_child);
1146
 
1147
      /* Check for thread exit.  */
1148
      if (! WIFSTOPPED (*wstat))
1149
        {
1150
          if (debug_threads)
1151
            fprintf (stderr, "LWP %ld exiting\n", lwpid_of (event_child));
1152
 
1153
          /* If the last thread is exiting, just return.  */
1154
          if (last_thread_of_process_p (current_inferior))
1155
            {
1156
              if (debug_threads)
1157
                fprintf (stderr, "LWP %ld is last lwp of process\n",
1158
                         lwpid_of (event_child));
1159
              return lwpid_of (event_child);
1160
            }
1161
 
1162
          delete_lwp (event_child);
1163
 
1164
          if (!non_stop)
1165
            {
1166
              current_inferior = (struct thread_info *) all_threads.head;
1167
              if (debug_threads)
1168
                fprintf (stderr, "Current inferior is now %ld\n",
1169
                         lwpid_of (get_thread_lwp (current_inferior)));
1170
            }
1171
          else
1172
            {
1173
              current_inferior = NULL;
1174
              if (debug_threads)
1175
                fprintf (stderr, "Current inferior is now <NULL>\n");
1176
            }
1177
 
1178
          /* If we were waiting for this particular child to do something...
1179
             well, it did something.  */
1180
          if (requested_child != NULL)
1181
            return lwpid_of (event_child);
1182
 
1183
          /* Wait for a more interesting event.  */
1184
          continue;
1185
        }
1186
 
1187
      if (event_child->must_set_ptrace_flags)
1188
        {
1189
          ptrace (PTRACE_SETOPTIONS, lwpid_of (event_child),
1190
                  0, (PTRACE_ARG4_TYPE) PTRACE_O_TRACECLONE);
1191
          event_child->must_set_ptrace_flags = 0;
1192
        }
1193
 
1194
      if (WIFSTOPPED (*wstat)
1195
          && WSTOPSIG (*wstat) == SIGSTOP
1196
          && event_child->stop_expected)
1197
        {
1198
          if (debug_threads)
1199
            fprintf (stderr, "Expected stop.\n");
1200
          event_child->stop_expected = 0;
1201
          linux_resume_one_lwp (event_child, event_child->stepping, 0, NULL);
1202
          continue;
1203
        }
1204
 
1205
      if (WIFSTOPPED (*wstat) && WSTOPSIG (*wstat) == SIGTRAP
1206
          && *wstat >> 16 != 0)
1207
        {
1208
          handle_extended_wait (event_child, *wstat);
1209
          continue;
1210
        }
1211
 
1212
      /* If GDB is not interested in this signal, don't stop other
1213
         threads, and don't report it to GDB.  Just resume the
1214
         inferior right away.  We do this for threading-related
1215
         signals as well as any that GDB specifically requested we
1216
         ignore.  But never ignore SIGSTOP if we sent it ourselves,
1217
         and do not ignore signals when stepping - they may require
1218
         special handling to skip the signal handler.  */
1219
      /* FIXME drow/2002-06-09: Get signal numbers from the inferior's
1220
         thread library?  */
1221
      if (WIFSTOPPED (*wstat)
1222
          && !event_child->stepping
1223
          && (
1224
#if defined (USE_THREAD_DB) && defined (__SIGRTMIN)
1225
              (current_process ()->private->thread_db != NULL
1226
               && (WSTOPSIG (*wstat) == __SIGRTMIN
1227
                   || WSTOPSIG (*wstat) == __SIGRTMIN + 1))
1228
              ||
1229
#endif
1230
              (pass_signals[target_signal_from_host (WSTOPSIG (*wstat))]
1231
               && (WSTOPSIG (*wstat) != SIGSTOP || !stopping_threads))))
1232
        {
1233
          siginfo_t info, *info_p;
1234
 
1235
          if (debug_threads)
1236
            fprintf (stderr, "Ignored signal %d for LWP %ld.\n",
1237
                     WSTOPSIG (*wstat), lwpid_of (event_child));
1238
 
1239
          if (ptrace (PTRACE_GETSIGINFO, lwpid_of (event_child), 0, &info) == 0)
1240
            info_p = &info;
1241
          else
1242
            info_p = NULL;
1243
          linux_resume_one_lwp (event_child,
1244
                                event_child->stepping,
1245
                                WSTOPSIG (*wstat), info_p);
1246
          continue;
1247
        }
1248
 
1249
      /* If this event was not handled above, and is not a SIGTRAP,
1250
         report it.  SIGILL and SIGSEGV are also treated as traps in case
1251
         a breakpoint is inserted at the current PC.  */
1252
      if (!WIFSTOPPED (*wstat)
1253
          || (WSTOPSIG (*wstat) != SIGTRAP && WSTOPSIG (*wstat) != SIGILL
1254
              && WSTOPSIG (*wstat) != SIGSEGV))
1255
        return lwpid_of (event_child);
1256
 
1257
      /* If this target does not support breakpoints, we simply report the
1258
         signal; it's of no concern to us.  */
1259
      if (the_low_target.get_pc == NULL)
1260
        return lwpid_of (event_child);
1261
 
1262
      stop_pc = get_stop_pc ();
1263
 
1264
      /* Only handle SIGILL or SIGSEGV if we've hit a recognized
1265
         breakpoint.  */
1266
      if (WSTOPSIG (*wstat) != SIGTRAP
1267
          && (event_child->stepping
1268
              || ! (*the_low_target.breakpoint_at) (stop_pc)))
1269
        return lwpid_of (event_child);
1270
 
1271
      /* bp_reinsert will only be set if we were single-stepping.
1272
         Notice that we will resume the process after hitting
1273
         a gdbserver breakpoint; single-stepping to/over one
1274
         is not supported (yet).  */
1275
      if (event_child->bp_reinsert != 0)
1276
        {
1277
          if (debug_threads)
1278
            fprintf (stderr, "Reinserted breakpoint.\n");
1279
          reinsert_breakpoint (event_child->bp_reinsert);
1280
          event_child->bp_reinsert = 0;
1281
 
1282
          /* Clear the single-stepping flag and SIGTRAP as we resume.  */
1283
          linux_resume_one_lwp (event_child, 0, 0, NULL);
1284
          continue;
1285
        }
1286
 
1287
      bp_status = check_breakpoints (stop_pc);
1288
 
1289
      if (bp_status != 0)
1290
        {
1291
          if (debug_threads)
1292
            fprintf (stderr, "Hit a gdbserver breakpoint.\n");
1293
 
1294
          /* We hit one of our own breakpoints.  We mark it as a pending
1295
             breakpoint, so that check_removed_breakpoint () will do the PC
1296
             adjustment for us at the appropriate time.  */
1297
          event_child->pending_is_breakpoint = 1;
1298
          event_child->pending_stop_pc = stop_pc;
1299
 
1300
          /* We may need to put the breakpoint back.  We continue in the event
1301
             loop instead of simply replacing the breakpoint right away,
1302
             in order to not lose signals sent to the thread that hit the
1303
             breakpoint.  Unfortunately this increases the window where another
1304
             thread could sneak past the removed breakpoint.  For the current
1305
             use of server-side breakpoints (thread creation) this is
1306
             acceptable; but it needs to be considered before this breakpoint
1307
             mechanism can be used in more general ways.  For some breakpoints
1308
             it may be necessary to stop all other threads, but that should
1309
             be avoided where possible.
1310
 
1311
             If breakpoint_reinsert_addr is NULL, that means that we can
1312
             use PTRACE_SINGLESTEP on this platform.  Uninsert the breakpoint,
1313
             mark it for reinsertion, and single-step.
1314
 
1315
             Otherwise, call the target function to figure out where we need
1316
             our temporary breakpoint, create it, and continue executing this
1317
             process.  */
1318
 
1319
          /* NOTE: we're lifting breakpoints in non-stop mode.  This
1320
             is currently only used for thread event breakpoints, so
1321
             it isn't that bad as long as we have PTRACE_EVENT_CLONE
1322
             events.  */
1323
          if (bp_status == 2)
1324
            /* No need to reinsert.  */
1325
            linux_resume_one_lwp (event_child, 0, 0, NULL);
1326
          else if (the_low_target.breakpoint_reinsert_addr == NULL)
1327
            {
1328
              event_child->bp_reinsert = stop_pc;
1329
              uninsert_breakpoint (stop_pc);
1330
              linux_resume_one_lwp (event_child, 1, 0, NULL);
1331
            }
1332
          else
1333
            {
1334
              reinsert_breakpoint_by_bp
1335
                (stop_pc, (*the_low_target.breakpoint_reinsert_addr) ());
1336
              linux_resume_one_lwp (event_child, 0, 0, NULL);
1337
            }
1338
 
1339
          continue;
1340
        }
1341
 
1342
      if (debug_threads)
1343
        fprintf (stderr, "Hit a non-gdbserver breakpoint.\n");
1344
 
1345
      /* If we were single-stepping, we definitely want to report the
1346
         SIGTRAP.  Although the single-step operation has completed,
1347
         do not clear the stepping flag yet; we need to check it
1348
         in wait_for_sigstop.  */
1349
      if (event_child->stepping)
1350
        return lwpid_of (event_child);
1351
 
1352
      /* A SIGTRAP that we can't explain.  It may have been a breakpoint.
1353
         Check if it is a breakpoint, and if so mark the process information
1354
         accordingly.  This will handle both the necessary fiddling with the
1355
         PC on decr_pc_after_break targets and suppressing extra threads
1356
         hitting a breakpoint if two hit it at once and then GDB removes it
1357
         after the first is reported.  Arguably it would be better to report
1358
         multiple threads hitting breakpoints simultaneously, but the current
1359
         remote protocol does not allow this.  */
1360
      if ((*the_low_target.breakpoint_at) (stop_pc))
1361
        {
1362
          event_child->pending_is_breakpoint = 1;
1363
          event_child->pending_stop_pc = stop_pc;
1364
        }
1365
 
1366
      return lwpid_of (event_child);
1367
    }
1368
 
1369
  /* NOTREACHED */
1370
  return 0;
1371
}
1372
 
1373
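/* Like linux_wait_for_event_1, but when PTID names a whole process,
   wait for any child and leave events from other processes pending on
   their LWPs until an event for PTID arrives.  */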
static int
1374
linux_wait_for_event (ptid_t ptid, int *wstat, int options)
1375
{
1376
  ptid_t wait_ptid;
1377
 
1378
  if (ptid_is_pid (ptid))
1379
    {
1380
      /* A request to wait for a specific tgid.  This is not possible
1381
         with waitpid, so instead, we wait for any child, and leave
1382
         children we're not interested in right now with a pending
1383
         status to report later.  */
1384
      wait_ptid = minus_one_ptid;
1385
    }
1386
  else
1387
    wait_ptid = ptid;
1388
 
1389
  while (1)
1390
    {
1391
      int event_pid;
1392
 
1393
      event_pid = linux_wait_for_event_1 (wait_ptid, wstat, options);
1394
 
1395
      if (event_pid > 0
1396
          && ptid_is_pid (ptid) && ptid_get_pid (ptid) != event_pid)
1397
        {
1398
          struct lwp_info *event_child = find_lwp_pid (pid_to_ptid (event_pid));
1399
 
1400
          if (! WIFSTOPPED (*wstat))
1401
            mark_lwp_dead (event_child, *wstat);
1402
          else
1403
            {
1404
              event_child->status_pending_p = 1;
1405
              event_child->status_pending = *wstat;
1406
            }
1407
        }
1408
      else
1409
        return event_pid;
1410
    }
1411
}
1412
 
1413
/* Wait for process, returns status.  */
1414
 
1415
static ptid_t
1416
linux_wait_1 (ptid_t ptid,
1417
              struct target_waitstatus *ourstatus, int target_options)
1418
{
1419
  int w;
1420
  struct thread_info *thread = NULL;
1421
  struct lwp_info *lwp = NULL;
1422
  int options;
1423
  int pid;
1424
 
1425
  /* Translate generic target options into linux options.  */
1426
  options = __WALL;
1427
  if (target_options & TARGET_WNOHANG)
1428
    options |= WNOHANG;
1429
 
1430
retry:
1431
  ourstatus->kind = TARGET_WAITKIND_IGNORE;
1432
 
1433
  /* If we were only supposed to resume one thread, only wait for
1434
     that thread - if it's still alive.  If it died, however - which
1435
     can happen if we're coming from the thread death case below -
1436
     then we need to make sure we restart the other threads.  We could
1437
     pick a thread at random or restart all; restarting all is less
1438
     arbitrary.  */
1439
  if (!non_stop
1440
      && !ptid_equal (cont_thread, null_ptid)
1441
      && !ptid_equal (cont_thread, minus_one_ptid))
1442
    {
1443
      thread = (struct thread_info *) find_inferior_id (&all_threads,
1444
                                                        cont_thread);
1445
 
1446
      /* No stepping, no signal - unless one is pending already, of course.  */
1447
      if (thread == NULL)
1448
        {
1449
          struct thread_resume resume_info;
1450
          resume_info.thread = minus_one_ptid;
1451
          resume_info.kind = resume_continue;
1452
          resume_info.sig = 0;
1453
          linux_resume (&resume_info, 1);
1454
        }
1455
      else
1456
        ptid = cont_thread;
1457
    }
1458
 
1459
  pid = linux_wait_for_event (ptid, &w, options);
1460
  if (pid == 0) /* only if TARGET_WNOHANG */
1461
    return null_ptid;
1462
 
1463
  lwp = get_thread_lwp (current_inferior);
1464
 
1465
  /* If we are waiting for a particular child, and it exited,
1466
     linux_wait_for_event will return its exit status.  Similarly if
1467
     the last child exited.  If this is not the last child, however,
1468
     do not report it as exited until there is a 'thread exited' response
1469
     available in the remote protocol.  Instead, just wait for another event.
1470
     This should be safe, because if the thread crashed we will already
1471
     have reported the termination signal to GDB; that should stop any
1472
     in-progress stepping operations, etc.
1473
 
1474
     Report the exit status of the last thread to exit.  This matches
1475
     LinuxThreads' behavior.  */
1476
 
1477
  if (last_thread_of_process_p (current_inferior))
1478
    {
1479
      if (WIFEXITED (w) || WIFSIGNALED (w))
1480
        {
1481
          int pid = pid_of (lwp);
1482
          struct process_info *process = find_process_pid (pid);
1483
 
1484
#ifdef USE_THREAD_DB
1485
          thread_db_free (process, 0);
1486
#endif
1487
          delete_lwp (lwp);
1488
          linux_remove_process (process);
1489
 
1490
          current_inferior = NULL;
1491
 
1492
          if (WIFEXITED (w))
1493
            {
1494
              ourstatus->kind = TARGET_WAITKIND_EXITED;
1495
              ourstatus->value.integer = WEXITSTATUS (w);
1496
 
1497
              if (debug_threads)
1498
                fprintf (stderr, "\nChild exited with retcode = %x \n", WEXITSTATUS (w));
1499
            }
1500
          else
1501
            {
1502
              ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1503
              ourstatus->value.sig = target_signal_from_host (WTERMSIG (w));
1504
 
1505
              if (debug_threads)
1506
                fprintf (stderr, "\nChild terminated with signal = %x \n", WTERMSIG (w));
1507
 
1508
            }
1509
 
1510
          return pid_to_ptid (pid);
1511
        }
1512
    }
1513
  else
1514
    {
1515
      if (!WIFSTOPPED (w))
1516
        goto retry;
1517
    }
1518
 
1519
  /* In all-stop, stop all threads.  Be careful to only do this if
1520
     we're about to report an event to GDB.  */
1521
  if (!non_stop)
1522
    stop_all_lwps ();
1523
 
1524
  ourstatus->kind = TARGET_WAITKIND_STOPPED;
1525
 
1526
  if (lwp->suspended && WSTOPSIG (w) == SIGSTOP)
1527
    {
1528
      /* A thread that has been requested to stop by GDB with vCont;t,
1529
         and it stopped cleanly, so report as SIG0.  The use of
1530
         SIGSTOP is an implementation detail.  */
1531
      ourstatus->value.sig = TARGET_SIGNAL_0;
1532
    }
1533
  else if (lwp->suspended && WSTOPSIG (w) != SIGSTOP)
1534
    {
1535
      /* A thread that has been requested to stop by GDB with vCont;t,
1536
         but, it stopped for other reasons.  Set stop_expected so the
1537
         pending SIGSTOP is ignored and the LWP is resumed.  */
1538
      lwp->stop_expected = 1;
1539
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1540
    }
1541
  else
1542
    {
1543
      ourstatus->value.sig = target_signal_from_host (WSTOPSIG (w));
1544
    }
1545
 
1546
  if (debug_threads)
1547
    fprintf (stderr, "linux_wait ret = %s, %d, %d\n",
1548
             target_pid_to_str (lwp->head.id),
1549
             ourstatus->kind,
1550
             ourstatus->value.sig);
1551
 
1552
  return lwp->head.id;
1553
}
1554
 
1555
/* Get rid of any pending event in the pipe.  */
1556
static void
1557
async_file_flush (void)
1558
{
1559
  int ret;
1560
  char buf;
1561
 
1562
  do
1563
    ret = read (linux_event_pipe[0], &buf, 1);
1564
  while (ret >= 0 || (ret == -1 && errno == EINTR));
1565
}
1566
 
1567
/* Put something in the pipe, so the event loop wakes up.  */
1568
static void
1569
async_file_mark (void)
1570
{
1571
  int ret;
1572
 
1573
  async_file_flush ();
1574
 
1575
  do
1576
    ret = write (linux_event_pipe[1], "+", 1);
1577
  while (ret == 0 || (ret == -1 && errno == EINTR));
1578
 
1579
  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
1580
     be awakened anyway.  */
1581
}
1582
 
1583
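/* Flush the async event pipe, wait for an event via linux_wait_1, and
   re-mark the pipe if more events may still be pending.  */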
static ptid_t
1584
linux_wait (ptid_t ptid,
1585
            struct target_waitstatus *ourstatus, int target_options)
1586
{
1587
  ptid_t event_ptid;
1588
 
1589
  if (debug_threads)
1590
    fprintf (stderr, "linux_wait: [%s]\n", target_pid_to_str (ptid));
1591
 
1592
  /* Flush the async file first.  */
1593
  if (target_is_async_p ())
1594
    async_file_flush ();
1595
 
1596
  event_ptid = linux_wait_1 (ptid, ourstatus, target_options);
1597
 
1598
  /* If at least one stop was reported, there may be more.  A single
1599
     SIGCHLD can signal more than one child stop.  */
1600
  if (target_is_async_p ()
1601
      && (target_options & TARGET_WNOHANG) != 0
1602
      && !ptid_equal (event_ptid, null_ptid))
1603
    async_file_mark ();
1604
 
1605
  return event_ptid;
1606
}
1607
 
1608
/* Send a signal to an LWP.  */
1609
 
1610
static int
1611
kill_lwp (unsigned long lwpid, int signo)
1612
{
1613
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
1614
     fails, then we are not using nptl threads and we should be using kill.  */
1615
 
1616
#ifdef __NR_tkill
1617
  {
1618
    static int tkill_failed;
1619
 
1620
    if (!tkill_failed)
1621
      {
1622
        int ret;
1623
 
1624
        errno = 0;
1625
        ret = syscall (__NR_tkill, lwpid, signo);
1626
        if (errno != ENOSYS)
1627
          return ret;
1628
        tkill_failed = 1;
1629
      }
1630
  }
1631
#endif
1632
 
1633
  return kill (lwpid, signo);
1634
}
1635
 
1636
static void
1637
send_sigstop (struct inferior_list_entry *entry)
1638
{
1639
  struct lwp_info *lwp = (struct lwp_info *) entry;
1640
  int pid;
1641
 
1642
  if (lwp->stopped)
1643
    return;
1644
 
1645
  pid = lwpid_of (lwp);
1646
 
1647
  /* If we already have a pending stop signal for this process, don't
1648
     send another.  */
1649
  if (lwp->stop_expected)
1650
    {
1651
      if (debug_threads)
1652
        fprintf (stderr, "Have pending sigstop for lwp %d\n", pid);
1653
 
1654
      /* We clear the stop_expected flag so that wait_for_sigstop
1655
         will receive the SIGSTOP event (instead of silently resuming and
1656
         waiting again).  It'll be reset below.  */
1657
      lwp->stop_expected = 0;
1658
      return;
1659
    }
1660
 
1661
  if (debug_threads)
1662
    fprintf (stderr, "Sending sigstop to lwp %d\n", pid);
1663
 
1664
  kill_lwp (pid, SIGSTOP);
1665
}
1666
 
1667
static void
1668
mark_lwp_dead (struct lwp_info *lwp, int wstat)
1669
{
1670
  /* It's dead, really.  */
1671
  lwp->dead = 1;
1672
 
1673
  /* Store the exit status for later.  */
1674
  lwp->status_pending_p = 1;
1675
  lwp->status_pending = wstat;
1676
 
1677
  /* So that check_removed_breakpoint doesn't try to figure out if
1678
     this is stopped at a breakpoint.  */
1679
  lwp->pending_is_breakpoint = 0;
1680
 
1681
  /* Prevent trying to stop it.  */
1682
  lwp->stopped = 1;
1683
 
1684
  /* No further stops are expected from a dead lwp.  */
1685
  lwp->stop_expected = 0;
1686
}
1687
 
1688
static void
1689
wait_for_sigstop (struct inferior_list_entry *entry)
1690
{
1691
  struct lwp_info *lwp = (struct lwp_info *) entry;
1692
  struct thread_info *saved_inferior;
1693
  int wstat;
1694
  ptid_t saved_tid;
1695
  ptid_t ptid;
1696
 
1697
  if (lwp->stopped)
1698
    return;
1699
 
1700
  saved_inferior = current_inferior;
1701
  if (saved_inferior != NULL)
1702
    saved_tid = ((struct inferior_list_entry *) saved_inferior)->id;
1703
  else
1704
    saved_tid = null_ptid; /* avoid bogus unused warning */
1705
 
1706
  ptid = lwp->head.id;
1707
 
1708
  linux_wait_for_event (ptid, &wstat, __WALL);
1709
 
1710
  /* If we stopped with a non-SIGSTOP signal, save it for later
1711
     and record the pending SIGSTOP.  If the process exited, just
1712
     return.  */
1713
  if (WIFSTOPPED (wstat)
1714
      && WSTOPSIG (wstat) != SIGSTOP)
1715
    {
1716
      if (debug_threads)
1717
        fprintf (stderr, "LWP %ld stopped with non-sigstop status %06x\n",
1718
                 lwpid_of (lwp), wstat);
1719
 
1720
      /* Do not leave a pending single-step finish to be reported to
1721
         the client.  The client will give us a new action for this
1722
         thread, possibly a continue request --- otherwise, the client
1723
         would consider this pending SIGTRAP reported later a spurious
1724
         signal.  */
1725
      if (WSTOPSIG (wstat) == SIGTRAP
1726
          && lwp->stepping
1727
          && !linux_stopped_by_watchpoint ())
1728
        {
1729
          if (debug_threads)
1730
            fprintf (stderr, "  single-step SIGTRAP ignored\n");
1731
        }
1732
      else
1733
        {
1734
          lwp->status_pending_p = 1;
1735
          lwp->status_pending = wstat;
1736
        }
1737
      lwp->stop_expected = 1;
1738
    }
1739
  else if (!WIFSTOPPED (wstat))
1740
    {
1741
      if (debug_threads)
1742
        fprintf (stderr, "Process %ld exited while stopping LWPs\n",
1743
                 lwpid_of (lwp));
1744
 
1745
      /* Leave this status pending for the next time we're able to
1746
         report it.  In the meantime, we'll report this lwp as dead
1747
         to GDB, so GDB doesn't try to read registers and memory from
1748
         it.  */
1749
      mark_lwp_dead (lwp, wstat);
1750
    }
1751
 
1752
  if (saved_inferior == NULL || linux_thread_alive (saved_tid))
1753
    current_inferior = saved_inferior;
1754
  else
1755
    {
1756
      if (debug_threads)
1757
        fprintf (stderr, "Previously current thread died.\n");
1758
 
1759
      if (non_stop)
1760
        {
1761
          /* We can't change the current inferior behind GDB's back,
1762
             otherwise, a subsequent command may apply to the wrong
1763
             process.  */
1764
          current_inferior = NULL;
1765
        }
1766
      else
1767
        {
1768
          /* Set a valid thread as current.  */
1769
          set_desired_inferior (0);
1770
        }
1771
    }
1772
}
1773
 
1774
static void
1775
stop_all_lwps (void)
1776
{
1777
  stopping_threads = 1;
1778
  for_each_inferior (&all_lwps, send_sigstop);
1779
  for_each_inferior (&all_lwps, wait_for_sigstop);
1780
  stopping_threads = 0;
1781
}
1782
 
1783
/* Resume execution of the inferior process.
1784
   If STEP is nonzero, single-step it.
1785
   If SIGNAL is nonzero, give it that signal.  */
1786
 
1787
static void
1788
linux_resume_one_lwp (struct lwp_info *lwp,
1789
                      int step, int signal, siginfo_t *info)
1790
{
1791
  struct thread_info *saved_inferior;
1792
 
1793
  if (lwp->stopped == 0)
1794
    return;
1795
 
1796
  /* If we have pending signals or status, and a new signal, enqueue the
1797
     signal.  Also enqueue the signal if we are waiting to reinsert a
1798
     breakpoint; it will be picked up again below.  */
1799
  if (signal != 0
1800
      && (lwp->status_pending_p || lwp->pending_signals != NULL
1801
          || lwp->bp_reinsert != 0))
1802
    {
1803
      struct pending_signals *p_sig;
1804
      p_sig = xmalloc (sizeof (*p_sig));
1805
      p_sig->prev = lwp->pending_signals;
1806
      p_sig->signal = signal;
1807
      if (info == NULL)
1808
        memset (&p_sig->info, 0, sizeof (siginfo_t));
1809
      else
1810
        memcpy (&p_sig->info, info, sizeof (siginfo_t));
1811
      lwp->pending_signals = p_sig;
1812
    }
1813
 
1814
  if (lwp->status_pending_p && !check_removed_breakpoint (lwp))
1815
    return;
1816
 
1817
  saved_inferior = current_inferior;
1818
  current_inferior = get_lwp_thread (lwp);
1819
 
1820
  if (debug_threads)
1821
    fprintf (stderr, "Resuming lwp %ld (%s, signal %d, stop %s)\n",
1822
             lwpid_of (lwp), step ? "step" : "continue", signal,
1823
             lwp->stop_expected ? "expected" : "not expected");
1824
 
1825
  /* This bit needs some thinking about.  If we get a signal that
1826
     we must report while a single-step reinsert is still pending,
1827
     we often end up resuming the thread.  It might be better to
1828
     (ew) allow a stack of pending events; then we could be sure that
1829
     the reinsert happened right away and not lose any signals.
1830
 
1831
     Making this stack would also shrink the window in which breakpoints are
1832
     uninserted (see comment in linux_wait_for_lwp) but not enough for
1833
     complete correctness, so it won't solve that problem.  It may be
1834
     worthwhile just to solve this one, however.  */
1835
  if (lwp->bp_reinsert != 0)
1836
    {
1837
      if (debug_threads)
1838
        fprintf (stderr, "  pending reinsert at %08lx", (long)lwp->bp_reinsert);
1839
      if (step == 0)
1840
        fprintf (stderr, "BAD - reinserting but not stepping.\n");
1841
      step = 1;
1842
 
1843
      /* Postpone any pending signal.  It was enqueued above.  */
1844
      signal = 0;
1845
    }
1846
 
1847
  check_removed_breakpoint (lwp);
1848
 
1849
  if (debug_threads && the_low_target.get_pc != NULL)
1850
    {
1851
      struct regcache *regcache = get_thread_regcache (current_inferior, 1);
1852
      CORE_ADDR pc = (*the_low_target.get_pc) (regcache);
1853
      fprintf (stderr, "  resuming from pc 0x%lx\n", (long) pc);
1854
    }
1855
 
1856
  /* If we have pending signals, consume one unless we are trying to reinsert
1857
     a breakpoint.  */
1858
  if (lwp->pending_signals != NULL && lwp->bp_reinsert == 0)
1859
    {
1860
      struct pending_signals **p_sig;
1861
 
1862
      p_sig = &lwp->pending_signals;
1863
      while ((*p_sig)->prev != NULL)
1864
        p_sig = &(*p_sig)->prev;
1865
 
1866
      signal = (*p_sig)->signal;
1867
      if ((*p_sig)->info.si_signo != 0)
1868
        ptrace (PTRACE_SETSIGINFO, lwpid_of (lwp), 0, &(*p_sig)->info);
1869
 
1870
      free (*p_sig);
1871
      *p_sig = NULL;
1872
    }
1873
 
1874
  if (the_low_target.prepare_to_resume != NULL)
1875
    the_low_target.prepare_to_resume (lwp);
1876
 
1877
  regcache_invalidate_one ((struct inferior_list_entry *)
1878
                           get_lwp_thread (lwp));
1879
  errno = 0;
1880
  lwp->stopped = 0;
1881
  lwp->stepping = step;
1882
  ptrace (step ? PTRACE_SINGLESTEP : PTRACE_CONT, lwpid_of (lwp), 0,
1883
          /* Coerce to a uintptr_t first to avoid potential gcc warning
1884
             of coercing an 8 byte integer to a 4 byte pointer.  */
1885
          (PTRACE_ARG4_TYPE) (uintptr_t) signal);
1886
 
1887
  current_inferior = saved_inferior;
1888
  if (errno)
1889
    {
1890
      /* ESRCH from ptrace either means that the thread was already
1891
         running (an error) or that it is gone (a race condition).  If
1892
         it's gone, we will get a notification the next time we wait,
1893
         so we can ignore the error.  We could differentiate these
1894
         two, but it's tricky without waiting; the thread still exists
1895
         as a zombie, so sending it signal 0 would succeed.  So just
1896
         ignore ESRCH.  */
1897
      if (errno == ESRCH)
1898
        return;
1899
 
1900
      perror_with_name ("ptrace");
1901
    }
1902
}
1903
 
1904
struct thread_resume_array
1905
{
1906
  struct thread_resume *resume;
1907
  size_t n;
1908
};
1909
 
1910
/* This function is called once per thread.  We look up the thread
1911
   in RESUME_PTR, and mark the thread with a pointer to the appropriate
1912
   resume request.
1913
 
1914
   This algorithm is O(threads * resume elements), but resume elements
1915
   is small (and will remain small at least until GDB supports thread
1916
   suspension).  */
1917
static int
1918
linux_set_resume_request (struct inferior_list_entry *entry, void *arg)
1919
{
1920
  struct lwp_info *lwp;
1921
  struct thread_info *thread;
1922
  int ndx;
1923
  struct thread_resume_array *r;
1924
 
1925
  thread = (struct thread_info *) entry;
1926
  lwp = get_thread_lwp (thread);
1927
  r = arg;
1928
 
1929
  for (ndx = 0; ndx < r->n; ndx++)
1930
    {
1931
      ptid_t ptid = r->resume[ndx].thread;
1932
      if (ptid_equal (ptid, minus_one_ptid)
1933
          || ptid_equal (ptid, entry->id)
1934
          || (ptid_is_pid (ptid)
1935
              && (ptid_get_pid (ptid) == pid_of (lwp)))
1936
          || (ptid_get_lwp (ptid) == -1
1937
              && (ptid_get_pid (ptid) == pid_of (lwp))))
1938
        {
1939
          lwp->resume = &r->resume[ndx];
1940
          return 0;
1941
        }
1942
    }
1943
 
1944
  /* No resume action for this thread.  */
1945
  lwp->resume = NULL;
1946
 
1947
  return 0;
1948
}
1949
 
1950
 
1951
/* Set *FLAG_P if this lwp has an interesting status pending.  */
1952
static int
1953
resume_status_pending_p (struct inferior_list_entry *entry, void *flag_p)
1954
{
1955
  struct lwp_info *lwp = (struct lwp_info *) entry;
1956
 
1957
  /* LWPs which will not be resumed are not interesting, because
1958
     we might not wait for them next time through linux_wait.  */
1959
  if (lwp->resume == NULL)
1960
    return 0;
1961
 
1962
  /* If this thread has a removed breakpoint, we won't have any
1963
     events to report later, so check now.  check_removed_breakpoint
1964
     may clear status_pending_p.  We avoid calling check_removed_breakpoint
1965
     for any thread that we are not otherwise going to resume - this
1966
     lets us preserve stopped status when two threads hit a breakpoint.
1967
     GDB removes the breakpoint to single-step a particular thread
1968
     past it, then re-inserts it and resumes all threads.  We want
1969
     to report the second thread without resuming it in the interim.  */
1970
  if (lwp->status_pending_p)
1971
    check_removed_breakpoint (lwp);
1972
 
1973
  if (lwp->status_pending_p)
1974
    * (int *) flag_p = 1;
1975
 
1976
  return 0;
1977
}
1978
 
1979
/* This function is called once per thread.  We check the thread's resume
1980
   request, which will tell us whether to resume, step, or leave the thread
1981
   stopped; and what signal, if any, it should be sent.
1982
 
1983
   For threads which we aren't explicitly told otherwise, we preserve
1984
   the stepping flag; this is used for stepping over gdbserver-placed
1985
   breakpoints.
1986
 
1987
   If pending_flags was set in any thread, we queue any needed
1988
   signals, since we won't actually resume.  We already have a pending
1989
   event to report, so we don't need to preserve any step requests;
1990
   they should be re-issued if necessary.  */
1991
 
1992
static int
1993
linux_resume_one_thread (struct inferior_list_entry *entry, void *arg)
1994
{
1995
  struct lwp_info *lwp;
1996
  struct thread_info *thread;
1997
  int step;
1998
  int pending_flag = * (int *) arg;
1999
 
2000
  thread = (struct thread_info *) entry;
2001
  lwp = get_thread_lwp (thread);
2002
 
2003
  if (lwp->resume == NULL)
2004
    return 0;
2005
 
2006
  if (lwp->resume->kind == resume_stop)
2007
    {
2008
      if (debug_threads)
2009
        fprintf (stderr, "suspending LWP %ld\n", lwpid_of (lwp));
2010
 
2011
      if (!lwp->stopped)
2012
        {
2013
          if (debug_threads)
2014
            fprintf (stderr, "running -> suspending LWP %ld\n", lwpid_of (lwp));
2015
 
2016
          lwp->suspended = 1;
2017
          send_sigstop (&lwp->head);
2018
        }
2019
      else
2020
        {
2021
          if (debug_threads)
2022
            {
2023
              if (lwp->suspended)
2024
                fprintf (stderr, "already stopped/suspended LWP %ld\n",
2025
                         lwpid_of (lwp));
2026
              else
2027
                fprintf (stderr, "already stopped/not suspended LWP %ld\n",
2028
                         lwpid_of (lwp));
2029
            }
2030
 
2031
          /* Make sure we leave the LWP suspended, so we don't try to
2032
             resume it without GDB telling us to.  FIXME: The LWP may
2033
             have been stopped in an internal event that was not meant
2034
             to be notified back to GDB (e.g., gdbserver breakpoint),
2035
             so we should be reporting a stop event in that case
2036
             too.  */
2037
          lwp->suspended = 1;
2038
        }
2039
 
2040
      /* For stop requests, we're done.  */
2041
      lwp->resume = NULL;
2042
      return 0;
2043
    }
2044
  else
2045
    lwp->suspended = 0;
2046
 
2047
  /* If this thread which is about to be resumed has a pending status,
2048
     then don't resume any threads - we can just report the pending
2049
     status.  Make sure to queue any signals that would otherwise be
2050
     sent.  In all-stop mode, we base this decision on whether *any*
2051
     thread has a pending status.  */
2052
  if (non_stop)
2053
    resume_status_pending_p (&lwp->head, &pending_flag);
2054
 
2055
  if (!pending_flag)
2056
    {
2057
      if (debug_threads)
2058
        fprintf (stderr, "resuming LWP %ld\n", lwpid_of (lwp));
2059
 
2060
      if (ptid_equal (lwp->resume->thread, minus_one_ptid)
2061
          && lwp->stepping
2062
          && lwp->pending_is_breakpoint)
2063
        step = 1;
2064
      else
2065
        step = (lwp->resume->kind == resume_step);
2066
 
2067
      linux_resume_one_lwp (lwp, step, lwp->resume->sig, NULL);
2068
    }
2069
  else
2070
    {
2071
      if (debug_threads)
2072
        fprintf (stderr, "leaving LWP %ld stopped\n", lwpid_of (lwp));
2073
 
2074
      /* If we have a new signal, enqueue the signal.  */
2075
      if (lwp->resume->sig != 0)
2076
        {
2077
          struct pending_signals *p_sig;
2078
          p_sig = xmalloc (sizeof (*p_sig));
2079
          p_sig->prev = lwp->pending_signals;
2080
          p_sig->signal = lwp->resume->sig;
2081
          memset (&p_sig->info, 0, sizeof (siginfo_t));
2082
 
2083
          /* If this is the same signal we were previously stopped by,
2084
             make sure to queue its siginfo.  We can ignore the return
2085
             value of ptrace; if it fails, we'll skip
2086
             PTRACE_SETSIGINFO.  */
2087
          if (WIFSTOPPED (lwp->last_status)
2088
              && WSTOPSIG (lwp->last_status) == lwp->resume->sig)
2089
            ptrace (PTRACE_GETSIGINFO, lwpid_of (lwp), 0, &p_sig->info);
2090
 
2091
          lwp->pending_signals = p_sig;
2092
        }
2093
    }
2094
 
2095
  lwp->resume = NULL;
2096
  return 0;
2097
}
2098
 
2099
static void
2100
linux_resume (struct thread_resume *resume_info, size_t n)
2101
{
2102
  int pending_flag;
2103
  struct thread_resume_array array = { resume_info, n };
2104
 
2105
  find_inferior (&all_threads, linux_set_resume_request, &array);
2106
 
2107
  /* If there is a thread which would otherwise be resumed, which
2108
     has a pending status, then don't resume any threads - we can just
2109
     report the pending status.  Make sure to queue any signals
2110
     that would otherwise be sent.  In non-stop mode, we'll apply this
2111
     logic to each thread individually.  */
2112
  pending_flag = 0;
2113
  if (!non_stop)
2114
    find_inferior (&all_lwps, resume_status_pending_p, &pending_flag);
2115
 
2116
  if (debug_threads)
2117
    {
2118
      if (pending_flag)
2119
        fprintf (stderr, "Not resuming, pending status\n");
2120
      else
2121
        fprintf (stderr, "Resuming, no pending status\n");
2122
    }
2123
 
2124
  find_inferior (&all_threads, linux_resume_one_thread, &pending_flag);
2125
}
2126
 
2127
#ifdef HAVE_LINUX_USRREGS
2128
 
2129
int
2130
register_addr (int regnum)
2131
{
2132
  int addr;
2133
 
2134
  if (regnum < 0 || regnum >= the_low_target.num_regs)
2135
    error ("Invalid register number %d.", regnum);
2136
 
2137
  addr = the_low_target.regmap[regnum];
2138
 
2139
  return addr;
2140
}
2141
 
2142
/* Fetch one register.  */
2143
static void
2144
fetch_register (struct regcache *regcache, int regno)
2145
{
2146
  CORE_ADDR regaddr;
2147
  int i, size;
2148
  char *buf;
2149
  int pid;
2150
 
2151
  if (regno >= the_low_target.num_regs)
2152
    return;
2153
  if ((*the_low_target.cannot_fetch_register) (regno))
2154
    return;
2155
 
2156
  regaddr = register_addr (regno);
2157
  if (regaddr == -1)
2158
    return;
2159
 
2160
  pid = lwpid_of (get_thread_lwp (current_inferior));
2161
  size = ((register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2162
          & - sizeof (PTRACE_XFER_TYPE));
2163
  buf = alloca (size);
2164
  for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2165
    {
2166
      errno = 0;
2167
      *(PTRACE_XFER_TYPE *) (buf + i) =
2168
        ptrace (PTRACE_PEEKUSER, pid,
2169
                /* Coerce to a uintptr_t first to avoid potential gcc warning
2170
                   of coercing an 8 byte integer to a 4 byte pointer.  */
2171
                (PTRACE_ARG3_TYPE) (uintptr_t) regaddr, 0);
2172
      regaddr += sizeof (PTRACE_XFER_TYPE);
2173
      if (errno != 0)
2174
        {
2175
          /* Warning, not error, in case we are attached; sometimes the
2176
             kernel doesn't let us at the registers.  */
2177
          char *err = strerror (errno);
2178
          char *msg = alloca (strlen (err) + 128);
2179
          sprintf (msg, "reading register %d: %s", regno, err);
2180
          error ("%s", msg);
2181
          goto error_exit;
2182
        }
2183
    }
2184
 
2185
  if (the_low_target.supply_ptrace_register)
2186
    the_low_target.supply_ptrace_register (regcache, regno, buf);
2187
  else
2188
    supply_register (regcache, regno, buf);
2189
 
2190
error_exit:;
2191
}
2192
 
2193
/* Fetch all registers, or just one, from the child process.  */
2194
static void
2195
usr_fetch_inferior_registers (struct regcache *regcache, int regno)
2196
{
2197
  if (regno == -1)
2198
    for (regno = 0; regno < the_low_target.num_regs; regno++)
2199
      fetch_register (regcache, regno);
2200
  else
2201
    fetch_register (regcache, regno);
2202
}
2203
 
2204
/* Store our register values back into the inferior.
2205
   If REGNO is -1, do this for all registers.
2206
   Otherwise, REGNO specifies which register (so we can save time).  */
2207
static void
2208
usr_store_inferior_registers (struct regcache *regcache, int regno)
2209
{
2210
  CORE_ADDR regaddr;
2211
  int i, size;
2212
  char *buf;
2213
  int pid;
2214
 
2215
  if (regno >= 0)
2216
    {
2217
      if (regno >= the_low_target.num_regs)
2218
        return;
2219
 
2220
      if ((*the_low_target.cannot_store_register) (regno) == 1)
2221
        return;
2222
 
2223
      regaddr = register_addr (regno);
2224
      if (regaddr == -1)
2225
        return;
2226
      errno = 0;
2227
      size = (register_size (regno) + sizeof (PTRACE_XFER_TYPE) - 1)
2228
             & - sizeof (PTRACE_XFER_TYPE);
2229
      buf = alloca (size);
2230
      memset (buf, 0, size);
2231
 
2232
      if (the_low_target.collect_ptrace_register)
2233
        the_low_target.collect_ptrace_register (regcache, regno, buf);
2234
      else
2235
        collect_register (regcache, regno, buf);
2236
 
2237
      pid = lwpid_of (get_thread_lwp (current_inferior));
2238
      for (i = 0; i < size; i += sizeof (PTRACE_XFER_TYPE))
2239
        {
2240
          errno = 0;
2241
          ptrace (PTRACE_POKEUSER, pid,
2242
                /* Coerce to a uintptr_t first to avoid potential gcc warning
2243
                   about coercing an 8 byte integer to a 4 byte pointer.  */
2244
                  (PTRACE_ARG3_TYPE) (uintptr_t) regaddr,
2245
                  (PTRACE_ARG4_TYPE) *(PTRACE_XFER_TYPE *) (buf + i));
2246
          if (errno != 0)
2247
            {
2248
              /* At this point, ESRCH should mean the process is
2249
                 already gone, in which case we simply ignore attempts
2250
                 to change its registers.  See also the related
2251
                 comment in linux_resume_one_lwp.  */
2252
              if (errno == ESRCH)
2253
                return;
2254
 
2255
              if ((*the_low_target.cannot_store_register) (regno) == 0)
2256
                {
2257
                  char *err = strerror (errno);
2258
                  char *msg = alloca (strlen (err) + 128);
2259
                  sprintf (msg, "writing register %d: %s",
2260
                           regno, err);
2261
                  error ("%s", msg);
2262
                  return;
2263
                }
2264
            }
2265
          regaddr += sizeof (PTRACE_XFER_TYPE);
2266
        }
2267
    }
2268
  else
2269
    for (regno = 0; regno < the_low_target.num_regs; regno++)
2270
      usr_store_inferior_registers (regcache, regno);
2271
}
2272
#endif /* HAVE_LINUX_USRREGS */
2273
 
2274
 
2275
 
2276
#ifdef HAVE_LINUX_REGSETS
2277
 
2278
static int
2279
regsets_fetch_inferior_registers (struct regcache *regcache)
2280
{
2281
  struct regset_info *regset;
2282
  int saw_general_regs = 0;
2283
  int pid;
2284
 
2285
  regset = target_regsets;
2286
 
2287
  pid = lwpid_of (get_thread_lwp (current_inferior));
2288
  while (regset->size >= 0)
2289
    {
2290
      void *buf;
2291
      int res;
2292
 
2293
      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2294
        {
2295
          regset ++;
2296
          continue;
2297
        }
2298
 
2299
      buf = xmalloc (regset->size);
2300
#ifndef __sparc__
2301
      res = ptrace (regset->get_request, pid, 0, buf);
2302
#else
2303
      res = ptrace (regset->get_request, pid, buf, 0);
2304
#endif
2305
      if (res < 0)
2306
        {
2307
          if (errno == EIO)
2308
            {
2309
              /* If we get EIO on a regset, do not try it again for
2310
                 this process.  */
2311
              disabled_regsets[regset - target_regsets] = 1;
2312
              free (buf);
2313
              continue;
2314
            }
2315
          else
2316
            {
2317
              char s[256];
2318
              sprintf (s, "ptrace(regsets_fetch_inferior_registers) PID=%d",
2319
                       pid);
2320
              perror (s);
2321
            }
2322
        }
2323
      else if (regset->type == GENERAL_REGS)
2324
        saw_general_regs = 1;
2325
      regset->store_function (regcache, buf);
2326
      regset ++;
2327
      free (buf);
2328
    }
2329
  if (saw_general_regs)
2330
    return 0;
2331
  else
2332
    return 1;
2333
}
2334
 
2335
static int
2336
regsets_store_inferior_registers (struct regcache *regcache)
2337
{
2338
  struct regset_info *regset;
2339
  int saw_general_regs = 0;
2340
  int pid;
2341
 
2342
  regset = target_regsets;
2343
 
2344
  pid = lwpid_of (get_thread_lwp (current_inferior));
2345
  while (regset->size >= 0)
2346
    {
2347
      void *buf;
2348
      int res;
2349
 
2350
      if (regset->size == 0 || disabled_regsets[regset - target_regsets])
2351
        {
2352
          regset ++;
2353
          continue;
2354
        }
2355
 
2356
      buf = xmalloc (regset->size);
2357
 
2358
      /* First fill the buffer with the current register set contents,
2359
         in case there are any items in the kernel's regset that are
2360
         not in gdbserver's regcache.  */
2361
#ifndef __sparc__
2362
      res = ptrace (regset->get_request, pid, 0, buf);
2363
#else
2364
      res = ptrace (regset->get_request, pid, buf, 0);
2365
#endif
2366
 
2367
      if (res == 0)
2368
        {
2369
          /* Then overlay our cached registers on that.  */
2370
          regset->fill_function (regcache, buf);
2371
 
2372
          /* Only now do we write the register set.  */
2373
#ifndef __sparc__
2374
          res = ptrace (regset->set_request, pid, 0, buf);
2375
#else
2376
          res = ptrace (regset->set_request, pid, buf, 0);
2377
#endif
2378
        }
2379
 
2380
      if (res < 0)
2381
        {
2382
          if (errno == EIO)
2383
            {
2384
              /* If we get EIO on a regset, do not try it again for
2385
                 this process.  */
2386
              disabled_regsets[regset - target_regsets] = 1;
2387
              free (buf);
2388
              continue;
2389
            }
2390
          else if (errno == ESRCH)
2391
            {
2392
              /* At this point, ESRCH should mean the process is
2393
                 already gone, in which case we simply ignore attempts
2394
                 to change its registers.  See also the related
2395
                 comment in linux_resume_one_lwp.  */
2396
              free (buf);
2397
              return 0;
2398
            }
2399
          else
2400
            {
2401
              perror ("Warning: ptrace(regsets_store_inferior_registers)");
2402
            }
2403
        }
2404
      else if (regset->type == GENERAL_REGS)
2405
        saw_general_regs = 1;
2406
      regset ++;
2407
      free (buf);
2408
    }
2409
  if (saw_general_regs)
2410
    return 0;
2411
  else
2412
    return 1;
2413
  return 0;
2414
}
2415
 
2416
#endif /* HAVE_LINUX_REGSETS */
2417
 
2418
 
2419
void
2420
linux_fetch_registers (struct regcache *regcache, int regno)
2421
{
2422
#ifdef HAVE_LINUX_REGSETS
2423
  if (regsets_fetch_inferior_registers (regcache) == 0)
2424
    return;
2425
#endif
2426
#ifdef HAVE_LINUX_USRREGS
2427
  usr_fetch_inferior_registers (regcache, regno);
2428
#endif
2429
}
2430
 
2431
void
2432
linux_store_registers (struct regcache *regcache, int regno)
2433
{
2434
#ifdef HAVE_LINUX_REGSETS
2435
  if (regsets_store_inferior_registers (regcache) == 0)
2436
    return;
2437
#endif
2438
#ifdef HAVE_LINUX_USRREGS
2439
  usr_store_inferior_registers (regcache, regno);
2440
#endif
2441
}
2442
 
2443
 
2444
/* Copy LEN bytes from inferior's memory starting at MEMADDR
2445
   to debugger memory starting at MYADDR.  */
2446
 
2447
static int
2448
linux_read_memory (CORE_ADDR memaddr, unsigned char *myaddr, int len)
2449
{
2450
  register int i;
2451
  /* Round starting address down to longword boundary.  */
2452
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2453
  /* Round ending address up; get number of longwords that makes.  */
2454
  register int count
2455
    = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1)
2456
      / sizeof (PTRACE_XFER_TYPE);
2457
  /* Allocate buffer of that many longwords.  */
2458
  register PTRACE_XFER_TYPE *buffer
2459
    = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2460
  int fd;
2461
  char filename[64];
2462
  int pid = lwpid_of (get_thread_lwp (current_inferior));
2463
 
2464
  /* Try using /proc.  Don't bother for one word.  */
2465
  if (len >= 3 * sizeof (long))
2466
    {
2467
      /* We could keep this file open and cache it - possibly one per
2468
         thread.  That requires some juggling, but is even faster.  */
2469
      sprintf (filename, "/proc/%d/mem", pid);
2470
      fd = open (filename, O_RDONLY | O_LARGEFILE);
2471
      if (fd == -1)
2472
        goto no_proc;
2473
 
2474
      /* If pread64 is available, use it.  It's faster if the kernel
2475
         supports it (only one syscall), and it's 64-bit safe even on
2476
         32-bit platforms (for instance, SPARC debugging a SPARC64
2477
         application).  */
2478
#ifdef HAVE_PREAD64
2479
      if (pread64 (fd, myaddr, len, memaddr) != len)
2480
#else
2481
      if (lseek (fd, memaddr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2482
#endif
2483
        {
2484
          close (fd);
2485
          goto no_proc;
2486
        }
2487
 
2488
      close (fd);
2489
      return 0;
2490
    }
2491
 
2492
 no_proc:
2493
  /* Read all the longwords */
2494
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2495
    {
2496
      errno = 0;
2497
      /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
2498
         about coercing an 8 byte integer to a 4 byte pointer.  */
2499
      buffer[i] = ptrace (PTRACE_PEEKTEXT, pid,
2500
                          (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
2501
      if (errno)
2502
        return errno;
2503
    }
2504
 
2505
  /* Copy appropriate bytes out of the buffer.  */
2506
  memcpy (myaddr,
2507
          (char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)),
2508
          len);
2509
 
2510
  return 0;
2511
}
2512
 
2513
/* Copy LEN bytes of data from debugger memory at MYADDR
2514
   to inferior's memory at MEMADDR.
2515
   On failure (cannot write the inferior)
2516
   returns the value of errno.  */
2517
 
2518
static int
2519
linux_write_memory (CORE_ADDR memaddr, const unsigned char *myaddr, int len)
2520
{
2521
  register int i;
2522
  /* Round starting address down to longword boundary.  */
2523
  register CORE_ADDR addr = memaddr & -(CORE_ADDR) sizeof (PTRACE_XFER_TYPE);
2524
  /* Round ending address up; get number of longwords that makes.  */
2525
  register int count
2526
  = (((memaddr + len) - addr) + sizeof (PTRACE_XFER_TYPE) - 1) / sizeof (PTRACE_XFER_TYPE);
2527
  /* Allocate buffer of that many longwords.  */
2528
  register PTRACE_XFER_TYPE *buffer = (PTRACE_XFER_TYPE *) alloca (count * sizeof (PTRACE_XFER_TYPE));
2529
  int pid = lwpid_of (get_thread_lwp (current_inferior));
2530
 
2531
  if (debug_threads)
2532
    {
2533
      /* Dump up to four bytes.  */
2534
      unsigned int val = * (unsigned int *) myaddr;
2535
      if (len == 1)
2536
        val = val & 0xff;
2537
      else if (len == 2)
2538
        val = val & 0xffff;
2539
      else if (len == 3)
2540
        val = val & 0xffffff;
2541
      fprintf (stderr, "Writing %0*x to 0x%08lx\n", 2 * ((len < 4) ? len : 4),
2542
               val, (long)memaddr);
2543
    }
2544
 
2545
  /* Fill start and end extra bytes of buffer with existing memory data.  */
2546
 
2547
  /* Coerce the 3rd arg to a uintptr_t first to avoid potential gcc warning
2548
     about coercing an 8 byte integer to a 4 byte pointer.  */
2549
  buffer[0] = ptrace (PTRACE_PEEKTEXT, pid,
2550
                      (PTRACE_ARG3_TYPE) (uintptr_t) addr, 0);
2551
 
2552
  if (count > 1)
2553
    {
2554
      buffer[count - 1]
2555
        = ptrace (PTRACE_PEEKTEXT, pid,
2556
                  /* Coerce to a uintptr_t first to avoid potential gcc warning
2557
                     about coercing an 8 byte integer to a 4 byte pointer.  */
2558
                  (PTRACE_ARG3_TYPE) (uintptr_t) (addr + (count - 1)
2559
                                                  * sizeof (PTRACE_XFER_TYPE)),
2560
                  0);
2561
    }
2562
 
2563
  /* Copy data to be written over corresponding part of buffer */
2564
 
2565
  memcpy ((char *) buffer + (memaddr & (sizeof (PTRACE_XFER_TYPE) - 1)), myaddr, len);
2566
 
2567
  /* Write the entire buffer.  */
2568
 
2569
  for (i = 0; i < count; i++, addr += sizeof (PTRACE_XFER_TYPE))
2570
    {
2571
      errno = 0;
2572
      ptrace (PTRACE_POKETEXT, pid,
2573
              /* Coerce to a uintptr_t first to avoid potential gcc warning
2574
                 about coercing an 8 byte integer to a 4 byte pointer.  */
2575
              (PTRACE_ARG3_TYPE) (uintptr_t) addr,
2576
              (PTRACE_ARG4_TYPE) buffer[i]);
2577
      if (errno)
2578
        return errno;
2579
    }
2580
 
2581
  return 0;
2582
}
2583
 
2584
/* Non-zero if the kernel supports PTRACE_O_TRACEFORK.  */
2585
static int linux_supports_tracefork_flag;
2586
 
2587
/* Helper functions for linux_test_for_tracefork, called via clone ().  */
2588
 
2589
static int
2590
linux_tracefork_grandchild (void *arg)
2591
{
2592
  _exit (0);
2593
}
2594
 
2595
#define STACK_SIZE 4096
2596
 
2597
static int
2598
linux_tracefork_child (void *arg)
2599
{
2600
  ptrace (PTRACE_TRACEME, 0, 0, 0);
2601
  kill (getpid (), SIGSTOP);
2602
 
2603
#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
2604
 
2605
  if (fork () == 0)
2606
    linux_tracefork_grandchild (NULL);
2607
 
2608
#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
2609
 
2610
#ifdef __ia64__
2611
  __clone2 (linux_tracefork_grandchild, arg, STACK_SIZE,
2612
            CLONE_VM | SIGCHLD, NULL);
2613
#else
2614
  clone (linux_tracefork_grandchild, arg + STACK_SIZE,
2615
         CLONE_VM | SIGCHLD, NULL);
2616
#endif
2617
 
2618
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
2619
 
2620
  _exit (0);
2621
}
2622
 
2623
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.  Make
2624
   sure that we can enable the option, and that it had the desired
2625
   effect.  */
2626
 
2627
static void
2628
linux_test_for_tracefork (void)
2629
{
2630
  int child_pid, ret, status;
2631
  long second_pid;
2632
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
2633
  char *stack = xmalloc (STACK_SIZE * 4);
2634
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
2635
 
2636
  linux_supports_tracefork_flag = 0;
2637
 
2638
#if !(defined(__UCLIBC__) && defined(HAS_NOMMU))
2639
 
2640
  child_pid = fork ();
2641
  if (child_pid == 0)
2642
    linux_tracefork_child (NULL);
2643
 
2644
#else /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
2645
 
2646
  /* Use CLONE_VM instead of fork, to support uClinux (no MMU).  */
2647
#ifdef __ia64__
2648
  child_pid = __clone2 (linux_tracefork_child, stack, STACK_SIZE,
2649
                        CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2650
#else /* !__ia64__ */
2651
  child_pid = clone (linux_tracefork_child, stack + STACK_SIZE,
2652
                     CLONE_VM | SIGCHLD, stack + STACK_SIZE * 2);
2653
#endif /* !__ia64__ */
2654
 
2655
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
2656
 
2657
  if (child_pid == -1)
2658
    perror_with_name ("clone");
2659
 
2660
  ret = my_waitpid (child_pid, &status, 0);
2661
  if (ret == -1)
2662
    perror_with_name ("waitpid");
2663
  else if (ret != child_pid)
2664
    error ("linux_test_for_tracefork: waitpid: unexpected result %d.", ret);
2665
  if (! WIFSTOPPED (status))
2666
    error ("linux_test_for_tracefork: waitpid: unexpected status %d.", status);
2667
 
2668
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
2669
                (PTRACE_ARG4_TYPE) PTRACE_O_TRACEFORK);
2670
  if (ret != 0)
2671
    {
2672
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2673
      if (ret != 0)
2674
        {
2675
          warning ("linux_test_for_tracefork: failed to kill child");
2676
          return;
2677
        }
2678
 
2679
      ret = my_waitpid (child_pid, &status, 0);
2680
      if (ret != child_pid)
2681
        warning ("linux_test_for_tracefork: failed to wait for killed child");
2682
      else if (!WIFSIGNALED (status))
2683
        warning ("linux_test_for_tracefork: unexpected wait status 0x%x from "
2684
                 "killed child", status);
2685
 
2686
      return;
2687
    }
2688
 
2689
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
2690
  if (ret != 0)
2691
    warning ("linux_test_for_tracefork: failed to resume child");
2692
 
2693
  ret = my_waitpid (child_pid, &status, 0);
2694
 
2695
  if (ret == child_pid && WIFSTOPPED (status)
2696
      && status >> 16 == PTRACE_EVENT_FORK)
2697
    {
2698
      second_pid = 0;
2699
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
2700
      if (ret == 0 && second_pid != 0)
2701
        {
2702
          int second_status;
2703
 
2704
          linux_supports_tracefork_flag = 1;
2705
          my_waitpid (second_pid, &second_status, 0);
2706
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
2707
          if (ret != 0)
2708
            warning ("linux_test_for_tracefork: failed to kill second child");
2709
          my_waitpid (second_pid, &status, 0);
2710
        }
2711
    }
2712
  else
2713
    warning ("linux_test_for_tracefork: unexpected result from waitpid "
2714
             "(%d, status 0x%x)", ret, status);
2715
 
2716
  do
2717
    {
2718
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
2719
      if (ret != 0)
2720
        warning ("linux_test_for_tracefork: failed to kill child");
2721
      my_waitpid (child_pid, &status, 0);
2722
    }
2723
  while (WIFSTOPPED (status));
2724
 
2725
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
2726
  free (stack);
2727
#endif /* defined(__UCLIBC__) && defined(HAS_NOMMU) */
2728
}
2729
 
2730
 
2731
static void
2732
linux_look_up_symbols (void)
2733
{
2734
#ifdef USE_THREAD_DB
2735
  struct process_info *proc = current_process ();
2736
 
2737
  if (proc->private->thread_db != NULL)
2738
    return;
2739
 
2740
  /* If the kernel supports tracing forks then it also supports tracing
2741
     clones, and then we don't need to use the magic thread event breakpoint
2742
     to learn about threads.  */
2743
  thread_db_init (!linux_supports_tracefork_flag);
2744
#endif
2745
}
2746
 
2747
static void
2748
linux_request_interrupt (void)
2749
{
2750
  extern unsigned long signal_pid;
2751
 
2752
  if (!ptid_equal (cont_thread, null_ptid)
2753
      && !ptid_equal (cont_thread, minus_one_ptid))
2754
    {
2755
      struct lwp_info *lwp;
2756
      int lwpid;
2757
 
2758
      lwp = get_thread_lwp (current_inferior);
2759
      lwpid = lwpid_of (lwp);
2760
      kill_lwp (lwpid, SIGINT);
2761
    }
2762
  else
2763
    kill_lwp (signal_pid, SIGINT);
2764
}
2765
 
2766
/* Copy LEN bytes from inferior's auxiliary vector starting at OFFSET
2767
   to debugger memory starting at MYADDR.  */
2768
 
2769
static int
2770
linux_read_auxv (CORE_ADDR offset, unsigned char *myaddr, unsigned int len)
2771
{
2772
  char filename[PATH_MAX];
2773
  int fd, n;
2774
  int pid = lwpid_of (get_thread_lwp (current_inferior));
2775
 
2776
  snprintf (filename, sizeof filename, "/proc/%d/auxv", pid);
2777
 
2778
  fd = open (filename, O_RDONLY);
2779
  if (fd < 0)
2780
    return -1;
2781
 
2782
  if (offset != (CORE_ADDR) 0
2783
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
2784
    n = -1;
2785
  else
2786
    n = read (fd, myaddr, len);
2787
 
2788
  close (fd);
2789
 
2790
  return n;
2791
}
2792
 
2793
/* These breakpoint and watchpoint related wrapper functions simply
2794
   pass on the function call if the target has registered a
2795
   corresponding function.  */
2796
 
2797
static int
2798
linux_insert_point (char type, CORE_ADDR addr, int len)
2799
{
2800
  if (the_low_target.insert_point != NULL)
2801
    return the_low_target.insert_point (type, addr, len);
2802
  else
2803
    /* Unsupported (see target.h).  */
2804
    return 1;
2805
}
2806
 
2807
static int
2808
linux_remove_point (char type, CORE_ADDR addr, int len)
2809
{
2810
  if (the_low_target.remove_point != NULL)
2811
    return the_low_target.remove_point (type, addr, len);
2812
  else
2813
    /* Unsupported (see target.h).  */
2814
    return 1;
2815
}
2816
 
2817
static int
2818
linux_stopped_by_watchpoint (void)
2819
{
2820
  if (the_low_target.stopped_by_watchpoint != NULL)
2821
    return the_low_target.stopped_by_watchpoint ();
2822
  else
2823
    return 0;
2824
}
2825
 
2826
static CORE_ADDR
2827
linux_stopped_data_address (void)
2828
{
2829
  if (the_low_target.stopped_data_address != NULL)
2830
    return the_low_target.stopped_data_address ();
2831
  else
2832
    return 0;
2833
}
2834
 
2835
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
2836
#if defined(__mcoldfire__)
2837
/* These should really be defined in the kernel's ptrace.h header.  */
2838
#define PT_TEXT_ADDR 49*4
2839
#define PT_DATA_ADDR 50*4
2840
#define PT_TEXT_END_ADDR  51*4
2841
#endif
2842
 
2843
/* Under uClinux, programs are loaded at non-zero offsets, which we need
2844
   to tell gdb about.  */
2845
 
2846
static int
2847
linux_read_offsets (CORE_ADDR *text_p, CORE_ADDR *data_p)
2848
{
2849
#if defined(PT_TEXT_ADDR) && defined(PT_DATA_ADDR) && defined(PT_TEXT_END_ADDR)
2850
  unsigned long text, text_end, data;
2851
  int pid = lwpid_of (get_thread_lwp (current_inferior));
2852
 
2853
  errno = 0;
2854
 
2855
  text = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_ADDR, 0);
2856
  text_end = ptrace (PTRACE_PEEKUSER, pid, (long)PT_TEXT_END_ADDR, 0);
2857
  data = ptrace (PTRACE_PEEKUSER, pid, (long)PT_DATA_ADDR, 0);
2858
 
2859
  if (errno == 0)
2860
    {
2861
      /* Both text and data offsets produced at compile-time (and so
2862
         used by gdb) are relative to the beginning of the program,
2863
         with the data segment immediately following the text segment.
2864
         However, the actual runtime layout in memory may put the data
2865
         somewhere else, so when we send gdb a data base-address, we
2866
         use the real data base address and subtract the compile-time
2867
         data base-address from it (which is just the length of the
2868
         text segment).  BSS immediately follows data in both
2869
         cases.  */
2870
      *text_p = text;
2871
      *data_p = data - (text_end - text);
2872
 
2873
      return 1;
2874
    }
2875
#endif
2876
 return 0;
2877
}
2878
#endif
2879
 
2880
static int
2881
compare_ints (const void *xa, const void *xb)
2882
{
2883
  int a = *(const int *)xa;
2884
  int b = *(const int *)xb;
2885
 
2886
  return a - b;
2887
}
2888
 
2889
static int *
2890
unique (int *b, int *e)
2891
{
2892
  int *d = b;
2893
  while (++b != e)
2894
    if (*d != *b)
2895
      *++d = *b;
2896
  return ++d;
2897
}
2898
 
2899
/* Given PID, iterates over all threads in that process.
2900
 
2901
   Information about each thread, in a format suitable for qXfer:osdata:thread
2902
   is printed to BUFFER, if it's not NULL.  BUFFER is assumed to be already
2903
   initialized, and the caller is responsible for finishing and appending '\0'
2904
   to it.
2905
 
2906
   The list of cores that threads are running on is assigned to *CORES, if it
2907
   is not NULL.  If no cores are found, *CORES will be set to NULL.  Caller
2908
   should free *CORES.  */
2909
 
2910
static void
2911
list_threads (int pid, struct buffer *buffer, char **cores)
2912
{
2913
  int count = 0;
2914
  int allocated = 10;
2915
  int *core_numbers = xmalloc (sizeof (int) * allocated);
2916
  char pathname[128];
2917
  DIR *dir;
2918
  struct dirent *dp;
2919
  struct stat statbuf;
2920
 
2921
  sprintf (pathname, "/proc/%d/task", pid);
2922
  if (stat (pathname, &statbuf) == 0 && S_ISDIR (statbuf.st_mode))
2923
    {
2924
      dir = opendir (pathname);
2925
      if (!dir)
2926
        {
2927
          free (core_numbers);
2928
          return;
2929
        }
2930
 
2931
      while ((dp = readdir (dir)) != NULL)
2932
        {
2933
          unsigned long lwp = strtoul (dp->d_name, NULL, 10);
2934
 
2935
          if (lwp != 0)
2936
            {
2937
              unsigned core = linux_core_of_thread (ptid_build (pid, lwp, 0));
2938
 
2939
              if (core != -1)
2940
                {
2941
                  char s[sizeof ("4294967295")];
2942
                  sprintf (s, "%u", core);
2943
 
2944
                  if (count == allocated)
2945
                    {
2946
                      allocated *= 2;
2947
                      core_numbers = realloc (core_numbers,
2948
                                              sizeof (int) * allocated);
2949
                    }
2950
                  core_numbers[count++] = core;
2951
                  if (buffer)
2952
                    buffer_xml_printf (buffer,
2953
                                       "<item>"
2954
                                       "<column name=\"pid\">%d</column>"
2955
                                       "<column name=\"tid\">%s</column>"
2956
                                       "<column name=\"core\">%s</column>"
2957
                                       "</item>", pid, dp->d_name, s);
2958
                }
2959
              else
2960
                {
2961
                  if (buffer)
2962
                    buffer_xml_printf (buffer,
2963
                                       "<item>"
2964
                                       "<column name=\"pid\">%d</column>"
2965
                                       "<column name=\"tid\">%s</column>"
2966
                                       "</item>", pid, dp->d_name);
2967
                }
2968
            }
2969
        }
2970
    }
2971
 
2972
  if (cores)
2973
    {
2974
      *cores = NULL;
2975
      if (count > 0)
2976
        {
2977
          struct buffer buffer2;
2978
          int *b;
2979
          int *e;
2980
          qsort (core_numbers, count, sizeof (int), compare_ints);
2981
 
2982
          /* Remove duplicates. */
2983
          b = core_numbers;
2984
          e = unique (b, core_numbers + count);
2985
 
2986
          buffer_init (&buffer2);
2987
 
2988
          for (b = core_numbers; b != e; ++b)
2989
            {
2990
              char number[sizeof ("4294967295")];
2991
              sprintf (number, "%u", *b);
2992
              buffer_xml_printf (&buffer2, "%s%s",
2993
                                 (b == core_numbers) ? "" : ",", number);
2994
            }
2995
          buffer_grow_str0 (&buffer2, "");
2996
 
2997
          *cores = buffer_finish (&buffer2);
2998
        }
2999
    }
3000
  free (core_numbers);
3001
}
3002
 
3003
static void
3004
show_process (int pid, const char *username, struct buffer *buffer)
3005
{
3006
  char pathname[128];
3007
  FILE *f;
3008
  char cmd[MAXPATHLEN + 1];
3009
 
3010
  sprintf (pathname, "/proc/%d/cmdline", pid);
3011
 
3012
  if ((f = fopen (pathname, "r")) != NULL)
3013
    {
3014
      size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
3015
      if (len > 0)
3016
        {
3017
          char *cores = 0;
3018
          int i;
3019
          for (i = 0; i < len; i++)
3020
            if (cmd[i] == '\0')
3021
              cmd[i] = ' ';
3022
          cmd[len] = '\0';
3023
 
3024
          buffer_xml_printf (buffer,
3025
                             "<item>"
3026
                             "<column name=\"pid\">%d</column>"
3027
                             "<column name=\"user\">%s</column>"
3028
                             "<column name=\"command\">%s</column>",
3029
                             pid,
3030
                             username,
3031
                             cmd);
3032
 
3033
          /* This only collects core numbers, and does not print threads.  */
3034
          list_threads (pid, NULL, &cores);
3035
 
3036
          if (cores)
3037
            {
3038
              buffer_xml_printf (buffer,
3039
                                 "<column name=\"cores\">%s</column>", cores);
3040
              free (cores);
3041
            }
3042
 
3043
          buffer_xml_printf (buffer, "</item>");
3044
        }
3045
      fclose (f);
3046
    }
3047
}
3048
 
3049
static int
3050
linux_qxfer_osdata (const char *annex,
3051
                    unsigned char *readbuf, unsigned const char *writebuf,
3052
                    CORE_ADDR offset, int len)
3053
{
3054
  /* We make the process list snapshot when the object starts to be
3055
     read.  */
3056
  static const char *buf;
3057
  static long len_avail = -1;
3058
  static struct buffer buffer;
3059
  int processes = 0;
3060
  int threads = 0;
3061
 
3062
  DIR *dirp;
3063
 
3064
  if (strcmp (annex, "processes") == 0)
3065
    processes = 1;
3066
  else if (strcmp (annex, "threads") == 0)
3067
    threads = 1;
3068
  else
3069
    return 0;
3070
 
3071
  if (!readbuf || writebuf)
3072
    return 0;
3073
 
3074
  if (offset == 0)
3075
    {
3076
      if (len_avail != -1 && len_avail != 0)
3077
       buffer_free (&buffer);
3078
      len_avail = 0;
3079
      buf = NULL;
3080
      buffer_init (&buffer);
3081
      if (processes)
3082
        buffer_grow_str (&buffer, "<osdata type=\"processes\">");
3083
      else if (threads)
3084
        buffer_grow_str (&buffer, "<osdata type=\"threads\">");
3085
 
3086
      dirp = opendir ("/proc");
3087
      if (dirp)
3088
       {
3089
         struct dirent *dp;
3090
         while ((dp = readdir (dirp)) != NULL)
3091
           {
3092
             struct stat statbuf;
3093
             char procentry[sizeof ("/proc/4294967295")];
3094
 
3095
             if (!isdigit (dp->d_name[0])
3096
                 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
3097
               continue;
3098
 
3099
             sprintf (procentry, "/proc/%s", dp->d_name);
3100
             if (stat (procentry, &statbuf) == 0
3101
                 && S_ISDIR (statbuf.st_mode))
3102
               {
3103
                 int pid = (int) strtoul (dp->d_name, NULL, 10);
3104
 
3105
                 if (processes)
3106
                   {
3107
                     struct passwd *entry = getpwuid (statbuf.st_uid);
3108
                     show_process (pid, entry ? entry->pw_name : "?", &buffer);
3109
                   }
3110
                 else if (threads)
3111
                   {
3112
                     list_threads (pid, &buffer, NULL);
3113
                   }
3114
               }
3115
           }
3116
 
3117
         closedir (dirp);
3118
       }
3119
      buffer_grow_str0 (&buffer, "</osdata>\n");
3120
      buf = buffer_finish (&buffer);
3121
      len_avail = strlen (buf);
3122
    }
3123
 
3124
  if (offset >= len_avail)
3125
    {
3126
      /* Done.  Get rid of the data.  */
3127
      buffer_free (&buffer);
3128
      buf = NULL;
3129
      len_avail = 0;
3130
      return 0;
3131
    }
3132
 
3133
  if (len > len_avail - offset)
3134
    len = len_avail - offset;
3135
  memcpy (readbuf, buf + offset, len);
3136
 
3137
  return len;
3138
}
3139
 
3140
/* Convert a native/host siginfo object, into/from the siginfo in the
3141
   layout of the inferiors' architecture.  */
3142
 
3143
static void
3144
siginfo_fixup (struct siginfo *siginfo, void *inf_siginfo, int direction)
3145
{
3146
  int done = 0;
3147
 
3148
  if (the_low_target.siginfo_fixup != NULL)
3149
    done = the_low_target.siginfo_fixup (siginfo, inf_siginfo, direction);
3150
 
3151
  /* If there was no callback, or the callback didn't do anything,
3152
     then just do a straight memcpy.  */
3153
  if (!done)
3154
    {
3155
      if (direction == 1)
3156
        memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3157
      else
3158
        memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3159
    }
3160
}
3161
 
3162
static int
3163
linux_xfer_siginfo (const char *annex, unsigned char *readbuf,
3164
                    unsigned const char *writebuf, CORE_ADDR offset, int len)
3165
{
3166
  int pid;
3167
  struct siginfo siginfo;
3168
  char inf_siginfo[sizeof (struct siginfo)];
3169
 
3170
  if (current_inferior == NULL)
3171
    return -1;
3172
 
3173
  pid = lwpid_of (get_thread_lwp (current_inferior));
3174
 
3175
  if (debug_threads)
3176
    fprintf (stderr, "%s siginfo for lwp %d.\n",
3177
             readbuf != NULL ? "Reading" : "Writing",
3178
             pid);
3179
 
3180
  if (offset > sizeof (siginfo))
3181
    return -1;
3182
 
3183
  if (ptrace (PTRACE_GETSIGINFO, pid, 0, &siginfo) != 0)
3184
    return -1;
3185
 
3186
  /* When GDBSERVER is built as a 64-bit application, ptrace writes into
3187
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
3188
     inferior with a 64-bit GDBSERVER should look the same as debugging it
3189
     with a 32-bit GDBSERVER, we need to convert it.  */
3190
  siginfo_fixup (&siginfo, inf_siginfo, 0);
3191
 
3192
  if (offset + len > sizeof (siginfo))
3193
    len = sizeof (siginfo) - offset;
3194
 
3195
  if (readbuf != NULL)
3196
    memcpy (readbuf, inf_siginfo + offset, len);
3197
  else
3198
    {
3199
      memcpy (inf_siginfo + offset, writebuf, len);
3200
 
3201
      /* Convert back to ptrace layout before flushing it out.  */
3202
      siginfo_fixup (&siginfo, inf_siginfo, 1);
3203
 
3204
      if (ptrace (PTRACE_SETSIGINFO, pid, 0, &siginfo) != 0)
3205
        return -1;
3206
    }
3207
 
3208
  return len;
3209
}
3210
 
3211
/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
3212
   it lets us notice when children change state; and it is the handler
3213
   for the sigsuspend in my_waitpid.  */
3214
 
3215
static void
3216
sigchld_handler (int signo)
3217
{
3218
  int old_errno = errno;
3219
 
3220
  if (debug_threads)
3221
    /* fprintf is not async-signal-safe, so call write directly.  */
3222
    write (2, "sigchld_handler\n", sizeof ("sigchld_handler\n") - 1);
3223
 
3224
  if (target_is_async_p ())
3225
    async_file_mark (); /* trigger a linux_wait */
3226
 
3227
  errno = old_errno;
3228
}
3229
 
3230
static int
3231
linux_supports_non_stop (void)
3232
{
3233
  return 1;
3234
}
3235
 
3236
static int
3237
linux_async (int enable)
3238
{
3239
  int previous = (linux_event_pipe[0] != -1);
3240
 
3241
  if (previous != enable)
3242
    {
3243
      sigset_t mask;
3244
      sigemptyset (&mask);
3245
      sigaddset (&mask, SIGCHLD);
3246
 
3247
      sigprocmask (SIG_BLOCK, &mask, NULL);
3248
 
3249
      if (enable)
3250
        {
3251
          if (pipe (linux_event_pipe) == -1)
3252
            fatal ("creating event pipe failed.");
3253
 
3254
          fcntl (linux_event_pipe[0], F_SETFL, O_NONBLOCK);
3255
          fcntl (linux_event_pipe[1], F_SETFL, O_NONBLOCK);
3256
 
3257
          /* Register the event loop handler.  */
3258
          add_file_handler (linux_event_pipe[0],
3259
                            handle_target_event, NULL);
3260
 
3261
          /* Always trigger a linux_wait.  */
3262
          async_file_mark ();
3263
        }
3264
      else
3265
        {
3266
          delete_file_handler (linux_event_pipe[0]);
3267
 
3268
          close (linux_event_pipe[0]);
3269
          close (linux_event_pipe[1]);
3270
          linux_event_pipe[0] = -1;
3271
          linux_event_pipe[1] = -1;
3272
        }
3273
 
3274
      sigprocmask (SIG_UNBLOCK, &mask, NULL);
3275
    }
3276
 
3277
  return previous;
3278
}
3279
 
3280
static int
3281
linux_start_non_stop (int nonstop)
3282
{
3283
  /* Register or unregister from event-loop accordingly.  */
3284
  linux_async (nonstop);
3285
  return 0;
3286
}
3287
 
3288
static int
3289
linux_supports_multi_process (void)
3290
{
3291
  return 1;
3292
}
3293
 
3294
 
3295
/* Enumerate spufs IDs for process PID.  */
static int
spu_enumerate_spu_ids (long pid, unsigned char *buf, CORE_ADDR offset, int len)
{
  int pos = 0;
  int written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  sprintf (path, "/proc/%ld/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      fd = atoi (entry->d_name);
      if (!fd)
        continue;

      sprintf (path, "/proc/%ld/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
        continue;
      if (!S_ISDIR (st.st_mode))
        continue;

      if (statfs (path, &stfs) != 0)
        continue;
      if (stfs.f_type != SPUFS_MAGIC)
        continue;

      if (pos >= offset && pos + 4 <= offset + len)
        {
          *(unsigned int *)(buf + pos - offset) = fd;
          written += 4;
        }
      pos += 4;
    }

  closedir (dir);
  return written;
}

/* Implements the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  */
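/* An empty ANNEX requests the list of context IDs (see above); a
   non-empty ANNEX names a spufs file relative to /proc/PID/fd, which is
   then read or written directly starting at OFFSET.  */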
static int
linux_qxfer_spu (const char *annex, unsigned char *readbuf,
                 unsigned const char *writebuf,
                 CORE_ADDR offset, int len)
{
  long pid = lwpid_of (get_thread_lwp (current_inferior));
  char buf[128];
  int fd = 0;
  int ret = 0;

  if (!writebuf && !readbuf)
    return -1;

  if (!*annex)
    {
      if (!readbuf)
        return -1;
      else
        return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  sprintf (buf, "/proc/%ld/fd/%s", pid, annex);
  fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
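
/* Return the number of the CPU core that thread PTID last ran on, as
   reported by the "processor" field (field 39) of
   /proc/PID/task/LWP/stat, or -1 if it cannot be determined.  */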
static int
linux_core_of_thread (ptid_t ptid)
{
  char filename[sizeof ("/proc//task//stat")
                 + 2 * 20 /* decimal digits for two numbers, each < 2^64 */
                 + 1];
  FILE *f;
  char *content = NULL;
  char *p;
  char *ts = 0;
  int content_read = 0;
  int i;
  int core;

  sprintf (filename, "/proc/%d/task/%ld/stat",
           ptid_get_pid (ptid), ptid_get_lwp (ptid));
  f = fopen (filename, "r");
  if (!f)
    return -1;

  for (;;)
    {
      int n;
      content = realloc (content, content_read + 1024);
      n = fread (content + content_read, 1, 1024, f);
      content_read += n;
      if (n < 1024)
        {
          content[content_read] = '\0';
          break;
        }
    }

  p = strchr (content, '(');
  p = strchr (p, ')') + 2; /* Skip ")" and the whitespace after it.  */
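
  /* The first token after the command name is field 3 of the stat line
     (the state); skipping 36 more tokens leaves P at field 39, the CPU
     the thread last ran on.  */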
  p = strtok_r (p, " ", &ts);
  for (i = 0; i != 36; ++i)
    p = strtok_r (NULL, " ", &ts);

  if (p == NULL || sscanf (p, "%d", &core) != 1)
    core = -1;

  free (content);
  fclose (f);

  return core;
}

static struct target_ops linux_target_ops = {
  linux_create_inferior,
  linux_attach,
  linux_kill,
  linux_detach,
  linux_join,
  linux_thread_alive,
  linux_resume,
  linux_wait,
  linux_fetch_registers,
  linux_store_registers,
  linux_read_memory,
  linux_write_memory,
  linux_look_up_symbols,
  linux_request_interrupt,
  linux_read_auxv,
  linux_insert_point,
  linux_remove_point,
  linux_stopped_by_watchpoint,
  linux_stopped_data_address,
#if defined(__UCLIBC__) && defined(HAS_NOMMU)
  linux_read_offsets,
#else
  NULL,
#endif
#ifdef USE_THREAD_DB
  thread_db_get_tls_address,
#else
  NULL,
#endif
  linux_qxfer_spu,
  hostio_last_error_from_errno,
  linux_qxfer_osdata,
  linux_xfer_siginfo,
  linux_supports_non_stop,
  linux_async,
  linux_start_non_stop,
  linux_supports_multi_process,
#ifdef USE_THREAD_DB
  thread_db_handle_monitor_command,
#else
  NULL,
#endif
  linux_core_of_thread
};

static void
linux_init_signals (void)
{
  /* FIXME drow/2002-06-09: As above, we should check with LinuxThreads
     to find what the cancel signal actually is.  */
#ifdef __SIGRTMIN /* Bionic doesn't use SIGRTMIN the way glibc does.  */
  signal (__SIGRTMIN+1, SIG_IGN);
#endif
}

void
initialize_low (void)
{
  struct sigaction sigchld_action;
  memset (&sigchld_action, 0, sizeof (sigchld_action));
  set_target_ops (&linux_target_ops);
  set_breakpoint_data (the_low_target.breakpoint,
                       the_low_target.breakpoint_len);
  linux_init_signals ();
  linux_test_for_tracefork ();
#ifdef HAVE_LINUX_REGSETS
  for (num_regsets = 0; target_regsets[num_regsets].size >= 0; num_regsets++)
    ;
  disabled_regsets = xmalloc (num_regsets);
#endif

  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sigchld_action, NULL);
}
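
The SIGCHLD/event-pipe machinery above (sigchld_handler, linux_async, async_file_mark) is an instance of the classic self-pipe pattern. The standalone sketch below is not part of linux-low.c; it uses only POSIX calls (pipe, fcntl, sigaction, select, waitpid) and illustrates the same idea in isolation: the signal handler does nothing but write one byte to a non-blocking pipe, and the main loop treats readability of that pipe as "a child changed state, call waitpid now".

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int event_pipe[2] = { -1, -1 };

/* Async-signal-safe: only write() is called here.  */
static void
demo_sigchld_handler (int signo)
{
  int old_errno = errno;
  (void) write (event_pipe[1], "+", 1);
  errno = old_errno;
}

int
main (void)
{
  struct sigaction sa;
  pid_t child;

  if (pipe (event_pipe) != 0)
    exit (1);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = demo_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  child = fork ();
  if (child == 0)
    {
      sleep (1);                /* Pretend to be the inferior.  */
      _exit (42);
    }

  for (;;)
    {
      fd_set rfds;
      char drain[16];
      int status;

      FD_ZERO (&rfds);
      FD_SET (event_pipe[0], &rfds);

      /* Block in the "event loop" until the pipe becomes readable.  */
      if (select (event_pipe[0] + 1, &rfds, NULL, NULL, NULL) < 0
          && errno != EINTR)
        exit (1);

      /* Drain the pipe, then reap state changes outside the handler.  */
      while (read (event_pipe[0], drain, sizeof (drain)) > 0)
        ;
      if (waitpid (child, &status, WNOHANG) == child)
        {
          printf ("child exited with %d\n", WEXITSTATUS (status));
          return 0;
        }
    }
}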
