2011-03-02 Michael Snyder <msnyder@vmware.com>
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
7b6bb8da
JB
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
3993f6b1
DJ
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
20
21#include "defs.h"
22#include "inferior.h"
23#include "target.h"
d6b0e80f 24#include "gdb_string.h"
3993f6b1 25#include "gdb_wait.h"
d6b0e80f
AC
26#include "gdb_assert.h"
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
ac264b3b 33#include "linux-fork.h"
d6b0e80f
AC
34#include "gdbthread.h"
35#include "gdbcmd.h"
36#include "regcache.h"
4f844a66 37#include "regset.h"
10d6c8cd
DJ
38#include "inf-ptrace.h"
39#include "auxv.h"
dba24537 40#include <sys/param.h> /* for MAXPATHLEN */
1777feb0 41#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
42#include "elf-bfd.h" /* for elfcore_write_* */
43#include "gregset.h" /* for gregset */
44#include "gdbcore.h" /* for get_exec_file */
45#include <ctype.h> /* for isdigit */
1777feb0 46#include "gdbthread.h" /* for struct thread_info etc. */
dba24537
AC
47#include "gdb_stat.h" /* for struct stat */
48#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
49#include "inf-loop.h"
50#include "event-loop.h"
51#include "event-top.h"
07e059b5
VP
52#include <pwd.h>
53#include <sys/types.h>
54#include "gdb_dirent.h"
55#include "xml-support.h"
191c4426 56#include "terminal.h"
efcbbd14 57#include <sys/vfs.h>
6c95b8df 58#include "solib.h"
efcbbd14
UW
59
60#ifndef SPUFS_MAGIC
61#define SPUFS_MAGIC 0x23c9b64e
62#endif
dba24537 63
10568435
JK
64#ifdef HAVE_PERSONALITY
65# include <sys/personality.h>
66# if !HAVE_DECL_ADDR_NO_RANDOMIZE
67# define ADDR_NO_RANDOMIZE 0x0040000
68# endif
69#endif /* HAVE_PERSONALITY */
70
1777feb0 71/* This comment documents high-level logic of this file.
8a77dff3
VP
72
73Waiting for events in sync mode
74===============================
75
76When waiting for an event in a specific thread, we just use waitpid, passing
77the specific pid, and not passing WNOHANG.
78
1777feb0 79When waiting for an event in all threads, waitpid is not quite good. Prior to
8a77dff3 80version 2.4, Linux can either wait for event in main thread, or in secondary
1777feb0 81threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
8a77dff3
VP
82miss an event. The solution is to use non-blocking waitpid, together with
83sigsuspend. First, we use non-blocking waitpid to get an event in the main
1777feb0 84process, if any. Second, we use non-blocking waitpid with the __WCLONED
8a77dff3
VP
85flag to check for events in cloned processes. If nothing is found, we use
86sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
87happened to a child process -- and SIGCHLD will be delivered both for events
88in main debugged process and in cloned processes. As soon as we know there's
3e43a32a
MS
89an event, we get back to calling nonblocking waitpid with and without
90__WCLONED.
8a77dff3
VP
91
92Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
1777feb0 93so that we don't miss a signal. If SIGCHLD arrives in between, when it's
8a77dff3
VP
94blocked, the signal becomes pending and sigsuspend immediately
95notices it and returns.
96
97Waiting for events in async mode
98================================
99
7feb7d06
PA
100In async mode, GDB should always be ready to handle both user input
101and target events, so neither blocking waitpid nor sigsuspend are
102viable options. Instead, we should asynchronously notify the GDB main
103event loop whenever there's an unprocessed event from the target. We
104detect asynchronous target events by handling SIGCHLD signals. To
105notify the event loop about target events, the self-pipe trick is used
106--- a pipe is registered as waitable event source in the event loop,
107the event loop select/poll's on the read end of this pipe (as well on
108other event sources, e.g., stdin), and the SIGCHLD handler writes a
109byte to this pipe. This is more portable than relying on
110pselect/ppoll, since on kernels that lack those syscalls, libc
111emulates them with select/poll+sigprocmask, and that is racy
112(a.k.a. plain broken).
113
114Obviously, if we fail to notify the event loop if there's a target
115event, it's bad. OTOH, if we notify the event loop when there's no
116event from the target, linux_nat_wait will detect that there's no real
117event to report, and return event of type TARGET_WAITKIND_IGNORE.
118This is mostly harmless, but it will waste time and is better avoided.
119
120The main design point is that every time GDB is outside linux-nat.c,
121we have a SIGCHLD handler installed that is called when something
122happens to the target and notifies the GDB event loop. Whenever GDB
123core decides to handle the event, and calls into linux-nat.c, we
124process things as in sync mode, except that the we never block in
125sigsuspend.
126
127While processing an event, we may end up momentarily blocked in
128waitpid calls. Those waitpid calls, while blocking, are guarantied to
129return quickly. E.g., in all-stop mode, before reporting to the core
130that an LWP hit a breakpoint, all LWPs are stopped by sending them
131SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
132Note that this is different from blocking indefinitely waiting for the
133next event --- here, we're already handling an event.
8a77dff3
VP
134
135Use of signals
136==============
137
138We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
139signal is not entirely significant; we just need for a signal to be delivered,
140so that we can intercept it. SIGSTOP's advantage is that it can not be
141blocked. A disadvantage is that it is not a real-time signal, so it can only
142be queued once; we do not keep track of other sources of SIGSTOP.
143
144Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
145use them, because they have special behavior when the signal is generated -
146not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
147kills the entire thread group.
148
149A delivered SIGSTOP would stop the entire thread group, not just the thread we
150tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
151cancel it (by PTRACE_CONT without passing SIGSTOP).
152
153We could use a real-time signal instead. This would solve those problems; we
154could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
155But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
156generates it, and there are races with trying to find a signal that is not
157blocked. */
a0ef4274 158
dba24537
AC
159#ifndef O_LARGEFILE
160#define O_LARGEFILE 0
161#endif
0274a8ce 162
3993f6b1
DJ
163/* If the system headers did not provide the constants, hard-code the normal
164 values. */
165#ifndef PTRACE_EVENT_FORK
166
167#define PTRACE_SETOPTIONS 0x4200
168#define PTRACE_GETEVENTMSG 0x4201
169
1777feb0 170/* Options set using PTRACE_SETOPTIONS. */
3993f6b1
DJ
171#define PTRACE_O_TRACESYSGOOD 0x00000001
172#define PTRACE_O_TRACEFORK 0x00000002
173#define PTRACE_O_TRACEVFORK 0x00000004
174#define PTRACE_O_TRACECLONE 0x00000008
175#define PTRACE_O_TRACEEXEC 0x00000010
9016a515
DJ
176#define PTRACE_O_TRACEVFORKDONE 0x00000020
177#define PTRACE_O_TRACEEXIT 0x00000040
3993f6b1
DJ
178
179/* Wait extended result codes for the above trace options. */
180#define PTRACE_EVENT_FORK 1
181#define PTRACE_EVENT_VFORK 2
182#define PTRACE_EVENT_CLONE 3
183#define PTRACE_EVENT_EXEC 4
c874c7fc 184#define PTRACE_EVENT_VFORK_DONE 5
9016a515 185#define PTRACE_EVENT_EXIT 6
3993f6b1
DJ
186
187#endif /* PTRACE_EVENT_FORK */
188
ca2163eb
PA
189/* Unlike other extended result codes, WSTOPSIG (status) on
190 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
191 instead SIGTRAP with bit 7 set. */
192#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
193
3993f6b1
DJ
194/* We can't always assume that this flag is available, but all systems
195 with the ptrace event handlers also have __WALL, so it's safe to use
196 here. */
197#ifndef __WALL
198#define __WALL 0x40000000 /* Wait for any child. */
199#endif
200
02d3ff8c 201#ifndef PTRACE_GETSIGINFO
1ef18d08
PA
202# define PTRACE_GETSIGINFO 0x4202
203# define PTRACE_SETSIGINFO 0x4203
02d3ff8c
UW
204#endif
205
10d6c8cd
DJ
206/* The single-threaded native GNU/Linux target_ops. We save a pointer for
207 the use of the multi-threaded target. */
208static struct target_ops *linux_ops;
f973ed9c 209static struct target_ops linux_ops_saved;
10d6c8cd 210
9f0bdab8
DJ
211/* The method to call, if any, when a new thread is attached. */
212static void (*linux_nat_new_thread) (ptid_t);
213
5b009018
PA
214/* The method to call, if any, when the siginfo object needs to be
215 converted between the layout returned by ptrace, and the layout in
216 the architecture of the inferior. */
217static int (*linux_nat_siginfo_fixup) (struct siginfo *,
218 gdb_byte *,
219 int);
220
ac264b3b
MS
221/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
222 Called by our to_xfer_partial. */
223static LONGEST (*super_xfer_partial) (struct target_ops *,
224 enum target_object,
225 const char *, gdb_byte *,
226 const gdb_byte *,
10d6c8cd
DJ
227 ULONGEST, LONGEST);
228
d6b0e80f 229static int debug_linux_nat;
920d2a44
AC
230static void
231show_debug_linux_nat (struct ui_file *file, int from_tty,
232 struct cmd_list_element *c, const char *value)
233{
234 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
235 value);
236}
d6b0e80f 237
b84876c2
PA
238static int debug_linux_nat_async = 0;
239static void
240show_debug_linux_nat_async (struct ui_file *file, int from_tty,
241 struct cmd_list_element *c, const char *value)
242{
3e43a32a
MS
243 fprintf_filtered (file,
244 _("Debugging of GNU/Linux async lwp module is %s.\n"),
b84876c2
PA
245 value);
246}
247
10568435
JK
248static int disable_randomization = 1;
249
250static void
251show_disable_randomization (struct ui_file *file, int from_tty,
252 struct cmd_list_element *c, const char *value)
253{
254#ifdef HAVE_PERSONALITY
3e43a32a
MS
255 fprintf_filtered (file,
256 _("Disabling randomization of debuggee's "
257 "virtual address space is %s.\n"),
10568435
JK
258 value);
259#else /* !HAVE_PERSONALITY */
3e43a32a
MS
260 fputs_filtered (_("Disabling randomization of debuggee's "
261 "virtual address space is unsupported on\n"
262 "this platform.\n"), file);
10568435
JK
263#endif /* !HAVE_PERSONALITY */
264}
265
266static void
3e43a32a
MS
267set_disable_randomization (char *args, int from_tty,
268 struct cmd_list_element *c)
10568435
JK
269{
270#ifndef HAVE_PERSONALITY
3e43a32a
MS
271 error (_("Disabling randomization of debuggee's "
272 "virtual address space is unsupported on\n"
273 "this platform."));
10568435
JK
274#endif /* !HAVE_PERSONALITY */
275}
276
ae087d01
DJ
277struct simple_pid_list
278{
279 int pid;
3d799a95 280 int status;
ae087d01
DJ
281 struct simple_pid_list *next;
282};
283struct simple_pid_list *stopped_pids;
284
3993f6b1
DJ
285/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
286 can not be used, 1 if it can. */
287
288static int linux_supports_tracefork_flag = -1;
289
3e43a32a
MS
290/* This variable is a tri-state flag: -1 for unknown, 0 if
291 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
a96d9b2e
SDJ
292
293static int linux_supports_tracesysgood_flag = -1;
294
9016a515
DJ
295/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
296 PTRACE_O_TRACEVFORKDONE. */
297
298static int linux_supports_tracevforkdone_flag = -1;
299
1777feb0 300/* Async mode support. */
b84876c2 301
b84876c2
PA
302/* Zero if the async mode, although enabled, is masked, which means
303 linux_nat_wait should behave as if async mode was off. */
304static int linux_nat_async_mask_value = 1;
305
a96d9b2e
SDJ
306/* Stores the current used ptrace() options. */
307static int current_ptrace_options = 0;
308
b84876c2
PA
309/* The read/write ends of the pipe registered as waitable file in the
310 event loop. */
311static int linux_nat_event_pipe[2] = { -1, -1 };
312
7feb7d06 313/* Flush the event pipe. */
b84876c2 314
7feb7d06
PA
315static void
316async_file_flush (void)
b84876c2 317{
7feb7d06
PA
318 int ret;
319 char buf;
b84876c2 320
7feb7d06 321 do
b84876c2 322 {
7feb7d06 323 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 324 }
7feb7d06 325 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
326}
327
7feb7d06
PA
328/* Put something (anything, doesn't matter what, or how much) in event
329 pipe, so that the select/poll in the event-loop realizes we have
330 something to process. */
252fbfc8 331
b84876c2 332static void
7feb7d06 333async_file_mark (void)
b84876c2 334{
7feb7d06 335 int ret;
b84876c2 336
7feb7d06
PA
337 /* It doesn't really matter what the pipe contains, as long we end
338 up with something in it. Might as well flush the previous
339 left-overs. */
340 async_file_flush ();
b84876c2 341
7feb7d06 342 do
b84876c2 343 {
7feb7d06 344 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 345 }
7feb7d06 346 while (ret == -1 && errno == EINTR);
b84876c2 347
7feb7d06
PA
348 /* Ignore EAGAIN. If the pipe is full, the event loop will already
349 be awakened anyway. */
b84876c2
PA
350}
351
7feb7d06 352static void linux_nat_async (void (*callback)
3e43a32a
MS
353 (enum inferior_event_type event_type,
354 void *context),
7feb7d06
PA
355 void *context);
356static int linux_nat_async_mask (int mask);
357static int kill_lwp (int lwpid, int signo);
358
359static int stop_callback (struct lwp_info *lp, void *data);
360
361static void block_child_signals (sigset_t *prev_mask);
362static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
363
364struct lwp_info;
365static struct lwp_info *add_lwp (ptid_t ptid);
366static void purge_lwp_list (int pid);
367static struct lwp_info *find_lwp_pid (ptid_t ptid);
368
ae087d01
DJ
369\f
370/* Trivial list manipulation functions to keep track of a list of
371 new stopped processes. */
372static void
3d799a95 373add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
374{
375 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 376
ae087d01 377 new_pid->pid = pid;
3d799a95 378 new_pid->status = status;
ae087d01
DJ
379 new_pid->next = *listp;
380 *listp = new_pid;
381}
382
383static int
46a96992 384pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
385{
386 struct simple_pid_list **p;
387
388 for (p = listp; *p != NULL; p = &(*p)->next)
389 if ((*p)->pid == pid)
390 {
391 struct simple_pid_list *next = (*p)->next;
e0881a8e 392
46a96992 393 *statusp = (*p)->status;
ae087d01
DJ
394 xfree (*p);
395 *p = next;
396 return 1;
397 }
398 return 0;
399}
400
3d799a95
DJ
401static void
402linux_record_stopped_pid (int pid, int status)
ae087d01 403{
3d799a95 404 add_to_pid_list (&stopped_pids, pid, status);
ae087d01
DJ
405}
406
3993f6b1
DJ
407\f
408/* A helper function for linux_test_for_tracefork, called after fork (). */
409
410static void
411linux_tracefork_child (void)
412{
3993f6b1
DJ
413 ptrace (PTRACE_TRACEME, 0, 0, 0);
414 kill (getpid (), SIGSTOP);
415 fork ();
48bb3cce 416 _exit (0);
3993f6b1
DJ
417}
418
7feb7d06 419/* Wrapper function for waitpid which handles EINTR. */
b957e937
DJ
420
421static int
46a96992 422my_waitpid (int pid, int *statusp, int flags)
b957e937
DJ
423{
424 int ret;
b84876c2 425
b957e937
DJ
426 do
427 {
46a96992 428 ret = waitpid (pid, statusp, flags);
b957e937
DJ
429 }
430 while (ret == -1 && errno == EINTR);
431
432 return ret;
433}
434
435/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
436
437 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
438 we know that the feature is not available. This may change the tracing
439 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
440
441 However, if it succeeds, we don't know for sure that the feature is
442 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 443 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
444 fork tracing, and let it fork. If the process exits, we assume that we
445 can't use TRACEFORK; if we get the fork notification, and we can extract
446 the new child's PID, then we assume that we can. */
3993f6b1
DJ
447
448static void
b957e937 449linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
450{
451 int child_pid, ret, status;
452 long second_pid;
7feb7d06 453 sigset_t prev_mask;
4c28f408 454
7feb7d06
PA
455 /* We don't want those ptrace calls to be interrupted. */
456 block_child_signals (&prev_mask);
3993f6b1 457
b957e937
DJ
458 linux_supports_tracefork_flag = 0;
459 linux_supports_tracevforkdone_flag = 0;
460
461 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
462 if (ret != 0)
7feb7d06
PA
463 {
464 restore_child_signals_mask (&prev_mask);
465 return;
466 }
b957e937 467
3993f6b1
DJ
468 child_pid = fork ();
469 if (child_pid == -1)
e2e0b3e5 470 perror_with_name (("fork"));
3993f6b1
DJ
471
472 if (child_pid == 0)
473 linux_tracefork_child ();
474
b957e937 475 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 476 if (ret == -1)
e2e0b3e5 477 perror_with_name (("waitpid"));
3993f6b1 478 else if (ret != child_pid)
8a3fe4f8 479 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 480 if (! WIFSTOPPED (status))
3e43a32a
MS
481 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
482 status);
3993f6b1 483
3993f6b1
DJ
484 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
485 if (ret != 0)
486 {
b957e937
DJ
487 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
488 if (ret != 0)
489 {
8a3fe4f8 490 warning (_("linux_test_for_tracefork: failed to kill child"));
7feb7d06 491 restore_child_signals_mask (&prev_mask);
b957e937
DJ
492 return;
493 }
494
495 ret = my_waitpid (child_pid, &status, 0);
496 if (ret != child_pid)
3e43a32a
MS
497 warning (_("linux_test_for_tracefork: failed "
498 "to wait for killed child"));
b957e937 499 else if (!WIFSIGNALED (status))
3e43a32a
MS
500 warning (_("linux_test_for_tracefork: unexpected "
501 "wait status 0x%x from killed child"), status);
b957e937 502
7feb7d06 503 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
504 return;
505 }
506
9016a515
DJ
507 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
508 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
509 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
510 linux_supports_tracevforkdone_flag = (ret == 0);
511
b957e937
DJ
512 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
513 if (ret != 0)
8a3fe4f8 514 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
515
516 ret = my_waitpid (child_pid, &status, 0);
517
3993f6b1
DJ
518 if (ret == child_pid && WIFSTOPPED (status)
519 && status >> 16 == PTRACE_EVENT_FORK)
520 {
521 second_pid = 0;
522 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
523 if (ret == 0 && second_pid != 0)
524 {
525 int second_status;
526
527 linux_supports_tracefork_flag = 1;
b957e937
DJ
528 my_waitpid (second_pid, &second_status, 0);
529 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
530 if (ret != 0)
3e43a32a
MS
531 warning (_("linux_test_for_tracefork: "
532 "failed to kill second child"));
97725dc4 533 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
534 }
535 }
b957e937 536 else
8a3fe4f8
AC
537 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
538 "(%d, status 0x%x)"), ret, status);
3993f6b1 539
b957e937
DJ
540 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
541 if (ret != 0)
8a3fe4f8 542 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 543 my_waitpid (child_pid, &status, 0);
4c28f408 544
7feb7d06 545 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
546}
547
a96d9b2e
SDJ
548/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
549
550 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
551 we know that the feature is not available. This may change the tracing
552 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
553
554static void
555linux_test_for_tracesysgood (int original_pid)
556{
557 int ret;
558 sigset_t prev_mask;
559
560 /* We don't want those ptrace calls to be interrupted. */
561 block_child_signals (&prev_mask);
562
563 linux_supports_tracesysgood_flag = 0;
564
565 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
566 if (ret != 0)
567 goto out;
568
569 linux_supports_tracesysgood_flag = 1;
570out:
571 restore_child_signals_mask (&prev_mask);
572}
573
574/* Determine wether we support PTRACE_O_TRACESYSGOOD option available.
575 This function also sets linux_supports_tracesysgood_flag. */
576
577static int
578linux_supports_tracesysgood (int pid)
579{
580 if (linux_supports_tracesysgood_flag == -1)
581 linux_test_for_tracesysgood (pid);
582 return linux_supports_tracesysgood_flag;
583}
584
3993f6b1
DJ
585/* Return non-zero iff we have tracefork functionality available.
586 This function also sets linux_supports_tracefork_flag. */
587
588static int
b957e937 589linux_supports_tracefork (int pid)
3993f6b1
DJ
590{
591 if (linux_supports_tracefork_flag == -1)
b957e937 592 linux_test_for_tracefork (pid);
3993f6b1
DJ
593 return linux_supports_tracefork_flag;
594}
595
9016a515 596static int
b957e937 597linux_supports_tracevforkdone (int pid)
9016a515
DJ
598{
599 if (linux_supports_tracefork_flag == -1)
b957e937 600 linux_test_for_tracefork (pid);
9016a515
DJ
601 return linux_supports_tracevforkdone_flag;
602}
603
a96d9b2e
SDJ
604static void
605linux_enable_tracesysgood (ptid_t ptid)
606{
607 int pid = ptid_get_lwp (ptid);
608
609 if (pid == 0)
610 pid = ptid_get_pid (ptid);
611
612 if (linux_supports_tracesysgood (pid) == 0)
613 return;
614
615 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
616
617 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
618}
619
3993f6b1 620\f
4de4c07c
DJ
621void
622linux_enable_event_reporting (ptid_t ptid)
623{
d3587048 624 int pid = ptid_get_lwp (ptid);
4de4c07c 625
d3587048
DJ
626 if (pid == 0)
627 pid = ptid_get_pid (ptid);
628
b957e937 629 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
630 return;
631
a96d9b2e
SDJ
632 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
633 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
634
b957e937 635 if (linux_supports_tracevforkdone (pid))
a96d9b2e 636 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
9016a515
DJ
637
638 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
639 read-only process state. */
4de4c07c 640
a96d9b2e 641 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
4de4c07c
DJ
642}
643
6d8fd2b7
UW
644static void
645linux_child_post_attach (int pid)
4de4c07c
DJ
646{
647 linux_enable_event_reporting (pid_to_ptid (pid));
0ec9a092 648 check_for_thread_db ();
a96d9b2e 649 linux_enable_tracesysgood (pid_to_ptid (pid));
4de4c07c
DJ
650}
651
10d6c8cd 652static void
4de4c07c
DJ
653linux_child_post_startup_inferior (ptid_t ptid)
654{
655 linux_enable_event_reporting (ptid);
0ec9a092 656 check_for_thread_db ();
a96d9b2e 657 linux_enable_tracesysgood (ptid);
4de4c07c
DJ
658}
659
6d8fd2b7
UW
660static int
661linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 662{
7feb7d06 663 sigset_t prev_mask;
9016a515 664 int has_vforked;
4de4c07c
DJ
665 int parent_pid, child_pid;
666
7feb7d06 667 block_child_signals (&prev_mask);
b84876c2 668
e58b0e63
PA
669 has_vforked = (inferior_thread ()->pending_follow.kind
670 == TARGET_WAITKIND_VFORKED);
671 parent_pid = ptid_get_lwp (inferior_ptid);
d3587048 672 if (parent_pid == 0)
e58b0e63
PA
673 parent_pid = ptid_get_pid (inferior_ptid);
674 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
4de4c07c 675
2277426b
PA
676 if (!detach_fork)
677 linux_enable_event_reporting (pid_to_ptid (child_pid));
678
6c95b8df
PA
679 if (has_vforked
680 && !non_stop /* Non-stop always resumes both branches. */
681 && (!target_is_async_p () || sync_execution)
682 && !(follow_child || detach_fork || sched_multi))
683 {
684 /* The parent stays blocked inside the vfork syscall until the
685 child execs or exits. If we don't let the child run, then
686 the parent stays blocked. If we're telling the parent to run
687 in the foreground, the user will not be able to ctrl-c to get
688 back the terminal, effectively hanging the debug session. */
ac74f770
MS
689 fprintf_filtered (gdb_stderr, _("\
690Can not resume the parent process over vfork in the foreground while\n\
691holding the child stopped. Try \"set detach-on-fork\" or \
692\"set schedule-multiple\".\n"));
693 /* FIXME output string > 80 columns. */
6c95b8df
PA
694 return 1;
695 }
696
4de4c07c
DJ
697 if (! follow_child)
698 {
6c95b8df 699 struct lwp_info *child_lp = NULL;
4de4c07c 700
1777feb0 701 /* We're already attached to the parent, by default. */
4de4c07c 702
ac264b3b
MS
703 /* Detach new forked process? */
704 if (detach_fork)
f75c00e4 705 {
6c95b8df
PA
706 /* Before detaching from the child, remove all breakpoints
707 from it. If we forked, then this has already been taken
708 care of by infrun.c. If we vforked however, any
709 breakpoint inserted in the parent is visible in the
710 child, even those added while stopped in a vfork
711 catchpoint. This will remove the breakpoints from the
712 parent also, but they'll be reinserted below. */
713 if (has_vforked)
714 {
715 /* keep breakpoints list in sync. */
716 remove_breakpoints_pid (GET_PID (inferior_ptid));
717 }
718
e85a822c 719 if (info_verbose || debug_linux_nat)
ac264b3b
MS
720 {
721 target_terminal_ours ();
722 fprintf_filtered (gdb_stdlog,
3e43a32a
MS
723 "Detaching after fork from "
724 "child process %d.\n",
ac264b3b
MS
725 child_pid);
726 }
4de4c07c 727
ac264b3b
MS
728 ptrace (PTRACE_DETACH, child_pid, 0, 0);
729 }
730 else
731 {
77435e4c 732 struct inferior *parent_inf, *child_inf;
2277426b 733 struct cleanup *old_chain;
7f9f62ba
PA
734
735 /* Add process to GDB's tables. */
77435e4c
PA
736 child_inf = add_inferior (child_pid);
737
e58b0e63 738 parent_inf = current_inferior ();
77435e4c 739 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 740 copy_terminal_info (child_inf, parent_inf);
7f9f62ba 741
2277426b 742 old_chain = save_inferior_ptid ();
6c95b8df 743 save_current_program_space ();
2277426b
PA
744
745 inferior_ptid = ptid_build (child_pid, child_pid, 0);
746 add_thread (inferior_ptid);
6c95b8df
PA
747 child_lp = add_lwp (inferior_ptid);
748 child_lp->stopped = 1;
749 child_lp->resumed = 1;
2277426b 750
6c95b8df
PA
751 /* If this is a vfork child, then the address-space is
752 shared with the parent. */
753 if (has_vforked)
754 {
755 child_inf->pspace = parent_inf->pspace;
756 child_inf->aspace = parent_inf->aspace;
757
758 /* The parent will be frozen until the child is done
759 with the shared region. Keep track of the
760 parent. */
761 child_inf->vfork_parent = parent_inf;
762 child_inf->pending_detach = 0;
763 parent_inf->vfork_child = child_inf;
764 parent_inf->pending_detach = 0;
765 }
766 else
767 {
768 child_inf->aspace = new_address_space ();
769 child_inf->pspace = add_program_space (child_inf->aspace);
770 child_inf->removable = 1;
771 set_current_program_space (child_inf->pspace);
772 clone_program_space (child_inf->pspace, parent_inf->pspace);
773
774 /* Let the shared library layer (solib-svr4) learn about
775 this new process, relocate the cloned exec, pull in
776 shared libraries, and install the solib event
777 breakpoint. If a "cloned-VM" event was propagated
778 better throughout the core, this wouldn't be
779 required. */
268a4a75 780 solib_create_inferior_hook (0);
6c95b8df
PA
781 }
782
783 /* Let the thread_db layer learn about this new process. */
2277426b
PA
784 check_for_thread_db ();
785
786 do_cleanups (old_chain);
ac264b3b 787 }
9016a515
DJ
788
789 if (has_vforked)
790 {
6c95b8df
PA
791 struct lwp_info *lp;
792 struct inferior *parent_inf;
793
794 parent_inf = current_inferior ();
795
796 /* If we detached from the child, then we have to be careful
797 to not insert breakpoints in the parent until the child
798 is done with the shared memory region. However, if we're
799 staying attached to the child, then we can and should
800 insert breakpoints, so that we can debug it. A
801 subsequent child exec or exit is enough to know when does
802 the child stops using the parent's address space. */
803 parent_inf->waiting_for_vfork_done = detach_fork;
56710373 804 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
6c95b8df
PA
805
806 lp = find_lwp_pid (pid_to_ptid (parent_pid));
b957e937
DJ
807 gdb_assert (linux_supports_tracefork_flag >= 0);
808 if (linux_supports_tracevforkdone (0))
9016a515 809 {
6c95b8df
PA
810 if (debug_linux_nat)
811 fprintf_unfiltered (gdb_stdlog,
812 "LCFF: waiting for VFORK_DONE on %d\n",
813 parent_pid);
814
815 lp->stopped = 1;
816 lp->resumed = 1;
9016a515 817
6c95b8df
PA
818 /* We'll handle the VFORK_DONE event like any other
819 event, in target_wait. */
9016a515
DJ
820 }
821 else
822 {
823 /* We can't insert breakpoints until the child has
824 finished with the shared memory region. We need to
825 wait until that happens. Ideal would be to just
826 call:
827 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
828 - waitpid (parent_pid, &status, __WALL);
829 However, most architectures can't handle a syscall
830 being traced on the way out if it wasn't traced on
831 the way in.
832
833 We might also think to loop, continuing the child
834 until it exits or gets a SIGTRAP. One problem is
835 that the child might call ptrace with PTRACE_TRACEME.
836
837 There's no simple and reliable way to figure out when
838 the vforked child will be done with its copy of the
839 shared memory. We could step it out of the syscall,
840 two instructions, let it go, and then single-step the
841 parent once. When we have hardware single-step, this
842 would work; with software single-step it could still
843 be made to work but we'd have to be able to insert
844 single-step breakpoints in the child, and we'd have
845 to insert -just- the single-step breakpoint in the
846 parent. Very awkward.
847
848 In the end, the best we can do is to make sure it
849 runs for a little while. Hopefully it will be out of
850 range of any breakpoints we reinsert. Usually this
851 is only the single-step breakpoint at vfork's return
852 point. */
853
6c95b8df
PA
854 if (debug_linux_nat)
855 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
856 "LCFF: no VFORK_DONE "
857 "support, sleeping a bit\n");
6c95b8df 858
9016a515 859 usleep (10000);
9016a515 860
6c95b8df
PA
861 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
862 and leave it pending. The next linux_nat_resume call
863 will notice a pending event, and bypasses actually
864 resuming the inferior. */
865 lp->status = 0;
866 lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
867 lp->stopped = 0;
868 lp->resumed = 1;
869
870 /* If we're in async mode, need to tell the event loop
871 there's something here to process. */
872 if (target_can_async_p ())
873 async_file_mark ();
874 }
9016a515 875 }
4de4c07c 876 }
3993f6b1 877 else
4de4c07c 878 {
77435e4c 879 struct inferior *parent_inf, *child_inf;
2277426b 880 struct lwp_info *lp;
6c95b8df 881 struct program_space *parent_pspace;
4de4c07c 882
e85a822c 883 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
884 {
885 target_terminal_ours ();
6c95b8df 886 if (has_vforked)
3e43a32a
MS
887 fprintf_filtered (gdb_stdlog,
888 _("Attaching after process %d "
889 "vfork to child process %d.\n"),
6c95b8df
PA
890 parent_pid, child_pid);
891 else
3e43a32a
MS
892 fprintf_filtered (gdb_stdlog,
893 _("Attaching after process %d "
894 "fork to child process %d.\n"),
6c95b8df 895 parent_pid, child_pid);
f75c00e4 896 }
4de4c07c 897
7a7d3353
PA
898 /* Add the new inferior first, so that the target_detach below
899 doesn't unpush the target. */
900
77435e4c
PA
901 child_inf = add_inferior (child_pid);
902
e58b0e63 903 parent_inf = current_inferior ();
77435e4c 904 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 905 copy_terminal_info (child_inf, parent_inf);
7a7d3353 906
6c95b8df 907 parent_pspace = parent_inf->pspace;
9016a515 908
6c95b8df
PA
909 /* If we're vforking, we want to hold on to the parent until the
910 child exits or execs. At child exec or exit time we can
911 remove the old breakpoints from the parent and detach or
912 resume debugging it. Otherwise, detach the parent now; we'll
913 want to reuse it's program/address spaces, but we can't set
914 them to the child before removing breakpoints from the
915 parent, otherwise, the breakpoints module could decide to
916 remove breakpoints from the wrong process (since they'd be
917 assigned to the same address space). */
9016a515
DJ
918
919 if (has_vforked)
7f9f62ba 920 {
6c95b8df
PA
921 gdb_assert (child_inf->vfork_parent == NULL);
922 gdb_assert (parent_inf->vfork_child == NULL);
923 child_inf->vfork_parent = parent_inf;
924 child_inf->pending_detach = 0;
925 parent_inf->vfork_child = child_inf;
926 parent_inf->pending_detach = detach_fork;
927 parent_inf->waiting_for_vfork_done = 0;
ac264b3b 928 }
2277426b 929 else if (detach_fork)
b84876c2 930 target_detach (NULL, 0);
4de4c07c 931
6c95b8df
PA
932 /* Note that the detach above makes PARENT_INF dangling. */
933
934 /* Add the child thread to the appropriate lists, and switch to
935 this new thread, before cloning the program space, and
936 informing the solib layer about this new process. */
937
9f0bdab8 938 inferior_ptid = ptid_build (child_pid, child_pid, 0);
2277426b
PA
939 add_thread (inferior_ptid);
940 lp = add_lwp (inferior_ptid);
941 lp->stopped = 1;
6c95b8df
PA
942 lp->resumed = 1;
943
944 /* If this is a vfork child, then the address-space is shared
945 with the parent. If we detached from the parent, then we can
946 reuse the parent's program/address spaces. */
947 if (has_vforked || detach_fork)
948 {
949 child_inf->pspace = parent_pspace;
950 child_inf->aspace = child_inf->pspace->aspace;
951 }
952 else
953 {
954 child_inf->aspace = new_address_space ();
955 child_inf->pspace = add_program_space (child_inf->aspace);
956 child_inf->removable = 1;
957 set_current_program_space (child_inf->pspace);
958 clone_program_space (child_inf->pspace, parent_pspace);
959
960 /* Let the shared library layer (solib-svr4) learn about
961 this new process, relocate the cloned exec, pull in
962 shared libraries, and install the solib event breakpoint.
963 If a "cloned-VM" event was propagated better throughout
964 the core, this wouldn't be required. */
268a4a75 965 solib_create_inferior_hook (0);
6c95b8df 966 }
ac264b3b 967
6c95b8df 968 /* Let the thread_db layer learn about this new process. */
ef29ce1a 969 check_for_thread_db ();
4de4c07c
DJ
970 }
971
7feb7d06 972 restore_child_signals_mask (&prev_mask);
4de4c07c
DJ
973 return 0;
974}
975
4de4c07c 976\f
77b06cd7 977static int
6d8fd2b7 978linux_child_insert_fork_catchpoint (int pid)
4de4c07c 979{
77b06cd7 980 return !linux_supports_tracefork (pid);
3993f6b1
DJ
981}
982
77b06cd7 983static int
6d8fd2b7 984linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 985{
77b06cd7 986 return !linux_supports_tracefork (pid);
3993f6b1
DJ
987}
988
77b06cd7 989static int
6d8fd2b7 990linux_child_insert_exec_catchpoint (int pid)
3993f6b1 991{
77b06cd7 992 return !linux_supports_tracefork (pid);
3993f6b1
DJ
993}
994
a96d9b2e
SDJ
995static int
996linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
997 int table_size, int *table)
998{
77b06cd7
TJB
999 if (!linux_supports_tracesysgood (pid))
1000 return 1;
1001
a96d9b2e
SDJ
1002 /* On GNU/Linux, we ignore the arguments. It means that we only
1003 enable the syscall catchpoints, but do not disable them.
77b06cd7 1004
a96d9b2e
SDJ
1005 Also, we do not use the `table' information because we do not
1006 filter system calls here. We let GDB do the logic for us. */
1007 return 0;
1008}
1009
d6b0e80f
AC
1010/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
1011 are processes sharing the same VM space. A multi-threaded process
1012 is basically a group of such processes. However, such a grouping
1013 is almost entirely a user-space issue; the kernel doesn't enforce
1014 such a grouping at all (this might change in the future). In
1015 general, we'll rely on the threads library (i.e. the GNU/Linux
1016 Threads library) to provide such a grouping.
1017
1018 It is perfectly well possible to write a multi-threaded application
1019 without the assistance of a threads library, by using the clone
1020 system call directly. This module should be able to give some
1021 rudimentary support for debugging such applications if developers
1022 specify the CLONE_PTRACE flag in the clone system call, and are
1023 using the Linux kernel 2.4 or above.
1024
1025 Note that there are some peculiarities in GNU/Linux that affect
1026 this code:
1027
1028 - In general one should specify the __WCLONE flag to waitpid in
1029 order to make it report events for any of the cloned processes
1030 (and leave it out for the initial process). However, if a cloned
1031 process has exited the exit status is only reported if the
1032 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
1033 we cannot use it since GDB must work on older systems too.
1034
1035 - When a traced, cloned process exits and is waited for by the
1036 debugger, the kernel reassigns it to the original parent and
1037 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
1038 library doesn't notice this, which leads to the "zombie problem":
1039 When debugged a multi-threaded process that spawns a lot of
1040 threads will run out of processes, even if the threads exit,
1041 because the "zombies" stay around. */
1042
1043/* List of known LWPs. */
9f0bdab8 1044struct lwp_info *lwp_list;
d6b0e80f
AC
1045\f
1046
d6b0e80f
AC
1047/* Original signal mask. */
1048static sigset_t normal_mask;
1049
1050/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1051 _initialize_linux_nat. */
1052static sigset_t suspend_mask;
1053
7feb7d06
PA
1054/* Signals to block to make that sigsuspend work. */
1055static sigset_t blocked_mask;
1056
1057/* SIGCHLD action. */
1058struct sigaction sigchld_action;
b84876c2 1059
7feb7d06
PA
1060/* Block child signals (SIGCHLD and linux threads signals), and store
1061 the previous mask in PREV_MASK. */
84e46146 1062
7feb7d06
PA
1063static void
1064block_child_signals (sigset_t *prev_mask)
1065{
1066 /* Make sure SIGCHLD is blocked. */
1067 if (!sigismember (&blocked_mask, SIGCHLD))
1068 sigaddset (&blocked_mask, SIGCHLD);
1069
1070 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1071}
1072
1073/* Restore child signals mask, previously returned by
1074 block_child_signals. */
1075
1076static void
1077restore_child_signals_mask (sigset_t *prev_mask)
1078{
1079 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1080}
d6b0e80f
AC
1081\f
1082
1083/* Prototypes for local functions. */
1084static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 1085static int linux_thread_alive (ptid_t ptid);
6d8fd2b7 1086static char *linux_child_pid_to_exec_file (int pid);
710151dd 1087
d6b0e80f
AC
1088\f
1089/* Convert wait status STATUS to a string. Used for printing debug
1090 messages only. */
1091
1092static char *
1093status_to_str (int status)
1094{
1095 static char buf[64];
1096
1097 if (WIFSTOPPED (status))
206aa767 1098 {
ca2163eb 1099 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
206aa767
DE
1100 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1101 strsignal (SIGTRAP));
1102 else
1103 snprintf (buf, sizeof (buf), "%s (stopped)",
1104 strsignal (WSTOPSIG (status)));
1105 }
d6b0e80f
AC
1106 else if (WIFSIGNALED (status))
1107 snprintf (buf, sizeof (buf), "%s (terminated)",
ba9b2ec3 1108 strsignal (WTERMSIG (status)));
d6b0e80f
AC
1109 else
1110 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1111
1112 return buf;
1113}
1114
d90e17a7
PA
1115/* Remove all LWPs belong to PID from the lwp list. */
1116
1117static void
1118purge_lwp_list (int pid)
1119{
1120 struct lwp_info *lp, *lpprev, *lpnext;
1121
1122 lpprev = NULL;
1123
1124 for (lp = lwp_list; lp; lp = lpnext)
1125 {
1126 lpnext = lp->next;
1127
1128 if (ptid_get_pid (lp->ptid) == pid)
1129 {
1130 if (lp == lwp_list)
1131 lwp_list = lp->next;
1132 else
1133 lpprev->next = lp->next;
1134
1135 xfree (lp);
1136 }
1137 else
1138 lpprev = lp;
1139 }
1140}
1141
1142/* Return the number of known LWPs in the tgid given by PID. */
1143
1144static int
1145num_lwps (int pid)
1146{
1147 int count = 0;
1148 struct lwp_info *lp;
1149
1150 for (lp = lwp_list; lp; lp = lp->next)
1151 if (ptid_get_pid (lp->ptid) == pid)
1152 count++;
1153
1154 return count;
d6b0e80f
AC
1155}
1156
f973ed9c 1157/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
1158 structure describing the new LWP. The LWP should already be stopped
1159 (with an exception for the very first LWP). */
d6b0e80f
AC
1160
1161static struct lwp_info *
1162add_lwp (ptid_t ptid)
1163{
1164 struct lwp_info *lp;
1165
1166 gdb_assert (is_lwp (ptid));
1167
1168 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1169
1170 memset (lp, 0, sizeof (struct lwp_info));
1171
1172 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1173
1174 lp->ptid = ptid;
dc146f7c 1175 lp->core = -1;
d6b0e80f
AC
1176
1177 lp->next = lwp_list;
1178 lwp_list = lp;
d6b0e80f 1179
d90e17a7 1180 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
9f0bdab8
DJ
1181 linux_nat_new_thread (ptid);
1182
d6b0e80f
AC
1183 return lp;
1184}
1185
1186/* Remove the LWP specified by PID from the list. */
1187
1188static void
1189delete_lwp (ptid_t ptid)
1190{
1191 struct lwp_info *lp, *lpprev;
1192
1193 lpprev = NULL;
1194
1195 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1196 if (ptid_equal (lp->ptid, ptid))
1197 break;
1198
1199 if (!lp)
1200 return;
1201
d6b0e80f
AC
1202 if (lpprev)
1203 lpprev->next = lp->next;
1204 else
1205 lwp_list = lp->next;
1206
1207 xfree (lp);
1208}
1209
1210/* Return a pointer to the structure describing the LWP corresponding
1211 to PID. If no corresponding LWP could be found, return NULL. */
1212
1213static struct lwp_info *
1214find_lwp_pid (ptid_t ptid)
1215{
1216 struct lwp_info *lp;
1217 int lwp;
1218
1219 if (is_lwp (ptid))
1220 lwp = GET_LWP (ptid);
1221 else
1222 lwp = GET_PID (ptid);
1223
1224 for (lp = lwp_list; lp; lp = lp->next)
1225 if (lwp == GET_LWP (lp->ptid))
1226 return lp;
1227
1228 return NULL;
1229}
1230
1231/* Call CALLBACK with its second argument set to DATA for every LWP in
1232 the list. If CALLBACK returns 1 for a particular LWP, return a
1233 pointer to the structure describing that LWP immediately.
1234 Otherwise return NULL. */
1235
1236struct lwp_info *
d90e17a7
PA
1237iterate_over_lwps (ptid_t filter,
1238 int (*callback) (struct lwp_info *, void *),
1239 void *data)
d6b0e80f
AC
1240{
1241 struct lwp_info *lp, *lpnext;
1242
1243 for (lp = lwp_list; lp; lp = lpnext)
1244 {
1245 lpnext = lp->next;
d90e17a7
PA
1246
1247 if (ptid_match (lp->ptid, filter))
1248 {
1249 if ((*callback) (lp, data))
1250 return lp;
1251 }
d6b0e80f
AC
1252 }
1253
1254 return NULL;
1255}
1256
2277426b
PA
1257/* Update our internal state when changing from one checkpoint to
1258 another indicated by NEW_PTID. We can only switch single-threaded
1259 applications, so we only create one new LWP, and the previous list
1260 is discarded. */
f973ed9c
DJ
1261
1262void
1263linux_nat_switch_fork (ptid_t new_ptid)
1264{
1265 struct lwp_info *lp;
1266
2277426b
PA
1267 purge_lwp_list (GET_PID (inferior_ptid));
1268
f973ed9c
DJ
1269 lp = add_lwp (new_ptid);
1270 lp->stopped = 1;
e26af52f 1271
2277426b
PA
1272 /* This changes the thread's ptid while preserving the gdb thread
1273 num. Also changes the inferior pid, while preserving the
1274 inferior num. */
1275 thread_change_ptid (inferior_ptid, new_ptid);
1276
1277 /* We've just told GDB core that the thread changed target id, but,
1278 in fact, it really is a different thread, with different register
1279 contents. */
1280 registers_changed ();
e26af52f
DJ
1281}
1282
e26af52f
DJ
1283/* Handle the exit of a single thread LP. */
1284
1285static void
1286exit_lwp (struct lwp_info *lp)
1287{
e09875d4 1288 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1289
1290 if (th)
e26af52f 1291 {
17faa917
DJ
1292 if (print_thread_events)
1293 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1294
4f8d22e3 1295 delete_thread (lp->ptid);
e26af52f
DJ
1296 }
1297
1298 delete_lwp (lp->ptid);
1299}
1300
4d062f1a
PA
1301/* Return an lwp's tgid, found in `/proc/PID/status'. */
1302
1303int
1304linux_proc_get_tgid (int lwpid)
1305{
1306 FILE *status_file;
1307 char buf[100];
1308 int tgid = -1;
1309
1310 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid);
1311 status_file = fopen (buf, "r");
1312 if (status_file != NULL)
1313 {
1314 while (fgets (buf, sizeof (buf), status_file))
1315 {
1316 if (strncmp (buf, "Tgid:", 5) == 0)
1317 {
1318 tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10);
1319 break;
1320 }
1321 }
1322
1323 fclose (status_file);
1324 }
1325
1326 return tgid;
1327}
1328
a0ef4274
DJ
1329/* Detect `T (stopped)' in `/proc/PID/status'.
1330 Other states including `T (tracing stop)' are reported as false. */
1331
1332static int
1333pid_is_stopped (pid_t pid)
1334{
1335 FILE *status_file;
1336 char buf[100];
1337 int retval = 0;
1338
1339 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1340 status_file = fopen (buf, "r");
1341 if (status_file != NULL)
1342 {
1343 int have_state = 0;
1344
1345 while (fgets (buf, sizeof (buf), status_file))
1346 {
1347 if (strncmp (buf, "State:", 6) == 0)
1348 {
1349 have_state = 1;
1350 break;
1351 }
1352 }
1353 if (have_state && strstr (buf, "T (stopped)") != NULL)
1354 retval = 1;
1355 fclose (status_file);
1356 }
1357 return retval;
1358}
1359
1360/* Wait for the LWP specified by LP, which we have just attached to.
1361 Returns a wait status for that LWP, to cache. */
1362
1363static int
1364linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1365 int *signalled)
1366{
1367 pid_t new_pid, pid = GET_LWP (ptid);
1368 int status;
1369
1370 if (pid_is_stopped (pid))
1371 {
1372 if (debug_linux_nat)
1373 fprintf_unfiltered (gdb_stdlog,
1374 "LNPAW: Attaching to a stopped process\n");
1375
1376 /* The process is definitely stopped. It is in a job control
1377 stop, unless the kernel predates the TASK_STOPPED /
1378 TASK_TRACED distinction, in which case it might be in a
1379 ptrace stop. Make sure it is in a ptrace stop; from there we
1380 can kill it, signal it, et cetera.
1381
1382 First make sure there is a pending SIGSTOP. Since we are
1383 already attached, the process can not transition from stopped
1384 to running without a PTRACE_CONT; so we know this signal will
1385 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1386 probably already in the queue (unless this kernel is old
1387 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1388 is not an RT signal, it can only be queued once. */
1389 kill_lwp (pid, SIGSTOP);
1390
1391 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1392 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1393 ptrace (PTRACE_CONT, pid, 0, 0);
1394 }
1395
1396 /* Make sure the initial process is stopped. The user-level threads
1397 layer might want to poke around in the inferior, and that won't
1398 work if things haven't stabilized yet. */
1399 new_pid = my_waitpid (pid, &status, 0);
1400 if (new_pid == -1 && errno == ECHILD)
1401 {
1402 if (first)
1403 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1404
1405 /* Try again with __WCLONE to check cloned processes. */
1406 new_pid = my_waitpid (pid, &status, __WCLONE);
1407 *cloned = 1;
1408 }
1409
dacc9cb2
PP
1410 gdb_assert (pid == new_pid);
1411
1412 if (!WIFSTOPPED (status))
1413 {
1414 /* The pid we tried to attach has apparently just exited. */
1415 if (debug_linux_nat)
1416 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1417 pid, status_to_str (status));
1418 return status;
1419 }
a0ef4274
DJ
1420
1421 if (WSTOPSIG (status) != SIGSTOP)
1422 {
1423 *signalled = 1;
1424 if (debug_linux_nat)
1425 fprintf_unfiltered (gdb_stdlog,
1426 "LNPAW: Received %s after attaching\n",
1427 status_to_str (status));
1428 }
1429
1430 return status;
1431}
1432
1433/* Attach to the LWP specified by PID. Return 0 if successful or -1
1434 if the new LWP could not be attached. */
d6b0e80f 1435
9ee57c33 1436int
93815fbf 1437lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1438{
9ee57c33 1439 struct lwp_info *lp;
7feb7d06 1440 sigset_t prev_mask;
d6b0e80f
AC
1441
1442 gdb_assert (is_lwp (ptid));
1443
7feb7d06 1444 block_child_signals (&prev_mask);
d6b0e80f 1445
9ee57c33 1446 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1447
1448 /* We assume that we're already attached to any LWP that has an id
1449 equal to the overall process id, and to any LWP that is already
1450 in our list of LWPs. If we're not seeing exit events from threads
1451 and we've had PID wraparound since we last tried to stop all threads,
1452 this assumption might be wrong; fortunately, this is very unlikely
1453 to happen. */
9ee57c33 1454 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1455 {
a0ef4274 1456 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1457
1458 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1459 {
1460 /* If we fail to attach to the thread, issue a warning,
1461 but continue. One way this can happen is if thread
e9efe249 1462 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1463 bug may place threads in the thread list and then fail
1464 to create them. */
1465 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1466 safe_strerror (errno));
7feb7d06 1467 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1468 return -1;
1469 }
1470
d6b0e80f
AC
1471 if (debug_linux_nat)
1472 fprintf_unfiltered (gdb_stdlog,
1473 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1474 target_pid_to_str (ptid));
1475
a0ef4274 1476 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2
PP
1477 if (!WIFSTOPPED (status))
1478 return -1;
1479
a0ef4274
DJ
1480 lp = add_lwp (ptid);
1481 lp->stopped = 1;
1482 lp->cloned = cloned;
1483 lp->signalled = signalled;
1484 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1485 {
a0ef4274
DJ
1486 lp->resumed = 1;
1487 lp->status = status;
d6b0e80f
AC
1488 }
1489
a0ef4274 1490 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1491
1492 if (debug_linux_nat)
1493 {
1494 fprintf_unfiltered (gdb_stdlog,
1495 "LLAL: waitpid %s received %s\n",
1496 target_pid_to_str (ptid),
1497 status_to_str (status));
1498 }
1499 }
1500 else
1501 {
1502 /* We assume that the LWP representing the original process is
1503 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1504 that the GNU/linux ptrace layer uses to keep track of
1505 threads. Note that this won't have already been done since
1506 the main thread will have, we assume, been stopped by an
1507 attach from a different layer. */
9ee57c33
DJ
1508 if (lp == NULL)
1509 lp = add_lwp (ptid);
d6b0e80f
AC
1510 lp->stopped = 1;
1511 }
9ee57c33 1512
7feb7d06 1513 restore_child_signals_mask (&prev_mask);
9ee57c33 1514 return 0;
d6b0e80f
AC
1515}
1516
b84876c2 1517static void
136d6dae
VP
1518linux_nat_create_inferior (struct target_ops *ops,
1519 char *exec_file, char *allargs, char **env,
b84876c2
PA
1520 int from_tty)
1521{
10568435
JK
1522#ifdef HAVE_PERSONALITY
1523 int personality_orig = 0, personality_set = 0;
1524#endif /* HAVE_PERSONALITY */
b84876c2
PA
1525
1526 /* The fork_child mechanism is synchronous and calls target_wait, so
1527 we have to mask the async mode. */
1528
10568435
JK
1529#ifdef HAVE_PERSONALITY
1530 if (disable_randomization)
1531 {
1532 errno = 0;
1533 personality_orig = personality (0xffffffff);
1534 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1535 {
1536 personality_set = 1;
1537 personality (personality_orig | ADDR_NO_RANDOMIZE);
1538 }
1539 if (errno != 0 || (personality_set
1540 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1541 warning (_("Error disabling address space randomization: %s"),
1542 safe_strerror (errno));
1543 }
1544#endif /* HAVE_PERSONALITY */
1545
136d6dae 1546 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1547
10568435
JK
1548#ifdef HAVE_PERSONALITY
1549 if (personality_set)
1550 {
1551 errno = 0;
1552 personality (personality_orig);
1553 if (errno != 0)
1554 warning (_("Error restoring address space randomization: %s"),
1555 safe_strerror (errno));
1556 }
1557#endif /* HAVE_PERSONALITY */
b84876c2
PA
1558}
1559
d6b0e80f 1560static void
136d6dae 1561linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1562{
1563 struct lwp_info *lp;
d6b0e80f 1564 int status;
af990527 1565 ptid_t ptid;
d6b0e80f 1566
136d6dae 1567 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1568
af990527
PA
1569 /* The ptrace base target adds the main thread with (pid,0,0)
1570 format. Decorate it with lwp info. */
1571 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1572 thread_change_ptid (inferior_ptid, ptid);
1573
9f0bdab8 1574 /* Add the initial process as the first LWP to the list. */
af990527 1575 lp = add_lwp (ptid);
a0ef4274
DJ
1576
1577 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1578 &lp->signalled);
dacc9cb2
PP
1579 if (!WIFSTOPPED (status))
1580 {
1581 if (WIFEXITED (status))
1582 {
1583 int exit_code = WEXITSTATUS (status);
1584
1585 target_terminal_ours ();
1586 target_mourn_inferior ();
1587 if (exit_code == 0)
1588 error (_("Unable to attach: program exited normally."));
1589 else
1590 error (_("Unable to attach: program exited with code %d."),
1591 exit_code);
1592 }
1593 else if (WIFSIGNALED (status))
1594 {
1595 enum target_signal signo;
1596
1597 target_terminal_ours ();
1598 target_mourn_inferior ();
1599
1600 signo = target_signal_from_host (WTERMSIG (status));
1601 error (_("Unable to attach: program terminated with signal "
1602 "%s, %s."),
1603 target_signal_to_name (signo),
1604 target_signal_to_string (signo));
1605 }
1606
1607 internal_error (__FILE__, __LINE__,
1608 _("unexpected status %d for PID %ld"),
1609 status, (long) GET_LWP (ptid));
1610 }
1611
a0ef4274 1612 lp->stopped = 1;
9f0bdab8 1613
a0ef4274 1614 /* Save the wait status to report later. */
d6b0e80f 1615 lp->resumed = 1;
a0ef4274
DJ
1616 if (debug_linux_nat)
1617 fprintf_unfiltered (gdb_stdlog,
1618 "LNA: waitpid %ld, saving status %s\n",
1619 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1620
7feb7d06
PA
1621 lp->status = status;
1622
1623 if (target_can_async_p ())
1624 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1625}
1626
a0ef4274
DJ
1627/* Get pending status of LP. */
1628static int
1629get_pending_status (struct lwp_info *lp, int *status)
1630{
ca2163eb
PA
1631 enum target_signal signo = TARGET_SIGNAL_0;
1632
1633 /* If we paused threads momentarily, we may have stored pending
1634 events in lp->status or lp->waitstatus (see stop_wait_callback),
1635 and GDB core hasn't seen any signal for those threads.
1636 Otherwise, the last signal reported to the core is found in the
1637 thread object's stop_signal.
1638
1639 There's a corner case that isn't handled here at present. Only
1640 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1641 stop_signal make sense as a real signal to pass to the inferior.
1642 Some catchpoint related events, like
1643 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1644 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1645 those traps are debug API (ptrace in our case) related and
1646 induced; the inferior wouldn't see them if it wasn't being
1647 traced. Hence, we should never pass them to the inferior, even
1648 when set to pass state. Since this corner case isn't handled by
1649 infrun.c when proceeding with a signal, for consistency, neither
1650 do we handle it here (or elsewhere in the file we check for
1651 signal pass state). Normally SIGTRAP isn't set to pass state, so
1652 this is really a corner case. */
1653
1654 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1655 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1656 else if (lp->status)
1657 signo = target_signal_from_host (WSTOPSIG (lp->status));
1658 else if (non_stop && !is_executing (lp->ptid))
1659 {
1660 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1661
16c381f0 1662 signo = tp->suspend.stop_signal;
ca2163eb
PA
1663 }
1664 else if (!non_stop)
a0ef4274 1665 {
ca2163eb
PA
1666 struct target_waitstatus last;
1667 ptid_t last_ptid;
4c28f408 1668
ca2163eb 1669 get_last_target_status (&last_ptid, &last);
4c28f408 1670
ca2163eb
PA
1671 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1672 {
e09875d4 1673 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1674
16c381f0 1675 signo = tp->suspend.stop_signal;
4c28f408 1676 }
ca2163eb 1677 }
4c28f408 1678
ca2163eb 1679 *status = 0;
4c28f408 1680
ca2163eb
PA
1681 if (signo == TARGET_SIGNAL_0)
1682 {
1683 if (debug_linux_nat)
1684 fprintf_unfiltered (gdb_stdlog,
1685 "GPT: lwp %s has no pending signal\n",
1686 target_pid_to_str (lp->ptid));
1687 }
1688 else if (!signal_pass_state (signo))
1689 {
1690 if (debug_linux_nat)
3e43a32a
MS
1691 fprintf_unfiltered (gdb_stdlog,
1692 "GPT: lwp %s had signal %s, "
1693 "but it is in no pass state\n",
ca2163eb
PA
1694 target_pid_to_str (lp->ptid),
1695 target_signal_to_string (signo));
a0ef4274 1696 }
a0ef4274 1697 else
4c28f408 1698 {
ca2163eb
PA
1699 *status = W_STOPCODE (target_signal_to_host (signo));
1700
1701 if (debug_linux_nat)
1702 fprintf_unfiltered (gdb_stdlog,
1703 "GPT: lwp %s has pending signal %s\n",
1704 target_pid_to_str (lp->ptid),
1705 target_signal_to_string (signo));
4c28f408 1706 }
a0ef4274
DJ
1707
1708 return 0;
1709}
1710
d6b0e80f
AC
1711static int
1712detach_callback (struct lwp_info *lp, void *data)
1713{
1714 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1715
1716 if (debug_linux_nat && lp->status)
1717 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1718 strsignal (WSTOPSIG (lp->status)),
1719 target_pid_to_str (lp->ptid));
1720
a0ef4274
DJ
1721 /* If there is a pending SIGSTOP, get rid of it. */
1722 if (lp->signalled)
d6b0e80f 1723 {
d6b0e80f
AC
1724 if (debug_linux_nat)
1725 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1726 "DC: Sending SIGCONT to %s\n",
1727 target_pid_to_str (lp->ptid));
d6b0e80f 1728
a0ef4274 1729 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1730 lp->signalled = 0;
d6b0e80f
AC
1731 }
1732
1733 /* We don't actually detach from the LWP that has an id equal to the
1734 overall process id just yet. */
1735 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1736 {
a0ef4274
DJ
1737 int status = 0;
1738
1739 /* Pass on any pending signal for this LWP. */
1740 get_pending_status (lp, &status);
1741
d6b0e80f
AC
1742 errno = 0;
1743 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1744 WSTOPSIG (status)) < 0)
8a3fe4f8 1745 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1746 safe_strerror (errno));
1747
1748 if (debug_linux_nat)
1749 fprintf_unfiltered (gdb_stdlog,
1750 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1751 target_pid_to_str (lp->ptid),
7feb7d06 1752 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1753
1754 delete_lwp (lp->ptid);
1755 }
1756
1757 return 0;
1758}
1759
1760static void
136d6dae 1761linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1762{
b84876c2 1763 int pid;
a0ef4274 1764 int status;
d90e17a7
PA
1765 struct lwp_info *main_lwp;
1766
1767 pid = GET_PID (inferior_ptid);
a0ef4274 1768
b84876c2
PA
1769 if (target_can_async_p ())
1770 linux_nat_async (NULL, 0);
1771
4c28f408
PA
1772 /* Stop all threads before detaching. ptrace requires that the
1773 thread is stopped to sucessfully detach. */
d90e17a7 1774 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1775 /* ... and wait until all of them have reported back that
1776 they're no longer running. */
d90e17a7 1777 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1778
d90e17a7 1779 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1780
1781 /* Only the initial process should be left right now. */
d90e17a7
PA
1782 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1783
1784 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1785
a0ef4274
DJ
1786 /* Pass on any pending signal for the last LWP. */
1787 if ((args == NULL || *args == '\0')
d90e17a7 1788 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1789 && WIFSTOPPED (status))
1790 {
1791 /* Put the signal number in ARGS so that inf_ptrace_detach will
1792 pass it along with PTRACE_DETACH. */
1793 args = alloca (8);
1794 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1795 if (debug_linux_nat)
1796 fprintf_unfiltered (gdb_stdlog,
1797 "LND: Sending signal %s to %s\n",
1798 args,
1799 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1800 }
1801
d90e17a7 1802 delete_lwp (main_lwp->ptid);
b84876c2 1803
7a7d3353
PA
1804 if (forks_exist_p ())
1805 {
1806 /* Multi-fork case. The current inferior_ptid is being detached
1807 from, but there are other viable forks to debug. Detach from
1808 the current fork, and context-switch to the first
1809 available. */
1810 linux_fork_detach (args, from_tty);
1811
1812 if (non_stop && target_can_async_p ())
1813 target_async (inferior_event_handler, 0);
1814 }
1815 else
1816 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1817}
1818
1819/* Resume LP. */
1820
1821static int
1822resume_callback (struct lwp_info *lp, void *data)
1823{
6c95b8df
PA
1824 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1825
1826 if (lp->stopped && inf->vfork_child != NULL)
1827 {
1828 if (debug_linux_nat)
1829 fprintf_unfiltered (gdb_stdlog,
1830 "RC: Not resuming %s (vfork parent)\n",
1831 target_pid_to_str (lp->ptid));
1832 }
1833 else if (lp->stopped && lp->status == 0)
d6b0e80f 1834 {
d90e17a7
PA
1835 if (debug_linux_nat)
1836 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1837 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
d90e17a7
PA
1838 target_pid_to_str (lp->ptid));
1839
28439f5e
PA
1840 linux_ops->to_resume (linux_ops,
1841 pid_to_ptid (GET_LWP (lp->ptid)),
a289b8f6 1842 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1843 if (debug_linux_nat)
1844 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1845 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
d6b0e80f
AC
1846 target_pid_to_str (lp->ptid));
1847 lp->stopped = 0;
a289b8f6 1848 lp->step = 0;
9f0bdab8 1849 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1850 lp->stopped_by_watchpoint = 0;
d6b0e80f 1851 }
57380f4e 1852 else if (lp->stopped && debug_linux_nat)
3e43a32a
MS
1853 fprintf_unfiltered (gdb_stdlog,
1854 "RC: Not resuming sibling %s (has pending)\n",
57380f4e
DJ
1855 target_pid_to_str (lp->ptid));
1856 else if (debug_linux_nat)
3e43a32a
MS
1857 fprintf_unfiltered (gdb_stdlog,
1858 "RC: Not resuming sibling %s (not stopped)\n",
57380f4e 1859 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1860
1861 return 0;
1862}
1863
1864static int
1865resume_clear_callback (struct lwp_info *lp, void *data)
1866{
1867 lp->resumed = 0;
1868 return 0;
1869}
1870
1871static int
1872resume_set_callback (struct lwp_info *lp, void *data)
1873{
1874 lp->resumed = 1;
1875 return 0;
1876}
1877
1878static void
28439f5e
PA
1879linux_nat_resume (struct target_ops *ops,
1880 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1881{
7feb7d06 1882 sigset_t prev_mask;
d6b0e80f 1883 struct lwp_info *lp;
d90e17a7 1884 int resume_many;
d6b0e80f 1885
76f50ad1
DJ
1886 if (debug_linux_nat)
1887 fprintf_unfiltered (gdb_stdlog,
1888 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1889 step ? "step" : "resume",
1890 target_pid_to_str (ptid),
423ec54c
JK
1891 (signo != TARGET_SIGNAL_0
1892 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1893 target_pid_to_str (inferior_ptid));
1894
7feb7d06 1895 block_child_signals (&prev_mask);
b84876c2 1896
d6b0e80f 1897 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1898 resume_many = (ptid_equal (minus_one_ptid, ptid)
1899 || ptid_is_pid (ptid));
4c28f408 1900
e3e9f5a2
PA
1901 /* Mark the lwps we're resuming as resumed. */
1902 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1903
d90e17a7
PA
1904 /* See if it's the current inferior that should be handled
1905 specially. */
1906 if (resume_many)
1907 lp = find_lwp_pid (inferior_ptid);
1908 else
1909 lp = find_lwp_pid (ptid);
9f0bdab8 1910 gdb_assert (lp != NULL);
d6b0e80f 1911
9f0bdab8
DJ
1912 /* Remember if we're stepping. */
1913 lp->step = step;
d6b0e80f 1914
9f0bdab8
DJ
1915 /* If we have a pending wait status for this thread, there is no
1916 point in resuming the process. But first make sure that
1917 linux_nat_wait won't preemptively handle the event - we
1918 should never take this short-circuit if we are going to
1919 leave LP running, since we have skipped resuming all the
1920 other threads. This bit of code needs to be synchronized
1921 with linux_nat_wait. */
76f50ad1 1922
9f0bdab8
DJ
1923 if (lp->status && WIFSTOPPED (lp->status))
1924 {
423ec54c 1925 enum target_signal saved_signo;
d6b48e9c 1926 struct inferior *inf;
76f50ad1 1927
d90e17a7 1928 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
d6b48e9c
PA
1929 gdb_assert (inf);
1930 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1931
1932 /* Defer to common code if we're gaining control of the
1933 inferior. */
16c381f0 1934 if (inf->control.stop_soon == NO_STOP_QUIETLY
d6b48e9c 1935 && signal_stop_state (saved_signo) == 0
9f0bdab8
DJ
1936 && signal_print_state (saved_signo) == 0
1937 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1938 {
9f0bdab8
DJ
1939 if (debug_linux_nat)
1940 fprintf_unfiltered (gdb_stdlog,
1941 "LLR: Not short circuiting for ignored "
1942 "status 0x%x\n", lp->status);
1943
d6b0e80f
AC
1944 /* FIXME: What should we do if we are supposed to continue
1945 this thread with a signal? */
1946 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1947 signo = saved_signo;
1948 lp->status = 0;
1949 }
1950 }
76f50ad1 1951
6c95b8df 1952 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1953 {
1954 /* FIXME: What should we do if we are supposed to continue
1955 this thread with a signal? */
1956 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1957
9f0bdab8
DJ
1958 if (debug_linux_nat)
1959 fprintf_unfiltered (gdb_stdlog,
1960 "LLR: Short circuiting for status 0x%x\n",
1961 lp->status);
d6b0e80f 1962
7feb7d06
PA
1963 restore_child_signals_mask (&prev_mask);
1964 if (target_can_async_p ())
1965 {
1966 target_async (inferior_event_handler, 0);
1967 /* Tell the event loop we have something to process. */
1968 async_file_mark ();
1969 }
9f0bdab8 1970 return;
d6b0e80f
AC
1971 }
1972
9f0bdab8
DJ
1973 /* Mark LWP as not stopped to prevent it from being continued by
1974 resume_callback. */
1975 lp->stopped = 0;
1976
d90e17a7
PA
1977 if (resume_many)
1978 iterate_over_lwps (ptid, resume_callback, NULL);
1979
1980 /* Convert to something the lower layer understands. */
1981 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1982
28439f5e 1983 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 1984 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1985 lp->stopped_by_watchpoint = 0;
9f0bdab8 1986
d6b0e80f
AC
1987 if (debug_linux_nat)
1988 fprintf_unfiltered (gdb_stdlog,
1989 "LLR: %s %s, %s (resume event thread)\n",
1990 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1991 target_pid_to_str (ptid),
423ec54c
JK
1992 (signo != TARGET_SIGNAL_0
1993 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 1994
7feb7d06 1995 restore_child_signals_mask (&prev_mask);
b84876c2 1996 if (target_can_async_p ())
8ea051c5 1997 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1998}
1999
c5f62d5f 2000/* Send a signal to an LWP. */
d6b0e80f
AC
2001
2002static int
2003kill_lwp (int lwpid, int signo)
2004{
c5f62d5f
DE
2005 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2006 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2007
2008#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2009 {
2010 static int tkill_failed;
2011
2012 if (!tkill_failed)
2013 {
2014 int ret;
2015
2016 errno = 0;
2017 ret = syscall (__NR_tkill, lwpid, signo);
2018 if (errno != ENOSYS)
2019 return ret;
2020 tkill_failed = 1;
2021 }
2022 }
d6b0e80f
AC
2023#endif
2024
2025 return kill (lwpid, signo);
2026}
2027
ca2163eb
PA
2028/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2029 event, check if the core is interested in it: if not, ignore the
2030 event, and keep waiting; otherwise, we need to toggle the LWP's
2031 syscall entry/exit status, since the ptrace event itself doesn't
2032 indicate it, and report the trap to higher layers. */
2033
2034static int
2035linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2036{
2037 struct target_waitstatus *ourstatus = &lp->waitstatus;
2038 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2039 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2040
2041 if (stopping)
2042 {
2043 /* If we're stopping threads, there's a SIGSTOP pending, which
2044 makes it so that the LWP reports an immediate syscall return,
2045 followed by the SIGSTOP. Skip seeing that "return" using
2046 PTRACE_CONT directly, and let stop_wait_callback collect the
2047 SIGSTOP. Later when the thread is resumed, a new syscall
2048 entry event. If we didn't do this (and returned 0), we'd
2049 leave a syscall entry pending, and our caller, by using
2050 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2051 itself. Later, when the user re-resumes this LWP, we'd see
2052 another syscall entry event and we'd mistake it for a return.
2053
2054 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2055 (leaving immediately with LWP->signalled set, without issuing
2056 a PTRACE_CONT), it would still be problematic to leave this
2057 syscall enter pending, as later when the thread is resumed,
2058 it would then see the same syscall exit mentioned above,
2059 followed by the delayed SIGSTOP, while the syscall didn't
2060 actually get to execute. It seems it would be even more
2061 confusing to the user. */
2062
2063 if (debug_linux_nat)
2064 fprintf_unfiltered (gdb_stdlog,
2065 "LHST: ignoring syscall %d "
2066 "for LWP %ld (stopping threads), "
2067 "resuming with PTRACE_CONT for SIGSTOP\n",
2068 syscall_number,
2069 GET_LWP (lp->ptid));
2070
2071 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2072 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2073 return 1;
2074 }
2075
2076 if (catch_syscall_enabled ())
2077 {
2078 /* Always update the entry/return state, even if this particular
2079 syscall isn't interesting to the core now. In async mode,
2080 the user could install a new catchpoint for this syscall
2081 between syscall enter/return, and we'll need to know to
2082 report a syscall return if that happens. */
2083 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2084 ? TARGET_WAITKIND_SYSCALL_RETURN
2085 : TARGET_WAITKIND_SYSCALL_ENTRY);
2086
2087 if (catching_syscall_number (syscall_number))
2088 {
2089 /* Alright, an event to report. */
2090 ourstatus->kind = lp->syscall_state;
2091 ourstatus->value.syscall_number = syscall_number;
2092
2093 if (debug_linux_nat)
2094 fprintf_unfiltered (gdb_stdlog,
2095 "LHST: stopping for %s of syscall %d"
2096 " for LWP %ld\n",
3e43a32a
MS
2097 lp->syscall_state
2098 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2099 ? "entry" : "return",
2100 syscall_number,
2101 GET_LWP (lp->ptid));
2102 return 0;
2103 }
2104
2105 if (debug_linux_nat)
2106 fprintf_unfiltered (gdb_stdlog,
2107 "LHST: ignoring %s of syscall %d "
2108 "for LWP %ld\n",
2109 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2110 ? "entry" : "return",
2111 syscall_number,
2112 GET_LWP (lp->ptid));
2113 }
2114 else
2115 {
2116 /* If we had been syscall tracing, and hence used PT_SYSCALL
2117 before on this LWP, it could happen that the user removes all
2118 syscall catchpoints before we get to process this event.
2119 There are two noteworthy issues here:
2120
2121 - When stopped at a syscall entry event, resuming with
2122 PT_STEP still resumes executing the syscall and reports a
2123 syscall return.
2124
2125 - Only PT_SYSCALL catches syscall enters. If we last
2126 single-stepped this thread, then this event can't be a
2127 syscall enter. If we last single-stepped this thread, this
2128 has to be a syscall exit.
2129
2130 The points above mean that the next resume, be it PT_STEP or
2131 PT_CONTINUE, can not trigger a syscall trace event. */
2132 if (debug_linux_nat)
2133 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2134 "LHST: caught syscall event "
2135 "with no syscall catchpoints."
ca2163eb
PA
2136 " %d for LWP %ld, ignoring\n",
2137 syscall_number,
2138 GET_LWP (lp->ptid));
2139 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2140 }
2141
2142 /* The core isn't interested in this event. For efficiency, avoid
2143 stopping all threads only to have the core resume them all again.
2144 Since we're not stopping threads, if we're still syscall tracing
2145 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2146 subsequent syscall. Simply resume using the inf-ptrace layer,
2147 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2148
2149 /* Note that gdbarch_get_syscall_number may access registers, hence
2150 fill a regcache. */
2151 registers_changed ();
2152 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2153 lp->step, TARGET_SIGNAL_0);
2154 return 1;
2155}
2156
3d799a95
DJ
2157/* Handle a GNU/Linux extended wait response. If we see a clone
2158 event, we need to add the new LWP to our list (and not report the
2159 trap to higher layers). This function returns non-zero if the
2160 event should be ignored and we should wait again. If STOPPING is
2161 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2162
2163static int
3d799a95
DJ
2164linux_handle_extended_wait (struct lwp_info *lp, int status,
2165 int stopping)
d6b0e80f 2166{
3d799a95
DJ
2167 int pid = GET_LWP (lp->ptid);
2168 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2169 int event = status >> 16;
d6b0e80f 2170
3d799a95
DJ
2171 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2172 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2173 {
3d799a95
DJ
2174 unsigned long new_pid;
2175 int ret;
2176
2177 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2178
3d799a95
DJ
2179 /* If we haven't already seen the new PID stop, wait for it now. */
2180 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2181 {
2182 /* The new child has a pending SIGSTOP. We can't affect it until it
2183 hits the SIGSTOP, but we're already attached. */
2184 ret = my_waitpid (new_pid, &status,
2185 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2186 if (ret == -1)
2187 perror_with_name (_("waiting for new child"));
2188 else if (ret != new_pid)
2189 internal_error (__FILE__, __LINE__,
2190 _("wait returned unexpected PID %d"), ret);
2191 else if (!WIFSTOPPED (status))
2192 internal_error (__FILE__, __LINE__,
2193 _("wait returned unexpected status 0x%x"), status);
2194 }
2195
3a3e9ee3 2196 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2197
2277426b
PA
2198 if (event == PTRACE_EVENT_FORK
2199 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2200 {
2277426b
PA
2201 /* Handle checkpointing by linux-fork.c here as a special
2202 case. We don't want the follow-fork-mode or 'catch fork'
2203 to interfere with this. */
2204
2205 /* This won't actually modify the breakpoint list, but will
2206 physically remove the breakpoints from the child. */
2207 detach_breakpoints (new_pid);
2208
2209 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2210 if (!find_fork_pid (new_pid))
2211 add_fork (new_pid);
2277426b
PA
2212
2213 /* Report as spurious, so that infrun doesn't want to follow
2214 this fork. We're actually doing an infcall in
2215 linux-fork.c. */
2216 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2217 linux_enable_event_reporting (pid_to_ptid (new_pid));
2218
2219 /* Report the stop to the core. */
2220 return 0;
2221 }
2222
3d799a95
DJ
2223 if (event == PTRACE_EVENT_FORK)
2224 ourstatus->kind = TARGET_WAITKIND_FORKED;
2225 else if (event == PTRACE_EVENT_VFORK)
2226 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2227 else
3d799a95 2228 {
78768c4a
JK
2229 struct lwp_info *new_lp;
2230
3d799a95 2231 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2232
d90e17a7 2233 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2234 new_lp->cloned = 1;
4c28f408 2235 new_lp->stopped = 1;
d6b0e80f 2236
3d799a95
DJ
2237 if (WSTOPSIG (status) != SIGSTOP)
2238 {
2239 /* This can happen if someone starts sending signals to
2240 the new thread before it gets a chance to run, which
2241 have a lower number than SIGSTOP (e.g. SIGUSR1).
2242 This is an unlikely case, and harder to handle for
2243 fork / vfork than for clone, so we do not try - but
2244 we handle it for clone events here. We'll send
2245 the other signal on to the thread below. */
2246
2247 new_lp->signalled = 1;
2248 }
2249 else
2250 status = 0;
d6b0e80f 2251
4c28f408 2252 if (non_stop)
3d799a95 2253 {
4c28f408
PA
2254 /* Add the new thread to GDB's lists as soon as possible
2255 so that:
2256
2257 1) the frontend doesn't have to wait for a stop to
2258 display them, and,
2259
2260 2) we tag it with the correct running state. */
2261
2262 /* If the thread_db layer is active, let it know about
2263 this new thread, and add it to GDB's list. */
2264 if (!thread_db_attach_lwp (new_lp->ptid))
2265 {
2266 /* We're not using thread_db. Add it to GDB's
2267 list. */
2268 target_post_attach (GET_LWP (new_lp->ptid));
2269 add_thread (new_lp->ptid);
2270 }
2271
2272 if (!stopping)
2273 {
2274 set_running (new_lp->ptid, 1);
2275 set_executing (new_lp->ptid, 1);
2276 }
2277 }
2278
ca2163eb
PA
2279 /* Note the need to use the low target ops to resume, to
2280 handle resuming with PT_SYSCALL if we have syscall
2281 catchpoints. */
4c28f408
PA
2282 if (!stopping)
2283 {
423ec54c 2284 enum target_signal signo;
ca2163eb 2285
4c28f408 2286 new_lp->stopped = 0;
3d799a95 2287 new_lp->resumed = 1;
ca2163eb
PA
2288
2289 signo = (status
2290 ? target_signal_from_host (WSTOPSIG (status))
2291 : TARGET_SIGNAL_0);
2292
2293 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2294 0, signo);
3d799a95 2295 }
ad34eb2f
JK
2296 else
2297 {
2298 if (status != 0)
2299 {
2300 /* We created NEW_LP so it cannot yet contain STATUS. */
2301 gdb_assert (new_lp->status == 0);
2302
2303 /* Save the wait status to report later. */
2304 if (debug_linux_nat)
2305 fprintf_unfiltered (gdb_stdlog,
2306 "LHEW: waitpid of new LWP %ld, "
2307 "saving status %s\n",
2308 (long) GET_LWP (new_lp->ptid),
2309 status_to_str (status));
2310 new_lp->status = status;
2311 }
2312 }
d6b0e80f 2313
3d799a95
DJ
2314 if (debug_linux_nat)
2315 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2316 "LHEW: Got clone event "
2317 "from LWP %ld, resuming\n",
3d799a95 2318 GET_LWP (lp->ptid));
ca2163eb
PA
2319 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2320 0, TARGET_SIGNAL_0);
3d799a95
DJ
2321
2322 return 1;
2323 }
2324
2325 return 0;
d6b0e80f
AC
2326 }
2327
3d799a95
DJ
2328 if (event == PTRACE_EVENT_EXEC)
2329 {
a75724bc
PA
2330 if (debug_linux_nat)
2331 fprintf_unfiltered (gdb_stdlog,
2332 "LHEW: Got exec event from LWP %ld\n",
2333 GET_LWP (lp->ptid));
2334
3d799a95
DJ
2335 ourstatus->kind = TARGET_WAITKIND_EXECD;
2336 ourstatus->value.execd_pathname
6d8fd2b7 2337 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2338
6c95b8df
PA
2339 return 0;
2340 }
2341
2342 if (event == PTRACE_EVENT_VFORK_DONE)
2343 {
2344 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2345 {
6c95b8df 2346 if (debug_linux_nat)
3e43a32a
MS
2347 fprintf_unfiltered (gdb_stdlog,
2348 "LHEW: Got expected PTRACE_EVENT_"
2349 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2350 GET_LWP (lp->ptid));
3d799a95 2351
6c95b8df
PA
2352 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2353 return 0;
3d799a95
DJ
2354 }
2355
6c95b8df 2356 if (debug_linux_nat)
3e43a32a
MS
2357 fprintf_unfiltered (gdb_stdlog,
2358 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2359 "from LWP %ld: resuming\n",
6c95b8df
PA
2360 GET_LWP (lp->ptid));
2361 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2362 return 1;
3d799a95
DJ
2363 }
2364
2365 internal_error (__FILE__, __LINE__,
2366 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2367}
2368
2369/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2370 exited. */
2371
2372static int
2373wait_lwp (struct lwp_info *lp)
2374{
2375 pid_t pid;
2376 int status;
2377 int thread_dead = 0;
2378
2379 gdb_assert (!lp->stopped);
2380 gdb_assert (lp->status == 0);
2381
58aecb61 2382 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
2383 if (pid == -1 && errno == ECHILD)
2384 {
58aecb61 2385 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
2386 if (pid == -1 && errno == ECHILD)
2387 {
2388 /* The thread has previously exited. We need to delete it
2389 now because, for some vendor 2.4 kernels with NPTL
2390 support backported, there won't be an exit event unless
2391 it is the main thread. 2.6 kernels will report an exit
2392 event for each thread that exits, as expected. */
2393 thread_dead = 1;
2394 if (debug_linux_nat)
2395 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2396 target_pid_to_str (lp->ptid));
2397 }
2398 }
2399
2400 if (!thread_dead)
2401 {
2402 gdb_assert (pid == GET_LWP (lp->ptid));
2403
2404 if (debug_linux_nat)
2405 {
2406 fprintf_unfiltered (gdb_stdlog,
2407 "WL: waitpid %s received %s\n",
2408 target_pid_to_str (lp->ptid),
2409 status_to_str (status));
2410 }
2411 }
2412
2413 /* Check if the thread has exited. */
2414 if (WIFEXITED (status) || WIFSIGNALED (status))
2415 {
2416 thread_dead = 1;
2417 if (debug_linux_nat)
2418 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2419 target_pid_to_str (lp->ptid));
2420 }
2421
2422 if (thread_dead)
2423 {
e26af52f 2424 exit_lwp (lp);
d6b0e80f
AC
2425 return 0;
2426 }
2427
2428 gdb_assert (WIFSTOPPED (status));
2429
ca2163eb
PA
2430 /* Handle GNU/Linux's syscall SIGTRAPs. */
2431 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2432 {
2433 /* No longer need the sysgood bit. The ptrace event ends up
2434 recorded in lp->waitstatus if we care for it. We can carry
2435 on handling the event like a regular SIGTRAP from here
2436 on. */
2437 status = W_STOPCODE (SIGTRAP);
2438 if (linux_handle_syscall_trap (lp, 1))
2439 return wait_lwp (lp);
2440 }
2441
d6b0e80f
AC
2442 /* Handle GNU/Linux's extended waitstatus for trace events. */
2443 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2444 {
2445 if (debug_linux_nat)
2446 fprintf_unfiltered (gdb_stdlog,
2447 "WL: Handling extended status 0x%06x\n",
2448 status);
3d799a95 2449 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2450 return wait_lwp (lp);
2451 }
2452
2453 return status;
2454}
2455
9f0bdab8
DJ
2456/* Save the most recent siginfo for LP. This is currently only called
2457 for SIGTRAP; some ports use the si_addr field for
2458 target_stopped_data_address. In the future, it may also be used to
2459 restore the siginfo of requeued signals. */
2460
2461static void
2462save_siginfo (struct lwp_info *lp)
2463{
2464 errno = 0;
2465 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2466 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2467
2468 if (errno != 0)
2469 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2470}
2471
d6b0e80f
AC
2472/* Send a SIGSTOP to LP. */
2473
2474static int
2475stop_callback (struct lwp_info *lp, void *data)
2476{
2477 if (!lp->stopped && !lp->signalled)
2478 {
2479 int ret;
2480
2481 if (debug_linux_nat)
2482 {
2483 fprintf_unfiltered (gdb_stdlog,
2484 "SC: kill %s **<SIGSTOP>**\n",
2485 target_pid_to_str (lp->ptid));
2486 }
2487 errno = 0;
2488 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2489 if (debug_linux_nat)
2490 {
2491 fprintf_unfiltered (gdb_stdlog,
2492 "SC: lwp kill %d %s\n",
2493 ret,
2494 errno ? safe_strerror (errno) : "ERRNO-OK");
2495 }
2496
2497 lp->signalled = 1;
2498 gdb_assert (lp->status == 0);
2499 }
2500
2501 return 0;
2502}
2503
57380f4e 2504/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2505
2506static int
57380f4e
DJ
2507linux_nat_has_pending_sigint (int pid)
2508{
2509 sigset_t pending, blocked, ignored;
57380f4e
DJ
2510
2511 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2512
2513 if (sigismember (&pending, SIGINT)
2514 && !sigismember (&ignored, SIGINT))
2515 return 1;
2516
2517 return 0;
2518}
2519
2520/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2521
2522static int
2523set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2524{
57380f4e
DJ
2525 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2526 flag to consume the next one. */
2527 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2528 && WSTOPSIG (lp->status) == SIGINT)
2529 lp->status = 0;
2530 else
2531 lp->ignore_sigint = 1;
2532
2533 return 0;
2534}
2535
2536/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2537 This function is called after we know the LWP has stopped; if the LWP
2538 stopped before the expected SIGINT was delivered, then it will never have
2539 arrived. Also, if the signal was delivered to a shared queue and consumed
2540 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2541
57380f4e
DJ
2542static void
2543maybe_clear_ignore_sigint (struct lwp_info *lp)
2544{
2545 if (!lp->ignore_sigint)
2546 return;
2547
2548 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2549 {
2550 if (debug_linux_nat)
2551 fprintf_unfiltered (gdb_stdlog,
2552 "MCIS: Clearing bogus flag for %s\n",
2553 target_pid_to_str (lp->ptid));
2554 lp->ignore_sigint = 0;
2555 }
2556}
2557
ebec9a0f
PA
2558/* Fetch the possible triggered data watchpoint info and store it in
2559 LP.
2560
2561 On some archs, like x86, that use debug registers to set
2562 watchpoints, it's possible that the way to know which watched
2563 address trapped, is to check the register that is used to select
2564 which address to watch. Problem is, between setting the watchpoint
2565 and reading back which data address trapped, the user may change
2566 the set of watchpoints, and, as a consequence, GDB changes the
2567 debug registers in the inferior. To avoid reading back a stale
2568 stopped-data-address when that happens, we cache in LP the fact
2569 that a watchpoint trapped, and the corresponding data address, as
2570 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2571 registers meanwhile, we have the cached data we can rely on. */
2572
2573static void
2574save_sigtrap (struct lwp_info *lp)
2575{
2576 struct cleanup *old_chain;
2577
2578 if (linux_ops->to_stopped_by_watchpoint == NULL)
2579 {
2580 lp->stopped_by_watchpoint = 0;
2581 return;
2582 }
2583
2584 old_chain = save_inferior_ptid ();
2585 inferior_ptid = lp->ptid;
2586
2587 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2588
2589 if (lp->stopped_by_watchpoint)
2590 {
2591 if (linux_ops->to_stopped_data_address != NULL)
2592 lp->stopped_data_address_p =
2593 linux_ops->to_stopped_data_address (&current_target,
2594 &lp->stopped_data_address);
2595 else
2596 lp->stopped_data_address_p = 0;
2597 }
2598
2599 do_cleanups (old_chain);
2600}
2601
2602/* See save_sigtrap. */
2603
2604static int
2605linux_nat_stopped_by_watchpoint (void)
2606{
2607 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2608
2609 gdb_assert (lp != NULL);
2610
2611 return lp->stopped_by_watchpoint;
2612}
2613
2614static int
2615linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2616{
2617 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2618
2619 gdb_assert (lp != NULL);
2620
2621 *addr_p = lp->stopped_data_address;
2622
2623 return lp->stopped_data_address_p;
2624}
2625
26ab7092
JK
2626/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2627
2628static int
2629sigtrap_is_event (int status)
2630{
2631 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2632}
2633
2634/* SIGTRAP-like events recognizer. */
2635
2636static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2637
00390b84
JK
2638/* Check for SIGTRAP-like events in LP. */
2639
2640static int
2641linux_nat_lp_status_is_event (struct lwp_info *lp)
2642{
2643 /* We check for lp->waitstatus in addition to lp->status, because we can
2644 have pending process exits recorded in lp->status
2645 and W_EXITCODE(0,0) == 0. We should probably have an additional
2646 lp->status_p flag. */
2647
2648 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2649 && linux_nat_status_is_event (lp->status));
2650}
2651
26ab7092
JK
2652/* Set alternative SIGTRAP-like events recognizer. If
2653 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2654 applied. */
2655
2656void
2657linux_nat_set_status_is_event (struct target_ops *t,
2658 int (*status_is_event) (int status))
2659{
2660 linux_nat_status_is_event = status_is_event;
2661}
2662
57380f4e
DJ
2663/* Wait until LP is stopped. */
2664
2665static int
2666stop_wait_callback (struct lwp_info *lp, void *data)
2667{
6c95b8df
PA
2668 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2669
2670 /* If this is a vfork parent, bail out, it is not going to report
2671 any SIGSTOP until the vfork is done with. */
2672 if (inf->vfork_child != NULL)
2673 return 0;
2674
d6b0e80f
AC
2675 if (!lp->stopped)
2676 {
2677 int status;
2678
2679 status = wait_lwp (lp);
2680 if (status == 0)
2681 return 0;
2682
57380f4e
DJ
2683 if (lp->ignore_sigint && WIFSTOPPED (status)
2684 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2685 {
57380f4e 2686 lp->ignore_sigint = 0;
d6b0e80f
AC
2687
2688 errno = 0;
2689 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2690 if (debug_linux_nat)
2691 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2692 "PTRACE_CONT %s, 0, 0 (%s) "
2693 "(discarding SIGINT)\n",
d6b0e80f
AC
2694 target_pid_to_str (lp->ptid),
2695 errno ? safe_strerror (errno) : "OK");
2696
57380f4e 2697 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2698 }
2699
57380f4e
DJ
2700 maybe_clear_ignore_sigint (lp);
2701
d6b0e80f
AC
2702 if (WSTOPSIG (status) != SIGSTOP)
2703 {
26ab7092 2704 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2705 {
2706 /* If a LWP other than the LWP that we're reporting an
2707 event for has hit a GDB breakpoint (as opposed to
2708 some random trap signal), then just arrange for it to
2709 hit it again later. We don't keep the SIGTRAP status
2710 and don't forward the SIGTRAP signal to the LWP. We
2711 will handle the current event, eventually we will
2712 resume all LWPs, and this one will get its breakpoint
2713 trap again.
2714
2715 If we do not do this, then we run the risk that the
2716 user will delete or disable the breakpoint, but the
2717 thread will have already tripped on it. */
2718
9f0bdab8
DJ
2719 /* Save the trap's siginfo in case we need it later. */
2720 save_siginfo (lp);
2721
ebec9a0f
PA
2722 save_sigtrap (lp);
2723
1777feb0 2724 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2725 errno = 0;
2726 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2727 if (debug_linux_nat)
2728 {
2729 fprintf_unfiltered (gdb_stdlog,
2730 "PTRACE_CONT %s, 0, 0 (%s)\n",
2731 target_pid_to_str (lp->ptid),
2732 errno ? safe_strerror (errno) : "OK");
2733
2734 fprintf_unfiltered (gdb_stdlog,
2735 "SWC: Candidate SIGTRAP event in %s\n",
2736 target_pid_to_str (lp->ptid));
2737 }
710151dd 2738 /* Hold this event/waitstatus while we check to see if
1777feb0 2739 there are any more (we still want to get that SIGSTOP). */
57380f4e 2740 stop_wait_callback (lp, NULL);
710151dd 2741
7feb7d06
PA
2742 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2743 there's another event, throw it back into the
1777feb0 2744 queue. */
7feb7d06 2745 if (lp->status)
710151dd 2746 {
7feb7d06
PA
2747 if (debug_linux_nat)
2748 fprintf_unfiltered (gdb_stdlog,
2749 "SWC: kill %s, %s\n",
2750 target_pid_to_str (lp->ptid),
2751 status_to_str ((int) status));
2752 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2753 }
7feb7d06 2754
1777feb0 2755 /* Save the sigtrap event. */
7feb7d06 2756 lp->status = status;
d6b0e80f
AC
2757 return 0;
2758 }
2759 else
2760 {
2761 /* The thread was stopped with a signal other than
1777feb0 2762 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2763
2764 if (debug_linux_nat)
2765 {
2766 fprintf_unfiltered (gdb_stdlog,
2767 "SWC: Pending event %s in %s\n",
2768 status_to_str ((int) status),
2769 target_pid_to_str (lp->ptid));
2770 }
1777feb0 2771 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2772 errno = 0;
2773 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2774 if (debug_linux_nat)
2775 fprintf_unfiltered (gdb_stdlog,
2776 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2777 target_pid_to_str (lp->ptid),
2778 errno ? safe_strerror (errno) : "OK");
2779
2780 /* Hold this event/waitstatus while we check to see if
1777feb0 2781 there are any more (we still want to get that SIGSTOP). */
57380f4e 2782 stop_wait_callback (lp, NULL);
710151dd
PA
2783
2784 /* If the lp->status field is still empty, use it to
2785 hold this event. If not, then this event must be
2786 returned to the event queue of the LWP. */
7feb7d06 2787 if (lp->status)
d6b0e80f
AC
2788 {
2789 if (debug_linux_nat)
2790 {
2791 fprintf_unfiltered (gdb_stdlog,
2792 "SWC: kill %s, %s\n",
2793 target_pid_to_str (lp->ptid),
2794 status_to_str ((int) status));
2795 }
2796 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2797 }
710151dd
PA
2798 else
2799 lp->status = status;
d6b0e80f
AC
2800 return 0;
2801 }
2802 }
2803 else
2804 {
2805 /* We caught the SIGSTOP that we intended to catch, so
2806 there's no SIGSTOP pending. */
2807 lp->stopped = 1;
2808 lp->signalled = 0;
2809 }
2810 }
2811
2812 return 0;
2813}
2814
d6b0e80f
AC
2815/* Return non-zero if LP has a wait status pending. */
2816
2817static int
2818status_callback (struct lwp_info *lp, void *data)
2819{
2820 /* Only report a pending wait status if we pretend that this has
2821 indeed been resumed. */
ca2163eb
PA
2822 if (!lp->resumed)
2823 return 0;
2824
2825 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2826 {
2827 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2828 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2829 0', so a clean process exit can not be stored pending in
2830 lp->status, it is indistinguishable from
2831 no-pending-status. */
2832 return 1;
2833 }
2834
2835 if (lp->status != 0)
2836 return 1;
2837
2838 return 0;
d6b0e80f
AC
2839}
2840
2841/* Return non-zero if LP isn't stopped. */
2842
2843static int
2844running_callback (struct lwp_info *lp, void *data)
2845{
2846 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2847}
2848
2849/* Count the LWP's that have had events. */
2850
2851static int
2852count_events_callback (struct lwp_info *lp, void *data)
2853{
2854 int *count = data;
2855
2856 gdb_assert (count != NULL);
2857
e09490f1 2858 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2859 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2860 (*count)++;
2861
2862 return 0;
2863}
2864
2865/* Select the LWP (if any) that is currently being single-stepped. */
2866
2867static int
2868select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2869{
2870 if (lp->step && lp->status != 0)
2871 return 1;
2872 else
2873 return 0;
2874}
2875
2876/* Select the Nth LWP that has had a SIGTRAP event. */
2877
2878static int
2879select_event_lwp_callback (struct lwp_info *lp, void *data)
2880{
2881 int *selector = data;
2882
2883 gdb_assert (selector != NULL);
2884
1777feb0 2885 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2886 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2887 if ((*selector)-- == 0)
2888 return 1;
2889
2890 return 0;
2891}
2892
710151dd
PA
2893static int
2894cancel_breakpoint (struct lwp_info *lp)
2895{
2896 /* Arrange for a breakpoint to be hit again later. We don't keep
2897 the SIGTRAP status and don't forward the SIGTRAP signal to the
2898 LWP. We will handle the current event, eventually we will resume
2899 this LWP, and this breakpoint will trap again.
2900
2901 If we do not do this, then we run the risk that the user will
2902 delete or disable the breakpoint, but the LWP will have already
2903 tripped on it. */
2904
515630c5
UW
2905 struct regcache *regcache = get_thread_regcache (lp->ptid);
2906 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2907 CORE_ADDR pc;
2908
2909 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 2910 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2911 {
2912 if (debug_linux_nat)
2913 fprintf_unfiltered (gdb_stdlog,
2914 "CB: Push back breakpoint for %s\n",
2915 target_pid_to_str (lp->ptid));
2916
2917 /* Back up the PC if necessary. */
515630c5
UW
2918 if (gdbarch_decr_pc_after_break (gdbarch))
2919 regcache_write_pc (regcache, pc);
2920
710151dd
PA
2921 return 1;
2922 }
2923 return 0;
2924}
2925
d6b0e80f
AC
2926static int
2927cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2928{
2929 struct lwp_info *event_lp = data;
2930
2931 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2932 if (lp == event_lp)
2933 return 0;
2934
2935 /* If a LWP other than the LWP that we're reporting an event for has
2936 hit a GDB breakpoint (as opposed to some random trap signal),
2937 then just arrange for it to hit it again later. We don't keep
2938 the SIGTRAP status and don't forward the SIGTRAP signal to the
2939 LWP. We will handle the current event, eventually we will resume
2940 all LWPs, and this one will get its breakpoint trap again.
2941
2942 If we do not do this, then we run the risk that the user will
2943 delete or disable the breakpoint, but the LWP will have already
2944 tripped on it. */
2945
00390b84 2946 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
2947 && cancel_breakpoint (lp))
2948 /* Throw away the SIGTRAP. */
2949 lp->status = 0;
d6b0e80f
AC
2950
2951 return 0;
2952}
2953
2954/* Select one LWP out of those that have events pending. */
2955
2956static void
d90e17a7 2957select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2958{
2959 int num_events = 0;
2960 int random_selector;
2961 struct lwp_info *event_lp;
2962
ac264b3b 2963 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2964 (*orig_lp)->status = *status;
2965
2966 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
2967 event_lp = iterate_over_lwps (filter,
2968 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
2969 if (event_lp != NULL)
2970 {
2971 if (debug_linux_nat)
2972 fprintf_unfiltered (gdb_stdlog,
2973 "SEL: Select single-step %s\n",
2974 target_pid_to_str (event_lp->ptid));
2975 }
2976 else
2977 {
2978 /* No single-stepping LWP. Select one at random, out of those
2979 which have had SIGTRAP events. */
2980
2981 /* First see how many SIGTRAP events we have. */
d90e17a7 2982 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
2983
2984 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2985 random_selector = (int)
2986 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2987
2988 if (debug_linux_nat && num_events > 1)
2989 fprintf_unfiltered (gdb_stdlog,
2990 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2991 num_events, random_selector);
2992
d90e17a7
PA
2993 event_lp = iterate_over_lwps (filter,
2994 select_event_lwp_callback,
d6b0e80f
AC
2995 &random_selector);
2996 }
2997
2998 if (event_lp != NULL)
2999 {
3000 /* Switch the event LWP. */
3001 *orig_lp = event_lp;
3002 *status = event_lp->status;
3003 }
3004
3005 /* Flush the wait status for the event LWP. */
3006 (*orig_lp)->status = 0;
3007}
3008
3009/* Return non-zero if LP has been resumed. */
3010
3011static int
3012resumed_callback (struct lwp_info *lp, void *data)
3013{
3014 return lp->resumed;
3015}
3016
d6b0e80f
AC
3017/* Stop an active thread, verify it still exists, then resume it. */
3018
3019static int
3020stop_and_resume_callback (struct lwp_info *lp, void *data)
3021{
3022 struct lwp_info *ptr;
3023
3024 if (!lp->stopped && !lp->signalled)
3025 {
3026 stop_callback (lp, NULL);
3027 stop_wait_callback (lp, NULL);
3028 /* Resume if the lwp still exists. */
3029 for (ptr = lwp_list; ptr; ptr = ptr->next)
3030 if (lp == ptr)
3031 {
3032 resume_callback (lp, NULL);
3033 resume_set_callback (lp, NULL);
3034 }
3035 }
3036 return 0;
3037}
3038
02f3fc28 3039/* Check if we should go on and pass this event to common code.
fa2c6a57 3040 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
3041static struct lwp_info *
3042linux_nat_filter_event (int lwpid, int status, int options)
3043{
3044 struct lwp_info *lp;
3045
3046 lp = find_lwp_pid (pid_to_ptid (lwpid));
3047
3048 /* Check for stop events reported by a process we didn't already
3049 know about - anything not already in our LWP list.
3050
3051 If we're expecting to receive stopped processes after
3052 fork, vfork, and clone events, then we'll just add the
3053 new one to our list and go back to waiting for the event
3054 to be reported - the stopped process might be returned
3055 from waitpid before or after the event is. */
3056 if (WIFSTOPPED (status) && !lp)
3057 {
3058 linux_record_stopped_pid (lwpid, status);
3059 return NULL;
3060 }
3061
3062 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3063 our list, i.e. not part of the current process. This can happen
fd62cb89 3064 if we detach from a program we originally forked and then it
02f3fc28
PA
3065 exits. */
3066 if (!WIFSTOPPED (status) && !lp)
3067 return NULL;
3068
3069 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
3070 CLONE_PTRACE processes which do not use the thread library -
3071 otherwise we wouldn't find the new LWP this way. That doesn't
3072 currently work, and the following code is currently unreachable
3073 due to the two blocks above. If it's fixed some day, this code
3074 should be broken out into a function so that we can also pick up
3075 LWPs from the new interface. */
3076 if (!lp)
3077 {
3078 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
3079 if (options & __WCLONE)
3080 lp->cloned = 1;
3081
3082 gdb_assert (WIFSTOPPED (status)
3083 && WSTOPSIG (status) == SIGSTOP);
3084 lp->signalled = 1;
3085
3086 if (!in_thread_list (inferior_ptid))
3087 {
3088 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
3089 GET_PID (inferior_ptid));
3090 add_thread (inferior_ptid);
3091 }
3092
3093 add_thread (lp->ptid);
3094 }
3095
ca2163eb
PA
3096 /* Handle GNU/Linux's syscall SIGTRAPs. */
3097 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3098 {
3099 /* No longer need the sysgood bit. The ptrace event ends up
3100 recorded in lp->waitstatus if we care for it. We can carry
3101 on handling the event like a regular SIGTRAP from here
3102 on. */
3103 status = W_STOPCODE (SIGTRAP);
3104 if (linux_handle_syscall_trap (lp, 0))
3105 return NULL;
3106 }
02f3fc28 3107
ca2163eb
PA
3108 /* Handle GNU/Linux's extended waitstatus for trace events. */
3109 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3110 {
3111 if (debug_linux_nat)
3112 fprintf_unfiltered (gdb_stdlog,
3113 "LLW: Handling extended status 0x%06x\n",
3114 status);
3115 if (linux_handle_extended_wait (lp, status, 0))
3116 return NULL;
3117 }
3118
26ab7092 3119 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3120 {
3121 /* Save the trap's siginfo in case we need it later. */
3122 save_siginfo (lp);
3123
3124 save_sigtrap (lp);
3125 }
ca2163eb 3126
02f3fc28 3127 /* Check if the thread has exited. */
d90e17a7
PA
3128 if ((WIFEXITED (status) || WIFSIGNALED (status))
3129 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3130 {
9db03742
JB
3131 /* If this is the main thread, we must stop all threads and verify
3132 if they are still alive. This is because in the nptl thread model
3133 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3134 other than the main thread. We only get the main thread exit
3135 signal once all child threads have already exited. If we
3136 stop all the threads and use the stop_wait_callback to check
3137 if they have exited we can determine whether this signal
3138 should be ignored or whether it means the end of the debugged
3139 application, regardless of which threading model is being
5d3b6af6 3140 used. */
02f3fc28
PA
3141 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3142 {
3143 lp->stopped = 1;
d90e17a7
PA
3144 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3145 stop_and_resume_callback, NULL);
02f3fc28
PA
3146 }
3147
3148 if (debug_linux_nat)
3149 fprintf_unfiltered (gdb_stdlog,
3150 "LLW: %s exited.\n",
3151 target_pid_to_str (lp->ptid));
3152
d90e17a7 3153 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3154 {
3155 /* If there is at least one more LWP, then the exit signal
3156 was not the end of the debugged application and should be
3157 ignored. */
3158 exit_lwp (lp);
3159 return NULL;
3160 }
02f3fc28
PA
3161 }
3162
3163 /* Check if the current LWP has previously exited. In the nptl
3164 thread model, LWPs other than the main thread do not issue
3165 signals when they exit so we must check whenever the thread has
3166 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3167 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3168 {
d90e17a7
PA
3169 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3170
02f3fc28
PA
3171 if (debug_linux_nat)
3172 fprintf_unfiltered (gdb_stdlog,
3173 "LLW: %s exited.\n",
3174 target_pid_to_str (lp->ptid));
3175
3176 exit_lwp (lp);
3177
3178 /* Make sure there is at least one thread running. */
d90e17a7 3179 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3180
3181 /* Discard the event. */
3182 return NULL;
3183 }
3184
3185 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3186 an attempt to stop an LWP. */
3187 if (lp->signalled
3188 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3189 {
3190 if (debug_linux_nat)
3191 fprintf_unfiltered (gdb_stdlog,
3192 "LLW: Delayed SIGSTOP caught for %s.\n",
3193 target_pid_to_str (lp->ptid));
3194
3195 /* This is a delayed SIGSTOP. */
3196 lp->signalled = 0;
3197
3198 registers_changed ();
3199
28439f5e 3200 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28
PA
3201 lp->step, TARGET_SIGNAL_0);
3202 if (debug_linux_nat)
3203 fprintf_unfiltered (gdb_stdlog,
3204 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3205 lp->step ?
3206 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3207 target_pid_to_str (lp->ptid));
3208
3209 lp->stopped = 0;
3210 gdb_assert (lp->resumed);
3211
3212 /* Discard the event. */
3213 return NULL;
3214 }
3215
57380f4e
DJ
3216 /* Make sure we don't report a SIGINT that we have already displayed
3217 for another thread. */
3218 if (lp->ignore_sigint
3219 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3220 {
3221 if (debug_linux_nat)
3222 fprintf_unfiltered (gdb_stdlog,
3223 "LLW: Delayed SIGINT caught for %s.\n",
3224 target_pid_to_str (lp->ptid));
3225
3226 /* This is a delayed SIGINT. */
3227 lp->ignore_sigint = 0;
3228
3229 registers_changed ();
28439f5e 3230 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3231 lp->step, TARGET_SIGNAL_0);
3232 if (debug_linux_nat)
3233 fprintf_unfiltered (gdb_stdlog,
3234 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3235 lp->step ?
3236 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3237 target_pid_to_str (lp->ptid));
3238
3239 lp->stopped = 0;
3240 gdb_assert (lp->resumed);
3241
3242 /* Discard the event. */
3243 return NULL;
3244 }
3245
02f3fc28
PA
3246 /* An interesting event. */
3247 gdb_assert (lp);
ca2163eb 3248 lp->status = status;
02f3fc28
PA
3249 return lp;
3250}
3251
d6b0e80f 3252static ptid_t
7feb7d06 3253linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3254 ptid_t ptid, struct target_waitstatus *ourstatus,
3255 int target_options)
d6b0e80f 3256{
7feb7d06 3257 static sigset_t prev_mask;
d6b0e80f
AC
3258 struct lwp_info *lp = NULL;
3259 int options = 0;
3260 int status = 0;
d90e17a7 3261 pid_t pid;
d6b0e80f 3262
b84876c2
PA
3263 if (debug_linux_nat_async)
3264 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3265
f973ed9c
DJ
3266 /* The first time we get here after starting a new inferior, we may
3267 not have added it to the LWP list yet - this is the earliest
3268 moment at which we know its PID. */
d90e17a7 3269 if (ptid_is_pid (inferior_ptid))
f973ed9c 3270 {
27c9d204
PA
3271 /* Upgrade the main thread's ptid. */
3272 thread_change_ptid (inferior_ptid,
3273 BUILD_LWP (GET_PID (inferior_ptid),
3274 GET_PID (inferior_ptid)));
3275
f973ed9c
DJ
3276 lp = add_lwp (inferior_ptid);
3277 lp->resumed = 1;
3278 }
3279
7feb7d06
PA
3280 /* Make sure SIGCHLD is blocked. */
3281 block_child_signals (&prev_mask);
d6b0e80f 3282
d90e17a7
PA
3283 if (ptid_equal (ptid, minus_one_ptid))
3284 pid = -1;
3285 else if (ptid_is_pid (ptid))
3286 /* A request to wait for a specific tgid. This is not possible
3287 with waitpid, so instead, we wait for any child, and leave
3288 children we're not interested in right now with a pending
3289 status to report later. */
3290 pid = -1;
3291 else
3292 pid = GET_LWP (ptid);
3293
d6b0e80f 3294retry:
d90e17a7
PA
3295 lp = NULL;
3296 status = 0;
d6b0e80f 3297
e3e9f5a2
PA
3298 /* Make sure that of those LWPs we want to get an event from, there
3299 is at least one LWP that has been resumed. If there's none, just
3300 bail out. The core may just be flushing asynchronously all
3301 events. */
3302 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3303 {
3304 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3305
3306 if (debug_linux_nat_async)
3307 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3308
3309 restore_child_signals_mask (&prev_mask);
3310 return minus_one_ptid;
3311 }
d6b0e80f
AC
3312
3313 /* First check if there is a LWP with a wait status pending. */
3314 if (pid == -1)
3315 {
3316 /* Any LWP that's been resumed will do. */
d90e17a7 3317 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3318 if (lp)
3319 {
ca2163eb 3320 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3321 fprintf_unfiltered (gdb_stdlog,
3322 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3323 status_to_str (lp->status),
d6b0e80f
AC
3324 target_pid_to_str (lp->ptid));
3325 }
3326
b84876c2 3327 /* But if we don't find one, we'll have to wait, and check both
7feb7d06
PA
3328 cloned and uncloned processes. We start with the cloned
3329 processes. */
d6b0e80f
AC
3330 options = __WCLONE | WNOHANG;
3331 }
3332 else if (is_lwp (ptid))
3333 {
3334 if (debug_linux_nat)
3335 fprintf_unfiltered (gdb_stdlog,
3336 "LLW: Waiting for specific LWP %s.\n",
3337 target_pid_to_str (ptid));
3338
3339 /* We have a specific LWP to check. */
3340 lp = find_lwp_pid (ptid);
3341 gdb_assert (lp);
d6b0e80f 3342
ca2163eb 3343 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3344 fprintf_unfiltered (gdb_stdlog,
3345 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3346 status_to_str (lp->status),
d6b0e80f
AC
3347 target_pid_to_str (lp->ptid));
3348
3349 /* If we have to wait, take into account whether PID is a cloned
3350 process or not. And we have to convert it to something that
3351 the layer beneath us can understand. */
3352 options = lp->cloned ? __WCLONE : 0;
3353 pid = GET_LWP (ptid);
d90e17a7
PA
3354
3355 /* We check for lp->waitstatus in addition to lp->status,
3356 because we can have pending process exits recorded in
3357 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3358 an additional lp->status_p flag. */
ca2163eb 3359 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3360 lp = NULL;
d6b0e80f
AC
3361 }
3362
d90e17a7 3363 if (lp && lp->signalled)
d6b0e80f
AC
3364 {
3365 /* A pending SIGSTOP may interfere with the normal stream of
3366 events. In a typical case where interference is a problem,
3367 we have a SIGSTOP signal pending for LWP A while
3368 single-stepping it, encounter an event in LWP B, and take the
3369 pending SIGSTOP while trying to stop LWP A. After processing
3370 the event in LWP B, LWP A is continued, and we'll never see
3371 the SIGTRAP associated with the last time we were
3372 single-stepping LWP A. */
3373
3374 /* Resume the thread. It should halt immediately returning the
3375 pending SIGSTOP. */
3376 registers_changed ();
28439f5e 3377 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3378 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3379 if (debug_linux_nat)
3380 fprintf_unfiltered (gdb_stdlog,
3381 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3382 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3383 target_pid_to_str (lp->ptid));
3384 lp->stopped = 0;
3385 gdb_assert (lp->resumed);
3386
ca2163eb
PA
3387 /* Catch the pending SIGSTOP. */
3388 status = lp->status;
3389 lp->status = 0;
3390
d6b0e80f 3391 stop_wait_callback (lp, NULL);
ca2163eb
PA
3392
3393 /* If the lp->status field isn't empty, we caught another signal
3394 while flushing the SIGSTOP. Return it back to the event
3395 queue of the LWP, as we already have an event to handle. */
3396 if (lp->status)
3397 {
3398 if (debug_linux_nat)
3399 fprintf_unfiltered (gdb_stdlog,
3400 "LLW: kill %s, %s\n",
3401 target_pid_to_str (lp->ptid),
3402 status_to_str (lp->status));
3403 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3404 }
3405
3406 lp->status = status;
d6b0e80f
AC
3407 }
3408
b84876c2
PA
3409 if (!target_can_async_p ())
3410 {
3411 /* Causes SIGINT to be passed on to the attached process. */
3412 set_sigint_trap ();
b84876c2 3413 }
d6b0e80f 3414
47608cb1
PA
3415 /* Translate generic target_wait options into waitpid options. */
3416 if (target_options & TARGET_WNOHANG)
3417 options |= WNOHANG;
7feb7d06 3418
d90e17a7 3419 while (lp == NULL)
d6b0e80f
AC
3420 {
3421 pid_t lwpid;
3422
7feb7d06 3423 lwpid = my_waitpid (pid, &status, options);
b84876c2 3424
d6b0e80f
AC
3425 if (lwpid > 0)
3426 {
3427 gdb_assert (pid == -1 || lwpid == pid);
3428
3429 if (debug_linux_nat)
3430 {
3431 fprintf_unfiltered (gdb_stdlog,
3432 "LLW: waitpid %ld received %s\n",
3433 (long) lwpid, status_to_str (status));
3434 }
3435
02f3fc28 3436 lp = linux_nat_filter_event (lwpid, status, options);
d90e17a7 3437
33355866
JK
3438 /* STATUS is now no longer valid, use LP->STATUS instead. */
3439 status = 0;
3440
d90e17a7
PA
3441 if (lp
3442 && ptid_is_pid (ptid)
3443 && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
d6b0e80f 3444 {
e3e9f5a2
PA
3445 gdb_assert (lp->resumed);
3446
d90e17a7 3447 if (debug_linux_nat)
3e43a32a
MS
3448 fprintf (stderr,
3449 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3450 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3451
ca2163eb 3452 if (WIFSTOPPED (lp->status))
d90e17a7 3453 {
ca2163eb 3454 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3455 {
e3e9f5a2
PA
3456 /* Cancel breakpoint hits. The breakpoint may
3457 be removed before we fetch events from this
3458 process to report to the core. It is best
3459 not to assume the moribund breakpoints
3460 heuristic always handles these cases --- it
3461 could be too many events go through to the
3462 core before this one is handled. All-stop
3463 always cancels breakpoint hits in all
3464 threads. */
3465 if (non_stop
00390b84 3466 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3467 && cancel_breakpoint (lp))
3468 {
3469 /* Throw away the SIGTRAP. */
3470 lp->status = 0;
3471
3472 if (debug_linux_nat)
3473 fprintf (stderr,
3e43a32a
MS
3474 "LLW: LWP %ld hit a breakpoint while"
3475 " waiting for another process;"
3476 " cancelled it\n",
e3e9f5a2
PA
3477 ptid_get_lwp (lp->ptid));
3478 }
3479 lp->stopped = 1;
d90e17a7
PA
3480 }
3481 else
3482 {
3483 lp->stopped = 1;
3484 lp->signalled = 0;
3485 }
3486 }
33355866 3487 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3488 {
3489 if (debug_linux_nat)
3e43a32a
MS
3490 fprintf (stderr,
3491 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3492 ptid_get_lwp (lp->ptid));
3493
3494 /* This was the last lwp in the process. Since
3495 events are serialized to GDB core, and we can't
3496 report this one right now, but GDB core and the
3497 other target layers will want to be notified
3498 about the exit code/signal, leave the status
3499 pending for the next time we're able to report
3500 it. */
d90e17a7
PA
3501
3502 /* Prevent trying to stop this thread again. We'll
3503 never try to resume it because it has a pending
3504 status. */
3505 lp->stopped = 1;
3506
3507 /* Dead LWP's aren't expected to reported a pending
3508 sigstop. */
3509 lp->signalled = 0;
3510
3511 /* Store the pending event in the waitstatus as
3512 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3513 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3514 }
3515
3516 /* Keep looking. */
3517 lp = NULL;
d6b0e80f
AC
3518 continue;
3519 }
3520
d90e17a7
PA
3521 if (lp)
3522 break;
3523 else
3524 {
3525 if (pid == -1)
3526 {
3527 /* waitpid did return something. Restart over. */
3528 options |= __WCLONE;
3529 }
3530 continue;
3531 }
d6b0e80f
AC
3532 }
3533
3534 if (pid == -1)
3535 {
3536 /* Alternate between checking cloned and uncloned processes. */
3537 options ^= __WCLONE;
3538
b84876c2
PA
3539 /* And every time we have checked both:
3540 In async mode, return to event loop;
3541 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 3542 if (options & __WCLONE)
b84876c2 3543 {
47608cb1 3544 if (target_options & TARGET_WNOHANG)
b84876c2
PA
3545 {
3546 /* No interesting event. */
3547 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3548
b84876c2
PA
3549 if (debug_linux_nat_async)
3550 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3551
7feb7d06 3552 restore_child_signals_mask (&prev_mask);
b84876c2
PA
3553 return minus_one_ptid;
3554 }
3555
3556 sigsuspend (&suspend_mask);
3557 }
d6b0e80f 3558 }
28736962
PA
3559 else if (target_options & TARGET_WNOHANG)
3560 {
3561 /* No interesting event for PID yet. */
3562 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3563
3564 if (debug_linux_nat_async)
3565 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3566
3567 restore_child_signals_mask (&prev_mask);
3568 return minus_one_ptid;
3569 }
d6b0e80f
AC
3570
3571 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3572 gdb_assert (lp == NULL);
d6b0e80f
AC
3573 }
3574
b84876c2 3575 if (!target_can_async_p ())
d26b5354 3576 clear_sigint_trap ();
d6b0e80f
AC
3577
3578 gdb_assert (lp);
3579
ca2163eb
PA
3580 status = lp->status;
3581 lp->status = 0;
3582
d6b0e80f
AC
3583 /* Don't report signals that GDB isn't interested in, such as
3584 signals that are neither printed nor stopped upon. Stopping all
3585 threads can be a bit time-consuming so if we want decent
3586 performance with heavily multi-threaded programs, especially when
3587 they're using a high frequency timer, we'd better avoid it if we
3588 can. */
3589
3590 if (WIFSTOPPED (status))
3591 {
423ec54c 3592 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b48e9c
PA
3593 struct inferior *inf;
3594
3595 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3596 gdb_assert (inf);
d6b0e80f 3597
d6b48e9c
PA
3598 /* Defer to common code if we get a signal while
3599 single-stepping, since that may need special care, e.g. to
3600 skip the signal handler, or, if we're gaining control of the
3601 inferior. */
d539ed7e 3602 if (!lp->step
16c381f0 3603 && inf->control.stop_soon == NO_STOP_QUIETLY
d539ed7e 3604 && signal_stop_state (signo) == 0
d6b0e80f
AC
3605 && signal_print_state (signo) == 0
3606 && signal_pass_state (signo) == 1)
3607 {
3608 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3609 here? It is not clear we should. GDB may not expect
3610 other threads to run. On the other hand, not resuming
3611 newly attached threads may cause an unwanted delay in
3612 getting them running. */
3613 registers_changed ();
28439f5e 3614 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3615 lp->step, signo);
d6b0e80f
AC
3616 if (debug_linux_nat)
3617 fprintf_unfiltered (gdb_stdlog,
3618 "LLW: %s %s, %s (preempt 'handle')\n",
3619 lp->step ?
3620 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3621 target_pid_to_str (lp->ptid),
423ec54c
JK
3622 (signo != TARGET_SIGNAL_0
3623 ? strsignal (target_signal_to_host (signo))
3624 : "0"));
d6b0e80f 3625 lp->stopped = 0;
d6b0e80f
AC
3626 goto retry;
3627 }
3628
1ad15515 3629 if (!non_stop)
d6b0e80f 3630 {
1ad15515
PA
3631 /* Only do the below in all-stop, as we currently use SIGINT
3632 to implement target_stop (see linux_nat_stop) in
3633 non-stop. */
3634 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3635 {
3636 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3637 forwarded to the entire process group, that is, all LWPs
3638 will receive it - unless they're using CLONE_THREAD to
3639 share signals. Since we only want to report it once, we
3640 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3641 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3642 set_ignore_sigint, NULL);
1ad15515
PA
3643 lp->ignore_sigint = 0;
3644 }
3645 else
3646 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3647 }
3648 }
3649
3650 /* This LWP is stopped now. */
3651 lp->stopped = 1;
3652
3653 if (debug_linux_nat)
3654 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3655 status_to_str (status), target_pid_to_str (lp->ptid));
3656
4c28f408
PA
3657 if (!non_stop)
3658 {
3659 /* Now stop all other LWP's ... */
d90e17a7 3660 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3661
3662 /* ... and wait until all of them have reported back that
3663 they're no longer running. */
d90e17a7 3664 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3665
3666 /* If we're not waiting for a specific LWP, choose an event LWP
3667 from among those that have had events. Giving equal priority
3668 to all LWPs that have had events helps prevent
3669 starvation. */
3670 if (pid == -1)
d90e17a7 3671 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3672
e3e9f5a2
PA
3673 /* Now that we've selected our final event LWP, cancel any
3674 breakpoints in other LWPs that have hit a GDB breakpoint.
3675 See the comment in cancel_breakpoints_callback to find out
3676 why. */
3677 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3678
3679 /* In all-stop, from the core's perspective, all LWPs are now
3680 stopped until a new resume action is sent over. */
3681 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3682 }
3683 else
3684 lp->resumed = 0;
d6b0e80f 3685
26ab7092 3686 if (linux_nat_status_is_event (status))
d6b0e80f 3687 {
d6b0e80f
AC
3688 if (debug_linux_nat)
3689 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3690 "LLW: trap ptid is %s.\n",
3691 target_pid_to_str (lp->ptid));
d6b0e80f 3692 }
d6b0e80f
AC
3693
3694 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3695 {
3696 *ourstatus = lp->waitstatus;
3697 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3698 }
3699 else
3700 store_waitstatus (ourstatus, status);
3701
b84876c2
PA
3702 if (debug_linux_nat_async)
3703 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3704
7feb7d06 3705 restore_child_signals_mask (&prev_mask);
1e225492
JK
3706
3707 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3708 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3709 lp->core = -1;
3710 else
3711 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3712
f973ed9c 3713 return lp->ptid;
d6b0e80f
AC
3714}
3715
e3e9f5a2
PA
3716/* Resume LWPs that are currently stopped without any pending status
3717 to report, but are resumed from the core's perspective. */
3718
3719static int
3720resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3721{
3722 ptid_t *wait_ptid_p = data;
3723
3724 if (lp->stopped
3725 && lp->resumed
3726 && lp->status == 0
3727 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3728 {
3729 gdb_assert (is_executing (lp->ptid));
3730
3731 /* Don't bother if there's a breakpoint at PC that we'd hit
3732 immediately, and we're not waiting for this LWP. */
3733 if (!ptid_match (lp->ptid, *wait_ptid_p))
3734 {
3735 struct regcache *regcache = get_thread_regcache (lp->ptid);
3736 CORE_ADDR pc = regcache_read_pc (regcache);
3737
3738 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3739 return 0;
3740 }
3741
3742 if (debug_linux_nat)
3743 fprintf_unfiltered (gdb_stdlog,
3744 "RSRL: resuming stopped-resumed LWP %s\n",
3745 target_pid_to_str (lp->ptid));
3746
3747 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3748 lp->step, TARGET_SIGNAL_0);
3749 lp->stopped = 0;
3750 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3751 lp->stopped_by_watchpoint = 0;
3752 }
3753
3754 return 0;
3755}
3756
7feb7d06
PA
3757static ptid_t
3758linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3759 ptid_t ptid, struct target_waitstatus *ourstatus,
3760 int target_options)
7feb7d06
PA
3761{
3762 ptid_t event_ptid;
3763
3764 if (debug_linux_nat)
3e43a32a
MS
3765 fprintf_unfiltered (gdb_stdlog,
3766 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
3767
3768 /* Flush the async file first. */
3769 if (target_can_async_p ())
3770 async_file_flush ();
3771
e3e9f5a2
PA
3772 /* Resume LWPs that are currently stopped without any pending status
3773 to report, but are resumed from the core's perspective. LWPs get
3774 in this state if we find them stopping at a time we're not
3775 interested in reporting the event (target_wait on a
3776 specific_process, for example, see linux_nat_wait_1), and
3777 meanwhile the event became uninteresting. Don't bother resuming
3778 LWPs we're not going to wait for if they'd stop immediately. */
3779 if (non_stop)
3780 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3781
47608cb1 3782 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3783
3784 /* If we requested any event, and something came out, assume there
3785 may be more. If we requested a specific lwp or process, also
3786 assume there may be more. */
3787 if (target_can_async_p ()
3788 && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3789 || !ptid_equal (ptid, minus_one_ptid)))
3790 async_file_mark ();
3791
3792 /* Get ready for the next event. */
3793 if (target_can_async_p ())
3794 target_async (inferior_event_handler, 0);
3795
3796 return event_ptid;
3797}
3798
d6b0e80f
AC
3799static int
3800kill_callback (struct lwp_info *lp, void *data)
3801{
3802 errno = 0;
3803 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3804 if (debug_linux_nat)
3805 fprintf_unfiltered (gdb_stdlog,
3806 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3807 target_pid_to_str (lp->ptid),
3808 errno ? safe_strerror (errno) : "OK");
3809
3810 return 0;
3811}
3812
3813static int
3814kill_wait_callback (struct lwp_info *lp, void *data)
3815{
3816 pid_t pid;
3817
3818 /* We must make sure that there are no pending events (delayed
3819 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3820 program doesn't interfere with any following debugging session. */
3821
3822 /* For cloned processes we must check both with __WCLONE and
3823 without, since the exit status of a cloned process isn't reported
3824 with __WCLONE. */
3825 if (lp->cloned)
3826 {
3827 do
3828 {
58aecb61 3829 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3830 if (pid != (pid_t) -1)
d6b0e80f 3831 {
e85a822c
DJ
3832 if (debug_linux_nat)
3833 fprintf_unfiltered (gdb_stdlog,
3834 "KWC: wait %s received unknown.\n",
3835 target_pid_to_str (lp->ptid));
3836 /* The Linux kernel sometimes fails to kill a thread
3837 completely after PTRACE_KILL; that goes from the stop
3838 point in do_fork out to the one in
3839 get_signal_to_deliever and waits again. So kill it
3840 again. */
3841 kill_callback (lp, NULL);
d6b0e80f
AC
3842 }
3843 }
3844 while (pid == GET_LWP (lp->ptid));
3845
3846 gdb_assert (pid == -1 && errno == ECHILD);
3847 }
3848
3849 do
3850 {
58aecb61 3851 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3852 if (pid != (pid_t) -1)
d6b0e80f 3853 {
e85a822c
DJ
3854 if (debug_linux_nat)
3855 fprintf_unfiltered (gdb_stdlog,
3856 "KWC: wait %s received unk.\n",
3857 target_pid_to_str (lp->ptid));
3858 /* See the call to kill_callback above. */
3859 kill_callback (lp, NULL);
d6b0e80f
AC
3860 }
3861 }
3862 while (pid == GET_LWP (lp->ptid));
3863
3864 gdb_assert (pid == -1 && errno == ECHILD);
3865 return 0;
3866}
3867
3868static void
7d85a9c0 3869linux_nat_kill (struct target_ops *ops)
d6b0e80f 3870{
f973ed9c
DJ
3871 struct target_waitstatus last;
3872 ptid_t last_ptid;
3873 int status;
d6b0e80f 3874
f973ed9c
DJ
3875 /* If we're stopped while forking and we haven't followed yet,
3876 kill the other task. We need to do this first because the
3877 parent will be sleeping if this is a vfork. */
d6b0e80f 3878
f973ed9c 3879 get_last_target_status (&last_ptid, &last);
d6b0e80f 3880
f973ed9c
DJ
3881 if (last.kind == TARGET_WAITKIND_FORKED
3882 || last.kind == TARGET_WAITKIND_VFORKED)
3883 {
3a3e9ee3 3884 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3885 wait (&status);
3886 }
3887
3888 if (forks_exist_p ())
7feb7d06 3889 linux_fork_killall ();
f973ed9c
DJ
3890 else
3891 {
d90e17a7 3892 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3893
4c28f408
PA
3894 /* Stop all threads before killing them, since ptrace requires
3895 that the thread is stopped to sucessfully PTRACE_KILL. */
d90e17a7 3896 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3897 /* ... and wait until all of them have reported back that
3898 they're no longer running. */
d90e17a7 3899 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3900
f973ed9c 3901 /* Kill all LWP's ... */
d90e17a7 3902 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3903
3904 /* ... and wait until we've flushed all events. */
d90e17a7 3905 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3906 }
3907
3908 target_mourn_inferior ();
d6b0e80f
AC
3909}
3910
3911static void
136d6dae 3912linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3913{
d90e17a7 3914 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 3915
f973ed9c 3916 if (! forks_exist_p ())
d90e17a7
PA
3917 /* Normal case, no other forks available. */
3918 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3919 else
3920 /* Multi-fork case. The current inferior_ptid has exited, but
3921 there are other viable forks to debug. Delete the exiting
3922 one and context-switch to the first available. */
3923 linux_fork_mourn_inferior ();
d6b0e80f
AC
3924}
3925
5b009018
PA
3926/* Convert a native/host siginfo object, into/from the siginfo in the
3927 layout of the inferiors' architecture. */
3928
3929static void
3930siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3931{
3932 int done = 0;
3933
3934 if (linux_nat_siginfo_fixup != NULL)
3935 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3936
3937 /* If there was no callback, or the callback didn't do anything,
3938 then just do a straight memcpy. */
3939 if (!done)
3940 {
3941 if (direction == 1)
3942 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3943 else
3944 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3945 }
3946}
3947
4aa995e1
PA
3948static LONGEST
3949linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3950 const char *annex, gdb_byte *readbuf,
3951 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3952{
4aa995e1
PA
3953 int pid;
3954 struct siginfo siginfo;
5b009018 3955 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
3956
3957 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3958 gdb_assert (readbuf || writebuf);
3959
3960 pid = GET_LWP (inferior_ptid);
3961 if (pid == 0)
3962 pid = GET_PID (inferior_ptid);
3963
3964 if (offset > sizeof (siginfo))
3965 return -1;
3966
3967 errno = 0;
3968 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3969 if (errno != 0)
3970 return -1;
3971
5b009018
PA
3972 /* When GDB is built as a 64-bit application, ptrace writes into
3973 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3974 inferior with a 64-bit GDB should look the same as debugging it
3975 with a 32-bit GDB, we need to convert it. GDB core always sees
3976 the converted layout, so any read/write will have to be done
3977 post-conversion. */
3978 siginfo_fixup (&siginfo, inf_siginfo, 0);
3979
4aa995e1
PA
3980 if (offset + len > sizeof (siginfo))
3981 len = sizeof (siginfo) - offset;
3982
3983 if (readbuf != NULL)
5b009018 3984 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3985 else
3986 {
5b009018
PA
3987 memcpy (inf_siginfo + offset, writebuf, len);
3988
3989 /* Convert back to ptrace layout before flushing it out. */
3990 siginfo_fixup (&siginfo, inf_siginfo, 1);
3991
4aa995e1
PA
3992 errno = 0;
3993 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3994 if (errno != 0)
3995 return -1;
3996 }
3997
3998 return len;
3999}
4000
10d6c8cd
DJ
4001static LONGEST
4002linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4003 const char *annex, gdb_byte *readbuf,
4004 const gdb_byte *writebuf,
4005 ULONGEST offset, LONGEST len)
d6b0e80f 4006{
4aa995e1 4007 struct cleanup *old_chain;
10d6c8cd 4008 LONGEST xfer;
d6b0e80f 4009
4aa995e1
PA
4010 if (object == TARGET_OBJECT_SIGNAL_INFO)
4011 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4012 offset, len);
4013
c35b1492
PA
4014 /* The target is connected but no live inferior is selected. Pass
4015 this request down to a lower stratum (e.g., the executable
4016 file). */
4017 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4018 return 0;
4019
4aa995e1
PA
4020 old_chain = save_inferior_ptid ();
4021
d6b0e80f
AC
4022 if (is_lwp (inferior_ptid))
4023 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4024
10d6c8cd
DJ
4025 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4026 offset, len);
d6b0e80f
AC
4027
4028 do_cleanups (old_chain);
4029 return xfer;
4030}
4031
4032static int
28439f5e 4033linux_thread_alive (ptid_t ptid)
d6b0e80f 4034{
4c28f408
PA
4035 int err;
4036
d6b0e80f
AC
4037 gdb_assert (is_lwp (ptid));
4038
4c28f408
PA
4039 /* Send signal 0 instead of anything ptrace, because ptracing a
4040 running thread errors out claiming that the thread doesn't
4041 exist. */
4042 err = kill_lwp (GET_LWP (ptid), 0);
4043
d6b0e80f
AC
4044 if (debug_linux_nat)
4045 fprintf_unfiltered (gdb_stdlog,
4c28f408 4046 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4047 target_pid_to_str (ptid),
4c28f408 4048 err ? safe_strerror (err) : "OK");
9c0dd46b 4049
4c28f408 4050 if (err != 0)
d6b0e80f
AC
4051 return 0;
4052
4053 return 1;
4054}
4055
28439f5e
PA
4056static int
4057linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4058{
4059 return linux_thread_alive (ptid);
4060}
4061
d6b0e80f 4062static char *
117de6a9 4063linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4064{
4065 static char buf[64];
4066
a0ef4274 4067 if (is_lwp (ptid)
d90e17a7
PA
4068 && (GET_PID (ptid) != GET_LWP (ptid)
4069 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4070 {
4071 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4072 return buf;
4073 }
4074
4075 return normal_pid_to_str (ptid);
4076}
4077
4694da01
TT
4078static char *
4079linux_nat_thread_name (struct thread_info *thr)
4080{
4081 int pid = ptid_get_pid (thr->ptid);
4082 long lwp = ptid_get_lwp (thr->ptid);
4083#define FORMAT "/proc/%d/task/%ld/comm"
4084 char buf[sizeof (FORMAT) + 30];
4085 FILE *comm_file;
4086 char *result = NULL;
4087
4088 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4089 comm_file = fopen (buf, "r");
4090 if (comm_file)
4091 {
4092 /* Not exported by the kernel, so we define it here. */
4093#define COMM_LEN 16
4094 static char line[COMM_LEN + 1];
4095
4096 if (fgets (line, sizeof (line), comm_file))
4097 {
4098 char *nl = strchr (line, '\n');
4099
4100 if (nl)
4101 *nl = '\0';
4102 if (*line != '\0')
4103 result = line;
4104 }
4105
4106 fclose (comm_file);
4107 }
4108
4109#undef COMM_LEN
4110#undef FORMAT
4111
4112 return result;
4113}
4114
dba24537
AC
4115/* Accepts an integer PID; Returns a string representing a file that
4116 can be opened to get the symbols for the child process. */
4117
6d8fd2b7
UW
4118static char *
4119linux_child_pid_to_exec_file (int pid)
dba24537
AC
4120{
4121 char *name1, *name2;
4122
4123 name1 = xmalloc (MAXPATHLEN);
4124 name2 = xmalloc (MAXPATHLEN);
4125 make_cleanup (xfree, name1);
4126 make_cleanup (xfree, name2);
4127 memset (name2, 0, MAXPATHLEN);
4128
4129 sprintf (name1, "/proc/%d/exe", pid);
4130 if (readlink (name1, name2, MAXPATHLEN) > 0)
4131 return name2;
4132 else
4133 return name1;
4134}
4135
4136/* Service function for corefiles and info proc. */
4137
4138static int
4139read_mapping (FILE *mapfile,
4140 long long *addr,
4141 long long *endaddr,
4142 char *permissions,
4143 long long *offset,
4144 char *device, long long *inode, char *filename)
4145{
4146 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4147 addr, endaddr, permissions, offset, device, inode);
4148
2e14c2ea
MS
4149 filename[0] = '\0';
4150 if (ret > 0 && ret != EOF)
dba24537
AC
4151 {
4152 /* Eat everything up to EOL for the filename. This will prevent
4153 weird filenames (such as one with embedded whitespace) from
4154 confusing this code. It also makes this code more robust in
4155 respect to annotations the kernel may add after the filename.
4156
4157 Note the filename is used for informational purposes
4158 only. */
4159 ret += fscanf (mapfile, "%[^\n]\n", filename);
4160 }
2e14c2ea 4161
dba24537
AC
4162 return (ret != 0 && ret != EOF);
4163}
4164
4165/* Fills the "to_find_memory_regions" target vector. Lists the memory
4166 regions in the inferior for a corefile. */
4167
4168static int
b8edc417 4169linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
dba24537 4170{
89ecc4f5 4171 int pid = PIDGET (inferior_ptid);
dba24537
AC
4172 char mapsfilename[MAXPATHLEN];
4173 FILE *mapsfile;
4174 long long addr, endaddr, size, offset, inode;
4175 char permissions[8], device[8], filename[MAXPATHLEN];
4176 int read, write, exec;
7c8a8b04 4177 struct cleanup *cleanup;
dba24537
AC
4178
4179 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 4180 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 4181 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 4182 error (_("Could not open %s."), mapsfilename);
7c8a8b04 4183 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
4184
4185 if (info_verbose)
4186 fprintf_filtered (gdb_stdout,
4187 "Reading memory regions from %s\n", mapsfilename);
4188
4189 /* Now iterate until end-of-file. */
4190 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4191 &offset, &device[0], &inode, &filename[0]))
4192 {
4193 size = endaddr - addr;
4194
4195 /* Get the segment's permissions. */
4196 read = (strchr (permissions, 'r') != 0);
4197 write = (strchr (permissions, 'w') != 0);
4198 exec = (strchr (permissions, 'x') != 0);
4199
4200 if (info_verbose)
4201 {
4202 fprintf_filtered (gdb_stdout,
2244ba2e
PM
4203 "Save segment, %s bytes at %s (%c%c%c)",
4204 plongest (size), paddress (target_gdbarch, addr),
dba24537
AC
4205 read ? 'r' : ' ',
4206 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 4207 if (filename[0])
dba24537
AC
4208 fprintf_filtered (gdb_stdout, " for %s", filename);
4209 fprintf_filtered (gdb_stdout, "\n");
4210 }
4211
4212 /* Invoke the callback function to create the corefile
4213 segment. */
4214 func (addr, size, read, write, exec, obfd);
4215 }
7c8a8b04 4216 do_cleanups (cleanup);
dba24537
AC
4217 return 0;
4218}
4219
2020b7ab
PA
4220static int
4221find_signalled_thread (struct thread_info *info, void *data)
4222{
16c381f0 4223 if (info->suspend.stop_signal != TARGET_SIGNAL_0
2020b7ab
PA
4224 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4225 return 1;
4226
4227 return 0;
4228}
4229
4230static enum target_signal
4231find_stop_signal (void)
4232{
4233 struct thread_info *info =
4234 iterate_over_threads (find_signalled_thread, NULL);
4235
4236 if (info)
16c381f0 4237 return info->suspend.stop_signal;
2020b7ab
PA
4238 else
4239 return TARGET_SIGNAL_0;
4240}
4241
dba24537
AC
4242/* Records the thread's register state for the corefile note
4243 section. */
4244
4245static char *
4246linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4247 char *note_data, int *note_size,
4248 enum target_signal stop_signal)
dba24537 4249{
dba24537 4250 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4251 struct gdbarch *gdbarch = target_gdbarch;
4252 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4253 const struct regset *regset;
55e969c1 4254 int core_regset_p;
594f7785 4255 struct cleanup *old_chain;
17ea7499
CES
4256 struct core_regset_section *sect_list;
4257 char *gdb_regset;
594f7785
UW
4258
4259 old_chain = save_inferior_ptid ();
4260 inferior_ptid = ptid;
4261 target_fetch_registers (regcache, -1);
4262 do_cleanups (old_chain);
4f844a66
DM
4263
4264 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4265 sect_list = gdbarch_core_regset_sections (gdbarch);
4266
17ea7499
CES
4267 /* The loop below uses the new struct core_regset_section, which stores
4268 the supported section names and sizes for the core file. Note that
4269 note PRSTATUS needs to be treated specially. But the other notes are
4270 structurally the same, so they can benefit from the new struct. */
4271 if (core_regset_p && sect_list != NULL)
4272 while (sect_list->sect_name != NULL)
4273 {
17ea7499
CES
4274 regset = gdbarch_regset_from_core_section (gdbarch,
4275 sect_list->sect_name,
4276 sect_list->size);
4277 gdb_assert (regset && regset->collect_regset);
4278 gdb_regset = xmalloc (sect_list->size);
4279 regset->collect_regset (regset, regcache, -1,
4280 gdb_regset, sect_list->size);
2f2241f1
UW
4281
4282 if (strcmp (sect_list->sect_name, ".reg") == 0)
4283 note_data = (char *) elfcore_write_prstatus
4284 (obfd, note_data, note_size,
857d11d0
JK
4285 lwp, target_signal_to_host (stop_signal),
4286 gdb_regset);
2f2241f1
UW
4287 else
4288 note_data = (char *) elfcore_write_register_note
4289 (obfd, note_data, note_size,
4290 sect_list->sect_name, gdb_regset,
4291 sect_list->size);
17ea7499
CES
4292 xfree (gdb_regset);
4293 sect_list++;
4294 }
dba24537 4295
17ea7499
CES
4296 /* For architectures that does not have the struct core_regset_section
4297 implemented, we use the old method. When all the architectures have
4298 the new support, the code below should be deleted. */
4f844a66 4299 else
17ea7499 4300 {
2f2241f1
UW
4301 gdb_gregset_t gregs;
4302 gdb_fpregset_t fpregs;
4303
4304 if (core_regset_p
4305 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3e43a32a
MS
4306 sizeof (gregs)))
4307 != NULL && regset->collect_regset != NULL)
2f2241f1
UW
4308 regset->collect_regset (regset, regcache, -1,
4309 &gregs, sizeof (gregs));
4310 else
4311 fill_gregset (regcache, &gregs, -1);
4312
857d11d0
JK
4313 note_data = (char *) elfcore_write_prstatus
4314 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4315 &gregs);
2f2241f1 4316
17ea7499
CES
4317 if (core_regset_p
4318 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3e43a32a
MS
4319 sizeof (fpregs)))
4320 != NULL && regset->collect_regset != NULL)
17ea7499
CES
4321 regset->collect_regset (regset, regcache, -1,
4322 &fpregs, sizeof (fpregs));
4323 else
4324 fill_fpregset (regcache, &fpregs, -1);
4325
4326 note_data = (char *) elfcore_write_prfpreg (obfd,
4327 note_data,
4328 note_size,
4329 &fpregs, sizeof (fpregs));
4330 }
4f844a66 4331
dba24537
AC
4332 return note_data;
4333}
4334
4335struct linux_nat_corefile_thread_data
4336{
4337 bfd *obfd;
4338 char *note_data;
4339 int *note_size;
4340 int num_notes;
2020b7ab 4341 enum target_signal stop_signal;
dba24537
AC
4342};
4343
4344/* Called by gdbthread.c once per thread. Records the thread's
4345 register state for the corefile note section. */
4346
4347static int
4348linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4349{
4350 struct linux_nat_corefile_thread_data *args = data;
dba24537 4351
dba24537
AC
4352 args->note_data = linux_nat_do_thread_registers (args->obfd,
4353 ti->ptid,
4354 args->note_data,
2020b7ab
PA
4355 args->note_size,
4356 args->stop_signal);
dba24537 4357 args->num_notes++;
56be3814 4358
dba24537
AC
4359 return 0;
4360}
4361
efcbbd14
UW
4362/* Enumerate spufs IDs for process PID. */
4363
4364static void
4365iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4366{
4367 char path[128];
4368 DIR *dir;
4369 struct dirent *entry;
4370
4371 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4372 dir = opendir (path);
4373 if (!dir)
4374 return;
4375
4376 rewinddir (dir);
4377 while ((entry = readdir (dir)) != NULL)
4378 {
4379 struct stat st;
4380 struct statfs stfs;
4381 int fd;
4382
4383 fd = atoi (entry->d_name);
4384 if (!fd)
4385 continue;
4386
4387 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4388 if (stat (path, &st) != 0)
4389 continue;
4390 if (!S_ISDIR (st.st_mode))
4391 continue;
4392
4393 if (statfs (path, &stfs) != 0)
4394 continue;
4395 if (stfs.f_type != SPUFS_MAGIC)
4396 continue;
4397
4398 callback (data, fd);
4399 }
4400
4401 closedir (dir);
4402}
4403
4404/* Generate corefile notes for SPU contexts. */
4405
4406struct linux_spu_corefile_data
4407{
4408 bfd *obfd;
4409 char *note_data;
4410 int *note_size;
4411};
4412
4413static void
4414linux_spu_corefile_callback (void *data, int fd)
4415{
4416 struct linux_spu_corefile_data *args = data;
4417 int i;
4418
4419 static const char *spu_files[] =
4420 {
4421 "object-id",
4422 "mem",
4423 "regs",
4424 "fpcr",
4425 "lslr",
4426 "decr",
4427 "decr_status",
4428 "signal1",
4429 "signal1_type",
4430 "signal2",
4431 "signal2_type",
4432 "event_mask",
4433 "event_status",
4434 "mbox_info",
4435 "ibox_info",
4436 "wbox_info",
4437 "dma_info",
4438 "proxydma_info",
4439 };
4440
4441 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4442 {
4443 char annex[32], note_name[32];
4444 gdb_byte *spu_data;
4445 LONGEST spu_len;
4446
4447 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4448 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4449 annex, &spu_data);
4450 if (spu_len > 0)
4451 {
4452 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4453 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4454 args->note_size, note_name,
4455 NT_SPU, spu_data, spu_len);
4456 xfree (spu_data);
4457 }
4458 }
4459}
4460
4461static char *
4462linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4463{
4464 struct linux_spu_corefile_data args;
e0881a8e 4465
efcbbd14
UW
4466 args.obfd = obfd;
4467 args.note_data = note_data;
4468 args.note_size = note_size;
4469
4470 iterate_over_spus (PIDGET (inferior_ptid),
4471 linux_spu_corefile_callback, &args);
4472
4473 return args.note_data;
4474}
4475
dba24537
AC
4476/* Fills the "to_make_corefile_note" target vector. Builds the note
4477 section for a corefile, and returns it in a malloc buffer. */
4478
4479static char *
4480linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4481{
4482 struct linux_nat_corefile_thread_data thread_args;
d99148ef 4483 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4484 char fname[16] = { '\0' };
d99148ef 4485 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4486 char psargs[80] = { '\0' };
4487 char *note_data = NULL;
d90e17a7 4488 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4489 gdb_byte *auxv;
dba24537
AC
4490 int auxv_len;
4491
4492 if (get_exec_file (0))
4493 {
4494 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
4495 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4496 if (get_inferior_args ())
4497 {
d99148ef
JK
4498 char *string_end;
4499 char *psargs_end = psargs + sizeof (psargs);
4500
4501 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4502 strings fine. */
4503 string_end = memchr (psargs, 0, sizeof (psargs));
4504 if (string_end != NULL)
4505 {
4506 *string_end++ = ' ';
4507 strncpy (string_end, get_inferior_args (),
4508 psargs_end - string_end);
4509 }
dba24537
AC
4510 }
4511 note_data = (char *) elfcore_write_prpsinfo (obfd,
4512 note_data,
4513 note_size, fname, psargs);
4514 }
4515
4516 /* Dump information for threads. */
4517 thread_args.obfd = obfd;
4518 thread_args.note_data = note_data;
4519 thread_args.note_size = note_size;
4520 thread_args.num_notes = 0;
2020b7ab 4521 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4522 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4523 gdb_assert (thread_args.num_notes != 0);
4524 note_data = thread_args.note_data;
dba24537 4525
13547ab6
DJ
4526 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4527 NULL, &auxv);
dba24537
AC
4528 if (auxv_len > 0)
4529 {
4530 note_data = elfcore_write_note (obfd, note_data, note_size,
4531 "CORE", NT_AUXV, auxv, auxv_len);
4532 xfree (auxv);
4533 }
4534
efcbbd14
UW
4535 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4536
dba24537
AC
4537 make_cleanup (xfree, note_data);
4538 return note_data;
4539}
4540
4541/* Implement the "info proc" command. */
4542
4543static void
4544linux_nat_info_proc_cmd (char *args, int from_tty)
4545{
89ecc4f5
DE
4546 /* A long is used for pid instead of an int to avoid a loss of precision
4547 compiler warning from the output of strtoul. */
4548 long pid = PIDGET (inferior_ptid);
dba24537
AC
4549 FILE *procfile;
4550 char **argv = NULL;
4551 char buffer[MAXPATHLEN];
4552 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4553 int cmdline_f = 1;
4554 int cwd_f = 1;
4555 int exe_f = 1;
4556 int mappings_f = 0;
dba24537
AC
4557 int status_f = 0;
4558 int stat_f = 0;
4559 int all = 0;
4560 struct stat dummy;
4561
4562 if (args)
4563 {
4564 /* Break up 'args' into an argv array. */
d1a41061
PP
4565 argv = gdb_buildargv (args);
4566 make_cleanup_freeargv (argv);
dba24537
AC
4567 }
4568 while (argv != NULL && *argv != NULL)
4569 {
4570 if (isdigit (argv[0][0]))
4571 {
4572 pid = strtoul (argv[0], NULL, 10);
4573 }
4574 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4575 {
4576 mappings_f = 1;
4577 }
4578 else if (strcmp (argv[0], "status") == 0)
4579 {
4580 status_f = 1;
4581 }
4582 else if (strcmp (argv[0], "stat") == 0)
4583 {
4584 stat_f = 1;
4585 }
4586 else if (strcmp (argv[0], "cmd") == 0)
4587 {
4588 cmdline_f = 1;
4589 }
4590 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4591 {
4592 exe_f = 1;
4593 }
4594 else if (strcmp (argv[0], "cwd") == 0)
4595 {
4596 cwd_f = 1;
4597 }
4598 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4599 {
4600 all = 1;
4601 }
4602 else
4603 {
1777feb0 4604 /* [...] (future options here). */
dba24537
AC
4605 }
4606 argv++;
4607 }
4608 if (pid == 0)
8a3fe4f8 4609 error (_("No current process: you must name one."));
dba24537 4610
89ecc4f5 4611 sprintf (fname1, "/proc/%ld", pid);
dba24537 4612 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4613 error (_("No /proc directory: '%s'"), fname1);
dba24537 4614
89ecc4f5 4615 printf_filtered (_("process %ld\n"), pid);
dba24537
AC
4616 if (cmdline_f || all)
4617 {
89ecc4f5 4618 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4619 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4620 {
7c8a8b04 4621 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4622
bf1d7d9c
JB
4623 if (fgets (buffer, sizeof (buffer), procfile))
4624 printf_filtered ("cmdline = '%s'\n", buffer);
4625 else
4626 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4627 do_cleanups (cleanup);
dba24537
AC
4628 }
4629 else
8a3fe4f8 4630 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4631 }
4632 if (cwd_f || all)
4633 {
89ecc4f5 4634 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4635 memset (fname2, 0, sizeof (fname2));
4636 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4637 printf_filtered ("cwd = '%s'\n", fname2);
4638 else
8a3fe4f8 4639 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4640 }
4641 if (exe_f || all)
4642 {
89ecc4f5 4643 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4644 memset (fname2, 0, sizeof (fname2));
4645 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4646 printf_filtered ("exe = '%s'\n", fname2);
4647 else
8a3fe4f8 4648 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4649 }
4650 if (mappings_f || all)
4651 {
89ecc4f5 4652 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4653 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4654 {
4655 long long addr, endaddr, size, offset, inode;
4656 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4657 struct cleanup *cleanup;
dba24537 4658
7c8a8b04 4659 cleanup = make_cleanup_fclose (procfile);
a3f17187 4660 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4661 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4662 {
4663 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4664 "Start Addr",
4665 " End Addr",
4666 " Size", " Offset", "objfile");
4667 }
4668 else
4669 {
4670 printf_filtered (" %18s %18s %10s %10s %7s\n",
4671 "Start Addr",
4672 " End Addr",
4673 " Size", " Offset", "objfile");
4674 }
4675
4676 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4677 &offset, &device[0], &inode, &filename[0]))
4678 {
4679 size = endaddr - addr;
4680
4681 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4682 calls here (and possibly above) should be abstracted
4683 out into their own functions? Andrew suggests using
4684 a generic local_address_string instead to print out
4685 the addresses; that makes sense to me, too. */
4686
a97b0ac8 4687 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4688 {
4689 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4690 (unsigned long) addr, /* FIXME: pr_addr */
4691 (unsigned long) endaddr,
4692 (int) size,
4693 (unsigned int) offset,
4694 filename[0] ? filename : "");
4695 }
4696 else
4697 {
4698 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4699 (unsigned long) addr, /* FIXME: pr_addr */
4700 (unsigned long) endaddr,
4701 (int) size,
4702 (unsigned int) offset,
4703 filename[0] ? filename : "");
4704 }
4705 }
4706
7c8a8b04 4707 do_cleanups (cleanup);
dba24537
AC
4708 }
4709 else
8a3fe4f8 4710 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4711 }
4712 if (status_f || all)
4713 {
89ecc4f5 4714 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4715 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4716 {
7c8a8b04 4717 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4718
dba24537
AC
4719 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4720 puts_filtered (buffer);
7c8a8b04 4721 do_cleanups (cleanup);
dba24537
AC
4722 }
4723 else
8a3fe4f8 4724 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4725 }
4726 if (stat_f || all)
4727 {
89ecc4f5 4728 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4729 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4730 {
4731 int itmp;
4732 char ctmp;
a25694b4 4733 long ltmp;
7c8a8b04 4734 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4735
4736 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4737 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4738 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4739 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4740 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4741 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4742 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4743 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4744 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4745 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4746 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4747 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4748 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4749 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4750 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4751 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4752 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4753 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4754 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4755 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4756 (unsigned long) ltmp);
4757 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4758 printf_filtered (_("Minor faults, children: %lu\n"),
4759 (unsigned long) ltmp);
4760 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4761 printf_filtered (_("Major faults (memory page faults): %lu\n"),
4762 (unsigned long) ltmp);
4763 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4764 printf_filtered (_("Major faults, children: %lu\n"),
4765 (unsigned long) ltmp);
4766 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4767 printf_filtered (_("utime: %ld\n"), ltmp);
4768 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4769 printf_filtered (_("stime: %ld\n"), ltmp);
4770 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4771 printf_filtered (_("utime, children: %ld\n"), ltmp);
4772 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4773 printf_filtered (_("stime, children: %ld\n"), ltmp);
4774 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4775 printf_filtered (_("jiffies remaining in current "
4776 "time slice: %ld\n"), ltmp);
a25694b4
AS
4777 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4778 printf_filtered (_("'nice' value: %ld\n"), ltmp);
4779 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4780 printf_filtered (_("jiffies until next timeout: %lu\n"),
4781 (unsigned long) ltmp);
4782 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4783 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4784 (unsigned long) ltmp);
4785 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4786 printf_filtered (_("start time (jiffies since "
4787 "system boot): %ld\n"), ltmp);
a25694b4
AS
4788 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4789 printf_filtered (_("Virtual memory size: %lu\n"),
4790 (unsigned long) ltmp);
4791 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3e43a32a
MS
4792 printf_filtered (_("Resident set size: %lu\n"),
4793 (unsigned long) ltmp);
a25694b4
AS
4794 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4795 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4796 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4797 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4798 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4799 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4800 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4801 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3e43a32a
MS
4802#if 0 /* Don't know how architecture-dependent the rest is...
4803 Anyway the signal bitmap info is available from "status". */
1777feb0 4804 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4805 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
1777feb0 4806 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4
AS
4807 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4808 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4809 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4810 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4811 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4812 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4813 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4814 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4815 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
1777feb0 4816 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4817 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 4818#endif
7c8a8b04 4819 do_cleanups (cleanup);
dba24537
AC
4820 }
4821 else
8a3fe4f8 4822 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4823 }
4824}
4825
10d6c8cd
DJ
4826/* Implement the to_xfer_partial interface for memory reads using the /proc
4827 filesystem. Because we can use a single read() call for /proc, this
4828 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4829 but it doesn't support writes. */
4830
4831static LONGEST
4832linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4833 const char *annex, gdb_byte *readbuf,
4834 const gdb_byte *writebuf,
4835 ULONGEST offset, LONGEST len)
dba24537 4836{
10d6c8cd
DJ
4837 LONGEST ret;
4838 int fd;
dba24537
AC
4839 char filename[64];
4840
10d6c8cd 4841 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4842 return 0;
4843
4844 /* Don't bother for one word. */
4845 if (len < 3 * sizeof (long))
4846 return 0;
4847
4848 /* We could keep this file open and cache it - possibly one per
4849 thread. That requires some juggling, but is even faster. */
4850 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4851 fd = open (filename, O_RDONLY | O_LARGEFILE);
4852 if (fd == -1)
4853 return 0;
4854
4855 /* If pread64 is available, use it. It's faster if the kernel
4856 supports it (only one syscall), and it's 64-bit safe even on
4857 32-bit platforms (for instance, SPARC debugging a SPARC64
4858 application). */
4859#ifdef HAVE_PREAD64
10d6c8cd 4860 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4861#else
10d6c8cd 4862 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4863#endif
4864 ret = 0;
4865 else
4866 ret = len;
4867
4868 close (fd);
4869 return ret;
4870}
4871
efcbbd14
UW
4872
4873/* Enumerate spufs IDs for process PID. */
4874static LONGEST
4875spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4876{
4877 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4878 LONGEST pos = 0;
4879 LONGEST written = 0;
4880 char path[128];
4881 DIR *dir;
4882 struct dirent *entry;
4883
4884 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4885 dir = opendir (path);
4886 if (!dir)
4887 return -1;
4888
4889 rewinddir (dir);
4890 while ((entry = readdir (dir)) != NULL)
4891 {
4892 struct stat st;
4893 struct statfs stfs;
4894 int fd;
4895
4896 fd = atoi (entry->d_name);
4897 if (!fd)
4898 continue;
4899
4900 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4901 if (stat (path, &st) != 0)
4902 continue;
4903 if (!S_ISDIR (st.st_mode))
4904 continue;
4905
4906 if (statfs (path, &stfs) != 0)
4907 continue;
4908 if (stfs.f_type != SPUFS_MAGIC)
4909 continue;
4910
4911 if (pos >= offset && pos + 4 <= offset + len)
4912 {
4913 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4914 written += 4;
4915 }
4916 pos += 4;
4917 }
4918
4919 closedir (dir);
4920 return written;
4921}
4922
4923/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4924 object type, using the /proc file system. */
4925static LONGEST
4926linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4927 const char *annex, gdb_byte *readbuf,
4928 const gdb_byte *writebuf,
4929 ULONGEST offset, LONGEST len)
4930{
4931 char buf[128];
4932 int fd = 0;
4933 int ret = -1;
4934 int pid = PIDGET (inferior_ptid);
4935
4936 if (!annex)
4937 {
4938 if (!readbuf)
4939 return -1;
4940 else
4941 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4942 }
4943
4944 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4945 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4946 if (fd <= 0)
4947 return -1;
4948
4949 if (offset != 0
4950 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4951 {
4952 close (fd);
4953 return 0;
4954 }
4955
4956 if (writebuf)
4957 ret = write (fd, writebuf, (size_t) len);
4958 else if (readbuf)
4959 ret = read (fd, readbuf, (size_t) len);
4960
4961 close (fd);
4962 return ret;
4963}
4964
4965
dba24537
AC
4966/* Parse LINE as a signal set and add its set bits to SIGS. */
4967
4968static void
4969add_line_to_sigset (const char *line, sigset_t *sigs)
4970{
4971 int len = strlen (line) - 1;
4972 const char *p;
4973 int signum;
4974
4975 if (line[len] != '\n')
8a3fe4f8 4976 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4977
4978 p = line;
4979 signum = len * 4;
4980 while (len-- > 0)
4981 {
4982 int digit;
4983
4984 if (*p >= '0' && *p <= '9')
4985 digit = *p - '0';
4986 else if (*p >= 'a' && *p <= 'f')
4987 digit = *p - 'a' + 10;
4988 else
8a3fe4f8 4989 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4990
4991 signum -= 4;
4992
4993 if (digit & 1)
4994 sigaddset (sigs, signum + 1);
4995 if (digit & 2)
4996 sigaddset (sigs, signum + 2);
4997 if (digit & 4)
4998 sigaddset (sigs, signum + 3);
4999 if (digit & 8)
5000 sigaddset (sigs, signum + 4);
5001
5002 p++;
5003 }
5004}
5005
5006/* Find process PID's pending signals from /proc/pid/status and set
5007 SIGS to match. */
5008
5009void
3e43a32a
MS
5010linux_proc_pending_signals (int pid, sigset_t *pending,
5011 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
5012{
5013 FILE *procfile;
5014 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 5015 struct cleanup *cleanup;
dba24537
AC
5016
5017 sigemptyset (pending);
5018 sigemptyset (blocked);
5019 sigemptyset (ignored);
5020 sprintf (fname, "/proc/%d/status", pid);
5021 procfile = fopen (fname, "r");
5022 if (procfile == NULL)
8a3fe4f8 5023 error (_("Could not open %s"), fname);
7c8a8b04 5024 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
5025
5026 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5027 {
5028 /* Normal queued signals are on the SigPnd line in the status
5029 file. However, 2.6 kernels also have a "shared" pending
5030 queue for delivering signals to a thread group, so check for
5031 a ShdPnd line also.
5032
5033 Unfortunately some Red Hat kernels include the shared pending
5034 queue but not the ShdPnd status field. */
5035
5036 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5037 add_line_to_sigset (buffer + 8, pending);
5038 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5039 add_line_to_sigset (buffer + 8, pending);
5040 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5041 add_line_to_sigset (buffer + 8, blocked);
5042 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5043 add_line_to_sigset (buffer + 8, ignored);
5044 }
5045
7c8a8b04 5046 do_cleanups (cleanup);
dba24537
AC
5047}
5048
07e059b5
VP
5049static LONGEST
5050linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
5051 const char *annex, gdb_byte *readbuf,
5052 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5
VP
5053{
5054 /* We make the process list snapshot when the object starts to be
5055 read. */
5056 static const char *buf;
5057 static LONGEST len_avail = -1;
5058 static struct obstack obstack;
5059
5060 DIR *dirp;
5061
5062 gdb_assert (object == TARGET_OBJECT_OSDATA);
5063
a61408f8
SS
5064 if (!annex)
5065 {
5066 if (offset == 0)
5067 {
5068 if (len_avail != -1 && len_avail != 0)
5069 obstack_free (&obstack, NULL);
5070 len_avail = 0;
5071 buf = NULL;
5072 obstack_init (&obstack);
5073 obstack_grow_str (&obstack, "<osdata type=\"types\">\n");
5074
3e43a32a 5075 obstack_xml_printf (&obstack,
a61408f8
SS
5076 "<item>"
5077 "<column name=\"Type\">processes</column>"
3e43a32a
MS
5078 "<column name=\"Description\">"
5079 "Listing of all processes</column>"
a61408f8
SS
5080 "</item>");
5081
5082 obstack_grow_str0 (&obstack, "</osdata>\n");
5083 buf = obstack_finish (&obstack);
5084 len_avail = strlen (buf);
5085 }
5086
5087 if (offset >= len_avail)
5088 {
5089 /* Done. Get rid of the obstack. */
5090 obstack_free (&obstack, NULL);
5091 buf = NULL;
5092 len_avail = 0;
5093 return 0;
5094 }
5095
5096 if (len > len_avail - offset)
5097 len = len_avail - offset;
5098 memcpy (readbuf, buf + offset, len);
5099
5100 return len;
5101 }
5102
07e059b5
VP
5103 if (strcmp (annex, "processes") != 0)
5104 return 0;
5105
5106 gdb_assert (readbuf && !writebuf);
5107
5108 if (offset == 0)
5109 {
5110 if (len_avail != -1 && len_avail != 0)
e0881a8e 5111 obstack_free (&obstack, NULL);
07e059b5
VP
5112 len_avail = 0;
5113 buf = NULL;
5114 obstack_init (&obstack);
5115 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
5116
5117 dirp = opendir ("/proc");
5118 if (dirp)
e0881a8e
MS
5119 {
5120 struct dirent *dp;
5121
5122 while ((dp = readdir (dirp)) != NULL)
5123 {
5124 struct stat statbuf;
5125 char procentry[sizeof ("/proc/4294967295")];
5126
5127 if (!isdigit (dp->d_name[0])
5128 || NAMELEN (dp) > sizeof ("4294967295") - 1)
5129 continue;
5130
5131 sprintf (procentry, "/proc/%s", dp->d_name);
5132 if (stat (procentry, &statbuf) == 0
5133 && S_ISDIR (statbuf.st_mode))
5134 {
5135 char *pathname;
5136 FILE *f;
5137 char cmd[MAXPATHLEN + 1];
5138 struct passwd *entry;
5139
5140 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
5141 entry = getpwuid (statbuf.st_uid);
5142
5143 if ((f = fopen (pathname, "r")) != NULL)
5144 {
5eee517d 5145 size_t length = fread (cmd, 1, sizeof (cmd) - 1, f);
e0881a8e 5146
5eee517d 5147 if (length > 0)
e0881a8e
MS
5148 {
5149 int i;
5150
5eee517d 5151 for (i = 0; i < length; i++)
e0881a8e
MS
5152 if (cmd[i] == '\0')
5153 cmd[i] = ' ';
5eee517d 5154 cmd[length] = '\0';
e0881a8e
MS
5155
5156 obstack_xml_printf (
5157 &obstack,
5158 "<item>"
5159 "<column name=\"pid\">%s</column>"
5160 "<column name=\"user\">%s</column>"
5161 "<column name=\"command\">%s</column>"
5162 "</item>",
5163 dp->d_name,
5164 entry ? entry->pw_name : "?",
5165 cmd);
5166 }
5167 fclose (f);
5168 }
5169
5170 xfree (pathname);
5171 }
5172 }
5173
5174 closedir (dirp);
5175 }
07e059b5
VP
5176
5177 obstack_grow_str0 (&obstack, "</osdata>\n");
5178 buf = obstack_finish (&obstack);
5179 len_avail = strlen (buf);
5180 }
5181
5182 if (offset >= len_avail)
5183 {
5184 /* Done. Get rid of the obstack. */
5185 obstack_free (&obstack, NULL);
5186 buf = NULL;
5187 len_avail = 0;
5188 return 0;
5189 }
5190
5191 if (len > len_avail - offset)
5192 len = len_avail - offset;
5193 memcpy (readbuf, buf + offset, len);
5194
5195 return len;
5196}
5197
10d6c8cd
DJ
5198static LONGEST
5199linux_xfer_partial (struct target_ops *ops, enum target_object object,
5200 const char *annex, gdb_byte *readbuf,
5201 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5202{
5203 LONGEST xfer;
5204
5205 if (object == TARGET_OBJECT_AUXV)
9f2982ff 5206 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
5207 offset, len);
5208
07e059b5
VP
5209 if (object == TARGET_OBJECT_OSDATA)
5210 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5211 offset, len);
5212
efcbbd14
UW
5213 if (object == TARGET_OBJECT_SPU)
5214 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5215 offset, len);
5216
8f313923
JK
5217 /* GDB calculates all the addresses in possibly larget width of the address.
5218 Address width needs to be masked before its final use - either by
5219 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5220
5221 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5222
5223 if (object == TARGET_OBJECT_MEMORY)
5224 {
5225 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5226
5227 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5228 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5229 }
5230
10d6c8cd
DJ
5231 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5232 offset, len);
5233 if (xfer != 0)
5234 return xfer;
5235
5236 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5237 offset, len);
5238}
5239
e9efe249 5240/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
5241 it with local methods. */
5242
910122bf
UW
5243static void
5244linux_target_install_ops (struct target_ops *t)
10d6c8cd 5245{
6d8fd2b7
UW
5246 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
5247 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
5248 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
a96d9b2e 5249 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 5250 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 5251 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
5252 t->to_post_attach = linux_child_post_attach;
5253 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
5254 t->to_find_memory_regions = linux_nat_find_memory_regions;
5255 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5256
5257 super_xfer_partial = t->to_xfer_partial;
5258 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
5259}
5260
5261struct target_ops *
5262linux_target (void)
5263{
5264 struct target_ops *t;
5265
5266 t = inf_ptrace_target ();
5267 linux_target_install_ops (t);
5268
5269 return t;
5270}
5271
5272struct target_ops *
7714d83a 5273linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
5274{
5275 struct target_ops *t;
5276
5277 t = inf_ptrace_trad_target (register_u_offset);
5278 linux_target_install_ops (t);
10d6c8cd 5279
10d6c8cd
DJ
5280 return t;
5281}
5282
b84876c2
PA
5283/* target_is_async_p implementation. */
5284
5285static int
5286linux_nat_is_async_p (void)
5287{
5288 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5289 it explicitly with the "set target-async" command.
b84876c2 5290 Someday, linux will always be async. */
c6ebd6cf 5291 if (!target_async_permitted)
b84876c2
PA
5292 return 0;
5293
d90e17a7
PA
5294 /* See target.h/target_async_mask. */
5295 return linux_nat_async_mask_value;
b84876c2
PA
5296}
5297
5298/* target_can_async_p implementation. */
5299
5300static int
5301linux_nat_can_async_p (void)
5302{
5303 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5304 it explicitly with the "set target-async" command.
b84876c2 5305 Someday, linux will always be async. */
c6ebd6cf 5306 if (!target_async_permitted)
b84876c2
PA
5307 return 0;
5308
5309 /* See target.h/target_async_mask. */
5310 return linux_nat_async_mask_value;
5311}
5312
9908b566
VP
5313static int
5314linux_nat_supports_non_stop (void)
5315{
5316 return 1;
5317}
5318
d90e17a7
PA
5319/* True if we want to support multi-process. To be removed when GDB
5320 supports multi-exec. */
5321
2277426b 5322int linux_multi_process = 1;
d90e17a7
PA
5323
5324static int
5325linux_nat_supports_multi_process (void)
5326{
5327 return linux_multi_process;
5328}
5329
b84876c2
PA
5330/* target_async_mask implementation. */
5331
5332static int
7feb7d06 5333linux_nat_async_mask (int new_mask)
b84876c2 5334{
7feb7d06 5335 int curr_mask = linux_nat_async_mask_value;
b84876c2 5336
7feb7d06 5337 if (curr_mask != new_mask)
b84876c2 5338 {
7feb7d06 5339 if (new_mask == 0)
b84876c2
PA
5340 {
5341 linux_nat_async (NULL, 0);
7feb7d06 5342 linux_nat_async_mask_value = new_mask;
b84876c2
PA
5343 }
5344 else
5345 {
7feb7d06 5346 linux_nat_async_mask_value = new_mask;
84e46146 5347
7feb7d06
PA
5348 /* If we're going out of async-mask in all-stop, then the
5349 inferior is stopped. The next resume will call
5350 target_async. In non-stop, the target event source
5351 should be always registered in the event loop. Do so
5352 now. */
5353 if (non_stop)
5354 linux_nat_async (inferior_event_handler, 0);
b84876c2
PA
5355 }
5356 }
5357
7feb7d06 5358 return curr_mask;
b84876c2
PA
5359}
5360
5361static int async_terminal_is_ours = 1;
5362
5363/* target_terminal_inferior implementation. */
5364
5365static void
5366linux_nat_terminal_inferior (void)
5367{
5368 if (!target_is_async_p ())
5369 {
5370 /* Async mode is disabled. */
5371 terminal_inferior ();
5372 return;
5373 }
5374
b84876c2
PA
5375 terminal_inferior ();
5376
d9d2d8b6 5377 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5378 if (!async_terminal_is_ours)
5379 return;
5380
5381 delete_file_handler (input_fd);
5382 async_terminal_is_ours = 0;
5383 set_sigint_trap ();
5384}
5385
5386/* target_terminal_ours implementation. */
5387
2c0b251b 5388static void
b84876c2
PA
5389linux_nat_terminal_ours (void)
5390{
5391 if (!target_is_async_p ())
5392 {
5393 /* Async mode is disabled. */
5394 terminal_ours ();
5395 return;
5396 }
5397
5398 /* GDB should never give the terminal to the inferior if the
5399 inferior is running in the background (run&, continue&, etc.),
5400 but claiming it sure should. */
5401 terminal_ours ();
5402
b84876c2
PA
5403 if (async_terminal_is_ours)
5404 return;
5405
5406 clear_sigint_trap ();
5407 add_file_handler (input_fd, stdin_event_handler, 0);
5408 async_terminal_is_ours = 1;
5409}
5410
5411static void (*async_client_callback) (enum inferior_event_type event_type,
5412 void *context);
5413static void *async_client_context;
5414
7feb7d06
PA
5415/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
5416 so we notice when any child changes state, and notify the
5417 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
5418 above to wait for the arrival of a SIGCHLD. */
5419
b84876c2 5420static void
7feb7d06 5421sigchld_handler (int signo)
b84876c2 5422{
7feb7d06
PA
5423 int old_errno = errno;
5424
5425 if (debug_linux_nat_async)
5426 fprintf_unfiltered (gdb_stdlog, "sigchld\n");
5427
5428 if (signo == SIGCHLD
5429 && linux_nat_event_pipe[0] != -1)
5430 async_file_mark (); /* Let the event loop know that there are
5431 events to handle. */
5432
5433 errno = old_errno;
5434}
5435
5436/* Callback registered with the target events file descriptor. */
5437
5438static void
5439handle_target_event (int error, gdb_client_data client_data)
5440{
5441 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5442}
5443
5444/* Create/destroy the target events pipe. Returns previous state. */
5445
5446static int
5447linux_async_pipe (int enable)
5448{
5449 int previous = (linux_nat_event_pipe[0] != -1);
5450
5451 if (previous != enable)
5452 {
5453 sigset_t prev_mask;
5454
5455 block_child_signals (&prev_mask);
5456
5457 if (enable)
5458 {
5459 if (pipe (linux_nat_event_pipe) == -1)
5460 internal_error (__FILE__, __LINE__,
5461 "creating event pipe failed.");
5462
5463 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5464 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5465 }
5466 else
5467 {
5468 close (linux_nat_event_pipe[0]);
5469 close (linux_nat_event_pipe[1]);
5470 linux_nat_event_pipe[0] = -1;
5471 linux_nat_event_pipe[1] = -1;
5472 }
5473
5474 restore_child_signals_mask (&prev_mask);
5475 }
5476
5477 return previous;
b84876c2
PA
5478}
5479
5480/* target_async implementation. */
5481
5482static void
5483linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5484 void *context), void *context)
5485{
c6ebd6cf 5486 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
5487 internal_error (__FILE__, __LINE__,
5488 "Calling target_async when async is masked");
5489
5490 if (callback != NULL)
5491 {
5492 async_client_callback = callback;
5493 async_client_context = context;
7feb7d06
PA
5494 if (!linux_async_pipe (1))
5495 {
5496 add_file_handler (linux_nat_event_pipe[0],
5497 handle_target_event, NULL);
5498 /* There may be pending events to handle. Tell the event loop
5499 to poll them. */
5500 async_file_mark ();
5501 }
b84876c2
PA
5502 }
5503 else
5504 {
5505 async_client_callback = callback;
5506 async_client_context = context;
b84876c2 5507 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5508 linux_async_pipe (0);
b84876c2
PA
5509 }
5510 return;
5511}
5512
252fbfc8
PA
5513/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5514 event came out. */
5515
4c28f408 5516static int
252fbfc8 5517linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5518{
d90e17a7 5519 if (!lwp->stopped)
252fbfc8 5520 {
d90e17a7 5521 ptid_t ptid = lwp->ptid;
252fbfc8 5522
d90e17a7
PA
5523 if (debug_linux_nat)
5524 fprintf_unfiltered (gdb_stdlog,
5525 "LNSL: running -> suspending %s\n",
5526 target_pid_to_str (lwp->ptid));
252fbfc8 5527
252fbfc8 5528
d90e17a7
PA
5529 stop_callback (lwp, NULL);
5530 stop_wait_callback (lwp, NULL);
252fbfc8 5531
d90e17a7
PA
5532 /* If the lwp exits while we try to stop it, there's nothing
5533 else to do. */
5534 lwp = find_lwp_pid (ptid);
5535 if (lwp == NULL)
5536 return 0;
252fbfc8 5537
d90e17a7
PA
5538 /* If we didn't collect any signal other than SIGSTOP while
5539 stopping the LWP, push a SIGNAL_0 event. In either case, the
5540 event-loop will end up calling target_wait which will collect
5541 these. */
5542 if (lwp->status == 0)
5543 lwp->status = W_STOPCODE (0);
5544 async_file_mark ();
5545 }
5546 else
5547 {
5548 /* Already known to be stopped; do nothing. */
252fbfc8 5549
d90e17a7
PA
5550 if (debug_linux_nat)
5551 {
e09875d4 5552 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5553 fprintf_unfiltered (gdb_stdlog,
5554 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5555 target_pid_to_str (lwp->ptid));
5556 else
3e43a32a
MS
5557 fprintf_unfiltered (gdb_stdlog,
5558 "LNSL: already stopped/no "
5559 "stop_requested yet %s\n",
d90e17a7 5560 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5561 }
5562 }
4c28f408
PA
5563 return 0;
5564}
5565
5566static void
5567linux_nat_stop (ptid_t ptid)
5568{
5569 if (non_stop)
d90e17a7 5570 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5571 else
5572 linux_ops->to_stop (ptid);
5573}
5574
d90e17a7
PA
5575static void
5576linux_nat_close (int quitting)
5577{
5578 /* Unregister from the event loop. */
5579 if (target_is_async_p ())
5580 target_async (NULL, 0);
5581
5582 /* Reset the async_masking. */
5583 linux_nat_async_mask_value = 1;
5584
5585 if (linux_ops->to_close)
5586 linux_ops->to_close (quitting);
5587}
5588
c0694254
PA
5589/* When requests are passed down from the linux-nat layer to the
5590 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5591 used. The address space pointer is stored in the inferior object,
5592 but the common code that is passed such ptid can't tell whether
5593 lwpid is a "main" process id or not (it assumes so). We reverse
5594 look up the "main" process id from the lwp here. */
5595
5596struct address_space *
5597linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5598{
5599 struct lwp_info *lwp;
5600 struct inferior *inf;
5601 int pid;
5602
5603 pid = GET_LWP (ptid);
5604 if (GET_LWP (ptid) == 0)
5605 {
5606 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5607 tgid. */
5608 lwp = find_lwp_pid (ptid);
5609 pid = GET_PID (lwp->ptid);
5610 }
5611 else
5612 {
5613 /* A (pid,lwpid,0) ptid. */
5614 pid = GET_PID (ptid);
5615 }
5616
5617 inf = find_inferior_pid (pid);
5618 gdb_assert (inf != NULL);
5619 return inf->aspace;
5620}
5621
dc146f7c
VP
5622int
5623linux_nat_core_of_thread_1 (ptid_t ptid)
5624{
5625 struct cleanup *back_to;
5626 char *filename;
5627 FILE *f;
5628 char *content = NULL;
5629 char *p;
5630 char *ts = 0;
5631 int content_read = 0;
5632 int i;
5633 int core;
5634
5635 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5636 GET_PID (ptid), GET_LWP (ptid));
5637 back_to = make_cleanup (xfree, filename);
5638
5639 f = fopen (filename, "r");
5640 if (!f)
5641 {
5642 do_cleanups (back_to);
5643 return -1;
5644 }
5645
5646 make_cleanup_fclose (f);
5647
5648 for (;;)
5649 {
5650 int n;
e0881a8e 5651
dc146f7c
VP
5652 content = xrealloc (content, content_read + 1024);
5653 n = fread (content + content_read, 1, 1024, f);
5654 content_read += n;
5655 if (n < 1024)
5656 {
5657 content[content_read] = '\0';
5658 break;
5659 }
5660 }
5661
5662 make_cleanup (xfree, content);
5663
5664 p = strchr (content, '(');
ca2a87a0
JK
5665
5666 /* Skip ")". */
5667 if (p != NULL)
5668 p = strchr (p, ')');
5669 if (p != NULL)
5670 p++;
dc146f7c
VP
5671
5672 /* If the first field after program name has index 0, then core number is
5673 the field with index 36. There's no constant for that anywhere. */
ca2a87a0
JK
5674 if (p != NULL)
5675 p = strtok_r (p, " ", &ts);
5676 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5677 p = strtok_r (NULL, " ", &ts);
5678
ca2a87a0 5679 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5680 core = -1;
5681
5682 do_cleanups (back_to);
5683
5684 return core;
5685}
5686
5687/* Return the cached value of the processor core for thread PTID. */
5688
5689int
5690linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5691{
5692 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5693
dc146f7c
VP
5694 if (info)
5695 return info->core;
5696 return -1;
5697}
5698
f973ed9c
DJ
5699void
5700linux_nat_add_target (struct target_ops *t)
5701{
f973ed9c
DJ
5702 /* Save the provided single-threaded target. We save this in a separate
5703 variable because another target we've inherited from (e.g. inf-ptrace)
5704 may have saved a pointer to T; we want to use it for the final
5705 process stratum target. */
5706 linux_ops_saved = *t;
5707 linux_ops = &linux_ops_saved;
5708
5709 /* Override some methods for multithreading. */
b84876c2 5710 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5711 t->to_attach = linux_nat_attach;
5712 t->to_detach = linux_nat_detach;
5713 t->to_resume = linux_nat_resume;
5714 t->to_wait = linux_nat_wait;
5715 t->to_xfer_partial = linux_nat_xfer_partial;
5716 t->to_kill = linux_nat_kill;
5717 t->to_mourn_inferior = linux_nat_mourn_inferior;
5718 t->to_thread_alive = linux_nat_thread_alive;
5719 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 5720 t->to_thread_name = linux_nat_thread_name;
f973ed9c 5721 t->to_has_thread_control = tc_schedlock;
c0694254 5722 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5723 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5724 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5725
b84876c2
PA
5726 t->to_can_async_p = linux_nat_can_async_p;
5727 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5728 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
5729 t->to_async = linux_nat_async;
5730 t->to_async_mask = linux_nat_async_mask;
5731 t->to_terminal_inferior = linux_nat_terminal_inferior;
5732 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5733 t->to_close = linux_nat_close;
b84876c2 5734
4c28f408
PA
5735 /* Methods for non-stop support. */
5736 t->to_stop = linux_nat_stop;
5737
d90e17a7
PA
5738 t->to_supports_multi_process = linux_nat_supports_multi_process;
5739
dc146f7c
VP
5740 t->to_core_of_thread = linux_nat_core_of_thread;
5741
f973ed9c
DJ
5742 /* We don't change the stratum; this target will sit at
5743 process_stratum and thread_db will set at thread_stratum. This
5744 is a little strange, since this is a multi-threaded-capable
5745 target, but we want to be on the stack below thread_db, and we
5746 also want to be used for single-threaded processes. */
5747
5748 add_target (t);
f973ed9c
DJ
5749}
5750
9f0bdab8
DJ
5751/* Register a method to call whenever a new thread is attached. */
5752void
5753linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
5754{
5755 /* Save the pointer. We only support a single registered instance
5756 of the GNU/Linux native target, so we do not need to map this to
5757 T. */
5758 linux_nat_new_thread = new_thread;
5759}
5760
5b009018
PA
5761/* Register a method that converts a siginfo object between the layout
5762 that ptrace returns, and the layout in the architecture of the
5763 inferior. */
5764void
5765linux_nat_set_siginfo_fixup (struct target_ops *t,
5766 int (*siginfo_fixup) (struct siginfo *,
5767 gdb_byte *,
5768 int))
5769{
5770 /* Save the pointer. */
5771 linux_nat_siginfo_fixup = siginfo_fixup;
5772}
5773
9f0bdab8
DJ
5774/* Return the saved siginfo associated with PTID. */
5775struct siginfo *
5776linux_nat_get_siginfo (ptid_t ptid)
5777{
5778 struct lwp_info *lp = find_lwp_pid (ptid);
5779
5780 gdb_assert (lp != NULL);
5781
5782 return &lp->siginfo;
5783}
5784
2c0b251b
PA
5785/* Provide a prototype to silence -Wmissing-prototypes. */
5786extern initialize_file_ftype _initialize_linux_nat;
5787
d6b0e80f
AC
5788void
5789_initialize_linux_nat (void)
5790{
1bedd215
AC
5791 add_info ("proc", linux_nat_info_proc_cmd, _("\
5792Show /proc process information about any running process.\n\
dba24537
AC
5793Specify any process id, or use the program being debugged by default.\n\
5794Specify any of the following keywords for detailed info:\n\
5795 mappings -- list of mapped memory regions.\n\
5796 stat -- list a bunch of random process info.\n\
5797 status -- list a different bunch of random process info.\n\
1bedd215 5798 all -- list all available /proc info."));
d6b0e80f 5799
b84876c2
PA
5800 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5801 &debug_linux_nat, _("\
5802Set debugging of GNU/Linux lwp module."), _("\
5803Show debugging of GNU/Linux lwp module."), _("\
5804Enables printf debugging output."),
5805 NULL,
5806 show_debug_linux_nat,
5807 &setdebuglist, &showdebuglist);
5808
5809 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
5810 &debug_linux_nat_async, _("\
5811Set debugging of GNU/Linux async lwp module."), _("\
5812Show debugging of GNU/Linux async lwp module."), _("\
5813Enables printf debugging output."),
5814 NULL,
5815 show_debug_linux_nat_async,
5816 &setdebuglist, &showdebuglist);
5817
b84876c2 5818 /* Save this mask as the default. */
d6b0e80f
AC
5819 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5820
7feb7d06
PA
5821 /* Install a SIGCHLD handler. */
5822 sigchld_action.sa_handler = sigchld_handler;
5823 sigemptyset (&sigchld_action.sa_mask);
5824 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5825
5826 /* Make it the default. */
7feb7d06 5827 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5828
5829 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5830 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5831 sigdelset (&suspend_mask, SIGCHLD);
5832
7feb7d06 5833 sigemptyset (&blocked_mask);
10568435
JK
5834
5835 add_setshow_boolean_cmd ("disable-randomization", class_support,
5836 &disable_randomization, _("\
5837Set disabling of debuggee's virtual address space randomization."), _("\
5838Show disabling of debuggee's virtual address space randomization."), _("\
5839When this mode is on (which is the default), randomization of the virtual\n\
5840address space is disabled. Standalone programs run with the randomization\n\
5841enabled by default on some platforms."),
5842 &set_disable_randomization,
5843 &show_disable_randomization,
5844 &setlist, &showlist);
d6b0e80f
AC
5845}
5846\f
5847
5848/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5849 the GNU/Linux Threads library and therefore doesn't really belong
5850 here. */
5851
5852/* Read variable NAME in the target and return its value if found.
5853 Otherwise return zero. It is assumed that the type of the variable
5854 is `int'. */
5855
5856static int
5857get_signo (const char *name)
5858{
5859 struct minimal_symbol *ms;
5860 int signo;
5861
5862 ms = lookup_minimal_symbol (name, NULL, NULL);
5863 if (ms == NULL)
5864 return 0;
5865
8e70166d 5866 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5867 sizeof (signo)) != 0)
5868 return 0;
5869
5870 return signo;
5871}
5872
5873/* Return the set of signals used by the threads library in *SET. */
5874
5875void
5876lin_thread_get_thread_signals (sigset_t *set)
5877{
5878 struct sigaction action;
5879 int restart, cancel;
5880
b84876c2 5881 sigemptyset (&blocked_mask);
d6b0e80f
AC
5882 sigemptyset (set);
5883
5884 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5885 cancel = get_signo ("__pthread_sig_cancel");
5886
5887 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5888 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5889 not provide any way for the debugger to query the signal numbers -
5890 fortunately they don't change! */
5891
d6b0e80f 5892 if (restart == 0)
17fbb0bd 5893 restart = __SIGRTMIN;
d6b0e80f 5894
d6b0e80f 5895 if (cancel == 0)
17fbb0bd 5896 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5897
5898 sigaddset (set, restart);
5899 sigaddset (set, cancel);
5900
5901 /* The GNU/Linux Threads library makes terminating threads send a
5902 special "cancel" signal instead of SIGCHLD. Make sure we catch
5903 those (to prevent them from terminating GDB itself, which is
5904 likely to be their default action) and treat them the same way as
5905 SIGCHLD. */
5906
5907 action.sa_handler = sigchld_handler;
5908 sigemptyset (&action.sa_mask);
58aecb61 5909 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5910 sigaction (cancel, &action, NULL);
5911
5912 /* We block the "cancel" signal throughout this code ... */
5913 sigaddset (&blocked_mask, cancel);
5914 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5915
5916 /* ... except during a sigsuspend. */
5917 sigdelset (&suspend_mask, cancel);
5918}
This page took 1.447716 seconds and 4 git commands to generate.