/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc.  */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior to
version 2.4, Linux can either wait for an event in the main thread, or in
secondary threads.  (2.4 has the __WALL flag).  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking waitpid,
together with sigsuspend.  First, we use non-blocking waitpid to get an event
in the main process, if any.  Second, we use non-blocking waitpid with the
__WCLONE flag to check for events in cloned processes.  If nothing is found,
we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be delivered both
for events in the main debugged process and in cloned processes.  As soon as
we know there's an event, we get back to calling nonblocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
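
/* The following is an illustrative sketch only, not part of GDB's
   implementation: a minimal version of the sync-mode wait scheme
   described above, using non-blocking waitpid with and without
   __WCLONE and falling back to sigsuspend.  It assumes the caller
   has already blocked SIGCHLD, as required above, and the function
   name is made up for this example.  */

static pid_t
example_wait_for_any_event (int *statusp)
{
  sigset_t suspend;
  pid_t pid;

  /* While sleeping, unblock only SIGCHLD.  */
  sigfillset (&suspend);
  sigdelset (&suspend, SIGCHLD);

  for (;;)
    {
      /* Check for an event in the main process...  */
      pid = waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
	return pid;

      /* ... and then in cloned (thread) children.  */
      pid = waitpid (-1, statusp, WNOHANG | __WCLONE);
      if (pid > 0)
	return pid;

      /* Nothing pending; sleep until SIGCHLD arrives.  A SIGCHLD
	 delivered between the waitpid calls and this point is not
	 lost: it stays pending while blocked and makes sigsuspend
	 return immediately.  */
      sigsuspend (&suspend);
    }
}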

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS 0x4200
#define PTRACE_GETEVENTMSG 0x4201

/* Options set using PTRACE_SETOPTIONS.  */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#define PTRACE_O_TRACEFORK 0x00000002
#define PTRACE_O_TRACEVFORK 0x00000004
#define PTRACE_O_TRACECLONE 0x00000008
#define PTRACE_O_TRACEEXEC 0x00000010
#define PTRACE_O_TRACEVFORKDONE 0x00000020
#define PTRACE_O_TRACEEXIT 0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK 1
#define PTRACE_EVENT_VFORK 2
#define PTRACE_EVENT_CLONE 3
#define PTRACE_EVENT_EXEC 4
#define PTRACE_EVENT_VFORK_DONE 5
#define PTRACE_EVENT_EXIT 6

#endif /* PTRACE_EVENT_FORK */

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
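
/* Illustrative example only, not used by GDB itself: given a wait
   status from one of the waitpid calls in this file, this is how the
   extended ptrace events and syscall stops defined above show up.
   The helper name and its return codes are made up for this sketch.  */

static int
example_classify_stop (int status)
{
  if (!WIFSTOPPED (status))
    return 0;

  /* A syscall stop under PTRACE_O_TRACESYSGOOD: SIGTRAP with bit 7
     set in the stop signal.  */
  if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
    return 1;

  /* An extended event (here, fork): an ordinary SIGTRAP stop with
     the event code in the upper bits of the status.  For fork-like
     events, PTRACE_GETEVENTMSG then yields the new child's pid.  */
  if (WSTOPSIG (status) == SIGTRAP && (status >> 16) == PTRACE_EVENT_FORK)
    return 2;

  /* Some other stop (a plain signal).  */
  return 3;
}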

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL 0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO    0x4202
# define PTRACE_SETSIGINFO    0x4203
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugging of GNU/Linux async lwp module is %s.\n"),
                    value);
}

static int disable_randomization = 1;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file,
                    _("Disabling randomization of debuggee's "
                      "virtual address space is %s.\n"),
                    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("Disabling randomization of debuggee's "
                    "virtual address space is unsupported on\n"
                    "this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}

static void
set_disable_randomization (char *args, int from_tty,
                           struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("Disabling randomization of debuggee's "
           "virtual address space is unsupported on\n"
           "this platform."));
#endif /* !HAVE_PERSONALITY */
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support.  */

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}
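
/* Illustrative sketch only, not GDB's actual SIGCHLD handler: the
   signal-handler side of the self-pipe trick described at the top of
   this file.  A handler may only call async-signal-safe functions,
   which is why all it does is mark the event pipe (read/write are
   async-signal-safe) and preserve errno.  The name is made up for
   this example.  */

static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;

  if (signo == SIGCHLD && linux_nat_event_pipe[0] != -1)
    async_file_mark ();		/* Wake up the event loop.  */

  errno = old_errno;
}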

static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);


/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}


/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
           status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          restore_child_signals_mask (&prev_mask);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed "
                   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected "
                   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: "
                       "failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}


void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
  linux_enable_tracesysgood (ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* keep breakpoints list in sync.  */
              remove_breakpoints_pid (GET_PID (inferior_ptid));
            }

          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from "
                                "child process %d.\n",
                                child_pid);
            }

          ptrace (PTRACE_DETACH, child_pid, 0, 0);
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_pid);

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = ptid_build (child_pid, child_pid, 0);
          add_thread (inferior_ptid);
          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->resumed = 1;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (solib-svr4) learn about
                 this new process, relocate the cloned exec, pull in
                 shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct lwp_info *lp;
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          lp = find_lwp_pid (pid_to_ptid (parent_pid));
          gdb_assert (linux_supports_tracefork_flag >= 0);
          if (linux_supports_tracevforkdone (0))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);

              lp->stopped = 1;
              lp->resumed = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypasses actually
                 resuming the inferior.  */
              lp->status = 0;
              lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              lp->stopped = 0;
              lp->resumed = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_can_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          if (has_vforked)
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "vfork to child process %d.\n"),
                              parent_pid, child_pid);
          else
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "fork to child process %d.\n"),
                              parent_pid, child_pid);
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      lp = add_lwp (inferior_ptid);
      lp->stopped = 1;
      lp->resumed = 1;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (solib-svr4) learn about
             this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}


static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}

static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}

static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (! linux_supports_tracesysgood (pid))
    error (_("Your system does not support syscall catchpoints."));
  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
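
/* Illustrative sketch only (the same retry appears for real in
   linux_nat_post_attach_wait below): because of the __WCLONE
   peculiarity described above, waiting for a specific LWP may need to
   be retried with the __WCLONE flag when a plain waitpid reports
   ECHILD, i.e. the LWP is a cloned child rather than a regular child
   of ours.  The helper name is made up for this example.  */

static pid_t
example_wait_lwp (pid_t lwp, int *statusp, int *clonedp)
{
  pid_t ret = my_waitpid (lwp, statusp, 0);

  if (ret == -1 && errno == ECHILD)
    {
      /* Not a regular child of ours; try again as a cloned LWP.  */
      ret = my_waitpid (lwp, statusp, __WCLONE);
      *clonedp = 1;
    }

  return ret;
}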

/* List of known LWPs.  */
struct lwp_info *lwp_list;


/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}


/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);


/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
                  strsignal (SIGTRAP));
      else
        snprintf (buf, sizeof (buf), "%s (stopped)",
                  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          xfree (lp);
        }
      else
        lpprev = lp;
    }
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   int (*callback) (struct lwp_info *, void *),
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data))
            return lp;
        }
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Return an lwp's tgid, found in `/proc/PID/status'.  */

int
linux_proc_get_tgid (int lwpid)
{
  FILE *status_file;
  char buf[100];
  int tgid = -1;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "Tgid:", 5) == 0)
            {
              tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10);
              break;
            }
        }

      fclose (status_file);
    }

  return tgid;
}

/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "State:", 6) == 0)
            {
              have_state = 1;
              break;
            }
        }
      if (have_state && strstr (buf, "T (stopped)") != NULL)
        retval = 1;
      fclose (status_file);
    }
  return retval;
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
                            pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful or -1
   if the new LWP could not be attached.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;

  gdb_assert (is_lwp (ptid));

  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
        {
          /* If we fail to attach to the thread, issue a warning,
             but continue.  One way this can happen is if thread
             creation is interrupted; as of Linux kernel 2.6.19, a
             bug may place threads in the thread list and then fail
             to create them.  */
          warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
                   safe_strerror (errno));
          restore_child_signals_mask (&prev_mask);
          return -1;
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
                            target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
        return -1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
        {
          lp->resumed = 1;
          lp->status = status;
        }

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "LLAL: waitpid %s received %s\n",
                              target_pid_to_str (ptid),
                              status_to_str (status));
        }
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/Linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
        lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
                           char *exec_file, char *allargs, char **env,
                           int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning (_("Error disabling address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning (_("Error restoring address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  linux_ops->to_attach (ops, args, from_tty);

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
                                       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
        {
          int exit_code = WEXITSTATUS (status);

          target_terminal_ours ();
          target_mourn_inferior ();
          if (exit_code == 0)
            error (_("Unable to attach: program exited normally."));
          else
            error (_("Unable to attach: program exited with code %d."),
                   exit_code);
        }
      else if (WIFSIGNALED (status))
        {
          enum target_signal signo;

          target_terminal_ours ();
          target_mourn_inferior ();

          signo = target_signal_from_host (WTERMSIG (status));
          error (_("Unable to attach: program terminated with signal "
                   "%s, %s."),
                 target_signal_to_name (signo),
                 target_signal_to_string (signo));
        }

      internal_error (__FILE__, __LINE__,
                      _("unexpected status %d for PID %ld"),
                      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LNA: waitpid %ld, saving status %s\n",
                        (long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum target_signal signo = TARGET_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = TARGET_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = target_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
        {
          struct thread_info *tp = find_thread_ptid (lp->ptid);

          signo = tp->suspend.stop_signal;
        }
    }

  *status = 0;

  if (signo == TARGET_SIGNAL_0)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s has no pending signal\n",
                            target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s had signal %s, "
                            "but it is in no pass state\n",
                            target_pid_to_str (lp->ptid),
                            target_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (target_signal_to_host (signo));

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "GPT: lwp %s has pending signal %s\n",
                            target_pid_to_str (lp->ptid),
                            target_signal_to_string (signo));
    }

  return 0;
}

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
                        strsignal (WSTOPSIG (lp->status)),
                        target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "DC: Sending SIGCONT to %s\n",
                            target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1746 WSTOPSIG (status)) < 0)
8a3fe4f8 1747 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1748 safe_strerror (errno));
1749
1750 if (debug_linux_nat)
1751 fprintf_unfiltered (gdb_stdlog,
1752 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1753 target_pid_to_str (lp->ptid),
7feb7d06 1754 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1755
1756 delete_lwp (lp->ptid);
1757 }
1758
1759 return 0;
1760}
1761
1762static void
136d6dae 1763linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1764{
b84876c2 1765 int pid;
a0ef4274 1766 int status;
d90e17a7
PA
1767 struct lwp_info *main_lwp;
1768
1769 pid = GET_PID (inferior_ptid);
a0ef4274 1770
b84876c2
PA
1771 if (target_can_async_p ())
1772 linux_nat_async (NULL, 0);
1773
4c28f408
PA
1774 /* Stop all threads before detaching. ptrace requires that the
1775 thread is stopped to successfully detach. */
d90e17a7 1776 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1777 /* ... and wait until all of them have reported back that
1778 they're no longer running. */
d90e17a7 1779 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1780
d90e17a7 1781 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1782
1783 /* Only the initial process should be left right now. */
d90e17a7
PA
1784 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1785
1786 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1787
a0ef4274
DJ
1788 /* Pass on any pending signal for the last LWP. */
1789 if ((args == NULL || *args == '\0')
d90e17a7 1790 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1791 && WIFSTOPPED (status))
1792 {
1793 /* Put the signal number in ARGS so that inf_ptrace_detach will
1794 pass it along with PTRACE_DETACH. */
1795 args = alloca (8);
1796 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1797 if (debug_linux_nat)
1798 fprintf_unfiltered (gdb_stdlog,
1799 "LND: Sending signal %s to %s\n",
1800 args,
1801 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1802 }
1803
d90e17a7 1804 delete_lwp (main_lwp->ptid);
b84876c2 1805
7a7d3353
PA
1806 if (forks_exist_p ())
1807 {
1808 /* Multi-fork case. The current inferior_ptid is being detached
1809 from, but there are other viable forks to debug. Detach from
1810 the current fork, and context-switch to the first
1811 available. */
1812 linux_fork_detach (args, from_tty);
1813
1814 if (non_stop && target_can_async_p ())
1815 target_async (inferior_event_handler, 0);
1816 }
1817 else
1818 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1819}
1820
1821/* Resume LP. */
1822
1823static int
1824resume_callback (struct lwp_info *lp, void *data)
1825{
6c95b8df
PA
1826 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1827
1828 if (lp->stopped && inf->vfork_child != NULL)
1829 {
1830 if (debug_linux_nat)
1831 fprintf_unfiltered (gdb_stdlog,
1832 "RC: Not resuming %s (vfork parent)\n",
1833 target_pid_to_str (lp->ptid));
1834 }
1835 else if (lp->stopped && lp->status == 0)
d6b0e80f 1836 {
d90e17a7
PA
1837 if (debug_linux_nat)
1838 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1839 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
d90e17a7
PA
1840 target_pid_to_str (lp->ptid));
1841
28439f5e
PA
1842 linux_ops->to_resume (linux_ops,
1843 pid_to_ptid (GET_LWP (lp->ptid)),
a289b8f6 1844 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1845 if (debug_linux_nat)
1846 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1847 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
d6b0e80f
AC
1848 target_pid_to_str (lp->ptid));
1849 lp->stopped = 0;
a289b8f6 1850 lp->step = 0;
9f0bdab8 1851 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1852 lp->stopped_by_watchpoint = 0;
d6b0e80f 1853 }
57380f4e 1854 else if (lp->stopped && debug_linux_nat)
3e43a32a
MS
1855 fprintf_unfiltered (gdb_stdlog,
1856 "RC: Not resuming sibling %s (has pending)\n",
57380f4e
DJ
1857 target_pid_to_str (lp->ptid));
1858 else if (debug_linux_nat)
3e43a32a
MS
1859 fprintf_unfiltered (gdb_stdlog,
1860 "RC: Not resuming sibling %s (not stopped)\n",
57380f4e 1861 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1862
1863 return 0;
1864}
1865
1866static int
1867resume_clear_callback (struct lwp_info *lp, void *data)
1868{
1869 lp->resumed = 0;
1870 return 0;
1871}
1872
1873static int
1874resume_set_callback (struct lwp_info *lp, void *data)
1875{
1876 lp->resumed = 1;
1877 return 0;
1878}
1879
1880static void
28439f5e
PA
1881linux_nat_resume (struct target_ops *ops,
1882 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1883{
7feb7d06 1884 sigset_t prev_mask;
d6b0e80f 1885 struct lwp_info *lp;
d90e17a7 1886 int resume_many;
d6b0e80f 1887
76f50ad1
DJ
1888 if (debug_linux_nat)
1889 fprintf_unfiltered (gdb_stdlog,
1890 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1891 step ? "step" : "resume",
1892 target_pid_to_str (ptid),
423ec54c
JK
1893 (signo != TARGET_SIGNAL_0
1894 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1895 target_pid_to_str (inferior_ptid));
1896
7feb7d06 1897 block_child_signals (&prev_mask);
b84876c2 1898
d6b0e80f 1899 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1900 resume_many = (ptid_equal (minus_one_ptid, ptid)
1901 || ptid_is_pid (ptid));
4c28f408 1902
e3e9f5a2
PA
1903 /* Mark the lwps we're resuming as resumed. */
1904 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1905
d90e17a7
PA
1906 /* See if it's the current inferior that should be handled
1907 specially. */
1908 if (resume_many)
1909 lp = find_lwp_pid (inferior_ptid);
1910 else
1911 lp = find_lwp_pid (ptid);
9f0bdab8 1912 gdb_assert (lp != NULL);
d6b0e80f 1913
9f0bdab8
DJ
1914 /* Remember if we're stepping. */
1915 lp->step = step;
d6b0e80f 1916
9f0bdab8
DJ
1917 /* If we have a pending wait status for this thread, there is no
1918 point in resuming the process. But first make sure that
1919 linux_nat_wait won't preemptively handle the event - we
1920 should never take this short-circuit if we are going to
1921 leave LP running, since we have skipped resuming all the
1922 other threads. This bit of code needs to be synchronized
1923 with linux_nat_wait. */
76f50ad1 1924
9f0bdab8
DJ
1925 if (lp->status && WIFSTOPPED (lp->status))
1926 {
423ec54c 1927 enum target_signal saved_signo;
d6b48e9c 1928 struct inferior *inf;
76f50ad1 1929
d90e17a7 1930 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
d6b48e9c
PA
1931 gdb_assert (inf);
1932 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1933
1934 /* Defer to common code if we're gaining control of the
1935 inferior. */
16c381f0 1936 if (inf->control.stop_soon == NO_STOP_QUIETLY
d6b48e9c 1937 && signal_stop_state (saved_signo) == 0
9f0bdab8
DJ
1938 && signal_print_state (saved_signo) == 0
1939 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1940 {
9f0bdab8
DJ
1941 if (debug_linux_nat)
1942 fprintf_unfiltered (gdb_stdlog,
1943 "LLR: Not short circuiting for ignored "
1944 "status 0x%x\n", lp->status);
1945
d6b0e80f
AC
1946 /* FIXME: What should we do if we are supposed to continue
1947 this thread with a signal? */
1948 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1949 signo = saved_signo;
1950 lp->status = 0;
1951 }
1952 }
76f50ad1 1953
6c95b8df 1954 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1955 {
1956 /* FIXME: What should we do if we are supposed to continue
1957 this thread with a signal? */
1958 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1959
9f0bdab8
DJ
1960 if (debug_linux_nat)
1961 fprintf_unfiltered (gdb_stdlog,
1962 "LLR: Short circuiting for status 0x%x\n",
1963 lp->status);
d6b0e80f 1964
7feb7d06
PA
1965 restore_child_signals_mask (&prev_mask);
1966 if (target_can_async_p ())
1967 {
1968 target_async (inferior_event_handler, 0);
1969 /* Tell the event loop we have something to process. */
1970 async_file_mark ();
1971 }
9f0bdab8 1972 return;
d6b0e80f
AC
1973 }
1974
9f0bdab8
DJ
1975 /* Mark LWP as not stopped to prevent it from being continued by
1976 resume_callback. */
1977 lp->stopped = 0;
1978
d90e17a7
PA
1979 if (resume_many)
1980 iterate_over_lwps (ptid, resume_callback, NULL);
1981
1982 /* Convert to something the lower layer understands. */
1983 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1984
28439f5e 1985 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 1986 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1987 lp->stopped_by_watchpoint = 0;
9f0bdab8 1988
d6b0e80f
AC
1989 if (debug_linux_nat)
1990 fprintf_unfiltered (gdb_stdlog,
1991 "LLR: %s %s, %s (resume event thread)\n",
1992 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1993 target_pid_to_str (ptid),
423ec54c
JK
1994 (signo != TARGET_SIGNAL_0
1995 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 1996
7feb7d06 1997 restore_child_signals_mask (&prev_mask);
b84876c2 1998 if (target_can_async_p ())
8ea051c5 1999 target_async (inferior_event_handler, 0);
d6b0e80f
AC
2000}
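
The resume path above brackets its work between block_child_signals and restore_child_signals_mask so that a SIGCHLD cannot race with the LWP bookkeeping. As an illustration only (the real helpers are defined elsewhere in this file and may differ in detail), a SIGCHLD-blocking window of this kind can be built on sigprocmask:

/* Illustrative sketch, not part of linux-nat.c: open and close a
   SIGCHLD-blocking window like the one used by linux_nat_resume,
   assuming the helpers are essentially sigprocmask wrappers.  */
#include <signal.h>

static void
example_block_sigchld (sigset_t *prev_mask)
{
  sigset_t block;

  sigemptyset (&block);
  sigaddset (&block, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block, prev_mask);   /* Start of the window.  */
}

static void
example_restore_sigchld (const sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);   /* End of the window.  */
}
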
2001
c5f62d5f 2002/* Send a signal to an LWP. */
d6b0e80f
AC
2003
2004static int
2005kill_lwp (int lwpid, int signo)
2006{
c5f62d5f
DE
2007 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2008 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2009
2010#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2011 {
2012 static int tkill_failed;
2013
2014 if (!tkill_failed)
2015 {
2016 int ret;
2017
2018 errno = 0;
2019 ret = syscall (__NR_tkill, lwpid, signo);
2020 if (errno != ENOSYS)
2021 return ret;
2022 tkill_failed = 1;
2023 }
2024 }
d6b0e80f
AC
2025#endif
2026
2027 return kill (lwpid, signo);
2028}
2029
ca2163eb
PA
2030/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2031 event, check if the core is interested in it: if not, ignore the
2032 event, and keep waiting; otherwise, we need to toggle the LWP's
2033 syscall entry/exit status, since the ptrace event itself doesn't
2034 indicate it, and report the trap to higher layers. */
2035
2036static int
2037linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2038{
2039 struct target_waitstatus *ourstatus = &lp->waitstatus;
2040 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2041 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2042
2043 if (stopping)
2044 {
2045 /* If we're stopping threads, there's a SIGSTOP pending, which
2046 makes it so that the LWP reports an immediate syscall return,
2047 followed by the SIGSTOP. Skip seeing that "return" using
2048 PTRACE_CONT directly, and let stop_wait_callback collect the
2049 SIGSTOP. Later, when the thread is resumed, a new syscall
2050 entry event is reported. If we didn't do this (and returned 0), we'd
2051 leave a syscall entry pending, and our caller, by using
2052 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2053 itself. Later, when the user re-resumes this LWP, we'd see
2054 another syscall entry event and we'd mistake it for a return.
2055
2056 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2057 (leaving immediately with LWP->signalled set, without issuing
2058 a PTRACE_CONT), it would still be problematic to leave this
2059 syscall enter pending, as later when the thread is resumed,
2060 it would then see the same syscall exit mentioned above,
2061 followed by the delayed SIGSTOP, while the syscall didn't
2062 actually get to execute. It seems it would be even more
2063 confusing to the user. */
2064
2065 if (debug_linux_nat)
2066 fprintf_unfiltered (gdb_stdlog,
2067 "LHST: ignoring syscall %d "
2068 "for LWP %ld (stopping threads), "
2069 "resuming with PTRACE_CONT for SIGSTOP\n",
2070 syscall_number,
2071 GET_LWP (lp->ptid));
2072
2073 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2074 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2075 return 1;
2076 }
2077
2078 if (catch_syscall_enabled ())
2079 {
2080 /* Always update the entry/return state, even if this particular
2081 syscall isn't interesting to the core now. In async mode,
2082 the user could install a new catchpoint for this syscall
2083 between syscall enter/return, and we'll need to know to
2084 report a syscall return if that happens. */
2085 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2086 ? TARGET_WAITKIND_SYSCALL_RETURN
2087 : TARGET_WAITKIND_SYSCALL_ENTRY);
2088
2089 if (catching_syscall_number (syscall_number))
2090 {
2091 /* Alright, an event to report. */
2092 ourstatus->kind = lp->syscall_state;
2093 ourstatus->value.syscall_number = syscall_number;
2094
2095 if (debug_linux_nat)
2096 fprintf_unfiltered (gdb_stdlog,
2097 "LHST: stopping for %s of syscall %d"
2098 " for LWP %ld\n",
3e43a32a
MS
2099 lp->syscall_state
2100 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2101 ? "entry" : "return",
2102 syscall_number,
2103 GET_LWP (lp->ptid));
2104 return 0;
2105 }
2106
2107 if (debug_linux_nat)
2108 fprintf_unfiltered (gdb_stdlog,
2109 "LHST: ignoring %s of syscall %d "
2110 "for LWP %ld\n",
2111 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2112 ? "entry" : "return",
2113 syscall_number,
2114 GET_LWP (lp->ptid));
2115 }
2116 else
2117 {
2118 /* If we had been syscall tracing, and hence used PT_SYSCALL
2119 before on this LWP, it could happen that the user removes all
2120 syscall catchpoints before we get to process this event.
2121 There are two noteworthy issues here:
2122
2123 - When stopped at a syscall entry event, resuming with
2124 PT_STEP still resumes executing the syscall and reports a
2125 syscall return.
2126
2127 - Only PT_SYSCALL catches syscall enters. If we last
2128 single-stepped this thread, then this event can't be a
2129 syscall enter; having just single-stepped, it
2130 has to be a syscall exit.
2131
2132 The points above mean that the next resume, be it PT_STEP or
2133 PT_CONTINUE, cannot trigger a syscall trace event. */
2134 if (debug_linux_nat)
2135 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2136 "LHST: caught syscall event "
2137 "with no syscall catchpoints."
ca2163eb
PA
2138 " %d for LWP %ld, ignoring\n",
2139 syscall_number,
2140 GET_LWP (lp->ptid));
2141 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2142 }
2143
2144 /* The core isn't interested in this event. For efficiency, avoid
2145 stopping all threads only to have the core resume them all again.
2146 Since we're not stopping threads, if we're still syscall tracing
2147 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2148 subsequent syscall. Simply resume using the inf-ptrace layer,
2149 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2150
2151 /* Note that gdbarch_get_syscall_number may access registers, hence
2152 fill a regcache. */
2153 registers_changed ();
2154 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2155 lp->step, TARGET_SIGNAL_0);
2156 return 1;
2157}
2158
3d799a95
DJ
2159/* Handle a GNU/Linux extended wait response. If we see a clone
2160 event, we need to add the new LWP to our list (and not report the
2161 trap to higher layers). This function returns non-zero if the
2162 event should be ignored and we should wait again. If STOPPING is
2163 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2164
2165static int
3d799a95
DJ
2166linux_handle_extended_wait (struct lwp_info *lp, int status,
2167 int stopping)
d6b0e80f 2168{
3d799a95
DJ
2169 int pid = GET_LWP (lp->ptid);
2170 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2171 int event = status >> 16;
d6b0e80f 2172
3d799a95
DJ
2173 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2174 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2175 {
3d799a95
DJ
2176 unsigned long new_pid;
2177 int ret;
2178
2179 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2180
3d799a95
DJ
2181 /* If we haven't already seen the new PID stop, wait for it now. */
2182 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2183 {
2184 /* The new child has a pending SIGSTOP. We can't affect it until it
2185 hits the SIGSTOP, but we're already attached. */
2186 ret = my_waitpid (new_pid, &status,
2187 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2188 if (ret == -1)
2189 perror_with_name (_("waiting for new child"));
2190 else if (ret != new_pid)
2191 internal_error (__FILE__, __LINE__,
2192 _("wait returned unexpected PID %d"), ret);
2193 else if (!WIFSTOPPED (status))
2194 internal_error (__FILE__, __LINE__,
2195 _("wait returned unexpected status 0x%x"), status);
2196 }
2197
3a3e9ee3 2198 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2199
2277426b
PA
2200 if (event == PTRACE_EVENT_FORK
2201 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2202 {
2203 struct fork_info *fp;
2204
2205 /* Handle checkpointing by linux-fork.c here as a special
2206 case. We don't want the follow-fork-mode or 'catch fork'
2207 to interfere with this. */
2208
2209 /* This won't actually modify the breakpoint list, but will
2210 physically remove the breakpoints from the child. */
2211 detach_breakpoints (new_pid);
2212
2213 /* Retain child fork in ptrace (stopped) state. */
2214 fp = find_fork_pid (new_pid);
2215 if (!fp)
2216 fp = add_fork (new_pid);
2217
2218 /* Report as spurious, so that infrun doesn't want to follow
2219 this fork. We're actually doing an infcall in
2220 linux-fork.c. */
2221 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2222 linux_enable_event_reporting (pid_to_ptid (new_pid));
2223
2224 /* Report the stop to the core. */
2225 return 0;
2226 }
2227
3d799a95
DJ
2228 if (event == PTRACE_EVENT_FORK)
2229 ourstatus->kind = TARGET_WAITKIND_FORKED;
2230 else if (event == PTRACE_EVENT_VFORK)
2231 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2232 else
3d799a95 2233 {
78768c4a
JK
2234 struct lwp_info *new_lp;
2235
3d799a95 2236 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2237
d90e17a7 2238 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2239 new_lp->cloned = 1;
4c28f408 2240 new_lp->stopped = 1;
d6b0e80f 2241
3d799a95
DJ
2242 if (WSTOPSIG (status) != SIGSTOP)
2243 {
2244 /* This can happen if someone starts sending the new thread
2245 signals with a lower number than SIGSTOP (e.g. SIGUSR1)
2246 before it gets a chance to run.
2247 This is an unlikely case, and harder to handle for
2248 fork / vfork than for clone, so we do not try - but
2249 we handle it for clone events here. We'll send
2250 the other signal on to the thread below. */
2251
2252 new_lp->signalled = 1;
2253 }
2254 else
2255 status = 0;
d6b0e80f 2256
4c28f408 2257 if (non_stop)
3d799a95 2258 {
4c28f408
PA
2259 /* Add the new thread to GDB's lists as soon as possible
2260 so that:
2261
2262 1) the frontend doesn't have to wait for a stop to
2263 display them, and,
2264
2265 2) we tag it with the correct running state. */
2266
2267 /* If the thread_db layer is active, let it know about
2268 this new thread, and add it to GDB's list. */
2269 if (!thread_db_attach_lwp (new_lp->ptid))
2270 {
2271 /* We're not using thread_db. Add it to GDB's
2272 list. */
2273 target_post_attach (GET_LWP (new_lp->ptid));
2274 add_thread (new_lp->ptid);
2275 }
2276
2277 if (!stopping)
2278 {
2279 set_running (new_lp->ptid, 1);
2280 set_executing (new_lp->ptid, 1);
2281 }
2282 }
2283
ca2163eb
PA
2284 /* Note the need to use the low target ops to resume, to
2285 handle resuming with PT_SYSCALL if we have syscall
2286 catchpoints. */
4c28f408
PA
2287 if (!stopping)
2288 {
423ec54c 2289 enum target_signal signo;
ca2163eb 2290
4c28f408 2291 new_lp->stopped = 0;
3d799a95 2292 new_lp->resumed = 1;
ca2163eb
PA
2293
2294 signo = (status
2295 ? target_signal_from_host (WSTOPSIG (status))
2296 : TARGET_SIGNAL_0);
2297
2298 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2299 0, signo);
3d799a95 2300 }
ad34eb2f
JK
2301 else
2302 {
2303 if (status != 0)
2304 {
2305 /* We created NEW_LP so it cannot yet contain STATUS. */
2306 gdb_assert (new_lp->status == 0);
2307
2308 /* Save the wait status to report later. */
2309 if (debug_linux_nat)
2310 fprintf_unfiltered (gdb_stdlog,
2311 "LHEW: waitpid of new LWP %ld, "
2312 "saving status %s\n",
2313 (long) GET_LWP (new_lp->ptid),
2314 status_to_str (status));
2315 new_lp->status = status;
2316 }
2317 }
d6b0e80f 2318
3d799a95
DJ
2319 if (debug_linux_nat)
2320 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2321 "LHEW: Got clone event "
2322 "from LWP %ld, resuming\n",
3d799a95 2323 GET_LWP (lp->ptid));
ca2163eb
PA
2324 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2325 0, TARGET_SIGNAL_0);
3d799a95
DJ
2326
2327 return 1;
2328 }
2329
2330 return 0;
d6b0e80f
AC
2331 }
2332
3d799a95
DJ
2333 if (event == PTRACE_EVENT_EXEC)
2334 {
a75724bc
PA
2335 if (debug_linux_nat)
2336 fprintf_unfiltered (gdb_stdlog,
2337 "LHEW: Got exec event from LWP %ld\n",
2338 GET_LWP (lp->ptid));
2339
3d799a95
DJ
2340 ourstatus->kind = TARGET_WAITKIND_EXECD;
2341 ourstatus->value.execd_pathname
6d8fd2b7 2342 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2343
6c95b8df
PA
2344 return 0;
2345 }
2346
2347 if (event == PTRACE_EVENT_VFORK_DONE)
2348 {
2349 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2350 {
6c95b8df 2351 if (debug_linux_nat)
3e43a32a
MS
2352 fprintf_unfiltered (gdb_stdlog,
2353 "LHEW: Got expected PTRACE_EVENT_"
2354 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2355 GET_LWP (lp->ptid));
3d799a95 2356
6c95b8df
PA
2357 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2358 return 0;
3d799a95
DJ
2359 }
2360
6c95b8df 2361 if (debug_linux_nat)
3e43a32a
MS
2362 fprintf_unfiltered (gdb_stdlog,
2363 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2364 "from LWP %ld: resuming\n",
6c95b8df
PA
2365 GET_LWP (lp->ptid));
2366 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2367 return 1;
3d799a95
DJ
2368 }
2369
2370 internal_error (__FILE__, __LINE__,
2371 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2372}
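
linux_handle_extended_wait relies on the kernel's encoding of extended ptrace events: the tracee stops with SIGTRAP and the PTRACE_EVENT_* code is stored in the high 16 bits of the waitpid status, which is why callers test "status >> 16 != 0" before dispatching here. A minimal standalone decoder, shown only as a sketch (it assumes the tracer already enabled the relevant PTRACE_O_TRACE* options), looks like this:

/* Illustrative sketch, not part of linux-nat.c: extract the extended
   ptrace event code from a waitpid status.  Assumes the LWP is traced
   with PTRACE_SETOPTIONS and PTRACE_O_TRACEFORK/VFORK/CLONE/EXEC set,
   so extended stops report SIGTRAP with the event in the top bits.  */
#include <signal.h>
#include <sys/wait.h>

static int
example_extended_event (int status)
{
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    return status >> 16;        /* PTRACE_EVENT_FORK, _CLONE, _EXEC, ...  */

  return 0;                     /* Not an extended ptrace event.  */
}
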
2373
2374/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2375 exited. */
2376
2377static int
2378wait_lwp (struct lwp_info *lp)
2379{
2380 pid_t pid;
2381 int status;
2382 int thread_dead = 0;
2383
2384 gdb_assert (!lp->stopped);
2385 gdb_assert (lp->status == 0);
2386
58aecb61 2387 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
2388 if (pid == -1 && errno == ECHILD)
2389 {
58aecb61 2390 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
2391 if (pid == -1 && errno == ECHILD)
2392 {
2393 /* The thread has previously exited. We need to delete it
2394 now because, for some vendor 2.4 kernels with NPTL
2395 support backported, there won't be an exit event unless
2396 it is the main thread. 2.6 kernels will report an exit
2397 event for each thread that exits, as expected. */
2398 thread_dead = 1;
2399 if (debug_linux_nat)
2400 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2401 target_pid_to_str (lp->ptid));
2402 }
2403 }
2404
2405 if (!thread_dead)
2406 {
2407 gdb_assert (pid == GET_LWP (lp->ptid));
2408
2409 if (debug_linux_nat)
2410 {
2411 fprintf_unfiltered (gdb_stdlog,
2412 "WL: waitpid %s received %s\n",
2413 target_pid_to_str (lp->ptid),
2414 status_to_str (status));
2415 }
2416 }
2417
2418 /* Check if the thread has exited. */
2419 if (WIFEXITED (status) || WIFSIGNALED (status))
2420 {
2421 thread_dead = 1;
2422 if (debug_linux_nat)
2423 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2424 target_pid_to_str (lp->ptid));
2425 }
2426
2427 if (thread_dead)
2428 {
e26af52f 2429 exit_lwp (lp);
d6b0e80f
AC
2430 return 0;
2431 }
2432
2433 gdb_assert (WIFSTOPPED (status));
2434
ca2163eb
PA
2435 /* Handle GNU/Linux's syscall SIGTRAPs. */
2436 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2437 {
2438 /* No longer need the sysgood bit. The ptrace event ends up
2439 recorded in lp->waitstatus if we care for it. We can carry
2440 on handling the event like a regular SIGTRAP from here
2441 on. */
2442 status = W_STOPCODE (SIGTRAP);
2443 if (linux_handle_syscall_trap (lp, 1))
2444 return wait_lwp (lp);
2445 }
2446
d6b0e80f
AC
2447 /* Handle GNU/Linux's extended waitstatus for trace events. */
2448 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2449 {
2450 if (debug_linux_nat)
2451 fprintf_unfiltered (gdb_stdlog,
2452 "WL: Handling extended status 0x%06x\n",
2453 status);
3d799a95 2454 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2455 return wait_lwp (lp);
2456 }
2457
2458 return status;
2459}
2460
9f0bdab8
DJ
2461/* Save the most recent siginfo for LP. This is currently only called
2462 for SIGTRAP; some ports use the si_addr field for
2463 target_stopped_data_address. In the future, it may also be used to
2464 restore the siginfo of requeued signals. */
2465
2466static void
2467save_siginfo (struct lwp_info *lp)
2468{
2469 errno = 0;
2470 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2471 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2472
2473 if (errno != 0)
2474 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2475}
2476
d6b0e80f
AC
2477/* Send a SIGSTOP to LP. */
2478
2479static int
2480stop_callback (struct lwp_info *lp, void *data)
2481{
2482 if (!lp->stopped && !lp->signalled)
2483 {
2484 int ret;
2485
2486 if (debug_linux_nat)
2487 {
2488 fprintf_unfiltered (gdb_stdlog,
2489 "SC: kill %s **<SIGSTOP>**\n",
2490 target_pid_to_str (lp->ptid));
2491 }
2492 errno = 0;
2493 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2494 if (debug_linux_nat)
2495 {
2496 fprintf_unfiltered (gdb_stdlog,
2497 "SC: lwp kill %d %s\n",
2498 ret,
2499 errno ? safe_strerror (errno) : "ERRNO-OK");
2500 }
2501
2502 lp->signalled = 1;
2503 gdb_assert (lp->status == 0);
2504 }
2505
2506 return 0;
2507}
2508
57380f4e 2509/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2510
2511static int
57380f4e
DJ
2512linux_nat_has_pending_sigint (int pid)
2513{
2514 sigset_t pending, blocked, ignored;
57380f4e
DJ
2515
2516 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2517
2518 if (sigismember (&pending, SIGINT)
2519 && !sigismember (&ignored, SIGINT))
2520 return 1;
2521
2522 return 0;
2523}
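
linux_nat_has_pending_sigint consults the per-thread signal sets that linux_proc_pending_signals collects; on Linux the kernel exports those sets as hexadecimal masks on the SigPnd/SigBlk/SigIgn lines of /proc/<pid>/status. The reader below is only a sketch of that idea (field name and parsing kept deliberately minimal), not the parser this file actually uses:

/* Illustrative sketch, not part of linux-nat.c: read one signal mask
   (e.g. "SigPnd:") from /proc/<pid>/status.  Bit N-1 of the result
   corresponds to signal N, so a pending SIGINT shows up as
   (mask >> (SIGINT - 1)) & 1.  */
#include <stdio.h>
#include <string.h>

static unsigned long long
example_read_sigmask (int pid, const char *field)
{
  char path[64], line[256];
  unsigned long long mask = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, field, strlen (field)) == 0)
      {
        sscanf (line + strlen (field), "%llx", &mask);
        break;
      }

  fclose (f);
  return mask;
}
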
2524
2525/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2526
2527static int
2528set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2529{
57380f4e
DJ
2530 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2531 flag to consume the next one. */
2532 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2533 && WSTOPSIG (lp->status) == SIGINT)
2534 lp->status = 0;
2535 else
2536 lp->ignore_sigint = 1;
2537
2538 return 0;
2539}
2540
2541/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2542 This function is called after we know the LWP has stopped; if the LWP
2543 stopped before the expected SIGINT was delivered, then it will never have
2544 arrived. Also, if the signal was delivered to a shared queue and consumed
2545 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2546
57380f4e
DJ
2547static void
2548maybe_clear_ignore_sigint (struct lwp_info *lp)
2549{
2550 if (!lp->ignore_sigint)
2551 return;
2552
2553 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2554 {
2555 if (debug_linux_nat)
2556 fprintf_unfiltered (gdb_stdlog,
2557 "MCIS: Clearing bogus flag for %s\n",
2558 target_pid_to_str (lp->ptid));
2559 lp->ignore_sigint = 0;
2560 }
2561}
2562
ebec9a0f
PA
2563/* Fetch the possible triggered data watchpoint info and store it in
2564 LP.
2565
2566 On some archs, like x86, that use debug registers to set
2567 watchpoints, it's possible that the way to know which watched
2568 address trapped is to check the register that is used to select
2569 which address to watch. Problem is, between setting the watchpoint
2570 and reading back which data address trapped, the user may change
2571 the set of watchpoints, and, as a consequence, GDB changes the
2572 debug registers in the inferior. To avoid reading back a stale
2573 stopped-data-address when that happens, we cache in LP the fact
2574 that a watchpoint trapped, and the corresponding data address, as
2575 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2576 registers meanwhile, we have the cached data we can rely on. */
2577
2578static void
2579save_sigtrap (struct lwp_info *lp)
2580{
2581 struct cleanup *old_chain;
2582
2583 if (linux_ops->to_stopped_by_watchpoint == NULL)
2584 {
2585 lp->stopped_by_watchpoint = 0;
2586 return;
2587 }
2588
2589 old_chain = save_inferior_ptid ();
2590 inferior_ptid = lp->ptid;
2591
2592 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2593
2594 if (lp->stopped_by_watchpoint)
2595 {
2596 if (linux_ops->to_stopped_data_address != NULL)
2597 lp->stopped_data_address_p =
2598 linux_ops->to_stopped_data_address (&current_target,
2599 &lp->stopped_data_address);
2600 else
2601 lp->stopped_data_address_p = 0;
2602 }
2603
2604 do_cleanups (old_chain);
2605}
2606
2607/* See save_sigtrap. */
2608
2609static int
2610linux_nat_stopped_by_watchpoint (void)
2611{
2612 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2613
2614 gdb_assert (lp != NULL);
2615
2616 return lp->stopped_by_watchpoint;
2617}
2618
2619static int
2620linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2621{
2622 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2623
2624 gdb_assert (lp != NULL);
2625
2626 *addr_p = lp->stopped_data_address;
2627
2628 return lp->stopped_data_address_p;
2629}
2630
26ab7092
JK
2631/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2632
2633static int
2634sigtrap_is_event (int status)
2635{
2636 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2637}
2638
2639/* SIGTRAP-like events recognizer. */
2640
2641static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2642
00390b84
JK
2643/* Check for SIGTRAP-like events in LP. */
2644
2645static int
2646linux_nat_lp_status_is_event (struct lwp_info *lp)
2647{
2648 /* We check for lp->waitstatus in addition to lp->status, because we can
2649 have pending process exits recorded in lp->status
2650 and W_EXITCODE(0,0) == 0. We should probably have an additional
2651 lp->status_p flag. */
2652
2653 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2654 && linux_nat_status_is_event (lp->status));
2655}
2656
26ab7092
JK
2657/* Set an alternative recognizer of SIGTRAP-like events. If
2658 breakpoint_inserted_here_p reports a breakpoint at the stop address, then
2659 gdbarch_decr_pc_after_break will be applied. */
2660
2661void
2662linux_nat_set_status_is_event (struct target_ops *t,
2663 int (*status_is_event) (int status))
2664{
2665 linux_nat_status_is_event = status_is_event;
2666}
2667
57380f4e
DJ
2668/* Wait until LP is stopped. */
2669
2670static int
2671stop_wait_callback (struct lwp_info *lp, void *data)
2672{
6c95b8df
PA
2673 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2674
2675 /* If this is a vfork parent, bail out, it is not going to report
2676 any SIGSTOP until the vfork is done with. */
2677 if (inf->vfork_child != NULL)
2678 return 0;
2679
d6b0e80f
AC
2680 if (!lp->stopped)
2681 {
2682 int status;
2683
2684 status = wait_lwp (lp);
2685 if (status == 0)
2686 return 0;
2687
57380f4e
DJ
2688 if (lp->ignore_sigint && WIFSTOPPED (status)
2689 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2690 {
57380f4e 2691 lp->ignore_sigint = 0;
d6b0e80f
AC
2692
2693 errno = 0;
2694 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2695 if (debug_linux_nat)
2696 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2697 "PTRACE_CONT %s, 0, 0 (%s) "
2698 "(discarding SIGINT)\n",
d6b0e80f
AC
2699 target_pid_to_str (lp->ptid),
2700 errno ? safe_strerror (errno) : "OK");
2701
57380f4e 2702 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2703 }
2704
57380f4e
DJ
2705 maybe_clear_ignore_sigint (lp);
2706
d6b0e80f
AC
2707 if (WSTOPSIG (status) != SIGSTOP)
2708 {
26ab7092 2709 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2710 {
2711 /* If a LWP other than the LWP that we're reporting an
2712 event for has hit a GDB breakpoint (as opposed to
2713 some random trap signal), then just arrange for it to
2714 hit it again later. We don't keep the SIGTRAP status
2715 and don't forward the SIGTRAP signal to the LWP. We
2716 will handle the current event, eventually we will
2717 resume all LWPs, and this one will get its breakpoint
2718 trap again.
2719
2720 If we do not do this, then we run the risk that the
2721 user will delete or disable the breakpoint, but the
2722 thread will have already tripped on it. */
2723
9f0bdab8
DJ
2724 /* Save the trap's siginfo in case we need it later. */
2725 save_siginfo (lp);
2726
ebec9a0f
PA
2727 save_sigtrap (lp);
2728
1777feb0 2729 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2730 errno = 0;
2731 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2732 if (debug_linux_nat)
2733 {
2734 fprintf_unfiltered (gdb_stdlog,
2735 "PTRACE_CONT %s, 0, 0 (%s)\n",
2736 target_pid_to_str (lp->ptid),
2737 errno ? safe_strerror (errno) : "OK");
2738
2739 fprintf_unfiltered (gdb_stdlog,
2740 "SWC: Candidate SIGTRAP event in %s\n",
2741 target_pid_to_str (lp->ptid));
2742 }
710151dd 2743 /* Hold this event/waitstatus while we check to see if
1777feb0 2744 there are any more (we still want to get that SIGSTOP). */
57380f4e 2745 stop_wait_callback (lp, NULL);
710151dd 2746
7feb7d06
PA
2747 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2748 there's another event, throw it back into the
1777feb0 2749 queue. */
7feb7d06 2750 if (lp->status)
710151dd 2751 {
7feb7d06
PA
2752 if (debug_linux_nat)
2753 fprintf_unfiltered (gdb_stdlog,
2754 "SWC: kill %s, %s\n",
2755 target_pid_to_str (lp->ptid),
2756 status_to_str ((int) status));
2757 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2758 }
7feb7d06 2759
1777feb0 2760 /* Save the sigtrap event. */
7feb7d06 2761 lp->status = status;
d6b0e80f
AC
2762 return 0;
2763 }
2764 else
2765 {
2766 /* The thread was stopped with a signal other than
1777feb0 2767 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2768
2769 if (debug_linux_nat)
2770 {
2771 fprintf_unfiltered (gdb_stdlog,
2772 "SWC: Pending event %s in %s\n",
2773 status_to_str ((int) status),
2774 target_pid_to_str (lp->ptid));
2775 }
1777feb0 2776 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2777 errno = 0;
2778 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2779 if (debug_linux_nat)
2780 fprintf_unfiltered (gdb_stdlog,
2781 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2782 target_pid_to_str (lp->ptid),
2783 errno ? safe_strerror (errno) : "OK");
2784
2785 /* Hold this event/waitstatus while we check to see if
1777feb0 2786 there are any more (we still want to get that SIGSTOP). */
57380f4e 2787 stop_wait_callback (lp, NULL);
710151dd
PA
2788
2789 /* If the lp->status field is still empty, use it to
2790 hold this event. If not, then this event must be
2791 returned to the event queue of the LWP. */
7feb7d06 2792 if (lp->status)
d6b0e80f
AC
2793 {
2794 if (debug_linux_nat)
2795 {
2796 fprintf_unfiltered (gdb_stdlog,
2797 "SWC: kill %s, %s\n",
2798 target_pid_to_str (lp->ptid),
2799 status_to_str ((int) status));
2800 }
2801 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2802 }
710151dd
PA
2803 else
2804 lp->status = status;
d6b0e80f
AC
2805 return 0;
2806 }
2807 }
2808 else
2809 {
2810 /* We caught the SIGSTOP that we intended to catch, so
2811 there's no SIGSTOP pending. */
2812 lp->stopped = 1;
2813 lp->signalled = 0;
2814 }
2815 }
2816
2817 return 0;
2818}
2819
d6b0e80f
AC
2820/* Return non-zero if LP has a wait status pending. */
2821
2822static int
2823status_callback (struct lwp_info *lp, void *data)
2824{
2825 /* Only report a pending wait status if we pretend that this has
2826 indeed been resumed. */
ca2163eb
PA
2827 if (!lp->resumed)
2828 return 0;
2829
2830 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2831 {
2832 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, a syscall event,
2833 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2834 0', so a clean process exit cannot be stored pending in
2835 lp->status; it is indistinguishable from
2836 no-pending-status. */
2837 return 1;
2838 }
2839
2840 if (lp->status != 0)
2841 return 1;
2842
2843 return 0;
d6b0e80f
AC
2844}
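
Both status_callback above and linux_nat_lp_status_is_event earlier hinge on the waitpid status encoding: a stop status carries the stopping signal, while a clean exit encodes to zero and therefore cannot sit in lp->status as a pending event. A small self-contained illustration of those macros (using the same W_STOPCODE/W_EXITCODE helpers this file already relies on, assuming the GNU <sys/wait.h> that exposes them) follows:

/* Illustrative sketch, not part of linux-nat.c: the wait status
   encodings behind the "W_EXITCODE(0,0) == 0" remark above, assuming
   the usual GNU/Linux layout exposed by <sys/wait.h>.  */
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

static void
example_wait_status_encoding (void)
{
  int stopped = W_STOPCODE (SIGTRAP);   /* LWP stopped by SIGTRAP.  */
  int exited = W_EXITCODE (0, 0);       /* LWP exited cleanly.  */

  assert (WIFSTOPPED (stopped) && WSTOPSIG (stopped) == SIGTRAP);
  assert (WIFEXITED (exited) && WEXITSTATUS (exited) == 0);

  /* The clean exit encodes to 0, indistinguishable from "no pending
     status" -- hence the extra lp->waitstatus check above.  */
  assert (exited == 0);
}
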
2845
2846/* Return non-zero if LP isn't stopped. */
2847
2848static int
2849running_callback (struct lwp_info *lp, void *data)
2850{
2851 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2852}
2853
2854/* Count the LWP's that have had events. */
2855
2856static int
2857count_events_callback (struct lwp_info *lp, void *data)
2858{
2859 int *count = data;
2860
2861 gdb_assert (count != NULL);
2862
e09490f1 2863 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2864 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2865 (*count)++;
2866
2867 return 0;
2868}
2869
2870/* Select the LWP (if any) that is currently being single-stepped. */
2871
2872static int
2873select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2874{
2875 if (lp->step && lp->status != 0)
2876 return 1;
2877 else
2878 return 0;
2879}
2880
2881/* Select the Nth LWP that has had a SIGTRAP event. */
2882
2883static int
2884select_event_lwp_callback (struct lwp_info *lp, void *data)
2885{
2886 int *selector = data;
2887
2888 gdb_assert (selector != NULL);
2889
1777feb0 2890 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2891 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2892 if ((*selector)-- == 0)
2893 return 1;
2894
2895 return 0;
2896}
2897
710151dd
PA
2898static int
2899cancel_breakpoint (struct lwp_info *lp)
2900{
2901 /* Arrange for a breakpoint to be hit again later. We don't keep
2902 the SIGTRAP status and don't forward the SIGTRAP signal to the
2903 LWP. We will handle the current event, eventually we will resume
2904 this LWP, and this breakpoint will trap again.
2905
2906 If we do not do this, then we run the risk that the user will
2907 delete or disable the breakpoint, but the LWP will have already
2908 tripped on it. */
2909
515630c5
UW
2910 struct regcache *regcache = get_thread_regcache (lp->ptid);
2911 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2912 CORE_ADDR pc;
2913
2914 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 2915 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2916 {
2917 if (debug_linux_nat)
2918 fprintf_unfiltered (gdb_stdlog,
2919 "CB: Push back breakpoint for %s\n",
2920 target_pid_to_str (lp->ptid));
2921
2922 /* Back up the PC if necessary. */
515630c5
UW
2923 if (gdbarch_decr_pc_after_break (gdbarch))
2924 regcache_write_pc (regcache, pc);
2925
710151dd
PA
2926 return 1;
2927 }
2928 return 0;
2929}
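
cancel_breakpoint compares the reported PC, backed up by gdbarch_decr_pc_after_break, against the inserted breakpoints. The adjustment exists because some architectures report the trap with the PC already past the breakpoint instruction; the arithmetic is just a subtraction, sketched here with an i386-style value as an assumed example rather than anything taken from this file:

/* Illustrative sketch, not part of linux-nat.c: the PC adjustment done
   on a breakpoint SIGTRAP.  On i386, for instance, the trap is reported
   with the PC just past the one-byte int3, so decr_pc_after_break is 1
   and the breakpoint address is PC - 1.  */
#include <stdint.h>

static uint64_t
example_breakpoint_address (uint64_t reported_pc, int decr_pc_after_break)
{
  return reported_pc - decr_pc_after_break;   /* Address of the breakpoint.  */
}
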
2930
d6b0e80f
AC
2931static int
2932cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2933{
2934 struct lwp_info *event_lp = data;
2935
2936 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2937 if (lp == event_lp)
2938 return 0;
2939
2940 /* If a LWP other than the LWP that we're reporting an event for has
2941 hit a GDB breakpoint (as opposed to some random trap signal),
2942 then just arrange for it to hit it again later. We don't keep
2943 the SIGTRAP status and don't forward the SIGTRAP signal to the
2944 LWP. We will handle the current event, eventually we will resume
2945 all LWPs, and this one will get its breakpoint trap again.
2946
2947 If we do not do this, then we run the risk that the user will
2948 delete or disable the breakpoint, but the LWP will have already
2949 tripped on it. */
2950
00390b84 2951 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
2952 && cancel_breakpoint (lp))
2953 /* Throw away the SIGTRAP. */
2954 lp->status = 0;
d6b0e80f
AC
2955
2956 return 0;
2957}
2958
2959/* Select one LWP out of those that have events pending. */
2960
2961static void
d90e17a7 2962select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2963{
2964 int num_events = 0;
2965 int random_selector;
2966 struct lwp_info *event_lp;
2967
ac264b3b 2968 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2969 (*orig_lp)->status = *status;
2970
2971 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
2972 event_lp = iterate_over_lwps (filter,
2973 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
2974 if (event_lp != NULL)
2975 {
2976 if (debug_linux_nat)
2977 fprintf_unfiltered (gdb_stdlog,
2978 "SEL: Select single-step %s\n",
2979 target_pid_to_str (event_lp->ptid));
2980 }
2981 else
2982 {
2983 /* No single-stepping LWP. Select one at random, out of those
2984 which have had SIGTRAP events. */
2985
2986 /* First see how many SIGTRAP events we have. */
d90e17a7 2987 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
2988
2989 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2990 random_selector = (int)
2991 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2992
2993 if (debug_linux_nat && num_events > 1)
2994 fprintf_unfiltered (gdb_stdlog,
2995 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2996 num_events, random_selector);
2997
d90e17a7
PA
2998 event_lp = iterate_over_lwps (filter,
2999 select_event_lwp_callback,
d6b0e80f
AC
3000 &random_selector);
3001 }
3002
3003 if (event_lp != NULL)
3004 {
3005 /* Switch the event LWP. */
3006 *orig_lp = event_lp;
3007 *status = event_lp->status;
3008 }
3009
3010 /* Flush the wait status for the event LWP. */
3011 (*orig_lp)->status = 0;
3012}
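
The random pick in select_event_lwp scales rand() so that the selector always lands in [0, num_events): because rand() never exceeds RAND_MAX, dividing by RAND_MAX + 1.0 keeps the result strictly below num_events. A standalone restatement of that scaling, purely as a sketch of the formula used above:

/* Illustrative sketch, not part of linux-nat.c: map rand()'s range
   [0, RAND_MAX] uniformly onto [0, num_events).  With num_events == 3,
   for example, the result is always 0, 1 or 2.  */
#include <stdlib.h>

static int
example_random_selector (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
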
3013
3014/* Return non-zero if LP has been resumed. */
3015
3016static int
3017resumed_callback (struct lwp_info *lp, void *data)
3018{
3019 return lp->resumed;
3020}
3021
d6b0e80f
AC
3022/* Stop an active thread, verify it still exists, then resume it. */
3023
3024static int
3025stop_and_resume_callback (struct lwp_info *lp, void *data)
3026{
3027 struct lwp_info *ptr;
3028
3029 if (!lp->stopped && !lp->signalled)
3030 {
3031 stop_callback (lp, NULL);
3032 stop_wait_callback (lp, NULL);
3033 /* Resume if the lwp still exists. */
3034 for (ptr = lwp_list; ptr; ptr = ptr->next)
3035 if (lp == ptr)
3036 {
3037 resume_callback (lp, NULL);
3038 resume_set_callback (lp, NULL);
3039 }
3040 }
3041 return 0;
3042}
3043
02f3fc28 3044/* Check if we should go on and pass this event to common code.
fa2c6a57 3045 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
3046static struct lwp_info *
3047linux_nat_filter_event (int lwpid, int status, int options)
3048{
3049 struct lwp_info *lp;
3050
3051 lp = find_lwp_pid (pid_to_ptid (lwpid));
3052
3053 /* Check for stop events reported by a process we didn't already
3054 know about - anything not already in our LWP list.
3055
3056 If we're expecting to receive stopped processes after
3057 fork, vfork, and clone events, then we'll just add the
3058 new one to our list and go back to waiting for the event
3059 to be reported - the stopped process might be returned
3060 from waitpid before or after the event is. */
3061 if (WIFSTOPPED (status) && !lp)
3062 {
3063 linux_record_stopped_pid (lwpid, status);
3064 return NULL;
3065 }
3066
3067 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3068 our list, i.e. not part of the current process. This can happen
02f3fc28
PA
3069 if we detach from a program we originally forked and then it
3070 exits. */
3071 if (!WIFSTOPPED (status) && !lp)
3072 return NULL;
3073
3074 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
3075 CLONE_PTRACE processes which do not use the thread library -
3076 otherwise we wouldn't find the new LWP this way. That doesn't
3077 currently work, and the following code is currently unreachable
3078 due to the two blocks above. If it's fixed some day, this code
3079 should be broken out into a function so that we can also pick up
3080 LWPs from the new interface. */
3081 if (!lp)
3082 {
3083 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
3084 if (options & __WCLONE)
3085 lp->cloned = 1;
3086
3087 gdb_assert (WIFSTOPPED (status)
3088 && WSTOPSIG (status) == SIGSTOP);
3089 lp->signalled = 1;
3090
3091 if (!in_thread_list (inferior_ptid))
3092 {
3093 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
3094 GET_PID (inferior_ptid));
3095 add_thread (inferior_ptid);
3096 }
3097
3098 add_thread (lp->ptid);
3099 }
3100
ca2163eb
PA
3101 /* Handle GNU/Linux's syscall SIGTRAPs. */
3102 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3103 {
3104 /* No longer need the sysgood bit. The ptrace event ends up
3105 recorded in lp->waitstatus if we care for it. We can carry
3106 on handling the event like a regular SIGTRAP from here
3107 on. */
3108 status = W_STOPCODE (SIGTRAP);
3109 if (linux_handle_syscall_trap (lp, 0))
3110 return NULL;
3111 }
02f3fc28 3112
ca2163eb
PA
3113 /* Handle GNU/Linux's extended waitstatus for trace events. */
3114 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3115 {
3116 if (debug_linux_nat)
3117 fprintf_unfiltered (gdb_stdlog,
3118 "LLW: Handling extended status 0x%06x\n",
3119 status);
3120 if (linux_handle_extended_wait (lp, status, 0))
3121 return NULL;
3122 }
3123
26ab7092 3124 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3125 {
3126 /* Save the trap's siginfo in case we need it later. */
3127 save_siginfo (lp);
3128
3129 save_sigtrap (lp);
3130 }
ca2163eb 3131
02f3fc28 3132 /* Check if the thread has exited. */
d90e17a7
PA
3133 if ((WIFEXITED (status) || WIFSIGNALED (status))
3134 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3135 {
9db03742
JB
3136 /* If this is the main thread, we must stop all threads and verify
3137 if they are still alive. This is because in the nptl thread model
3138 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3139 other than the main thread. We only get the main thread exit
3140 signal once all child threads have already exited. If we
3141 stop all the threads and use the stop_wait_callback to check
3142 if they have exited we can determine whether this signal
3143 should be ignored or whether it means the end of the debugged
3144 application, regardless of which threading model is being
5d3b6af6 3145 used. */
02f3fc28
PA
3146 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3147 {
3148 lp->stopped = 1;
d90e17a7
PA
3149 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3150 stop_and_resume_callback, NULL);
02f3fc28
PA
3151 }
3152
3153 if (debug_linux_nat)
3154 fprintf_unfiltered (gdb_stdlog,
3155 "LLW: %s exited.\n",
3156 target_pid_to_str (lp->ptid));
3157
d90e17a7 3158 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3159 {
3160 /* If there is at least one more LWP, then the exit signal
3161 was not the end of the debugged application and should be
3162 ignored. */
3163 exit_lwp (lp);
3164 return NULL;
3165 }
02f3fc28
PA
3166 }
3167
3168 /* Check if the current LWP has previously exited. In the nptl
3169 thread model, LWPs other than the main thread do not issue
3170 signals when they exit so we must check whenever the thread has
3171 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3172 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3173 {
d90e17a7
PA
3174 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3175
02f3fc28
PA
3176 if (debug_linux_nat)
3177 fprintf_unfiltered (gdb_stdlog,
3178 "LLW: %s exited.\n",
3179 target_pid_to_str (lp->ptid));
3180
3181 exit_lwp (lp);
3182
3183 /* Make sure there is at least one thread running. */
d90e17a7 3184 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3185
3186 /* Discard the event. */
3187 return NULL;
3188 }
3189
3190 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3191 an attempt to stop an LWP. */
3192 if (lp->signalled
3193 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3194 {
3195 if (debug_linux_nat)
3196 fprintf_unfiltered (gdb_stdlog,
3197 "LLW: Delayed SIGSTOP caught for %s.\n",
3198 target_pid_to_str (lp->ptid));
3199
3200 /* This is a delayed SIGSTOP. */
3201 lp->signalled = 0;
3202
3203 registers_changed ();
3204
28439f5e 3205 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28
PA
3206 lp->step, TARGET_SIGNAL_0);
3207 if (debug_linux_nat)
3208 fprintf_unfiltered (gdb_stdlog,
3209 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3210 lp->step ?
3211 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3212 target_pid_to_str (lp->ptid));
3213
3214 lp->stopped = 0;
3215 gdb_assert (lp->resumed);
3216
3217 /* Discard the event. */
3218 return NULL;
3219 }
3220
57380f4e
DJ
3221 /* Make sure we don't report a SIGINT that we have already displayed
3222 for another thread. */
3223 if (lp->ignore_sigint
3224 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3225 {
3226 if (debug_linux_nat)
3227 fprintf_unfiltered (gdb_stdlog,
3228 "LLW: Delayed SIGINT caught for %s.\n",
3229 target_pid_to_str (lp->ptid));
3230
3231 /* This is a delayed SIGINT. */
3232 lp->ignore_sigint = 0;
3233
3234 registers_changed ();
28439f5e 3235 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3236 lp->step, TARGET_SIGNAL_0);
3237 if (debug_linux_nat)
3238 fprintf_unfiltered (gdb_stdlog,
3239 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3240 lp->step ?
3241 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3242 target_pid_to_str (lp->ptid));
3243
3244 lp->stopped = 0;
3245 gdb_assert (lp->resumed);
3246
3247 /* Discard the event. */
3248 return NULL;
3249 }
3250
02f3fc28
PA
3251 /* An interesting event. */
3252 gdb_assert (lp);
ca2163eb 3253 lp->status = status;
02f3fc28
PA
3254 return lp;
3255}
3256
d6b0e80f 3257static ptid_t
7feb7d06 3258linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3259 ptid_t ptid, struct target_waitstatus *ourstatus,
3260 int target_options)
d6b0e80f 3261{
7feb7d06 3262 static sigset_t prev_mask;
d6b0e80f
AC
3263 struct lwp_info *lp = NULL;
3264 int options = 0;
3265 int status = 0;
d90e17a7 3266 pid_t pid;
d6b0e80f 3267
b84876c2
PA
3268 if (debug_linux_nat_async)
3269 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3270
f973ed9c
DJ
3271 /* The first time we get here after starting a new inferior, we may
3272 not have added it to the LWP list yet - this is the earliest
3273 moment at which we know its PID. */
d90e17a7 3274 if (ptid_is_pid (inferior_ptid))
f973ed9c 3275 {
27c9d204
PA
3276 /* Upgrade the main thread's ptid. */
3277 thread_change_ptid (inferior_ptid,
3278 BUILD_LWP (GET_PID (inferior_ptid),
3279 GET_PID (inferior_ptid)));
3280
f973ed9c
DJ
3281 lp = add_lwp (inferior_ptid);
3282 lp->resumed = 1;
3283 }
3284
7feb7d06
PA
3285 /* Make sure SIGCHLD is blocked. */
3286 block_child_signals (&prev_mask);
d6b0e80f 3287
d90e17a7
PA
3288 if (ptid_equal (ptid, minus_one_ptid))
3289 pid = -1;
3290 else if (ptid_is_pid (ptid))
3291 /* A request to wait for a specific tgid. This is not possible
3292 with waitpid, so instead, we wait for any child, and leave
3293 children we're not interested in right now with a pending
3294 status to report later. */
3295 pid = -1;
3296 else
3297 pid = GET_LWP (ptid);
3298
d6b0e80f 3299retry:
d90e17a7
PA
3300 lp = NULL;
3301 status = 0;
d6b0e80f 3302
e3e9f5a2
PA
3303 /* Make sure that of those LWPs we want to get an event from, there
3304 is at least one LWP that has been resumed. If there's none, just
3305 bail out. The core may just be flushing asynchronously all
3306 events. */
3307 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3308 {
3309 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3310
3311 if (debug_linux_nat_async)
3312 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3313
3314 restore_child_signals_mask (&prev_mask);
3315 return minus_one_ptid;
3316 }
d6b0e80f
AC
3317
3318 /* First check if there is a LWP with a wait status pending. */
3319 if (pid == -1)
3320 {
3321 /* Any LWP that's been resumed will do. */
d90e17a7 3322 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3323 if (lp)
3324 {
ca2163eb 3325 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3326 fprintf_unfiltered (gdb_stdlog,
3327 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3328 status_to_str (lp->status),
d6b0e80f
AC
3329 target_pid_to_str (lp->ptid));
3330 }
3331
b84876c2 3332 /* But if we don't find one, we'll have to wait, and check both
7feb7d06
PA
3333 cloned and uncloned processes. We start with the cloned
3334 processes. */
d6b0e80f
AC
3335 options = __WCLONE | WNOHANG;
3336 }
3337 else if (is_lwp (ptid))
3338 {
3339 if (debug_linux_nat)
3340 fprintf_unfiltered (gdb_stdlog,
3341 "LLW: Waiting for specific LWP %s.\n",
3342 target_pid_to_str (ptid));
3343
3344 /* We have a specific LWP to check. */
3345 lp = find_lwp_pid (ptid);
3346 gdb_assert (lp);
d6b0e80f 3347
ca2163eb 3348 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3349 fprintf_unfiltered (gdb_stdlog,
3350 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3351 status_to_str (lp->status),
d6b0e80f
AC
3352 target_pid_to_str (lp->ptid));
3353
3354 /* If we have to wait, take into account whether PID is a cloned
3355 process or not. And we have to convert it to something that
3356 the layer beneath us can understand. */
3357 options = lp->cloned ? __WCLONE : 0;
3358 pid = GET_LWP (ptid);
d90e17a7
PA
3359
3360 /* We check for lp->waitstatus in addition to lp->status,
3361 because we can have pending process exits recorded in
3362 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3363 an additional lp->status_p flag. */
ca2163eb 3364 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3365 lp = NULL;
d6b0e80f
AC
3366 }
3367
d90e17a7 3368 if (lp && lp->signalled)
d6b0e80f
AC
3369 {
3370 /* A pending SIGSTOP may interfere with the normal stream of
3371 events. In a typical case where interference is a problem,
3372 we have a SIGSTOP signal pending for LWP A while
3373 single-stepping it, encounter an event in LWP B, and take the
3374 pending SIGSTOP while trying to stop LWP A. After processing
3375 the event in LWP B, LWP A is continued, and we'll never see
3376 the SIGTRAP associated with the last time we were
3377 single-stepping LWP A. */
3378
3379 /* Resume the thread. It should halt immediately returning the
3380 pending SIGSTOP. */
3381 registers_changed ();
28439f5e 3382 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3383 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3384 if (debug_linux_nat)
3385 fprintf_unfiltered (gdb_stdlog,
3386 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3387 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3388 target_pid_to_str (lp->ptid));
3389 lp->stopped = 0;
3390 gdb_assert (lp->resumed);
3391
ca2163eb
PA
3392 /* Catch the pending SIGSTOP. */
3393 status = lp->status;
3394 lp->status = 0;
3395
d6b0e80f 3396 stop_wait_callback (lp, NULL);
ca2163eb
PA
3397
3398 /* If the lp->status field isn't empty, we caught another signal
3399 while flushing the SIGSTOP. Return it back to the event
3400 queue of the LWP, as we already have an event to handle. */
3401 if (lp->status)
3402 {
3403 if (debug_linux_nat)
3404 fprintf_unfiltered (gdb_stdlog,
3405 "LLW: kill %s, %s\n",
3406 target_pid_to_str (lp->ptid),
3407 status_to_str (lp->status));
3408 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3409 }
3410
3411 lp->status = status;
d6b0e80f
AC
3412 }
3413
b84876c2
PA
3414 if (!target_can_async_p ())
3415 {
3416 /* Causes SIGINT to be passed on to the attached process. */
3417 set_sigint_trap ();
b84876c2 3418 }
d6b0e80f 3419
47608cb1
PA
3420 /* Translate generic target_wait options into waitpid options. */
3421 if (target_options & TARGET_WNOHANG)
3422 options |= WNOHANG;
7feb7d06 3423
d90e17a7 3424 while (lp == NULL)
d6b0e80f
AC
3425 {
3426 pid_t lwpid;
3427
7feb7d06 3428 lwpid = my_waitpid (pid, &status, options);
b84876c2 3429
d6b0e80f
AC
3430 if (lwpid > 0)
3431 {
3432 gdb_assert (pid == -1 || lwpid == pid);
3433
3434 if (debug_linux_nat)
3435 {
3436 fprintf_unfiltered (gdb_stdlog,
3437 "LLW: waitpid %ld received %s\n",
3438 (long) lwpid, status_to_str (status));
3439 }
3440
02f3fc28 3441 lp = linux_nat_filter_event (lwpid, status, options);
d90e17a7 3442
33355866
JK
3443 /* STATUS is now no longer valid, use LP->STATUS instead. */
3444 status = 0;
3445
d90e17a7
PA
3446 if (lp
3447 && ptid_is_pid (ptid)
3448 && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
d6b0e80f 3449 {
e3e9f5a2
PA
3450 gdb_assert (lp->resumed);
3451
d90e17a7 3452 if (debug_linux_nat)
3e43a32a
MS
3453 fprintf (stderr,
3454 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3455 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3456
ca2163eb 3457 if (WIFSTOPPED (lp->status))
d90e17a7 3458 {
ca2163eb 3459 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3460 {
e3e9f5a2
PA
3461 /* Cancel breakpoint hits. The breakpoint may
3462 be removed before we fetch events from this
3463 process to report to the core. It is best
3464 not to assume the moribund breakpoints
3465 heuristic always handles these cases --- it
3466 could be that too many events go through to the
3467 core before this one is handled. All-stop
3468 always cancels breakpoint hits in all
3469 threads. */
3470 if (non_stop
00390b84 3471 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3472 && cancel_breakpoint (lp))
3473 {
3474 /* Throw away the SIGTRAP. */
3475 lp->status = 0;
3476
3477 if (debug_linux_nat)
3478 fprintf (stderr,
3e43a32a
MS
3479 "LLW: LWP %ld hit a breakpoint while"
3480 " waiting for another process;"
3481 " cancelled it\n",
e3e9f5a2
PA
3482 ptid_get_lwp (lp->ptid));
3483 }
3484 lp->stopped = 1;
d90e17a7
PA
3485 }
3486 else
3487 {
3488 lp->stopped = 1;
3489 lp->signalled = 0;
3490 }
3491 }
33355866 3492 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3493 {
3494 if (debug_linux_nat)
3e43a32a
MS
3495 fprintf (stderr,
3496 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3497 ptid_get_lwp (lp->ptid));
3498
3499 /* This was the last lwp in the process. Since
3500 events are serialized to GDB core, and we can't
3501 report this one right now, but GDB core and the
3502 other target layers will want to be notified
3503 about the exit code/signal, leave the status
3504 pending for the next time we're able to report
3505 it. */
d90e17a7
PA
3506
3507 /* Prevent trying to stop this thread again. We'll
3508 never try to resume it because it has a pending
3509 status. */
3510 lp->stopped = 1;
3511
3512 /* Dead LWPs aren't expected to report a pending
3513 SIGSTOP. */
3514 lp->signalled = 0;
3515
3516 /* Store the pending event in the waitstatus as
3517 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3518 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3519 }
3520
3521 /* Keep looking. */
3522 lp = NULL;
d6b0e80f
AC
3523 continue;
3524 }
3525
d90e17a7
PA
3526 if (lp)
3527 break;
3528 else
3529 {
3530 if (pid == -1)
3531 {
3532 /* waitpid did return something. Restart over. */
3533 options |= __WCLONE;
3534 }
3535 continue;
3536 }
d6b0e80f
AC
3537 }
3538
3539 if (pid == -1)
3540 {
3541 /* Alternate between checking cloned and uncloned processes. */
3542 options ^= __WCLONE;
3543
b84876c2
PA
3544 /* And every time we have checked both:
3545 In async mode, return to event loop;
3546 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 3547 if (options & __WCLONE)
b84876c2 3548 {
47608cb1 3549 if (target_options & TARGET_WNOHANG)
b84876c2
PA
3550 {
3551 /* No interesting event. */
3552 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3553
b84876c2
PA
3554 if (debug_linux_nat_async)
3555 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3556
7feb7d06 3557 restore_child_signals_mask (&prev_mask);
b84876c2
PA
3558 return minus_one_ptid;
3559 }
3560
3561 sigsuspend (&suspend_mask);
3562 }
d6b0e80f 3563 }
28736962
PA
3564 else if (target_options & TARGET_WNOHANG)
3565 {
3566 /* No interesting event for PID yet. */
3567 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3568
3569 if (debug_linux_nat_async)
3570 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3571
3572 restore_child_signals_mask (&prev_mask);
3573 return minus_one_ptid;
3574 }
d6b0e80f
AC
3575
3576 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3577 gdb_assert (lp == NULL);
d6b0e80f
AC
3578 }
3579
b84876c2 3580 if (!target_can_async_p ())
d26b5354 3581 clear_sigint_trap ();
d6b0e80f
AC
3582
3583 gdb_assert (lp);
3584
ca2163eb
PA
3585 status = lp->status;
3586 lp->status = 0;
3587
d6b0e80f
AC
3588 /* Don't report signals that GDB isn't interested in, such as
3589 signals that are neither printed nor stopped upon. Stopping all
3590 threads can be a bit time-consuming so if we want decent
3591 performance with heavily multi-threaded programs, especially when
3592 they're using a high frequency timer, we'd better avoid it if we
3593 can. */
3594
3595 if (WIFSTOPPED (status))
3596 {
423ec54c 3597 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b48e9c
PA
3598 struct inferior *inf;
3599
3600 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3601 gdb_assert (inf);
d6b0e80f 3602
d6b48e9c
PA
3603 /* Defer to common code if we get a signal while
3604 single-stepping, since that may need special care, e.g. to
3605 skip the signal handler, or, if we're gaining control of the
3606 inferior. */
d539ed7e 3607 if (!lp->step
16c381f0 3608 && inf->control.stop_soon == NO_STOP_QUIETLY
d539ed7e 3609 && signal_stop_state (signo) == 0
d6b0e80f
AC
3610 && signal_print_state (signo) == 0
3611 && signal_pass_state (signo) == 1)
3612 {
3613 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3614 here? It is not clear we should. GDB may not expect
3615 other threads to run. On the other hand, not resuming
3616 newly attached threads may cause an unwanted delay in
3617 getting them running. */
3618 registers_changed ();
28439f5e 3619 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3620 lp->step, signo);
d6b0e80f
AC
3621 if (debug_linux_nat)
3622 fprintf_unfiltered (gdb_stdlog,
3623 "LLW: %s %s, %s (preempt 'handle')\n",
3624 lp->step ?
3625 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3626 target_pid_to_str (lp->ptid),
423ec54c
JK
3627 (signo != TARGET_SIGNAL_0
3628 ? strsignal (target_signal_to_host (signo))
3629 : "0"));
d6b0e80f 3630 lp->stopped = 0;
d6b0e80f
AC
3631 goto retry;
3632 }
3633
1ad15515 3634 if (!non_stop)
d6b0e80f 3635 {
1ad15515
PA
3636 /* Only do the below in all-stop, as we currently use SIGINT
3637 to implement target_stop (see linux_nat_stop) in
3638 non-stop. */
3639 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3640 {
3641 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3642 forwarded to the entire process group, that is, all LWPs
3643 will receive it - unless they're using CLONE_THREAD to
3644 share signals. Since we only want to report it once, we
3645 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3646 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3647 set_ignore_sigint, NULL);
1ad15515
PA
3648 lp->ignore_sigint = 0;
3649 }
3650 else
3651 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3652 }
3653 }
3654
3655 /* This LWP is stopped now. */
3656 lp->stopped = 1;
3657
3658 if (debug_linux_nat)
3659 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3660 status_to_str (status), target_pid_to_str (lp->ptid));
3661
4c28f408
PA
3662 if (!non_stop)
3663 {
3664 /* Now stop all other LWP's ... */
d90e17a7 3665 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3666
3667 /* ... and wait until all of them have reported back that
3668 they're no longer running. */
d90e17a7 3669 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3670
3671 /* If we're not waiting for a specific LWP, choose an event LWP
3672 from among those that have had events. Giving equal priority
3673 to all LWPs that have had events helps prevent
3674 starvation. */
3675 if (pid == -1)
d90e17a7 3676 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3677
e3e9f5a2
PA
3678 /* Now that we've selected our final event LWP, cancel any
3679 breakpoints in other LWPs that have hit a GDB breakpoint.
3680 See the comment in cancel_breakpoints_callback to find out
3681 why. */
3682 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3683
3684 /* In all-stop, from the core's perspective, all LWPs are now
3685 stopped until a new resume action is sent over. */
3686 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3687 }
3688 else
3689 lp->resumed = 0;
d6b0e80f 3690
26ab7092 3691 if (linux_nat_status_is_event (status))
d6b0e80f 3692 {
d6b0e80f
AC
3693 if (debug_linux_nat)
3694 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3695 "LLW: trap ptid is %s.\n",
3696 target_pid_to_str (lp->ptid));
d6b0e80f 3697 }
d6b0e80f
AC
3698
3699 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3700 {
3701 *ourstatus = lp->waitstatus;
3702 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3703 }
3704 else
3705 store_waitstatus (ourstatus, status);
3706
b84876c2
PA
3707 if (debug_linux_nat_async)
3708 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3709
7feb7d06 3710 restore_child_signals_mask (&prev_mask);
1e225492
JK
3711
3712 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3713 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3714 lp->core = -1;
3715 else
3716 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3717
f973ed9c 3718 return lp->ptid;
d6b0e80f
AC
3719}
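
The polling loop above alternates __WCLONE in and out of the waitpid options and falls back to sigsuspend once both flavors of children have been checked. The following stand-alone sketch shows that pattern in isolation; the helper name and its arguments are invented for illustration and are not part of this file.

#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef __WCLONE
#define __WCLONE 0x80000000	/* Same value the kernel uses.  */
#endif

/* Poll for an event from any child LWP, alternating between cloned
   and uncloned children, and sleep in sigsuspend once both flavors
   have been polled without finding anything.  SUSPEND_MASK is the
   mask to use while sleeping (SIGCHLD unblocked).  */

static pid_t
wait_for_any_lwp_example (int *status, const sigset_t *suspend_mask)
{
  int options = __WCLONE | WNOHANG;

  for (;;)
    {
      pid_t lwpid = waitpid (-1, status, options);

      if (lwpid > 0)
        return lwpid;		/* Got an event.  */

      /* Nothing yet; switch to the other flavor of children.  */
      options ^= __WCLONE;

      /* Once both flavors have been checked, wait for SIGCHLD.  */
      if (options & __WCLONE)
        sigsuspend (suspend_mask);
    }
}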
3720
e3e9f5a2
PA
3721/* Resume LWPs that are currently stopped without any pending status
3722 to report, but are resumed from the core's perspective. */
3723
3724static int
3725resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3726{
3727 ptid_t *wait_ptid_p = data;
3728
3729 if (lp->stopped
3730 && lp->resumed
3731 && lp->status == 0
3732 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3733 {
3734 gdb_assert (is_executing (lp->ptid));
3735
3736 /* Don't bother if there's a breakpoint at PC that we'd hit
3737 immediately, and we're not waiting for this LWP. */
3738 if (!ptid_match (lp->ptid, *wait_ptid_p))
3739 {
3740 struct regcache *regcache = get_thread_regcache (lp->ptid);
3741 CORE_ADDR pc = regcache_read_pc (regcache);
3742
3743 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3744 return 0;
3745 }
3746
3747 if (debug_linux_nat)
3748 fprintf_unfiltered (gdb_stdlog,
3749 "RSRL: resuming stopped-resumed LWP %s\n",
3750 target_pid_to_str (lp->ptid));
3751
3752 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3753 lp->step, TARGET_SIGNAL_0);
3754 lp->stopped = 0;
3755 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3756 lp->stopped_by_watchpoint = 0;
3757 }
3758
3759 return 0;
3760}
3761
7feb7d06
PA
3762static ptid_t
3763linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3764 ptid_t ptid, struct target_waitstatus *ourstatus,
3765 int target_options)
7feb7d06
PA
3766{
3767 ptid_t event_ptid;
3768
3769 if (debug_linux_nat)
3e43a32a
MS
3770 fprintf_unfiltered (gdb_stdlog,
3771 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
3772
3773 /* Flush the async file first. */
3774 if (target_can_async_p ())
3775 async_file_flush ();
3776
e3e9f5a2
PA
3777 /* Resume LWPs that are currently stopped without any pending status
3778 to report, but are resumed from the core's perspective. LWPs get
3779 in this state if we find them stopping at a time we're not
3780 interested in reporting the event (target_wait on a
3781 specific_process, for example, see linux_nat_wait_1), and
3782 meanwhile the event became uninteresting. Don't bother resuming
3783 LWPs we're not going to wait for if they'd stop immediately. */
3784 if (non_stop)
3785 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3786
47608cb1 3787 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3788
3789 /* If we requested any event, and something came out, assume there
3790 may be more. If we requested a specific lwp or process, also
3791 assume there may be more. */
3792 if (target_can_async_p ()
3793 && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3794 || !ptid_equal (ptid, minus_one_ptid)))
3795 async_file_mark ();
3796
3797 /* Get ready for the next event. */
3798 if (target_can_async_p ())
3799 target_async (inferior_event_handler, 0);
3800
3801 return event_ptid;
3802}
3803
d6b0e80f
AC
3804static int
3805kill_callback (struct lwp_info *lp, void *data)
3806{
3807 errno = 0;
3808 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3809 if (debug_linux_nat)
3810 fprintf_unfiltered (gdb_stdlog,
3811 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3812 target_pid_to_str (lp->ptid),
3813 errno ? safe_strerror (errno) : "OK");
3814
3815 return 0;
3816}
3817
3818static int
3819kill_wait_callback (struct lwp_info *lp, void *data)
3820{
3821 pid_t pid;
3822
3823 /* We must make sure that there are no pending events (delayed
3824 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3825 program doesn't interfere with any following debugging session. */
3826
3827 /* For cloned processes we must check both with __WCLONE and
3828 without, since the exit status of a cloned process isn't reported
3829 with __WCLONE. */
3830 if (lp->cloned)
3831 {
3832 do
3833 {
58aecb61 3834 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3835 if (pid != (pid_t) -1)
d6b0e80f 3836 {
e85a822c
DJ
3837 if (debug_linux_nat)
3838 fprintf_unfiltered (gdb_stdlog,
3839 "KWC: wait %s received unknown.\n",
3840 target_pid_to_str (lp->ptid));
3841 /* The Linux kernel sometimes fails to kill a thread
3842 completely after PTRACE_KILL; the thread goes from the stop
3843 point in do_fork out to the one in
3844 get_signal_to_deliver and waits again. So kill it
3845 again. */
3846 kill_callback (lp, NULL);
d6b0e80f
AC
3847 }
3848 }
3849 while (pid == GET_LWP (lp->ptid));
3850
3851 gdb_assert (pid == -1 && errno == ECHILD);
3852 }
3853
3854 do
3855 {
58aecb61 3856 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3857 if (pid != (pid_t) -1)
d6b0e80f 3858 {
e85a822c
DJ
3859 if (debug_linux_nat)
3860 fprintf_unfiltered (gdb_stdlog,
3861 "KWC: wait %s received unk.\n",
3862 target_pid_to_str (lp->ptid));
3863 /* See the call to kill_callback above. */
3864 kill_callback (lp, NULL);
d6b0e80f
AC
3865 }
3866 }
3867 while (pid == GET_LWP (lp->ptid));
3868
3869 gdb_assert (pid == -1 && errno == ECHILD);
3870 return 0;
3871}
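
A condensed sketch of the reaping pattern above: after PTRACE_KILL, drain events for the LWP both with and without __WCLONE until waitpid stops reporting it. The helper below is hypothetical and uses only libc calls.

#include <stddef.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef __WCLONE
#define __WCLONE 0x80000000
#endif

/* Drain wait events for LWP after it has been sent PTRACE_KILL.  A
   cloned LWP and a plain child report their exits under different
   waitpid flavors, so check both until the LWP is no longer
   reported.  */

static void
reap_killed_lwp_example (pid_t lwp, int cloned)
{
  pid_t ret;

  if (cloned)
    do
      ret = waitpid (lwp, NULL, __WCLONE);
    while (ret == lwp);

  do
    ret = waitpid (lwp, NULL, 0);
  while (ret == lwp);

  /* At this point waitpid no longer reports LWP (typically -1 with
     ECHILD): nothing is left to reap.  */
}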
3872
3873static void
7d85a9c0 3874linux_nat_kill (struct target_ops *ops)
d6b0e80f 3875{
f973ed9c
DJ
3876 struct target_waitstatus last;
3877 ptid_t last_ptid;
3878 int status;
d6b0e80f 3879
f973ed9c
DJ
3880 /* If we're stopped while forking and we haven't followed yet,
3881 kill the other task. We need to do this first because the
3882 parent will be sleeping if this is a vfork. */
d6b0e80f 3883
f973ed9c 3884 get_last_target_status (&last_ptid, &last);
d6b0e80f 3885
f973ed9c
DJ
3886 if (last.kind == TARGET_WAITKIND_FORKED
3887 || last.kind == TARGET_WAITKIND_VFORKED)
3888 {
3a3e9ee3 3889 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3890 wait (&status);
3891 }
3892
3893 if (forks_exist_p ())
7feb7d06 3894 linux_fork_killall ();
f973ed9c
DJ
3895 else
3896 {
d90e17a7 3897 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3898
4c28f408
PA
3899 /* Stop all threads before killing them, since ptrace requires
3900 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3901 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3902 /* ... and wait until all of them have reported back that
3903 they're no longer running. */
d90e17a7 3904 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3905
f973ed9c 3906 /* Kill all LWP's ... */
d90e17a7 3907 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3908
3909 /* ... and wait until we've flushed all events. */
d90e17a7 3910 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3911 }
3912
3913 target_mourn_inferior ();
d6b0e80f
AC
3914}
3915
3916static void
136d6dae 3917linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3918{
d90e17a7 3919 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 3920
f973ed9c 3921 if (! forks_exist_p ())
d90e17a7
PA
3922 /* Normal case, no other forks available. */
3923 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3924 else
3925 /* Multi-fork case. The current inferior_ptid has exited, but
3926 there are other viable forks to debug. Delete the exiting
3927 one and context-switch to the first available. */
3928 linux_fork_mourn_inferior ();
d6b0e80f
AC
3929}
3930
5b009018
PA
3931/* Convert a native/host siginfo object, into/from the siginfo in the
3932 layout of the inferiors' architecture. */
3933
3934static void
3935siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3936{
3937 int done = 0;
3938
3939 if (linux_nat_siginfo_fixup != NULL)
3940 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3941
3942 /* If there was no callback, or the callback didn't do anything,
3943 then just do a straight memcpy. */
3944 if (!done)
3945 {
3946 if (direction == 1)
3947 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3948 else
3949 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3950 }
3951}
3952
4aa995e1
PA
3953static LONGEST
3954linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3955 const char *annex, gdb_byte *readbuf,
3956 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3957{
4aa995e1
PA
3958 int pid;
3959 struct siginfo siginfo;
5b009018 3960 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
3961
3962 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3963 gdb_assert (readbuf || writebuf);
3964
3965 pid = GET_LWP (inferior_ptid);
3966 if (pid == 0)
3967 pid = GET_PID (inferior_ptid);
3968
3969 if (offset > sizeof (siginfo))
3970 return -1;
3971
3972 errno = 0;
3973 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3974 if (errno != 0)
3975 return -1;
3976
5b009018
PA
3977 /* When GDB is built as a 64-bit application, ptrace writes into
3978 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3979 inferior with a 64-bit GDB should look the same as debugging it
3980 with a 32-bit GDB, we need to convert it. GDB core always sees
3981 the converted layout, so any read/write will have to be done
3982 post-conversion. */
3983 siginfo_fixup (&siginfo, inf_siginfo, 0);
3984
4aa995e1
PA
3985 if (offset + len > sizeof (siginfo))
3986 len = sizeof (siginfo) - offset;
3987
3988 if (readbuf != NULL)
5b009018 3989 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3990 else
3991 {
5b009018
PA
3992 memcpy (inf_siginfo + offset, writebuf, len);
3993
3994 /* Convert back to ptrace layout before flushing it out. */
3995 siginfo_fixup (&siginfo, inf_siginfo, 1);
3996
4aa995e1
PA
3997 errno = 0;
3998 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3999 if (errno != 0)
4000 return -1;
4001 }
4002
4003 return len;
4004}
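
For illustration only, a minimal libc-level fetch of an LWP's siginfo with PTRACE_GETSIGINFO, assuming the caller is already attached and the LWP is stopped; the function name is invented, and the 32/64-bit layout conversion performed by siginfo_fixup above is deliberately left out.

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

/* Fetch the siginfo of a stopped, ptrace-attached LWP into *SI.
   Returns 0 on success, -1 (with errno set) on failure.  What the
   kernel hands back uses the host's siginfo layout; a 64-bit
   debugger inspecting a 32-bit inferior must convert it, as the
   code above arranges to do.  */

static int
fetch_lwp_siginfo_example (pid_t lwp, siginfo_t *si)
{
  if (ptrace (PTRACE_GETSIGINFO, lwp, (void *) 0, si) == -1)
    return -1;
  return 0;
}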
4005
10d6c8cd
DJ
4006static LONGEST
4007linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4008 const char *annex, gdb_byte *readbuf,
4009 const gdb_byte *writebuf,
4010 ULONGEST offset, LONGEST len)
d6b0e80f 4011{
4aa995e1 4012 struct cleanup *old_chain;
10d6c8cd 4013 LONGEST xfer;
d6b0e80f 4014
4aa995e1
PA
4015 if (object == TARGET_OBJECT_SIGNAL_INFO)
4016 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4017 offset, len);
4018
c35b1492
PA
4019 /* The target is connected but no live inferior is selected. Pass
4020 this request down to a lower stratum (e.g., the executable
4021 file). */
4022 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4023 return 0;
4024
4aa995e1
PA
4025 old_chain = save_inferior_ptid ();
4026
d6b0e80f
AC
4027 if (is_lwp (inferior_ptid))
4028 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4029
10d6c8cd
DJ
4030 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4031 offset, len);
d6b0e80f
AC
4032
4033 do_cleanups (old_chain);
4034 return xfer;
4035}
4036
4037static int
28439f5e 4038linux_thread_alive (ptid_t ptid)
d6b0e80f 4039{
4c28f408
PA
4040 int err;
4041
d6b0e80f
AC
4042 gdb_assert (is_lwp (ptid));
4043
4c28f408
PA
4044 /* Send signal 0 instead of using ptrace, because ptracing a
4045 running thread errors out claiming that the thread doesn't
4046 exist. */
4047 err = kill_lwp (GET_LWP (ptid), 0);
4048
d6b0e80f
AC
4049 if (debug_linux_nat)
4050 fprintf_unfiltered (gdb_stdlog,
4c28f408 4051 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4052 target_pid_to_str (ptid),
4c28f408 4053 err ? safe_strerror (err) : "OK");
9c0dd46b 4054
4c28f408 4055 if (err != 0)
d6b0e80f
AC
4056 return 0;
4057
4058 return 1;
4059}
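
A hypothetical stand-alone version of the signal-0 liveness check described in the comment above, assuming a kernel that provides the tgkill syscall; the helper name and the choice of tgkill are illustrative.

#include <errno.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Return 1 if thread TID of thread group TGID still exists.  Signal 0
   performs the existence and permission checks without delivering
   anything, and unlike ptrace it also works on a running thread.  */

static int
thread_alive_example (pid_t tgid, pid_t tid)
{
  if (syscall (SYS_tgkill, tgid, tid, 0) == 0)
    return 1;

  /* ESRCH means gone; anything else (e.g. EPERM) means it exists.  */
  return errno != ESRCH;
}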
4060
28439f5e
PA
4061static int
4062linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4063{
4064 return linux_thread_alive (ptid);
4065}
4066
d6b0e80f 4067static char *
117de6a9 4068linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4069{
4070 static char buf[64];
4071
a0ef4274 4072 if (is_lwp (ptid)
d90e17a7
PA
4073 && (GET_PID (ptid) != GET_LWP (ptid)
4074 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4075 {
4076 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4077 return buf;
4078 }
4079
4080 return normal_pid_to_str (ptid);
4081}
4082
dba24537
AC
4083/* Accepts an integer PID; Returns a string representing a file that
4084 can be opened to get the symbols for the child process. */
4085
6d8fd2b7
UW
4086static char *
4087linux_child_pid_to_exec_file (int pid)
dba24537
AC
4088{
4089 char *name1, *name2;
4090
4091 name1 = xmalloc (MAXPATHLEN);
4092 name2 = xmalloc (MAXPATHLEN);
4093 make_cleanup (xfree, name1);
4094 make_cleanup (xfree, name2);
4095 memset (name2, 0, MAXPATHLEN);
4096
4097 sprintf (name1, "/proc/%d/exe", pid);
4098 if (readlink (name1, name2, MAXPATHLEN) > 0)
4099 return name2;
4100 else
4101 return name1;
4102}
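
A small self-contained sketch of the /proc/PID/exe lookup, shown mainly because readlink does not NUL-terminate its result; the helper name and buffer handling are illustrative, not part of this file.

#include <stdio.h>
#include <unistd.h>

/* Resolve /proc/PID/exe into BUF of size LEN.  Returns BUF on
   success, NULL otherwise.  readlink does not NUL-terminate, which
   is why the function above zeroes its buffer first; here the
   terminator is added by hand instead.  */

static char *
pid_to_exec_file_example (int pid, char *buf, size_t len)
{
  char link[64];
  ssize_t n;

  snprintf (link, sizeof link, "/proc/%d/exe", pid);
  n = readlink (link, buf, len - 1);
  if (n < 0)
    return NULL;

  buf[n] = '\0';
  return buf;
}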
4103
4104/* Service function for corefiles and info proc. */
4105
4106static int
4107read_mapping (FILE *mapfile,
4108 long long *addr,
4109 long long *endaddr,
4110 char *permissions,
4111 long long *offset,
4112 char *device, long long *inode, char *filename)
4113{
4114 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4115 addr, endaddr, permissions, offset, device, inode);
4116
2e14c2ea
MS
4117 filename[0] = '\0';
4118 if (ret > 0 && ret != EOF)
dba24537
AC
4119 {
4120 /* Eat everything up to EOL for the filename. This will prevent
4121 weird filenames (such as one with embedded whitespace) from
4122 confusing this code. It also makes this code more robust in
4123 respect to annotations the kernel may add after the filename.
4124
4125 Note the filename is used for informational purposes
4126 only. */
4127 ret += fscanf (mapfile, "%[^\n]\n", filename);
4128 }
2e14c2ea 4129
dba24537
AC
4130 return (ret != 0 && ret != EOF);
4131}
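
The same /proc/PID/maps parsing idea as read_mapping, sketched with sscanf on a single line of input; the names and the decimal inode field are illustrative assumptions.

#include <stdio.h>

/* Parse one line of /proc/PID/maps, e.g.
     "00400000-0040b000 r-xp 00000000 08:02 1572    /bin/cat".
   PERMS and DEVICE must have room for at least 8 bytes; FILENAME
   should be MAXPATHLEN-sized.  Returns 1 if the mandatory fields
   were found.  */

static int
parse_maps_line_example (const char *line,
                         unsigned long long *start,
                         unsigned long long *end,
                         char *perms, unsigned long long *offset,
                         char *device, unsigned long long *inode,
                         char *filename)
{
  int n;

  filename[0] = '\0';
  /* %[^\n] grabs the rest of the line, so filenames with embedded
     spaces survive intact.  */
  n = sscanf (line, "%llx-%llx %7s %llx %7s %llu %[^\n]",
              start, end, perms, offset, device, inode, filename);
  return n >= 6;
}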
4132
4133/* Fills the "to_find_memory_regions" target vector. Lists the memory
4134 regions in the inferior for a corefile. */
4135
4136static int
b8edc417 4137linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
dba24537 4138{
89ecc4f5 4139 int pid = PIDGET (inferior_ptid);
dba24537
AC
4140 char mapsfilename[MAXPATHLEN];
4141 FILE *mapsfile;
4142 long long addr, endaddr, size, offset, inode;
4143 char permissions[8], device[8], filename[MAXPATHLEN];
4144 int read, write, exec;
7c8a8b04 4145 struct cleanup *cleanup;
dba24537
AC
4146
4147 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 4148 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 4149 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 4150 error (_("Could not open %s."), mapsfilename);
7c8a8b04 4151 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
4152
4153 if (info_verbose)
4154 fprintf_filtered (gdb_stdout,
4155 "Reading memory regions from %s\n", mapsfilename);
4156
4157 /* Now iterate until end-of-file. */
4158 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4159 &offset, &device[0], &inode, &filename[0]))
4160 {
4161 size = endaddr - addr;
4162
4163 /* Get the segment's permissions. */
4164 read = (strchr (permissions, 'r') != 0);
4165 write = (strchr (permissions, 'w') != 0);
4166 exec = (strchr (permissions, 'x') != 0);
4167
4168 if (info_verbose)
4169 {
4170 fprintf_filtered (gdb_stdout,
2244ba2e
PM
4171 "Save segment, %s bytes at %s (%c%c%c)",
4172 plongest (size), paddress (target_gdbarch, addr),
dba24537
AC
4173 read ? 'r' : ' ',
4174 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 4175 if (filename[0])
dba24537
AC
4176 fprintf_filtered (gdb_stdout, " for %s", filename);
4177 fprintf_filtered (gdb_stdout, "\n");
4178 }
4179
4180 /* Invoke the callback function to create the corefile
4181 segment. */
4182 func (addr, size, read, write, exec, obfd);
4183 }
7c8a8b04 4184 do_cleanups (cleanup);
dba24537
AC
4185 return 0;
4186}
4187
2020b7ab
PA
4188static int
4189find_signalled_thread (struct thread_info *info, void *data)
4190{
16c381f0 4191 if (info->suspend.stop_signal != TARGET_SIGNAL_0
2020b7ab
PA
4192 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4193 return 1;
4194
4195 return 0;
4196}
4197
4198static enum target_signal
4199find_stop_signal (void)
4200{
4201 struct thread_info *info =
4202 iterate_over_threads (find_signalled_thread, NULL);
4203
4204 if (info)
16c381f0 4205 return info->suspend.stop_signal;
2020b7ab
PA
4206 else
4207 return TARGET_SIGNAL_0;
4208}
4209
dba24537
AC
4210/* Records the thread's register state for the corefile note
4211 section. */
4212
4213static char *
4214linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4215 char *note_data, int *note_size,
4216 enum target_signal stop_signal)
dba24537 4217{
dba24537 4218 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4219 struct gdbarch *gdbarch = target_gdbarch;
4220 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4221 const struct regset *regset;
55e969c1 4222 int core_regset_p;
594f7785 4223 struct cleanup *old_chain;
17ea7499
CES
4224 struct core_regset_section *sect_list;
4225 char *gdb_regset;
594f7785
UW
4226
4227 old_chain = save_inferior_ptid ();
4228 inferior_ptid = ptid;
4229 target_fetch_registers (regcache, -1);
4230 do_cleanups (old_chain);
4f844a66
DM
4231
4232 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4233 sect_list = gdbarch_core_regset_sections (gdbarch);
4234
17ea7499
CES
4235 /* The loop below uses the new struct core_regset_section, which stores
4236 the supported section names and sizes for the core file. Note that
4237 the PRSTATUS note needs to be treated specially. But the other notes are
4238 structurally the same, so they can benefit from the new struct. */
4239 if (core_regset_p && sect_list != NULL)
4240 while (sect_list->sect_name != NULL)
4241 {
17ea7499
CES
4242 regset = gdbarch_regset_from_core_section (gdbarch,
4243 sect_list->sect_name,
4244 sect_list->size);
4245 gdb_assert (regset && regset->collect_regset);
4246 gdb_regset = xmalloc (sect_list->size);
4247 regset->collect_regset (regset, regcache, -1,
4248 gdb_regset, sect_list->size);
2f2241f1
UW
4249
4250 if (strcmp (sect_list->sect_name, ".reg") == 0)
4251 note_data = (char *) elfcore_write_prstatus
4252 (obfd, note_data, note_size,
857d11d0
JK
4253 lwp, target_signal_to_host (stop_signal),
4254 gdb_regset);
2f2241f1
UW
4255 else
4256 note_data = (char *) elfcore_write_register_note
4257 (obfd, note_data, note_size,
4258 sect_list->sect_name, gdb_regset,
4259 sect_list->size);
17ea7499
CES
4260 xfree (gdb_regset);
4261 sect_list++;
4262 }
dba24537 4263
17ea7499
CES
4264 /* For architectures that do not have the struct core_regset_section
4265 implemented, we use the old method. When all the architectures have
4266 the new support, the code below should be deleted. */
4f844a66 4267 else
17ea7499 4268 {
2f2241f1
UW
4269 gdb_gregset_t gregs;
4270 gdb_fpregset_t fpregs;
4271
4272 if (core_regset_p
4273 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3e43a32a
MS
4274 sizeof (gregs)))
4275 != NULL && regset->collect_regset != NULL)
2f2241f1
UW
4276 regset->collect_regset (regset, regcache, -1,
4277 &gregs, sizeof (gregs));
4278 else
4279 fill_gregset (regcache, &gregs, -1);
4280
857d11d0
JK
4281 note_data = (char *) elfcore_write_prstatus
4282 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4283 &gregs);
2f2241f1 4284
17ea7499
CES
4285 if (core_regset_p
4286 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3e43a32a
MS
4287 sizeof (fpregs)))
4288 != NULL && regset->collect_regset != NULL)
17ea7499
CES
4289 regset->collect_regset (regset, regcache, -1,
4290 &fpregs, sizeof (fpregs));
4291 else
4292 fill_fpregset (regcache, &fpregs, -1);
4293
4294 note_data = (char *) elfcore_write_prfpreg (obfd,
4295 note_data,
4296 note_size,
4297 &fpregs, sizeof (fpregs));
4298 }
4f844a66 4299
dba24537
AC
4300 return note_data;
4301}
4302
4303struct linux_nat_corefile_thread_data
4304{
4305 bfd *obfd;
4306 char *note_data;
4307 int *note_size;
4308 int num_notes;
2020b7ab 4309 enum target_signal stop_signal;
dba24537
AC
4310};
4311
4312/* Called by gdbthread.c once per thread. Records the thread's
4313 register state for the corefile note section. */
4314
4315static int
4316linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4317{
4318 struct linux_nat_corefile_thread_data *args = data;
dba24537 4319
dba24537
AC
4320 args->note_data = linux_nat_do_thread_registers (args->obfd,
4321 ti->ptid,
4322 args->note_data,
2020b7ab
PA
4323 args->note_size,
4324 args->stop_signal);
dba24537 4325 args->num_notes++;
56be3814 4326
dba24537
AC
4327 return 0;
4328}
4329
efcbbd14
UW
4330/* Enumerate spufs IDs for process PID. */
4331
4332static void
4333iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4334{
4335 char path[128];
4336 DIR *dir;
4337 struct dirent *entry;
4338
4339 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4340 dir = opendir (path);
4341 if (!dir)
4342 return;
4343
4344 rewinddir (dir);
4345 while ((entry = readdir (dir)) != NULL)
4346 {
4347 struct stat st;
4348 struct statfs stfs;
4349 int fd;
4350
4351 fd = atoi (entry->d_name);
4352 if (!fd)
4353 continue;
4354
4355 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4356 if (stat (path, &st) != 0)
4357 continue;
4358 if (!S_ISDIR (st.st_mode))
4359 continue;
4360
4361 if (statfs (path, &stfs) != 0)
4362 continue;
4363 if (stfs.f_type != SPUFS_MAGIC)
4364 continue;
4365
4366 callback (data, fd);
4367 }
4368
4369 closedir (dir);
4370}
4371
4372/* Generate corefile notes for SPU contexts. */
4373
4374struct linux_spu_corefile_data
4375{
4376 bfd *obfd;
4377 char *note_data;
4378 int *note_size;
4379};
4380
4381static void
4382linux_spu_corefile_callback (void *data, int fd)
4383{
4384 struct linux_spu_corefile_data *args = data;
4385 int i;
4386
4387 static const char *spu_files[] =
4388 {
4389 "object-id",
4390 "mem",
4391 "regs",
4392 "fpcr",
4393 "lslr",
4394 "decr",
4395 "decr_status",
4396 "signal1",
4397 "signal1_type",
4398 "signal2",
4399 "signal2_type",
4400 "event_mask",
4401 "event_status",
4402 "mbox_info",
4403 "ibox_info",
4404 "wbox_info",
4405 "dma_info",
4406 "proxydma_info",
4407 };
4408
4409 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4410 {
4411 char annex[32], note_name[32];
4412 gdb_byte *spu_data;
4413 LONGEST spu_len;
4414
4415 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4416 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4417 annex, &spu_data);
4418 if (spu_len > 0)
4419 {
4420 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4421 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4422 args->note_size, note_name,
4423 NT_SPU, spu_data, spu_len);
4424 xfree (spu_data);
4425 }
4426 }
4427}
4428
4429static char *
4430linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4431{
4432 struct linux_spu_corefile_data args;
e0881a8e 4433
efcbbd14
UW
4434 args.obfd = obfd;
4435 args.note_data = note_data;
4436 args.note_size = note_size;
4437
4438 iterate_over_spus (PIDGET (inferior_ptid),
4439 linux_spu_corefile_callback, &args);
4440
4441 return args.note_data;
4442}
4443
dba24537
AC
4444/* Fills the "to_make_corefile_note" target vector. Builds the note
4445 section for a corefile, and returns it in a malloc buffer. */
4446
4447static char *
4448linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4449{
4450 struct linux_nat_corefile_thread_data thread_args;
d99148ef 4451 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4452 char fname[16] = { '\0' };
d99148ef 4453 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4454 char psargs[80] = { '\0' };
4455 char *note_data = NULL;
d90e17a7 4456 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4457 gdb_byte *auxv;
dba24537
AC
4458 int auxv_len;
4459
4460 if (get_exec_file (0))
4461 {
4462 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
4463 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4464 if (get_inferior_args ())
4465 {
d99148ef
JK
4466 char *string_end;
4467 char *psargs_end = psargs + sizeof (psargs);
4468
4469 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4470 strings fine. */
4471 string_end = memchr (psargs, 0, sizeof (psargs));
4472 if (string_end != NULL)
4473 {
4474 *string_end++ = ' ';
4475 strncpy (string_end, get_inferior_args (),
4476 psargs_end - string_end);
4477 }
dba24537
AC
4478 }
4479 note_data = (char *) elfcore_write_prpsinfo (obfd,
4480 note_data,
4481 note_size, fname, psargs);
4482 }
4483
4484 /* Dump information for threads. */
4485 thread_args.obfd = obfd;
4486 thread_args.note_data = note_data;
4487 thread_args.note_size = note_size;
4488 thread_args.num_notes = 0;
2020b7ab 4489 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4490 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4491 gdb_assert (thread_args.num_notes != 0);
4492 note_data = thread_args.note_data;
dba24537 4493
13547ab6
DJ
4494 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4495 NULL, &auxv);
dba24537
AC
4496 if (auxv_len > 0)
4497 {
4498 note_data = elfcore_write_note (obfd, note_data, note_size,
4499 "CORE", NT_AUXV, auxv, auxv_len);
4500 xfree (auxv);
4501 }
4502
efcbbd14
UW
4503 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4504
dba24537
AC
4505 make_cleanup (xfree, note_data);
4506 return note_data;
4507}
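
A minimal sketch of the psargs construction above: the executable name plus arguments packed into a fixed 80-byte buffer that may legitimately end up unterminated. The helper name is invented for illustration.

#include <string.h>

/* Build a prpsinfo-style psargs string ("program arg1 arg2 ...") in a
   fixed 80-byte buffer.  As in the code above, the result may end up
   without a terminating NUL when the text exactly fills the buffer;
   the ELF note writer tolerates that.  */

static void
build_psargs_example (char psargs[80], const char *exec, const char *args)
{
  char *end;

  memset (psargs, 0, 80);
  strncpy (psargs, exec, 80);

  end = memchr (psargs, 0, 80);
  if (end != NULL && args != NULL)
    {
      *end++ = ' ';
      strncpy (end, args, psargs + 80 - end);
    }
}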
4508
4509/* Implement the "info proc" command. */
4510
4511static void
4512linux_nat_info_proc_cmd (char *args, int from_tty)
4513{
89ecc4f5
DE
4514 /* A long is used for pid instead of an int to avoid a loss of precision
4515 compiler warning from the output of strtoul. */
4516 long pid = PIDGET (inferior_ptid);
dba24537
AC
4517 FILE *procfile;
4518 char **argv = NULL;
4519 char buffer[MAXPATHLEN];
4520 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4521 int cmdline_f = 1;
4522 int cwd_f = 1;
4523 int exe_f = 1;
4524 int mappings_f = 0;
dba24537
AC
4525 int status_f = 0;
4526 int stat_f = 0;
4527 int all = 0;
4528 struct stat dummy;
4529
4530 if (args)
4531 {
4532 /* Break up 'args' into an argv array. */
d1a41061
PP
4533 argv = gdb_buildargv (args);
4534 make_cleanup_freeargv (argv);
dba24537
AC
4535 }
4536 while (argv != NULL && *argv != NULL)
4537 {
4538 if (isdigit (argv[0][0]))
4539 {
4540 pid = strtoul (argv[0], NULL, 10);
4541 }
4542 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4543 {
4544 mappings_f = 1;
4545 }
4546 else if (strcmp (argv[0], "status") == 0)
4547 {
4548 status_f = 1;
4549 }
4550 else if (strcmp (argv[0], "stat") == 0)
4551 {
4552 stat_f = 1;
4553 }
4554 else if (strcmp (argv[0], "cmd") == 0)
4555 {
4556 cmdline_f = 1;
4557 }
4558 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4559 {
4560 exe_f = 1;
4561 }
4562 else if (strcmp (argv[0], "cwd") == 0)
4563 {
4564 cwd_f = 1;
4565 }
4566 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4567 {
4568 all = 1;
4569 }
4570 else
4571 {
1777feb0 4572 /* [...] (future options here). */
dba24537
AC
4573 }
4574 argv++;
4575 }
4576 if (pid == 0)
8a3fe4f8 4577 error (_("No current process: you must name one."));
dba24537 4578
89ecc4f5 4579 sprintf (fname1, "/proc/%ld", pid);
dba24537 4580 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4581 error (_("No /proc directory: '%s'"), fname1);
dba24537 4582
89ecc4f5 4583 printf_filtered (_("process %ld\n"), pid);
dba24537
AC
4584 if (cmdline_f || all)
4585 {
89ecc4f5 4586 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4587 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4588 {
7c8a8b04 4589 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4590
bf1d7d9c
JB
4591 if (fgets (buffer, sizeof (buffer), procfile))
4592 printf_filtered ("cmdline = '%s'\n", buffer);
4593 else
4594 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4595 do_cleanups (cleanup);
dba24537
AC
4596 }
4597 else
8a3fe4f8 4598 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4599 }
4600 if (cwd_f || all)
4601 {
89ecc4f5 4602 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4603 memset (fname2, 0, sizeof (fname2));
4604 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4605 printf_filtered ("cwd = '%s'\n", fname2);
4606 else
8a3fe4f8 4607 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4608 }
4609 if (exe_f || all)
4610 {
89ecc4f5 4611 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4612 memset (fname2, 0, sizeof (fname2));
4613 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4614 printf_filtered ("exe = '%s'\n", fname2);
4615 else
8a3fe4f8 4616 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4617 }
4618 if (mappings_f || all)
4619 {
89ecc4f5 4620 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4621 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4622 {
4623 long long addr, endaddr, size, offset, inode;
4624 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4625 struct cleanup *cleanup;
dba24537 4626
7c8a8b04 4627 cleanup = make_cleanup_fclose (procfile);
a3f17187 4628 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4629 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4630 {
4631 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4632 "Start Addr",
4633 " End Addr",
4634 " Size", " Offset", "objfile");
4635 }
4636 else
4637 {
4638 printf_filtered (" %18s %18s %10s %10s %7s\n",
4639 "Start Addr",
4640 " End Addr",
4641 " Size", " Offset", "objfile");
4642 }
4643
4644 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4645 &offset, &device[0], &inode, &filename[0]))
4646 {
4647 size = endaddr - addr;
4648
4649 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4650 calls here (and possibly above) should be abstracted
4651 out into their own functions? Andrew suggests using
4652 a generic local_address_string instead to print out
4653 the addresses; that makes sense to me, too. */
4654
a97b0ac8 4655 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4656 {
4657 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4658 (unsigned long) addr, /* FIXME: pr_addr */
4659 (unsigned long) endaddr,
4660 (int) size,
4661 (unsigned int) offset,
4662 filename[0] ? filename : "");
4663 }
4664 else
4665 {
4666 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4667 (unsigned long) addr, /* FIXME: pr_addr */
4668 (unsigned long) endaddr,
4669 (int) size,
4670 (unsigned int) offset,
4671 filename[0] ? filename : "");
4672 }
4673 }
4674
7c8a8b04 4675 do_cleanups (cleanup);
dba24537
AC
4676 }
4677 else
8a3fe4f8 4678 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4679 }
4680 if (status_f || all)
4681 {
89ecc4f5 4682 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4683 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4684 {
7c8a8b04 4685 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4686
dba24537
AC
4687 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4688 puts_filtered (buffer);
7c8a8b04 4689 do_cleanups (cleanup);
dba24537
AC
4690 }
4691 else
8a3fe4f8 4692 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4693 }
4694 if (stat_f || all)
4695 {
89ecc4f5 4696 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4697 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4698 {
4699 int itmp;
4700 char ctmp;
a25694b4 4701 long ltmp;
7c8a8b04 4702 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4703
4704 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4705 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4706 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4707 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4708 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4709 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4710 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4711 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4712 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4713 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4714 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4715 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4716 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4717 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4718 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4719 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4720 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4721 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4722 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4723 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4724 (unsigned long) ltmp);
4725 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4726 printf_filtered (_("Minor faults, children: %lu\n"),
4727 (unsigned long) ltmp);
4728 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4729 printf_filtered (_("Major faults (memory page faults): %lu\n"),
4730 (unsigned long) ltmp);
4731 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4732 printf_filtered (_("Major faults, children: %lu\n"),
4733 (unsigned long) ltmp);
4734 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4735 printf_filtered (_("utime: %ld\n"), ltmp);
4736 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4737 printf_filtered (_("stime: %ld\n"), ltmp);
4738 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4739 printf_filtered (_("utime, children: %ld\n"), ltmp);
4740 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4741 printf_filtered (_("stime, children: %ld\n"), ltmp);
4742 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4743 printf_filtered (_("jiffies remaining in current "
4744 "time slice: %ld\n"), ltmp);
a25694b4
AS
4745 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4746 printf_filtered (_("'nice' value: %ld\n"), ltmp);
4747 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4748 printf_filtered (_("jiffies until next timeout: %lu\n"),
4749 (unsigned long) ltmp);
4750 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4751 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4752 (unsigned long) ltmp);
4753 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4754 printf_filtered (_("start time (jiffies since "
4755 "system boot): %ld\n"), ltmp);
a25694b4
AS
4756 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4757 printf_filtered (_("Virtual memory size: %lu\n"),
4758 (unsigned long) ltmp);
4759 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3e43a32a
MS
4760 printf_filtered (_("Resident set size: %lu\n"),
4761 (unsigned long) ltmp);
a25694b4
AS
4762 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4763 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4764 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4765 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4766 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4767 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4768 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4769 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3e43a32a
MS
4770#if 0 /* Don't know how architecture-dependent the rest is...
4771 Anyway the signal bitmap info is available from "status". */
1777feb0 4772 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4773 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
1777feb0 4774 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4
AS
4775 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4776 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4777 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4778 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4779 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4780 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4781 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4782 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4783 printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
1777feb0 4784 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4785 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 4786#endif
7c8a8b04 4787 do_cleanups (cleanup);
dba24537
AC
4788 }
4789 else
8a3fe4f8 4790 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4791 }
4792}
4793
10d6c8cd
DJ
4794/* Implement the to_xfer_partial interface for memory reads using the /proc
4795 filesystem. Because we can use a single read() call for /proc, this
4796 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4797 but it doesn't support writes. */
4798
4799static LONGEST
4800linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4801 const char *annex, gdb_byte *readbuf,
4802 const gdb_byte *writebuf,
4803 ULONGEST offset, LONGEST len)
dba24537 4804{
10d6c8cd
DJ
4805 LONGEST ret;
4806 int fd;
dba24537
AC
4807 char filename[64];
4808
10d6c8cd 4809 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4810 return 0;
4811
4812 /* Don't bother for one word. */
4813 if (len < 3 * sizeof (long))
4814 return 0;
4815
4816 /* We could keep this file open and cache it - possibly one per
4817 thread. That requires some juggling, but is even faster. */
4818 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4819 fd = open (filename, O_RDONLY | O_LARGEFILE);
4820 if (fd == -1)
4821 return 0;
4822
4823 /* If pread64 is available, use it. It's faster if the kernel
4824 supports it (only one syscall), and it's 64-bit safe even on
4825 32-bit platforms (for instance, SPARC debugging a SPARC64
4826 application). */
4827#ifdef HAVE_PREAD64
10d6c8cd 4828 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4829#else
10d6c8cd 4830 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4831#endif
4832 ret = 0;
4833 else
4834 ret = len;
4835
4836 close (fd);
4837 return ret;
4838}
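
A reduced sketch of the single-read /proc/PID/mem access used above, assuming the caller is ptrace-attached and the program is built with 64-bit file offsets (or uses pread64) so large inferior addresses survive on 32-bit hosts; the helper name is illustrative.

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Read LEN bytes of PID's memory at ADDR via /proc/PID/mem, using a
   single pread instead of repeated PTRACE_PEEKTEXT calls.  The caller
   must already be ptrace-attached to PID.  Returns the number of
   bytes read, or -1 on error.  */

static ssize_t
proc_mem_read_example (int pid, unsigned long long addr,
                       void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof path, "/proc/%d/mem", pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;

  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}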
4839
efcbbd14
UW
4840
4841/* Enumerate spufs IDs for process PID. */
4842static LONGEST
4843spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4844{
4845 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4846 LONGEST pos = 0;
4847 LONGEST written = 0;
4848 char path[128];
4849 DIR *dir;
4850 struct dirent *entry;
4851
4852 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4853 dir = opendir (path);
4854 if (!dir)
4855 return -1;
4856
4857 rewinddir (dir);
4858 while ((entry = readdir (dir)) != NULL)
4859 {
4860 struct stat st;
4861 struct statfs stfs;
4862 int fd;
4863
4864 fd = atoi (entry->d_name);
4865 if (!fd)
4866 continue;
4867
4868 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4869 if (stat (path, &st) != 0)
4870 continue;
4871 if (!S_ISDIR (st.st_mode))
4872 continue;
4873
4874 if (statfs (path, &stfs) != 0)
4875 continue;
4876 if (stfs.f_type != SPUFS_MAGIC)
4877 continue;
4878
4879 if (pos >= offset && pos + 4 <= offset + len)
4880 {
4881 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4882 written += 4;
4883 }
4884 pos += 4;
4885 }
4886
4887 closedir (dir);
4888 return written;
4889}
4890
4891/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4892 object type, using the /proc file system. */
4893static LONGEST
4894linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4895 const char *annex, gdb_byte *readbuf,
4896 const gdb_byte *writebuf,
4897 ULONGEST offset, LONGEST len)
4898{
4899 char buf[128];
4900 int fd = 0;
4901 int ret = -1;
4902 int pid = PIDGET (inferior_ptid);
4903
4904 if (!annex)
4905 {
4906 if (!readbuf)
4907 return -1;
4908 else
4909 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4910 }
4911
4912 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4913 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4914 if (fd <= 0)
4915 return -1;
4916
4917 if (offset != 0
4918 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4919 {
4920 close (fd);
4921 return 0;
4922 }
4923
4924 if (writebuf)
4925 ret = write (fd, writebuf, (size_t) len);
4926 else if (readbuf)
4927 ret = read (fd, readbuf, (size_t) len);
4928
4929 close (fd);
4930 return ret;
4931}
4932
4933
dba24537
AC
4934/* Parse LINE as a signal set and add its set bits to SIGS. */
4935
4936static void
4937add_line_to_sigset (const char *line, sigset_t *sigs)
4938{
4939 int len = strlen (line) - 1;
4940 const char *p;
4941 int signum;
4942
4943 if (line[len] != '\n')
8a3fe4f8 4944 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4945
4946 p = line;
4947 signum = len * 4;
4948 while (len-- > 0)
4949 {
4950 int digit;
4951
4952 if (*p >= '0' && *p <= '9')
4953 digit = *p - '0';
4954 else if (*p >= 'a' && *p <= 'f')
4955 digit = *p - 'a' + 10;
4956 else
8a3fe4f8 4957 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4958
4959 signum -= 4;
4960
4961 if (digit & 1)
4962 sigaddset (sigs, signum + 1);
4963 if (digit & 2)
4964 sigaddset (sigs, signum + 2);
4965 if (digit & 4)
4966 sigaddset (sigs, signum + 3);
4967 if (digit & 8)
4968 sigaddset (sigs, signum + 4);
4969
4970 p++;
4971 }
4972}
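
The nibble decoding above, restated as a stand-alone helper that maps each set bit of a hex signal mask (rightmost nibble covers signals 1 to 4) into a sigset_t; the function name is invented and the input is assumed to contain only hex digits.

#include <signal.h>
#include <string.h>

/* Add every signal named by the hex mask string HEX (for instance the
   value part of a "SigPnd:" line) to SIGS.  Bit 0 of the rightmost
   nibble corresponds to signal 1.  HEX must contain hex digits only,
   with no trailing newline.  */

static void
hex_mask_to_sigset_example (const char *hex, sigset_t *sigs)
{
  int len = strlen (hex);
  int i, bit;

  for (i = 0; i < len; i++)
    {
      char c = hex[len - 1 - i];	/* Rightmost nibble first.  */
      int digit = (c >= 'a') ? c - 'a' + 10
                : (c >= 'A') ? c - 'A' + 10
                : c - '0';

      for (bit = 0; bit < 4; bit++)
        if (digit & (1 << bit))
          sigaddset (sigs, i * 4 + bit + 1);
    }
}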
4973
4974/* Find process PID's pending signals from /proc/pid/status and set
4975 SIGS to match. */
4976
4977void
3e43a32a
MS
4978linux_proc_pending_signals (int pid, sigset_t *pending,
4979 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4980{
4981 FILE *procfile;
4982 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 4983 struct cleanup *cleanup;
dba24537
AC
4984
4985 sigemptyset (pending);
4986 sigemptyset (blocked);
4987 sigemptyset (ignored);
4988 sprintf (fname, "/proc/%d/status", pid);
4989 procfile = fopen (fname, "r");
4990 if (procfile == NULL)
8a3fe4f8 4991 error (_("Could not open %s"), fname);
7c8a8b04 4992 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4993
4994 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4995 {
4996 /* Normal queued signals are on the SigPnd line in the status
4997 file. However, 2.6 kernels also have a "shared" pending
4998 queue for delivering signals to a thread group, so check for
4999 a ShdPnd line also.
5000
5001 Unfortunately some Red Hat kernels include the shared pending
5002 queue but not the ShdPnd status field. */
5003
5004 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5005 add_line_to_sigset (buffer + 8, pending);
5006 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5007 add_line_to_sigset (buffer + 8, pending);
5008 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5009 add_line_to_sigset (buffer + 8, blocked);
5010 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5011 add_line_to_sigset (buffer + 8, ignored);
5012 }
5013
7c8a8b04 5014 do_cleanups (cleanup);
dba24537
AC
5015}
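
A small usage-style sketch that locates the SigPnd/ShdPnd/SigBlk/SigIgn lines in /proc/PID/status, the same lines the function above feeds to add_line_to_sigset; everything below is illustrative.

#include <stdio.h>
#include <string.h>

/* Print the signal-mask lines of /proc/PID/status.  2.6 kernels
   report per-thread pending signals on "SigPnd:" and group-wide ones
   on "ShdPnd:", so both are of interest.  */

static void
dump_signal_masks_example (int pid)
{
  char path[64], line[256];
  FILE *f;

  snprintf (path, sizeof path, "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return;

  while (fgets (line, sizeof line, f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0
        || strncmp (line, "ShdPnd:", 7) == 0
        || strncmp (line, "SigBlk:", 7) == 0
        || strncmp (line, "SigIgn:", 7) == 0)
      fputs (line, stdout);

  fclose (f);
}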
5016
07e059b5
VP
5017static LONGEST
5018linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
5019 const char *annex, gdb_byte *readbuf,
5020 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5
VP
5021{
5022 /* We make the process list snapshot when the object starts to be
5023 read. */
5024 static const char *buf;
5025 static LONGEST len_avail = -1;
5026 static struct obstack obstack;
5027
5028 DIR *dirp;
5029
5030 gdb_assert (object == TARGET_OBJECT_OSDATA);
5031
a61408f8
SS
5032 if (!annex)
5033 {
5034 if (offset == 0)
5035 {
5036 if (len_avail != -1 && len_avail != 0)
5037 obstack_free (&obstack, NULL);
5038 len_avail = 0;
5039 buf = NULL;
5040 obstack_init (&obstack);
5041 obstack_grow_str (&obstack, "<osdata type=\"types\">\n");
5042
3e43a32a 5043 obstack_xml_printf (&obstack,
a61408f8
SS
5044 "<item>"
5045 "<column name=\"Type\">processes</column>"
3e43a32a
MS
5046 "<column name=\"Description\">"
5047 "Listing of all processes</column>"
a61408f8
SS
5048 "</item>");
5049
5050 obstack_grow_str0 (&obstack, "</osdata>\n");
5051 buf = obstack_finish (&obstack);
5052 len_avail = strlen (buf);
5053 }
5054
5055 if (offset >= len_avail)
5056 {
5057 /* Done. Get rid of the obstack. */
5058 obstack_free (&obstack, NULL);
5059 buf = NULL;
5060 len_avail = 0;
5061 return 0;
5062 }
5063
5064 if (len > len_avail - offset)
5065 len = len_avail - offset;
5066 memcpy (readbuf, buf + offset, len);
5067
5068 return len;
5069 }
5070
07e059b5
VP
5071 if (strcmp (annex, "processes") != 0)
5072 return 0;
5073
5074 gdb_assert (readbuf && !writebuf);
5075
5076 if (offset == 0)
5077 {
5078 if (len_avail != -1 && len_avail != 0)
e0881a8e 5079 obstack_free (&obstack, NULL);
07e059b5
VP
5080 len_avail = 0;
5081 buf = NULL;
5082 obstack_init (&obstack);
5083 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
5084
5085 dirp = opendir ("/proc");
5086 if (dirp)
e0881a8e
MS
5087 {
5088 struct dirent *dp;
5089
5090 while ((dp = readdir (dirp)) != NULL)
5091 {
5092 struct stat statbuf;
5093 char procentry[sizeof ("/proc/4294967295")];
5094
5095 if (!isdigit (dp->d_name[0])
5096 || NAMELEN (dp) > sizeof ("4294967295") - 1)
5097 continue;
5098
5099 sprintf (procentry, "/proc/%s", dp->d_name);
5100 if (stat (procentry, &statbuf) == 0
5101 && S_ISDIR (statbuf.st_mode))
5102 {
5103 char *pathname;
5104 FILE *f;
5105 char cmd[MAXPATHLEN + 1];
5106 struct passwd *entry;
5107
5108 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
5109 entry = getpwuid (statbuf.st_uid);
5110
5111 if ((f = fopen (pathname, "r")) != NULL)
5112 {
5113 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
5114
5115 if (len > 0)
5116 {
5117 int i;
5118
5119 for (i = 0; i < len; i++)
5120 if (cmd[i] == '\0')
5121 cmd[i] = ' ';
5122 cmd[len] = '\0';
5123
5124 obstack_xml_printf (
5125 &obstack,
5126 "<item>"
5127 "<column name=\"pid\">%s</column>"
5128 "<column name=\"user\">%s</column>"
5129 "<column name=\"command\">%s</column>"
5130 "</item>",
5131 dp->d_name,
5132 entry ? entry->pw_name : "?",
5133 cmd);
5134 }
5135 fclose (f);
5136 }
5137
5138 xfree (pathname);
5139 }
5140 }
5141
5142 closedir (dirp);
5143 }
07e059b5
VP
5144
5145 obstack_grow_str0 (&obstack, "</osdata>\n");
5146 buf = obstack_finish (&obstack);
5147 len_avail = strlen (buf);
5148 }
5149
5150 if (offset >= len_avail)
5151 {
5152 /* Done. Get rid of the obstack. */
5153 obstack_free (&obstack, NULL);
5154 buf = NULL;
5155 len_avail = 0;
5156 return 0;
5157 }
5158
5159 if (len > len_avail - offset)
5160 len = len_avail - offset;
5161 memcpy (readbuf, buf + offset, len);
5162
5163 return len;
5164}
5165
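/* Editor's note: a minimal, standalone sketch (not part of this file)
   of the /proc scan that linux_nat_xfer_osdata performs above: numeric
   directory names under /proc are process ids, the owner comes from
   stat(2) plus getpwuid, and the NUL-separated /proc/PID/cmdline is
   flattened into a single command line.  The real function additionally
   wraps each entry in <osdata>/<item>/<column> XML; this sketch just
   prints the columns.  Compile it separately.  */

#include <ctype.h>
#include <dirent.h>
#include <pwd.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>

int
main (void)
{
  DIR *dirp = opendir ("/proc");
  struct dirent *dp;

  if (dirp == NULL)
    return 1;

  while ((dp = readdir (dirp)) != NULL)
    {
      char path[64], cmd[1024];
      struct stat statbuf;
      struct passwd *entry;
      FILE *f;
      size_t len, i;

      /* Only numeric entries name processes.  */
      if (!isdigit ((unsigned char) dp->d_name[0]))
        continue;

      snprintf (path, sizeof (path), "/proc/%s", dp->d_name);
      if (stat (path, &statbuf) != 0 || !S_ISDIR (statbuf.st_mode))
        continue;
      entry = getpwuid (statbuf.st_uid);

      snprintf (path, sizeof (path), "/proc/%s/cmdline", dp->d_name);
      f = fopen (path, "r");
      if (f == NULL)
        continue;
      len = fread (cmd, 1, sizeof (cmd) - 1, f);
      fclose (f);

      /* cmdline arguments are NUL-separated; join them with spaces.  */
      for (i = 0; i < len; i++)
        if (cmd[i] == '\0')
          cmd[i] = ' ';
      cmd[len] = '\0';

      printf ("%-8s %-12s %s\n", dp->d_name,
              entry ? entry->pw_name : "?", cmd);
    }

  closedir (dirp);
  return 0;
}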
10d6c8cd
DJ
5166static LONGEST
5167linux_xfer_partial (struct target_ops *ops, enum target_object object,
5168 const char *annex, gdb_byte *readbuf,
5169 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5170{
5171 LONGEST xfer;
5172
5173 if (object == TARGET_OBJECT_AUXV)
9f2982ff 5174 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
5175 offset, len);
5176
07e059b5
VP
5177 if (object == TARGET_OBJECT_OSDATA)
5178 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5179 offset, len);
5180
efcbbd14
UW
5181 if (object == TARGET_OBJECT_SPU)
5182 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5183 offset, len);
5184
8f313923
JK
 5185 /* GDB calculates all the addresses in the possibly larger width of the
 5186 address. The address needs to be masked to the target's address width
 5187 before its final use - by either linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5188
5189 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5190
5191 if (object == TARGET_OBJECT_MEMORY)
5192 {
5193 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5194
5195 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5196 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5197 }
5198
10d6c8cd
DJ
5199 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5200 offset, len);
5201 if (xfer != 0)
5202 return xfer;
5203
5204 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5205 offset, len);
5206}
5207
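/* Editor's note: a small, self-contained illustration (not part of this
   file) of the address-width masking above.  A sign-extended address
   from a 32-bit inferior is reduced modulo 2^addr_bit, and the shift is
   skipped when addr_bit already equals the host width, since shifting a
   64-bit value by 64 bits is undefined.  ULONGEST is re-typedef'd here
   only to keep the sketch standalone.  */

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t ULONGEST;      /* Stand-in for GDB's ULONGEST.  */

static ULONGEST
mask_to_addr_bit (ULONGEST offset, int addr_bit)
{
  /* Compare ADDR_BIT first to avoid the undefined full-width shift.  */
  if (addr_bit < (int) (sizeof (ULONGEST) * CHAR_BIT))
    offset &= ((ULONGEST) 1 << addr_bit) - 1;
  return offset;
}

int
main (void)
{
  /* A 32-bit address that arrived sign-extended to 64 bits.  */
  ULONGEST offset = 0xffffffff80001000ULL;

  printf ("masked for a 32-bit target: 0x%llx\n",
          (unsigned long long) mask_to_addr_bit (offset, 32));
  printf ("masked for a 64-bit target: 0x%llx\n",
          (unsigned long long) mask_to_addr_bit (offset, 64));
  return 0;
}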
e9efe249 5208/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
5209 it with local methods. */
5210
910122bf
UW
5211static void
5212linux_target_install_ops (struct target_ops *t)
10d6c8cd 5213{
6d8fd2b7
UW
5214 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
5215 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
5216 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
a96d9b2e 5217 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 5218 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 5219 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
5220 t->to_post_attach = linux_child_post_attach;
5221 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
5222 t->to_find_memory_regions = linux_nat_find_memory_regions;
5223 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5224
5225 super_xfer_partial = t->to_xfer_partial;
5226 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
5227}
5228
5229struct target_ops *
5230linux_target (void)
5231{
5232 struct target_ops *t;
5233
5234 t = inf_ptrace_target ();
5235 linux_target_install_ops (t);
5236
5237 return t;
5238}
5239
5240struct target_ops *
7714d83a 5241linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
5242{
5243 struct target_ops *t;
5244
5245 t = inf_ptrace_trad_target (register_u_offset);
5246 linux_target_install_ops (t);
10d6c8cd 5247
10d6c8cd
DJ
5248 return t;
5249}
5250
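/* Editor's note: a hedged sketch (not part of this file) of how an
   architecture-specific native target typically consumes the prototype
   returned by linux_target () above: it overrides the register access
   methods and hands the result to linux_nat_add_target below.  The
   names arch_linux_fetch_registers, arch_linux_store_registers and
   _initialize_arch_linux_nat are hypothetical placeholders, and the
   method signatures are assumed to match this GDB's target.h.  */

/* Hypothetical per-architecture register access routines.  */
static void arch_linux_fetch_registers (struct target_ops *ops,
                                        struct regcache *regcache,
                                        int regnum);
static void arch_linux_store_registers (struct target_ops *ops,
                                        struct regcache *regcache,
                                        int regnum);

void
_initialize_arch_linux_nat (void)
{
  struct target_ops *t;

  /* Start from the generic GNU/Linux prototype target ...  */
  t = linux_target ();

  /* ... override the ptrace register access for this architecture ...  */
  t->to_fetch_registers = arch_linux_fetch_registers;
  t->to_store_registers = arch_linux_store_registers;

  /* ... and register it; linux_nat_add_target layers the
     multi-threading support on top.  */
  linux_nat_add_target (t);
}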
b84876c2
PA
5251/* target_is_async_p implementation. */
5252
5253static int
5254linux_nat_is_async_p (void)
5255{
5256 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5257 it explicitly with the "set target-async" command.
b84876c2 5258 Someday, linux will always be async. */
c6ebd6cf 5259 if (!target_async_permitted)
b84876c2
PA
5260 return 0;
5261
d90e17a7
PA
5262 /* See target.h/target_async_mask. */
5263 return linux_nat_async_mask_value;
b84876c2
PA
5264}
5265
5266/* target_can_async_p implementation. */
5267
5268static int
5269linux_nat_can_async_p (void)
5270{
5271 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5272 it explicitly with the "set target-async" command.
b84876c2 5273 Someday, linux will always be async. */
c6ebd6cf 5274 if (!target_async_permitted)
b84876c2
PA
5275 return 0;
5276
5277 /* See target.h/target_async_mask. */
5278 return linux_nat_async_mask_value;
5279}
5280
9908b566
VP
5281static int
5282linux_nat_supports_non_stop (void)
5283{
5284 return 1;
5285}
5286
d90e17a7
PA
5287/* True if we want to support multi-process. To be removed when GDB
5288 supports multi-exec. */
5289
2277426b 5290int linux_multi_process = 1;
d90e17a7
PA
5291
5292static int
5293linux_nat_supports_multi_process (void)
5294{
5295 return linux_multi_process;
5296}
5297
b84876c2
PA
5298/* target_async_mask implementation. */
5299
5300static int
7feb7d06 5301linux_nat_async_mask (int new_mask)
b84876c2 5302{
7feb7d06 5303 int curr_mask = linux_nat_async_mask_value;
b84876c2 5304
7feb7d06 5305 if (curr_mask != new_mask)
b84876c2 5306 {
7feb7d06 5307 if (new_mask == 0)
b84876c2
PA
5308 {
5309 linux_nat_async (NULL, 0);
7feb7d06 5310 linux_nat_async_mask_value = new_mask;
b84876c2
PA
5311 }
5312 else
5313 {
7feb7d06 5314 linux_nat_async_mask_value = new_mask;
84e46146 5315
7feb7d06
PA
5316 /* If we're going out of async-mask in all-stop, then the
5317 inferior is stopped. The next resume will call
5318 target_async. In non-stop, the target event source
5319 should be always registered in the event loop. Do so
5320 now. */
5321 if (non_stop)
5322 linux_nat_async (inferior_event_handler, 0);
b84876c2
PA
5323 }
5324 }
5325
7feb7d06 5326 return curr_mask;
b84876c2
PA
5327}
5328
5329static int async_terminal_is_ours = 1;
5330
5331/* target_terminal_inferior implementation. */
5332
5333static void
5334linux_nat_terminal_inferior (void)
5335{
5336 if (!target_is_async_p ())
5337 {
5338 /* Async mode is disabled. */
5339 terminal_inferior ();
5340 return;
5341 }
5342
b84876c2
PA
5343 terminal_inferior ();
5344
d9d2d8b6 5345 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5346 if (!async_terminal_is_ours)
5347 return;
5348
5349 delete_file_handler (input_fd);
5350 async_terminal_is_ours = 0;
5351 set_sigint_trap ();
5352}
5353
5354/* target_terminal_ours implementation. */
5355
2c0b251b 5356static void
b84876c2
PA
5357linux_nat_terminal_ours (void)
5358{
5359 if (!target_is_async_p ())
5360 {
5361 /* Async mode is disabled. */
5362 terminal_ours ();
5363 return;
5364 }
5365
5366 /* GDB should never give the terminal to the inferior if the
5367 inferior is running in the background (run&, continue&, etc.),
5368 but claiming it sure should. */
5369 terminal_ours ();
5370
b84876c2
PA
5371 if (async_terminal_is_ours)
5372 return;
5373
5374 clear_sigint_trap ();
5375 add_file_handler (input_fd, stdin_event_handler, 0);
5376 async_terminal_is_ours = 1;
5377}
5378
5379static void (*async_client_callback) (enum inferior_event_type event_type,
5380 void *context);
5381static void *async_client_context;
5382
7feb7d06
PA
 5383/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
 5384 it lets us notice when any child changes state and notify the
 5385 event loop; in sync mode, it lets linux_nat_wait_1 above use
 5386 sigsuspend to wait for the arrival of a SIGCHLD. */
5387
b84876c2 5388static void
7feb7d06 5389sigchld_handler (int signo)
b84876c2 5390{
7feb7d06
PA
5391 int old_errno = errno;
5392
5393 if (debug_linux_nat_async)
5394 fprintf_unfiltered (gdb_stdlog, "sigchld\n");
5395
5396 if (signo == SIGCHLD
5397 && linux_nat_event_pipe[0] != -1)
5398 async_file_mark (); /* Let the event loop know that there are
5399 events to handle. */
5400
5401 errno = old_errno;
5402}
5403
5404/* Callback registered with the target events file descriptor. */
5405
5406static void
5407handle_target_event (int error, gdb_client_data client_data)
5408{
5409 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5410}
5411
5412/* Create/destroy the target events pipe. Returns previous state. */
5413
5414static int
5415linux_async_pipe (int enable)
5416{
5417 int previous = (linux_nat_event_pipe[0] != -1);
5418
5419 if (previous != enable)
5420 {
5421 sigset_t prev_mask;
5422
5423 block_child_signals (&prev_mask);
5424
5425 if (enable)
5426 {
5427 if (pipe (linux_nat_event_pipe) == -1)
5428 internal_error (__FILE__, __LINE__,
5429 "creating event pipe failed.");
5430
5431 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5432 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5433 }
5434 else
5435 {
5436 close (linux_nat_event_pipe[0]);
5437 close (linux_nat_event_pipe[1]);
5438 linux_nat_event_pipe[0] = -1;
5439 linux_nat_event_pipe[1] = -1;
5440 }
5441
5442 restore_child_signals_mask (&prev_mask);
5443 }
5444
5445 return previous;
b84876c2
PA
5446}
5447
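/* Editor's note: a standalone sketch (not part of this file) of the
   self-pipe pattern that linux_async_pipe, sigchld_handler and
   async_file_mark implement above: the SIGCHLD handler writes a byte
   into a non-blocking pipe whose read end the event loop watches, so an
   asynchronous signal becomes an ordinary file-descriptor event.
   Compile it separately.  */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int event_pipe[2] = { -1, -1 };

/* Async-signal-safe: just mark the pipe; the "event loop" in main does
   the actual work.  */
static void
sketch_sigchld_handler (int signo)
{
  int old_errno = errno;
  char c = '+';

  (void) write (event_pipe[1], &c, 1);
  errno = old_errno;
}

int
main (void)
{
  struct sigaction sa;
  char buf[32];
  pid_t child;

  if (pipe (event_pipe) == -1)
    return 1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = sketch_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  child = fork ();
  if (child == 0)
    _exit (0);                  /* The child exits immediately.  */

  /* Minimal event loop: block until the pipe becomes readable.  */
  for (;;)
    {
      fd_set readfds;

      FD_ZERO (&readfds);
      FD_SET (event_pipe[0], &readfds);
      if (select (event_pipe[0] + 1, &readfds, NULL, NULL, NULL) >= 0)
        break;
      if (errno != EINTR)
        return 1;
    }

  while (read (event_pipe[0], buf, sizeof (buf)) > 0)
    ;                           /* Drain the pipe.  */
  waitpid (child, NULL, 0);
  printf ("child %ld reaped after a pipe event\n", (long) child);
  return 0;
}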
5448/* target_async implementation. */
5449
5450static void
5451linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5452 void *context), void *context)
5453{
c6ebd6cf 5454 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
5455 internal_error (__FILE__, __LINE__,
5456 "Calling target_async when async is masked");
5457
5458 if (callback != NULL)
5459 {
5460 async_client_callback = callback;
5461 async_client_context = context;
7feb7d06
PA
5462 if (!linux_async_pipe (1))
5463 {
5464 add_file_handler (linux_nat_event_pipe[0],
5465 handle_target_event, NULL);
5466 /* There may be pending events to handle. Tell the event loop
5467 to poll them. */
5468 async_file_mark ();
5469 }
b84876c2
PA
5470 }
5471 else
5472 {
5473 async_client_callback = callback;
5474 async_client_context = context;
b84876c2 5475 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5476 linux_async_pipe (0);
b84876c2
PA
5477 }
5478 return;
5479}
5480
252fbfc8
PA
5481/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5482 event came out. */
5483
4c28f408 5484static int
252fbfc8 5485linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5486{
d90e17a7 5487 if (!lwp->stopped)
252fbfc8 5488 {
d90e17a7 5489 ptid_t ptid = lwp->ptid;
252fbfc8 5490
d90e17a7
PA
5491 if (debug_linux_nat)
5492 fprintf_unfiltered (gdb_stdlog,
5493 "LNSL: running -> suspending %s\n",
5494 target_pid_to_str (lwp->ptid));
252fbfc8 5495
252fbfc8 5496
d90e17a7
PA
5497 stop_callback (lwp, NULL);
5498 stop_wait_callback (lwp, NULL);
252fbfc8 5499
d90e17a7
PA
5500 /* If the lwp exits while we try to stop it, there's nothing
5501 else to do. */
5502 lwp = find_lwp_pid (ptid);
5503 if (lwp == NULL)
5504 return 0;
252fbfc8 5505
d90e17a7
PA
5506 /* If we didn't collect any signal other than SIGSTOP while
5507 stopping the LWP, push a SIGNAL_0 event. In either case, the
5508 event-loop will end up calling target_wait which will collect
5509 these. */
5510 if (lwp->status == 0)
5511 lwp->status = W_STOPCODE (0);
5512 async_file_mark ();
5513 }
5514 else
5515 {
5516 /* Already known to be stopped; do nothing. */
252fbfc8 5517
d90e17a7
PA
5518 if (debug_linux_nat)
5519 {
e09875d4 5520 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5521 fprintf_unfiltered (gdb_stdlog,
5522 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5523 target_pid_to_str (lwp->ptid));
5524 else
3e43a32a
MS
5525 fprintf_unfiltered (gdb_stdlog,
5526 "LNSL: already stopped/no "
5527 "stop_requested yet %s\n",
d90e17a7 5528 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5529 }
5530 }
4c28f408
PA
5531 return 0;
5532}
5533
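/* Editor's note: a tiny standalone illustration (not part of this file)
   of the W_STOPCODE trick used in linux_nat_stop_lwp above: it
   manufactures a wait(2)-style status word that WIFSTOPPED accepts,
   with WSTOPSIG reporting the chosen signal (0 here, matching GDB's
   TARGET_SIGNAL_0 placeholder).  W_STOPCODE is not universally exposed
   by <sys/wait.h>, so the classic encoding is supplied as a fallback.  */

#include <assert.h>
#include <stdio.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)
#endif

int
main (void)
{
  int status = W_STOPCODE (0);

  /* The fabricated status reads back as "stopped by signal 0".  */
  assert (WIFSTOPPED (status));
  assert (WSTOPSIG (status) == 0);
  printf ("status 0x%x: stopped=%d sig=%d\n",
          status, WIFSTOPPED (status), WSTOPSIG (status));
  return 0;
}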
5534static void
5535linux_nat_stop (ptid_t ptid)
5536{
5537 if (non_stop)
d90e17a7 5538 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5539 else
5540 linux_ops->to_stop (ptid);
5541}
5542
d90e17a7
PA
5543static void
5544linux_nat_close (int quitting)
5545{
5546 /* Unregister from the event loop. */
5547 if (target_is_async_p ())
5548 target_async (NULL, 0);
5549
 5550 /* Reset the async mask. */
5551 linux_nat_async_mask_value = 1;
5552
5553 if (linux_ops->to_close)
5554 linux_ops->to_close (quitting);
5555}
5556
c0694254
PA
5557/* When requests are passed down from the linux-nat layer to the
 5558 single-threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5559 used. The address space pointer is stored in the inferior object,
5560 but the common code that is passed such ptid can't tell whether
5561 lwpid is a "main" process id or not (it assumes so). We reverse
5562 look up the "main" process id from the lwp here. */
5563
5564struct address_space *
5565linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5566{
5567 struct lwp_info *lwp;
5568 struct inferior *inf;
5569 int pid;
5570
5571 pid = GET_LWP (ptid);
5572 if (GET_LWP (ptid) == 0)
5573 {
5574 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5575 tgid. */
5576 lwp = find_lwp_pid (ptid);
5577 pid = GET_PID (lwp->ptid);
5578 }
5579 else
5580 {
5581 /* A (pid,lwpid,0) ptid. */
5582 pid = GET_PID (ptid);
5583 }
5584
5585 inf = find_inferior_pid (pid);
5586 gdb_assert (inf != NULL);
5587 return inf->aspace;
5588}
5589
dc146f7c
VP
5590int
5591linux_nat_core_of_thread_1 (ptid_t ptid)
5592{
5593 struct cleanup *back_to;
5594 char *filename;
5595 FILE *f;
5596 char *content = NULL;
5597 char *p;
5598 char *ts = 0;
5599 int content_read = 0;
5600 int i;
5601 int core;
5602
5603 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5604 GET_PID (ptid), GET_LWP (ptid));
5605 back_to = make_cleanup (xfree, filename);
5606
5607 f = fopen (filename, "r");
5608 if (!f)
5609 {
5610 do_cleanups (back_to);
5611 return -1;
5612 }
5613
5614 make_cleanup_fclose (f);
5615
5616 for (;;)
5617 {
5618 int n;
e0881a8e 5619
dc146f7c
VP
5620 content = xrealloc (content, content_read + 1024);
5621 n = fread (content + content_read, 1, 1024, f);
5622 content_read += n;
5623 if (n < 1024)
5624 {
5625 content[content_read] = '\0';
5626 break;
5627 }
5628 }
5629
5630 make_cleanup (xfree, content);
5631
5632 p = strchr (content, '(');
ca2a87a0
JK
5633
5634 /* Skip ")". */
5635 if (p != NULL)
5636 p = strchr (p, ')');
5637 if (p != NULL)
5638 p++;
dc146f7c
VP
5639
5640 /* If the first field after program name has index 0, then core number is
5641 the field with index 36. There's no constant for that anywhere. */
ca2a87a0
JK
5642 if (p != NULL)
5643 p = strtok_r (p, " ", &ts);
5644 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5645 p = strtok_r (NULL, " ", &ts);
5646
ca2a87a0 5647 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5648 core = -1;
5649
5650 do_cleanups (back_to);
5651
5652 return core;
5653}
5654
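/* Editor's note: a standalone sketch (not part of this file) of the
   /proc/<pid>/task/<tid>/stat parsing done by
   linux_nat_core_of_thread_1 above.  The command name sits in
   parentheses and may itself contain spaces, so scanning starts after
   the closing ')'; counting from 0 there, field 36 is the CPU the
   thread last ran on.  The sketch inspects the current process only.
   Compile it separately.  */

#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int
sketch_core_of_self (void)
{
  char path[64], content[4096];
  char *p, *ts = NULL;
  FILE *f;
  size_t n;
  int i, core;

  snprintf (path, sizeof (path), "/proc/%ld/stat", (long) getpid ());
  f = fopen (path, "r");
  if (f == NULL)
    return -1;
  n = fread (content, 1, sizeof (content) - 1, f);
  fclose (f);
  content[n] = '\0';

  /* Skip past "(comm)"; the command may contain spaces or '('.  */
  p = strrchr (content, ')');
  if (p == NULL)
    return -1;
  p++;

  /* Field 0 after the command is the run state; field 36 is the CPU.  */
  for (i = 0, p = strtok_r (p, " ", &ts); p != NULL && i != 36; ++i)
    p = strtok_r (NULL, " ", &ts);

  if (p == NULL || sscanf (p, "%d", &core) != 1)
    return -1;
  return core;
}

int
main (void)
{
  printf ("currently running on CPU %d\n", sketch_core_of_self ());
  return 0;
}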
5655/* Return the cached value of the processor core for thread PTID. */
5656
5657int
5658linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5659{
5660 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5661
dc146f7c
VP
5662 if (info)
5663 return info->core;
5664 return -1;
5665}
5666
f973ed9c
DJ
5667void
5668linux_nat_add_target (struct target_ops *t)
5669{
f973ed9c
DJ
5670 /* Save the provided single-threaded target. We save this in a separate
5671 variable because another target we've inherited from (e.g. inf-ptrace)
5672 may have saved a pointer to T; we want to use it for the final
5673 process stratum target. */
5674 linux_ops_saved = *t;
5675 linux_ops = &linux_ops_saved;
5676
5677 /* Override some methods for multithreading. */
b84876c2 5678 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5679 t->to_attach = linux_nat_attach;
5680 t->to_detach = linux_nat_detach;
5681 t->to_resume = linux_nat_resume;
5682 t->to_wait = linux_nat_wait;
5683 t->to_xfer_partial = linux_nat_xfer_partial;
5684 t->to_kill = linux_nat_kill;
5685 t->to_mourn_inferior = linux_nat_mourn_inferior;
5686 t->to_thread_alive = linux_nat_thread_alive;
5687 t->to_pid_to_str = linux_nat_pid_to_str;
5688 t->to_has_thread_control = tc_schedlock;
c0694254 5689 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5690 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5691 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5692
b84876c2
PA
5693 t->to_can_async_p = linux_nat_can_async_p;
5694 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5695 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
5696 t->to_async = linux_nat_async;
5697 t->to_async_mask = linux_nat_async_mask;
5698 t->to_terminal_inferior = linux_nat_terminal_inferior;
5699 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5700 t->to_close = linux_nat_close;
b84876c2 5701
4c28f408
PA
5702 /* Methods for non-stop support. */
5703 t->to_stop = linux_nat_stop;
5704
d90e17a7
PA
5705 t->to_supports_multi_process = linux_nat_supports_multi_process;
5706
dc146f7c
VP
5707 t->to_core_of_thread = linux_nat_core_of_thread;
5708
f973ed9c
DJ
5709 /* We don't change the stratum; this target will sit at
 5710 process_stratum and thread_db will sit at thread_stratum. This
5711 is a little strange, since this is a multi-threaded-capable
5712 target, but we want to be on the stack below thread_db, and we
5713 also want to be used for single-threaded processes. */
5714
5715 add_target (t);
f973ed9c
DJ
5716}
5717
9f0bdab8
DJ
5718/* Register a method to call whenever a new thread is attached. */
5719void
5720linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
5721{
5722 /* Save the pointer. We only support a single registered instance
5723 of the GNU/Linux native target, so we do not need to map this to
5724 T. */
5725 linux_nat_new_thread = new_thread;
5726}
5727
5b009018
PA
5728/* Register a method that converts a siginfo object between the layout
5729 that ptrace returns, and the layout in the architecture of the
5730 inferior. */
5731void
5732linux_nat_set_siginfo_fixup (struct target_ops *t,
5733 int (*siginfo_fixup) (struct siginfo *,
5734 gdb_byte *,
5735 int))
5736{
5737 /* Save the pointer. */
5738 linux_nat_siginfo_fixup = siginfo_fixup;
5739}
5740
9f0bdab8
DJ
5741/* Return the saved siginfo associated with PTID. */
5742struct siginfo *
5743linux_nat_get_siginfo (ptid_t ptid)
5744{
5745 struct lwp_info *lp = find_lwp_pid (ptid);
5746
5747 gdb_assert (lp != NULL);
5748
5749 return &lp->siginfo;
5750}
5751
2c0b251b
PA
5752/* Provide a prototype to silence -Wmissing-prototypes. */
5753extern initialize_file_ftype _initialize_linux_nat;
5754
d6b0e80f
AC
5755void
5756_initialize_linux_nat (void)
5757{
1bedd215
AC
5758 add_info ("proc", linux_nat_info_proc_cmd, _("\
5759Show /proc process information about any running process.\n\
dba24537
AC
5760Specify any process id, or use the program being debugged by default.\n\
5761Specify any of the following keywords for detailed info:\n\
5762 mappings -- list of mapped memory regions.\n\
5763 stat -- list a bunch of random process info.\n\
5764 status -- list a different bunch of random process info.\n\
1bedd215 5765 all -- list all available /proc info."));
d6b0e80f 5766
b84876c2
PA
5767 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5768 &debug_linux_nat, _("\
5769Set debugging of GNU/Linux lwp module."), _("\
5770Show debugging of GNU/Linux lwp module."), _("\
5771Enables printf debugging output."),
5772 NULL,
5773 show_debug_linux_nat,
5774 &setdebuglist, &showdebuglist);
5775
5776 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
5777 &debug_linux_nat_async, _("\
5778Set debugging of GNU/Linux async lwp module."), _("\
5779Show debugging of GNU/Linux async lwp module."), _("\
5780Enables printf debugging output."),
5781 NULL,
5782 show_debug_linux_nat_async,
5783 &setdebuglist, &showdebuglist);
5784
b84876c2 5785 /* Save this mask as the default. */
d6b0e80f
AC
5786 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5787
7feb7d06
PA
5788 /* Install a SIGCHLD handler. */
5789 sigchld_action.sa_handler = sigchld_handler;
5790 sigemptyset (&sigchld_action.sa_mask);
5791 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5792
5793 /* Make it the default. */
7feb7d06 5794 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5795
5796 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5797 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5798 sigdelset (&suspend_mask, SIGCHLD);
5799
7feb7d06 5800 sigemptyset (&blocked_mask);
10568435
JK
5801
5802 add_setshow_boolean_cmd ("disable-randomization", class_support,
5803 &disable_randomization, _("\
5804Set disabling of debuggee's virtual address space randomization."), _("\
5805Show disabling of debuggee's virtual address space randomization."), _("\
5806When this mode is on (which is the default), randomization of the virtual\n\
5807address space is disabled. Standalone programs run with the randomization\n\
5808enabled by default on some platforms."),
5809 &set_disable_randomization,
5810 &show_disable_randomization,
5811 &setlist, &showlist);
d6b0e80f
AC
5812}
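/* Editor's note: a standalone sketch (not part of this file) of the
   signal-mask discipline _initialize_linux_nat sets up above: SIGCHLD
   stays blocked in normal code, and a copy of the mask with SIGCHLD
   deleted is passed to sigsuspend, so the signal can only be delivered
   inside that atomic wait and no wakeup can be lost between the check
   and the suspension.  Compile it separately.  */

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigchld;

static void
sketch_sigchld_handler (int signo)
{
  got_sigchld = 1;
}

int
main (void)
{
  struct sigaction sa;
  sigset_t block_mask, suspend_mask;
  pid_t child;

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = sketch_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  /* Keep SIGCHLD blocked in normal code ...  */
  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, NULL);

  /* ... but not while suspended waiting for it.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  child = fork ();
  if (child == 0)
    _exit (0);

  /* If SIGCHLD already fired it is pending (blocked) and is delivered
     the instant sigsuspend installs SUSPEND_MASK.  */
  while (!got_sigchld)
    sigsuspend (&suspend_mask);

  waitpid (child, NULL, 0);
  printf ("got SIGCHLD, child reaped\n");
  return 0;
}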
5813\f
5814
5815/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5816 the GNU/Linux Threads library and therefore doesn't really belong
5817 here. */
5818
5819/* Read variable NAME in the target and return its value if found.
5820 Otherwise return zero. It is assumed that the type of the variable
5821 is `int'. */
5822
5823static int
5824get_signo (const char *name)
5825{
5826 struct minimal_symbol *ms;
5827 int signo;
5828
5829 ms = lookup_minimal_symbol (name, NULL, NULL);
5830 if (ms == NULL)
5831 return 0;
5832
8e70166d 5833 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5834 sizeof (signo)) != 0)
5835 return 0;
5836
5837 return signo;
5838}
5839
5840/* Return the set of signals used by the threads library in *SET. */
5841
5842void
5843lin_thread_get_thread_signals (sigset_t *set)
5844{
5845 struct sigaction action;
5846 int restart, cancel;
5847
b84876c2 5848 sigemptyset (&blocked_mask);
d6b0e80f
AC
5849 sigemptyset (set);
5850
5851 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5852 cancel = get_signo ("__pthread_sig_cancel");
5853
5854 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5855 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5856 not provide any way for the debugger to query the signal numbers -
5857 fortunately they don't change! */
5858
d6b0e80f 5859 if (restart == 0)
17fbb0bd 5860 restart = __SIGRTMIN;
d6b0e80f 5861
d6b0e80f 5862 if (cancel == 0)
17fbb0bd 5863 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5864
5865 sigaddset (set, restart);
5866 sigaddset (set, cancel);
5867
5868 /* The GNU/Linux Threads library makes terminating threads send a
5869 special "cancel" signal instead of SIGCHLD. Make sure we catch
5870 those (to prevent them from terminating GDB itself, which is
5871 likely to be their default action) and treat them the same way as
5872 SIGCHLD. */
5873
5874 action.sa_handler = sigchld_handler;
5875 sigemptyset (&action.sa_mask);
58aecb61 5876 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5877 sigaction (cancel, &action, NULL);
5878
5879 /* We block the "cancel" signal throughout this code ... */
5880 sigaddset (&blocked_mask, cancel);
5881 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5882
5883 /* ... except during a sigsuspend. */
5884 sigdelset (&suspend_mask, cancel);
5885}