/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc.  */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite adequate.
Prior to version 2.4, Linux could wait either for events in the main thread,
or for events in secondary threads (2.4 has the __WALL flag).  So, if we use
blocking waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid to get
an event in the main process, if any.  Second, we use non-blocking waitpid
with the __WCLONE flag to check for events in cloned processes.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be delivered both
for events in the main debugged process and in cloned processes.  As soon as
we know there's an event, we get back to calling nonblocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend is a
viable option.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
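
/* Purely illustrative sketch of the self-pipe trick described above,
   using made-up handler and pipe names (the real machinery is
   async_file_mark/async_file_flush below plus GDB's SIGCHLD handler):

     static int ex_pipe[2];

     static void
     ex_sigchld_handler (int signo)
     {
       int saved_errno = errno;

       while (write (ex_pipe[1], "+", 1) < 0 && errno == EINTR)
         ;
       errno = saved_errno;
     }

   The event loop select/poll's on ex_pipe[0]; when it becomes
   readable, the loop drains the pipe and asks the target for the
   pending event, exactly as the text above describes.  */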

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
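
/* Illustration: given a STATUS filled in by waitpid for a stopped
   tracee, a syscall-entry/exit stop reported under
   PTRACE_O_TRACESYSGOOD can be told apart from a plain SIGTRAP stop
   like this (status_to_str below performs the same test; the handler
   names here are made up):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
       handle_syscall_stop (status);     [syscall entry/exit event]
     else if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
       handle_trap_stop (status);        [breakpoint, step, etc.]  */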

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugging of GNU/Linux async lwp module is %s.\n"),
                    value);
}

static int disable_randomization = 1;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file,
                    _("Disabling randomization of debuggee's "
                      "virtual address space is %s.\n"),
                    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("Disabling randomization of debuggee's "
                    "virtual address space is unsupported on\n"
                    "this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}

static void
set_disable_randomization (char *args, int from_tty,
                           struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("Disabling randomization of debuggee's "
           "virtual address space is unsupported on\n"
           "this platform."));
#endif /* !HAVE_PERSONALITY */
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support.  */

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}

\f
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
           status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          restore_child_signals_mask (&prev_mask);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed "
                   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected "
                   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: "
                       "failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
  linux_enable_tracesysgood (ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep the breakpoints list in sync.  */
              remove_breakpoints_pid (GET_PID (inferior_ptid));
            }

          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from "
                                "child process %d.\n",
                                child_pid);
            }

          ptrace (PTRACE_DETACH, child_pid, 0, 0);
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_pid);

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = ptid_build (child_pid, child_pid, 0);
          add_thread (inferior_ptid);
          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->resumed = 1;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (solib-svr4) learn about
                 this new process, relocate the cloned exec, pull in
                 shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct lwp_info *lp;
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          lp = find_lwp_pid (pid_to_ptid (parent_pid));
          gdb_assert (linux_supports_tracefork_flag >= 0);
          if (linux_supports_tracevforkdone (0))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);

              lp->stopped = 1;
              lp->resumed = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypasses actually
                 resuming the inferior.  */
              lp->status = 0;
              lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              lp->stopped = 0;
              lp->resumed = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_can_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          if (has_vforked)
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "vfork to child process %d.\n"),
                              parent_pid, child_pid);
          else
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "fork to child process %d.\n"),
                              parent_pid, child_pid);
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      lp = add_lwp (inferior_ptid);
      lp->stopped = 1;
      lp->resumed = 1;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (solib-svr4) learn about
             this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
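
/* Illustration only: the "check both kinds of children" pattern that
   the __WCLONE peculiarity above forces on pre-__WALL kernels, as a
   stand-alone sketch (ex_wait_any_child is a made-up name):

     static pid_t
     ex_wait_any_child (int *statusp)
     {
       pid_t pid;

       pid = waitpid (-1, statusp, WNOHANG);
       if (pid > 0)
         return pid;         [event from the initial process]

       return waitpid (-1, statusp, WNOHANG | __WCLONE);
                             [event from a cloned sibling, if any]
     }

   If both calls report nothing, the caller blocks in sigsuspend
   waiting for SIGCHLD, as described in the comment at the top of
   this file.  */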

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = target_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
                  strsignal (SIGTRAP));
      else
        snprintf (buf, sizeof (buf), "%s (stopped)",
                  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          xfree (lp);
        }
      else
        lpprev = lp;
    }
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   int (*callback) (struct lwp_info *, void *),
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data))
            return lp;
        }
    }

  return NULL;
}
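
/* Usage sketch for iterate_over_lwps (illustrative; the callback name
   is made up).  A callback that counts the stopped LWPs matching a
   filter ptid could look like:

     static int
     ex_count_stopped (struct lwp_info *lp, void *data)
     {
       if (lp->stopped)
         ++*(int *) data;
       return 0;           [0 means keep iterating]
     }

     int count = 0;
     iterate_over_lwps (pid_to_ptid (pid), ex_count_stopped, &count);

   Returning 1 from the callback stops the walk early and makes
   iterate_over_lwps return that LWP.  */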

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Return an lwp's tgid, found in `/proc/PID/status'.  */

int
linux_proc_get_tgid (int lwpid)
{
  FILE *status_file;
  char buf[100];
  int tgid = -1;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "Tgid:", 5) == 0)
            {
              tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10);
              break;
            }
        }

      fclose (status_file);
    }

  return tgid;
}
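
/* For reference, a `/proc/PID/status' file typically begins with
   lines like the following (the exact set of fields varies across
   kernel versions; only the "Tgid:" line is used above, and only the
   "State:" line is used by pid_is_stopped below):

     Name:	bash
     State:	T (stopped)
     Tgid:	3038
     Pid:	3038
     PPid:	3036

   The values shown are just an example.  */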
1311
a0ef4274
DJ
1312/* Detect `T (stopped)' in `/proc/PID/status'.
1313 Other states including `T (tracing stop)' are reported as false. */
1314
1315static int
1316pid_is_stopped (pid_t pid)
1317{
1318 FILE *status_file;
1319 char buf[100];
1320 int retval = 0;
1321
1322 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1323 status_file = fopen (buf, "r");
1324 if (status_file != NULL)
1325 {
1326 int have_state = 0;
1327
1328 while (fgets (buf, sizeof (buf), status_file))
1329 {
1330 if (strncmp (buf, "State:", 6) == 0)
1331 {
1332 have_state = 1;
1333 break;
1334 }
1335 }
1336 if (have_state && strstr (buf, "T (stopped)") != NULL)
1337 retval = 1;
1338 fclose (status_file);
1339 }
1340 return retval;
1341}
1342
1343/* Wait for the LWP specified by LP, which we have just attached to.
1344 Returns a wait status for that LWP, to cache. */
1345
1346static int
1347linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1348 int *signalled)
1349{
1350 pid_t new_pid, pid = GET_LWP (ptid);
1351 int status;
1352
1353 if (pid_is_stopped (pid))
1354 {
1355 if (debug_linux_nat)
1356 fprintf_unfiltered (gdb_stdlog,
1357 "LNPAW: Attaching to a stopped process\n");
1358
1359 /* The process is definitely stopped. It is in a job control
1360 stop, unless the kernel predates the TASK_STOPPED /
1361 TASK_TRACED distinction, in which case it might be in a
1362 ptrace stop. Make sure it is in a ptrace stop; from there we
1363 can kill it, signal it, et cetera.
1364
1365 First make sure there is a pending SIGSTOP. Since we are
1366 already attached, the process can not transition from stopped
1367 to running without a PTRACE_CONT; so we know this signal will
1368 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1369 probably already in the queue (unless this kernel is old
1370 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1371 is not an RT signal, it can only be queued once. */
1372 kill_lwp (pid, SIGSTOP);
1373
1374 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1375 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1376 ptrace (PTRACE_CONT, pid, 0, 0);
1377 }
1378
1379 /* Make sure the initial process is stopped. The user-level threads
1380 layer might want to poke around in the inferior, and that won't
1381 work if things haven't stabilized yet. */
1382 new_pid = my_waitpid (pid, &status, 0);
1383 if (new_pid == -1 && errno == ECHILD)
1384 {
1385 if (first)
1386 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1387
1388 /* Try again with __WCLONE to check cloned processes. */
1389 new_pid = my_waitpid (pid, &status, __WCLONE);
1390 *cloned = 1;
1391 }
1392
dacc9cb2
PP
1393 gdb_assert (pid == new_pid);
1394
1395 if (!WIFSTOPPED (status))
1396 {
1397 /* The pid we tried to attach has apparently just exited. */
1398 if (debug_linux_nat)
1399 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1400 pid, status_to_str (status));
1401 return status;
1402 }
a0ef4274
DJ
1403
1404 if (WSTOPSIG (status) != SIGSTOP)
1405 {
1406 *signalled = 1;
1407 if (debug_linux_nat)
1408 fprintf_unfiltered (gdb_stdlog,
1409 "LNPAW: Received %s after attaching\n",
1410 status_to_str (status));
1411 }
1412
1413 return status;
1414}
1415
1416/* Attach to the LWP specified by PID. Return 0 if successful or -1
1417 if the new LWP could not be attached. */
d6b0e80f 1418
9ee57c33 1419int
93815fbf 1420lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1421{
9ee57c33 1422 struct lwp_info *lp;
7feb7d06 1423 sigset_t prev_mask;
d6b0e80f
AC
1424
1425 gdb_assert (is_lwp (ptid));
1426
7feb7d06 1427 block_child_signals (&prev_mask);
d6b0e80f 1428
9ee57c33 1429 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1430
1431 /* We assume that we're already attached to any LWP that has an id
1432 equal to the overall process id, and to any LWP that is already
1433 in our list of LWPs. If we're not seeing exit events from threads
1434 and we've had PID wraparound since we last tried to stop all threads,
1435 this assumption might be wrong; fortunately, this is very unlikely
1436 to happen. */
9ee57c33 1437 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1438 {
a0ef4274 1439 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1440
1441 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1442 {
1443 /* If we fail to attach to the thread, issue a warning,
1444 but continue. One way this can happen is if thread
e9efe249 1445 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1446 bug may place threads in the thread list and then fail
1447 to create them. */
1448 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1449 safe_strerror (errno));
7feb7d06 1450 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1451 return -1;
1452 }
1453
d6b0e80f
AC
1454 if (debug_linux_nat)
1455 fprintf_unfiltered (gdb_stdlog,
1456 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1457 target_pid_to_str (ptid));
1458
a0ef4274 1459 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2
PP
1460 if (!WIFSTOPPED (status))
1461 return -1;
1462
a0ef4274
DJ
1463 lp = add_lwp (ptid);
1464 lp->stopped = 1;
1465 lp->cloned = cloned;
1466 lp->signalled = signalled;
1467 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1468 {
a0ef4274
DJ
1469 lp->resumed = 1;
1470 lp->status = status;
d6b0e80f
AC
1471 }
1472
a0ef4274 1473 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1474
1475 if (debug_linux_nat)
1476 {
1477 fprintf_unfiltered (gdb_stdlog,
1478 "LLAL: waitpid %s received %s\n",
1479 target_pid_to_str (ptid),
1480 status_to_str (status));
1481 }
1482 }
1483 else
1484 {
1485 /* We assume that the LWP representing the original process is
1486 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1487 that the GNU/linux ptrace layer uses to keep track of
1488 threads. Note that this won't have already been done since
1489 the main thread will have, we assume, been stopped by an
1490 attach from a different layer. */
9ee57c33
DJ
1491 if (lp == NULL)
1492 lp = add_lwp (ptid);
d6b0e80f
AC
1493 lp->stopped = 1;
1494 }
9ee57c33 1495
7feb7d06 1496 restore_child_signals_mask (&prev_mask);
9ee57c33 1497 return 0;
d6b0e80f
AC
1498}
1499
b84876c2 1500static void
136d6dae
VP
1501linux_nat_create_inferior (struct target_ops *ops,
1502 char *exec_file, char *allargs, char **env,
b84876c2
PA
1503 int from_tty)
1504{
10568435
JK
1505#ifdef HAVE_PERSONALITY
1506 int personality_orig = 0, personality_set = 0;
1507#endif /* HAVE_PERSONALITY */
b84876c2
PA
1508
1509 /* The fork_child mechanism is synchronous and calls target_wait, so
1510 we have to mask the async mode. */
1511
10568435
JK
1512#ifdef HAVE_PERSONALITY
1513 if (disable_randomization)
1514 {
1515 errno = 0;
1516 personality_orig = personality (0xffffffff);
1517 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1518 {
1519 personality_set = 1;
1520 personality (personality_orig | ADDR_NO_RANDOMIZE);
1521 }
1522 if (errno != 0 || (personality_set
1523 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1524 warning (_("Error disabling address space randomization: %s"),
1525 safe_strerror (errno));
1526 }
1527#endif /* HAVE_PERSONALITY */
1528
2455069d
UW
1529 /* Make sure we report all signals during startup. */
1530 linux_nat_pass_signals (0, NULL);
1531
136d6dae 1532 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1533
10568435
JK
1534#ifdef HAVE_PERSONALITY
1535 if (personality_set)
1536 {
1537 errno = 0;
1538 personality (personality_orig);
1539 if (errno != 0)
1540 warning (_("Error restoring address space randomization: %s"),
1541 safe_strerror (errno));
1542 }
1543#endif /* HAVE_PERSONALITY */
b84876c2
PA
1544}
1545
d6b0e80f 1546static void
136d6dae 1547linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1548{
1549 struct lwp_info *lp;
d6b0e80f 1550 int status;
af990527 1551 ptid_t ptid;
d6b0e80f 1552
2455069d
UW
1553 /* Make sure we report all signals during attach. */
1554 linux_nat_pass_signals (0, NULL);
1555
136d6dae 1556 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1557
af990527
PA
1558 /* The ptrace base target adds the main thread with (pid,0,0)
1559 format. Decorate it with lwp info. */
1560 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1561 thread_change_ptid (inferior_ptid, ptid);
1562
9f0bdab8 1563 /* Add the initial process as the first LWP to the list. */
af990527 1564 lp = add_lwp (ptid);
a0ef4274
DJ
1565
1566 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1567 &lp->signalled);
dacc9cb2
PP
1568 if (!WIFSTOPPED (status))
1569 {
1570 if (WIFEXITED (status))
1571 {
1572 int exit_code = WEXITSTATUS (status);
1573
1574 target_terminal_ours ();
1575 target_mourn_inferior ();
1576 if (exit_code == 0)
1577 error (_("Unable to attach: program exited normally."));
1578 else
1579 error (_("Unable to attach: program exited with code %d."),
1580 exit_code);
1581 }
1582 else if (WIFSIGNALED (status))
1583 {
1584 enum target_signal signo;
1585
1586 target_terminal_ours ();
1587 target_mourn_inferior ();
1588
1589 signo = target_signal_from_host (WTERMSIG (status));
1590 error (_("Unable to attach: program terminated with signal "
1591 "%s, %s."),
1592 target_signal_to_name (signo),
1593 target_signal_to_string (signo));
1594 }
1595
1596 internal_error (__FILE__, __LINE__,
1597 _("unexpected status %d for PID %ld"),
1598 status, (long) GET_LWP (ptid));
1599 }
1600
a0ef4274 1601 lp->stopped = 1;
9f0bdab8 1602
a0ef4274 1603 /* Save the wait status to report later. */
d6b0e80f 1604 lp->resumed = 1;
a0ef4274
DJ
1605 if (debug_linux_nat)
1606 fprintf_unfiltered (gdb_stdlog,
1607 "LNA: waitpid %ld, saving status %s\n",
1608 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1609
7feb7d06
PA
1610 lp->status = status;
1611
1612 if (target_can_async_p ())
1613 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1614}
1615
a0ef4274
DJ
1616/* Get pending status of LP. */
1617static int
1618get_pending_status (struct lwp_info *lp, int *status)
1619{
ca2163eb
PA
1620 enum target_signal signo = TARGET_SIGNAL_0;
1621
1622 /* If we paused threads momentarily, we may have stored pending
1623 events in lp->status or lp->waitstatus (see stop_wait_callback),
1624 and GDB core hasn't seen any signal for those threads.
1625 Otherwise, the last signal reported to the core is found in the
1626 thread object's stop_signal.
1627
1628 There's a corner case that isn't handled here at present. Only
1629 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1630 stop_signal make sense as a real signal to pass to the inferior.
1631 Some catchpoint related events, like
1632 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1633 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1634 those traps are debug API (ptrace in our case) related and
1635 induced; the inferior wouldn't see them if it wasn't being
1636 traced. Hence, we should never pass them to the inferior, even
1637 when set to pass state. Since this corner case isn't handled by
1638 infrun.c when proceeding with a signal, for consistency, neither
1639 do we handle it here (or elsewhere in the file we check for
1640 signal pass state). Normally SIGTRAP isn't set to pass state, so
1641 this is really a corner case. */
1642
1643 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1644 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1645 else if (lp->status)
1646 signo = target_signal_from_host (WSTOPSIG (lp->status));
1647 else if (non_stop && !is_executing (lp->ptid))
1648 {
1649 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1650
16c381f0 1651 signo = tp->suspend.stop_signal;
ca2163eb
PA
1652 }
1653 else if (!non_stop)
a0ef4274 1654 {
ca2163eb
PA
1655 struct target_waitstatus last;
1656 ptid_t last_ptid;
4c28f408 1657
ca2163eb 1658 get_last_target_status (&last_ptid, &last);
4c28f408 1659
ca2163eb
PA
1660 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1661 {
e09875d4 1662 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1663
16c381f0 1664 signo = tp->suspend.stop_signal;
4c28f408 1665 }
ca2163eb 1666 }
4c28f408 1667
ca2163eb 1668 *status = 0;
4c28f408 1669
ca2163eb
PA
1670 if (signo == TARGET_SIGNAL_0)
1671 {
1672 if (debug_linux_nat)
1673 fprintf_unfiltered (gdb_stdlog,
1674 "GPT: lwp %s has no pending signal\n",
1675 target_pid_to_str (lp->ptid));
1676 }
1677 else if (!signal_pass_state (signo))
1678 {
1679 if (debug_linux_nat)
3e43a32a
MS
1680 fprintf_unfiltered (gdb_stdlog,
1681 "GPT: lwp %s had signal %s, "
1682 "but it is in no pass state\n",
ca2163eb
PA
1683 target_pid_to_str (lp->ptid),
1684 target_signal_to_string (signo));
a0ef4274 1685 }
a0ef4274 1686 else
4c28f408 1687 {
ca2163eb
PA
1688 *status = W_STOPCODE (target_signal_to_host (signo));
1689
1690 if (debug_linux_nat)
1691 fprintf_unfiltered (gdb_stdlog,
1692 "GPT: lwp %s has pending signal %s\n",
1693 target_pid_to_str (lp->ptid),
1694 target_signal_to_string (signo));
4c28f408 1695 }
a0ef4274
DJ
1696
1697 return 0;
1698}
1699
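The W_STOPCODE packing done inside get_pending_status (and unpacked again by detach_callback below with WSTOPSIG) can be exercised on its own. A minimal standalone sketch, not part of linux-nat.c, assuming the usual glibc wait-status layout for the fallback macro:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
# define W_STOPCODE(sig) (((sig) << 8) | 0x7f)   /* assumed glibc encoding */
#endif

int
main (void)
{
  int status = W_STOPCODE (SIGINT);   /* what get_pending_status stores */

  /* detach_callback recovers the signal with WIFSTOPPED/WSTOPSIG.  */
  printf ("WIFSTOPPED=%d WSTOPSIG=%d (SIGINT=%d)\n",
          WIFSTOPPED (status), WSTOPSIG (status), SIGINT);
  return 0;
}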
d6b0e80f
AC
1700static int
1701detach_callback (struct lwp_info *lp, void *data)
1702{
1703 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1704
1705 if (debug_linux_nat && lp->status)
1706 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1707 strsignal (WSTOPSIG (lp->status)),
1708 target_pid_to_str (lp->ptid));
1709
a0ef4274
DJ
1710 /* If there is a pending SIGSTOP, get rid of it. */
1711 if (lp->signalled)
d6b0e80f 1712 {
d6b0e80f
AC
1713 if (debug_linux_nat)
1714 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1715 "DC: Sending SIGCONT to %s\n",
1716 target_pid_to_str (lp->ptid));
d6b0e80f 1717
a0ef4274 1718 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1719 lp->signalled = 0;
d6b0e80f
AC
1720 }
1721
1722 /* We don't actually detach from the LWP that has an id equal to the
1723 overall process id just yet. */
1724 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1725 {
a0ef4274
DJ
1726 int status = 0;
1727
1728 /* Pass on any pending signal for this LWP. */
1729 get_pending_status (lp, &status);
1730
d6b0e80f
AC
1731 errno = 0;
1732 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1733 WSTOPSIG (status)) < 0)
8a3fe4f8 1734 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1735 safe_strerror (errno));
1736
1737 if (debug_linux_nat)
1738 fprintf_unfiltered (gdb_stdlog,
1739 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1740 target_pid_to_str (lp->ptid),
7feb7d06 1741 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1742
1743 delete_lwp (lp->ptid);
1744 }
1745
1746 return 0;
1747}
1748
1749static void
136d6dae 1750linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1751{
b84876c2 1752 int pid;
a0ef4274 1753 int status;
d90e17a7
PA
1754 struct lwp_info *main_lwp;
1755
1756 pid = GET_PID (inferior_ptid);
a0ef4274 1757
b84876c2
PA
1758 if (target_can_async_p ())
1759 linux_nat_async (NULL, 0);
1760
4c28f408
PA
1761 /* Stop all threads before detaching. ptrace requires that the
 1762 thread is stopped to successfully detach. */
d90e17a7 1763 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1764 /* ... and wait until all of them have reported back that
1765 they're no longer running. */
d90e17a7 1766 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1767
d90e17a7 1768 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1769
1770 /* Only the initial process should be left right now. */
d90e17a7
PA
1771 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1772
1773 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1774
a0ef4274
DJ
1775 /* Pass on any pending signal for the last LWP. */
1776 if ((args == NULL || *args == '\0')
d90e17a7 1777 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1778 && WIFSTOPPED (status))
1779 {
1780 /* Put the signal number in ARGS so that inf_ptrace_detach will
1781 pass it along with PTRACE_DETACH. */
1782 args = alloca (8);
1783 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1784 if (debug_linux_nat)
1785 fprintf_unfiltered (gdb_stdlog,
1786 "LND: Sending signal %s to %s\n",
1787 args,
1788 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1789 }
1790
d90e17a7 1791 delete_lwp (main_lwp->ptid);
b84876c2 1792
7a7d3353
PA
1793 if (forks_exist_p ())
1794 {
1795 /* Multi-fork case. The current inferior_ptid is being detached
1796 from, but there are other viable forks to debug. Detach from
1797 the current fork, and context-switch to the first
1798 available. */
1799 linux_fork_detach (args, from_tty);
1800
1801 if (non_stop && target_can_async_p ())
1802 target_async (inferior_event_handler, 0);
1803 }
1804 else
1805 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1806}
1807
1808/* Resume LP. */
1809
1810static int
1811resume_callback (struct lwp_info *lp, void *data)
1812{
6c95b8df
PA
1813 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1814
1815 if (lp->stopped && inf->vfork_child != NULL)
1816 {
1817 if (debug_linux_nat)
1818 fprintf_unfiltered (gdb_stdlog,
1819 "RC: Not resuming %s (vfork parent)\n",
1820 target_pid_to_str (lp->ptid));
1821 }
1822 else if (lp->stopped && lp->status == 0)
d6b0e80f 1823 {
d90e17a7
PA
1824 if (debug_linux_nat)
1825 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1826 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
d90e17a7
PA
1827 target_pid_to_str (lp->ptid));
1828
28439f5e
PA
1829 linux_ops->to_resume (linux_ops,
1830 pid_to_ptid (GET_LWP (lp->ptid)),
a289b8f6 1831 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1832 if (debug_linux_nat)
1833 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1834 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
d6b0e80f
AC
1835 target_pid_to_str (lp->ptid));
1836 lp->stopped = 0;
a289b8f6 1837 lp->step = 0;
9f0bdab8 1838 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1839 lp->stopped_by_watchpoint = 0;
d6b0e80f 1840 }
57380f4e 1841 else if (lp->stopped && debug_linux_nat)
3e43a32a
MS
1842 fprintf_unfiltered (gdb_stdlog,
1843 "RC: Not resuming sibling %s (has pending)\n",
57380f4e
DJ
1844 target_pid_to_str (lp->ptid));
1845 else if (debug_linux_nat)
3e43a32a
MS
1846 fprintf_unfiltered (gdb_stdlog,
1847 "RC: Not resuming sibling %s (not stopped)\n",
57380f4e 1848 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1849
1850 return 0;
1851}
1852
1853static int
1854resume_clear_callback (struct lwp_info *lp, void *data)
1855{
1856 lp->resumed = 0;
1857 return 0;
1858}
1859
1860static int
1861resume_set_callback (struct lwp_info *lp, void *data)
1862{
1863 lp->resumed = 1;
1864 return 0;
1865}
1866
1867static void
28439f5e
PA
1868linux_nat_resume (struct target_ops *ops,
1869 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1870{
7feb7d06 1871 sigset_t prev_mask;
d6b0e80f 1872 struct lwp_info *lp;
d90e17a7 1873 int resume_many;
d6b0e80f 1874
76f50ad1
DJ
1875 if (debug_linux_nat)
1876 fprintf_unfiltered (gdb_stdlog,
1877 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1878 step ? "step" : "resume",
1879 target_pid_to_str (ptid),
423ec54c
JK
1880 (signo != TARGET_SIGNAL_0
1881 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1882 target_pid_to_str (inferior_ptid));
1883
7feb7d06 1884 block_child_signals (&prev_mask);
b84876c2 1885
d6b0e80f 1886 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1887 resume_many = (ptid_equal (minus_one_ptid, ptid)
1888 || ptid_is_pid (ptid));
4c28f408 1889
e3e9f5a2
PA
1890 /* Mark the lwps we're resuming as resumed. */
1891 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1892
d90e17a7
PA
1893 /* See if it's the current inferior that should be handled
1894 specially. */
1895 if (resume_many)
1896 lp = find_lwp_pid (inferior_ptid);
1897 else
1898 lp = find_lwp_pid (ptid);
9f0bdab8 1899 gdb_assert (lp != NULL);
d6b0e80f 1900
9f0bdab8
DJ
1901 /* Remember if we're stepping. */
1902 lp->step = step;
d6b0e80f 1903
9f0bdab8
DJ
1904 /* If we have a pending wait status for this thread, there is no
1905 point in resuming the process. But first make sure that
1906 linux_nat_wait won't preemptively handle the event - we
1907 should never take this short-circuit if we are going to
1908 leave LP running, since we have skipped resuming all the
1909 other threads. This bit of code needs to be synchronized
1910 with linux_nat_wait. */
76f50ad1 1911
9f0bdab8
DJ
1912 if (lp->status && WIFSTOPPED (lp->status))
1913 {
2455069d
UW
1914 if (!lp->step
1915 && WSTOPSIG (lp->status)
1916 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1917 {
9f0bdab8
DJ
1918 if (debug_linux_nat)
1919 fprintf_unfiltered (gdb_stdlog,
1920 "LLR: Not short circuiting for ignored "
1921 "status 0x%x\n", lp->status);
1922
d6b0e80f
AC
1923 /* FIXME: What should we do if we are supposed to continue
1924 this thread with a signal? */
1925 gdb_assert (signo == TARGET_SIGNAL_0);
2455069d 1926 signo = target_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1927 lp->status = 0;
1928 }
1929 }
76f50ad1 1930
6c95b8df 1931 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1932 {
1933 /* FIXME: What should we do if we are supposed to continue
1934 this thread with a signal? */
1935 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1936
9f0bdab8
DJ
1937 if (debug_linux_nat)
1938 fprintf_unfiltered (gdb_stdlog,
1939 "LLR: Short circuiting for status 0x%x\n",
1940 lp->status);
d6b0e80f 1941
7feb7d06
PA
1942 restore_child_signals_mask (&prev_mask);
1943 if (target_can_async_p ())
1944 {
1945 target_async (inferior_event_handler, 0);
1946 /* Tell the event loop we have something to process. */
1947 async_file_mark ();
1948 }
9f0bdab8 1949 return;
d6b0e80f
AC
1950 }
1951
9f0bdab8
DJ
1952 /* Mark LWP as not stopped to prevent it from being continued by
1953 resume_callback. */
1954 lp->stopped = 0;
1955
d90e17a7
PA
1956 if (resume_many)
1957 iterate_over_lwps (ptid, resume_callback, NULL);
1958
1959 /* Convert to something the lower layer understands. */
1960 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1961
28439f5e 1962 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 1963 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1964 lp->stopped_by_watchpoint = 0;
9f0bdab8 1965
d6b0e80f
AC
1966 if (debug_linux_nat)
1967 fprintf_unfiltered (gdb_stdlog,
1968 "LLR: %s %s, %s (resume event thread)\n",
1969 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1970 target_pid_to_str (ptid),
423ec54c
JK
1971 (signo != TARGET_SIGNAL_0
1972 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 1973
7feb7d06 1974 restore_child_signals_mask (&prev_mask);
b84876c2 1975 if (target_can_async_p ())
8ea051c5 1976 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1977}
1978
c5f62d5f 1979/* Send a signal to an LWP. */
d6b0e80f
AC
1980
1981static int
1982kill_lwp (int lwpid, int signo)
1983{
c5f62d5f
DE
1984 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1985 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
1986
1987#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
1988 {
1989 static int tkill_failed;
1990
1991 if (!tkill_failed)
1992 {
1993 int ret;
1994
1995 errno = 0;
1996 ret = syscall (__NR_tkill, lwpid, signo);
1997 if (errno != ENOSYS)
1998 return ret;
1999 tkill_failed = 1;
2000 }
2001 }
d6b0e80f
AC
2002#endif
2003
2004 return kill (lwpid, signo);
2005}
2006
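The tkill-then-kill fallback in kill_lwp can be sketched as a standalone helper. This is only an illustration, assuming <sys/syscall.h> provides __NR_tkill on the build host; it is not the linux-nat.c function itself:

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int
send_signal_to_lwp (pid_t lwpid, int signo)
{
#ifdef __NR_tkill
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, lwpid, signo);   /* thread-directed signal */
  if (errno != ENOSYS)
    return ret;
#endif
  return kill (lwpid, signo);                 /* process-wide fallback */
}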
ca2163eb
PA
2007/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2008 event, check if the core is interested in it: if not, ignore the
2009 event, and keep waiting; otherwise, we need to toggle the LWP's
2010 syscall entry/exit status, since the ptrace event itself doesn't
2011 indicate it, and report the trap to higher layers. */
2012
2013static int
2014linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2015{
2016 struct target_waitstatus *ourstatus = &lp->waitstatus;
2017 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2018 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2019
2020 if (stopping)
2021 {
2022 /* If we're stopping threads, there's a SIGSTOP pending, which
2023 makes it so that the LWP reports an immediate syscall return,
2024 followed by the SIGSTOP. Skip seeing that "return" using
2025 PTRACE_CONT directly, and let stop_wait_callback collect the
 2026 SIGSTOP. Later when the thread is resumed, a new syscall
 2027 entry event will be reported. If we didn't do this (and returned 0), we'd
2028 leave a syscall entry pending, and our caller, by using
2029 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2030 itself. Later, when the user re-resumes this LWP, we'd see
2031 another syscall entry event and we'd mistake it for a return.
2032
2033 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2034 (leaving immediately with LWP->signalled set, without issuing
2035 a PTRACE_CONT), it would still be problematic to leave this
2036 syscall enter pending, as later when the thread is resumed,
2037 it would then see the same syscall exit mentioned above,
2038 followed by the delayed SIGSTOP, while the syscall didn't
2039 actually get to execute. It seems it would be even more
2040 confusing to the user. */
2041
2042 if (debug_linux_nat)
2043 fprintf_unfiltered (gdb_stdlog,
2044 "LHST: ignoring syscall %d "
2045 "for LWP %ld (stopping threads), "
2046 "resuming with PTRACE_CONT for SIGSTOP\n",
2047 syscall_number,
2048 GET_LWP (lp->ptid));
2049
2050 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2051 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2052 return 1;
2053 }
2054
2055 if (catch_syscall_enabled ())
2056 {
2057 /* Always update the entry/return state, even if this particular
2058 syscall isn't interesting to the core now. In async mode,
2059 the user could install a new catchpoint for this syscall
2060 between syscall enter/return, and we'll need to know to
2061 report a syscall return if that happens. */
2062 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2063 ? TARGET_WAITKIND_SYSCALL_RETURN
2064 : TARGET_WAITKIND_SYSCALL_ENTRY);
2065
2066 if (catching_syscall_number (syscall_number))
2067 {
2068 /* Alright, an event to report. */
2069 ourstatus->kind = lp->syscall_state;
2070 ourstatus->value.syscall_number = syscall_number;
2071
2072 if (debug_linux_nat)
2073 fprintf_unfiltered (gdb_stdlog,
2074 "LHST: stopping for %s of syscall %d"
2075 " for LWP %ld\n",
3e43a32a
MS
2076 lp->syscall_state
2077 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2078 ? "entry" : "return",
2079 syscall_number,
2080 GET_LWP (lp->ptid));
2081 return 0;
2082 }
2083
2084 if (debug_linux_nat)
2085 fprintf_unfiltered (gdb_stdlog,
2086 "LHST: ignoring %s of syscall %d "
2087 "for LWP %ld\n",
2088 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2089 ? "entry" : "return",
2090 syscall_number,
2091 GET_LWP (lp->ptid));
2092 }
2093 else
2094 {
2095 /* If we had been syscall tracing, and hence used PT_SYSCALL
2096 before on this LWP, it could happen that the user removes all
2097 syscall catchpoints before we get to process this event.
2098 There are two noteworthy issues here:
2099
2100 - When stopped at a syscall entry event, resuming with
2101 PT_STEP still resumes executing the syscall and reports a
2102 syscall return.
2103
 2104 - Only PT_SYSCALL catches syscall enters. If we last
 2105 single-stepped this thread, then this event can't be a
 2106 syscall enter; having just single-stepped, this stop
 2107 has to be a syscall exit.
2108
2109 The points above mean that the next resume, be it PT_STEP or
2110 PT_CONTINUE, can not trigger a syscall trace event. */
2111 if (debug_linux_nat)
2112 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2113 "LHST: caught syscall event "
2114 "with no syscall catchpoints."
ca2163eb
PA
2115 " %d for LWP %ld, ignoring\n",
2116 syscall_number,
2117 GET_LWP (lp->ptid));
2118 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2119 }
2120
2121 /* The core isn't interested in this event. For efficiency, avoid
2122 stopping all threads only to have the core resume them all again.
2123 Since we're not stopping threads, if we're still syscall tracing
2124 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2125 subsequent syscall. Simply resume using the inf-ptrace layer,
2126 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2127
2128 /* Note that gdbarch_get_syscall_number may access registers, hence
2129 fill a regcache. */
2130 registers_changed ();
2131 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2132 lp->step, TARGET_SIGNAL_0);
2133 return 1;
2134}
2135
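The SYSCALL_SIGTRAP convention handled above comes from PTRACE_O_TRACESYSGOOD: with that option set, syscall-boundary stops report WSTOPSIG == (SIGTRAP | 0x80), so the tracer can tell them apart from real SIGTRAPs. A hypothetical standalone tracer sketch (not GDB code) that shows the option in use:

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  pid_t child = fork ();
  int status;

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);                        /* let the parent set options */
      execlp ("true", "true", (char *) 0);
      _exit (127);
    }

  waitpid (child, &status, 0);                /* initial SIGSTOP */
  ptrace (PTRACE_SETOPTIONS, child, 0, (void *) (long) PTRACE_O_TRACESYSGOOD);

  for (;;)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);   /* run to next syscall boundary */
      waitpid (child, &status, 0);
      if (WIFEXITED (status))
        break;
      if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
        printf ("syscall entry/exit stop in LWP %d\n", (int) child);
    }
  return 0;
}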
3d799a95
DJ
2136/* Handle a GNU/Linux extended wait response. If we see a clone
2137 event, we need to add the new LWP to our list (and not report the
2138 trap to higher layers). This function returns non-zero if the
2139 event should be ignored and we should wait again. If STOPPING is
2140 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2141
2142static int
3d799a95
DJ
2143linux_handle_extended_wait (struct lwp_info *lp, int status,
2144 int stopping)
d6b0e80f 2145{
3d799a95
DJ
2146 int pid = GET_LWP (lp->ptid);
2147 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2148 int event = status >> 16;
d6b0e80f 2149
3d799a95
DJ
2150 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2151 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2152 {
3d799a95
DJ
2153 unsigned long new_pid;
2154 int ret;
2155
2156 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2157
3d799a95
DJ
2158 /* If we haven't already seen the new PID stop, wait for it now. */
2159 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2160 {
2161 /* The new child has a pending SIGSTOP. We can't affect it until it
2162 hits the SIGSTOP, but we're already attached. */
2163 ret = my_waitpid (new_pid, &status,
2164 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2165 if (ret == -1)
2166 perror_with_name (_("waiting for new child"));
2167 else if (ret != new_pid)
2168 internal_error (__FILE__, __LINE__,
2169 _("wait returned unexpected PID %d"), ret);
2170 else if (!WIFSTOPPED (status))
2171 internal_error (__FILE__, __LINE__,
2172 _("wait returned unexpected status 0x%x"), status);
2173 }
2174
3a3e9ee3 2175 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2176
2277426b
PA
2177 if (event == PTRACE_EVENT_FORK
2178 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2179 {
2277426b
PA
2180 /* Handle checkpointing by linux-fork.c here as a special
2181 case. We don't want the follow-fork-mode or 'catch fork'
2182 to interfere with this. */
2183
2184 /* This won't actually modify the breakpoint list, but will
2185 physically remove the breakpoints from the child. */
2186 detach_breakpoints (new_pid);
2187
2188 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2189 if (!find_fork_pid (new_pid))
2190 add_fork (new_pid);
2277426b
PA
2191
2192 /* Report as spurious, so that infrun doesn't want to follow
2193 this fork. We're actually doing an infcall in
2194 linux-fork.c. */
2195 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2196 linux_enable_event_reporting (pid_to_ptid (new_pid));
2197
2198 /* Report the stop to the core. */
2199 return 0;
2200 }
2201
3d799a95
DJ
2202 if (event == PTRACE_EVENT_FORK)
2203 ourstatus->kind = TARGET_WAITKIND_FORKED;
2204 else if (event == PTRACE_EVENT_VFORK)
2205 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2206 else
3d799a95 2207 {
78768c4a
JK
2208 struct lwp_info *new_lp;
2209
3d799a95 2210 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2211
d90e17a7 2212 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2213 new_lp->cloned = 1;
4c28f408 2214 new_lp->stopped = 1;
d6b0e80f 2215
3d799a95
DJ
2216 if (WSTOPSIG (status) != SIGSTOP)
2217 {
2218 /* This can happen if someone starts sending signals to
2219 the new thread before it gets a chance to run, which
2220 have a lower number than SIGSTOP (e.g. SIGUSR1).
2221 This is an unlikely case, and harder to handle for
2222 fork / vfork than for clone, so we do not try - but
2223 we handle it for clone events here. We'll send
2224 the other signal on to the thread below. */
2225
2226 new_lp->signalled = 1;
2227 }
2228 else
2229 status = 0;
d6b0e80f 2230
4c28f408 2231 if (non_stop)
3d799a95 2232 {
4c28f408
PA
2233 /* Add the new thread to GDB's lists as soon as possible
2234 so that:
2235
2236 1) the frontend doesn't have to wait for a stop to
2237 display them, and,
2238
2239 2) we tag it with the correct running state. */
2240
2241 /* If the thread_db layer is active, let it know about
2242 this new thread, and add it to GDB's list. */
2243 if (!thread_db_attach_lwp (new_lp->ptid))
2244 {
2245 /* We're not using thread_db. Add it to GDB's
2246 list. */
2247 target_post_attach (GET_LWP (new_lp->ptid));
2248 add_thread (new_lp->ptid);
2249 }
2250
2251 if (!stopping)
2252 {
2253 set_running (new_lp->ptid, 1);
2254 set_executing (new_lp->ptid, 1);
2255 }
2256 }
2257
ca2163eb
PA
2258 /* Note the need to use the low target ops to resume, to
2259 handle resuming with PT_SYSCALL if we have syscall
2260 catchpoints. */
4c28f408
PA
2261 if (!stopping)
2262 {
423ec54c 2263 enum target_signal signo;
ca2163eb 2264
4c28f408 2265 new_lp->stopped = 0;
3d799a95 2266 new_lp->resumed = 1;
ca2163eb
PA
2267
2268 signo = (status
2269 ? target_signal_from_host (WSTOPSIG (status))
2270 : TARGET_SIGNAL_0);
2271
2272 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2273 0, signo);
3d799a95 2274 }
ad34eb2f
JK
2275 else
2276 {
2277 if (status != 0)
2278 {
2279 /* We created NEW_LP so it cannot yet contain STATUS. */
2280 gdb_assert (new_lp->status == 0);
2281
2282 /* Save the wait status to report later. */
2283 if (debug_linux_nat)
2284 fprintf_unfiltered (gdb_stdlog,
2285 "LHEW: waitpid of new LWP %ld, "
2286 "saving status %s\n",
2287 (long) GET_LWP (new_lp->ptid),
2288 status_to_str (status));
2289 new_lp->status = status;
2290 }
2291 }
d6b0e80f 2292
3d799a95
DJ
2293 if (debug_linux_nat)
2294 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2295 "LHEW: Got clone event "
2296 "from LWP %ld, resuming\n",
3d799a95 2297 GET_LWP (lp->ptid));
ca2163eb
PA
2298 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2299 0, TARGET_SIGNAL_0);
3d799a95
DJ
2300
2301 return 1;
2302 }
2303
2304 return 0;
d6b0e80f
AC
2305 }
2306
3d799a95
DJ
2307 if (event == PTRACE_EVENT_EXEC)
2308 {
a75724bc
PA
2309 if (debug_linux_nat)
2310 fprintf_unfiltered (gdb_stdlog,
2311 "LHEW: Got exec event from LWP %ld\n",
2312 GET_LWP (lp->ptid));
2313
3d799a95
DJ
2314 ourstatus->kind = TARGET_WAITKIND_EXECD;
2315 ourstatus->value.execd_pathname
6d8fd2b7 2316 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2317
6c95b8df
PA
2318 return 0;
2319 }
2320
2321 if (event == PTRACE_EVENT_VFORK_DONE)
2322 {
2323 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2324 {
6c95b8df 2325 if (debug_linux_nat)
3e43a32a
MS
2326 fprintf_unfiltered (gdb_stdlog,
2327 "LHEW: Got expected PTRACE_EVENT_"
2328 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2329 GET_LWP (lp->ptid));
3d799a95 2330
6c95b8df
PA
2331 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2332 return 0;
3d799a95
DJ
2333 }
2334
6c95b8df 2335 if (debug_linux_nat)
3e43a32a
MS
2336 fprintf_unfiltered (gdb_stdlog,
2337 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2338 "from LWP %ld: resuming\n",
6c95b8df
PA
2339 GET_LWP (lp->ptid));
2340 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2341 return 1;
3d799a95
DJ
2342 }
2343
2344 internal_error (__FILE__, __LINE__,
2345 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2346}
2347
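The extended-event decoding used by linux_handle_extended_wait can be shown in isolation: the PTRACE_EVENT_* code sits in bits 16 and up of the raw wait status, and PTRACE_GETEVENTMSG returns the new child's or thread's id. A sketch, assuming <sys/ptrace.h> defines the PTRACE_EVENT_* constants; this is an illustration, not the GDB function:

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static int
decode_extended_stop (pid_t pid, int status, unsigned long *new_lwp)
{
  int event;

  if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGTRAP)
    return 0;

  event = status >> 16;                       /* PTRACE_EVENT_* code, if any */
  if (event == PTRACE_EVENT_FORK
      || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      /* The new child's/thread's id is the "event message".  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, new_lwp);
      return event;
    }

  return 0;
}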
2348/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2349 exited. */
2350
2351static int
2352wait_lwp (struct lwp_info *lp)
2353{
2354 pid_t pid;
2355 int status;
2356 int thread_dead = 0;
2357
2358 gdb_assert (!lp->stopped);
2359 gdb_assert (lp->status == 0);
2360
58aecb61 2361 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
2362 if (pid == -1 && errno == ECHILD)
2363 {
58aecb61 2364 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
2365 if (pid == -1 && errno == ECHILD)
2366 {
2367 /* The thread has previously exited. We need to delete it
2368 now because, for some vendor 2.4 kernels with NPTL
2369 support backported, there won't be an exit event unless
2370 it is the main thread. 2.6 kernels will report an exit
2371 event for each thread that exits, as expected. */
2372 thread_dead = 1;
2373 if (debug_linux_nat)
2374 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2375 target_pid_to_str (lp->ptid));
2376 }
2377 }
2378
2379 if (!thread_dead)
2380 {
2381 gdb_assert (pid == GET_LWP (lp->ptid));
2382
2383 if (debug_linux_nat)
2384 {
2385 fprintf_unfiltered (gdb_stdlog,
2386 "WL: waitpid %s received %s\n",
2387 target_pid_to_str (lp->ptid),
2388 status_to_str (status));
2389 }
2390 }
2391
2392 /* Check if the thread has exited. */
2393 if (WIFEXITED (status) || WIFSIGNALED (status))
2394 {
2395 thread_dead = 1;
2396 if (debug_linux_nat)
2397 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2398 target_pid_to_str (lp->ptid));
2399 }
2400
2401 if (thread_dead)
2402 {
e26af52f 2403 exit_lwp (lp);
d6b0e80f
AC
2404 return 0;
2405 }
2406
2407 gdb_assert (WIFSTOPPED (status));
2408
ca2163eb
PA
2409 /* Handle GNU/Linux's syscall SIGTRAPs. */
2410 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2411 {
2412 /* No longer need the sysgood bit. The ptrace event ends up
2413 recorded in lp->waitstatus if we care for it. We can carry
2414 on handling the event like a regular SIGTRAP from here
2415 on. */
2416 status = W_STOPCODE (SIGTRAP);
2417 if (linux_handle_syscall_trap (lp, 1))
2418 return wait_lwp (lp);
2419 }
2420
d6b0e80f
AC
2421 /* Handle GNU/Linux's extended waitstatus for trace events. */
2422 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2423 {
2424 if (debug_linux_nat)
2425 fprintf_unfiltered (gdb_stdlog,
2426 "WL: Handling extended status 0x%06x\n",
2427 status);
3d799a95 2428 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2429 return wait_lwp (lp);
2430 }
2431
2432 return status;
2433}
2434
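The ECHILD/__WCLONE dance at the top of wait_lwp reflects a waitpid quirk: an LWP created with clone(2) is only reported when __WCLONE is passed. A minimal sketch of that fallback pattern, with the flag value stated as an assumption (it is not the GDB helper):

#include <errno.h>
#include <sys/types.h>
#include <sys/wait.h>

#ifndef __WCLONE
# define __WCLONE 0x80000000                 /* assumed Linux value */
#endif

static pid_t
wait_lwp_any_flavor (pid_t lwpid, int *status)
{
  pid_t ret = waitpid (lwpid, status, 0);

  if (ret == -1 && errno == ECHILD)
    ret = waitpid (lwpid, status, __WCLONE); /* it was a cloned LWP */

  return ret;
}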
9f0bdab8
DJ
2435/* Save the most recent siginfo for LP. This is currently only called
2436 for SIGTRAP; some ports use the si_addr field for
2437 target_stopped_data_address. In the future, it may also be used to
2438 restore the siginfo of requeued signals. */
2439
2440static void
2441save_siginfo (struct lwp_info *lp)
2442{
2443 errno = 0;
2444 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2445 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2446
2447 if (errno != 0)
2448 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2449}
2450
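The PTRACE_GETSIGINFO call made by save_siginfo, with its zero-on-failure fallback, looks like this as a hypothetical standalone helper (not GDB code):

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
fetch_siginfo (pid_t lwpid, siginfo_t *si)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, lwpid, (void *) 0, si);
  if (errno != 0)
    {
      memset (si, 0, sizeof (*si));          /* mirror save_siginfo's fallback */
      return -1;
    }
  return 0;
}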
d6b0e80f
AC
2451/* Send a SIGSTOP to LP. */
2452
2453static int
2454stop_callback (struct lwp_info *lp, void *data)
2455{
2456 if (!lp->stopped && !lp->signalled)
2457 {
2458 int ret;
2459
2460 if (debug_linux_nat)
2461 {
2462 fprintf_unfiltered (gdb_stdlog,
2463 "SC: kill %s **<SIGSTOP>**\n",
2464 target_pid_to_str (lp->ptid));
2465 }
2466 errno = 0;
2467 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2468 if (debug_linux_nat)
2469 {
2470 fprintf_unfiltered (gdb_stdlog,
2471 "SC: lwp kill %d %s\n",
2472 ret,
2473 errno ? safe_strerror (errno) : "ERRNO-OK");
2474 }
2475
2476 lp->signalled = 1;
2477 gdb_assert (lp->status == 0);
2478 }
2479
2480 return 0;
2481}
2482
57380f4e 2483/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2484
2485static int
57380f4e
DJ
2486linux_nat_has_pending_sigint (int pid)
2487{
2488 sigset_t pending, blocked, ignored;
57380f4e
DJ
2489
2490 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2491
2492 if (sigismember (&pending, SIGINT)
2493 && !sigismember (&ignored, SIGINT))
2494 return 1;
2495
2496 return 0;
2497}
2498
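linux_nat_has_pending_sigint relies on linux_proc_pending_signals, which reads the per-process signal masks out of procfs. A rough sketch of what that boils down to for a single signal, under the assumption that /proc/<pid>/status exposes SigPnd/ShdPnd/SigIgn as 64-bit hex masks with bit (signo - 1) per signal; this is an illustration, not the GDB routine:

#include <stdio.h>

static int
proc_signal_pending (int pid, int signo)
{
  char path[64], line[128];
  unsigned long long pending = 0, shared = 0, ignored = 0, mask;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    {
      if (sscanf (line, "SigPnd: %llx", &mask) == 1)
        pending = mask;
      else if (sscanf (line, "ShdPnd: %llx", &mask) == 1)
        shared = mask;
      else if (sscanf (line, "SigIgn: %llx", &mask) == 1)
        ignored = mask;
    }
  fclose (f);

  mask = 1ULL << (signo - 1);
  return ((pending | shared) & mask) != 0 && (ignored & mask) == 0;
}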
2499/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2500
2501static int
2502set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2503{
57380f4e
DJ
2504 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2505 flag to consume the next one. */
2506 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2507 && WSTOPSIG (lp->status) == SIGINT)
2508 lp->status = 0;
2509 else
2510 lp->ignore_sigint = 1;
2511
2512 return 0;
2513}
2514
2515/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2516 This function is called after we know the LWP has stopped; if the LWP
2517 stopped before the expected SIGINT was delivered, then it will never have
2518 arrived. Also, if the signal was delivered to a shared queue and consumed
2519 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2520
57380f4e
DJ
2521static void
2522maybe_clear_ignore_sigint (struct lwp_info *lp)
2523{
2524 if (!lp->ignore_sigint)
2525 return;
2526
2527 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2528 {
2529 if (debug_linux_nat)
2530 fprintf_unfiltered (gdb_stdlog,
2531 "MCIS: Clearing bogus flag for %s\n",
2532 target_pid_to_str (lp->ptid));
2533 lp->ignore_sigint = 0;
2534 }
2535}
2536
ebec9a0f
PA
2537/* Fetch the possible triggered data watchpoint info and store it in
2538 LP.
2539
2540 On some archs, like x86, that use debug registers to set
2541 watchpoints, it's possible that the way to know which watched
2542 address trapped, is to check the register that is used to select
2543 which address to watch. Problem is, between setting the watchpoint
2544 and reading back which data address trapped, the user may change
2545 the set of watchpoints, and, as a consequence, GDB changes the
2546 debug registers in the inferior. To avoid reading back a stale
2547 stopped-data-address when that happens, we cache in LP the fact
2548 that a watchpoint trapped, and the corresponding data address, as
2549 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2550 registers meanwhile, we have the cached data we can rely on. */
2551
2552static void
2553save_sigtrap (struct lwp_info *lp)
2554{
2555 struct cleanup *old_chain;
2556
2557 if (linux_ops->to_stopped_by_watchpoint == NULL)
2558 {
2559 lp->stopped_by_watchpoint = 0;
2560 return;
2561 }
2562
2563 old_chain = save_inferior_ptid ();
2564 inferior_ptid = lp->ptid;
2565
2566 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2567
2568 if (lp->stopped_by_watchpoint)
2569 {
2570 if (linux_ops->to_stopped_data_address != NULL)
2571 lp->stopped_data_address_p =
2572 linux_ops->to_stopped_data_address (&current_target,
2573 &lp->stopped_data_address);
2574 else
2575 lp->stopped_data_address_p = 0;
2576 }
2577
2578 do_cleanups (old_chain);
2579}
2580
2581/* See save_sigtrap. */
2582
2583static int
2584linux_nat_stopped_by_watchpoint (void)
2585{
2586 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2587
2588 gdb_assert (lp != NULL);
2589
2590 return lp->stopped_by_watchpoint;
2591}
2592
2593static int
2594linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2595{
2596 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2597
2598 gdb_assert (lp != NULL);
2599
2600 *addr_p = lp->stopped_data_address;
2601
2602 return lp->stopped_data_address_p;
2603}
2604
26ab7092
JK
2605/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2606
2607static int
2608sigtrap_is_event (int status)
2609{
2610 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2611}
2612
2613/* SIGTRAP-like events recognizer. */
2614
2615static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2616
00390b84
JK
2617/* Check for SIGTRAP-like events in LP. */
2618
2619static int
2620linux_nat_lp_status_is_event (struct lwp_info *lp)
2621{
2622 /* We check for lp->waitstatus in addition to lp->status, because we can
2623 have pending process exits recorded in lp->status
2624 and W_EXITCODE(0,0) == 0. We should probably have an additional
2625 lp->status_p flag. */
2626
2627 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2628 && linux_nat_status_is_event (lp->status));
2629}
2630
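The comment in linux_nat_lp_status_is_event about W_EXITCODE(0,0) == 0 is easy to verify: a clean exit with code 0 encodes to a zero status, which is why lp->status alone cannot distinguish "nothing pending" from "exited normally" and lp->waitstatus.kind is checked as well. A tiny sketch, with the macro fallback stated as an assumption:

#include <stdio.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
# define W_EXITCODE(ret, sig) (((ret) << 8) | (sig))  /* assumed glibc encoding */
#endif

int
main (void)
{
  int status = W_EXITCODE (0, 0);            /* clean exit with code 0 */

  printf ("raw=%d WIFEXITED=%d WEXITSTATUS=%d\n",
          status, WIFEXITED (status), WEXITSTATUS (status));
  return 0;
}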
26ab7092
JK
2631/* Set alternative SIGTRAP-like events recognizer. If
2632 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2633 applied. */
2634
2635void
2636linux_nat_set_status_is_event (struct target_ops *t,
2637 int (*status_is_event) (int status))
2638{
2639 linux_nat_status_is_event = status_is_event;
2640}
2641
57380f4e
DJ
2642/* Wait until LP is stopped. */
2643
2644static int
2645stop_wait_callback (struct lwp_info *lp, void *data)
2646{
6c95b8df
PA
2647 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2648
2649 /* If this is a vfork parent, bail out, it is not going to report
2650 any SIGSTOP until the vfork is done with. */
2651 if (inf->vfork_child != NULL)
2652 return 0;
2653
d6b0e80f
AC
2654 if (!lp->stopped)
2655 {
2656 int status;
2657
2658 status = wait_lwp (lp);
2659 if (status == 0)
2660 return 0;
2661
57380f4e
DJ
2662 if (lp->ignore_sigint && WIFSTOPPED (status)
2663 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2664 {
57380f4e 2665 lp->ignore_sigint = 0;
d6b0e80f
AC
2666
2667 errno = 0;
2668 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2669 if (debug_linux_nat)
2670 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2671 "PTRACE_CONT %s, 0, 0 (%s) "
2672 "(discarding SIGINT)\n",
d6b0e80f
AC
2673 target_pid_to_str (lp->ptid),
2674 errno ? safe_strerror (errno) : "OK");
2675
57380f4e 2676 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2677 }
2678
57380f4e
DJ
2679 maybe_clear_ignore_sigint (lp);
2680
d6b0e80f
AC
2681 if (WSTOPSIG (status) != SIGSTOP)
2682 {
26ab7092 2683 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2684 {
2685 /* If a LWP other than the LWP that we're reporting an
2686 event for has hit a GDB breakpoint (as opposed to
2687 some random trap signal), then just arrange for it to
2688 hit it again later. We don't keep the SIGTRAP status
2689 and don't forward the SIGTRAP signal to the LWP. We
2690 will handle the current event, eventually we will
2691 resume all LWPs, and this one will get its breakpoint
2692 trap again.
2693
2694 If we do not do this, then we run the risk that the
2695 user will delete or disable the breakpoint, but the
2696 thread will have already tripped on it. */
2697
9f0bdab8
DJ
2698 /* Save the trap's siginfo in case we need it later. */
2699 save_siginfo (lp);
2700
ebec9a0f
PA
2701 save_sigtrap (lp);
2702
1777feb0 2703 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2704 errno = 0;
2705 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2706 if (debug_linux_nat)
2707 {
2708 fprintf_unfiltered (gdb_stdlog,
2709 "PTRACE_CONT %s, 0, 0 (%s)\n",
2710 target_pid_to_str (lp->ptid),
2711 errno ? safe_strerror (errno) : "OK");
2712
2713 fprintf_unfiltered (gdb_stdlog,
2714 "SWC: Candidate SIGTRAP event in %s\n",
2715 target_pid_to_str (lp->ptid));
2716 }
710151dd 2717 /* Hold this event/waitstatus while we check to see if
1777feb0 2718 there are any more (we still want to get that SIGSTOP). */
57380f4e 2719 stop_wait_callback (lp, NULL);
710151dd 2720
7feb7d06
PA
2721 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2722 there's another event, throw it back into the
1777feb0 2723 queue. */
7feb7d06 2724 if (lp->status)
710151dd 2725 {
7feb7d06
PA
2726 if (debug_linux_nat)
2727 fprintf_unfiltered (gdb_stdlog,
2728 "SWC: kill %s, %s\n",
2729 target_pid_to_str (lp->ptid),
2730 status_to_str ((int) status));
2731 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2732 }
7feb7d06 2733
1777feb0 2734 /* Save the sigtrap event. */
7feb7d06 2735 lp->status = status;
d6b0e80f
AC
2736 return 0;
2737 }
2738 else
2739 {
2740 /* The thread was stopped with a signal other than
1777feb0 2741 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2742
2743 if (debug_linux_nat)
2744 {
2745 fprintf_unfiltered (gdb_stdlog,
2746 "SWC: Pending event %s in %s\n",
2747 status_to_str ((int) status),
2748 target_pid_to_str (lp->ptid));
2749 }
1777feb0 2750 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2751 errno = 0;
2752 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2753 if (debug_linux_nat)
2754 fprintf_unfiltered (gdb_stdlog,
2755 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2756 target_pid_to_str (lp->ptid),
2757 errno ? safe_strerror (errno) : "OK");
2758
2759 /* Hold this event/waitstatus while we check to see if
1777feb0 2760 there are any more (we still want to get that SIGSTOP). */
57380f4e 2761 stop_wait_callback (lp, NULL);
710151dd
PA
2762
2763 /* If the lp->status field is still empty, use it to
2764 hold this event. If not, then this event must be
2765 returned to the event queue of the LWP. */
7feb7d06 2766 if (lp->status)
d6b0e80f
AC
2767 {
2768 if (debug_linux_nat)
2769 {
2770 fprintf_unfiltered (gdb_stdlog,
2771 "SWC: kill %s, %s\n",
2772 target_pid_to_str (lp->ptid),
2773 status_to_str ((int) status));
2774 }
2775 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2776 }
710151dd
PA
2777 else
2778 lp->status = status;
d6b0e80f
AC
2779 return 0;
2780 }
2781 }
2782 else
2783 {
2784 /* We caught the SIGSTOP that we intended to catch, so
2785 there's no SIGSTOP pending. */
2786 lp->stopped = 1;
2787 lp->signalled = 0;
2788 }
2789 }
2790
2791 return 0;
2792}
2793
d6b0e80f
AC
2794/* Return non-zero if LP has a wait status pending. */
2795
2796static int
2797status_callback (struct lwp_info *lp, void *data)
2798{
2799 /* Only report a pending wait status if we pretend that this has
2800 indeed been resumed. */
ca2163eb
PA
2801 if (!lp->resumed)
2802 return 0;
2803
2804 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2805 {
2806 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2807 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2808 0', so a clean process exit can not be stored pending in
2809 lp->status, it is indistinguishable from
2810 no-pending-status. */
2811 return 1;
2812 }
2813
2814 if (lp->status != 0)
2815 return 1;
2816
2817 return 0;
d6b0e80f
AC
2818}
2819
2820/* Return non-zero if LP isn't stopped. */
2821
2822static int
2823running_callback (struct lwp_info *lp, void *data)
2824{
2825 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2826}
2827
2828/* Count the LWP's that have had events. */
2829
2830static int
2831count_events_callback (struct lwp_info *lp, void *data)
2832{
2833 int *count = data;
2834
2835 gdb_assert (count != NULL);
2836
e09490f1 2837 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2838 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2839 (*count)++;
2840
2841 return 0;
2842}
2843
2844/* Select the LWP (if any) that is currently being single-stepped. */
2845
2846static int
2847select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2848{
2849 if (lp->step && lp->status != 0)
2850 return 1;
2851 else
2852 return 0;
2853}
2854
2855/* Select the Nth LWP that has had a SIGTRAP event. */
2856
2857static int
2858select_event_lwp_callback (struct lwp_info *lp, void *data)
2859{
2860 int *selector = data;
2861
2862 gdb_assert (selector != NULL);
2863
1777feb0 2864 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2865 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2866 if ((*selector)-- == 0)
2867 return 1;
2868
2869 return 0;
2870}
2871
710151dd
PA
2872static int
2873cancel_breakpoint (struct lwp_info *lp)
2874{
2875 /* Arrange for a breakpoint to be hit again later. We don't keep
2876 the SIGTRAP status and don't forward the SIGTRAP signal to the
2877 LWP. We will handle the current event, eventually we will resume
2878 this LWP, and this breakpoint will trap again.
2879
2880 If we do not do this, then we run the risk that the user will
2881 delete or disable the breakpoint, but the LWP will have already
2882 tripped on it. */
2883
515630c5
UW
2884 struct regcache *regcache = get_thread_regcache (lp->ptid);
2885 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2886 CORE_ADDR pc;
2887
2888 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 2889 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2890 {
2891 if (debug_linux_nat)
2892 fprintf_unfiltered (gdb_stdlog,
2893 "CB: Push back breakpoint for %s\n",
2894 target_pid_to_str (lp->ptid));
2895
2896 /* Back up the PC if necessary. */
515630c5
UW
2897 if (gdbarch_decr_pc_after_break (gdbarch))
2898 regcache_write_pc (regcache, pc);
2899
710151dd
PA
2900 return 1;
2901 }
2902 return 0;
2903}
2904
d6b0e80f
AC
2905static int
2906cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2907{
2908 struct lwp_info *event_lp = data;
2909
2910 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2911 if (lp == event_lp)
2912 return 0;
2913
2914 /* If a LWP other than the LWP that we're reporting an event for has
2915 hit a GDB breakpoint (as opposed to some random trap signal),
2916 then just arrange for it to hit it again later. We don't keep
2917 the SIGTRAP status and don't forward the SIGTRAP signal to the
2918 LWP. We will handle the current event, eventually we will resume
2919 all LWPs, and this one will get its breakpoint trap again.
2920
2921 If we do not do this, then we run the risk that the user will
2922 delete or disable the breakpoint, but the LWP will have already
2923 tripped on it. */
2924
00390b84 2925 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
2926 && cancel_breakpoint (lp))
2927 /* Throw away the SIGTRAP. */
2928 lp->status = 0;
d6b0e80f
AC
2929
2930 return 0;
2931}
2932
2933/* Select one LWP out of those that have events pending. */
2934
2935static void
d90e17a7 2936select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2937{
2938 int num_events = 0;
2939 int random_selector;
2940 struct lwp_info *event_lp;
2941
ac264b3b 2942 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2943 (*orig_lp)->status = *status;
2944
2945 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
2946 event_lp = iterate_over_lwps (filter,
2947 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
2948 if (event_lp != NULL)
2949 {
2950 if (debug_linux_nat)
2951 fprintf_unfiltered (gdb_stdlog,
2952 "SEL: Select single-step %s\n",
2953 target_pid_to_str (event_lp->ptid));
2954 }
2955 else
2956 {
2957 /* No single-stepping LWP. Select one at random, out of those
2958 which have had SIGTRAP events. */
2959
2960 /* First see how many SIGTRAP events we have. */
d90e17a7 2961 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
2962
2963 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2964 random_selector = (int)
2965 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2966
2967 if (debug_linux_nat && num_events > 1)
2968 fprintf_unfiltered (gdb_stdlog,
2969 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2970 num_events, random_selector);
2971
d90e17a7
PA
2972 event_lp = iterate_over_lwps (filter,
2973 select_event_lwp_callback,
d6b0e80f
AC
2974 &random_selector);
2975 }
2976
2977 if (event_lp != NULL)
2978 {
2979 /* Switch the event LWP. */
2980 *orig_lp = event_lp;
2981 *status = event_lp->status;
2982 }
2983
2984 /* Flush the wait status for the event LWP. */
2985 (*orig_lp)->status = 0;
2986}
2987
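The random-selector arithmetic used by select_event_lwp maps rand() uniformly onto 0 .. num_events-1 so that no single LWP's SIGTRAPs starve the others. A standalone illustration of just that expression (not GDB code):

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int num_events = 3;
  int random_selector;

  random_selector = (int) ((num_events * (double) rand ())
                           / (RAND_MAX + 1.0));

  printf ("selected SIGTRAP event #%d of %d\n", random_selector, num_events);
  return 0;
}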
2988/* Return non-zero if LP has been resumed. */
2989
2990static int
2991resumed_callback (struct lwp_info *lp, void *data)
2992{
2993 return lp->resumed;
2994}
2995
d6b0e80f
AC
2996/* Stop an active thread, verify it still exists, then resume it. */
2997
2998static int
2999stop_and_resume_callback (struct lwp_info *lp, void *data)
3000{
3001 struct lwp_info *ptr;
3002
3003 if (!lp->stopped && !lp->signalled)
3004 {
3005 stop_callback (lp, NULL);
3006 stop_wait_callback (lp, NULL);
3007 /* Resume if the lwp still exists. */
3008 for (ptr = lwp_list; ptr; ptr = ptr->next)
3009 if (lp == ptr)
3010 {
3011 resume_callback (lp, NULL);
3012 resume_set_callback (lp, NULL);
3013 }
3014 }
3015 return 0;
3016}
3017
02f3fc28 3018/* Check if we should go on and pass this event to common code.
fa2c6a57 3019 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
3020static struct lwp_info *
3021linux_nat_filter_event (int lwpid, int status, int options)
3022{
3023 struct lwp_info *lp;
3024
3025 lp = find_lwp_pid (pid_to_ptid (lwpid));
3026
3027 /* Check for stop events reported by a process we didn't already
3028 know about - anything not already in our LWP list.
3029
3030 If we're expecting to receive stopped processes after
3031 fork, vfork, and clone events, then we'll just add the
3032 new one to our list and go back to waiting for the event
3033 to be reported - the stopped process might be returned
3034 from waitpid before or after the event is. */
3035 if (WIFSTOPPED (status) && !lp)
3036 {
3037 linux_record_stopped_pid (lwpid, status);
3038 return NULL;
3039 }
3040
3041 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3042 our list, i.e. not part of the current process. This can happen
fd62cb89 3043 if we detach from a program we originally forked and then it
02f3fc28
PA
3044 exits. */
3045 if (!WIFSTOPPED (status) && !lp)
3046 return NULL;
3047
3048 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
3049 CLONE_PTRACE processes which do not use the thread library -
3050 otherwise we wouldn't find the new LWP this way. That doesn't
3051 currently work, and the following code is currently unreachable
3052 due to the two blocks above. If it's fixed some day, this code
3053 should be broken out into a function so that we can also pick up
3054 LWPs from the new interface. */
3055 if (!lp)
3056 {
3057 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
3058 if (options & __WCLONE)
3059 lp->cloned = 1;
3060
3061 gdb_assert (WIFSTOPPED (status)
3062 && WSTOPSIG (status) == SIGSTOP);
3063 lp->signalled = 1;
3064
3065 if (!in_thread_list (inferior_ptid))
3066 {
3067 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
3068 GET_PID (inferior_ptid));
3069 add_thread (inferior_ptid);
3070 }
3071
3072 add_thread (lp->ptid);
3073 }
3074
ca2163eb
PA
3075 /* Handle GNU/Linux's syscall SIGTRAPs. */
3076 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3077 {
3078 /* No longer need the sysgood bit. The ptrace event ends up
3079 recorded in lp->waitstatus if we care for it. We can carry
3080 on handling the event like a regular SIGTRAP from here
3081 on. */
3082 status = W_STOPCODE (SIGTRAP);
3083 if (linux_handle_syscall_trap (lp, 0))
3084 return NULL;
3085 }
02f3fc28 3086
ca2163eb
PA
3087 /* Handle GNU/Linux's extended waitstatus for trace events. */
3088 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3089 {
3090 if (debug_linux_nat)
3091 fprintf_unfiltered (gdb_stdlog,
3092 "LLW: Handling extended status 0x%06x\n",
3093 status);
3094 if (linux_handle_extended_wait (lp, status, 0))
3095 return NULL;
3096 }
3097
26ab7092 3098 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3099 {
3100 /* Save the trap's siginfo in case we need it later. */
3101 save_siginfo (lp);
3102
3103 save_sigtrap (lp);
3104 }
ca2163eb 3105
02f3fc28 3106 /* Check if the thread has exited. */
d90e17a7
PA
3107 if ((WIFEXITED (status) || WIFSIGNALED (status))
3108 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3109 {
9db03742
JB
3110 /* If this is the main thread, we must stop all threads and verify
3111 if they are still alive. This is because in the nptl thread model
3112 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3113 other than the main thread. We only get the main thread exit
3114 signal once all child threads have already exited. If we
3115 stop all the threads and use the stop_wait_callback to check
3116 if they have exited we can determine whether this signal
3117 should be ignored or whether it means the end of the debugged
3118 application, regardless of which threading model is being
5d3b6af6 3119 used. */
02f3fc28
PA
3120 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3121 {
3122 lp->stopped = 1;
d90e17a7
PA
3123 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3124 stop_and_resume_callback, NULL);
02f3fc28
PA
3125 }
3126
3127 if (debug_linux_nat)
3128 fprintf_unfiltered (gdb_stdlog,
3129 "LLW: %s exited.\n",
3130 target_pid_to_str (lp->ptid));
3131
d90e17a7 3132 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3133 {
3134 /* If there is at least one more LWP, then the exit signal
3135 was not the end of the debugged application and should be
3136 ignored. */
3137 exit_lwp (lp);
3138 return NULL;
3139 }
02f3fc28
PA
3140 }
3141
3142 /* Check if the current LWP has previously exited. In the nptl
3143 thread model, LWPs other than the main thread do not issue
3144 signals when they exit so we must check whenever the thread has
3145 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3146 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3147 {
d90e17a7
PA
3148 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3149
02f3fc28
PA
3150 if (debug_linux_nat)
3151 fprintf_unfiltered (gdb_stdlog,
3152 "LLW: %s exited.\n",
3153 target_pid_to_str (lp->ptid));
3154
3155 exit_lwp (lp);
3156
3157 /* Make sure there is at least one thread running. */
d90e17a7 3158 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3159
3160 /* Discard the event. */
3161 return NULL;
3162 }
3163
3164 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3165 an attempt to stop an LWP. */
3166 if (lp->signalled
3167 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3168 {
3169 if (debug_linux_nat)
3170 fprintf_unfiltered (gdb_stdlog,
3171 "LLW: Delayed SIGSTOP caught for %s.\n",
3172 target_pid_to_str (lp->ptid));
3173
3174 /* This is a delayed SIGSTOP. */
3175 lp->signalled = 0;
3176
3177 registers_changed ();
3178
28439f5e 3179 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28
PA
3180 lp->step, TARGET_SIGNAL_0);
3181 if (debug_linux_nat)
3182 fprintf_unfiltered (gdb_stdlog,
3183 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3184 lp->step ?
3185 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3186 target_pid_to_str (lp->ptid));
3187
3188 lp->stopped = 0;
3189 gdb_assert (lp->resumed);
3190
3191 /* Discard the event. */
3192 return NULL;
3193 }
3194
57380f4e
DJ
3195 /* Make sure we don't report a SIGINT that we have already displayed
3196 for another thread. */
3197 if (lp->ignore_sigint
3198 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3199 {
3200 if (debug_linux_nat)
3201 fprintf_unfiltered (gdb_stdlog,
3202 "LLW: Delayed SIGINT caught for %s.\n",
3203 target_pid_to_str (lp->ptid));
3204
3205 /* This is a delayed SIGINT. */
3206 lp->ignore_sigint = 0;
3207
3208 registers_changed ();
28439f5e 3209 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3210 lp->step, TARGET_SIGNAL_0);
3211 if (debug_linux_nat)
3212 fprintf_unfiltered (gdb_stdlog,
3213 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3214 lp->step ?
3215 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3216 target_pid_to_str (lp->ptid));
3217
3218 lp->stopped = 0;
3219 gdb_assert (lp->resumed);
3220
3221 /* Discard the event. */
3222 return NULL;
3223 }
3224
02f3fc28
PA
3225 /* An interesting event. */
3226 gdb_assert (lp);
ca2163eb 3227 lp->status = status;
02f3fc28
PA
3228 return lp;
3229}
3230
d6b0e80f 3231static ptid_t
7feb7d06 3232linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3233 ptid_t ptid, struct target_waitstatus *ourstatus,
3234 int target_options)
d6b0e80f 3235{
7feb7d06 3236 static sigset_t prev_mask;
d6b0e80f
AC
3237 struct lwp_info *lp = NULL;
3238 int options = 0;
3239 int status = 0;
d90e17a7 3240 pid_t pid;
d6b0e80f 3241
b84876c2
PA
3242 if (debug_linux_nat_async)
3243 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3244
f973ed9c
DJ
3245 /* The first time we get here after starting a new inferior, we may
3246 not have added it to the LWP list yet - this is the earliest
3247 moment at which we know its PID. */
d90e17a7 3248 if (ptid_is_pid (inferior_ptid))
f973ed9c 3249 {
27c9d204
PA
3250 /* Upgrade the main thread's ptid. */
3251 thread_change_ptid (inferior_ptid,
3252 BUILD_LWP (GET_PID (inferior_ptid),
3253 GET_PID (inferior_ptid)));
3254
f973ed9c
DJ
3255 lp = add_lwp (inferior_ptid);
3256 lp->resumed = 1;
3257 }
3258
7feb7d06
PA
3259 /* Make sure SIGCHLD is blocked. */
3260 block_child_signals (&prev_mask);
d6b0e80f 3261
d90e17a7
PA
3262 if (ptid_equal (ptid, minus_one_ptid))
3263 pid = -1;
3264 else if (ptid_is_pid (ptid))
3265 /* A request to wait for a specific tgid. This is not possible
3266 with waitpid, so instead, we wait for any child, and leave
3267 children we're not interested in right now with a pending
3268 status to report later. */
3269 pid = -1;
3270 else
3271 pid = GET_LWP (ptid);
3272
d6b0e80f 3273retry:
d90e17a7
PA
3274 lp = NULL;
3275 status = 0;
d6b0e80f 3276
e3e9f5a2
PA
3277 /* Make sure that of those LWPs we want to get an event from, there
3278 is at least one LWP that has been resumed. If there's none, just
3279 bail out. The core may just be flushing asynchronously all
3280 events. */
3281 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3282 {
3283 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3284
3285 if (debug_linux_nat_async)
3286 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3287
3288 restore_child_signals_mask (&prev_mask);
3289 return minus_one_ptid;
3290 }
d6b0e80f
AC
3291
3292 /* First check if there is a LWP with a wait status pending. */
3293 if (pid == -1)
3294 {
3295 /* Any LWP that's been resumed will do. */
d90e17a7 3296 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3297 if (lp)
3298 {
ca2163eb 3299 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3300 fprintf_unfiltered (gdb_stdlog,
3301 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3302 status_to_str (lp->status),
d6b0e80f
AC
3303 target_pid_to_str (lp->ptid));
3304 }
3305
b84876c2 3306 /* But if we don't find one, we'll have to wait, and check both
7feb7d06
PA
3307 cloned and uncloned processes. We start with the cloned
3308 processes. */
d6b0e80f
AC
3309 options = __WCLONE | WNOHANG;
3310 }
3311 else if (is_lwp (ptid))
3312 {
3313 if (debug_linux_nat)
3314 fprintf_unfiltered (gdb_stdlog,
3315 "LLW: Waiting for specific LWP %s.\n",
3316 target_pid_to_str (ptid));
3317
3318 /* We have a specific LWP to check. */
3319 lp = find_lwp_pid (ptid);
3320 gdb_assert (lp);
d6b0e80f 3321
ca2163eb 3322 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3323 fprintf_unfiltered (gdb_stdlog,
3324 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3325 status_to_str (lp->status),
d6b0e80f
AC
3326 target_pid_to_str (lp->ptid));
3327
3328 /* If we have to wait, take into account whether PID is a cloned
3329 process or not. And we have to convert it to something that
3330 the layer beneath us can understand. */
3331 options = lp->cloned ? __WCLONE : 0;
3332 pid = GET_LWP (ptid);
d90e17a7
PA
3333
3334 /* We check for lp->waitstatus in addition to lp->status,
3335 because we can have pending process exits recorded in
3336 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3337 an additional lp->status_p flag. */
ca2163eb 3338 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3339 lp = NULL;
d6b0e80f
AC
3340 }
3341
d90e17a7 3342 if (lp && lp->signalled)
d6b0e80f
AC
3343 {
3344 /* A pending SIGSTOP may interfere with the normal stream of
3345 events. In a typical case where interference is a problem,
3346 we have a SIGSTOP signal pending for LWP A while
3347 single-stepping it, encounter an event in LWP B, and take the
3348 pending SIGSTOP while trying to stop LWP A. After processing
3349 the event in LWP B, LWP A is continued, and we'll never see
3350 the SIGTRAP associated with the last time we were
3351 single-stepping LWP A. */
3352
3353 /* Resume the thread. It should halt immediately returning the
3354 pending SIGSTOP. */
3355 registers_changed ();
28439f5e 3356 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3357 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3358 if (debug_linux_nat)
3359 fprintf_unfiltered (gdb_stdlog,
3360 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3361 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3362 target_pid_to_str (lp->ptid));
3363 lp->stopped = 0;
3364 gdb_assert (lp->resumed);
3365
ca2163eb
PA
3366 /* Catch the pending SIGSTOP. */
3367 status = lp->status;
3368 lp->status = 0;
3369
d6b0e80f 3370 stop_wait_callback (lp, NULL);
ca2163eb
PA
3371
3372 /* If the lp->status field isn't empty, we caught another signal
3373 while flushing the SIGSTOP. Return it back to the event
3374 queue of the LWP, as we already have an event to handle. */
3375 if (lp->status)
3376 {
3377 if (debug_linux_nat)
3378 fprintf_unfiltered (gdb_stdlog,
3379 "LLW: kill %s, %s\n",
3380 target_pid_to_str (lp->ptid),
3381 status_to_str (lp->status));
3382 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3383 }
3384
3385 lp->status = status;
d6b0e80f
AC
3386 }
3387
b84876c2
PA
3388 if (!target_can_async_p ())
3389 {
3390 /* Causes SIGINT to be passed on to the attached process. */
3391 set_sigint_trap ();
b84876c2 3392 }
d6b0e80f 3393
47608cb1
PA
3394 /* Translate generic target_wait options into waitpid options. */
3395 if (target_options & TARGET_WNOHANG)
3396 options |= WNOHANG;
7feb7d06 3397
d90e17a7 3398 while (lp == NULL)
d6b0e80f
AC
3399 {
3400 pid_t lwpid;
3401
7feb7d06 3402 lwpid = my_waitpid (pid, &status, options);
b84876c2 3403
d6b0e80f
AC
3404 if (lwpid > 0)
3405 {
3406 gdb_assert (pid == -1 || lwpid == pid);
3407
3408 if (debug_linux_nat)
3409 {
3410 fprintf_unfiltered (gdb_stdlog,
3411 "LLW: waitpid %ld received %s\n",
3412 (long) lwpid, status_to_str (status));
3413 }
3414
02f3fc28 3415 lp = linux_nat_filter_event (lwpid, status, options);
d90e17a7 3416
33355866
JK
3417 /* STATUS is now no longer valid, use LP->STATUS instead. */
3418 status = 0;
3419
d90e17a7
PA
3420 if (lp
3421 && ptid_is_pid (ptid)
3422 && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
d6b0e80f 3423 {
e3e9f5a2
PA
3424 gdb_assert (lp->resumed);
3425
d90e17a7 3426 if (debug_linux_nat)
3e43a32a
MS
3427 fprintf (stderr,
3428 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3429 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3430
ca2163eb 3431 if (WIFSTOPPED (lp->status))
d90e17a7 3432 {
ca2163eb 3433 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3434 {
e3e9f5a2
PA
3435 /* Cancel breakpoint hits. The breakpoint may
3436 be removed before we fetch events from this
3437 process to report to the core. It is best
3438 not to assume the moribund breakpoints
3439 heuristic always handles these cases --- it
3440 could be that too many events go through to the
3441 core before this one is handled. All-stop
3442 always cancels breakpoint hits in all
3443 threads. */
3444 if (non_stop
00390b84 3445 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3446 && cancel_breakpoint (lp))
3447 {
3448 /* Throw away the SIGTRAP. */
3449 lp->status = 0;
3450
3451 if (debug_linux_nat)
3452 fprintf (stderr,
3e43a32a
MS
3453 "LLW: LWP %ld hit a breakpoint while"
3454 " waiting for another process;"
3455 " cancelled it\n",
e3e9f5a2
PA
3456 ptid_get_lwp (lp->ptid));
3457 }
3458 lp->stopped = 1;
d90e17a7
PA
3459 }
3460 else
3461 {
3462 lp->stopped = 1;
3463 lp->signalled = 0;
3464 }
3465 }
33355866 3466 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3467 {
3468 if (debug_linux_nat)
3e43a32a
MS
3469 fprintf (stderr,
3470 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3471 ptid_get_lwp (lp->ptid));
3472
3473 /* This was the last lwp in the process. Since
3474 events are serialized to GDB core, and we can't
3475 report this one right now, but GDB core and the
3476 other target layers will want to be notified
3477 about the exit code/signal, leave the status
3478 pending for the next time we're able to report
3479 it. */
d90e17a7
PA
3480
3481 /* Prevent trying to stop this thread again. We'll
3482 never try to resume it because it has a pending
3483 status. */
3484 lp->stopped = 1;
3485
3486 /* Dead LWPs aren't expected to report a pending
3487 SIGSTOP. */
3488 lp->signalled = 0;
3489
3490 /* Store the pending event in the waitstatus as
3491 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3492 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3493 }
3494
3495 /* Keep looking. */
3496 lp = NULL;
d6b0e80f
AC
3497 continue;
3498 }
3499
d90e17a7
PA
3500 if (lp)
3501 break;
3502 else
3503 {
3504 if (pid == -1)
3505 {
3506 /* waitpid did return something. Restart over. */
3507 options |= __WCLONE;
3508 }
3509 continue;
3510 }
d6b0e80f
AC
3511 }
3512
3513 if (pid == -1)
3514 {
3515 /* Alternate between checking cloned and uncloned processes. */
3516 options ^= __WCLONE;
3517
b84876c2
PA
3518 /* And every time we have checked both:
3519 In async mode, return to event loop;
3520 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 3521 if (options & __WCLONE)
b84876c2 3522 {
47608cb1 3523 if (target_options & TARGET_WNOHANG)
b84876c2
PA
3524 {
3525 /* No interesting event. */
3526 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3527
b84876c2
PA
3528 if (debug_linux_nat_async)
3529 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3530
7feb7d06 3531 restore_child_signals_mask (&prev_mask);
b84876c2
PA
3532 return minus_one_ptid;
3533 }
3534
3535 sigsuspend (&suspend_mask);
3536 }
d6b0e80f 3537 }
28736962
PA
3538 else if (target_options & TARGET_WNOHANG)
3539 {
3540 /* No interesting event for PID yet. */
3541 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3542
3543 if (debug_linux_nat_async)
3544 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3545
3546 restore_child_signals_mask (&prev_mask);
3547 return minus_one_ptid;
3548 }
d6b0e80f
AC
3549
3550 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3551 gdb_assert (lp == NULL);
d6b0e80f
AC
3552 }
3553
b84876c2 3554 if (!target_can_async_p ())
d26b5354 3555 clear_sigint_trap ();
d6b0e80f
AC
3556
3557 gdb_assert (lp);
3558
ca2163eb
PA
3559 status = lp->status;
3560 lp->status = 0;
3561
d6b0e80f
AC
3562 /* Don't report signals that GDB isn't interested in, such as
3563 signals that are neither printed nor stopped upon. Stopping all
3564 threads can be a bit time-consuming, so if we want decent
3565 performance with heavily multi-threaded programs, especially when
3566 they're using a high frequency timer, we'd better avoid it if we
3567 can. */
3568
3569 if (WIFSTOPPED (status))
3570 {
423ec54c 3571 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b0e80f 3572
2455069d
UW
3573 /* When using hardware single-step, we need to report every signal.
3574 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3575 if (!lp->step
2455069d 3576 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3577 {
3578 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3579 here? It is not clear we should. GDB may not expect
3580 other threads to run. On the other hand, not resuming
3581 newly attached threads may cause an unwanted delay in
3582 getting them running. */
3583 registers_changed ();
28439f5e 3584 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3585 lp->step, signo);
d6b0e80f
AC
3586 if (debug_linux_nat)
3587 fprintf_unfiltered (gdb_stdlog,
3588 "LLW: %s %s, %s (preempt 'handle')\n",
3589 lp->step ?
3590 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3591 target_pid_to_str (lp->ptid),
423ec54c
JK
3592 (signo != TARGET_SIGNAL_0
3593 ? strsignal (target_signal_to_host (signo))
3594 : "0"));
d6b0e80f 3595 lp->stopped = 0;
d6b0e80f
AC
3596 goto retry;
3597 }
3598
1ad15515 3599 if (!non_stop)
d6b0e80f 3600 {
1ad15515
PA
3601 /* Only do the below in all-stop, as we currently use SIGINT
3602 to implement target_stop (see linux_nat_stop) in
3603 non-stop. */
3604 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3605 {
3606 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3607 forwarded to the entire process group, that is, all LWPs
3608 will receive it - unless they're using CLONE_THREAD to
3609 share signals. Since we only want to report it once, we
3610 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3611 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3612 set_ignore_sigint, NULL);
1ad15515
PA
3613 lp->ignore_sigint = 0;
3614 }
3615 else
3616 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3617 }
3618 }
3619
3620 /* This LWP is stopped now. */
3621 lp->stopped = 1;
3622
3623 if (debug_linux_nat)
3624 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3625 status_to_str (status), target_pid_to_str (lp->ptid));
3626
4c28f408
PA
3627 if (!non_stop)
3628 {
3629 /* Now stop all other LWPs ... */
d90e17a7 3630 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3631
3632 /* ... and wait until all of them have reported back that
3633 they're no longer running. */
d90e17a7 3634 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3635
3636 /* If we're not waiting for a specific LWP, choose an event LWP
3637 from among those that have had events. Giving equal priority
3638 to all LWPs that have had events helps prevent
3639 starvation. */
3640 if (pid == -1)
d90e17a7 3641 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3642
e3e9f5a2
PA
3643 /* Now that we've selected our final event LWP, cancel any
3644 breakpoints in other LWPs that have hit a GDB breakpoint.
3645 See the comment in cancel_breakpoints_callback to find out
3646 why. */
3647 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3648
3649 /* In all-stop, from the core's perspective, all LWPs are now
3650 stopped until a new resume action is sent over. */
3651 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3652 }
3653 else
3654 lp->resumed = 0;
d6b0e80f 3655
26ab7092 3656 if (linux_nat_status_is_event (status))
d6b0e80f 3657 {
d6b0e80f
AC
3658 if (debug_linux_nat)
3659 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3660 "LLW: trap ptid is %s.\n",
3661 target_pid_to_str (lp->ptid));
d6b0e80f 3662 }
d6b0e80f
AC
3663
3664 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3665 {
3666 *ourstatus = lp->waitstatus;
3667 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3668 }
3669 else
3670 store_waitstatus (ourstatus, status);
3671
b84876c2
PA
3672 if (debug_linux_nat_async)
3673 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3674
7feb7d06 3675 restore_child_signals_mask (&prev_mask);
1e225492
JK
3676
3677 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3678 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3679 lp->core = -1;
3680 else
3681 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3682
f973ed9c 3683 return lp->ptid;
d6b0e80f
AC
3684}
3685
e3e9f5a2
PA
3686/* Resume LWPs that are currently stopped without any pending status
3687 to report, but are resumed from the core's perspective. */
3688
3689static int
3690resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3691{
3692 ptid_t *wait_ptid_p = data;
3693
3694 if (lp->stopped
3695 && lp->resumed
3696 && lp->status == 0
3697 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3698 {
3699 gdb_assert (is_executing (lp->ptid));
3700
3701 /* Don't bother if there's a breakpoint at PC that we'd hit
3702 immediately, and we're not waiting for this LWP. */
3703 if (!ptid_match (lp->ptid, *wait_ptid_p))
3704 {
3705 struct regcache *regcache = get_thread_regcache (lp->ptid);
3706 CORE_ADDR pc = regcache_read_pc (regcache);
3707
3708 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3709 return 0;
3710 }
3711
3712 if (debug_linux_nat)
3713 fprintf_unfiltered (gdb_stdlog,
3714 "RSRL: resuming stopped-resumed LWP %s\n",
3715 target_pid_to_str (lp->ptid));
3716
3717 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3718 lp->step, TARGET_SIGNAL_0);
3719 lp->stopped = 0;
3720 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3721 lp->stopped_by_watchpoint = 0;
3722 }
3723
3724 return 0;
3725}
3726
7feb7d06
PA
3727static ptid_t
3728linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3729 ptid_t ptid, struct target_waitstatus *ourstatus,
3730 int target_options)
7feb7d06
PA
3731{
3732 ptid_t event_ptid;
3733
3734 if (debug_linux_nat)
3e43a32a
MS
3735 fprintf_unfiltered (gdb_stdlog,
3736 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
3737
3738 /* Flush the async file first. */
3739 if (target_can_async_p ())
3740 async_file_flush ();
3741
e3e9f5a2
PA
3742 /* Resume LWPs that are currently stopped without any pending status
3743 to report, but are resumed from the core's perspective. LWPs get
3744 in this state if we find them stopping at a time we're not
3745 interested in reporting the event (target_wait on a
3746 specific_process, for example, see linux_nat_wait_1), and
3747 meanwhile the event became uninteresting. Don't bother resuming
3748 LWPs we're not going to wait for if they'd stop immediately. */
3749 if (non_stop)
3750 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3751
47608cb1 3752 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3753
3754 /* If we requested any event, and something came out, assume there
3755 may be more. If we requested a specific lwp or process, also
3756 assume there may be more. */
3757 if (target_can_async_p ()
3758 && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3759 || !ptid_equal (ptid, minus_one_ptid)))
3760 async_file_mark ();
3761
3762 /* Get ready for the next event. */
3763 if (target_can_async_p ())
3764 target_async (inferior_event_handler, 0);
3765
3766 return event_ptid;
3767}
3768
d6b0e80f
AC
3769static int
3770kill_callback (struct lwp_info *lp, void *data)
3771{
3772 errno = 0;
3773 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3774 if (debug_linux_nat)
3775 fprintf_unfiltered (gdb_stdlog,
3776 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3777 target_pid_to_str (lp->ptid),
3778 errno ? safe_strerror (errno) : "OK");
3779
3780 return 0;
3781}
3782
3783static int
3784kill_wait_callback (struct lwp_info *lp, void *data)
3785{
3786 pid_t pid;
3787
3788 /* We must make sure that there are no pending events (delayed
3789 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3790 program doesn't interfere with any following debugging session. */
3791
3792 /* For cloned processes we must check both with __WCLONE and
3793 without, since the exit status of a cloned process isn't reported
3794 with __WCLONE. */
3795 if (lp->cloned)
3796 {
3797 do
3798 {
58aecb61 3799 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3800 if (pid != (pid_t) -1)
d6b0e80f 3801 {
e85a822c
DJ
3802 if (debug_linux_nat)
3803 fprintf_unfiltered (gdb_stdlog,
3804 "KWC: wait %s received unknown.\n",
3805 target_pid_to_str (lp->ptid));
3806 /* The Linux kernel sometimes fails to kill a thread
3807 completely after PTRACE_KILL; the thread goes from the stop
3808 point in do_fork out to the one in
3809 get_signal_to_deliver and waits again. So kill it
3810 again. */
3811 kill_callback (lp, NULL);
d6b0e80f
AC
3812 }
3813 }
3814 while (pid == GET_LWP (lp->ptid));
3815
3816 gdb_assert (pid == -1 && errno == ECHILD);
3817 }
3818
3819 do
3820 {
58aecb61 3821 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3822 if (pid != (pid_t) -1)
d6b0e80f 3823 {
e85a822c
DJ
3824 if (debug_linux_nat)
3825 fprintf_unfiltered (gdb_stdlog,
3826 "KWC: wait %s received unknown.\n",
3827 target_pid_to_str (lp->ptid));
3828 /* See the call to kill_callback above. */
3829 kill_callback (lp, NULL);
d6b0e80f
AC
3830 }
3831 }
3832 while (pid == GET_LWP (lp->ptid));
3833
3834 gdb_assert (pid == -1 && errno == ECHILD);
3835 return 0;
3836}
3837
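/* Kill the inferior.  If we are stopped at an unfollowed fork or vfork
   event, kill the other child first.  Then either kill every fork known
   to linux-fork, or stop and PTRACE_KILL each LWP of the current
   inferior, and finally mourn it.  */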
3838static void
7d85a9c0 3839linux_nat_kill (struct target_ops *ops)
d6b0e80f 3840{
f973ed9c
DJ
3841 struct target_waitstatus last;
3842 ptid_t last_ptid;
3843 int status;
d6b0e80f 3844
f973ed9c
DJ
3845 /* If we're stopped while forking and we haven't followed yet,
3846 kill the other task. We need to do this first because the
3847 parent will be sleeping if this is a vfork. */
d6b0e80f 3848
f973ed9c 3849 get_last_target_status (&last_ptid, &last);
d6b0e80f 3850
f973ed9c
DJ
3851 if (last.kind == TARGET_WAITKIND_FORKED
3852 || last.kind == TARGET_WAITKIND_VFORKED)
3853 {
3a3e9ee3 3854 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3855 wait (&status);
3856 }
3857
3858 if (forks_exist_p ())
7feb7d06 3859 linux_fork_killall ();
f973ed9c
DJ
3860 else
3861 {
d90e17a7 3862 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3863
4c28f408
PA
3864 /* Stop all threads before killing them, since ptrace requires
3865 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3866 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3867 /* ... and wait until all of them have reported back that
3868 they're no longer running. */
d90e17a7 3869 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3870
f973ed9c 3871 /* Kill all LWPs ... */
d90e17a7 3872 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3873
3874 /* ... and wait until we've flushed all events. */
d90e17a7 3875 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3876 }
3877
3878 target_mourn_inferior ();
d6b0e80f
AC
3879}
3880
3881static void
136d6dae 3882linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3883{
d90e17a7 3884 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 3885
f973ed9c 3886 if (! forks_exist_p ())
d90e17a7
PA
3887 /* Normal case, no other forks available. */
3888 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3889 else
3890 /* Multi-fork case. The current inferior_ptid has exited, but
3891 there are other viable forks to debug. Delete the exiting
3892 one and context-switch to the first available. */
3893 linux_fork_mourn_inferior ();
d6b0e80f
AC
3894}
3895
5b009018
PA
3896/* Convert a native/host siginfo object into/from the siginfo in the
3897 layout of the inferior's architecture. */
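/* DIRECTION is 0 to convert from the native (ptrace) layout in SIGINFO
   to the inferior's layout in INF_SIGINFO, and 1 to convert from
   INF_SIGINFO back to the native layout in SIGINFO.  */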
3898
3899static void
3900siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3901{
3902 int done = 0;
3903
3904 if (linux_nat_siginfo_fixup != NULL)
3905 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3906
3907 /* If there was no callback, or the callback didn't do anything,
3908 then just do a straight memcpy. */
3909 if (!done)
3910 {
3911 if (direction == 1)
3912 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3913 else
3914 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3915 }
3916}
3917
4aa995e1
PA
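/* Read or write the siginfo of the current LWP
   (TARGET_OBJECT_SIGNAL_INFO): fetch it with PTRACE_GETSIGINFO, convert
   between the native and inferior layouts as needed, and flush writes
   back with PTRACE_SETSIGINFO.  Returns the number of bytes
   transferred, or -1 on error.  */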
3918static LONGEST
3919linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3920 const char *annex, gdb_byte *readbuf,
3921 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3922{
4aa995e1
PA
3923 int pid;
3924 struct siginfo siginfo;
5b009018 3925 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
3926
3927 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3928 gdb_assert (readbuf || writebuf);
3929
3930 pid = GET_LWP (inferior_ptid);
3931 if (pid == 0)
3932 pid = GET_PID (inferior_ptid);
3933
3934 if (offset > sizeof (siginfo))
3935 return -1;
3936
3937 errno = 0;
3938 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3939 if (errno != 0)
3940 return -1;
3941
5b009018
PA
3942 /* When GDB is built as a 64-bit application, ptrace writes into
3943 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3944 inferior with a 64-bit GDB should look the same as debugging it
3945 with a 32-bit GDB, we need to convert it. GDB core always sees
3946 the converted layout, so any read/write will have to be done
3947 post-conversion. */
3948 siginfo_fixup (&siginfo, inf_siginfo, 0);
3949
4aa995e1
PA
3950 if (offset + len > sizeof (siginfo))
3951 len = sizeof (siginfo) - offset;
3952
3953 if (readbuf != NULL)
5b009018 3954 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3955 else
3956 {
5b009018
PA
3957 memcpy (inf_siginfo + offset, writebuf, len);
3958
3959 /* Convert back to ptrace layout before flushing it out. */
3960 siginfo_fixup (&siginfo, inf_siginfo, 1);
3961
4aa995e1
PA
3962 errno = 0;
3963 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3964 if (errno != 0)
3965 return -1;
3966 }
3967
3968 return len;
3969}
3970
10d6c8cd
DJ
3971static LONGEST
3972linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3973 const char *annex, gdb_byte *readbuf,
3974 const gdb_byte *writebuf,
3975 ULONGEST offset, LONGEST len)
d6b0e80f 3976{
4aa995e1 3977 struct cleanup *old_chain;
10d6c8cd 3978 LONGEST xfer;
d6b0e80f 3979
4aa995e1
PA
3980 if (object == TARGET_OBJECT_SIGNAL_INFO)
3981 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3982 offset, len);
3983
c35b1492
PA
3984 /* The target is connected but no live inferior is selected. Pass
3985 this request down to a lower stratum (e.g., the executable
3986 file). */
3987 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3988 return 0;
3989
4aa995e1
PA
3990 old_chain = save_inferior_ptid ();
3991
d6b0e80f
AC
3992 if (is_lwp (inferior_ptid))
3993 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3994
10d6c8cd
DJ
3995 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3996 offset, len);
d6b0e80f
AC
3997
3998 do_cleanups (old_chain);
3999 return xfer;
4000}
4001
4002static int
28439f5e 4003linux_thread_alive (ptid_t ptid)
d6b0e80f 4004{
8c6a60d1 4005 int err, tmp_errno;
4c28f408 4006
d6b0e80f
AC
4007 gdb_assert (is_lwp (ptid));
4008
4c28f408
PA
4009 /* Send signal 0 instead of any ptrace request, because ptracing a
4010 running thread errors out claiming that the thread doesn't
4011 exist. */
4012 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4013 tmp_errno = errno;
d6b0e80f
AC
4014 if (debug_linux_nat)
4015 fprintf_unfiltered (gdb_stdlog,
4c28f408 4016 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4017 target_pid_to_str (ptid),
8c6a60d1 4018 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4019
4c28f408 4020 if (err != 0)
d6b0e80f
AC
4021 return 0;
4022
4023 return 1;
4024}
4025
28439f5e
PA
4026static int
4027linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4028{
4029 return linux_thread_alive (ptid);
4030}
4031
d6b0e80f 4032static char *
117de6a9 4033linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4034{
4035 static char buf[64];
4036
a0ef4274 4037 if (is_lwp (ptid)
d90e17a7
PA
4038 && (GET_PID (ptid) != GET_LWP (ptid)
4039 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4040 {
4041 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4042 return buf;
4043 }
4044
4045 return normal_pid_to_str (ptid);
4046}
4047
4694da01
TT
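/* Return the name of the LWP of thread THR, as recorded in
   /proc/PID/task/LWP/comm, or NULL if it cannot be read.  The result,
   when non-NULL, points to a static buffer that is overwritten on each
   call.  */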
4048static char *
4049linux_nat_thread_name (struct thread_info *thr)
4050{
4051 int pid = ptid_get_pid (thr->ptid);
4052 long lwp = ptid_get_lwp (thr->ptid);
4053#define FORMAT "/proc/%d/task/%ld/comm"
4054 char buf[sizeof (FORMAT) + 30];
4055 FILE *comm_file;
4056 char *result = NULL;
4057
4058 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4059 comm_file = fopen (buf, "r");
4060 if (comm_file)
4061 {
4062 /* Not exported by the kernel, so we define it here. */
4063#define COMM_LEN 16
4064 static char line[COMM_LEN + 1];
4065
4066 if (fgets (line, sizeof (line), comm_file))
4067 {
4068 char *nl = strchr (line, '\n');
4069
4070 if (nl)
4071 *nl = '\0';
4072 if (*line != '\0')
4073 result = line;
4074 }
4075
4076 fclose (comm_file);
4077 }
4078
4079#undef COMM_LEN
4080#undef FORMAT
4081
4082 return result;
4083}
4084
dba24537
AC
4085/* Accepts an integer PID; returns a string representing a file that
4086 can be opened to get the symbols for the child process. */
4087
6d8fd2b7
UW
4088static char *
4089linux_child_pid_to_exec_file (int pid)
dba24537
AC
4090{
4091 char *name1, *name2;
4092
4093 name1 = xmalloc (MAXPATHLEN);
4094 name2 = xmalloc (MAXPATHLEN);
4095 make_cleanup (xfree, name1);
4096 make_cleanup (xfree, name2);
4097 memset (name2, 0, MAXPATHLEN);
4098
4099 sprintf (name1, "/proc/%d/exe", pid);
4100 if (readlink (name1, name2, MAXPATHLEN) > 0)
4101 return name2;
4102 else
4103 return name1;
4104}
4105
4106/* Service function for corefiles and info proc. */
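/* A line of /proc/PID/maps looks like, for example:

     00400000-00452000 r-xp 00000000 08:02 173521   /usr/bin/foo

   i.e. start and end addresses, permissions, file offset, device,
   inode, and (optionally) the name of the mapped file.  */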
4107
4108static int
4109read_mapping (FILE *mapfile,
4110 long long *addr,
4111 long long *endaddr,
4112 char *permissions,
4113 long long *offset,
4114 char *device, long long *inode, char *filename)
4115{
4116 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4117 addr, endaddr, permissions, offset, device, inode);
4118
2e14c2ea
MS
4119 filename[0] = '\0';
4120 if (ret > 0 && ret != EOF)
dba24537
AC
4121 {
4122 /* Eat everything up to EOL for the filename. This will prevent
4123 weird filenames (such as one with embedded whitespace) from
4124 confusing this code. It also makes this code more robust in
4125 respect to annotations the kernel may add after the filename.
4126
4127 Note the filename is used for informational purposes
4128 only. */
4129 ret += fscanf (mapfile, "%[^\n]\n", filename);
4130 }
2e14c2ea 4131
dba24537
AC
4132 return (ret != 0 && ret != EOF);
4133}
4134
4135/* Fills the "to_find_memory_regions" target vector. Lists the memory
4136 regions in the inferior for a corefile. */
4137
4138static int
b8edc417 4139linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
dba24537 4140{
89ecc4f5 4141 int pid = PIDGET (inferior_ptid);
dba24537
AC
4142 char mapsfilename[MAXPATHLEN];
4143 FILE *mapsfile;
4144 long long addr, endaddr, size, offset, inode;
4145 char permissions[8], device[8], filename[MAXPATHLEN];
4146 int read, write, exec;
7c8a8b04 4147 struct cleanup *cleanup;
dba24537
AC
4148
4149 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 4150 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 4151 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 4152 error (_("Could not open %s."), mapsfilename);
7c8a8b04 4153 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
4154
4155 if (info_verbose)
4156 fprintf_filtered (gdb_stdout,
4157 "Reading memory regions from %s\n", mapsfilename);
4158
4159 /* Now iterate until end-of-file. */
4160 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4161 &offset, &device[0], &inode, &filename[0]))
4162 {
4163 size = endaddr - addr;
4164
4165 /* Get the segment's permissions. */
4166 read = (strchr (permissions, 'r') != 0);
4167 write = (strchr (permissions, 'w') != 0);
4168 exec = (strchr (permissions, 'x') != 0);
4169
4170 if (info_verbose)
4171 {
4172 fprintf_filtered (gdb_stdout,
2244ba2e
PM
4173 "Save segment, %s bytes at %s (%c%c%c)",
4174 plongest (size), paddress (target_gdbarch, addr),
dba24537
AC
4175 read ? 'r' : ' ',
4176 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 4177 if (filename[0])
dba24537
AC
4178 fprintf_filtered (gdb_stdout, " for %s", filename);
4179 fprintf_filtered (gdb_stdout, "\n");
4180 }
4181
4182 /* Invoke the callback function to create the corefile
4183 segment. */
4184 func (addr, size, read, write, exec, obfd);
4185 }
7c8a8b04 4186 do_cleanups (cleanup);
dba24537
AC
4187 return 0;
4188}
4189
2020b7ab
PA
4190static int
4191find_signalled_thread (struct thread_info *info, void *data)
4192{
16c381f0 4193 if (info->suspend.stop_signal != TARGET_SIGNAL_0
2020b7ab
PA
4194 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4195 return 1;
4196
4197 return 0;
4198}
4199
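/* Return the stop signal of the first thread of the current inferior
   that stopped for a signal, or TARGET_SIGNAL_0 if there is none.  Used
   to pick the signal recorded in the core file's PRSTATUS notes.  */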
4200static enum target_signal
4201find_stop_signal (void)
4202{
4203 struct thread_info *info =
4204 iterate_over_threads (find_signalled_thread, NULL);
4205
4206 if (info)
16c381f0 4207 return info->suspend.stop_signal;
2020b7ab
PA
4208 else
4209 return TARGET_SIGNAL_0;
4210}
4211
dba24537
AC
4212/* Records the thread's register state for the corefile note
4213 section. */
4214
4215static char *
4216linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4217 char *note_data, int *note_size,
4218 enum target_signal stop_signal)
dba24537 4219{
dba24537 4220 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4221 struct gdbarch *gdbarch = target_gdbarch;
4222 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4223 const struct regset *regset;
55e969c1 4224 int core_regset_p;
594f7785 4225 struct cleanup *old_chain;
17ea7499
CES
4226 struct core_regset_section *sect_list;
4227 char *gdb_regset;
594f7785
UW
4228
4229 old_chain = save_inferior_ptid ();
4230 inferior_ptid = ptid;
4231 target_fetch_registers (regcache, -1);
4232 do_cleanups (old_chain);
4f844a66
DM
4233
4234 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4235 sect_list = gdbarch_core_regset_sections (gdbarch);
4236
17ea7499
CES
4237 /* The loop below uses the new struct core_regset_section, which stores
4238 the supported section names and sizes for the core file. Note that
4239 note PRSTATUS needs to be treated specially. But the other notes are
4240 structurally the same, so they can benefit from the new struct. */
4241 if (core_regset_p && sect_list != NULL)
4242 while (sect_list->sect_name != NULL)
4243 {
17ea7499
CES
4244 regset = gdbarch_regset_from_core_section (gdbarch,
4245 sect_list->sect_name,
4246 sect_list->size);
4247 gdb_assert (regset && regset->collect_regset);
4248 gdb_regset = xmalloc (sect_list->size);
4249 regset->collect_regset (regset, regcache, -1,
4250 gdb_regset, sect_list->size);
2f2241f1
UW
4251
4252 if (strcmp (sect_list->sect_name, ".reg") == 0)
4253 note_data = (char *) elfcore_write_prstatus
4254 (obfd, note_data, note_size,
857d11d0
JK
4255 lwp, target_signal_to_host (stop_signal),
4256 gdb_regset);
2f2241f1
UW
4257 else
4258 note_data = (char *) elfcore_write_register_note
4259 (obfd, note_data, note_size,
4260 sect_list->sect_name, gdb_regset,
4261 sect_list->size);
17ea7499
CES
4262 xfree (gdb_regset);
4263 sect_list++;
4264 }
dba24537 4265
17ea7499
CES
4266 /* For architectures that do not have the struct core_regset_section
4267 implemented, we use the old method. When all the architectures have
4268 the new support, the code below should be deleted. */
4f844a66 4269 else
17ea7499 4270 {
2f2241f1
UW
4271 gdb_gregset_t gregs;
4272 gdb_fpregset_t fpregs;
4273
4274 if (core_regset_p
4275 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3e43a32a
MS
4276 sizeof (gregs)))
4277 != NULL && regset->collect_regset != NULL)
2f2241f1
UW
4278 regset->collect_regset (regset, regcache, -1,
4279 &gregs, sizeof (gregs));
4280 else
4281 fill_gregset (regcache, &gregs, -1);
4282
857d11d0
JK
4283 note_data = (char *) elfcore_write_prstatus
4284 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4285 &gregs);
2f2241f1 4286
17ea7499
CES
4287 if (core_regset_p
4288 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3e43a32a
MS
4289 sizeof (fpregs)))
4290 != NULL && regset->collect_regset != NULL)
17ea7499
CES
4291 regset->collect_regset (regset, regcache, -1,
4292 &fpregs, sizeof (fpregs));
4293 else
4294 fill_fpregset (regcache, &fpregs, -1);
4295
4296 note_data = (char *) elfcore_write_prfpreg (obfd,
4297 note_data,
4298 note_size,
4299 &fpregs, sizeof (fpregs));
4300 }
4f844a66 4301
dba24537
AC
4302 return note_data;
4303}
4304
4305struct linux_nat_corefile_thread_data
4306{
4307 bfd *obfd;
4308 char *note_data;
4309 int *note_size;
4310 int num_notes;
2020b7ab 4311 enum target_signal stop_signal;
dba24537
AC
4312};
4313
4314/* Called by gdbthread.c once per thread. Records the thread's
4315 register state for the corefile note section. */
4316
4317static int
4318linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4319{
4320 struct linux_nat_corefile_thread_data *args = data;
dba24537 4321
dba24537
AC
4322 args->note_data = linux_nat_do_thread_registers (args->obfd,
4323 ti->ptid,
4324 args->note_data,
2020b7ab
PA
4325 args->note_size,
4326 args->stop_signal);
dba24537 4327 args->num_notes++;
56be3814 4328
dba24537
AC
4329 return 0;
4330}
4331
efcbbd14
UW
4332/* Enumerate spufs IDs for process PID. */
4333
4334static void
4335iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4336{
4337 char path[128];
4338 DIR *dir;
4339 struct dirent *entry;
4340
4341 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4342 dir = opendir (path);
4343 if (!dir)
4344 return;
4345
4346 rewinddir (dir);
4347 while ((entry = readdir (dir)) != NULL)
4348 {
4349 struct stat st;
4350 struct statfs stfs;
4351 int fd;
4352
4353 fd = atoi (entry->d_name);
4354 if (!fd)
4355 continue;
4356
4357 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4358 if (stat (path, &st) != 0)
4359 continue;
4360 if (!S_ISDIR (st.st_mode))
4361 continue;
4362
4363 if (statfs (path, &stfs) != 0)
4364 continue;
4365 if (stfs.f_type != SPUFS_MAGIC)
4366 continue;
4367
4368 callback (data, fd);
4369 }
4370
4371 closedir (dir);
4372}
4373
4374/* Generate corefile notes for SPU contexts. */
4375
4376struct linux_spu_corefile_data
4377{
4378 bfd *obfd;
4379 char *note_data;
4380 int *note_size;
4381};
4382
4383static void
4384linux_spu_corefile_callback (void *data, int fd)
4385{
4386 struct linux_spu_corefile_data *args = data;
4387 int i;
4388
4389 static const char *spu_files[] =
4390 {
4391 "object-id",
4392 "mem",
4393 "regs",
4394 "fpcr",
4395 "lslr",
4396 "decr",
4397 "decr_status",
4398 "signal1",
4399 "signal1_type",
4400 "signal2",
4401 "signal2_type",
4402 "event_mask",
4403 "event_status",
4404 "mbox_info",
4405 "ibox_info",
4406 "wbox_info",
4407 "dma_info",
4408 "proxydma_info",
4409 };
4410
4411 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4412 {
4413 char annex[32], note_name[32];
4414 gdb_byte *spu_data;
4415 LONGEST spu_len;
4416
4417 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4418 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4419 annex, &spu_data);
4420 if (spu_len > 0)
4421 {
4422 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4423 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4424 args->note_size, note_name,
4425 NT_SPU, spu_data, spu_len);
4426 xfree (spu_data);
4427 }
4428 }
4429}
4430
4431static char *
4432linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4433{
4434 struct linux_spu_corefile_data args;
e0881a8e 4435
efcbbd14
UW
4436 args.obfd = obfd;
4437 args.note_data = note_data;
4438 args.note_size = note_size;
4439
4440 iterate_over_spus (PIDGET (inferior_ptid),
4441 linux_spu_corefile_callback, &args);
4442
4443 return args.note_data;
4444}
4445
dba24537
AC
4446/* Fills the "to_make_corefile_note" target vector. Builds the note
4447 section for a corefile, and returns it in a malloc buffer. */
4448
4449static char *
4450linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4451{
4452 struct linux_nat_corefile_thread_data thread_args;
d99148ef 4453 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4454 char fname[16] = { '\0' };
d99148ef 4455 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4456 char psargs[80] = { '\0' };
4457 char *note_data = NULL;
d90e17a7 4458 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4459 gdb_byte *auxv;
dba24537
AC
4460 int auxv_len;
4461
4462 if (get_exec_file (0))
4463 {
9f37bbcc 4464 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
dba24537
AC
4465 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4466 if (get_inferior_args ())
4467 {
d99148ef
JK
4468 char *string_end;
4469 char *psargs_end = psargs + sizeof (psargs);
4470
4471 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4472 strings fine. */
4473 string_end = memchr (psargs, 0, sizeof (psargs));
4474 if (string_end != NULL)
4475 {
4476 *string_end++ = ' ';
4477 strncpy (string_end, get_inferior_args (),
4478 psargs_end - string_end);
4479 }
dba24537
AC
4480 }
4481 note_data = (char *) elfcore_write_prpsinfo (obfd,
4482 note_data,
4483 note_size, fname, psargs);
4484 }
4485
4486 /* Dump information for threads. */
4487 thread_args.obfd = obfd;
4488 thread_args.note_data = note_data;
4489 thread_args.note_size = note_size;
4490 thread_args.num_notes = 0;
2020b7ab 4491 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4492 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4493 gdb_assert (thread_args.num_notes != 0);
4494 note_data = thread_args.note_data;
dba24537 4495
13547ab6
DJ
4496 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4497 NULL, &auxv);
dba24537
AC
4498 if (auxv_len > 0)
4499 {
4500 note_data = elfcore_write_note (obfd, note_data, note_size,
4501 "CORE", NT_AUXV, auxv, auxv_len);
4502 xfree (auxv);
4503 }
4504
efcbbd14
UW
4505 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4506
dba24537
AC
4507 make_cleanup (xfree, note_data);
4508 return note_data;
4509}
4510
4511/* Implement the "info proc" command. */
4512
4513static void
4514linux_nat_info_proc_cmd (char *args, int from_tty)
4515{
89ecc4f5
DE
4516 /* A long is used for pid instead of an int to avoid a loss of precision
4517 compiler warning from the output of strtoul. */
4518 long pid = PIDGET (inferior_ptid);
dba24537
AC
4519 FILE *procfile;
4520 char **argv = NULL;
4521 char buffer[MAXPATHLEN];
4522 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4523 int cmdline_f = 1;
4524 int cwd_f = 1;
4525 int exe_f = 1;
4526 int mappings_f = 0;
dba24537
AC
4527 int status_f = 0;
4528 int stat_f = 0;
4529 int all = 0;
4530 struct stat dummy;
4531
4532 if (args)
4533 {
4534 /* Break up 'args' into an argv array. */
d1a41061
PP
4535 argv = gdb_buildargv (args);
4536 make_cleanup_freeargv (argv);
dba24537
AC
4537 }
4538 while (argv != NULL && *argv != NULL)
4539 {
4540 if (isdigit (argv[0][0]))
4541 {
4542 pid = strtoul (argv[0], NULL, 10);
4543 }
4544 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4545 {
4546 mappings_f = 1;
4547 }
4548 else if (strcmp (argv[0], "status") == 0)
4549 {
4550 status_f = 1;
4551 }
4552 else if (strcmp (argv[0], "stat") == 0)
4553 {
4554 stat_f = 1;
4555 }
4556 else if (strcmp (argv[0], "cmd") == 0)
4557 {
4558 cmdline_f = 1;
4559 }
4560 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4561 {
4562 exe_f = 1;
4563 }
4564 else if (strcmp (argv[0], "cwd") == 0)
4565 {
4566 cwd_f = 1;
4567 }
4568 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4569 {
4570 all = 1;
4571 }
4572 else
4573 {
1777feb0 4574 /* [...] (future options here). */
dba24537
AC
4575 }
4576 argv++;
4577 }
4578 if (pid == 0)
8a3fe4f8 4579 error (_("No current process: you must name one."));
dba24537 4580
89ecc4f5 4581 sprintf (fname1, "/proc/%ld", pid);
dba24537 4582 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4583 error (_("No /proc directory: '%s'"), fname1);
dba24537 4584
89ecc4f5 4585 printf_filtered (_("process %ld\n"), pid);
dba24537
AC
4586 if (cmdline_f || all)
4587 {
89ecc4f5 4588 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4589 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4590 {
7c8a8b04 4591 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4592
bf1d7d9c
JB
4593 if (fgets (buffer, sizeof (buffer), procfile))
4594 printf_filtered ("cmdline = '%s'\n", buffer);
4595 else
4596 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4597 do_cleanups (cleanup);
dba24537
AC
4598 }
4599 else
8a3fe4f8 4600 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4601 }
4602 if (cwd_f || all)
4603 {
89ecc4f5 4604 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4605 memset (fname2, 0, sizeof (fname2));
4606 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4607 printf_filtered ("cwd = '%s'\n", fname2);
4608 else
8a3fe4f8 4609 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4610 }
4611 if (exe_f || all)
4612 {
89ecc4f5 4613 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4614 memset (fname2, 0, sizeof (fname2));
4615 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4616 printf_filtered ("exe = '%s'\n", fname2);
4617 else
8a3fe4f8 4618 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4619 }
4620 if (mappings_f || all)
4621 {
89ecc4f5 4622 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4623 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4624 {
4625 long long addr, endaddr, size, offset, inode;
4626 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4627 struct cleanup *cleanup;
dba24537 4628
7c8a8b04 4629 cleanup = make_cleanup_fclose (procfile);
a3f17187 4630 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4631 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4632 {
4633 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4634 "Start Addr",
4635 " End Addr",
4636 " Size", " Offset", "objfile");
4637 }
4638 else
4639 {
4640 printf_filtered (" %18s %18s %10s %10s %7s\n",
4641 "Start Addr",
4642 " End Addr",
4643 " Size", " Offset", "objfile");
4644 }
4645
4646 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4647 &offset, &device[0], &inode, &filename[0]))
4648 {
4649 size = endaddr - addr;
4650
4651 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4652 calls here (and possibly above) should be abstracted
4653 out into their own functions? Andrew suggests using
4654 a generic local_address_string instead to print out
4655 the addresses; that makes sense to me, too. */
4656
a97b0ac8 4657 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4658 {
4659 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4660 (unsigned long) addr, /* FIXME: pr_addr */
4661 (unsigned long) endaddr,
4662 (int) size,
4663 (unsigned int) offset,
4664 filename[0] ? filename : "");
4665 }
4666 else
4667 {
4668 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4669 (unsigned long) addr, /* FIXME: pr_addr */
4670 (unsigned long) endaddr,
4671 (int) size,
4672 (unsigned int) offset,
4673 filename[0] ? filename : "");
4674 }
4675 }
4676
7c8a8b04 4677 do_cleanups (cleanup);
dba24537
AC
4678 }
4679 else
8a3fe4f8 4680 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4681 }
4682 if (status_f || all)
4683 {
89ecc4f5 4684 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4685 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4686 {
7c8a8b04 4687 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4688
dba24537
AC
4689 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4690 puts_filtered (buffer);
7c8a8b04 4691 do_cleanups (cleanup);
dba24537
AC
4692 }
4693 else
8a3fe4f8 4694 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4695 }
4696 if (stat_f || all)
4697 {
89ecc4f5 4698 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4699 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4700 {
4701 int itmp;
4702 char ctmp;
a25694b4 4703 long ltmp;
7c8a8b04 4704 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4705
4706 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4707 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4708 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4709 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4710 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4711 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4712 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4713 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4714 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4715 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4716 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4717 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4718 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4719 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4720 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4721 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4722 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4723 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4724 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4725 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4726 (unsigned long) ltmp);
4727 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4728 printf_filtered (_("Minor faults, children: %lu\n"),
4729 (unsigned long) ltmp);
4730 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4731 printf_filtered (_("Major faults (memory page faults): %lu\n"),
4732 (unsigned long) ltmp);
4733 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4734 printf_filtered (_("Major faults, children: %lu\n"),
4735 (unsigned long) ltmp);
4736 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4737 printf_filtered (_("utime: %ld\n"), ltmp);
4738 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4739 printf_filtered (_("stime: %ld\n"), ltmp);
4740 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4741 printf_filtered (_("utime, children: %ld\n"), ltmp);
4742 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4743 printf_filtered (_("stime, children: %ld\n"), ltmp);
4744 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4745 printf_filtered (_("jiffies remaining in current "
4746 "time slice: %ld\n"), ltmp);
a25694b4
AS
4747 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4748 printf_filtered (_("'nice' value: %ld\n"), ltmp);
4749 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4750 printf_filtered (_("jiffies until next timeout: %lu\n"),
4751 (unsigned long) ltmp);
4752 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4753 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4754 (unsigned long) ltmp);
4755 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4756 printf_filtered (_("start time (jiffies since "
4757 "system boot): %ld\n"), ltmp);
a25694b4
AS
4758 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4759 printf_filtered (_("Virtual memory size: %lu\n"),
4760 (unsigned long) ltmp);
4761 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3e43a32a
MS
4762 printf_filtered (_("Resident set size: %lu\n"),
4763 (unsigned long) ltmp);
a25694b4
AS
4764 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4765 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4766 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4767 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4768 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4769 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4770 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4771 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3e43a32a
MS
4772#if 0 /* Don't know how architecture-dependent the rest is...
4773 Anyway the signal bitmap info is available from "status". */
1777feb0 4774 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4775 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
1777feb0 4776 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4
AS
4777 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4778 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4779 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4780 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4781 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4782 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4783 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4784 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4785 printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
1777feb0 4786 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4787 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 4788#endif
7c8a8b04 4789 do_cleanups (cleanup);
dba24537
AC
4790 }
4791 else
8a3fe4f8 4792 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4793 }
4794}
4795
10d6c8cd
DJ
4796/* Implement the to_xfer_partial interface for memory reads using the /proc
4797 filesystem. Because we can use a single read() call for /proc, this
4798 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4799 but it doesn't support writes. */
4800
4801static LONGEST
4802linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4803 const char *annex, gdb_byte *readbuf,
4804 const gdb_byte *writebuf,
4805 ULONGEST offset, LONGEST len)
dba24537 4806{
10d6c8cd
DJ
4807 LONGEST ret;
4808 int fd;
dba24537
AC
4809 char filename[64];
4810
10d6c8cd 4811 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4812 return 0;
4813
4814 /* Don't bother for one word. */
4815 if (len < 3 * sizeof (long))
4816 return 0;
4817
4818 /* We could keep this file open and cache it - possibly one per
4819 thread. That requires some juggling, but is even faster. */
4820 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4821 fd = open (filename, O_RDONLY | O_LARGEFILE);
4822 if (fd == -1)
4823 return 0;
4824
4825 /* If pread64 is available, use it. It's faster if the kernel
4826 supports it (only one syscall), and it's 64-bit safe even on
4827 32-bit platforms (for instance, SPARC debugging a SPARC64
4828 application). */
4829#ifdef HAVE_PREAD64
10d6c8cd 4830 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4831#else
10d6c8cd 4832 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4833#endif
4834 ret = 0;
4835 else
4836 ret = len;
4837
4838 close (fd);
4839 return ret;
4840}
4841
efcbbd14
UW
4842
4843/* Enumerate spufs IDs for process PID. */
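/* The IDs are written into BUF as a packed array of 4-byte integers in
   the target's byte order, one per spufs context file descriptor, with
   OFFSET and LEN selecting a window into that array.  Returns the
   number of bytes written, or -1 if /proc/PID/fd cannot be read.  */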
4844static LONGEST
4845spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4846{
4847 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4848 LONGEST pos = 0;
4849 LONGEST written = 0;
4850 char path[128];
4851 DIR *dir;
4852 struct dirent *entry;
4853
4854 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4855 dir = opendir (path);
4856 if (!dir)
4857 return -1;
4858
4859 rewinddir (dir);
4860 while ((entry = readdir (dir)) != NULL)
4861 {
4862 struct stat st;
4863 struct statfs stfs;
4864 int fd;
4865
4866 fd = atoi (entry->d_name);
4867 if (!fd)
4868 continue;
4869
4870 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4871 if (stat (path, &st) != 0)
4872 continue;
4873 if (!S_ISDIR (st.st_mode))
4874 continue;
4875
4876 if (statfs (path, &stfs) != 0)
4877 continue;
4878 if (stfs.f_type != SPUFS_MAGIC)
4879 continue;
4880
4881 if (pos >= offset && pos + 4 <= offset + len)
4882 {
4883 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4884 written += 4;
4885 }
4886 pos += 4;
4887 }
4888
4889 closedir (dir);
4890 return written;
4891}
4892
4893/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4894 object type, using the /proc file system. */
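/* A NULL ANNEX enumerates the available SPU context IDs (see
   spu_enumerate_spu_ids above); an ANNEX of the form "<id>/<file>"
   reads or writes the given file beneath /proc/PID/fd/<id>.  */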
4895static LONGEST
4896linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4897 const char *annex, gdb_byte *readbuf,
4898 const gdb_byte *writebuf,
4899 ULONGEST offset, LONGEST len)
4900{
4901 char buf[128];
4902 int fd = 0;
4903 int ret = -1;
4904 int pid = PIDGET (inferior_ptid);
4905
4906 if (!annex)
4907 {
4908 if (!readbuf)
4909 return -1;
4910 else
4911 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4912 }
4913
4914 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4915 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4916 if (fd <= 0)
4917 return -1;
4918
4919 if (offset != 0
4920 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4921 {
4922 close (fd);
4923 return 0;
4924 }
4925
4926 if (writebuf)
4927 ret = write (fd, writebuf, (size_t) len);
4928 else if (readbuf)
4929 ret = read (fd, readbuf, (size_t) len);
4930
4931 close (fd);
4932 return ret;
4933}
4934
4935
dba24537
AC
4936/* Parse LINE as a signal set and add its set bits to SIGS. */
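/* LINE is the hexadecimal mask printed in /proc/PID/status (the value
   following e.g. "SigPnd:"), terminated by a newline.  Bit N of the
   mask denotes signal N + 1, with the lowest-numbered signals in the
   rightmost hex digit.  For example, the line "0000000000000102\n"
   denotes signals 2 (SIGINT) and 9 (SIGKILL).  */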
4937
4938static void
4939add_line_to_sigset (const char *line, sigset_t *sigs)
4940{
4941 int len = strlen (line) - 1;
4942 const char *p;
4943 int signum;
4944
4945 if (line[len] != '\n')
8a3fe4f8 4946 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4947
4948 p = line;
4949 signum = len * 4;
4950 while (len-- > 0)
4951 {
4952 int digit;
4953
4954 if (*p >= '0' && *p <= '9')
4955 digit = *p - '0';
4956 else if (*p >= 'a' && *p <= 'f')
4957 digit = *p - 'a' + 10;
4958 else
8a3fe4f8 4959 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4960
4961 signum -= 4;
4962
4963 if (digit & 1)
4964 sigaddset (sigs, signum + 1);
4965 if (digit & 2)
4966 sigaddset (sigs, signum + 2);
4967 if (digit & 4)
4968 sigaddset (sigs, signum + 3);
4969 if (digit & 8)
4970 sigaddset (sigs, signum + 4);
4971
4972 p++;
4973 }
4974}
4975
4976/* Find process PID's pending signals from /proc/pid/status and set
4977 SIGS to match. */
4978
4979void
3e43a32a
MS
4980linux_proc_pending_signals (int pid, sigset_t *pending,
4981 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4982{
4983 FILE *procfile;
4984 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 4985 struct cleanup *cleanup;
dba24537
AC
4986
4987 sigemptyset (pending);
4988 sigemptyset (blocked);
4989 sigemptyset (ignored);
4990 sprintf (fname, "/proc/%d/status", pid);
4991 procfile = fopen (fname, "r");
4992 if (procfile == NULL)
8a3fe4f8 4993 error (_("Could not open %s"), fname);
7c8a8b04 4994 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4995
4996 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4997 {
4998 /* Normal queued signals are on the SigPnd line in the status
4999 file. However, 2.6 kernels also have a "shared" pending
5000 queue for delivering signals to a thread group, so check for
5001 a ShdPnd line also.
5002
5003 Unfortunately some Red Hat kernels include the shared pending
5004 queue but not the ShdPnd status field. */
5005
5006 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5007 add_line_to_sigset (buffer + 8, pending);
5008 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5009 add_line_to_sigset (buffer + 8, pending);
5010 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5011 add_line_to_sigset (buffer + 8, blocked);
5012 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5013 add_line_to_sigset (buffer + 8, ignored);
5014 }
5015
7c8a8b04 5016 do_cleanups (cleanup);
dba24537
AC
5017}
5018
07e059b5
VP
5019static LONGEST
5020linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
5021 const char *annex, gdb_byte *readbuf,
5022 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5
VP
5023{
5024 /* We make the process list snapshot when the object starts to be
5025 read. */
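  /* The snapshot lives in a static obstack: it is (re)built whenever
     OFFSET is zero and freed once the caller reads past its end, so the
     object is expected to be read sequentially from offset 0.  */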
5026 static const char *buf;
5027 static LONGEST len_avail = -1;
5028 static struct obstack obstack;
5029
5030 DIR *dirp;
5031
5032 gdb_assert (object == TARGET_OBJECT_OSDATA);
5033
a61408f8
SS
5034 if (!annex)
5035 {
5036 if (offset == 0)
5037 {
5038 if (len_avail != -1 && len_avail != 0)
5039 obstack_free (&obstack, NULL);
5040 len_avail = 0;
5041 buf = NULL;
5042 obstack_init (&obstack);
5043 obstack_grow_str (&obstack, "<osdata type=\"types\">\n");
5044
3e43a32a 5045 obstack_xml_printf (&obstack,
a61408f8
SS
5046 "<item>"
5047 "<column name=\"Type\">processes</column>"
3e43a32a
MS
5048 "<column name=\"Description\">"
5049 "Listing of all processes</column>"
a61408f8
SS
5050 "</item>");
5051
5052 obstack_grow_str0 (&obstack, "</osdata>\n");
5053 buf = obstack_finish (&obstack);
5054 len_avail = strlen (buf);
5055 }
5056
5057 if (offset >= len_avail)
5058 {
5059 /* Done. Get rid of the obstack. */
5060 obstack_free (&obstack, NULL);
5061 buf = NULL;
5062 len_avail = 0;
5063 return 0;
5064 }
5065
5066 if (len > len_avail - offset)
5067 len = len_avail - offset;
5068 memcpy (readbuf, buf + offset, len);
5069
5070 return len;
5071 }
5072
07e059b5
VP
5073 if (strcmp (annex, "processes") != 0)
5074 return 0;
5075
5076 gdb_assert (readbuf && !writebuf);
5077
5078 if (offset == 0)
5079 {
5080 if (len_avail != -1 && len_avail != 0)
e0881a8e 5081 obstack_free (&obstack, NULL);
07e059b5
VP
5082 len_avail = 0;
5083 buf = NULL;
5084 obstack_init (&obstack);
5085 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
5086
5087 dirp = opendir ("/proc");
5088 if (dirp)
e0881a8e
MS
5089 {
5090 struct dirent *dp;
5091
5092 while ((dp = readdir (dirp)) != NULL)
5093 {
5094 struct stat statbuf;
5095 char procentry[sizeof ("/proc/4294967295")];
5096
5097 if (!isdigit (dp->d_name[0])
5098 || NAMELEN (dp) > sizeof ("4294967295") - 1)
5099 continue;
5100
5101 sprintf (procentry, "/proc/%s", dp->d_name);
5102 if (stat (procentry, &statbuf) == 0
5103 && S_ISDIR (statbuf.st_mode))
5104 {
5105 char *pathname;
5106 FILE *f;
5107 char cmd[MAXPATHLEN + 1];
5108 struct passwd *entry;
5109
5110 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
5111 entry = getpwuid (statbuf.st_uid);
5112
5113 if ((f = fopen (pathname, "r")) != NULL)
5114 {
5eee517d 5115 size_t length = fread (cmd, 1, sizeof (cmd) - 1, f);
e0881a8e 5116
5eee517d 5117 if (length > 0)
e0881a8e
MS
5118 {
5119 int i;
5120
5eee517d 5121 for (i = 0; i < length; i++)
e0881a8e
MS
5122 if (cmd[i] == '\0')
5123 cmd[i] = ' ';
5eee517d 5124 cmd[length] = '\0';
e0881a8e
MS
5125
5126 obstack_xml_printf (
5127 &obstack,
5128 "<item>"
5129 "<column name=\"pid\">%s</column>"
5130 "<column name=\"user\">%s</column>"
5131 "<column name=\"command\">%s</column>"
5132 "</item>",
5133 dp->d_name,
5134 entry ? entry->pw_name : "?",
5135 cmd);
5136 }
5137 fclose (f);
5138 }
5139
5140 xfree (pathname);
5141 }
5142 }
5143
5144 closedir (dirp);
5145 }
07e059b5
VP
5146
5147 obstack_grow_str0 (&obstack, "</osdata>\n");
5148 buf = obstack_finish (&obstack);
5149 len_avail = strlen (buf);
5150 }
5151
5152 if (offset >= len_avail)
5153 {
5154 /* Done. Get rid of the obstack. */
5155 obstack_free (&obstack, NULL);
5156 buf = NULL;
5157 len_avail = 0;
5158 return 0;
5159 }
5160
5161 if (len > len_avail - offset)
5162 len = len_avail - offset;
5163 memcpy (readbuf, buf + offset, len);
5164
5165 return len;
5166}
5167
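/* Illustrative sketch, not part of GDB: the loop above works because
   /proc/<pid>/cmdline separates arguments with NUL bytes, which must be
   rewritten as spaces before the command line is printable.  The
   standalone program below (reading /proc/self/cmdline, chosen only for
   the example) performs the same conversion.  */

#include <stdio.h>

int
main (void)
{
  char cmd[4096];
  size_t length, i;
  FILE *f = fopen ("/proc/self/cmdline", "r");

  if (f == NULL)
    return 1;

  length = fread (cmd, 1, sizeof (cmd) - 1, f);
  fclose (f);

  /* Arguments are NUL-separated; turn the separators into spaces.  */
  for (i = 0; i < length; i++)
    if (cmd[i] == '\0')
      cmd[i] = ' ';
  cmd[length] = '\0';

  printf ("%s\n", cmd);
  return 0;
}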
10d6c8cd
DJ
5168static LONGEST
5169linux_xfer_partial (struct target_ops *ops, enum target_object object,
5170 const char *annex, gdb_byte *readbuf,
5171 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5172{
5173 LONGEST xfer;
5174
5175 if (object == TARGET_OBJECT_AUXV)
9f2982ff 5176 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
5177 offset, len);
5178
07e059b5
VP
5179 if (object == TARGET_OBJECT_OSDATA)
5180 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5181 offset, len);
5182
efcbbd14
UW
5183 if (object == TARGET_OBJECT_SPU)
5184 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5185 offset, len);
5186
8f313923
JK
5187 /* GDB calculates all the addresses in a possibly larger width than the
5188 target's address width.  The address needs to be masked before its final
5189 use - either by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5190
5191 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5192
5193 if (object == TARGET_OBJECT_MEMORY)
5194 {
5195 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5196
5197 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5198 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5199 }
5200
10d6c8cd
DJ
5201 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5202 offset, len);
5203 if (xfer != 0)
5204 return xfer;
5205
5206 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5207 offset, len);
5208}
5209
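/* Illustrative sketch, not part of GDB: the masking in linux_xfer_partial
   truncates a memory offset to the target's address width.  With a 32-bit
   target on a 64-bit host, a sign-extended address such as
   0xffffffff80001000 becomes 0x80001000.  The numbers below are made up
   for the example.  */

#include <stdio.h>

int
main (void)
{
  unsigned long long offset = 0xffffffff80001000ULL;
  int addr_bit = 32;	/* Assumed target address width.  */

  /* Compare the widths first to avoid an undefined full-width shift.  */
  if (addr_bit < (int) (sizeof (offset) * 8))
    offset &= ((unsigned long long) 1 << addr_bit) - 1;

  printf ("0x%llx\n", offset);	/* Prints 0x80001000.  */
  return 0;
}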
e9efe249 5210/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
5211 it with local methods. */
5212
910122bf
UW
5213static void
5214linux_target_install_ops (struct target_ops *t)
10d6c8cd 5215{
6d8fd2b7
UW
5216 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
5217 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
5218 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
a96d9b2e 5219 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 5220 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 5221 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
5222 t->to_post_attach = linux_child_post_attach;
5223 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
5224 t->to_find_memory_regions = linux_nat_find_memory_regions;
5225 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5226
5227 super_xfer_partial = t->to_xfer_partial;
5228 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
5229}
5230
5231struct target_ops *
5232linux_target (void)
5233{
5234 struct target_ops *t;
5235
5236 t = inf_ptrace_target ();
5237 linux_target_install_ops (t);
5238
5239 return t;
5240}
5241
5242struct target_ops *
7714d83a 5243linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
5244{
5245 struct target_ops *t;
5246
5247 t = inf_ptrace_trad_target (register_u_offset);
5248 linux_target_install_ops (t);
10d6c8cd 5249
10d6c8cd
DJ
5250 return t;
5251}
5252
b84876c2
PA
5253/* target_is_async_p implementation. */
5254
5255static int
5256linux_nat_is_async_p (void)
5257{
5258 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5259 it explicitly with the "set target-async" command.
b84876c2 5260 Someday, Linux will always be async. */
c6ebd6cf 5261 if (!target_async_permitted)
b84876c2
PA
5262 return 0;
5263
d90e17a7
PA
5264 /* See target.h/target_async_mask. */
5265 return linux_nat_async_mask_value;
b84876c2
PA
5266}
5267
5268/* target_can_async_p implementation. */
5269
5270static int
5271linux_nat_can_async_p (void)
5272{
5273 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5274 it explicitly with the "set target-async" command.
b84876c2 5275 Someday, Linux will always be async. */
c6ebd6cf 5276 if (!target_async_permitted)
b84876c2
PA
5277 return 0;
5278
5279 /* See target.h/target_async_mask. */
5280 return linux_nat_async_mask_value;
5281}
5282
9908b566
VP
5283static int
5284linux_nat_supports_non_stop (void)
5285{
5286 return 1;
5287}
5288
d90e17a7
PA
5289/* True if we want to support multi-process. To be removed when GDB
5290 supports multi-exec. */
5291
2277426b 5292int linux_multi_process = 1;
d90e17a7
PA
5293
5294static int
5295linux_nat_supports_multi_process (void)
5296{
5297 return linux_multi_process;
5298}
5299
b84876c2
PA
5300/* target_async_mask implementation. */
5301
5302static int
7feb7d06 5303linux_nat_async_mask (int new_mask)
b84876c2 5304{
7feb7d06 5305 int curr_mask = linux_nat_async_mask_value;
b84876c2 5306
7feb7d06 5307 if (curr_mask != new_mask)
b84876c2 5308 {
7feb7d06 5309 if (new_mask == 0)
b84876c2
PA
5310 {
5311 linux_nat_async (NULL, 0);
7feb7d06 5312 linux_nat_async_mask_value = new_mask;
b84876c2
PA
5313 }
5314 else
5315 {
7feb7d06 5316 linux_nat_async_mask_value = new_mask;
84e46146 5317
7feb7d06
PA
5318 /* If we're going out of async-mask in all-stop, then the
5319 inferior is stopped. The next resume will call
5320 target_async. In non-stop, the target event source
5321 should be always registered in the event loop. Do so
5322 now. */
5323 if (non_stop)
5324 linux_nat_async (inferior_event_handler, 0);
b84876c2
PA
5325 }
5326 }
5327
7feb7d06 5328 return curr_mask;
b84876c2
PA
5329}
5330
5331static int async_terminal_is_ours = 1;
5332
5333/* target_terminal_inferior implementation. */
5334
5335static void
5336linux_nat_terminal_inferior (void)
5337{
5338 if (!target_is_async_p ())
5339 {
5340 /* Async mode is disabled. */
5341 terminal_inferior ();
5342 return;
5343 }
5344
b84876c2
PA
5345 terminal_inferior ();
5346
d9d2d8b6 5347 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5348 if (!async_terminal_is_ours)
5349 return;
5350
5351 delete_file_handler (input_fd);
5352 async_terminal_is_ours = 0;
5353 set_sigint_trap ();
5354}
5355
5356/* target_terminal_ours implementation. */
5357
2c0b251b 5358static void
b84876c2
PA
5359linux_nat_terminal_ours (void)
5360{
5361 if (!target_is_async_p ())
5362 {
5363 /* Async mode is disabled. */
5364 terminal_ours ();
5365 return;
5366 }
5367
5368 /* GDB should never give the terminal to the inferior if the
5369 inferior is running in the background (run&, continue&, etc.),
5370 but claiming it back certainly should. */
5371 terminal_ours ();
5372
b84876c2
PA
5373 if (async_terminal_is_ours)
5374 return;
5375
5376 clear_sigint_trap ();
5377 add_file_handler (input_fd, stdin_event_handler, 0);
5378 async_terminal_is_ours = 1;
5379}
5380
5381static void (*async_client_callback) (enum inferior_event_type event_type,
5382 void *context);
5383static void *async_client_context;
5384
7feb7d06
PA
5385/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5386 it marks the event pipe so we notice when any child changes state
5387 and the event loop is notified; it also allows us to use sigsuspend
5388 in linux_nat_wait_1 above to wait for the arrival of a SIGCHLD. */
5389
b84876c2 5390static void
7feb7d06 5391sigchld_handler (int signo)
b84876c2 5392{
7feb7d06
PA
5393 int old_errno = errno;
5394
5395 if (debug_linux_nat_async)
5396 fprintf_unfiltered (gdb_stdlog, "sigchld\n");
5397
5398 if (signo == SIGCHLD
5399 && linux_nat_event_pipe[0] != -1)
5400 async_file_mark (); /* Let the event loop know that there are
5401 events to handle. */
5402
5403 errno = old_errno;
5404}
5405
5406/* Callback registered with the target events file descriptor. */
5407
5408static void
5409handle_target_event (int error, gdb_client_data client_data)
5410{
5411 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5412}
5413
5414/* Create/destroy the target events pipe. Returns previous state. */
5415
5416static int
5417linux_async_pipe (int enable)
5418{
5419 int previous = (linux_nat_event_pipe[0] != -1);
5420
5421 if (previous != enable)
5422 {
5423 sigset_t prev_mask;
5424
5425 block_child_signals (&prev_mask);
5426
5427 if (enable)
5428 {
5429 if (pipe (linux_nat_event_pipe) == -1)
5430 internal_error (__FILE__, __LINE__,
5431 "creating event pipe failed.");
5432
5433 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5434 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5435 }
5436 else
5437 {
5438 close (linux_nat_event_pipe[0]);
5439 close (linux_nat_event_pipe[1]);
5440 linux_nat_event_pipe[0] = -1;
5441 linux_nat_event_pipe[1] = -1;
5442 }
5443
5444 restore_child_signals_mask (&prev_mask);
5445 }
5446
5447 return previous;
b84876c2
PA
5448}
5449
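/* Illustrative sketch, not part of GDB: sigchld_handler plus
   linux_async_pipe above implement the classic "self-pipe trick" -- the
   signal handler only writes a byte to a non-blocking pipe, and the event
   loop watches the pipe's read end, so nothing async-signal-unsafe runs
   inside the handler.  Standalone demonstration: */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/wait.h>
#include <unistd.h>

static int event_pipe[2] = { -1, -1 };

static void
chld_handler (int signo)
{
  int old_errno = errno;
  char c = '+';
  ssize_t n;

  /* Async-signal-safe: just mark the pipe; ignore failed writes.  */
  n = write (event_pipe[1], &c, 1);
  (void) n;
  errno = old_errno;
}

int
main (void)
{
  struct sigaction sa;
  fd_set rfds;
  char buf[16];
  ssize_t got;
  pid_t child;

  if (pipe (event_pipe) != 0)
    return 1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  sa.sa_handler = chld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  child = fork ();
  if (child == 0)
    {
      sleep (1);
      _exit (0);
    }

  /* The "event loop": wait until the read end becomes readable.
     select is not restarted after the signal, so retry on EINTR.  */
  do
    {
      FD_ZERO (&rfds);
      FD_SET (event_pipe[0], &rfds);
    }
  while (select (event_pipe[0] + 1, &rfds, NULL, NULL, NULL) < 0
	 && errno == EINTR);

  got = read (event_pipe[0], buf, sizeof (buf));
  (void) got;
  waitpid (child, NULL, 0);
  puts ("SIGCHLD noticed; child collected outside the handler.");
  return 0;
}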
5450/* target_async implementation. */
5451
5452static void
5453linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5454 void *context), void *context)
5455{
c6ebd6cf 5456 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
5457 internal_error (__FILE__, __LINE__,
5458 "Calling target_async when async is masked");
5459
5460 if (callback != NULL)
5461 {
5462 async_client_callback = callback;
5463 async_client_context = context;
7feb7d06
PA
5464 if (!linux_async_pipe (1))
5465 {
5466 add_file_handler (linux_nat_event_pipe[0],
5467 handle_target_event, NULL);
5468 /* There may be pending events to handle. Tell the event loop
5469 to poll them. */
5470 async_file_mark ();
5471 }
b84876c2
PA
5472 }
5473 else
5474 {
5475 async_client_callback = callback;
5476 async_client_context = context;
b84876c2 5477 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5478 linux_async_pipe (0);
b84876c2
PA
5479 }
5480 return;
5481}
5482
252fbfc8
PA
5483/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5484 event came out. */
5485
4c28f408 5486static int
252fbfc8 5487linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5488{
d90e17a7 5489 if (!lwp->stopped)
252fbfc8 5490 {
d90e17a7 5491 ptid_t ptid = lwp->ptid;
252fbfc8 5492
d90e17a7
PA
5493 if (debug_linux_nat)
5494 fprintf_unfiltered (gdb_stdlog,
5495 "LNSL: running -> suspending %s\n",
5496 target_pid_to_str (lwp->ptid));
252fbfc8 5497
252fbfc8 5498
d90e17a7
PA
5499 stop_callback (lwp, NULL);
5500 stop_wait_callback (lwp, NULL);
252fbfc8 5501
d90e17a7
PA
5502 /* If the lwp exits while we try to stop it, there's nothing
5503 else to do. */
5504 lwp = find_lwp_pid (ptid);
5505 if (lwp == NULL)
5506 return 0;
252fbfc8 5507
d90e17a7
PA
5508 /* If we didn't collect any signal other than SIGSTOP while
5509 stopping the LWP, push a SIGNAL_0 event. In either case, the
5510 event-loop will end up calling target_wait which will collect
5511 these. */
5512 if (lwp->status == 0)
5513 lwp->status = W_STOPCODE (0);
5514 async_file_mark ();
5515 }
5516 else
5517 {
5518 /* Already known to be stopped; do nothing. */
252fbfc8 5519
d90e17a7
PA
5520 if (debug_linux_nat)
5521 {
e09875d4 5522 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5523 fprintf_unfiltered (gdb_stdlog,
5524 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5525 target_pid_to_str (lwp->ptid));
5526 else
3e43a32a
MS
5527 fprintf_unfiltered (gdb_stdlog,
5528 "LNSL: already stopped/no "
5529 "stop_requested yet %s\n",
d90e17a7 5530 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5531 }
5532 }
4c28f408
PA
5533 return 0;
5534}
5535
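/* Illustrative sketch, not part of GDB: W_STOPCODE (0) above builds a
   synthetic waitpid status that decodes as "stopped by signal 0", so a
   later target_wait sees a plain stop event.  On Linux the encoding is
   (sig << 8) | 0x7f; the fallback definition below is an assumption for
   systems whose <sys/wait.h> does not expose the macro.  */

#include <stdio.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
# define W_STOPCODE(sig) (((sig) << 8) | 0x7f)
#endif

int
main (void)
{
  int status = W_STOPCODE (0);

  /* Prints "stopped=1 sig=0".  */
  printf ("stopped=%d sig=%d\n", WIFSTOPPED (status), WSTOPSIG (status));
  return 0;
}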
5536static void
5537linux_nat_stop (ptid_t ptid)
5538{
5539 if (non_stop)
d90e17a7 5540 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5541 else
5542 linux_ops->to_stop (ptid);
5543}
5544
d90e17a7
PA
5545static void
5546linux_nat_close (int quitting)
5547{
5548 /* Unregister from the event loop. */
5549 if (target_is_async_p ())
5550 target_async (NULL, 0);
5551
5552 /* Reset the async mask. */
5553 linux_nat_async_mask_value = 1;
5554
5555 if (linux_ops->to_close)
5556 linux_ops->to_close (quitting);
5557}
5558
c0694254
PA
5559/* When requests are passed down from the linux-nat layer to the
5560 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5561 used. The address space pointer is stored in the inferior object,
5562 but the common code that is passed such ptid can't tell whether
5563 lwpid is a "main" process id or not (it assumes so). We reverse
5564 look up the "main" process id from the lwp here. */
5565
5566struct address_space *
5567linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5568{
5569 struct lwp_info *lwp;
5570 struct inferior *inf;
5571 int pid;
5572
5573 pid = GET_LWP (ptid);
5574 if (GET_LWP (ptid) == 0)
5575 {
5576 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5577 tgid. */
5578 lwp = find_lwp_pid (ptid);
5579 pid = GET_PID (lwp->ptid);
5580 }
5581 else
5582 {
5583 /* A (pid,lwpid,0) ptid. */
5584 pid = GET_PID (ptid);
5585 }
5586
5587 inf = find_inferior_pid (pid);
5588 gdb_assert (inf != NULL);
5589 return inf->aspace;
5590}
5591
dc146f7c
VP
5592int
5593linux_nat_core_of_thread_1 (ptid_t ptid)
5594{
5595 struct cleanup *back_to;
5596 char *filename;
5597 FILE *f;
5598 char *content = NULL;
5599 char *p;
5600 char *ts = 0;
5601 int content_read = 0;
5602 int i;
5603 int core;
5604
5605 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5606 GET_PID (ptid), GET_LWP (ptid));
5607 back_to = make_cleanup (xfree, filename);
5608
5609 f = fopen (filename, "r");
5610 if (!f)
5611 {
5612 do_cleanups (back_to);
5613 return -1;
5614 }
5615
5616 make_cleanup_fclose (f);
5617
5618 for (;;)
5619 {
5620 int n;
e0881a8e 5621
dc146f7c
VP
5622 content = xrealloc (content, content_read + 1024);
5623 n = fread (content + content_read, 1, 1024, f);
5624 content_read += n;
5625 if (n < 1024)
5626 {
5627 content[content_read] = '\0';
5628 break;
5629 }
5630 }
5631
5632 make_cleanup (xfree, content);
5633
5634 p = strchr (content, '(');
ca2a87a0
JK
5635
5636 /* Skip ")". */
5637 if (p != NULL)
5638 p = strchr (p, ')');
5639 if (p != NULL)
5640 p++;
dc146f7c
VP
5641
5642 /* Counting the first field after the program name as index 0, the core
5643 number is the field with index 36. There's no constant for that anywhere. */
ca2a87a0
JK
5644 if (p != NULL)
5645 p = strtok_r (p, " ", &ts);
5646 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5647 p = strtok_r (NULL, " ", &ts);
5648
ca2a87a0 5649 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5650 core = -1;
5651
5652 do_cleanups (back_to);
5653
5654 return core;
5655}
5656
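/* Illustrative sketch, not part of GDB: the parser above must first skip
   past the ')' that closes the command name, because the name may itself
   contain spaces; counting the first field after the ')' as index 0, the
   CPU the task last ran on is index 36 (field 39 of /proc/<pid>/stat).
   Standalone version for the current process: */

#define _POSIX_C_SOURCE 200809L	/* For strtok_r.  */

#include <stdio.h>
#include <string.h>

int
main (void)
{
  char content[4096];
  char *p, *ts = NULL;
  int i, core = -1;
  FILE *f = fopen ("/proc/self/stat", "r");

  if (f == NULL || fgets (content, sizeof (content), f) == NULL)
    return 1;
  fclose (f);

  /* Skip "pid (comm" -- comm may contain spaces or digits.  */
  p = strchr (content, ')');
  if (p != NULL)
    p = strtok_r (p + 1, " ", &ts);
  for (i = 0; p != NULL && i != 36; i++)
    p = strtok_r (NULL, " ", &ts);

  if (p == NULL || sscanf (p, "%d", &core) != 1)
    core = -1;

  printf ("last ran on core %d\n", core);
  return 0;
}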
5657/* Return the cached value of the processor core for thread PTID. */
5658
5659int
5660linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5661{
5662 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5663
dc146f7c
VP
5664 if (info)
5665 return info->core;
5666 return -1;
5667}
5668
f973ed9c
DJ
5669void
5670linux_nat_add_target (struct target_ops *t)
5671{
f973ed9c
DJ
5672 /* Save the provided single-threaded target. We save this in a separate
5673 variable because another target we've inherited from (e.g. inf-ptrace)
5674 may have saved a pointer to T; we want to use it for the final
5675 process stratum target. */
5676 linux_ops_saved = *t;
5677 linux_ops = &linux_ops_saved;
5678
5679 /* Override some methods for multithreading. */
b84876c2 5680 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5681 t->to_attach = linux_nat_attach;
5682 t->to_detach = linux_nat_detach;
5683 t->to_resume = linux_nat_resume;
5684 t->to_wait = linux_nat_wait;
2455069d 5685 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
5686 t->to_xfer_partial = linux_nat_xfer_partial;
5687 t->to_kill = linux_nat_kill;
5688 t->to_mourn_inferior = linux_nat_mourn_inferior;
5689 t->to_thread_alive = linux_nat_thread_alive;
5690 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 5691 t->to_thread_name = linux_nat_thread_name;
f973ed9c 5692 t->to_has_thread_control = tc_schedlock;
c0694254 5693 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5694 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5695 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5696
b84876c2
PA
5697 t->to_can_async_p = linux_nat_can_async_p;
5698 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5699 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
5700 t->to_async = linux_nat_async;
5701 t->to_async_mask = linux_nat_async_mask;
5702 t->to_terminal_inferior = linux_nat_terminal_inferior;
5703 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5704 t->to_close = linux_nat_close;
b84876c2 5705
4c28f408
PA
5706 /* Methods for non-stop support. */
5707 t->to_stop = linux_nat_stop;
5708
d90e17a7
PA
5709 t->to_supports_multi_process = linux_nat_supports_multi_process;
5710
dc146f7c
VP
5711 t->to_core_of_thread = linux_nat_core_of_thread;
5712
f973ed9c
DJ
5713 /* We don't change the stratum; this target will sit at
5714 process_stratum and thread_db will set at thread_stratum. This
5715 is a little strange, since this is a multi-threaded-capable
5716 target, but we want to be on the stack below thread_db, and we
5717 also want to be used for single-threaded processes. */
5718
5719 add_target (t);
f973ed9c
DJ
5720}
5721
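/* Illustrative sketch, not part of GDB: an architecture's native file
   typically obtains the prototype target from linux_target (or
   linux_trad_target), overrides whatever methods it implements, and hands
   the result to linux_nat_add_target above.  The names
   arch_fetch_registers and arch_store_registers below are hypothetical
   placeholders, not real GDB functions.

   static void
   _initialize_arch_linux_nat (void)
   {
     struct target_ops *t = linux_target ();

     t->to_fetch_registers = arch_fetch_registers;
     t->to_store_registers = arch_store_registers;

     linux_nat_add_target (t);
   }
*/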
9f0bdab8
DJ
5722/* Register a method to call whenever a new thread is attached. */
5723void
5724linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
5725{
5726 /* Save the pointer. We only support a single registered instance
5727 of the GNU/Linux native target, so we do not need to map this to
5728 T. */
5729 linux_nat_new_thread = new_thread;
5730}
5731
5b009018
PA
5732/* Register a method that converts a siginfo object between the layout
5733 that ptrace returns, and the layout in the architecture of the
5734 inferior. */
5735void
5736linux_nat_set_siginfo_fixup (struct target_ops *t,
5737 int (*siginfo_fixup) (struct siginfo *,
5738 gdb_byte *,
5739 int))
5740{
5741 /* Save the pointer. */
5742 linux_nat_siginfo_fixup = siginfo_fixup;
5743}
5744
9f0bdab8
DJ
5745/* Return the saved siginfo associated with PTID. */
5746struct siginfo *
5747linux_nat_get_siginfo (ptid_t ptid)
5748{
5749 struct lwp_info *lp = find_lwp_pid (ptid);
5750
5751 gdb_assert (lp != NULL);
5752
5753 return &lp->siginfo;
5754}
5755
2c0b251b
PA
5756/* Provide a prototype to silence -Wmissing-prototypes. */
5757extern initialize_file_ftype _initialize_linux_nat;
5758
d6b0e80f
AC
5759void
5760_initialize_linux_nat (void)
5761{
1bedd215
AC
5762 add_info ("proc", linux_nat_info_proc_cmd, _("\
5763Show /proc process information about any running process.\n\
dba24537
AC
5764Specify any process id, or use the program being debugged by default.\n\
5765Specify any of the following keywords for detailed info:\n\
5766 mappings -- list of mapped memory regions.\n\
5767 stat -- list process information from /proc/PID/stat.\n\
5768 status -- list process information from /proc/PID/status.\n\
1bedd215 5769 all -- list all available /proc info."));
d6b0e80f 5770
b84876c2
PA
5771 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5772 &debug_linux_nat, _("\
5773Set debugging of GNU/Linux lwp module."), _("\
5774Show debugging of GNU/Linux lwp module."), _("\
5775Enables printf debugging output."),
5776 NULL,
5777 show_debug_linux_nat,
5778 &setdebuglist, &showdebuglist);
5779
5780 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
5781 &debug_linux_nat_async, _("\
5782Set debugging of GNU/Linux async lwp module."), _("\
5783Show debugging of GNU/Linux async lwp module."), _("\
5784Enables printf debugging output."),
5785 NULL,
5786 show_debug_linux_nat_async,
5787 &setdebuglist, &showdebuglist);
5788
b84876c2 5789 /* Save this mask as the default. */
d6b0e80f
AC
5790 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5791
7feb7d06
PA
5792 /* Install a SIGCHLD handler. */
5793 sigchld_action.sa_handler = sigchld_handler;
5794 sigemptyset (&sigchld_action.sa_mask);
5795 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5796
5797 /* Make it the default. */
7feb7d06 5798 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5799
5800 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5801 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5802 sigdelset (&suspend_mask, SIGCHLD);
5803
7feb7d06 5804 sigemptyset (&blocked_mask);
10568435
JK
5805
5806 add_setshow_boolean_cmd ("disable-randomization", class_support,
5807 &disable_randomization, _("\
5808Set disabling of debuggee's virtual address space randomization."), _("\
5809Show disabling of debuggee's virtual address space randomization."), _("\
5810When this mode is on (which is the default), randomization of the virtual\n\
5811address space is disabled. Standalone programs run with the randomization\n\
5812enabled by default on some platforms."),
5813 &set_disable_randomization,
5814 &show_disable_randomization,
5815 &setlist, &showlist);
d6b0e80f
AC
5816}
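/* Illustrative usage, not part of GDB: the commands registered above are
   driven from the GDB prompt roughly like this (process id arguments and
   output omitted):

     (gdb) info proc mappings
     (gdb) set debug lin-lwp 1
     (gdb) show debug lin-lwp
     (gdb) set disable-randomization off
     (gdb) show disable-randomization
*/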
5817\f
5818
5819/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5820 the GNU/Linux Threads library and therefore doesn't really belong
5821 here. */
5822
5823/* Read variable NAME in the target and return its value if found.
5824 Otherwise return zero. It is assumed that the type of the variable
5825 is `int'. */
5826
5827static int
5828get_signo (const char *name)
5829{
5830 struct minimal_symbol *ms;
5831 int signo;
5832
5833 ms = lookup_minimal_symbol (name, NULL, NULL);
5834 if (ms == NULL)
5835 return 0;
5836
8e70166d 5837 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5838 sizeof (signo)) != 0)
5839 return 0;
5840
5841 return signo;
5842}
5843
5844/* Return the set of signals used by the threads library in *SET. */
5845
5846void
5847lin_thread_get_thread_signals (sigset_t *set)
5848{
5849 struct sigaction action;
5850 int restart, cancel;
5851
b84876c2 5852 sigemptyset (&blocked_mask);
d6b0e80f
AC
5853 sigemptyset (set);
5854
5855 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5856 cancel = get_signo ("__pthread_sig_cancel");
5857
5858 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5859 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5860 not provide any way for the debugger to query the signal numbers -
5861 fortunately they don't change! */
5862
d6b0e80f 5863 if (restart == 0)
17fbb0bd 5864 restart = __SIGRTMIN;
d6b0e80f 5865
d6b0e80f 5866 if (cancel == 0)
17fbb0bd 5867 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5868
5869 sigaddset (set, restart);
5870 sigaddset (set, cancel);
5871
5872 /* The GNU/Linux Threads library makes terminating threads send a
5873 special "cancel" signal instead of SIGCHLD. Make sure we catch
5874 those (to prevent them from terminating GDB itself, which is
5875 likely to be their default action) and treat them the same way as
5876 SIGCHLD. */
5877
5878 action.sa_handler = sigchld_handler;
5879 sigemptyset (&action.sa_mask);
58aecb61 5880 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5881 sigaction (cancel, &action, NULL);
5882
5883 /* We block the "cancel" signal throughout this code ... */
5884 sigaddset (&blocked_mask, cancel);
5885 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5886
5887 /* ... except during a sigsuspend. */
5888 sigdelset (&suspend_mask, cancel);
5889}