[deliverable/binutils-gdb.git] / gdb / linux-nat.c
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>   /* for MAXPATHLEN */
#include <sys/procfs.h>  /* for elf_gregset etc.  */
#include "elf-bfd.h"     /* for elfcore_write_* */
#include "gregset.h"     /* for gregset */
#include "gdbcore.h"     /* for get_exec_file */
#include <ctype.h>       /* for isdigit */
#include "gdbthread.h"   /* for struct thread_info etc.  */
#include "gdb_stat.h"    /* for struct stat */
#include <fcntl.h>       /* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior to
version 2.4, Linux can either wait for an event in the main thread, or in
secondary threads.  (2.4 has the __WALL flag).  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking waitpid,
together with sigsuspend.  First, we use non-blocking waitpid to get an event
in the main process, if any.  Second, we use non-blocking waitpid with the
__WCLONE flag to check for events in cloned processes.  If nothing is found,
we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be delivered both
for events in the main debugged process and in cloned processes.  As soon as
we know there's an event, we get back to calling non-blocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop if there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
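
/* As a rough sketch (simplified; the real logic lives in linux_nat_wait
   and its helpers below), the sync-mode scheme described above amounts
   to a loop of this shape:

     for (;;)
       {
         pid = my_waitpid (-1, &status, WNOHANG);
         if (pid <= 0)
           pid = my_waitpid (-1, &status, WNOHANG | __WCLONE);
         if (pid > 0)
           break;
         sigsuspend (&suspend_mask);   /* Wait for SIGCHLD.  */
       }

   with SIGCHLD blocked everywhere except inside the sigsuspend call.  */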

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

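/* For illustration: with PTRACE_O_TRACESYSGOOD in effect, a syscall
   enter/exit stop reported by waitpid can be recognized with roughly

     WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP

   which is how the status is interpreted by status_to_str below.  */
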
/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

static int debug_linux_nat_async = 0;
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file,
                    _("Debugging of GNU/Linux async lwp module is %s.\n"),
                    value);
}

static int disable_randomization = 1;

static void
show_disable_randomization (struct ui_file *file, int from_tty,
                            struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file,
                    _("Disabling randomization of debuggee's "
                      "virtual address space is %s.\n"),
                    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("Disabling randomization of debuggee's "
                    "virtual address space is unsupported on\n"
                    "this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}

static void
set_disable_randomization (char *args, int from_tty,
                           struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("Disabling randomization of debuggee's "
           "virtual address space is unsupported on\n"
           "this platform."));
#endif /* !HAVE_PERSONALITY */
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Async mode support.  */

/* Zero if the async mode, although enabled, is masked, which means
   linux_nat_wait should behave as if async mode was off.  */
static int linux_nat_async_mask_value = 1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

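/* A minimal sketch of how this pair is meant to be used: the SIGCHLD
   handler that the file comment above refers to needs to do little
   more than

     if (signo == SIGCHLD && linux_nat_event_pipe[0] != -1)
       async_file_mark ();   /* Let the event loop know.  */

   while the wait-side code drains the pipe with async_file_flush once
   it starts processing events.  */
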
static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int linux_nat_async_mask (int mask);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}

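/* The intended usage pattern, as a sketch: a stop that arrives for a
   child GDB is not yet tracking can be parked with

     linux_record_stopped_pid (pid, status);

   and later claimed, by pid, with

     pull_pid_from_list (&stopped_pids, pid, &status);  */
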
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
           status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          restore_child_signals_mask (&prev_mask);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed "
                   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected "
                   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: "
                       "failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

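/* For illustration: once PTRACE_O_TRACEFORK and friends are set, the
   corresponding extended events show up as stops whose extra code sits
   in the high bits of the waitpid status, e.g.

     WIFSTOPPED (status) && status >> 16 == PTRACE_EVENT_FORK

   which is the test linux_test_for_tracefork above relies on.  */
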
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
  linux_enable_tracesysgood (ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep the breakpoints list in sync.  */
              remove_breakpoints_pid (GET_PID (inferior_ptid));
            }

          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from "
                                "child process %d.\n",
                                child_pid);
            }

          ptrace (PTRACE_DETACH, child_pid, 0, 0);
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_pid);

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = ptid_build (child_pid, child_pid, 0);
          add_thread (inferior_ptid);
          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->resumed = 1;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (solib-svr4) learn about
                 this new process, relocate the cloned exec, pull in
                 shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct lwp_info *lp;
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          lp = find_lwp_pid (pid_to_ptid (parent_pid));
          gdb_assert (linux_supports_tracefork_flag >= 0);
          if (linux_supports_tracevforkdone (0))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);

              lp->stopped = 1;
              lp->resumed = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypasses actually
                 resuming the inferior.  */
              lp->status = 0;
              lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              lp->stopped = 0;
              lp->resumed = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_can_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          if (has_vforked)
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "vfork to child process %d.\n"),
                              parent_pid, child_pid);
          else
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "fork to child process %d.\n"),
                              parent_pid, child_pid);
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      lp = add_lwp (inferior_ptid);
      lp->stopped = 1;
      lp->resumed = 1;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (solib-svr4) learn about
             this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = target_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
                  strsignal (SIGTRAP));
      else
        snprintf (buf, sizeof (buf), "%s (stopped)",
                  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          xfree (lp);
        }
      else
        lpprev = lp;
    }
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   int (*callback) (struct lwp_info *, void *),
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data))
            return lp;
        }
    }

  return NULL;
}

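/* For example (sketch), stopping every LWP belonging to process PID
   can be expressed as

     iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);

   since a pid-only ptid acts as a filter that matches all LWPs of
   that process.  */
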
/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Return an lwp's tgid, found in `/proc/PID/status'.  */

int
linux_proc_get_tgid (int lwpid)
{
  FILE *status_file;
  char buf[100];
  int tgid = -1;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "Tgid:", 5) == 0)
            {
              tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10);
              break;
            }
        }

      fclose (status_file);
    }

  return tgid;
}

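/* For illustration: `/proc/<lwpid>/status' contains a line of the form
   "Tgid:\t<pid>", so for a hypothetical LWP 1234 belonging to thread
   group 1200, linux_proc_get_tgid (1234) would return 1200, and -1 if
   the file can't be read or the line is missing.  */
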
/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "State:", 6) == 0)
            {
              have_state = 1;
              break;
            }
        }
      if (have_state && strstr (buf, "T (stopped)") != NULL)
        retval = 1;
      fclose (status_file);
    }
  return retval;
}

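/* A hypothetical `/proc/PID/status' excerpt, to make the two helpers
   above concrete:

     Name:   sleep
     State:  T (stopped)
     Tgid:   1200
     Pid:    1234

   pid_is_stopped would return 1 for this process (a job-control stop),
   but 0 if the State line read "T (tracing stop)" instead.  */
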
1361/* Wait for the LWP specified by LP, which we have just attached to.
1362 Returns a wait status for that LWP, to cache. */
1363
1364static int
1365linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1366 int *signalled)
1367{
1368 pid_t new_pid, pid = GET_LWP (ptid);
1369 int status;
1370
1371 if (pid_is_stopped (pid))
1372 {
1373 if (debug_linux_nat)
1374 fprintf_unfiltered (gdb_stdlog,
1375 "LNPAW: Attaching to a stopped process\n");
1376
1377 /* The process is definitely stopped. It is in a job control
1378 stop, unless the kernel predates the TASK_STOPPED /
1379 TASK_TRACED distinction, in which case it might be in a
1380 ptrace stop. Make sure it is in a ptrace stop; from there we
1381 can kill it, signal it, et cetera.
1382
1383 First make sure there is a pending SIGSTOP. Since we are
1384 already attached, the process can not transition from stopped
1385 to running without a PTRACE_CONT; so we know this signal will
1386 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1387 probably already in the queue (unless this kernel is old
1388 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1389 is not an RT signal, it can only be queued once. */
1390 kill_lwp (pid, SIGSTOP);
1391
1392 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1393 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1394 ptrace (PTRACE_CONT, pid, 0, 0);
1395 }
1396
1397 /* Make sure the initial process is stopped. The user-level threads
1398 layer might want to poke around in the inferior, and that won't
1399 work if things haven't stabilized yet. */
1400 new_pid = my_waitpid (pid, &status, 0);
1401 if (new_pid == -1 && errno == ECHILD)
1402 {
1403 if (first)
1404 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1405
1406 /* Try again with __WCLONE to check cloned processes. */
1407 new_pid = my_waitpid (pid, &status, __WCLONE);
1408 *cloned = 1;
1409 }
1410
dacc9cb2
PP
1411 gdb_assert (pid == new_pid);
1412
1413 if (!WIFSTOPPED (status))
1414 {
1415 /* The pid we tried to attach has apparently just exited. */
1416 if (debug_linux_nat)
1417 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1418 pid, status_to_str (status));
1419 return status;
1420 }
a0ef4274
DJ
1421
1422 if (WSTOPSIG (status) != SIGSTOP)
1423 {
1424 *signalled = 1;
1425 if (debug_linux_nat)
1426 fprintf_unfiltered (gdb_stdlog,
1427 "LNPAW: Received %s after attaching\n",
1428 status_to_str (status));
1429 }
1430
1431 return status;
1432}
1433
1434/* Attach to the LWP specified by PID. Return 0 if successful or -1
1435 if the new LWP could not be attached. */
d6b0e80f 1436
9ee57c33 1437int
93815fbf 1438lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1439{
9ee57c33 1440 struct lwp_info *lp;
7feb7d06 1441 sigset_t prev_mask;
d6b0e80f
AC
1442
1443 gdb_assert (is_lwp (ptid));
1444
7feb7d06 1445 block_child_signals (&prev_mask);
d6b0e80f 1446
9ee57c33 1447 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1448
1449 /* We assume that we're already attached to any LWP that has an id
1450 equal to the overall process id, and to any LWP that is already
1451 in our list of LWPs. If we're not seeing exit events from threads
1452 and we've had PID wraparound since we last tried to stop all threads,
1453 this assumption might be wrong; fortunately, this is very unlikely
1454 to happen. */
9ee57c33 1455 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1456 {
a0ef4274 1457 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1458
1459 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1460 {
1461 /* If we fail to attach to the thread, issue a warning,
1462 but continue. One way this can happen is if thread
e9efe249 1463 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1464 bug may place threads in the thread list and then fail
1465 to create them. */
1466 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1467 safe_strerror (errno));
7feb7d06 1468 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1469 return -1;
1470 }
1471
d6b0e80f
AC
1472 if (debug_linux_nat)
1473 fprintf_unfiltered (gdb_stdlog,
1474 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1475 target_pid_to_str (ptid));
1476
a0ef4274 1477 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1478 if (!WIFSTOPPED (status))
673c2bbe
DE
1479 {
1480 restore_child_signals_mask (&prev_mask);
1481 return -1;
1482 }
dacc9cb2 1483
a0ef4274
DJ
1484 lp = add_lwp (ptid);
1485 lp->stopped = 1;
1486 lp->cloned = cloned;
1487 lp->signalled = signalled;
1488 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1489 {
a0ef4274
DJ
1490 lp->resumed = 1;
1491 lp->status = status;
d6b0e80f
AC
1492 }
1493
a0ef4274 1494 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1495
1496 if (debug_linux_nat)
1497 {
1498 fprintf_unfiltered (gdb_stdlog,
1499 "LLAL: waitpid %s received %s\n",
1500 target_pid_to_str (ptid),
1501 status_to_str (status));
1502 }
1503 }
1504 else
1505 {
1506 /* We assume that the LWP representing the original process is
1507 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1508 that the GNU/linux ptrace layer uses to keep track of
1509 threads. Note that this won't have already been done since
1510 the main thread will have, we assume, been stopped by an
1511 attach from a different layer. */
9ee57c33
DJ
1512 if (lp == NULL)
1513 lp = add_lwp (ptid);
d6b0e80f
AC
1514 lp->stopped = 1;
1515 }
9ee57c33 1516
7feb7d06 1517 restore_child_signals_mask (&prev_mask);
9ee57c33 1518 return 0;
d6b0e80f
AC
1519}
1520
b84876c2 1521static void
136d6dae
VP
1522linux_nat_create_inferior (struct target_ops *ops,
1523 char *exec_file, char *allargs, char **env,
b84876c2
PA
1524 int from_tty)
1525{
10568435
JK
1526#ifdef HAVE_PERSONALITY
1527 int personality_orig = 0, personality_set = 0;
1528#endif /* HAVE_PERSONALITY */
b84876c2
PA
1529
1530 /* The fork_child mechanism is synchronous and calls target_wait, so
1531 we have to mask the async mode. */
1532
10568435
JK
1533#ifdef HAVE_PERSONALITY
1534 if (disable_randomization)
1535 {
1536 errno = 0;
1537 personality_orig = personality (0xffffffff);
1538 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1539 {
1540 personality_set = 1;
1541 personality (personality_orig | ADDR_NO_RANDOMIZE);
1542 }
1543 if (errno != 0 || (personality_set
1544 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1545 warning (_("Error disabling address space randomization: %s"),
1546 safe_strerror (errno));
1547 }
1548#endif /* HAVE_PERSONALITY */
1549
2455069d
UW
1550 /* Make sure we report all signals during startup. */
1551 linux_nat_pass_signals (0, NULL);
1552
136d6dae 1553 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1554
10568435
JK
1555#ifdef HAVE_PERSONALITY
1556 if (personality_set)
1557 {
1558 errno = 0;
1559 personality (personality_orig);
1560 if (errno != 0)
1561 warning (_("Error restoring address space randomization: %s"),
1562 safe_strerror (errno));
1563 }
1564#endif /* HAVE_PERSONALITY */
b84876c2
PA
1565}
1566
d6b0e80f 1567static void
136d6dae 1568linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1569{
1570 struct lwp_info *lp;
d6b0e80f 1571 int status;
af990527 1572 ptid_t ptid;
d6b0e80f 1573
2455069d
UW
1574 /* Make sure we report all signals during attach. */
1575 linux_nat_pass_signals (0, NULL);
1576
136d6dae 1577 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1578
af990527
PA
1579 /* The ptrace base target adds the main thread with (pid,0,0)
1580 format. Decorate it with lwp info. */
1581 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1582 thread_change_ptid (inferior_ptid, ptid);
1583
9f0bdab8 1584 /* Add the initial process as the first LWP to the list. */
af990527 1585 lp = add_lwp (ptid);
a0ef4274
DJ
1586
1587 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1588 &lp->signalled);
dacc9cb2
PP
1589 if (!WIFSTOPPED (status))
1590 {
1591 if (WIFEXITED (status))
1592 {
1593 int exit_code = WEXITSTATUS (status);
1594
1595 target_terminal_ours ();
1596 target_mourn_inferior ();
1597 if (exit_code == 0)
1598 error (_("Unable to attach: program exited normally."));
1599 else
1600 error (_("Unable to attach: program exited with code %d."),
1601 exit_code);
1602 }
1603 else if (WIFSIGNALED (status))
1604 {
1605 enum target_signal signo;
1606
1607 target_terminal_ours ();
1608 target_mourn_inferior ();
1609
1610 signo = target_signal_from_host (WTERMSIG (status));
1611 error (_("Unable to attach: program terminated with signal "
1612 "%s, %s."),
1613 target_signal_to_name (signo),
1614 target_signal_to_string (signo));
1615 }
1616
1617 internal_error (__FILE__, __LINE__,
1618 _("unexpected status %d for PID %ld"),
1619 status, (long) GET_LWP (ptid));
1620 }
1621
a0ef4274 1622 lp->stopped = 1;
9f0bdab8 1623
a0ef4274 1624 /* Save the wait status to report later. */
d6b0e80f 1625 lp->resumed = 1;
a0ef4274
DJ
1626 if (debug_linux_nat)
1627 fprintf_unfiltered (gdb_stdlog,
1628 "LNA: waitpid %ld, saving status %s\n",
1629 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1630
7feb7d06
PA
1631 lp->status = status;
1632
1633 if (target_can_async_p ())
1634 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1635}
1636
a0ef4274
DJ
1637/* Get pending status of LP. */
1638static int
1639get_pending_status (struct lwp_info *lp, int *status)
1640{
ca2163eb
PA
1641 enum target_signal signo = TARGET_SIGNAL_0;
1642
1643 /* If we paused threads momentarily, we may have stored pending
1644 events in lp->status or lp->waitstatus (see stop_wait_callback),
1645 and GDB core hasn't seen any signal for those threads.
1646 Otherwise, the last signal reported to the core is found in the
1647 thread object's stop_signal.
1648
1649 There's a corner case that isn't handled here at present. Only
1650 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1651 stop_signal make sense as a real signal to pass to the inferior.
1652 Some catchpoint related events, like
1653 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1654 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1655 those traps are debug API (ptrace in our case) related and
1656 induced; the inferior wouldn't see them if it wasn't being
1657 traced. Hence, we should never pass them to the inferior, even
1658 when set to pass state. Since this corner case isn't handled by
1659 infrun.c when proceeding with a signal, for consistency, neither
1660 do we handle it here (or elsewhere in the file we check for
1661 signal pass state). Normally SIGTRAP isn't set to pass state, so
1662 this is really a corner case. */
1663
1664 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1665 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1666 else if (lp->status)
1667 signo = target_signal_from_host (WSTOPSIG (lp->status));
1668 else if (non_stop && !is_executing (lp->ptid))
1669 {
1670 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1671
16c381f0 1672 signo = tp->suspend.stop_signal;
ca2163eb
PA
1673 }
1674 else if (!non_stop)
a0ef4274 1675 {
ca2163eb
PA
1676 struct target_waitstatus last;
1677 ptid_t last_ptid;
4c28f408 1678
ca2163eb 1679 get_last_target_status (&last_ptid, &last);
4c28f408 1680
ca2163eb
PA
1681 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1682 {
e09875d4 1683 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1684
16c381f0 1685 signo = tp->suspend.stop_signal;
4c28f408 1686 }
ca2163eb 1687 }
4c28f408 1688
ca2163eb 1689 *status = 0;
4c28f408 1690
ca2163eb
PA
1691 if (signo == TARGET_SIGNAL_0)
1692 {
1693 if (debug_linux_nat)
1694 fprintf_unfiltered (gdb_stdlog,
1695 "GPT: lwp %s has no pending signal\n",
1696 target_pid_to_str (lp->ptid));
1697 }
1698 else if (!signal_pass_state (signo))
1699 {
1700 if (debug_linux_nat)
3e43a32a
MS
1701 fprintf_unfiltered (gdb_stdlog,
1702 "GPT: lwp %s had signal %s, "
1703 "but it is in no pass state\n",
ca2163eb
PA
1704 target_pid_to_str (lp->ptid),
1705 target_signal_to_string (signo));
a0ef4274 1706 }
a0ef4274 1707 else
4c28f408 1708 {
ca2163eb
PA
1709 *status = W_STOPCODE (target_signal_to_host (signo));
1710
1711 if (debug_linux_nat)
1712 fprintf_unfiltered (gdb_stdlog,
1713 "GPT: lwp %s has pending signal %s\n",
1714 target_pid_to_str (lp->ptid),
1715 target_signal_to_string (signo));
4c28f408 1716 }
a0ef4274
DJ
1717
1718 return 0;
1719}
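
   A minimal standalone sketch (not GDB code) of the W_STOPCODE encoding used
   above; it assumes the common libc definition, with the same fallback that
   GDB's gdb_wait.h provides, and shows that the standard waitpid macros
   recover the signal later:

#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
#define W_STOPCODE(sig) ((sig) << 8 | 0x7f)	/* fallback, as in gdb_wait.h */
#endif

static void
w_stopcode_roundtrip (void)
{
  /* Encode "stopped by SIGINT" the way get_pending_status does.  */
  int status = W_STOPCODE (SIGINT);

  /* The standard macros decode it back.  */
  assert (WIFSTOPPED (status));
  assert (WSTOPSIG (status) == SIGINT);
}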
1720
d6b0e80f
AC
1721static int
1722detach_callback (struct lwp_info *lp, void *data)
1723{
1724 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1725
1726 if (debug_linux_nat && lp->status)
1727 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1728 strsignal (WSTOPSIG (lp->status)),
1729 target_pid_to_str (lp->ptid));
1730
a0ef4274
DJ
1731 /* If there is a pending SIGSTOP, get rid of it. */
1732 if (lp->signalled)
d6b0e80f 1733 {
d6b0e80f
AC
1734 if (debug_linux_nat)
1735 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1736 "DC: Sending SIGCONT to %s\n",
1737 target_pid_to_str (lp->ptid));
d6b0e80f 1738
a0ef4274 1739 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1740 lp->signalled = 0;
d6b0e80f
AC
1741 }
1742
1743 /* We don't actually detach from the LWP that has an id equal to the
1744 overall process id just yet. */
1745 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1746 {
a0ef4274
DJ
1747 int status = 0;
1748
1749 /* Pass on any pending signal for this LWP. */
1750 get_pending_status (lp, &status);
1751
d6b0e80f
AC
1752 errno = 0;
1753 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1754 WSTOPSIG (status)) < 0)
8a3fe4f8 1755 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1756 safe_strerror (errno));
1757
1758 if (debug_linux_nat)
1759 fprintf_unfiltered (gdb_stdlog,
1760 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1761 target_pid_to_str (lp->ptid),
7feb7d06 1762 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1763
1764 delete_lwp (lp->ptid);
1765 }
1766
1767 return 0;
1768}
1769
1770static void
136d6dae 1771linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1772{
b84876c2 1773 int pid;
a0ef4274 1774 int status;
d90e17a7
PA
1775 struct lwp_info *main_lwp;
1776
1777 pid = GET_PID (inferior_ptid);
a0ef4274 1778
b84876c2
PA
1779 if (target_can_async_p ())
1780 linux_nat_async (NULL, 0);
1781
4c28f408
PA
1782 /* Stop all threads before detaching. ptrace requires that the
1783 thread is stopped to successfully detach. */
d90e17a7 1784 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1785 /* ... and wait until all of them have reported back that
1786 they're no longer running. */
d90e17a7 1787 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1788
d90e17a7 1789 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1790
1791 /* Only the initial process should be left right now. */
d90e17a7
PA
1792 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1793
1794 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1795
a0ef4274
DJ
1796 /* Pass on any pending signal for the last LWP. */
1797 if ((args == NULL || *args == '\0')
d90e17a7 1798 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1799 && WIFSTOPPED (status))
1800 {
1801 /* Put the signal number in ARGS so that inf_ptrace_detach will
1802 pass it along with PTRACE_DETACH. */
1803 args = alloca (8);
1804 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1805 if (debug_linux_nat)
1806 fprintf_unfiltered (gdb_stdlog,
1807 "LND: Sending signal %s to %s\n",
1808 args,
1809 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1810 }
1811
d90e17a7 1812 delete_lwp (main_lwp->ptid);
b84876c2 1813
7a7d3353
PA
1814 if (forks_exist_p ())
1815 {
1816 /* Multi-fork case. The current inferior_ptid is being detached
1817 from, but there are other viable forks to debug. Detach from
1818 the current fork, and context-switch to the first
1819 available. */
1820 linux_fork_detach (args, from_tty);
1821
1822 if (non_stop && target_can_async_p ())
1823 target_async (inferior_event_handler, 0);
1824 }
1825 else
1826 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1827}
1828
1829/* Resume LP. */
1830
1831static int
1832resume_callback (struct lwp_info *lp, void *data)
1833{
6c95b8df
PA
1834 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1835
1836 if (lp->stopped && inf->vfork_child != NULL)
1837 {
1838 if (debug_linux_nat)
1839 fprintf_unfiltered (gdb_stdlog,
1840 "RC: Not resuming %s (vfork parent)\n",
1841 target_pid_to_str (lp->ptid));
1842 }
1843 else if (lp->stopped && lp->status == 0)
d6b0e80f 1844 {
d90e17a7
PA
1845 if (debug_linux_nat)
1846 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1847 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
d90e17a7
PA
1848 target_pid_to_str (lp->ptid));
1849
28439f5e
PA
1850 linux_ops->to_resume (linux_ops,
1851 pid_to_ptid (GET_LWP (lp->ptid)),
a289b8f6 1852 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1853 if (debug_linux_nat)
1854 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1855 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
d6b0e80f
AC
1856 target_pid_to_str (lp->ptid));
1857 lp->stopped = 0;
a289b8f6 1858 lp->step = 0;
9f0bdab8 1859 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1860 lp->stopped_by_watchpoint = 0;
d6b0e80f 1861 }
57380f4e 1862 else if (lp->stopped && debug_linux_nat)
3e43a32a
MS
1863 fprintf_unfiltered (gdb_stdlog,
1864 "RC: Not resuming sibling %s (has pending)\n",
57380f4e
DJ
1865 target_pid_to_str (lp->ptid));
1866 else if (debug_linux_nat)
3e43a32a
MS
1867 fprintf_unfiltered (gdb_stdlog,
1868 "RC: Not resuming sibling %s (not stopped)\n",
57380f4e 1869 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1870
1871 return 0;
1872}
1873
1874static int
1875resume_clear_callback (struct lwp_info *lp, void *data)
1876{
1877 lp->resumed = 0;
1878 return 0;
1879}
1880
1881static int
1882resume_set_callback (struct lwp_info *lp, void *data)
1883{
1884 lp->resumed = 1;
1885 return 0;
1886}
1887
1888static void
28439f5e
PA
1889linux_nat_resume (struct target_ops *ops,
1890 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1891{
7feb7d06 1892 sigset_t prev_mask;
d6b0e80f 1893 struct lwp_info *lp;
d90e17a7 1894 int resume_many;
d6b0e80f 1895
76f50ad1
DJ
1896 if (debug_linux_nat)
1897 fprintf_unfiltered (gdb_stdlog,
1898 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1899 step ? "step" : "resume",
1900 target_pid_to_str (ptid),
423ec54c
JK
1901 (signo != TARGET_SIGNAL_0
1902 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1903 target_pid_to_str (inferior_ptid));
1904
7feb7d06 1905 block_child_signals (&prev_mask);
b84876c2 1906
d6b0e80f 1907 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1908 resume_many = (ptid_equal (minus_one_ptid, ptid)
1909 || ptid_is_pid (ptid));
4c28f408 1910
e3e9f5a2
PA
1911 /* Mark the lwps we're resuming as resumed. */
1912 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1913
d90e17a7
PA
1914 /* See if it's the current inferior that should be handled
1915 specially. */
1916 if (resume_many)
1917 lp = find_lwp_pid (inferior_ptid);
1918 else
1919 lp = find_lwp_pid (ptid);
9f0bdab8 1920 gdb_assert (lp != NULL);
d6b0e80f 1921
9f0bdab8
DJ
1922 /* Remember if we're stepping. */
1923 lp->step = step;
d6b0e80f 1924
9f0bdab8
DJ
1925 /* If we have a pending wait status for this thread, there is no
1926 point in resuming the process. But first make sure that
1927 linux_nat_wait won't preemptively handle the event - we
1928 should never take this short-circuit if we are going to
1929 leave LP running, since we have skipped resuming all the
1930 other threads. This bit of code needs to be synchronized
1931 with linux_nat_wait. */
76f50ad1 1932
9f0bdab8
DJ
1933 if (lp->status && WIFSTOPPED (lp->status))
1934 {
2455069d
UW
1935 if (!lp->step
1936 && WSTOPSIG (lp->status)
1937 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1938 {
9f0bdab8
DJ
1939 if (debug_linux_nat)
1940 fprintf_unfiltered (gdb_stdlog,
1941 "LLR: Not short circuiting for ignored "
1942 "status 0x%x\n", lp->status);
1943
d6b0e80f
AC
1944 /* FIXME: What should we do if we are supposed to continue
1945 this thread with a signal? */
1946 gdb_assert (signo == TARGET_SIGNAL_0);
2455069d 1947 signo = target_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1948 lp->status = 0;
1949 }
1950 }
76f50ad1 1951
6c95b8df 1952 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1953 {
1954 /* FIXME: What should we do if we are supposed to continue
1955 this thread with a signal? */
1956 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1957
9f0bdab8
DJ
1958 if (debug_linux_nat)
1959 fprintf_unfiltered (gdb_stdlog,
1960 "LLR: Short circuiting for status 0x%x\n",
1961 lp->status);
d6b0e80f 1962
7feb7d06
PA
1963 restore_child_signals_mask (&prev_mask);
1964 if (target_can_async_p ())
1965 {
1966 target_async (inferior_event_handler, 0);
1967 /* Tell the event loop we have something to process. */
1968 async_file_mark ();
1969 }
9f0bdab8 1970 return;
d6b0e80f
AC
1971 }
1972
9f0bdab8
DJ
1973 /* Mark LWP as not stopped to prevent it from being continued by
1974 resume_callback. */
1975 lp->stopped = 0;
1976
d90e17a7
PA
1977 if (resume_many)
1978 iterate_over_lwps (ptid, resume_callback, NULL);
1979
1980 /* Convert to something the lower layer understands. */
1981 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1982
28439f5e 1983 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 1984 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1985 lp->stopped_by_watchpoint = 0;
9f0bdab8 1986
d6b0e80f
AC
1987 if (debug_linux_nat)
1988 fprintf_unfiltered (gdb_stdlog,
1989 "LLR: %s %s, %s (resume event thread)\n",
1990 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1991 target_pid_to_str (ptid),
423ec54c
JK
1992 (signo != TARGET_SIGNAL_0
1993 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 1994
7feb7d06 1995 restore_child_signals_mask (&prev_mask);
b84876c2 1996 if (target_can_async_p ())
8ea051c5 1997 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1998}
1999
c5f62d5f 2000/* Send a signal to an LWP. */
d6b0e80f
AC
2001
2002static int
2003kill_lwp (int lwpid, int signo)
2004{
c5f62d5f
DE
2005 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2006 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2007
2008#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2009 {
2010 static int tkill_failed;
2011
2012 if (!tkill_failed)
2013 {
2014 int ret;
2015
2016 errno = 0;
2017 ret = syscall (__NR_tkill, lwpid, signo);
2018 if (errno != ENOSYS)
2019 return ret;
2020 tkill_failed = 1;
2021 }
2022 }
d6b0e80f
AC
2023#endif
2024
2025 return kill (lwpid, signo);
2026}
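
   As an aside, and purely as a hedged sketch (hypothetical helper, not part
   of this file): modern kernels also offer tgkill, which takes the
   thread-group id as well, so a recycled TID belonging to another process
   cannot be signalled by accident.  Assumes <sys/syscall.h> defines
   SYS_tgkill.

#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical helper: deliver SIGNO to thread TID of process TGID.  */
static int
kill_one_thread (pid_t tgid, pid_t tid, int signo)
{
  return (int) syscall (SYS_tgkill, tgid, tid, signo);
}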
2027
ca2163eb
PA
2028/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2029 event, check if the core is interested in it: if not, ignore the
2030 event, and keep waiting; otherwise, we need to toggle the LWP's
2031 syscall entry/exit status, since the ptrace event itself doesn't
2032 indicate it, and report the trap to higher layers. */
2033
2034static int
2035linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2036{
2037 struct target_waitstatus *ourstatus = &lp->waitstatus;
2038 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2039 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2040
2041 if (stopping)
2042 {
2043 /* If we're stopping threads, there's a SIGSTOP pending, which
2044 makes it so that the LWP reports an immediate syscall return,
2045 followed by the SIGSTOP. Skip seeing that "return" using
2046 PTRACE_CONT directly, and let stop_wait_callback collect the
2047 SIGSTOP. Later, when the thread is resumed, a new syscall
2048 entry event is reported. If we didn't do this (and returned 0), we'd
2049 leave a syscall entry pending, and our caller, by using
2050 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2051 itself. Later, when the user re-resumes this LWP, we'd see
2052 another syscall entry event and we'd mistake it for a return.
2053
2054 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2055 (leaving immediately with LWP->signalled set, without issuing
2056 a PTRACE_CONT), it would still be problematic to leave this
2057 syscall enter pending, as later when the thread is resumed,
2058 it would then see the same syscall exit mentioned above,
2059 followed by the delayed SIGSTOP, while the syscall didn't
2060 actually get to execute. It seems it would be even more
2061 confusing to the user. */
2062
2063 if (debug_linux_nat)
2064 fprintf_unfiltered (gdb_stdlog,
2065 "LHST: ignoring syscall %d "
2066 "for LWP %ld (stopping threads), "
2067 "resuming with PTRACE_CONT for SIGSTOP\n",
2068 syscall_number,
2069 GET_LWP (lp->ptid));
2070
2071 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2072 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2073 return 1;
2074 }
2075
2076 if (catch_syscall_enabled ())
2077 {
2078 /* Always update the entry/return state, even if this particular
2079 syscall isn't interesting to the core now. In async mode,
2080 the user could install a new catchpoint for this syscall
2081 between syscall enter/return, and we'll need to know to
2082 report a syscall return if that happens. */
2083 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2084 ? TARGET_WAITKIND_SYSCALL_RETURN
2085 : TARGET_WAITKIND_SYSCALL_ENTRY);
2086
2087 if (catching_syscall_number (syscall_number))
2088 {
2089 /* Alright, an event to report. */
2090 ourstatus->kind = lp->syscall_state;
2091 ourstatus->value.syscall_number = syscall_number;
2092
2093 if (debug_linux_nat)
2094 fprintf_unfiltered (gdb_stdlog,
2095 "LHST: stopping for %s of syscall %d"
2096 " for LWP %ld\n",
3e43a32a
MS
2097 lp->syscall_state
2098 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2099 ? "entry" : "return",
2100 syscall_number,
2101 GET_LWP (lp->ptid));
2102 return 0;
2103 }
2104
2105 if (debug_linux_nat)
2106 fprintf_unfiltered (gdb_stdlog,
2107 "LHST: ignoring %s of syscall %d "
2108 "for LWP %ld\n",
2109 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2110 ? "entry" : "return",
2111 syscall_number,
2112 GET_LWP (lp->ptid));
2113 }
2114 else
2115 {
2116 /* If we had been syscall tracing, and hence used PT_SYSCALL
2117 before on this LWP, it could happen that the user removes all
2118 syscall catchpoints before we get to process this event.
2119 There are two noteworthy issues here:
2120
2121 - When stopped at a syscall entry event, resuming with
2122 PT_STEP still resumes executing the syscall and reports a
2123 syscall return.
2124
2125 - Only PT_SYSCALL catches syscall enters. If we last
2126 single-stepped this thread, then this event can't be a
2127 syscall enter; since we last single-stepped it, this
2128 has to be a syscall exit.
2129
2130 The points above mean that the next resume, be it PT_STEP or
2131 PT_CONTINUE, can not trigger a syscall trace event. */
2132 if (debug_linux_nat)
2133 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2134 "LHST: caught syscall event "
2135 "with no syscall catchpoints."
ca2163eb
PA
2136 " %d for LWP %ld, ignoring\n",
2137 syscall_number,
2138 GET_LWP (lp->ptid));
2139 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2140 }
2141
2142 /* The core isn't interested in this event. For efficiency, avoid
2143 stopping all threads only to have the core resume them all again.
2144 Since we're not stopping threads, if we're still syscall tracing
2145 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2146 subsequent syscall. Simply resume using the inf-ptrace layer,
2147 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2148
2149 /* Note that gdbarch_get_syscall_number may access registers, hence
2150 fill a regcache. */
2151 registers_changed ();
2152 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2153 lp->step, TARGET_SIGNAL_0);
2154 return 1;
2155}
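
   For context, a small sketch (not GDB code) of how a sysgood syscall stop
   is recognized in the first place: with the PTRACE_O_TRACESYSGOOD option
   set, the kernel reports the stop signal as SIGTRAP | 0x80, which is what
   the SYSCALL_SIGTRAP checks in this file's callers test for.

#include <signal.h>
#include <sys/wait.h>

/* Non-zero if STATUS describes a PTRACE_SYSCALL stop on an LWP that
   had the PTRACE_O_TRACESYSGOOD option set.  */
static int
is_syscall_stop (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80);
}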
2156
3d799a95
DJ
2157/* Handle a GNU/Linux extended wait response. If we see a clone
2158 event, we need to add the new LWP to our list (and not report the
2159 trap to higher layers). This function returns non-zero if the
2160 event should be ignored and we should wait again. If STOPPING is
2161 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2162
2163static int
3d799a95
DJ
2164linux_handle_extended_wait (struct lwp_info *lp, int status,
2165 int stopping)
d6b0e80f 2166{
3d799a95
DJ
2167 int pid = GET_LWP (lp->ptid);
2168 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2169 int event = status >> 16;
d6b0e80f 2170
3d799a95
DJ
2171 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2172 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2173 {
3d799a95
DJ
2174 unsigned long new_pid;
2175 int ret;
2176
2177 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2178
3d799a95
DJ
2179 /* If we haven't already seen the new PID stop, wait for it now. */
2180 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2181 {
2182 /* The new child has a pending SIGSTOP. We can't affect it until it
2183 hits the SIGSTOP, but we're already attached. */
2184 ret = my_waitpid (new_pid, &status,
2185 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2186 if (ret == -1)
2187 perror_with_name (_("waiting for new child"));
2188 else if (ret != new_pid)
2189 internal_error (__FILE__, __LINE__,
2190 _("wait returned unexpected PID %d"), ret);
2191 else if (!WIFSTOPPED (status))
2192 internal_error (__FILE__, __LINE__,
2193 _("wait returned unexpected status 0x%x"), status);
2194 }
2195
3a3e9ee3 2196 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2197
2277426b
PA
2198 if (event == PTRACE_EVENT_FORK
2199 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2200 {
2277426b
PA
2201 /* Handle checkpointing by linux-fork.c here as a special
2202 case. We don't want the follow-fork-mode or 'catch fork'
2203 to interfere with this. */
2204
2205 /* This won't actually modify the breakpoint list, but will
2206 physically remove the breakpoints from the child. */
2207 detach_breakpoints (new_pid);
2208
2209 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2210 if (!find_fork_pid (new_pid))
2211 add_fork (new_pid);
2277426b
PA
2212
2213 /* Report as spurious, so that infrun doesn't want to follow
2214 this fork. We're actually doing an infcall in
2215 linux-fork.c. */
2216 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2217 linux_enable_event_reporting (pid_to_ptid (new_pid));
2218
2219 /* Report the stop to the core. */
2220 return 0;
2221 }
2222
3d799a95
DJ
2223 if (event == PTRACE_EVENT_FORK)
2224 ourstatus->kind = TARGET_WAITKIND_FORKED;
2225 else if (event == PTRACE_EVENT_VFORK)
2226 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2227 else
3d799a95 2228 {
78768c4a
JK
2229 struct lwp_info *new_lp;
2230
3d799a95 2231 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2232
d90e17a7 2233 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2234 new_lp->cloned = 1;
4c28f408 2235 new_lp->stopped = 1;
d6b0e80f 2236
3d799a95
DJ
2237 if (WSTOPSIG (status) != SIGSTOP)
2238 {
2239 /* This can happen if someone starts sending signals to
2240 the new thread before it gets a chance to run, which
2241 have a lower number than SIGSTOP (e.g. SIGUSR1).
2242 This is an unlikely case, and harder to handle for
2243 fork / vfork than for clone, so we do not try - but
2244 we handle it for clone events here. We'll send
2245 the other signal on to the thread below. */
2246
2247 new_lp->signalled = 1;
2248 }
2249 else
2250 status = 0;
d6b0e80f 2251
4c28f408 2252 if (non_stop)
3d799a95 2253 {
4c28f408
PA
2254 /* Add the new thread to GDB's lists as soon as possible
2255 so that:
2256
2257 1) the frontend doesn't have to wait for a stop to
2258 display them, and,
2259
2260 2) we tag it with the correct running state. */
2261
2262 /* If the thread_db layer is active, let it know about
2263 this new thread, and add it to GDB's list. */
2264 if (!thread_db_attach_lwp (new_lp->ptid))
2265 {
2266 /* We're not using thread_db. Add it to GDB's
2267 list. */
2268 target_post_attach (GET_LWP (new_lp->ptid));
2269 add_thread (new_lp->ptid);
2270 }
2271
2272 if (!stopping)
2273 {
2274 set_running (new_lp->ptid, 1);
2275 set_executing (new_lp->ptid, 1);
2276 }
2277 }
2278
ca2163eb
PA
2279 /* Note the need to use the low target ops to resume, to
2280 handle resuming with PT_SYSCALL if we have syscall
2281 catchpoints. */
4c28f408
PA
2282 if (!stopping)
2283 {
423ec54c 2284 enum target_signal signo;
ca2163eb 2285
4c28f408 2286 new_lp->stopped = 0;
3d799a95 2287 new_lp->resumed = 1;
ca2163eb
PA
2288
2289 signo = (status
2290 ? target_signal_from_host (WSTOPSIG (status))
2291 : TARGET_SIGNAL_0);
2292
2293 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2294 0, signo);
3d799a95 2295 }
ad34eb2f
JK
2296 else
2297 {
2298 if (status != 0)
2299 {
2300 /* We created NEW_LP so it cannot yet contain STATUS. */
2301 gdb_assert (new_lp->status == 0);
2302
2303 /* Save the wait status to report later. */
2304 if (debug_linux_nat)
2305 fprintf_unfiltered (gdb_stdlog,
2306 "LHEW: waitpid of new LWP %ld, "
2307 "saving status %s\n",
2308 (long) GET_LWP (new_lp->ptid),
2309 status_to_str (status));
2310 new_lp->status = status;
2311 }
2312 }
d6b0e80f 2313
3d799a95
DJ
2314 if (debug_linux_nat)
2315 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2316 "LHEW: Got clone event "
2317 "from LWP %ld, resuming\n",
3d799a95 2318 GET_LWP (lp->ptid));
ca2163eb
PA
2319 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2320 0, TARGET_SIGNAL_0);
3d799a95
DJ
2321
2322 return 1;
2323 }
2324
2325 return 0;
d6b0e80f
AC
2326 }
2327
3d799a95
DJ
2328 if (event == PTRACE_EVENT_EXEC)
2329 {
a75724bc
PA
2330 if (debug_linux_nat)
2331 fprintf_unfiltered (gdb_stdlog,
2332 "LHEW: Got exec event from LWP %ld\n",
2333 GET_LWP (lp->ptid));
2334
3d799a95
DJ
2335 ourstatus->kind = TARGET_WAITKIND_EXECD;
2336 ourstatus->value.execd_pathname
6d8fd2b7 2337 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2338
6c95b8df
PA
2339 return 0;
2340 }
2341
2342 if (event == PTRACE_EVENT_VFORK_DONE)
2343 {
2344 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2345 {
6c95b8df 2346 if (debug_linux_nat)
3e43a32a
MS
2347 fprintf_unfiltered (gdb_stdlog,
2348 "LHEW: Got expected PTRACE_EVENT_"
2349 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2350 GET_LWP (lp->ptid));
3d799a95 2351
6c95b8df
PA
2352 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2353 return 0;
3d799a95
DJ
2354 }
2355
6c95b8df 2356 if (debug_linux_nat)
3e43a32a
MS
2357 fprintf_unfiltered (gdb_stdlog,
2358 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2359 "from LWP %ld: resuming\n",
6c95b8df
PA
2360 GET_LWP (lp->ptid));
2361 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2362 return 1;
3d799a95
DJ
2363 }
2364
2365 internal_error (__FILE__, __LINE__,
2366 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2367}
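
   A standalone sketch of the decoding the function above performs
   (assumption: the PTRACE_EVENT_* constants come from the system headers;
   GDB itself keeps fallbacks in linux-ptrace.h): the extended event lives in
   bits 16 and up of the wait status, and PTRACE_GETEVENTMSG yields the new
   child's PID for fork/vfork/clone stops.

#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

/* Return the new child's PID if STATUS is a fork/vfork/clone event
   stop reported for PID, else 0.  */
static long
new_child_from_event (pid_t pid, int status)
{
  int event;
  unsigned long new_pid = 0;

  if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGTRAP)
    return 0;

  event = status >> 16;
  if (event == PTRACE_EVENT_FORK
      || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

  return (long) new_pid;
}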
2368
2369/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2370 exited. */
2371
2372static int
2373wait_lwp (struct lwp_info *lp)
2374{
2375 pid_t pid;
2376 int status;
2377 int thread_dead = 0;
2378
2379 gdb_assert (!lp->stopped);
2380 gdb_assert (lp->status == 0);
2381
58aecb61 2382 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
2383 if (pid == -1 && errno == ECHILD)
2384 {
58aecb61 2385 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
2386 if (pid == -1 && errno == ECHILD)
2387 {
2388 /* The thread has previously exited. We need to delete it
2389 now because, for some vendor 2.4 kernels with NPTL
2390 support backported, there won't be an exit event unless
2391 it is the main thread. 2.6 kernels will report an exit
2392 event for each thread that exits, as expected. */
2393 thread_dead = 1;
2394 if (debug_linux_nat)
2395 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2396 target_pid_to_str (lp->ptid));
2397 }
2398 }
2399
2400 if (!thread_dead)
2401 {
2402 gdb_assert (pid == GET_LWP (lp->ptid));
2403
2404 if (debug_linux_nat)
2405 {
2406 fprintf_unfiltered (gdb_stdlog,
2407 "WL: waitpid %s received %s\n",
2408 target_pid_to_str (lp->ptid),
2409 status_to_str (status));
2410 }
2411 }
2412
2413 /* Check if the thread has exited. */
2414 if (WIFEXITED (status) || WIFSIGNALED (status))
2415 {
2416 thread_dead = 1;
2417 if (debug_linux_nat)
2418 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2419 target_pid_to_str (lp->ptid));
2420 }
2421
2422 if (thread_dead)
2423 {
e26af52f 2424 exit_lwp (lp);
d6b0e80f
AC
2425 return 0;
2426 }
2427
2428 gdb_assert (WIFSTOPPED (status));
2429
ca2163eb
PA
2430 /* Handle GNU/Linux's syscall SIGTRAPs. */
2431 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2432 {
2433 /* No longer need the sysgood bit. The ptrace event ends up
2434 recorded in lp->waitstatus if we care for it. We can carry
2435 on handling the event like a regular SIGTRAP from here
2436 on. */
2437 status = W_STOPCODE (SIGTRAP);
2438 if (linux_handle_syscall_trap (lp, 1))
2439 return wait_lwp (lp);
2440 }
2441
d6b0e80f
AC
2442 /* Handle GNU/Linux's extended waitstatus for trace events. */
2443 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2444 {
2445 if (debug_linux_nat)
2446 fprintf_unfiltered (gdb_stdlog,
2447 "WL: Handling extended status 0x%06x\n",
2448 status);
3d799a95 2449 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2450 return wait_lwp (lp);
2451 }
2452
2453 return status;
2454}
2455
9f0bdab8
DJ
2456/* Save the most recent siginfo for LP. This is currently only called
2457 for SIGTRAP; some ports use the si_addr field for
2458 target_stopped_data_address. In the future, it may also be used to
2459 restore the siginfo of requeued signals. */
2460
2461static void
2462save_siginfo (struct lwp_info *lp)
2463{
2464 errno = 0;
2465 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2466 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2467
2468 if (errno != 0)
2469 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2470}
2471
d6b0e80f
AC
2472/* Send a SIGSTOP to LP. */
2473
2474static int
2475stop_callback (struct lwp_info *lp, void *data)
2476{
2477 if (!lp->stopped && !lp->signalled)
2478 {
2479 int ret;
2480
2481 if (debug_linux_nat)
2482 {
2483 fprintf_unfiltered (gdb_stdlog,
2484 "SC: kill %s **<SIGSTOP>**\n",
2485 target_pid_to_str (lp->ptid));
2486 }
2487 errno = 0;
2488 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2489 if (debug_linux_nat)
2490 {
2491 fprintf_unfiltered (gdb_stdlog,
2492 "SC: lwp kill %d %s\n",
2493 ret,
2494 errno ? safe_strerror (errno) : "ERRNO-OK");
2495 }
2496
2497 lp->signalled = 1;
2498 gdb_assert (lp->status == 0);
2499 }
2500
2501 return 0;
2502}
2503
57380f4e 2504/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2505
2506static int
57380f4e
DJ
2507linux_nat_has_pending_sigint (int pid)
2508{
2509 sigset_t pending, blocked, ignored;
57380f4e
DJ
2510
2511 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2512
2513 if (sigismember (&pending, SIGINT)
2514 && !sigismember (&ignored, SIGINT))
2515 return 1;
2516
2517 return 0;
2518}
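
   For the curious, a rough standalone sketch (hypothetical helper, not GDB's
   linux_proc_pending_signals) of where this information ultimately comes
   from: the SigPnd:/ShdPnd: lines of /proc/<pid>/status, which hold the
   per-thread and process-wide pending-signal masks as hexadecimal bitmaps.

#include <stdio.h>
#include <string.h>

/* Return PID's per-thread pending-signal bitmap, or 0 on error.  Bit
   (N - 1) set means signal N is pending; the shared queue would be
   read the same way from the "ShdPnd:" line.  */
static unsigned long long
thread_pending_mask (int pid)
{
  char path[64], line[256];
  unsigned long long mask = 0;
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0)
      {
	sscanf (line + 7, "%llx", &mask);
	break;
      }
  fclose (f);
  return mask;
}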
2519
2520/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2521
2522static int
2523set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2524{
57380f4e
DJ
2525 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2526 flag to consume the next one. */
2527 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2528 && WSTOPSIG (lp->status) == SIGINT)
2529 lp->status = 0;
2530 else
2531 lp->ignore_sigint = 1;
2532
2533 return 0;
2534}
2535
2536/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2537 This function is called after we know the LWP has stopped; if the LWP
2538 stopped before the expected SIGINT was delivered, then it will never have
2539 arrived. Also, if the signal was delivered to a shared queue and consumed
2540 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2541
57380f4e
DJ
2542static void
2543maybe_clear_ignore_sigint (struct lwp_info *lp)
2544{
2545 if (!lp->ignore_sigint)
2546 return;
2547
2548 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2549 {
2550 if (debug_linux_nat)
2551 fprintf_unfiltered (gdb_stdlog,
2552 "MCIS: Clearing bogus flag for %s\n",
2553 target_pid_to_str (lp->ptid));
2554 lp->ignore_sigint = 0;
2555 }
2556}
2557
ebec9a0f
PA
2558/* Fetch the possible triggered data watchpoint info and store it in
2559 LP.
2560
2561 On some archs, like x86, that use debug registers to set
2562 watchpoints, the way to know which watched address trapped is
2563 to check the debug register that selects which address to watch.
2564 The problem is, between setting the watchpoint
2565 and reading back which data address trapped, the user may change
2566 the set of watchpoints, and, as a consequence, GDB changes the
2567 debug registers in the inferior. To avoid reading back a stale
2568 stopped-data-address when that happens, we cache in LP the fact
2569 that a watchpoint trapped, and the corresponding data address, as
2570 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2571 registers meanwhile, we have the cached data we can rely on. */
2572
2573static void
2574save_sigtrap (struct lwp_info *lp)
2575{
2576 struct cleanup *old_chain;
2577
2578 if (linux_ops->to_stopped_by_watchpoint == NULL)
2579 {
2580 lp->stopped_by_watchpoint = 0;
2581 return;
2582 }
2583
2584 old_chain = save_inferior_ptid ();
2585 inferior_ptid = lp->ptid;
2586
2587 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2588
2589 if (lp->stopped_by_watchpoint)
2590 {
2591 if (linux_ops->to_stopped_data_address != NULL)
2592 lp->stopped_data_address_p =
2593 linux_ops->to_stopped_data_address (&current_target,
2594 &lp->stopped_data_address);
2595 else
2596 lp->stopped_data_address_p = 0;
2597 }
2598
2599 do_cleanups (old_chain);
2600}
2601
2602/* See save_sigtrap. */
2603
2604static int
2605linux_nat_stopped_by_watchpoint (void)
2606{
2607 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2608
2609 gdb_assert (lp != NULL);
2610
2611 return lp->stopped_by_watchpoint;
2612}
2613
2614static int
2615linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2616{
2617 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2618
2619 gdb_assert (lp != NULL);
2620
2621 *addr_p = lp->stopped_data_address;
2622
2623 return lp->stopped_data_address_p;
2624}
2625
26ab7092
JK
2626/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2627
2628static int
2629sigtrap_is_event (int status)
2630{
2631 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2632}
2633
2634/* SIGTRAP-like events recognizer. */
2635
2636static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2637
00390b84
JK
2638/* Check for SIGTRAP-like events in LP. */
2639
2640static int
2641linux_nat_lp_status_is_event (struct lwp_info *lp)
2642{
2643 /* We check for lp->waitstatus in addition to lp->status, because we can
2644 have pending process exits recorded in lp->status
2645 and W_EXITCODE(0,0) == 0. We should probably have an additional
2646 lp->status_p flag. */
2647
2648 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2649 && linux_nat_status_is_event (lp->status));
2650}
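
   To make the W_EXITCODE(0,0) remark above concrete, a tiny sketch (assuming
   the common libc macro, with an explicit fallback) of why a clean pending
   exit cannot be parked in lp->status:

#include <assert.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
#define W_EXITCODE(ret, sig) ((ret) << 8 | (sig))	/* common libc definition */
#endif

static void
clean_exit_encodes_to_zero (void)
{
  /* exit (0) with no terminating signal encodes to 0, which is also
     the "no pending status" value, hence the lp->waitstatus check.  */
  assert (W_EXITCODE (0, 0) == 0);
}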
2651
26ab7092
JK
2652/* Set alternative SIGTRAP-like events recognizer. If
2653 breakpoint_inserted_here_p reports a breakpoint at the stop address,
2654 then gdbarch_decr_pc_after_break will be applied. */
2655
2656void
2657linux_nat_set_status_is_event (struct target_ops *t,
2658 int (*status_is_event) (int status))
2659{
2660 linux_nat_status_is_event = status_is_event;
2661}
2662
57380f4e
DJ
2663/* Wait until LP is stopped. */
2664
2665static int
2666stop_wait_callback (struct lwp_info *lp, void *data)
2667{
6c95b8df
PA
2668 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2669
2670 /* If this is a vfork parent, bail out, it is not going to report
2671 any SIGSTOP until the vfork is done with. */
2672 if (inf->vfork_child != NULL)
2673 return 0;
2674
d6b0e80f
AC
2675 if (!lp->stopped)
2676 {
2677 int status;
2678
2679 status = wait_lwp (lp);
2680 if (status == 0)
2681 return 0;
2682
57380f4e
DJ
2683 if (lp->ignore_sigint && WIFSTOPPED (status)
2684 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2685 {
57380f4e 2686 lp->ignore_sigint = 0;
d6b0e80f
AC
2687
2688 errno = 0;
2689 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2690 if (debug_linux_nat)
2691 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2692 "PTRACE_CONT %s, 0, 0 (%s) "
2693 "(discarding SIGINT)\n",
d6b0e80f
AC
2694 target_pid_to_str (lp->ptid),
2695 errno ? safe_strerror (errno) : "OK");
2696
57380f4e 2697 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2698 }
2699
57380f4e
DJ
2700 maybe_clear_ignore_sigint (lp);
2701
d6b0e80f
AC
2702 if (WSTOPSIG (status) != SIGSTOP)
2703 {
26ab7092 2704 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2705 {
2706 /* If a LWP other than the LWP that we're reporting an
2707 event for has hit a GDB breakpoint (as opposed to
2708 some random trap signal), then just arrange for it to
2709 hit it again later. We don't keep the SIGTRAP status
2710 and don't forward the SIGTRAP signal to the LWP. We
2711 will handle the current event, eventually we will
2712 resume all LWPs, and this one will get its breakpoint
2713 trap again.
2714
2715 If we do not do this, then we run the risk that the
2716 user will delete or disable the breakpoint, but the
2717 thread will have already tripped on it. */
2718
9f0bdab8
DJ
2719 /* Save the trap's siginfo in case we need it later. */
2720 save_siginfo (lp);
2721
ebec9a0f
PA
2722 save_sigtrap (lp);
2723
1777feb0 2724 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2725 errno = 0;
2726 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2727 if (debug_linux_nat)
2728 {
2729 fprintf_unfiltered (gdb_stdlog,
2730 "PTRACE_CONT %s, 0, 0 (%s)\n",
2731 target_pid_to_str (lp->ptid),
2732 errno ? safe_strerror (errno) : "OK");
2733
2734 fprintf_unfiltered (gdb_stdlog,
2735 "SWC: Candidate SIGTRAP event in %s\n",
2736 target_pid_to_str (lp->ptid));
2737 }
710151dd 2738 /* Hold this event/waitstatus while we check to see if
1777feb0 2739 there are any more (we still want to get that SIGSTOP). */
57380f4e 2740 stop_wait_callback (lp, NULL);
710151dd 2741
7feb7d06
PA
2742 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2743 there's another event, throw it back into the
1777feb0 2744 queue. */
7feb7d06 2745 if (lp->status)
710151dd 2746 {
7feb7d06
PA
2747 if (debug_linux_nat)
2748 fprintf_unfiltered (gdb_stdlog,
2749 "SWC: kill %s, %s\n",
2750 target_pid_to_str (lp->ptid),
2751 status_to_str ((int) status));
2752 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2753 }
7feb7d06 2754
1777feb0 2755 /* Save the sigtrap event. */
7feb7d06 2756 lp->status = status;
d6b0e80f
AC
2757 return 0;
2758 }
2759 else
2760 {
2761 /* The thread was stopped with a signal other than
1777feb0 2762 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2763
2764 if (debug_linux_nat)
2765 {
2766 fprintf_unfiltered (gdb_stdlog,
2767 "SWC: Pending event %s in %s\n",
2768 status_to_str ((int) status),
2769 target_pid_to_str (lp->ptid));
2770 }
1777feb0 2771 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2772 errno = 0;
2773 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2774 if (debug_linux_nat)
2775 fprintf_unfiltered (gdb_stdlog,
2776 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2777 target_pid_to_str (lp->ptid),
2778 errno ? safe_strerror (errno) : "OK");
2779
2780 /* Hold this event/waitstatus while we check to see if
1777feb0 2781 there are any more (we still want to get that SIGSTOP). */
57380f4e 2782 stop_wait_callback (lp, NULL);
710151dd
PA
2783
2784 /* If the lp->status field is still empty, use it to
2785 hold this event. If not, then this event must be
2786 returned to the event queue of the LWP. */
7feb7d06 2787 if (lp->status)
d6b0e80f
AC
2788 {
2789 if (debug_linux_nat)
2790 {
2791 fprintf_unfiltered (gdb_stdlog,
2792 "SWC: kill %s, %s\n",
2793 target_pid_to_str (lp->ptid),
2794 status_to_str ((int) status));
2795 }
2796 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2797 }
710151dd
PA
2798 else
2799 lp->status = status;
d6b0e80f
AC
2800 return 0;
2801 }
2802 }
2803 else
2804 {
2805 /* We caught the SIGSTOP that we intended to catch, so
2806 there's no SIGSTOP pending. */
2807 lp->stopped = 1;
2808 lp->signalled = 0;
2809 }
2810 }
2811
2812 return 0;
2813}
2814
d6b0e80f
AC
2815/* Return non-zero if LP has a wait status pending. */
2816
2817static int
2818status_callback (struct lwp_info *lp, void *data)
2819{
2820 /* Only report a pending wait status if we pretend that this has
2821 indeed been resumed. */
ca2163eb
PA
2822 if (!lp->resumed)
2823 return 0;
2824
2825 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2826 {
2827 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2828 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2829 0', so a clean process exit can not be stored pending in
2830 lp->status, it is indistinguishable from
2831 no-pending-status. */
2832 return 1;
2833 }
2834
2835 if (lp->status != 0)
2836 return 1;
2837
2838 return 0;
d6b0e80f
AC
2839}
2840
2841/* Return non-zero if LP isn't stopped. */
2842
2843static int
2844running_callback (struct lwp_info *lp, void *data)
2845{
2846 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2847}
2848
2849/* Count the LWPs that have had events. */
2850
2851static int
2852count_events_callback (struct lwp_info *lp, void *data)
2853{
2854 int *count = data;
2855
2856 gdb_assert (count != NULL);
2857
e09490f1 2858 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2859 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2860 (*count)++;
2861
2862 return 0;
2863}
2864
2865/* Select the LWP (if any) that is currently being single-stepped. */
2866
2867static int
2868select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2869{
2870 if (lp->step && lp->status != 0)
2871 return 1;
2872 else
2873 return 0;
2874}
2875
2876/* Select the Nth LWP that has had a SIGTRAP event. */
2877
2878static int
2879select_event_lwp_callback (struct lwp_info *lp, void *data)
2880{
2881 int *selector = data;
2882
2883 gdb_assert (selector != NULL);
2884
1777feb0 2885 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2886 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2887 if ((*selector)-- == 0)
2888 return 1;
2889
2890 return 0;
2891}
2892
710151dd
PA
2893static int
2894cancel_breakpoint (struct lwp_info *lp)
2895{
2896 /* Arrange for a breakpoint to be hit again later. We don't keep
2897 the SIGTRAP status and don't forward the SIGTRAP signal to the
2898 LWP. We will handle the current event, eventually we will resume
2899 this LWP, and this breakpoint will trap again.
2900
2901 If we do not do this, then we run the risk that the user will
2902 delete or disable the breakpoint, but the LWP will have already
2903 tripped on it. */
2904
515630c5
UW
2905 struct regcache *regcache = get_thread_regcache (lp->ptid);
2906 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2907 CORE_ADDR pc;
2908
2909 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 2910 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2911 {
2912 if (debug_linux_nat)
2913 fprintf_unfiltered (gdb_stdlog,
2914 "CB: Push back breakpoint for %s\n",
2915 target_pid_to_str (lp->ptid));
2916
2917 /* Back up the PC if necessary. */
515630c5
UW
2918 if (gdbarch_decr_pc_after_break (gdbarch))
2919 regcache_write_pc (regcache, pc);
2920
710151dd
PA
2921 return 1;
2922 }
2923 return 0;
2924}
2925
d6b0e80f
AC
2926static int
2927cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2928{
2929 struct lwp_info *event_lp = data;
2930
2931 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2932 if (lp == event_lp)
2933 return 0;
2934
2935 /* If a LWP other than the LWP that we're reporting an event for has
2936 hit a GDB breakpoint (as opposed to some random trap signal),
2937 then just arrange for it to hit it again later. We don't keep
2938 the SIGTRAP status and don't forward the SIGTRAP signal to the
2939 LWP. We will handle the current event, eventually we will resume
2940 all LWPs, and this one will get its breakpoint trap again.
2941
2942 If we do not do this, then we run the risk that the user will
2943 delete or disable the breakpoint, but the LWP will have already
2944 tripped on it. */
2945
00390b84 2946 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
2947 && cancel_breakpoint (lp))
2948 /* Throw away the SIGTRAP. */
2949 lp->status = 0;
d6b0e80f
AC
2950
2951 return 0;
2952}
2953
2954/* Select one LWP out of those that have events pending. */
2955
2956static void
d90e17a7 2957select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2958{
2959 int num_events = 0;
2960 int random_selector;
2961 struct lwp_info *event_lp;
2962
ac264b3b 2963 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2964 (*orig_lp)->status = *status;
2965
2966 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
2967 event_lp = iterate_over_lwps (filter,
2968 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
2969 if (event_lp != NULL)
2970 {
2971 if (debug_linux_nat)
2972 fprintf_unfiltered (gdb_stdlog,
2973 "SEL: Select single-step %s\n",
2974 target_pid_to_str (event_lp->ptid));
2975 }
2976 else
2977 {
2978 /* No single-stepping LWP. Select one at random, out of those
2979 which have had SIGTRAP events. */
2980
2981 /* First see how many SIGTRAP events we have. */
d90e17a7 2982 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
2983
2984 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2985 random_selector = (int)
2986 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2987
2988 if (debug_linux_nat && num_events > 1)
2989 fprintf_unfiltered (gdb_stdlog,
2990 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2991 num_events, random_selector);
2992
d90e17a7
PA
2993 event_lp = iterate_over_lwps (filter,
2994 select_event_lwp_callback,
d6b0e80f
AC
2995 &random_selector);
2996 }
2997
2998 if (event_lp != NULL)
2999 {
3000 /* Switch the event LWP. */
3001 *orig_lp = event_lp;
3002 *status = event_lp->status;
3003 }
3004
3005 /* Flush the wait status for the event LWP. */
3006 (*orig_lp)->status = 0;
3007}
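
   A small sketch of the scaling used above (not GDB code): multiplying by
   num_events and dividing by RAND_MAX + 1.0 maps rand()'s range onto
   0 .. num_events - 1 roughly uniformly, rather than using the cruder
   rand () % num_events.

#include <stdlib.h>

/* Pick an index in [0, num_events), scaled the same way as
   select_event_lwp's random_selector.  */
static int
random_event_index (int num_events)
{
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}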
3008
3009/* Return non-zero if LP has been resumed. */
3010
3011static int
3012resumed_callback (struct lwp_info *lp, void *data)
3013{
3014 return lp->resumed;
3015}
3016
d6b0e80f
AC
3017/* Stop an active thread, verify it still exists, then resume it. */
3018
3019static int
3020stop_and_resume_callback (struct lwp_info *lp, void *data)
3021{
3022 struct lwp_info *ptr;
3023
3024 if (!lp->stopped && !lp->signalled)
3025 {
3026 stop_callback (lp, NULL);
3027 stop_wait_callback (lp, NULL);
3028 /* Resume if the lwp still exists. */
3029 for (ptr = lwp_list; ptr; ptr = ptr->next)
3030 if (lp == ptr)
3031 {
3032 resume_callback (lp, NULL);
3033 resume_set_callback (lp, NULL);
3034 }
3035 }
3036 return 0;
3037}
3038
02f3fc28 3039/* Check if we should go on and pass this event to common code.
fa2c6a57 3040 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
3041static struct lwp_info *
3042linux_nat_filter_event (int lwpid, int status, int options)
3043{
3044 struct lwp_info *lp;
3045
3046 lp = find_lwp_pid (pid_to_ptid (lwpid));
3047
3048 /* Check for stop events reported by a process we didn't already
3049 know about - anything not already in our LWP list.
3050
3051 If we're expecting to receive stopped processes after
3052 fork, vfork, and clone events, then we'll just add the
3053 new one to our list and go back to waiting for the event
3054 to be reported - the stopped process might be returned
3055 from waitpid before or after the event is. */
3056 if (WIFSTOPPED (status) && !lp)
3057 {
3058 linux_record_stopped_pid (lwpid, status);
3059 return NULL;
3060 }
3061
3062 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3063 our list, i.e. not part of the current process. This can happen
fd62cb89 3064 if we detach from a program we originally forked and then it
02f3fc28
PA
3065 exits. */
3066 if (!WIFSTOPPED (status) && !lp)
3067 return NULL;
3068
3069 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
3070 CLONE_PTRACE processes which do not use the thread library -
3071 otherwise we wouldn't find the new LWP this way. That doesn't
3072 currently work, and the following code is currently unreachable
3073 due to the two blocks above. If it's fixed some day, this code
3074 should be broken out into a function so that we can also pick up
3075 LWPs from the new interface. */
3076 if (!lp)
3077 {
3078 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
3079 if (options & __WCLONE)
3080 lp->cloned = 1;
3081
3082 gdb_assert (WIFSTOPPED (status)
3083 && WSTOPSIG (status) == SIGSTOP);
3084 lp->signalled = 1;
3085
3086 if (!in_thread_list (inferior_ptid))
3087 {
3088 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
3089 GET_PID (inferior_ptid));
3090 add_thread (inferior_ptid);
3091 }
3092
3093 add_thread (lp->ptid);
3094 }
3095
ca2163eb
PA
3096 /* Handle GNU/Linux's syscall SIGTRAPs. */
3097 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3098 {
3099 /* No longer need the sysgood bit. The ptrace event ends up
3100 recorded in lp->waitstatus if we care for it. We can carry
3101 on handling the event like a regular SIGTRAP from here
3102 on. */
3103 status = W_STOPCODE (SIGTRAP);
3104 if (linux_handle_syscall_trap (lp, 0))
3105 return NULL;
3106 }
02f3fc28 3107
ca2163eb
PA
3108 /* Handle GNU/Linux's extended waitstatus for trace events. */
3109 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3110 {
3111 if (debug_linux_nat)
3112 fprintf_unfiltered (gdb_stdlog,
3113 "LLW: Handling extended status 0x%06x\n",
3114 status);
3115 if (linux_handle_extended_wait (lp, status, 0))
3116 return NULL;
3117 }
3118
26ab7092 3119 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3120 {
3121 /* Save the trap's siginfo in case we need it later. */
3122 save_siginfo (lp);
3123
3124 save_sigtrap (lp);
3125 }
ca2163eb 3126
02f3fc28 3127 /* Check if the thread has exited. */
d90e17a7
PA
3128 if ((WIFEXITED (status) || WIFSIGNALED (status))
3129 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3130 {
9db03742
JB
3131 /* If this is the main thread, we must stop all threads and verify
3132 if they are still alive. This is because in the nptl thread model
3133 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3134 other than the main thread. We only get the main thread exit
3135 signal once all child threads have already exited. If we
3136 stop all the threads and use the stop_wait_callback to check
3137 if they have exited we can determine whether this signal
3138 should be ignored or whether it means the end of the debugged
3139 application, regardless of which threading model is being
5d3b6af6 3140 used. */
02f3fc28
PA
3141 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3142 {
3143 lp->stopped = 1;
d90e17a7
PA
3144 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3145 stop_and_resume_callback, NULL);
02f3fc28
PA
3146 }
3147
3148 if (debug_linux_nat)
3149 fprintf_unfiltered (gdb_stdlog,
3150 "LLW: %s exited.\n",
3151 target_pid_to_str (lp->ptid));
3152
d90e17a7 3153 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3154 {
3155 /* If there is at least one more LWP, then the exit signal
3156 was not the end of the debugged application and should be
3157 ignored. */
3158 exit_lwp (lp);
3159 return NULL;
3160 }
02f3fc28
PA
3161 }
3162
3163 /* Check if the current LWP has previously exited. In the nptl
3164 thread model, LWPs other than the main thread do not issue
3165 signals when they exit so we must check whenever the thread has
3166 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3167 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3168 {
d90e17a7
PA
3169 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3170
02f3fc28
PA
3171 if (debug_linux_nat)
3172 fprintf_unfiltered (gdb_stdlog,
3173 "LLW: %s exited.\n",
3174 target_pid_to_str (lp->ptid));
3175
3176 exit_lwp (lp);
3177
3178 /* Make sure there is at least one thread running. */
d90e17a7 3179 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3180
3181 /* Discard the event. */
3182 return NULL;
3183 }
3184
3185 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3186 an attempt to stop an LWP. */
3187 if (lp->signalled
3188 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3189 {
3190 if (debug_linux_nat)
3191 fprintf_unfiltered (gdb_stdlog,
3192 "LLW: Delayed SIGSTOP caught for %s.\n",
3193 target_pid_to_str (lp->ptid));
3194
3195 /* This is a delayed SIGSTOP. */
3196 lp->signalled = 0;
3197
3198 registers_changed ();
3199
28439f5e 3200 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28
PA
3201 lp->step, TARGET_SIGNAL_0);
3202 if (debug_linux_nat)
3203 fprintf_unfiltered (gdb_stdlog,
3204 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3205 lp->step ?
3206 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3207 target_pid_to_str (lp->ptid));
3208
3209 lp->stopped = 0;
3210 gdb_assert (lp->resumed);
3211
3212 /* Discard the event. */
3213 return NULL;
3214 }
3215
57380f4e
DJ
3216 /* Make sure we don't report a SIGINT that we have already displayed
3217 for another thread. */
3218 if (lp->ignore_sigint
3219 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3220 {
3221 if (debug_linux_nat)
3222 fprintf_unfiltered (gdb_stdlog,
3223 "LLW: Delayed SIGINT caught for %s.\n",
3224 target_pid_to_str (lp->ptid));
3225
3226 /* This is a delayed SIGINT. */
3227 lp->ignore_sigint = 0;
3228
3229 registers_changed ();
28439f5e 3230 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3231 lp->step, TARGET_SIGNAL_0);
3232 if (debug_linux_nat)
3233 fprintf_unfiltered (gdb_stdlog,
3234 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3235 lp->step ?
3236 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3237 target_pid_to_str (lp->ptid));
3238
3239 lp->stopped = 0;
3240 gdb_assert (lp->resumed);
3241
3242 /* Discard the event. */
3243 return NULL;
3244 }
3245
02f3fc28
PA
3246 /* An interesting event. */
3247 gdb_assert (lp);
ca2163eb 3248 lp->status = status;
02f3fc28
PA
3249 return lp;
3250}
3251
d6b0e80f 3252static ptid_t
7feb7d06 3253linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3254 ptid_t ptid, struct target_waitstatus *ourstatus,
3255 int target_options)
d6b0e80f 3256{
7feb7d06 3257 static sigset_t prev_mask;
d6b0e80f
AC
3258 struct lwp_info *lp = NULL;
3259 int options = 0;
3260 int status = 0;
d90e17a7 3261 pid_t pid;
d6b0e80f 3262
b84876c2
PA
3263 if (debug_linux_nat_async)
3264 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3265
f973ed9c
DJ
3266 /* The first time we get here after starting a new inferior, we may
3267 not have added it to the LWP list yet - this is the earliest
3268 moment at which we know its PID. */
d90e17a7 3269 if (ptid_is_pid (inferior_ptid))
f973ed9c 3270 {
27c9d204
PA
3271 /* Upgrade the main thread's ptid. */
3272 thread_change_ptid (inferior_ptid,
3273 BUILD_LWP (GET_PID (inferior_ptid),
3274 GET_PID (inferior_ptid)));
3275
f973ed9c
DJ
3276 lp = add_lwp (inferior_ptid);
3277 lp->resumed = 1;
3278 }
3279
7feb7d06
PA
3280 /* Make sure SIGCHLD is blocked. */
3281 block_child_signals (&prev_mask);
d6b0e80f 3282
d90e17a7
PA
3283 if (ptid_equal (ptid, minus_one_ptid))
3284 pid = -1;
3285 else if (ptid_is_pid (ptid))
3286 /* A request to wait for a specific tgid. This is not possible
3287 with waitpid, so instead, we wait for any child, and leave
3288 children we're not interested in right now with a pending
3289 status to report later. */
3290 pid = -1;
3291 else
3292 pid = GET_LWP (ptid);
3293
d6b0e80f 3294retry:
d90e17a7
PA
3295 lp = NULL;
3296 status = 0;
d6b0e80f 3297
e3e9f5a2
PA
3298 /* Make sure that of those LWPs we want to get an event from, there
3299 is at least one LWP that has been resumed. If there's none, just
3300 bail out. The core may just be flushing asynchronously all
3301 events. */
3302 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3303 {
3304 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3305
3306 if (debug_linux_nat_async)
3307 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3308
3309 restore_child_signals_mask (&prev_mask);
3310 return minus_one_ptid;
3311 }
d6b0e80f
AC
3312
3313 /* First check if there is a LWP with a wait status pending. */
3314 if (pid == -1)
3315 {
3316 /* Any LWP that's been resumed will do. */
d90e17a7 3317 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3318 if (lp)
3319 {
ca2163eb 3320 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3321 fprintf_unfiltered (gdb_stdlog,
3322 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3323 status_to_str (lp->status),
d6b0e80f
AC
3324 target_pid_to_str (lp->ptid));
3325 }
3326
b84876c2 3327 /* But if we don't find one, we'll have to wait, and check both
7feb7d06
PA
3328 cloned and uncloned processes. We start with the cloned
3329 processes. */
d6b0e80f
AC
3330 options = __WCLONE | WNOHANG;
3331 }
3332 else if (is_lwp (ptid))
3333 {
3334 if (debug_linux_nat)
3335 fprintf_unfiltered (gdb_stdlog,
3336 "LLW: Waiting for specific LWP %s.\n",
3337 target_pid_to_str (ptid));
3338
3339 /* We have a specific LWP to check. */
3340 lp = find_lwp_pid (ptid);
3341 gdb_assert (lp);
d6b0e80f 3342
ca2163eb 3343 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3344 fprintf_unfiltered (gdb_stdlog,
3345 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3346 status_to_str (lp->status),
d6b0e80f
AC
3347 target_pid_to_str (lp->ptid));
3348
3349 /* If we have to wait, take into account whether PID is a cloned
3350 process or not. And we have to convert it to something that
3351 the layer beneath us can understand. */
3352 options = lp->cloned ? __WCLONE : 0;
3353 pid = GET_LWP (ptid);
d90e17a7
PA
3354
3355 /* We check for lp->waitstatus in addition to lp->status,
3356 because we can have pending process exits recorded in
3357 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3358 an additional lp->status_p flag. */
ca2163eb 3359 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3360 lp = NULL;
d6b0e80f
AC
3361 }
3362
d90e17a7 3363 if (lp && lp->signalled)
d6b0e80f
AC
3364 {
3365 /* A pending SIGSTOP may interfere with the normal stream of
3366 events. In a typical case where interference is a problem,
3367 we have a SIGSTOP signal pending for LWP A while
3368 single-stepping it, encounter an event in LWP B, and take the
3369 pending SIGSTOP while trying to stop LWP A. After processing
3370 the event in LWP B, LWP A is continued, and we'll never see
3371 the SIGTRAP associated with the last time we were
3372 single-stepping LWP A. */
3373
3374 /* Resume the thread. It should halt immediately returning the
3375 pending SIGSTOP. */
3376 registers_changed ();
28439f5e 3377 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3378 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3379 if (debug_linux_nat)
3380 fprintf_unfiltered (gdb_stdlog,
3381 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3382 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3383 target_pid_to_str (lp->ptid));
3384 lp->stopped = 0;
3385 gdb_assert (lp->resumed);
3386
ca2163eb
PA
3387 /* Catch the pending SIGSTOP. */
3388 status = lp->status;
3389 lp->status = 0;
3390
d6b0e80f 3391 stop_wait_callback (lp, NULL);
ca2163eb
PA
3392
3393 /* If the lp->status field isn't empty, we caught another signal
3394 while flushing the SIGSTOP. Return it back to the event
3395 queue of the LWP, as we already have an event to handle. */
3396 if (lp->status)
3397 {
3398 if (debug_linux_nat)
3399 fprintf_unfiltered (gdb_stdlog,
3400 "LLW: kill %s, %s\n",
3401 target_pid_to_str (lp->ptid),
3402 status_to_str (lp->status));
3403 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3404 }
3405
3406 lp->status = status;
d6b0e80f
AC
3407 }
3408
b84876c2
PA
3409 if (!target_can_async_p ())
3410 {
3411 /* Causes SIGINT to be passed on to the attached process. */
3412 set_sigint_trap ();
b84876c2 3413 }
d6b0e80f 3414
47608cb1
PA
3415 /* Translate generic target_wait options into waitpid options. */
3416 if (target_options & TARGET_WNOHANG)
3417 options |= WNOHANG;
7feb7d06 3418
d90e17a7 3419 while (lp == NULL)
d6b0e80f
AC
3420 {
3421 pid_t lwpid;
3422
7feb7d06 3423 lwpid = my_waitpid (pid, &status, options);
b84876c2 3424
d6b0e80f
AC
3425 if (lwpid > 0)
3426 {
3427 gdb_assert (pid == -1 || lwpid == pid);
3428
3429 if (debug_linux_nat)
3430 {
3431 fprintf_unfiltered (gdb_stdlog,
3432 "LLW: waitpid %ld received %s\n",
3433 (long) lwpid, status_to_str (status));
3434 }
3435
02f3fc28 3436 lp = linux_nat_filter_event (lwpid, status, options);
d90e17a7 3437
33355866
JK
3438 /* STATUS is now no longer valid, use LP->STATUS instead. */
3439 status = 0;
3440
d90e17a7
PA
3441 if (lp
3442 && ptid_is_pid (ptid)
3443 && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
d6b0e80f 3444 {
e3e9f5a2
PA
3445 gdb_assert (lp->resumed);
3446
d90e17a7 3447 if (debug_linux_nat)
3e43a32a
MS
3448 fprintf (stderr,
3449 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3450 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3451
ca2163eb 3452 if (WIFSTOPPED (lp->status))
d90e17a7 3453 {
ca2163eb 3454 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3455 {
e3e9f5a2
PA
3456 /* Cancel breakpoint hits. The breakpoint may
3457 be removed before we fetch events from this
3458 process to report to the core. It is best
3459 not to assume the moribund breakpoints
3460 heuristic always handles these cases --- it
3461 could be that too many events go through to the
3462 core before this one is handled. All-stop
3463 always cancels breakpoint hits in all
3464 threads. */
3465 if (non_stop
00390b84 3466 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3467 && cancel_breakpoint (lp))
3468 {
3469 /* Throw away the SIGTRAP. */
3470 lp->status = 0;
3471
3472 if (debug_linux_nat)
3473 fprintf (stderr,
3e43a32a
MS
3474 "LLW: LWP %ld hit a breakpoint while"
3475 " waiting for another process;"
3476 " cancelled it\n",
e3e9f5a2
PA
3477 ptid_get_lwp (lp->ptid));
3478 }
3479 lp->stopped = 1;
d90e17a7
PA
3480 }
3481 else
3482 {
3483 lp->stopped = 1;
3484 lp->signalled = 0;
3485 }
3486 }
33355866 3487 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3488 {
3489 if (debug_linux_nat)
3e43a32a
MS
3490 fprintf (stderr,
3491 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3492 ptid_get_lwp (lp->ptid));
3493
3494 /* This was the last lwp in the process. Since
3495 events are serialized to GDB core, we can't
3496 report this one right now; but GDB core and the
3497 other target layers will want to be notified
3498 about the exit code/signal, so leave the status
3499 pending for the next time we're able to report
3500 it. */
d90e17a7
PA
3501
3502 /* Prevent trying to stop this thread again. We'll
3503 never try to resume it because it has a pending
3504 status. */
3505 lp->stopped = 1;
3506
3507 /* Dead LWPs aren't expected to report a pending
3508 SIGSTOP. */
3509 lp->signalled = 0;
3510
3511 /* Store the pending event in the waitstatus as
3512 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3513 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3514 }
3515
3516 /* Keep looking. */
3517 lp = NULL;
d6b0e80f
AC
3518 continue;
3519 }
3520
d90e17a7
PA
3521 if (lp)
3522 break;
3523 else
3524 {
3525 if (pid == -1)
3526 {
3527 /* waitpid did return something. Start over. */
3528 options |= __WCLONE;
3529 }
3530 continue;
3531 }
d6b0e80f
AC
3532 }
3533
3534 if (pid == -1)
3535 {
3536 /* Alternate between checking cloned and uncloned processes. */
3537 options ^= __WCLONE;
3538
b84876c2
PA
3539 /* And every time we have checked both:
3540 In async mode, return to event loop;
3541 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 3542 if (options & __WCLONE)
b84876c2 3543 {
47608cb1 3544 if (target_options & TARGET_WNOHANG)
b84876c2
PA
3545 {
3546 /* No interesting event. */
3547 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3548
b84876c2
PA
3549 if (debug_linux_nat_async)
3550 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3551
7feb7d06 3552 restore_child_signals_mask (&prev_mask);
b84876c2
PA
3553 return minus_one_ptid;
3554 }
3555
3556 sigsuspend (&suspend_mask);
3557 }
d6b0e80f 3558 }
28736962
PA
3559 else if (target_options & TARGET_WNOHANG)
3560 {
3561 /* No interesting event for PID yet. */
3562 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3563
3564 if (debug_linux_nat_async)
3565 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3566
3567 restore_child_signals_mask (&prev_mask);
3568 return minus_one_ptid;
3569 }
d6b0e80f
AC
3570
3571 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3572 gdb_assert (lp == NULL);
d6b0e80f
AC
3573 }
3574
b84876c2 3575 if (!target_can_async_p ())
d26b5354 3576 clear_sigint_trap ();
d6b0e80f
AC
3577
3578 gdb_assert (lp);
3579
ca2163eb
PA
3580 status = lp->status;
3581 lp->status = 0;
3582
d6b0e80f
AC
3583 /* Don't report signals that GDB isn't interested in, such as
3584 signals that are neither printed nor stopped upon. Stopping all
3585 threads can be a bit time-consuming so if we want decent
3586 performance with heavily multi-threaded programs, especially when
3587 they're using a high frequency timer, we'd better avoid it if we
3588 can. */
3589
3590 if (WIFSTOPPED (status))
3591 {
423ec54c 3592 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b0e80f 3593
2455069d
UW
3594 /* When using hardware single-step, we need to report every signal.
3595 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3596 if (!lp->step
2455069d 3597 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3598 {
3599 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3600 here? It is not clear we should. GDB may not expect
3601 other threads to run. On the other hand, not resuming
3602 newly attached threads may cause an unwanted delay in
3603 getting them running. */
3604 registers_changed ();
28439f5e 3605 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3606 lp->step, signo);
d6b0e80f
AC
3607 if (debug_linux_nat)
3608 fprintf_unfiltered (gdb_stdlog,
3609 "LLW: %s %s, %s (preempt 'handle')\n",
3610 lp->step ?
3611 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3612 target_pid_to_str (lp->ptid),
423ec54c
JK
3613 (signo != TARGET_SIGNAL_0
3614 ? strsignal (target_signal_to_host (signo))
3615 : "0"));
d6b0e80f 3616 lp->stopped = 0;
d6b0e80f
AC
3617 goto retry;
3618 }
3619
1ad15515 3620 if (!non_stop)
d6b0e80f 3621 {
1ad15515
PA
3622 /* Only do the below in all-stop, as we currently use SIGINT
3623 to implement target_stop (see linux_nat_stop) in
3624 non-stop. */
3625 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3626 {
3627 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3628 forwarded to the entire process group, that is, all LWPs
3629 will receive it - unless they're using CLONE_THREAD to
3630 share signals. Since we only want to report it once, we
3631 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3632 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3633 set_ignore_sigint, NULL);
1ad15515
PA
3634 lp->ignore_sigint = 0;
3635 }
3636 else
3637 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3638 }
3639 }
3640
3641 /* This LWP is stopped now. */
3642 lp->stopped = 1;
3643
3644 if (debug_linux_nat)
3645 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3646 status_to_str (status), target_pid_to_str (lp->ptid));
3647
4c28f408
PA
3648 if (!non_stop)
3649 {
3650 /* Now stop all other LWP's ... */
d90e17a7 3651 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3652
3653 /* ... and wait until all of them have reported back that
3654 they're no longer running. */
d90e17a7 3655 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3656
3657 /* If we're not waiting for a specific LWP, choose an event LWP
3658 from among those that have had events. Giving equal priority
3659 to all LWPs that have had events helps prevent
3660 starvation. */
3661 if (pid == -1)
d90e17a7 3662 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3663
e3e9f5a2
PA
3664 /* Now that we've selected our final event LWP, cancel any
3665 breakpoints in other LWPs that have hit a GDB breakpoint.
3666 See the comment in cancel_breakpoints_callback to find out
3667 why. */
3668 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3669
3670 /* In all-stop, from the core's perspective, all LWPs are now
3671 stopped until a new resume action is sent over. */
3672 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3673 }
3674 else
3675 lp->resumed = 0;
d6b0e80f 3676
26ab7092 3677 if (linux_nat_status_is_event (status))
d6b0e80f 3678 {
d6b0e80f
AC
3679 if (debug_linux_nat)
3680 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3681 "LLW: trap ptid is %s.\n",
3682 target_pid_to_str (lp->ptid));
d6b0e80f 3683 }
d6b0e80f
AC
3684
3685 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3686 {
3687 *ourstatus = lp->waitstatus;
3688 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3689 }
3690 else
3691 store_waitstatus (ourstatus, status);
3692
b84876c2
PA
3693 if (debug_linux_nat_async)
3694 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3695
7feb7d06 3696 restore_child_signals_mask (&prev_mask);
1e225492
JK
3697
3698 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3699 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3700 lp->core = -1;
3701 else
3702 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3703
f973ed9c 3704 return lp->ptid;
d6b0e80f
AC
3705}
3706
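/* A minimal standalone sketch, not taken from linux-nat.c, of the
   waitpid pattern linux_nat_wait_1 uses above on kernels without a
   usable __WALL: poll cloned and uncloned children alternately with
   WNOHANG, and block in sigsuspend once both flavors have been
   checked.  The helper name demo_wait_any and the suspend_mask
   argument are hypothetical.  */

#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

#ifndef __WCLONE
# define __WCLONE 0x80000000	/* Wait only for cloned children.  */
#endif

static pid_t
demo_wait_any (const sigset_t *suspend_mask, int *statusp)
{
  int options = __WCLONE | WNOHANG;

  for (;;)
    {
      pid_t lwpid = waitpid (-1, statusp, options);

      if (lwpid > 0)
	return lwpid;		/* Got an event from some child.  */

      /* Alternate between cloned and uncloned children.  */
      options ^= __WCLONE;

      /* Once both flavors have been polled without finding an event,
	 sleep until a SIGCHLD arrives instead of spinning.  SIGCHLD
	 must be blocked outside sigsuspend for this to be race-free,
	 as the code above arranges with prev_mask/suspend_mask.  */
      if (options & __WCLONE)
	sigsuspend (suspend_mask);
    }
}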
e3e9f5a2
PA
3707/* Resume LWPs that are currently stopped without any pending status
3708 to report, but are resumed from the core's perspective. */
3709
3710static int
3711resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3712{
3713 ptid_t *wait_ptid_p = data;
3714
3715 if (lp->stopped
3716 && lp->resumed
3717 && lp->status == 0
3718 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3719 {
3720 gdb_assert (is_executing (lp->ptid));
3721
3722 /* Don't bother if there's a breakpoint at PC that we'd hit
3723 immediately, and we're not waiting for this LWP. */
3724 if (!ptid_match (lp->ptid, *wait_ptid_p))
3725 {
3726 struct regcache *regcache = get_thread_regcache (lp->ptid);
3727 CORE_ADDR pc = regcache_read_pc (regcache);
3728
3729 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3730 return 0;
3731 }
3732
3733 if (debug_linux_nat)
3734 fprintf_unfiltered (gdb_stdlog,
3735 "RSRL: resuming stopped-resumed LWP %s\n",
3736 target_pid_to_str (lp->ptid));
3737
3738 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3739 lp->step, TARGET_SIGNAL_0);
3740 lp->stopped = 0;
3741 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3742 lp->stopped_by_watchpoint = 0;
3743 }
3744
3745 return 0;
3746}
3747
7feb7d06
PA
3748static ptid_t
3749linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3750 ptid_t ptid, struct target_waitstatus *ourstatus,
3751 int target_options)
7feb7d06
PA
3752{
3753 ptid_t event_ptid;
3754
3755 if (debug_linux_nat)
3e43a32a
MS
3756 fprintf_unfiltered (gdb_stdlog,
3757 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
3758
3759 /* Flush the async file first. */
3760 if (target_can_async_p ())
3761 async_file_flush ();
3762
e3e9f5a2
PA
3763 /* Resume LWPs that are currently stopped without any pending status
3764 to report, but are resumed from the core's perspective. LWPs get
3765 in this state if we find them stopping at a time we're not
3766 interested in reporting the event (target_wait on a
3767 specific process, for example, see linux_nat_wait_1), and
3768 meanwhile the event became uninteresting. Don't bother resuming
3769 LWPs we're not going to wait for if they'd stop immediately. */
3770 if (non_stop)
3771 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3772
47608cb1 3773 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3774
3775 /* If we requested any event, and something came out, assume there
3776 may be more. If we requested a specific lwp or process, also
3777 assume there may be more. */
3778 if (target_can_async_p ()
3779 && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3780 || !ptid_equal (ptid, minus_one_ptid)))
3781 async_file_mark ();
3782
3783 /* Get ready for the next event. */
3784 if (target_can_async_p ())
3785 target_async (inferior_event_handler, 0);
3786
3787 return event_ptid;
3788}
3789
d6b0e80f
AC
3790static int
3791kill_callback (struct lwp_info *lp, void *data)
3792{
3793 errno = 0;
3794 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3795 if (debug_linux_nat)
3796 fprintf_unfiltered (gdb_stdlog,
3797 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3798 target_pid_to_str (lp->ptid),
3799 errno ? safe_strerror (errno) : "OK");
3800
3801 return 0;
3802}
3803
3804static int
3805kill_wait_callback (struct lwp_info *lp, void *data)
3806{
3807 pid_t pid;
3808
3809 /* We must make sure that there are no pending events (delayed
3810 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3811 program doesn't interfere with any following debugging session. */
3812
3813 /* For cloned processes we must check both with __WCLONE and
3814 without, since the exit status of a cloned process isn't reported
3815 with __WCLONE. */
3816 if (lp->cloned)
3817 {
3818 do
3819 {
58aecb61 3820 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3821 if (pid != (pid_t) -1)
d6b0e80f 3822 {
e85a822c
DJ
3823 if (debug_linux_nat)
3824 fprintf_unfiltered (gdb_stdlog,
3825 "KWC: wait %s received unknown.\n",
3826 target_pid_to_str (lp->ptid));
3827 /* The Linux kernel sometimes fails to kill a thread
3828 completely after PTRACE_KILL; that goes from the stop
3829 point in do_fork out to the one in
3831 get_signal_to_deliver and waits again. So kill it
3831 again. */
3832 kill_callback (lp, NULL);
d6b0e80f
AC
3833 }
3834 }
3835 while (pid == GET_LWP (lp->ptid));
3836
3837 gdb_assert (pid == -1 && errno == ECHILD);
3838 }
3839
3840 do
3841 {
58aecb61 3842 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3843 if (pid != (pid_t) -1)
d6b0e80f 3844 {
e85a822c
DJ
3845 if (debug_linux_nat)
3846 fprintf_unfiltered (gdb_stdlog,
3847 "KWC: wait %s received unk.\n",
3848 target_pid_to_str (lp->ptid));
3849 /* See the call to kill_callback above. */
3850 kill_callback (lp, NULL);
d6b0e80f
AC
3851 }
3852 }
3853 while (pid == GET_LWP (lp->ptid));
3854
3855 gdb_assert (pid == -1 && errno == ECHILD);
3856 return 0;
3857}
3858
3859static void
7d85a9c0 3860linux_nat_kill (struct target_ops *ops)
d6b0e80f 3861{
f973ed9c
DJ
3862 struct target_waitstatus last;
3863 ptid_t last_ptid;
3864 int status;
d6b0e80f 3865
f973ed9c
DJ
3866 /* If we're stopped while forking and we haven't followed yet,
3867 kill the other task. We need to do this first because the
3868 parent will be sleeping if this is a vfork. */
d6b0e80f 3869
f973ed9c 3870 get_last_target_status (&last_ptid, &last);
d6b0e80f 3871
f973ed9c
DJ
3872 if (last.kind == TARGET_WAITKIND_FORKED
3873 || last.kind == TARGET_WAITKIND_VFORKED)
3874 {
3a3e9ee3 3875 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3876 wait (&status);
3877 }
3878
3879 if (forks_exist_p ())
7feb7d06 3880 linux_fork_killall ();
f973ed9c
DJ
3881 else
3882 {
d90e17a7 3883 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3884
4c28f408
PA
3885 /* Stop all threads before killing them, since ptrace requires
3886 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3887 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3888 /* ... and wait until all of them have reported back that
3889 they're no longer running. */
d90e17a7 3890 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3891
f973ed9c 3892 /* Kill all LWP's ... */
d90e17a7 3893 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3894
3895 /* ... and wait until we've flushed all events. */
d90e17a7 3896 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3897 }
3898
3899 target_mourn_inferior ();
d6b0e80f
AC
3900}
3901
3902static void
136d6dae 3903linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3904{
d90e17a7 3905 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 3906
f973ed9c 3907 if (! forks_exist_p ())
d90e17a7
PA
3908 /* Normal case, no other forks available. */
3909 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3910 else
3911 /* Multi-fork case. The current inferior_ptid has exited, but
3912 there are other viable forks to debug. Delete the exiting
3913 one and context-switch to the first available. */
3914 linux_fork_mourn_inferior ();
d6b0e80f
AC
3915}
3916
5b009018
PA
3917/* Convert a native/host siginfo object, into/from the siginfo in the
3918 layout of the inferiors' architecture. */
3919
3920static void
3921siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3922{
3923 int done = 0;
3924
3925 if (linux_nat_siginfo_fixup != NULL)
3926 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3927
3928 /* If there was no callback, or the callback didn't do anything,
3929 then just do a straight memcpy. */
3930 if (!done)
3931 {
3932 if (direction == 1)
3933 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3934 else
3935 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3936 }
3937}
3938
4aa995e1
PA
3939static LONGEST
3940linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3941 const char *annex, gdb_byte *readbuf,
3942 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3943{
4aa995e1
PA
3944 int pid;
3945 struct siginfo siginfo;
5b009018 3946 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
3947
3948 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3949 gdb_assert (readbuf || writebuf);
3950
3951 pid = GET_LWP (inferior_ptid);
3952 if (pid == 0)
3953 pid = GET_PID (inferior_ptid);
3954
3955 if (offset > sizeof (siginfo))
3956 return -1;
3957
3958 errno = 0;
3959 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3960 if (errno != 0)
3961 return -1;
3962
5b009018
PA
3963 /* When GDB is built as a 64-bit application, ptrace writes into
3964 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3965 inferior with a 64-bit GDB should look the same as debugging it
3966 with a 32-bit GDB, we need to convert it. GDB core always sees
3967 the converted layout, so any read/write will have to be done
3968 post-conversion. */
3969 siginfo_fixup (&siginfo, inf_siginfo, 0);
3970
4aa995e1
PA
3971 if (offset + len > sizeof (siginfo))
3972 len = sizeof (siginfo) - offset;
3973
3974 if (readbuf != NULL)
5b009018 3975 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3976 else
3977 {
5b009018
PA
3978 memcpy (inf_siginfo + offset, writebuf, len);
3979
3980 /* Convert back to ptrace layout before flushing it out. */
3981 siginfo_fixup (&siginfo, inf_siginfo, 1);
3982
4aa995e1
PA
3983 errno = 0;
3984 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3985 if (errno != 0)
3986 return -1;
3987 }
3988
3989 return len;
3990}
3991
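/* A minimal standalone sketch, not taken from linux-nat.c, of the
   PTRACE_GETSIGINFO request that linux_xfer_siginfo wraps above.  It
   uses the portable siginfo_t name rather than struct siginfo, and
   assumes PID is an already ptrace-stopped LWP; demo_read_siginfo is
   a hypothetical name.  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>

static int
demo_read_siginfo (pid_t pid, siginfo_t *si)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (void *) 0, si);
  return errno == 0 ? 0 : -1;
}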
10d6c8cd
DJ
3992static LONGEST
3993linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3994 const char *annex, gdb_byte *readbuf,
3995 const gdb_byte *writebuf,
3996 ULONGEST offset, LONGEST len)
d6b0e80f 3997{
4aa995e1 3998 struct cleanup *old_chain;
10d6c8cd 3999 LONGEST xfer;
d6b0e80f 4000
4aa995e1
PA
4001 if (object == TARGET_OBJECT_SIGNAL_INFO)
4002 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4003 offset, len);
4004
c35b1492
PA
4005 /* The target is connected but no live inferior is selected. Pass
4006 this request down to a lower stratum (e.g., the executable
4007 file). */
4008 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4009 return 0;
4010
4aa995e1
PA
4011 old_chain = save_inferior_ptid ();
4012
d6b0e80f
AC
4013 if (is_lwp (inferior_ptid))
4014 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4015
10d6c8cd
DJ
4016 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4017 offset, len);
d6b0e80f
AC
4018
4019 do_cleanups (old_chain);
4020 return xfer;
4021}
4022
4023static int
28439f5e 4024linux_thread_alive (ptid_t ptid)
d6b0e80f 4025{
8c6a60d1 4026 int err, tmp_errno;
4c28f408 4027
d6b0e80f
AC
4028 gdb_assert (is_lwp (ptid));
4029
4c28f408
PA
4030 /* Send signal 0 instead of anything ptrace, because ptracing a
4031 running thread errors out claiming that the thread doesn't
4032 exist. */
4033 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4034 tmp_errno = errno;
d6b0e80f
AC
4035 if (debug_linux_nat)
4036 fprintf_unfiltered (gdb_stdlog,
4c28f408 4037 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4038 target_pid_to_str (ptid),
8c6a60d1 4039 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4040
4c28f408 4041 if (err != 0)
d6b0e80f
AC
4042 return 0;
4043
4044 return 1;
4045}
4046
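/* A minimal standalone sketch, not taken from linux-nat.c, of the
   "signal 0" liveness probe used by linux_thread_alive above, shown
   with plain kill(2).  The real code targets an individual LWP via
   tkill/tgkill in kill_lwp; demo_process_alive is a hypothetical name
   and only checks a whole process.  */

#include <sys/types.h>
#include <signal.h>
#include <errno.h>

static int
demo_process_alive (pid_t pid)
{
  /* Signal 0 performs error checking only; nothing is delivered.  */
  if (kill (pid, 0) == 0)
    return 1;

  /* EPERM means the process exists but we may not signal it.  */
  return errno == EPERM;
}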
28439f5e
PA
4047static int
4048linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4049{
4050 return linux_thread_alive (ptid);
4051}
4052
d6b0e80f 4053static char *
117de6a9 4054linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4055{
4056 static char buf[64];
4057
a0ef4274 4058 if (is_lwp (ptid)
d90e17a7
PA
4059 && (GET_PID (ptid) != GET_LWP (ptid)
4060 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4061 {
4062 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4063 return buf;
4064 }
4065
4066 return normal_pid_to_str (ptid);
4067}
4068
4694da01
TT
4069static char *
4070linux_nat_thread_name (struct thread_info *thr)
4071{
4072 int pid = ptid_get_pid (thr->ptid);
4073 long lwp = ptid_get_lwp (thr->ptid);
4074#define FORMAT "/proc/%d/task/%ld/comm"
4075 char buf[sizeof (FORMAT) + 30];
4076 FILE *comm_file;
4077 char *result = NULL;
4078
4079 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4080 comm_file = fopen (buf, "r");
4081 if (comm_file)
4082 {
4083 /* Not exported by the kernel, so we define it here. */
4084#define COMM_LEN 16
4085 static char line[COMM_LEN + 1];
4086
4087 if (fgets (line, sizeof (line), comm_file))
4088 {
4089 char *nl = strchr (line, '\n');
4090
4091 if (nl)
4092 *nl = '\0';
4093 if (*line != '\0')
4094 result = line;
4095 }
4096
4097 fclose (comm_file);
4098 }
4099
4100#undef COMM_LEN
4101#undef FORMAT
4102
4103 return result;
4104}
4105
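/* A minimal standalone sketch, not taken from linux-nat.c, showing
   where the 16-byte COMM_LEN limit above comes from: the same thread
   name exposed in /proc/<pid>/task/<lwp>/comm can be fetched for the
   calling thread with prctl (PR_GET_NAME).  demo_own_thread_name is a
   hypothetical name.  */

#include <sys/prctl.h>

static int
demo_own_thread_name (char name[16])
{
  /* The kernel copies at most TASK_COMM_LEN (16) bytes, including the
     terminating NUL.  */
  return prctl (PR_GET_NAME, name, 0, 0, 0);
}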
dba24537
AC
4106/* Accepts an integer PID; Returns a string representing a file that
4107 can be opened to get the symbols for the child process. */
4108
6d8fd2b7
UW
4109static char *
4110linux_child_pid_to_exec_file (int pid)
dba24537
AC
4111{
4112 char *name1, *name2;
4113
4114 name1 = xmalloc (MAXPATHLEN);
4115 name2 = xmalloc (MAXPATHLEN);
4116 make_cleanup (xfree, name1);
4117 make_cleanup (xfree, name2);
4118 memset (name2, 0, MAXPATHLEN);
4119
4120 sprintf (name1, "/proc/%d/exe", pid);
4121 if (readlink (name1, name2, MAXPATHLEN) > 0)
4122 return name2;
4123 else
4124 return name1;
4125}
4126
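/* A minimal standalone sketch, not taken from linux-nat.c, of
   resolving the /proc/<pid>/exe symlink the way
   linux_child_pid_to_exec_file does above.  readlink does not
   NUL-terminate; the GDB code handles that by zeroing the buffer
   first, while this hypothetical demo_exec_path helper terminates
   explicitly.  */

#include <stdio.h>
#include <unistd.h>

static int
demo_exec_path (int pid, char *buf, size_t bufsize)
{
  char link[64];
  ssize_t n;

  snprintf (link, sizeof (link), "/proc/%d/exe", pid);
  n = readlink (link, buf, bufsize - 1);
  if (n < 0)
    return -1;

  buf[n] = '\0';
  return 0;
}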
4127/* Service function for corefiles and info proc. */
4128
4129static int
4130read_mapping (FILE *mapfile,
4131 long long *addr,
4132 long long *endaddr,
4133 char *permissions,
4134 long long *offset,
4135 char *device, long long *inode, char *filename)
4136{
4137 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4138 addr, endaddr, permissions, offset, device, inode);
4139
2e14c2ea
MS
4140 filename[0] = '\0';
4141 if (ret > 0 && ret != EOF)
dba24537
AC
4142 {
4143 /* Eat everything up to EOL for the filename. This will prevent
4144 weird filenames (such as one with embedded whitespace) from
4145 confusing this code. It also makes this code more robust in
4146 respect to annotations the kernel may add after the filename.
4147
4148 Note the filename is used for informational purposes
4149 only. */
4150 ret += fscanf (mapfile, "%[^\n]\n", filename);
4151 }
2e14c2ea 4152
dba24537
AC
4153 return (ret != 0 && ret != EOF);
4154}
4155
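/* A minimal standalone sketch, not taken from linux-nat.c, that
   parses /proc/self/maps lines in the same spirit as read_mapping
   above; the "%n" conversion recovers the optional file name,
   including names with embedded whitespace.  demo_print_own_mappings
   is a hypothetical name.  */

#include <stdio.h>
#include <string.h>

static int
demo_print_own_mappings (void)
{
  FILE *f = fopen ("/proc/self/maps", "r");
  char line[2048];

  if (f == NULL)
    return -1;

  while (fgets (line, sizeof (line), f) != NULL)
    {
      unsigned long long addr, endaddr, offset, inode;
      char perms[8], dev[8];
      int name_start = 0;

      line[strcspn (line, "\n")] = '\0';
      if (sscanf (line, "%llx-%llx %7s %llx %7s %llu %n",
		  &addr, &endaddr, perms, &offset, dev, &inode,
		  &name_start) >= 6)
	printf ("%llx-%llx %s %s\n", addr, endaddr, perms,
		line + name_start);
    }

  fclose (f);
  return 0;
}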
4156/* Fills the "to_find_memory_regions" target vector. Lists the memory
4157 regions in the inferior for a corefile. */
4158
4159static int
b8edc417 4160linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
dba24537 4161{
89ecc4f5 4162 int pid = PIDGET (inferior_ptid);
dba24537
AC
4163 char mapsfilename[MAXPATHLEN];
4164 FILE *mapsfile;
4165 long long addr, endaddr, size, offset, inode;
4166 char permissions[8], device[8], filename[MAXPATHLEN];
4167 int read, write, exec;
7c8a8b04 4168 struct cleanup *cleanup;
dba24537
AC
4169
4170 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 4171 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 4172 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 4173 error (_("Could not open %s."), mapsfilename);
7c8a8b04 4174 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
4175
4176 if (info_verbose)
4177 fprintf_filtered (gdb_stdout,
4178 "Reading memory regions from %s\n", mapsfilename);
4179
4180 /* Now iterate until end-of-file. */
4181 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4182 &offset, &device[0], &inode, &filename[0]))
4183 {
4184 size = endaddr - addr;
4185
4186 /* Get the segment's permissions. */
4187 read = (strchr (permissions, 'r') != 0);
4188 write = (strchr (permissions, 'w') != 0);
4189 exec = (strchr (permissions, 'x') != 0);
4190
4191 if (info_verbose)
4192 {
4193 fprintf_filtered (gdb_stdout,
2244ba2e
PM
4194 "Save segment, %s bytes at %s (%c%c%c)",
4195 plongest (size), paddress (target_gdbarch, addr),
dba24537
AC
4196 read ? 'r' : ' ',
4197 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 4198 if (filename[0])
dba24537
AC
4199 fprintf_filtered (gdb_stdout, " for %s", filename);
4200 fprintf_filtered (gdb_stdout, "\n");
4201 }
4202
4203 /* Invoke the callback function to create the corefile
4204 segment. */
4205 func (addr, size, read, write, exec, obfd);
4206 }
7c8a8b04 4207 do_cleanups (cleanup);
dba24537
AC
4208 return 0;
4209}
4210
2020b7ab
PA
4211static int
4212find_signalled_thread (struct thread_info *info, void *data)
4213{
16c381f0 4214 if (info->suspend.stop_signal != TARGET_SIGNAL_0
2020b7ab
PA
4215 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4216 return 1;
4217
4218 return 0;
4219}
4220
4221static enum target_signal
4222find_stop_signal (void)
4223{
4224 struct thread_info *info =
4225 iterate_over_threads (find_signalled_thread, NULL);
4226
4227 if (info)
16c381f0 4228 return info->suspend.stop_signal;
2020b7ab
PA
4229 else
4230 return TARGET_SIGNAL_0;
4231}
4232
dba24537
AC
4233/* Records the thread's register state for the corefile note
4234 section. */
4235
4236static char *
4237linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4238 char *note_data, int *note_size,
4239 enum target_signal stop_signal)
dba24537 4240{
dba24537 4241 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4242 struct gdbarch *gdbarch = target_gdbarch;
4243 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4244 const struct regset *regset;
55e969c1 4245 int core_regset_p;
594f7785 4246 struct cleanup *old_chain;
17ea7499
CES
4247 struct core_regset_section *sect_list;
4248 char *gdb_regset;
594f7785
UW
4249
4250 old_chain = save_inferior_ptid ();
4251 inferior_ptid = ptid;
4252 target_fetch_registers (regcache, -1);
4253 do_cleanups (old_chain);
4f844a66
DM
4254
4255 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4256 sect_list = gdbarch_core_regset_sections (gdbarch);
4257
17ea7499
CES
4258 /* The loop below uses the new struct core_regset_section, which stores
4259 the supported section names and sizes for the core file. Note that
4260 the PRSTATUS note needs to be treated specially. But the other notes are
4261 structurally the same, so they can benefit from the new struct. */
4262 if (core_regset_p && sect_list != NULL)
4263 while (sect_list->sect_name != NULL)
4264 {
17ea7499
CES
4265 regset = gdbarch_regset_from_core_section (gdbarch,
4266 sect_list->sect_name,
4267 sect_list->size);
4268 gdb_assert (regset && regset->collect_regset);
4269 gdb_regset = xmalloc (sect_list->size);
4270 regset->collect_regset (regset, regcache, -1,
4271 gdb_regset, sect_list->size);
2f2241f1
UW
4272
4273 if (strcmp (sect_list->sect_name, ".reg") == 0)
4274 note_data = (char *) elfcore_write_prstatus
4275 (obfd, note_data, note_size,
857d11d0
JK
4276 lwp, target_signal_to_host (stop_signal),
4277 gdb_regset);
2f2241f1
UW
4278 else
4279 note_data = (char *) elfcore_write_register_note
4280 (obfd, note_data, note_size,
4281 sect_list->sect_name, gdb_regset,
4282 sect_list->size);
17ea7499
CES
4283 xfree (gdb_regset);
4284 sect_list++;
4285 }
dba24537 4286
17ea7499
CES
4287 /* For architectures that do not have the struct core_regset_section
4288 implemented, we use the old method. When all the architectures have
4289 the new support, the code below should be deleted. */
4f844a66 4290 else
17ea7499 4291 {
2f2241f1
UW
4292 gdb_gregset_t gregs;
4293 gdb_fpregset_t fpregs;
4294
4295 if (core_regset_p
4296 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3e43a32a
MS
4297 sizeof (gregs)))
4298 != NULL && regset->collect_regset != NULL)
2f2241f1
UW
4299 regset->collect_regset (regset, regcache, -1,
4300 &gregs, sizeof (gregs));
4301 else
4302 fill_gregset (regcache, &gregs, -1);
4303
857d11d0
JK
4304 note_data = (char *) elfcore_write_prstatus
4305 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4306 &gregs);
2f2241f1 4307
17ea7499
CES
4308 if (core_regset_p
4309 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3e43a32a
MS
4310 sizeof (fpregs)))
4311 != NULL && regset->collect_regset != NULL)
17ea7499
CES
4312 regset->collect_regset (regset, regcache, -1,
4313 &fpregs, sizeof (fpregs));
4314 else
4315 fill_fpregset (regcache, &fpregs, -1);
4316
4317 note_data = (char *) elfcore_write_prfpreg (obfd,
4318 note_data,
4319 note_size,
4320 &fpregs, sizeof (fpregs));
4321 }
4f844a66 4322
dba24537
AC
4323 return note_data;
4324}
4325
4326struct linux_nat_corefile_thread_data
4327{
4328 bfd *obfd;
4329 char *note_data;
4330 int *note_size;
4331 int num_notes;
2020b7ab 4332 enum target_signal stop_signal;
dba24537
AC
4333};
4334
4335/* Called by gdbthread.c once per thread. Records the thread's
4336 register state for the corefile note section. */
4337
4338static int
4339linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4340{
4341 struct linux_nat_corefile_thread_data *args = data;
dba24537 4342
dba24537
AC
4343 args->note_data = linux_nat_do_thread_registers (args->obfd,
4344 ti->ptid,
4345 args->note_data,
2020b7ab
PA
4346 args->note_size,
4347 args->stop_signal);
dba24537 4348 args->num_notes++;
56be3814 4349
dba24537
AC
4350 return 0;
4351}
4352
efcbbd14
UW
4353/* Enumerate spufs IDs for process PID. */
4354
4355static void
4356iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4357{
4358 char path[128];
4359 DIR *dir;
4360 struct dirent *entry;
4361
4362 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4363 dir = opendir (path);
4364 if (!dir)
4365 return;
4366
4367 rewinddir (dir);
4368 while ((entry = readdir (dir)) != NULL)
4369 {
4370 struct stat st;
4371 struct statfs stfs;
4372 int fd;
4373
4374 fd = atoi (entry->d_name);
4375 if (!fd)
4376 continue;
4377
4378 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4379 if (stat (path, &st) != 0)
4380 continue;
4381 if (!S_ISDIR (st.st_mode))
4382 continue;
4383
4384 if (statfs (path, &stfs) != 0)
4385 continue;
4386 if (stfs.f_type != SPUFS_MAGIC)
4387 continue;
4388
4389 callback (data, fd);
4390 }
4391
4392 closedir (dir);
4393}
4394
4395/* Generate corefile notes for SPU contexts. */
4396
4397struct linux_spu_corefile_data
4398{
4399 bfd *obfd;
4400 char *note_data;
4401 int *note_size;
4402};
4403
4404static void
4405linux_spu_corefile_callback (void *data, int fd)
4406{
4407 struct linux_spu_corefile_data *args = data;
4408 int i;
4409
4410 static const char *spu_files[] =
4411 {
4412 "object-id",
4413 "mem",
4414 "regs",
4415 "fpcr",
4416 "lslr",
4417 "decr",
4418 "decr_status",
4419 "signal1",
4420 "signal1_type",
4421 "signal2",
4422 "signal2_type",
4423 "event_mask",
4424 "event_status",
4425 "mbox_info",
4426 "ibox_info",
4427 "wbox_info",
4428 "dma_info",
4429 "proxydma_info",
4430 };
4431
4432 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4433 {
4434 char annex[32], note_name[32];
4435 gdb_byte *spu_data;
4436 LONGEST spu_len;
4437
4438 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4439 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4440 annex, &spu_data);
4441 if (spu_len > 0)
4442 {
4443 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4444 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4445 args->note_size, note_name,
4446 NT_SPU, spu_data, spu_len);
4447 xfree (spu_data);
4448 }
4449 }
4450}
4451
4452static char *
4453linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4454{
4455 struct linux_spu_corefile_data args;
e0881a8e 4456
efcbbd14
UW
4457 args.obfd = obfd;
4458 args.note_data = note_data;
4459 args.note_size = note_size;
4460
4461 iterate_over_spus (PIDGET (inferior_ptid),
4462 linux_spu_corefile_callback, &args);
4463
4464 return args.note_data;
4465}
4466
dba24537
AC
4467/* Fills the "to_make_corefile_note" target vector. Builds the note
4468 section for a corefile, and returns it in a malloc buffer. */
4469
4470static char *
4471linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4472{
4473 struct linux_nat_corefile_thread_data thread_args;
d99148ef 4474 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4475 char fname[16] = { '\0' };
d99148ef 4476 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4477 char psargs[80] = { '\0' };
4478 char *note_data = NULL;
d90e17a7 4479 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4480 gdb_byte *auxv;
dba24537
AC
4481 int auxv_len;
4482
4483 if (get_exec_file (0))
4484 {
9f37bbcc 4485 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
dba24537
AC
4486 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4487 if (get_inferior_args ())
4488 {
d99148ef
JK
4489 char *string_end;
4490 char *psargs_end = psargs + sizeof (psargs);
4491
4492 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4493 strings fine. */
4494 string_end = memchr (psargs, 0, sizeof (psargs));
4495 if (string_end != NULL)
4496 {
4497 *string_end++ = ' ';
4498 strncpy (string_end, get_inferior_args (),
4499 psargs_end - string_end);
4500 }
dba24537
AC
4501 }
4502 note_data = (char *) elfcore_write_prpsinfo (obfd,
4503 note_data,
4504 note_size, fname, psargs);
4505 }
4506
4507 /* Dump information for threads. */
4508 thread_args.obfd = obfd;
4509 thread_args.note_data = note_data;
4510 thread_args.note_size = note_size;
4511 thread_args.num_notes = 0;
2020b7ab 4512 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4513 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4514 gdb_assert (thread_args.num_notes != 0);
4515 note_data = thread_args.note_data;
dba24537 4516
13547ab6
DJ
4517 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4518 NULL, &auxv);
dba24537
AC
4519 if (auxv_len > 0)
4520 {
4521 note_data = elfcore_write_note (obfd, note_data, note_size,
4522 "CORE", NT_AUXV, auxv, auxv_len);
4523 xfree (auxv);
4524 }
4525
efcbbd14
UW
4526 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4527
dba24537
AC
4528 make_cleanup (xfree, note_data);
4529 return note_data;
4530}
4531
4532/* Implement the "info proc" command. */
4533
4534static void
4535linux_nat_info_proc_cmd (char *args, int from_tty)
4536{
89ecc4f5
DE
4537 /* A long is used for pid instead of an int to avoid a loss of precision
4538 compiler warning from the output of strtoul. */
4539 long pid = PIDGET (inferior_ptid);
dba24537
AC
4540 FILE *procfile;
4541 char **argv = NULL;
4542 char buffer[MAXPATHLEN];
4543 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4544 int cmdline_f = 1;
4545 int cwd_f = 1;
4546 int exe_f = 1;
4547 int mappings_f = 0;
dba24537
AC
4548 int status_f = 0;
4549 int stat_f = 0;
4550 int all = 0;
4551 struct stat dummy;
4552
4553 if (args)
4554 {
4555 /* Break up 'args' into an argv array. */
d1a41061
PP
4556 argv = gdb_buildargv (args);
4557 make_cleanup_freeargv (argv);
dba24537
AC
4558 }
4559 while (argv != NULL && *argv != NULL)
4560 {
4561 if (isdigit (argv[0][0]))
4562 {
4563 pid = strtoul (argv[0], NULL, 10);
4564 }
4565 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4566 {
4567 mappings_f = 1;
4568 }
4569 else if (strcmp (argv[0], "status") == 0)
4570 {
4571 status_f = 1;
4572 }
4573 else if (strcmp (argv[0], "stat") == 0)
4574 {
4575 stat_f = 1;
4576 }
4577 else if (strcmp (argv[0], "cmd") == 0)
4578 {
4579 cmdline_f = 1;
4580 }
4581 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4582 {
4583 exe_f = 1;
4584 }
4585 else if (strcmp (argv[0], "cwd") == 0)
4586 {
4587 cwd_f = 1;
4588 }
4589 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4590 {
4591 all = 1;
4592 }
4593 else
4594 {
1777feb0 4595 /* [...] (future options here). */
dba24537
AC
4596 }
4597 argv++;
4598 }
4599 if (pid == 0)
8a3fe4f8 4600 error (_("No current process: you must name one."));
dba24537 4601
89ecc4f5 4602 sprintf (fname1, "/proc/%ld", pid);
dba24537 4603 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4604 error (_("No /proc directory: '%s'"), fname1);
dba24537 4605
89ecc4f5 4606 printf_filtered (_("process %ld\n"), pid);
dba24537
AC
4607 if (cmdline_f || all)
4608 {
89ecc4f5 4609 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4610 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4611 {
7c8a8b04 4612 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4613
bf1d7d9c
JB
4614 if (fgets (buffer, sizeof (buffer), procfile))
4615 printf_filtered ("cmdline = '%s'\n", buffer);
4616 else
4617 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4618 do_cleanups (cleanup);
dba24537
AC
4619 }
4620 else
8a3fe4f8 4621 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4622 }
4623 if (cwd_f || all)
4624 {
89ecc4f5 4625 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4626 memset (fname2, 0, sizeof (fname2));
4627 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4628 printf_filtered ("cwd = '%s'\n", fname2);
4629 else
8a3fe4f8 4630 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4631 }
4632 if (exe_f || all)
4633 {
89ecc4f5 4634 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4635 memset (fname2, 0, sizeof (fname2));
4636 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4637 printf_filtered ("exe = '%s'\n", fname2);
4638 else
8a3fe4f8 4639 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4640 }
4641 if (mappings_f || all)
4642 {
89ecc4f5 4643 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4644 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4645 {
4646 long long addr, endaddr, size, offset, inode;
4647 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4648 struct cleanup *cleanup;
dba24537 4649
7c8a8b04 4650 cleanup = make_cleanup_fclose (procfile);
a3f17187 4651 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4652 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4653 {
4654 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4655 "Start Addr",
4656 " End Addr",
4657 " Size", " Offset", "objfile");
4658 }
4659 else
4660 {
4661 printf_filtered (" %18s %18s %10s %10s %7s\n",
4662 "Start Addr",
4663 " End Addr",
4664 " Size", " Offset", "objfile");
4665 }
4666
4667 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4668 &offset, &device[0], &inode, &filename[0]))
4669 {
4670 size = endaddr - addr;
4671
4672 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4673 calls here (and possibly above) should be abstracted
4674 out into their own functions? Andrew suggests using
4675 a generic local_address_string instead to print out
4676 the addresses; that makes sense to me, too. */
4677
a97b0ac8 4678 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4679 {
4680 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4681 (unsigned long) addr, /* FIXME: pr_addr */
4682 (unsigned long) endaddr,
4683 (int) size,
4684 (unsigned int) offset,
4685 filename[0] ? filename : "");
4686 }
4687 else
4688 {
4689 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4690 (unsigned long) addr, /* FIXME: pr_addr */
4691 (unsigned long) endaddr,
4692 (int) size,
4693 (unsigned int) offset,
4694 filename[0] ? filename : "");
4695 }
4696 }
4697
7c8a8b04 4698 do_cleanups (cleanup);
dba24537
AC
4699 }
4700 else
8a3fe4f8 4701 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4702 }
4703 if (status_f || all)
4704 {
89ecc4f5 4705 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4706 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4707 {
7c8a8b04 4708 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4709
dba24537
AC
4710 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4711 puts_filtered (buffer);
7c8a8b04 4712 do_cleanups (cleanup);
dba24537
AC
4713 }
4714 else
8a3fe4f8 4715 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4716 }
4717 if (stat_f || all)
4718 {
89ecc4f5 4719 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4720 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4721 {
4722 int itmp;
4723 char ctmp;
a25694b4 4724 long ltmp;
7c8a8b04 4725 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4726
4727 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4728 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4729 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4730 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4731 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4732 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4733 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4734 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4735 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4736 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4737 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4738 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4739 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4740 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4741 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4742 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4743 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4744 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4745 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4746 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4747 (unsigned long) ltmp);
4748 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4749 printf_filtered (_("Minor faults, children: %lu\n"),
4750 (unsigned long) ltmp);
4751 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4752 printf_filtered (_("Major faults (memory page faults): %lu\n"),
4753 (unsigned long) ltmp);
4754 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4755 printf_filtered (_("Major faults, children: %lu\n"),
4756 (unsigned long) ltmp);
4757 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4758 printf_filtered (_("utime: %ld\n"), ltmp);
4759 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4760 printf_filtered (_("stime: %ld\n"), ltmp);
4761 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4762 printf_filtered (_("utime, children: %ld\n"), ltmp);
4763 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4764 printf_filtered (_("stime, children: %ld\n"), ltmp);
4765 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4766 printf_filtered (_("jiffies remaining in current "
4767 "time slice: %ld\n"), ltmp);
a25694b4
AS
4768 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4769 printf_filtered (_("'nice' value: %ld\n"), ltmp);
4770 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4771 printf_filtered (_("jiffies until next timeout: %lu\n"),
4772 (unsigned long) ltmp);
4773 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4774 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4775 (unsigned long) ltmp);
4776 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4777 printf_filtered (_("start time (jiffies since "
4778 "system boot): %ld\n"), ltmp);
a25694b4
AS
4779 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4780 printf_filtered (_("Virtual memory size: %lu\n"),
4781 (unsigned long) ltmp);
4782 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3e43a32a
MS
4783 printf_filtered (_("Resident set size: %lu\n"),
4784 (unsigned long) ltmp);
a25694b4
AS
4785 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4786 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4787 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4788 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4789 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4790 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4791 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4792 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3e43a32a
MS
4793#if 0 /* Don't know how architecture-dependent the rest is...
4794 Anyway the signal bitmap info is available from "status". */
1777feb0 4795 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4796 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
1777feb0 4797 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4
AS
4798 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4799 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4800 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4801 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4802 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4803 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4804 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4805 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4806 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
1777feb0 4807 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4808 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 4809#endif
7c8a8b04 4810 do_cleanups (cleanup);
dba24537
AC
4811 }
4812 else
8a3fe4f8 4813 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4814 }
4815}
4816
10d6c8cd
DJ
4817/* Implement the to_xfer_partial interface for memory reads using the /proc
4818 filesystem. Because we can use a single read() call for /proc, this
4819 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4820 but it doesn't support writes. */
4821
4822static LONGEST
4823linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4824 const char *annex, gdb_byte *readbuf,
4825 const gdb_byte *writebuf,
4826 ULONGEST offset, LONGEST len)
dba24537 4827{
10d6c8cd
DJ
4828 LONGEST ret;
4829 int fd;
dba24537
AC
4830 char filename[64];
4831
10d6c8cd 4832 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4833 return 0;
4834
4835 /* Don't bother for one word. */
4836 if (len < 3 * sizeof (long))
4837 return 0;
4838
4839 /* We could keep this file open and cache it - possibly one per
4840 thread. That requires some juggling, but is even faster. */
4841 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4842 fd = open (filename, O_RDONLY | O_LARGEFILE);
4843 if (fd == -1)
4844 return 0;
4845
4846 /* If pread64 is available, use it. It's faster if the kernel
4847 supports it (only one syscall), and it's 64-bit safe even on
4848 32-bit platforms (for instance, SPARC debugging a SPARC64
4849 application). */
4850#ifdef HAVE_PREAD64
10d6c8cd 4851 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4852#else
10d6c8cd 4853 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4854#endif
4855 ret = 0;
4856 else
4857 ret = len;
4858
4859 close (fd);
4860 return ret;
4861}
4862
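/* A minimal standalone sketch, not taken from linux-nat.c, of the
   single-syscall /proc/<pid>/mem read that linux_proc_xfer_partial
   prefers above over repeated PTRACE_PEEKTEXT calls.  The target
   thread is assumed to be ptrace-stopped already; demo_read_mem is a
   hypothetical name, and a 64-bit off_t (or pread64) is assumed for
   large addresses.  */

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

static ssize_t
demo_read_mem (pid_t pid, unsigned long long addr, void *buf, size_t len)
{
  char path[64];
  int fd;
  ssize_t n;

  snprintf (path, sizeof (path), "/proc/%d/mem", (int) pid);
  fd = open (path, O_RDONLY);
  if (fd < 0)
    return -1;

  /* pread combines the seek and the read into one call.  */
  n = pread (fd, buf, len, (off_t) addr);

  close (fd);
  return n;
}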
efcbbd14
UW
4863
4864/* Enumerate spufs IDs for process PID. */
4865static LONGEST
4866spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4867{
4868 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4869 LONGEST pos = 0;
4870 LONGEST written = 0;
4871 char path[128];
4872 DIR *dir;
4873 struct dirent *entry;
4874
4875 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4876 dir = opendir (path);
4877 if (!dir)
4878 return -1;
4879
4880 rewinddir (dir);
4881 while ((entry = readdir (dir)) != NULL)
4882 {
4883 struct stat st;
4884 struct statfs stfs;
4885 int fd;
4886
4887 fd = atoi (entry->d_name);
4888 if (!fd)
4889 continue;
4890
4891 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4892 if (stat (path, &st) != 0)
4893 continue;
4894 if (!S_ISDIR (st.st_mode))
4895 continue;
4896
4897 if (statfs (path, &stfs) != 0)
4898 continue;
4899 if (stfs.f_type != SPUFS_MAGIC)
4900 continue;
4901
4902 if (pos >= offset && pos + 4 <= offset + len)
4903 {
4904 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4905 written += 4;
4906 }
4907 pos += 4;
4908 }
4909
4910 closedir (dir);
4911 return written;
4912}
4913
4914/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4915 object type, using the /proc file system. */
4916static LONGEST
4917linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4918 const char *annex, gdb_byte *readbuf,
4919 const gdb_byte *writebuf,
4920 ULONGEST offset, LONGEST len)
4921{
4922 char buf[128];
4923 int fd = 0;
4924 int ret = -1;
4925 int pid = PIDGET (inferior_ptid);
4926
4927 if (!annex)
4928 {
4929 if (!readbuf)
4930 return -1;
4931 else
4932 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4933 }
4934
4935 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4936 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4937 if (fd <= 0)
4938 return -1;
4939
4940 if (offset != 0
4941 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4942 {
4943 close (fd);
4944 return 0;
4945 }
4946
4947 if (writebuf)
4948 ret = write (fd, writebuf, (size_t) len);
4949 else if (readbuf)
4950 ret = read (fd, readbuf, (size_t) len);
4951
4952 close (fd);
4953 return ret;
4954}
4955
4956
dba24537
AC
4957/* Parse LINE as a signal set and add its set bits to SIGS. */
4958
4959static void
4960add_line_to_sigset (const char *line, sigset_t *sigs)
4961{
4962 int len = strlen (line) - 1;
4963 const char *p;
4964 int signum;
4965
4966 if (line[len] != '\n')
8a3fe4f8 4967 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4968
4969 p = line;
4970 signum = len * 4;
4971 while (len-- > 0)
4972 {
4973 int digit;
4974
4975 if (*p >= '0' && *p <= '9')
4976 digit = *p - '0';
4977 else if (*p >= 'a' && *p <= 'f')
4978 digit = *p - 'a' + 10;
4979 else
8a3fe4f8 4980 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4981
4982 signum -= 4;
4983
4984 if (digit & 1)
4985 sigaddset (sigs, signum + 1);
4986 if (digit & 2)
4987 sigaddset (sigs, signum + 2);
4988 if (digit & 4)
4989 sigaddset (sigs, signum + 3);
4990 if (digit & 8)
4991 sigaddset (sigs, signum + 4);
4992
4993 p++;
4994 }
4995}
4996
4997/* Find process PID's pending signals from /proc/pid/status and set
4998 SIGS to match. */
4999
5000void
3e43a32a
MS
5001linux_proc_pending_signals (int pid, sigset_t *pending,
5002 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
5003{
5004 FILE *procfile;
5005 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 5006 struct cleanup *cleanup;
dba24537
AC
5007
5008 sigemptyset (pending);
5009 sigemptyset (blocked);
5010 sigemptyset (ignored);
5011 sprintf (fname, "/proc/%d/status", pid);
5012 procfile = fopen (fname, "r");
5013 if (procfile == NULL)
8a3fe4f8 5014 error (_("Could not open %s"), fname);
7c8a8b04 5015 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
5016
5017 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5018 {
5019 /* Normal queued signals are on the SigPnd line in the status
5020 file. However, 2.6 kernels also have a "shared" pending
5021 queue for delivering signals to a thread group, so check for
5022 a ShdPnd line also.
5023
5024 Unfortunately some Red Hat kernels include the shared pending
5025 queue but not the ShdPnd status field. */
5026
5027 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5028 add_line_to_sigset (buffer + 8, pending);
5029 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5030 add_line_to_sigset (buffer + 8, pending);
5031 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5032 add_line_to_sigset (buffer + 8, blocked);
5033 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5034 add_line_to_sigset (buffer + 8, ignored);
5035 }
5036
7c8a8b04 5037 do_cleanups (cleanup);
dba24537
AC
5038}
5039
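/* A minimal standalone sketch, not taken from linux-nat.c, that dumps
   the SigPnd/ShdPnd/SigBlk/SigIgn lines decoded by
   linux_proc_pending_signals above.  Each line carries a hexadecimal
   signal bitmap with the lowest-numbered signal in the least
   significant bit.  demo_print_signal_masks is a hypothetical name.  */

#include <stdio.h>
#include <string.h>

static void
demo_print_signal_masks (int pid)
{
  char path[64], line[256];
  FILE *f;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0
	|| strncmp (line, "ShdPnd:", 7) == 0
	|| strncmp (line, "SigBlk:", 7) == 0
	|| strncmp (line, "SigIgn:", 7) == 0)
      fputs (line, stdout);

  fclose (f);
}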
07e059b5
VP
5040static LONGEST
5041linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
5042 const char *annex, gdb_byte *readbuf,
5043 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5
VP
5044{
5045 /* We make the process list snapshot when the object starts to be
5046 read. */
5047 static const char *buf;
5048 static LONGEST len_avail = -1;
5049 static struct obstack obstack;
5050
5051 DIR *dirp;
5052
5053 gdb_assert (object == TARGET_OBJECT_OSDATA);
5054
a61408f8
SS
5055 if (!annex)
5056 {
5057 if (offset == 0)
5058 {
5059 if (len_avail != -1 && len_avail != 0)
5060 obstack_free (&obstack, NULL);
5061 len_avail = 0;
5062 buf = NULL;
5063 obstack_init (&obstack);
5064 obstack_grow_str (&obstack, "<osdata type=\"types\">\n");
5065
3e43a32a 5066 obstack_xml_printf (&obstack,
a61408f8
SS
5067 "<item>"
5068 "<column name=\"Type\">processes</column>"
3e43a32a
MS
5069 "<column name=\"Description\">"
5070 "Listing of all processes</column>"
a61408f8
SS
5071 "</item>");
5072
5073 obstack_grow_str0 (&obstack, "</osdata>\n");
5074 buf = obstack_finish (&obstack);
5075 len_avail = strlen (buf);
5076 }
5077
5078 if (offset >= len_avail)
5079 {
5080 /* Done. Get rid of the obstack. */
5081 obstack_free (&obstack, NULL);
5082 buf = NULL;
5083 len_avail = 0;
5084 return 0;
5085 }
5086
5087 if (len > len_avail - offset)
5088 len = len_avail - offset;
5089 memcpy (readbuf, buf + offset, len);
5090
5091 return len;
5092 }
5093
07e059b5
VP
5094 if (strcmp (annex, "processes") != 0)
5095 return 0;
5096
5097 gdb_assert (readbuf && !writebuf);
5098
5099 if (offset == 0)
5100 {
5101 if (len_avail != -1 && len_avail != 0)
e0881a8e 5102 obstack_free (&obstack, NULL);
07e059b5
VP
5103 len_avail = 0;
5104 buf = NULL;
5105 obstack_init (&obstack);
5106 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
5107
5108 dirp = opendir ("/proc");
5109 if (dirp)
e0881a8e
MS
5110 {
5111 struct dirent *dp;
5112
5113 while ((dp = readdir (dirp)) != NULL)
5114 {
5115 struct stat statbuf;
5116 char procentry[sizeof ("/proc/4294967295")];
5117
5118 if (!isdigit (dp->d_name[0])
5119 || NAMELEN (dp) > sizeof ("4294967295") - 1)
5120 continue;
5121
5122 sprintf (procentry, "/proc/%s", dp->d_name);
5123 if (stat (procentry, &statbuf) == 0
5124 && S_ISDIR (statbuf.st_mode))
5125 {
5126 char *pathname;
5127 FILE *f;
5128 char cmd[MAXPATHLEN + 1];
5129 struct passwd *entry;
5130
5131 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
5132 entry = getpwuid (statbuf.st_uid);
5133
5134 if ((f = fopen (pathname, "r")) != NULL)
5135 {
5eee517d 5136 size_t length = fread (cmd, 1, sizeof (cmd) - 1, f);
e0881a8e 5137
5eee517d 5138 if (length > 0)
e0881a8e
MS
5139 {
5140 int i;
5141
5eee517d 5142 for (i = 0; i < length; i++)
e0881a8e
MS
5143 if (cmd[i] == '\0')
5144 cmd[i] = ' ';
5eee517d 5145 cmd[length] = '\0';
e0881a8e
MS
5146
5147 obstack_xml_printf (
5148 &obstack,
5149 "<item>"
5150 "<column name=\"pid\">%s</column>"
5151 "<column name=\"user\">%s</column>"
5152 "<column name=\"command\">%s</column>"
5153 "</item>",
5154 dp->d_name,
5155 entry ? entry->pw_name : "?",
5156 cmd);
5157 }
5158 fclose (f);
5159 }
5160
5161 xfree (pathname);
5162 }
5163 }
5164
5165 closedir (dirp);
5166 }
07e059b5
VP
5167
5168 obstack_grow_str0 (&obstack, "</osdata>\n");
5169 buf = obstack_finish (&obstack);
5170 len_avail = strlen (buf);
5171 }
5172
5173 if (offset >= len_avail)
5174 {
5175 /* Done. Get rid of the obstack. */
5176 obstack_free (&obstack, NULL);
5177 buf = NULL;
5178 len_avail = 0;
5179 return 0;
5180 }
5181
5182 if (len > len_avail - offset)
5183 len = len_avail - offset;
5184 memcpy (readbuf, buf + offset, len);
5185
5186 return len;
5187}
5188
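The transfer function above follows the partial-read protocol used throughout this file: the whole document is generated on the call with OFFSET zero, later calls just copy slices of it, and a zero-length return marks end-of-data. Below is a minimal standalone sketch of that pattern; the name xfer_procs_sketch and the 64-byte chunk size are invented for the demo and are not part of GDB.

#include <ctype.h>
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build the snapshot on the first call (offset == 0), then serve
   slices of it.  Returns the number of bytes copied, 0 at EOF.  */
static long
xfer_procs_sketch (char *readbuf, long offset, long len)
{
  static char *buf;
  static long len_avail = -1;

  if (offset == 0)
    {
      DIR *dirp;
      struct dirent *dp;
      size_t used = 0, allocated = 1024;

      free (buf);
      buf = malloc (allocated);
      buf[0] = '\0';

      dirp = opendir ("/proc");
      if (dirp != NULL)
	{
	  while ((dp = readdir (dirp)) != NULL)
	    {
	      if (!isdigit ((unsigned char) dp->d_name[0]))
		continue;
	      /* Grow the buffer as needed and append "<pid>\n".  */
	      while (used + strlen (dp->d_name) + 2 > allocated)
		buf = realloc (buf, allocated *= 2);
	      used += sprintf (buf + used, "%s\n", dp->d_name);
	    }
	  closedir (dirp);
	}
      len_avail = (long) used;
    }

  if (offset >= len_avail)
    return 0;			/* Done; nothing left to copy.  */
  if (len > len_avail - offset)
    len = len_avail - offset;
  memcpy (readbuf, buf + offset, len);
  return len;
}

int
main (void)
{
  char chunk[64];
  long offset = 0, n;

  /* Consume the object the way a partial-read caller would: in
     fixed-size chunks, advancing OFFSET, until a zero-length
     transfer.  */
  while ((n = xfer_procs_sketch (chunk, offset, sizeof (chunk))) > 0)
    {
      fwrite (chunk, 1, n, stdout);
      offset += n;
    }
  return 0;
}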
10d6c8cd
DJ
5189static LONGEST
5190linux_xfer_partial (struct target_ops *ops, enum target_object object,
5191 const char *annex, gdb_byte *readbuf,
5192 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5193{
5194 LONGEST xfer;
5195
5196 if (object == TARGET_OBJECT_AUXV)
9f2982ff 5197 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
5198 offset, len);
5199
07e059b5
VP
5200 if (object == TARGET_OBJECT_OSDATA)
5201 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5202 offset, len);
5203
efcbbd14
UW
5204 if (object == TARGET_OBJECT_SPU)
5205 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5206 offset, len);
5207
8f313923
JK
5208 /* GDB calculates all addresses in the possibly larger width of the
5209 address. The address needs to be masked before its final use - either
5210 by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5211
5212 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5213
5214 if (object == TARGET_OBJECT_MEMORY)
5215 {
5216 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5217
5218 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5219 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5220 }
5221
10d6c8cd
DJ
5222 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5223 offset, len);
5224 if (xfer != 0)
5225 return xfer;
5226
5227 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5228 offset, len);
5229}
5230
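The masking step above truncates memory addresses that may arrive sign- or otherwise over-extended in the wider ULONGEST representation, and the comparison against ADDR_BIT keeps the shift well defined when the target address is already as wide as the host type. A small sketch of the same computation, using unsigned long long as a stand-in for ULONGEST:

#include <limits.h>
#include <stdio.h>

/* Truncate OFFSET to ADDR_BIT bits, the way linux_xfer_partial does
   for TARGET_OBJECT_MEMORY.  The comparison avoids an undefined
   shift when ADDR_BIT equals the width of unsigned long long.  */
static unsigned long long
mask_address (unsigned long long offset, int addr_bit)
{
  if (addr_bit < (int) (sizeof (unsigned long long) * CHAR_BIT))
    offset &= ((unsigned long long) 1 << addr_bit) - 1;
  return offset;
}

int
main (void)
{
  /* A 32-bit address such as 0x80001000 can arrive sign-extended.  */
  unsigned long long addr = 0xffffffff80001000ULL;

  printf ("%#llx -> %#llx\n", addr, mask_address (addr, 32));  /* 0x80001000 */
  printf ("%#llx -> %#llx\n", addr, mask_address (addr, 64));  /* unchanged */
  return 0;
}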
e9efe249 5231/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
5232 it with local methods. */
5233
910122bf
UW
5234static void
5235linux_target_install_ops (struct target_ops *t)
10d6c8cd 5236{
6d8fd2b7 5237 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 5238 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 5239 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 5240 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 5241 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 5242 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 5243 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 5244 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 5245 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
5246 t->to_post_attach = linux_child_post_attach;
5247 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
5248 t->to_find_memory_regions = linux_nat_find_memory_regions;
5249 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5250
5251 super_xfer_partial = t->to_xfer_partial;
5252 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
5253}
5254
5255struct target_ops *
5256linux_target (void)
5257{
5258 struct target_ops *t;
5259
5260 t = inf_ptrace_target ();
5261 linux_target_install_ops (t);
5262
5263 return t;
5264}
5265
5266struct target_ops *
7714d83a 5267linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
5268{
5269 struct target_ops *t;
5270
5271 t = inf_ptrace_trad_target (register_u_offset);
5272 linux_target_install_ops (t);
10d6c8cd 5273
10d6c8cd
DJ
5274 return t;
5275}
5276
b84876c2
PA
5277/* target_is_async_p implementation. */
5278
5279static int
5280linux_nat_is_async_p (void)
5281{
5282 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5283 it explicitly with the "set target-async" command.
b84876c2 5284 Someday, Linux will always be async. */
c6ebd6cf 5285 if (!target_async_permitted)
b84876c2
PA
5286 return 0;
5287
d90e17a7
PA
5288 /* See target.h/target_async_mask. */
5289 return linux_nat_async_mask_value;
b84876c2
PA
5290}
5291
5292/* target_can_async_p implementation. */
5293
5294static int
5295linux_nat_can_async_p (void)
5296{
5297 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5298 it explicitly with the "set target-async" command.
b84876c2 5299 Someday, Linux will always be async. */
c6ebd6cf 5300 if (!target_async_permitted)
b84876c2
PA
5301 return 0;
5302
5303 /* See target.h/target_async_mask. */
5304 return linux_nat_async_mask_value;
5305}
5306
9908b566
VP
5307static int
5308linux_nat_supports_non_stop (void)
5309{
5310 return 1;
5311}
5312
d90e17a7
PA
5313/* True if we want to support multi-process. To be removed when GDB
5314 supports multi-exec. */
5315
2277426b 5316int linux_multi_process = 1;
d90e17a7
PA
5317
5318static int
5319linux_nat_supports_multi_process (void)
5320{
5321 return linux_multi_process;
5322}
5323
b84876c2
PA
5324/* target_async_mask implementation. */
5325
5326static int
7feb7d06 5327linux_nat_async_mask (int new_mask)
b84876c2 5328{
7feb7d06 5329 int curr_mask = linux_nat_async_mask_value;
b84876c2 5330
7feb7d06 5331 if (curr_mask != new_mask)
b84876c2 5332 {
7feb7d06 5333 if (new_mask == 0)
b84876c2
PA
5334 {
5335 linux_nat_async (NULL, 0);
7feb7d06 5336 linux_nat_async_mask_value = new_mask;
b84876c2
PA
5337 }
5338 else
5339 {
7feb7d06 5340 linux_nat_async_mask_value = new_mask;
84e46146 5341
7feb7d06
PA
5342 /* If we're going out of async-mask in all-stop, then the
5343 inferior is stopped. The next resume will call
5344 target_async. In non-stop, the target event source
5345 should be always registered in the event loop. Do so
5346 now. */
5347 if (non_stop)
5348 linux_nat_async (inferior_event_handler, 0);
b84876c2
PA
5349 }
5350 }
5351
7feb7d06 5352 return curr_mask;
b84876c2
PA
5353}
5354
5355static int async_terminal_is_ours = 1;
5356
5357/* target_terminal_inferior implementation. */
5358
5359static void
5360linux_nat_terminal_inferior (void)
5361{
5362 if (!target_is_async_p ())
5363 {
5364 /* Async mode is disabled. */
5365 terminal_inferior ();
5366 return;
5367 }
5368
b84876c2
PA
5369 terminal_inferior ();
5370
d9d2d8b6 5371 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5372 if (!async_terminal_is_ours)
5373 return;
5374
5375 delete_file_handler (input_fd);
5376 async_terminal_is_ours = 0;
5377 set_sigint_trap ();
5378}
5379
5380/* target_terminal_ours implementation. */
5381
2c0b251b 5382static void
b84876c2
PA
5383linux_nat_terminal_ours (void)
5384{
5385 if (!target_is_async_p ())
5386 {
5387 /* Async mode is disabled. */
5388 terminal_ours ();
5389 return;
5390 }
5391
5392 /* GDB should never give the terminal to the inferior if the
5393 inferior is running in the background (run&, continue&, etc.),
5394 but claiming it sure should. */
5395 terminal_ours ();
5396
b84876c2
PA
5397 if (async_terminal_is_ours)
5398 return;
5399
5400 clear_sigint_trap ();
5401 add_file_handler (input_fd, stdin_event_handler, 0);
5402 async_terminal_is_ours = 1;
5403}
5404
5405static void (*async_client_callback) (enum inferior_event_type event_type,
5406 void *context);
5407static void *async_client_context;
5408
7feb7d06
PA
5409/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5410 it notifies the event loop when any child changes state; it also
5411 allows us to use sigsuspend in linux_nat_wait_1 above to wait for
5412 the arrival of a SIGCHLD. */
5413
b84876c2 5414static void
7feb7d06 5415sigchld_handler (int signo)
b84876c2 5416{
7feb7d06
PA
5417 int old_errno = errno;
5418
5419 if (debug_linux_nat_async)
5420 fprintf_unfiltered (gdb_stdlog, "sigchld\n");
5421
5422 if (signo == SIGCHLD
5423 && linux_nat_event_pipe[0] != -1)
5424 async_file_mark (); /* Let the event loop know that there are
5425 events to handle. */
5426
5427 errno = old_errno;
5428}
5429
5430/* Callback registered with the target events file descriptor. */
5431
5432static void
5433handle_target_event (int error, gdb_client_data client_data)
5434{
5435 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5436}
5437
5438/* Create/destroy the target events pipe. Returns previous state. */
5439
5440static int
5441linux_async_pipe (int enable)
5442{
5443 int previous = (linux_nat_event_pipe[0] != -1);
5444
5445 if (previous != enable)
5446 {
5447 sigset_t prev_mask;
5448
5449 block_child_signals (&prev_mask);
5450
5451 if (enable)
5452 {
5453 if (pipe (linux_nat_event_pipe) == -1)
5454 internal_error (__FILE__, __LINE__,
5455 "creating event pipe failed.");
5456
5457 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5458 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5459 }
5460 else
5461 {
5462 close (linux_nat_event_pipe[0]);
5463 close (linux_nat_event_pipe[1]);
5464 linux_nat_event_pipe[0] = -1;
5465 linux_nat_event_pipe[1] = -1;
5466 }
5467
5468 restore_child_signals_mask (&prev_mask);
5469 }
5470
5471 return previous;
b84876c2
PA
5472}
5473
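sigchld_handler and linux_async_pipe together implement the classic self-pipe trick: the signal handler does nothing but mark a non-blocking pipe, and the event loop watches the pipe's read end, so all real work happens outside signal context. Below is a minimal standalone sketch of that pattern; the helper names, the use of select, and the throwaway child are invented for the demo.

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static int event_pipe[2];

/* Async-signal-safe SIGCHLD handler: just mark the pipe.  */
static void
sigchld_mark (int signo)
{
  int old_errno = errno;
  char c = '+';

  (void) signo;
  if (write (event_pipe[1], &c, 1) < 0)
    ;				/* Nothing we can do in a handler.  */
  errno = old_errno;
}

int
main (void)
{
  struct sigaction sa;
  fd_set readfds;
  char buf[32];
  pid_t child;
  int n;

  if (pipe (event_pipe) == -1)
    return 1;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  sa.sa_handler = sigchld_mark;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  child = fork ();
  if (child == 0)
    _exit (0);			/* Child exits immediately -> SIGCHLD.  */

  /* Event loop: block until the handler marks the pipe, then collect
     the child's status with waitpid.  */
  do
    {
      FD_ZERO (&readfds);
      FD_SET (event_pipe[0], &readfds);
      n = select (event_pipe[0] + 1, &readfds, NULL, NULL, NULL);
    }
  while (n == -1 && errno == EINTR);

  if (n > 0)
    {
      int status;

      while (read (event_pipe[0], buf, sizeof (buf)) > 0)
	continue;		/* Drain the pipe.  */
      waitpid (child, &status, 0);
      printf ("child %ld exited, status %#x\n", (long) child, status);
    }
  return 0;
}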
5474/* target_async implementation. */
5475
5476static void
5477linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5478 void *context), void *context)
5479{
c6ebd6cf 5480 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
5481 internal_error (__FILE__, __LINE__,
5482 "Calling target_async when async is masked");
5483
5484 if (callback != NULL)
5485 {
5486 async_client_callback = callback;
5487 async_client_context = context;
7feb7d06
PA
5488 if (!linux_async_pipe (1))
5489 {
5490 add_file_handler (linux_nat_event_pipe[0],
5491 handle_target_event, NULL);
5492 /* There may be pending events to handle. Tell the event loop
5493 to poll them. */
5494 async_file_mark ();
5495 }
b84876c2
PA
5496 }
5497 else
5498 {
5499 async_client_callback = callback;
5500 async_client_context = context;
b84876c2 5501 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5502 linux_async_pipe (0);
b84876c2
PA
5503 }
5504 return;
5505}
5506
252fbfc8
PA
5507/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5508 event came out. */
5509
4c28f408 5510static int
252fbfc8 5511linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5512{
d90e17a7 5513 if (!lwp->stopped)
252fbfc8 5514 {
d90e17a7 5515 ptid_t ptid = lwp->ptid;
252fbfc8 5516
d90e17a7
PA
5517 if (debug_linux_nat)
5518 fprintf_unfiltered (gdb_stdlog,
5519 "LNSL: running -> suspending %s\n",
5520 target_pid_to_str (lwp->ptid));
252fbfc8 5521
252fbfc8 5522
d90e17a7
PA
5523 stop_callback (lwp, NULL);
5524 stop_wait_callback (lwp, NULL);
252fbfc8 5525
d90e17a7
PA
5526 /* If the lwp exits while we try to stop it, there's nothing
5527 else to do. */
5528 lwp = find_lwp_pid (ptid);
5529 if (lwp == NULL)
5530 return 0;
252fbfc8 5531
d90e17a7
PA
5532 /* If we didn't collect any signal other than SIGSTOP while
5533 stopping the LWP, push a SIGNAL_0 event. In either case, the
5534 event-loop will end up calling target_wait which will collect
5535 these. */
5536 if (lwp->status == 0)
5537 lwp->status = W_STOPCODE (0);
5538 async_file_mark ();
5539 }
5540 else
5541 {
5542 /* Already known to be stopped; do nothing. */
252fbfc8 5543
d90e17a7
PA
5544 if (debug_linux_nat)
5545 {
e09875d4 5546 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5547 fprintf_unfiltered (gdb_stdlog,
5548 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5549 target_pid_to_str (lwp->ptid));
5550 else
3e43a32a
MS
5551 fprintf_unfiltered (gdb_stdlog,
5552 "LNSL: already stopped/no "
5553 "stop_requested yet %s\n",
d90e17a7 5554 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5555 }
5556 }
4c28f408
PA
5557 return 0;
5558}
5559
5560static void
5561linux_nat_stop (ptid_t ptid)
5562{
5563 if (non_stop)
d90e17a7 5564 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5565 else
5566 linux_ops->to_stop (ptid);
5567}
5568
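Pushing W_STOPCODE (0) above fabricates a wait status that decodes as "stopped by signal 0", the TARGET_SIGNAL_0 stop status that target_wait later collects. The tiny sketch below shows how the standard waitpid macros interpret such a value; since not every libc exposes W_STOPCODE, the sketch supplies the conventional fallback definition.

#include <stdio.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
/* Conventional fallback when libc does not provide the macro.  */
#define W_STOPCODE(sig) (((sig) << 8) | 0x7f)
#endif

int
main (void)
{
  int status = W_STOPCODE (0);	/* The synthetic "stopped, signal 0".  */

  printf ("status     = %#x\n", status);	     /* 0x7f */
  printf ("WIFSTOPPED = %d\n", WIFSTOPPED (status)); /* 1 */
  printf ("WSTOPSIG   = %d\n", WSTOPSIG (status));   /* 0 */
  printf ("WIFEXITED  = %d\n", WIFEXITED (status));  /* 0 */
  return 0;
}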
d90e17a7
PA
5569static void
5570linux_nat_close (int quitting)
5571{
5572 /* Unregister from the event loop. */
5573 if (target_is_async_p ())
5574 target_async (NULL, 0);
5575
5576 /* Reset the async mask. */
5577 linux_nat_async_mask_value = 1;
5578
5579 if (linux_ops->to_close)
5580 linux_ops->to_close (quitting);
5581}
5582
c0694254
PA
5583/* When requests are passed down from the linux-nat layer to the
5584 single-threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5585 used. The address space pointer is stored in the inferior object,
5586 but the common code that is passed such a ptid can't tell whether
5587 lwpid is a "main" process id or not (it assumes so). We do the
5588 reverse lookup, from the lwp to the "main" process id, here. */
5589
5590struct address_space *
5591linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5592{
5593 struct lwp_info *lwp;
5594 struct inferior *inf;
5595 int pid;
5596
5597 pid = GET_LWP (ptid);
5598 if (GET_LWP (ptid) == 0)
5599 {
5600 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5601 tgid. */
5602 lwp = find_lwp_pid (ptid);
5603 pid = GET_PID (lwp->ptid);
5604 }
5605 else
5606 {
5607 /* A (pid,lwpid,0) ptid. */
5608 pid = GET_PID (ptid);
5609 }
5610
5611 inf = find_inferior_pid (pid);
5612 gdb_assert (inf != NULL);
5613 return inf->aspace;
5614}
5615
dc146f7c
VP
5616int
5617linux_nat_core_of_thread_1 (ptid_t ptid)
5618{
5619 struct cleanup *back_to;
5620 char *filename;
5621 FILE *f;
5622 char *content = NULL;
5623 char *p;
5624 char *ts = 0;
5625 int content_read = 0;
5626 int i;
5627 int core;
5628
5629 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5630 GET_PID (ptid), GET_LWP (ptid));
5631 back_to = make_cleanup (xfree, filename);
5632
5633 f = fopen (filename, "r");
5634 if (!f)
5635 {
5636 do_cleanups (back_to);
5637 return -1;
5638 }
5639
5640 make_cleanup_fclose (f);
5641
5642 for (;;)
5643 {
5644 int n;
e0881a8e 5645
dc146f7c
VP
5646 content = xrealloc (content, content_read + 1024);
5647 n = fread (content + content_read, 1, 1024, f);
5648 content_read += n;
5649 if (n < 1024)
5650 {
5651 content[content_read] = '\0';
5652 break;
5653 }
5654 }
5655
5656 make_cleanup (xfree, content);
5657
5658 p = strchr (content, '(');
ca2a87a0
JK
5659
5660 /* Skip ")". */
5661 if (p != NULL)
5662 p = strchr (p, ')');
5663 if (p != NULL)
5664 p++;
dc146f7c
VP
5665
5666 /* If the first field after the program name has index 0, then the core
5667 number is the field with index 36. There's no constant for that anywhere. */
ca2a87a0
JK
5668 if (p != NULL)
5669 p = strtok_r (p, " ", &ts);
5670 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5671 p = strtok_r (NULL, " ", &ts);
5672
ca2a87a0 5673 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5674 core = -1;
5675
5676 do_cleanups (back_to);
5677
5678 return core;
5679}
5680
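The field arithmetic above is fiddly because the second field of /proc/<pid>/task/<tid>/stat is the command name in parentheses and may contain spaces, so counting starts only after the closing ')'; the field with index 36 from that point is the 39th stat field, "processor", per proc(5). A standalone sketch of the same parsing, reading /proc/self/stat just for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Print the CPU this process last ran on, read from /proc/self/stat
   the same way as above: skip the "(comm)" field, then take the
   field with index 36 after it (the 39th stat field, "processor").  */
int
main (void)
{
  FILE *f = fopen ("/proc/self/stat", "r");
  char buf[4096];
  char *p, *ts = NULL;
  size_t n;
  int i, core = -1;

  if (f == NULL)
    return 1;
  n = fread (buf, 1, sizeof (buf) - 1, f);
  fclose (f);
  buf[n] = '\0';

  /* Everything up to the closing ')' is "pid (comm"; the comm string
     may contain spaces, so field counting starts after it.  */
  p = strchr (buf, ')');
  if (p != NULL)
    p = strtok_r (p + 1, " ", &ts);	/* Field index 0: state.  */
  for (i = 0; p != NULL && i != 36; i++)
    p = strtok_r (NULL, " ", &ts);

  if (p != NULL)
    core = atoi (p);
  printf ("last ran on CPU %d\n", core);
  return 0;
}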
5681/* Return the cached value of the processor core for thread PTID. */
5682
5683int
5684linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5685{
5686 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5687
dc146f7c
VP
5688 if (info)
5689 return info->core;
5690 return -1;
5691}
5692
f973ed9c
DJ
5693void
5694linux_nat_add_target (struct target_ops *t)
5695{
f973ed9c
DJ
5696 /* Save the provided single-threaded target. We save this in a separate
5697 variable because another target we've inherited from (e.g. inf-ptrace)
5698 may have saved a pointer to T; we want to use it for the final
5699 process stratum target. */
5700 linux_ops_saved = *t;
5701 linux_ops = &linux_ops_saved;
5702
5703 /* Override some methods for multithreading. */
b84876c2 5704 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5705 t->to_attach = linux_nat_attach;
5706 t->to_detach = linux_nat_detach;
5707 t->to_resume = linux_nat_resume;
5708 t->to_wait = linux_nat_wait;
2455069d 5709 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
5710 t->to_xfer_partial = linux_nat_xfer_partial;
5711 t->to_kill = linux_nat_kill;
5712 t->to_mourn_inferior = linux_nat_mourn_inferior;
5713 t->to_thread_alive = linux_nat_thread_alive;
5714 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 5715 t->to_thread_name = linux_nat_thread_name;
f973ed9c 5716 t->to_has_thread_control = tc_schedlock;
c0694254 5717 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5718 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5719 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5720
b84876c2
PA
5721 t->to_can_async_p = linux_nat_can_async_p;
5722 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5723 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
5724 t->to_async = linux_nat_async;
5725 t->to_async_mask = linux_nat_async_mask;
5726 t->to_terminal_inferior = linux_nat_terminal_inferior;
5727 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5728 t->to_close = linux_nat_close;
b84876c2 5729
4c28f408
PA
5730 /* Methods for non-stop support. */
5731 t->to_stop = linux_nat_stop;
5732
d90e17a7
PA
5733 t->to_supports_multi_process = linux_nat_supports_multi_process;
5734
dc146f7c
VP
5735 t->to_core_of_thread = linux_nat_core_of_thread;
5736
f973ed9c
DJ
5737 /* We don't change the stratum; this target will sit at
5738 process_stratum and thread_db will sit at thread_stratum. This
5739 is a little strange, since this is a multi-threaded-capable
5740 target, but we want to be on the stack below thread_db, and we
5741 also want to be used for single-threaded processes. */
5742
5743 add_target (t);
f973ed9c
DJ
5744}
5745
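linux_target_install_ops and linux_nat_add_target both rely on the same delegation idiom: copy the inherited vector of function pointers aside (linux_ops_saved, super_xfer_partial), install overrides, and have the overrides fall back to the saved copy. A minimal sketch of that idiom with a made-up two-method ops structure; toy_ops and every name in it are invented stand-ins, not GDB's target_ops.

#include <stdio.h>

/* A toy ops vector in the style of a target_ops structure.  */
struct toy_ops
{
  long (*xfer_partial) (const char *annex, long offset, long len);
  void (*close) (void);
};

/* The "inherited" (base) implementation.  */
static long
base_xfer_partial (const char *annex, long offset, long len)
{
  printf ("base_xfer_partial: annex=%s offset=%ld len=%ld\n",
	  annex, offset, len);
  return len;
}

static void
base_close (void)
{
  printf ("base_close\n");
}

/* Saved copy of the base vector, like linux_ops_saved above.  */
static struct toy_ops saved_ops;
static long (*super_xfer_partial) (const char *, long, long);

/* Override: handle part of the request specially, delegate the rest.  */
static long
derived_xfer_partial (const char *annex, long offset, long len)
{
  if (annex != NULL && offset == 0)
    printf ("derived_xfer_partial: intercepting first read of %s\n", annex);
  return super_xfer_partial (annex, offset, len);
}

static void
install_overrides (struct toy_ops *t)
{
  saved_ops = *t;			/* Keep the base methods around.  */
  super_xfer_partial = t->xfer_partial;
  t->xfer_partial = derived_xfer_partial;	/* Install the override.  */
}

int
main (void)
{
  struct toy_ops t = { base_xfer_partial, base_close };

  install_overrides (&t);
  t.xfer_partial ("memory", 0, 64);	/* Goes through the override.  */
  t.close ();				/* Still the base method.  */
  return 0;
}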
9f0bdab8
DJ
5746/* Register a method to call whenever a new thread is attached. */
5747void
5748linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
5749{
5750 /* Save the pointer. We only support a single registered instance
5751 of the GNU/Linux native target, so we do not need to map this to
5752 T. */
5753 linux_nat_new_thread = new_thread;
5754}
5755
5b009018
PA
5756/* Register a method that converts a siginfo object between the layout
5757 that ptrace returns, and the layout in the architecture of the
5758 inferior. */
5759void
5760linux_nat_set_siginfo_fixup (struct target_ops *t,
5761 int (*siginfo_fixup) (struct siginfo *,
5762 gdb_byte *,
5763 int))
5764{
5765 /* Save the pointer. */
5766 linux_nat_siginfo_fixup = siginfo_fixup;
5767}
5768
9f0bdab8
DJ
5769/* Return the saved siginfo associated with PTID. */
5770struct siginfo *
5771linux_nat_get_siginfo (ptid_t ptid)
5772{
5773 struct lwp_info *lp = find_lwp_pid (ptid);
5774
5775 gdb_assert (lp != NULL);
5776
5777 return &lp->siginfo;
5778}
5779
2c0b251b
PA
5780/* Provide a prototype to silence -Wmissing-prototypes. */
5781extern initialize_file_ftype _initialize_linux_nat;
5782
d6b0e80f
AC
5783void
5784_initialize_linux_nat (void)
5785{
1bedd215
AC
5786 add_info ("proc", linux_nat_info_proc_cmd, _("\
5787Show /proc process information about any running process.\n\
dba24537
AC
5788Specify any process id, or use the program being debugged by default.\n\
5789Specify any of the following keywords for detailed info:\n\
5790 mappings -- list of mapped memory regions.\n\
5791 stat -- list a bunch of random process info.\n\
5792 status -- list a different bunch of random process info.\n\
1bedd215 5793 all -- list all available /proc info."));
d6b0e80f 5794
b84876c2
PA
5795 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5796 &debug_linux_nat, _("\
5797Set debugging of GNU/Linux lwp module."), _("\
5798Show debugging of GNU/Linux lwp module."), _("\
5799Enables printf debugging output."),
5800 NULL,
5801 show_debug_linux_nat,
5802 &setdebuglist, &showdebuglist);
5803
5804 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
5805 &debug_linux_nat_async, _("\
5806Set debugging of GNU/Linux async lwp module."), _("\
5807Show debugging of GNU/Linux async lwp module."), _("\
5808Enables printf debugging output."),
5809 NULL,
5810 show_debug_linux_nat_async,
5811 &setdebuglist, &showdebuglist);
5812
b84876c2 5813 /* Save this mask as the default. */
d6b0e80f
AC
5814 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5815
7feb7d06
PA
5816 /* Install a SIGCHLD handler. */
5817 sigchld_action.sa_handler = sigchld_handler;
5818 sigemptyset (&sigchld_action.sa_mask);
5819 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5820
5821 /* Make it the default. */
7feb7d06 5822 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5823
5824 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5825 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5826 sigdelset (&suspend_mask, SIGCHLD);
5827
7feb7d06 5828 sigemptyset (&blocked_mask);
10568435
JK
5829
5830 add_setshow_boolean_cmd ("disable-randomization", class_support,
5831 &disable_randomization, _("\
5832Set disabling of debuggee's virtual address space randomization."), _("\
5833Show disabling of debuggee's virtual address space randomization."), _("\
5834When this mode is on (which is the default), randomization of the virtual\n\
5835address space is disabled. Standalone programs run with the randomization\n\
5836enabled by default on some platforms."),
5837 &set_disable_randomization,
5838 &show_disable_randomization,
5839 &setlist, &showlist);
d6b0e80f
AC
5840}
5841\f
5842
5843/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5844 the GNU/Linux Threads library and therefore doesn't really belong
5845 here. */
5846
5847/* Read variable NAME in the target and return its value if found.
5848 Otherwise return zero. It is assumed that the type of the variable
5849 is `int'. */
5850
5851static int
5852get_signo (const char *name)
5853{
5854 struct minimal_symbol *ms;
5855 int signo;
5856
5857 ms = lookup_minimal_symbol (name, NULL, NULL);
5858 if (ms == NULL)
5859 return 0;
5860
8e70166d 5861 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5862 sizeof (signo)) != 0)
5863 return 0;
5864
5865 return signo;
5866}
5867
5868/* Return the set of signals used by the threads library in *SET. */
5869
5870void
5871lin_thread_get_thread_signals (sigset_t *set)
5872{
5873 struct sigaction action;
5874 int restart, cancel;
5875
b84876c2 5876 sigemptyset (&blocked_mask);
d6b0e80f
AC
5877 sigemptyset (set);
5878
5879 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5880 cancel = get_signo ("__pthread_sig_cancel");
5881
5882 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5883 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5884 not provide any way for the debugger to query the signal numbers -
5885 fortunately they don't change! */
5886
d6b0e80f 5887 if (restart == 0)
17fbb0bd 5888 restart = __SIGRTMIN;
d6b0e80f 5889
d6b0e80f 5890 if (cancel == 0)
17fbb0bd 5891 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5892
5893 sigaddset (set, restart);
5894 sigaddset (set, cancel);
5895
5896 /* The GNU/Linux Threads library makes terminating threads send a
5897 special "cancel" signal instead of SIGCHLD. Make sure we catch
5898 those (to prevent them from terminating GDB itself, which is
5899 likely to be their default action) and treat them the same way as
5900 SIGCHLD. */
5901
5902 action.sa_handler = sigchld_handler;
5903 sigemptyset (&action.sa_mask);
58aecb61 5904 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5905 sigaction (cancel, &action, NULL);
5906
5907 /* We block the "cancel" signal throughout this code ... */
5908 sigaddset (&blocked_mask, cancel);
5909 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5910
5911 /* ... except during a sigsuspend. */
5912 sigdelset (&suspend_mask, cancel);
5913}
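The closing lines above express a common pattern: keep a signal blocked with sigprocmask during normal execution, but leave it out of the mask handed to sigsuspend, so it can only be delivered while the code is deliberately waiting. Below is a standalone sketch of that pattern; SIGRTMIN+1 is used merely as a stand-in for the cancel signal, and all names are invented for the demo.

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static volatile sig_atomic_t got_signal;

static void
handler (int signo)
{
  got_signal = signo;
}

int
main (void)
{
  int cancel = SIGRTMIN + 1;	/* Stand-in for the cancel signal.  */
  struct sigaction action;
  sigset_t blocked_mask, suspend_mask;

  memset (&action, 0, sizeof (action));
  action.sa_handler = handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* Block the signal for normal execution ...  */
  sigemptyset (&blocked_mask);
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... so raising it here only makes it pending.  */
  raise (cancel);
  printf ("after raise: got_signal = %d (still pending)\n", (int) got_signal);

  /* sigsuspend temporarily installs a mask without the signal, so the
     pending signal is delivered, the handler runs, and we return.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, cancel);
  sigsuspend (&suspend_mask);
  printf ("after sigsuspend: got_signal = %d\n", (int) got_signal);
  return 0;
}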