gdb/linux-nat.c
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior
to version 2.4, Linux can either wait for an event in the main thread, or in
secondary threads.  (2.4 has the __WALL flag).  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid to get
an event in the main process, if any.  Second, we use non-blocking waitpid
with the __WCLONE flag to check for events in cloned processes.  If nothing
is found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it
means something happened to a child process -- and SIGCHLD will be delivered
both for events in the main debugged process and in cloned processes.  As
soon as we know there's an event, we get back to calling non-blocking waitpid
with and without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we
can't use them, because they have special behavior when the signal is
generated - not when it is delivered.  SIGCONT resumes the entire thread
group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread
we tkill'd.  But we never let the SIGSTOP be delivered; we always intercept
and cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
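
/* As a concrete illustration of the sync-mode scheme above, the core of
   the wait logic looks roughly like the sketch below.  This is a
   simplified, non-compiled sketch (hence the #if 0): the function name is
   hypothetical, and the LWP bookkeeping and error handling that the real
   code in this file performs are omitted.  */
#if 0
static int
wait_for_any_child_sketch (int *statusp)
{
  sigset_t block_mask, prev_mask, suspend_mask;
  int pid;

  /* Block SIGCHLD so it cannot slip in between the waitpid calls and
     sigsuspend; if it arrives while blocked it stays pending, and
     sigsuspend returns immediately.  */
  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* Only let SIGCHLD through while we are inside sigsuspend.  */
  suspend_mask = prev_mask;
  sigdelset (&suspend_mask, SIGCHLD);

  for (;;)
    {
      /* Check the main process first...  */
      pid = waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
        break;

      /* ... then cloned (thread) children.  */
      pid = waitpid (-1, statusp, WNOHANG | __WCLONE);
      if (pid > 0)
        break;

      /* Nothing yet: sleep until a SIGCHLD arrives.  */
      sigsuspend (&suspend_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return pid;
}
#endif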

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

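/* For example, a stop reported by waitpid can be classified as a syscall
   stop with a check along these lines (an illustrative sketch only;
   handle_syscall_stop is a hypothetical helper):  */
#if 0
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    handle_syscall_stop ();
#endif
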
10d6c8cd
DJ
169/* The single-threaded native GNU/Linux target_ops. We save a pointer for
170 the use of the multi-threaded target. */
171static struct target_ops *linux_ops;
f973ed9c 172static struct target_ops linux_ops_saved;
10d6c8cd 173
9f0bdab8
DJ
174/* The method to call, if any, when a new thread is attached. */
175static void (*linux_nat_new_thread) (ptid_t);
176
5b009018
PA
177/* The method to call, if any, when the siginfo object needs to be
178 converted between the layout returned by ptrace, and the layout in
179 the architecture of the inferior. */
180static int (*linux_nat_siginfo_fixup) (struct siginfo *,
181 gdb_byte *,
182 int);
183
ac264b3b
MS
184/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
185 Called by our to_xfer_partial. */
186static LONGEST (*super_xfer_partial) (struct target_ops *,
187 enum target_object,
188 const char *, gdb_byte *,
189 const gdb_byte *,
10d6c8cd
DJ
190 ULONGEST, LONGEST);
191
d6b0e80f 192static int debug_linux_nat;
920d2a44
AC
193static void
194show_debug_linux_nat (struct ui_file *file, int from_tty,
195 struct cmd_list_element *c, const char *value)
196{
197 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
198 value);
199}
d6b0e80f 200
b84876c2
PA
201static int debug_linux_nat_async = 0;
202static void
203show_debug_linux_nat_async (struct ui_file *file, int from_tty,
204 struct cmd_list_element *c, const char *value)
205{
3e43a32a
MS
206 fprintf_filtered (file,
207 _("Debugging of GNU/Linux async lwp module is %s.\n"),
b84876c2
PA
208 value);
209}
210
10568435
JK
211static int disable_randomization = 1;
212
213static void
214show_disable_randomization (struct ui_file *file, int from_tty,
215 struct cmd_list_element *c, const char *value)
216{
217#ifdef HAVE_PERSONALITY
3e43a32a
MS
218 fprintf_filtered (file,
219 _("Disabling randomization of debuggee's "
220 "virtual address space is %s.\n"),
10568435
JK
221 value);
222#else /* !HAVE_PERSONALITY */
3e43a32a
MS
223 fputs_filtered (_("Disabling randomization of debuggee's "
224 "virtual address space is unsupported on\n"
225 "this platform.\n"), file);
10568435
JK
226#endif /* !HAVE_PERSONALITY */
227}
228
229static void
3e43a32a
MS
230set_disable_randomization (char *args, int from_tty,
231 struct cmd_list_element *c)
10568435
JK
232{
233#ifndef HAVE_PERSONALITY
3e43a32a
MS
234 error (_("Disabling randomization of debuggee's "
235 "virtual address space is unsupported on\n"
236 "this platform."));
10568435
JK
237#endif /* !HAVE_PERSONALITY */
238}
239
ae087d01
DJ
240struct simple_pid_list
241{
242 int pid;
3d799a95 243 int status;
ae087d01
DJ
244 struct simple_pid_list *next;
245};
246struct simple_pid_list *stopped_pids;
247
3993f6b1
DJ
248/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
249 can not be used, 1 if it can. */
250
251static int linux_supports_tracefork_flag = -1;
252
3e43a32a
MS
253/* This variable is a tri-state flag: -1 for unknown, 0 if
254 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
a96d9b2e
SDJ
255
256static int linux_supports_tracesysgood_flag = -1;
257
9016a515
DJ
258/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
259 PTRACE_O_TRACEVFORKDONE. */
260
261static int linux_supports_tracevforkdone_flag = -1;
262
1777feb0 263/* Async mode support. */
b84876c2 264
b84876c2
PA
265/* Zero if the async mode, although enabled, is masked, which means
266 linux_nat_wait should behave as if async mode was off. */
267static int linux_nat_async_mask_value = 1;
268
a96d9b2e
SDJ
269/* Stores the current used ptrace() options. */
270static int current_ptrace_options = 0;
271
b84876c2
PA
272/* The read/write ends of the pipe registered as waitable file in the
273 event loop. */
274static int linux_nat_event_pipe[2] = { -1, -1 };
275
7feb7d06 276/* Flush the event pipe. */
b84876c2 277
7feb7d06
PA
278static void
279async_file_flush (void)
b84876c2 280{
7feb7d06
PA
281 int ret;
282 char buf;
b84876c2 283
7feb7d06 284 do
b84876c2 285 {
7feb7d06 286 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 287 }
7feb7d06 288 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
289}
290
7feb7d06
PA
291/* Put something (anything, doesn't matter what, or how much) in event
292 pipe, so that the select/poll in the event-loop realizes we have
293 something to process. */
252fbfc8 294
b84876c2 295static void
7feb7d06 296async_file_mark (void)
b84876c2 297{
7feb7d06 298 int ret;
b84876c2 299
7feb7d06
PA
 300 /* It doesn't really matter what the pipe contains, as long as we end
301 up with something in it. Might as well flush the previous
302 left-overs. */
303 async_file_flush ();
b84876c2 304
7feb7d06 305 do
b84876c2 306 {
7feb7d06 307 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 308 }
7feb7d06 309 while (ret == -1 && errno == EINTR);
b84876c2 310
7feb7d06
PA
311 /* Ignore EAGAIN. If the pipe is full, the event loop will already
312 be awakened anyway. */
b84876c2
PA
313}
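
/* A SIGCHLD handler then only needs to mark the pipe: the event loop's
   select/poll wakes up on the read end and GDB eventually calls back into
   linux_nat_wait.  A minimal sketch of such a handler follows (the name is
   hypothetical; only async-signal-safe calls, such as the read/write used
   by the helpers above, may be made from a signal handler):  */
#if 0
static void
sigchld_handler_sketch (int signo)
{
  if (signo == SIGCHLD && linux_nat_event_pipe[1] >= 0)
    async_file_mark ();		/* Wake up the event loop.  */
}
#endif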
314
7feb7d06 315static void linux_nat_async (void (*callback)
3e43a32a
MS
316 (enum inferior_event_type event_type,
317 void *context),
7feb7d06
PA
318 void *context);
319static int linux_nat_async_mask (int mask);
320static int kill_lwp (int lwpid, int signo);
321
322static int stop_callback (struct lwp_info *lp, void *data);
323
324static void block_child_signals (sigset_t *prev_mask);
325static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
326
327struct lwp_info;
328static struct lwp_info *add_lwp (ptid_t ptid);
329static void purge_lwp_list (int pid);
330static struct lwp_info *find_lwp_pid (ptid_t ptid);
331
ae087d01
DJ
332\f
333/* Trivial list manipulation functions to keep track of a list of
334 new stopped processes. */
335static void
3d799a95 336add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
337{
338 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 339
ae087d01 340 new_pid->pid = pid;
3d799a95 341 new_pid->status = status;
ae087d01
DJ
342 new_pid->next = *listp;
343 *listp = new_pid;
344}
345
346static int
46a96992 347pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
348{
349 struct simple_pid_list **p;
350
351 for (p = listp; *p != NULL; p = &(*p)->next)
352 if ((*p)->pid == pid)
353 {
354 struct simple_pid_list *next = (*p)->next;
e0881a8e 355
46a96992 356 *statusp = (*p)->status;
ae087d01
DJ
357 xfree (*p);
358 *p = next;
359 return 1;
360 }
361 return 0;
362}
363
3d799a95
DJ
364static void
365linux_record_stopped_pid (int pid, int status)
ae087d01 366{
3d799a95 367 add_to_pid_list (&stopped_pids, pid, status);
ae087d01
DJ
368}
369
3993f6b1
DJ
370\f
371/* A helper function for linux_test_for_tracefork, called after fork (). */
372
373static void
374linux_tracefork_child (void)
375{
3993f6b1
DJ
376 ptrace (PTRACE_TRACEME, 0, 0, 0);
377 kill (getpid (), SIGSTOP);
378 fork ();
48bb3cce 379 _exit (0);
3993f6b1
DJ
380}
381
7feb7d06 382/* Wrapper function for waitpid which handles EINTR. */
b957e937
DJ
383
384static int
46a96992 385my_waitpid (int pid, int *statusp, int flags)
b957e937
DJ
386{
387 int ret;
b84876c2 388
b957e937
DJ
389 do
390 {
46a96992 391 ret = waitpid (pid, statusp, flags);
b957e937
DJ
392 }
393 while (ret == -1 && errno == EINTR);
394
395 return ret;
396}
397
398/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
399
400 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
401 we know that the feature is not available. This may change the tracing
402 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
403
404 However, if it succeeds, we don't know for sure that the feature is
405 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 406 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
407 fork tracing, and let it fork. If the process exits, we assume that we
408 can't use TRACEFORK; if we get the fork notification, and we can extract
409 the new child's PID, then we assume that we can. */
3993f6b1
DJ
410
411static void
b957e937 412linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
413{
414 int child_pid, ret, status;
415 long second_pid;
7feb7d06 416 sigset_t prev_mask;
4c28f408 417
7feb7d06
PA
418 /* We don't want those ptrace calls to be interrupted. */
419 block_child_signals (&prev_mask);
3993f6b1 420
b957e937
DJ
421 linux_supports_tracefork_flag = 0;
422 linux_supports_tracevforkdone_flag = 0;
423
424 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
425 if (ret != 0)
7feb7d06
PA
426 {
427 restore_child_signals_mask (&prev_mask);
428 return;
429 }
b957e937 430
3993f6b1
DJ
431 child_pid = fork ();
432 if (child_pid == -1)
e2e0b3e5 433 perror_with_name (("fork"));
3993f6b1
DJ
434
435 if (child_pid == 0)
436 linux_tracefork_child ();
437
b957e937 438 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 439 if (ret == -1)
e2e0b3e5 440 perror_with_name (("waitpid"));
3993f6b1 441 else if (ret != child_pid)
8a3fe4f8 442 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 443 if (! WIFSTOPPED (status))
3e43a32a
MS
444 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
445 status);
3993f6b1 446
3993f6b1
DJ
447 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
448 if (ret != 0)
449 {
b957e937
DJ
450 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
451 if (ret != 0)
452 {
8a3fe4f8 453 warning (_("linux_test_for_tracefork: failed to kill child"));
7feb7d06 454 restore_child_signals_mask (&prev_mask);
b957e937
DJ
455 return;
456 }
457
458 ret = my_waitpid (child_pid, &status, 0);
459 if (ret != child_pid)
3e43a32a
MS
460 warning (_("linux_test_for_tracefork: failed "
461 "to wait for killed child"));
b957e937 462 else if (!WIFSIGNALED (status))
3e43a32a
MS
463 warning (_("linux_test_for_tracefork: unexpected "
464 "wait status 0x%x from killed child"), status);
b957e937 465
7feb7d06 466 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
467 return;
468 }
469
9016a515
DJ
470 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
471 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
472 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
473 linux_supports_tracevforkdone_flag = (ret == 0);
474
b957e937
DJ
475 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
476 if (ret != 0)
8a3fe4f8 477 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
478
479 ret = my_waitpid (child_pid, &status, 0);
480
3993f6b1
DJ
481 if (ret == child_pid && WIFSTOPPED (status)
482 && status >> 16 == PTRACE_EVENT_FORK)
483 {
484 second_pid = 0;
485 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
486 if (ret == 0 && second_pid != 0)
487 {
488 int second_status;
489
490 linux_supports_tracefork_flag = 1;
b957e937
DJ
491 my_waitpid (second_pid, &second_status, 0);
492 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
493 if (ret != 0)
3e43a32a
MS
494 warning (_("linux_test_for_tracefork: "
495 "failed to kill second child"));
97725dc4 496 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
497 }
498 }
b957e937 499 else
8a3fe4f8
AC
500 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
501 "(%d, status 0x%x)"), ret, status);
3993f6b1 502
b957e937
DJ
503 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
504 if (ret != 0)
8a3fe4f8 505 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 506 my_waitpid (child_pid, &status, 0);
4c28f408 507
7feb7d06 508 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
509}
510
a96d9b2e
SDJ
511/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
512
513 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
514 we know that the feature is not available. This may change the tracing
515 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
516
517static void
518linux_test_for_tracesysgood (int original_pid)
519{
520 int ret;
521 sigset_t prev_mask;
522
523 /* We don't want those ptrace calls to be interrupted. */
524 block_child_signals (&prev_mask);
525
526 linux_supports_tracesysgood_flag = 0;
527
528 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
529 if (ret != 0)
530 goto out;
531
532 linux_supports_tracesysgood_flag = 1;
533out:
534 restore_child_signals_mask (&prev_mask);
535}
536
 537/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
538 This function also sets linux_supports_tracesysgood_flag. */
539
540static int
541linux_supports_tracesysgood (int pid)
542{
543 if (linux_supports_tracesysgood_flag == -1)
544 linux_test_for_tracesysgood (pid);
545 return linux_supports_tracesysgood_flag;
546}
547
3993f6b1
DJ
548/* Return non-zero iff we have tracefork functionality available.
549 This function also sets linux_supports_tracefork_flag. */
550
551static int
b957e937 552linux_supports_tracefork (int pid)
3993f6b1
DJ
553{
554 if (linux_supports_tracefork_flag == -1)
b957e937 555 linux_test_for_tracefork (pid);
3993f6b1
DJ
556 return linux_supports_tracefork_flag;
557}
558
9016a515 559static int
b957e937 560linux_supports_tracevforkdone (int pid)
9016a515
DJ
561{
562 if (linux_supports_tracefork_flag == -1)
b957e937 563 linux_test_for_tracefork (pid);
9016a515
DJ
564 return linux_supports_tracevforkdone_flag;
565}
566
a96d9b2e
SDJ
567static void
568linux_enable_tracesysgood (ptid_t ptid)
569{
570 int pid = ptid_get_lwp (ptid);
571
572 if (pid == 0)
573 pid = ptid_get_pid (ptid);
574
575 if (linux_supports_tracesysgood (pid) == 0)
576 return;
577
578 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
579
580 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
581}
582
3993f6b1 583\f
4de4c07c
DJ
584void
585linux_enable_event_reporting (ptid_t ptid)
586{
d3587048 587 int pid = ptid_get_lwp (ptid);
4de4c07c 588
d3587048
DJ
589 if (pid == 0)
590 pid = ptid_get_pid (ptid);
591
b957e937 592 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
593 return;
594
a96d9b2e
SDJ
595 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
596 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
597
b957e937 598 if (linux_supports_tracevforkdone (pid))
a96d9b2e 599 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
9016a515
DJ
600
601 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
602 read-only process state. */
4de4c07c 603
a96d9b2e 604 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
4de4c07c
DJ
605}
606
6d8fd2b7
UW
607static void
608linux_child_post_attach (int pid)
4de4c07c
DJ
609{
610 linux_enable_event_reporting (pid_to_ptid (pid));
0ec9a092 611 check_for_thread_db ();
a96d9b2e 612 linux_enable_tracesysgood (pid_to_ptid (pid));
4de4c07c
DJ
613}
614
10d6c8cd 615static void
4de4c07c
DJ
616linux_child_post_startup_inferior (ptid_t ptid)
617{
618 linux_enable_event_reporting (ptid);
0ec9a092 619 check_for_thread_db ();
a96d9b2e 620 linux_enable_tracesysgood (ptid);
4de4c07c
DJ
621}
622
6d8fd2b7
UW
623static int
624linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 625{
7feb7d06 626 sigset_t prev_mask;
9016a515 627 int has_vforked;
4de4c07c
DJ
628 int parent_pid, child_pid;
629
7feb7d06 630 block_child_signals (&prev_mask);
b84876c2 631
e58b0e63
PA
632 has_vforked = (inferior_thread ()->pending_follow.kind
633 == TARGET_WAITKIND_VFORKED);
634 parent_pid = ptid_get_lwp (inferior_ptid);
d3587048 635 if (parent_pid == 0)
e58b0e63
PA
636 parent_pid = ptid_get_pid (inferior_ptid);
637 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
4de4c07c 638
2277426b
PA
639 if (!detach_fork)
640 linux_enable_event_reporting (pid_to_ptid (child_pid));
641
6c95b8df
PA
642 if (has_vforked
643 && !non_stop /* Non-stop always resumes both branches. */
644 && (!target_is_async_p () || sync_execution)
645 && !(follow_child || detach_fork || sched_multi))
646 {
647 /* The parent stays blocked inside the vfork syscall until the
648 child execs or exits. If we don't let the child run, then
649 the parent stays blocked. If we're telling the parent to run
650 in the foreground, the user will not be able to ctrl-c to get
651 back the terminal, effectively hanging the debug session. */
ac74f770
MS
652 fprintf_filtered (gdb_stderr, _("\
653Can not resume the parent process over vfork in the foreground while\n\
654holding the child stopped. Try \"set detach-on-fork\" or \
655\"set schedule-multiple\".\n"));
656 /* FIXME output string > 80 columns. */
6c95b8df
PA
657 return 1;
658 }
659
4de4c07c
DJ
660 if (! follow_child)
661 {
6c95b8df 662 struct lwp_info *child_lp = NULL;
4de4c07c 663
1777feb0 664 /* We're already attached to the parent, by default. */
4de4c07c 665
ac264b3b
MS
666 /* Detach new forked process? */
667 if (detach_fork)
f75c00e4 668 {
6c95b8df
PA
669 /* Before detaching from the child, remove all breakpoints
670 from it. If we forked, then this has already been taken
671 care of by infrun.c. If we vforked however, any
672 breakpoint inserted in the parent is visible in the
673 child, even those added while stopped in a vfork
674 catchpoint. This will remove the breakpoints from the
675 parent also, but they'll be reinserted below. */
676 if (has_vforked)
677 {
678 /* keep breakpoints list in sync. */
679 remove_breakpoints_pid (GET_PID (inferior_ptid));
680 }
681
e85a822c 682 if (info_verbose || debug_linux_nat)
ac264b3b
MS
683 {
684 target_terminal_ours ();
685 fprintf_filtered (gdb_stdlog,
3e43a32a
MS
686 "Detaching after fork from "
687 "child process %d.\n",
ac264b3b
MS
688 child_pid);
689 }
4de4c07c 690
ac264b3b
MS
691 ptrace (PTRACE_DETACH, child_pid, 0, 0);
692 }
693 else
694 {
77435e4c 695 struct inferior *parent_inf, *child_inf;
2277426b 696 struct cleanup *old_chain;
7f9f62ba
PA
697
698 /* Add process to GDB's tables. */
77435e4c
PA
699 child_inf = add_inferior (child_pid);
700
e58b0e63 701 parent_inf = current_inferior ();
77435e4c 702 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 703 copy_terminal_info (child_inf, parent_inf);
7f9f62ba 704
2277426b 705 old_chain = save_inferior_ptid ();
6c95b8df 706 save_current_program_space ();
2277426b
PA
707
708 inferior_ptid = ptid_build (child_pid, child_pid, 0);
709 add_thread (inferior_ptid);
6c95b8df
PA
710 child_lp = add_lwp (inferior_ptid);
711 child_lp->stopped = 1;
712 child_lp->resumed = 1;
2277426b 713
6c95b8df
PA
714 /* If this is a vfork child, then the address-space is
715 shared with the parent. */
716 if (has_vforked)
717 {
718 child_inf->pspace = parent_inf->pspace;
719 child_inf->aspace = parent_inf->aspace;
720
721 /* The parent will be frozen until the child is done
722 with the shared region. Keep track of the
723 parent. */
724 child_inf->vfork_parent = parent_inf;
725 child_inf->pending_detach = 0;
726 parent_inf->vfork_child = child_inf;
727 parent_inf->pending_detach = 0;
728 }
729 else
730 {
731 child_inf->aspace = new_address_space ();
732 child_inf->pspace = add_program_space (child_inf->aspace);
733 child_inf->removable = 1;
734 set_current_program_space (child_inf->pspace);
735 clone_program_space (child_inf->pspace, parent_inf->pspace);
736
737 /* Let the shared library layer (solib-svr4) learn about
738 this new process, relocate the cloned exec, pull in
739 shared libraries, and install the solib event
740 breakpoint. If a "cloned-VM" event was propagated
741 better throughout the core, this wouldn't be
742 required. */
268a4a75 743 solib_create_inferior_hook (0);
6c95b8df
PA
744 }
745
746 /* Let the thread_db layer learn about this new process. */
2277426b
PA
747 check_for_thread_db ();
748
749 do_cleanups (old_chain);
ac264b3b 750 }
9016a515
DJ
751
752 if (has_vforked)
753 {
6c95b8df
PA
754 struct lwp_info *lp;
755 struct inferior *parent_inf;
756
757 parent_inf = current_inferior ();
758
759 /* If we detached from the child, then we have to be careful
760 to not insert breakpoints in the parent until the child
761 is done with the shared memory region. However, if we're
762 staying attached to the child, then we can and should
763 insert breakpoints, so that we can debug it. A
 764 subsequent child exec or exit is enough to know when
765 the child stops using the parent's address space. */
766 parent_inf->waiting_for_vfork_done = detach_fork;
56710373 767 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
6c95b8df
PA
768
769 lp = find_lwp_pid (pid_to_ptid (parent_pid));
b957e937
DJ
770 gdb_assert (linux_supports_tracefork_flag >= 0);
771 if (linux_supports_tracevforkdone (0))
9016a515 772 {
6c95b8df
PA
773 if (debug_linux_nat)
774 fprintf_unfiltered (gdb_stdlog,
775 "LCFF: waiting for VFORK_DONE on %d\n",
776 parent_pid);
777
778 lp->stopped = 1;
779 lp->resumed = 1;
9016a515 780
6c95b8df
PA
781 /* We'll handle the VFORK_DONE event like any other
782 event, in target_wait. */
9016a515
DJ
783 }
784 else
785 {
786 /* We can't insert breakpoints until the child has
787 finished with the shared memory region. We need to
788 wait until that happens. Ideal would be to just
789 call:
790 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
791 - waitpid (parent_pid, &status, __WALL);
792 However, most architectures can't handle a syscall
793 being traced on the way out if it wasn't traced on
794 the way in.
795
796 We might also think to loop, continuing the child
797 until it exits or gets a SIGTRAP. One problem is
798 that the child might call ptrace with PTRACE_TRACEME.
799
800 There's no simple and reliable way to figure out when
801 the vforked child will be done with its copy of the
802 shared memory. We could step it out of the syscall,
803 two instructions, let it go, and then single-step the
804 parent once. When we have hardware single-step, this
805 would work; with software single-step it could still
806 be made to work but we'd have to be able to insert
807 single-step breakpoints in the child, and we'd have
808 to insert -just- the single-step breakpoint in the
809 parent. Very awkward.
810
811 In the end, the best we can do is to make sure it
812 runs for a little while. Hopefully it will be out of
813 range of any breakpoints we reinsert. Usually this
814 is only the single-step breakpoint at vfork's return
815 point. */
816
6c95b8df
PA
817 if (debug_linux_nat)
818 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
819 "LCFF: no VFORK_DONE "
820 "support, sleeping a bit\n");
6c95b8df 821
9016a515 822 usleep (10000);
9016a515 823
6c95b8df
PA
824 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
825 and leave it pending. The next linux_nat_resume call
826 will notice a pending event, and bypasses actually
827 resuming the inferior. */
828 lp->status = 0;
829 lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
830 lp->stopped = 0;
831 lp->resumed = 1;
832
833 /* If we're in async mode, need to tell the event loop
834 there's something here to process. */
835 if (target_can_async_p ())
836 async_file_mark ();
837 }
9016a515 838 }
4de4c07c 839 }
3993f6b1 840 else
4de4c07c 841 {
77435e4c 842 struct inferior *parent_inf, *child_inf;
2277426b 843 struct lwp_info *lp;
6c95b8df 844 struct program_space *parent_pspace;
4de4c07c 845
e85a822c 846 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
847 {
848 target_terminal_ours ();
6c95b8df 849 if (has_vforked)
3e43a32a
MS
850 fprintf_filtered (gdb_stdlog,
851 _("Attaching after process %d "
852 "vfork to child process %d.\n"),
6c95b8df
PA
853 parent_pid, child_pid);
854 else
3e43a32a
MS
855 fprintf_filtered (gdb_stdlog,
856 _("Attaching after process %d "
857 "fork to child process %d.\n"),
6c95b8df 858 parent_pid, child_pid);
f75c00e4 859 }
4de4c07c 860
7a7d3353
PA
861 /* Add the new inferior first, so that the target_detach below
862 doesn't unpush the target. */
863
77435e4c
PA
864 child_inf = add_inferior (child_pid);
865
e58b0e63 866 parent_inf = current_inferior ();
77435e4c 867 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 868 copy_terminal_info (child_inf, parent_inf);
7a7d3353 869
6c95b8df 870 parent_pspace = parent_inf->pspace;
9016a515 871
6c95b8df
PA
872 /* If we're vforking, we want to hold on to the parent until the
873 child exits or execs. At child exec or exit time we can
874 remove the old breakpoints from the parent and detach or
875 resume debugging it. Otherwise, detach the parent now; we'll
 876 want to reuse its program/address spaces, but we can't set
877 them to the child before removing breakpoints from the
878 parent, otherwise, the breakpoints module could decide to
879 remove breakpoints from the wrong process (since they'd be
880 assigned to the same address space). */
9016a515
DJ
881
882 if (has_vforked)
7f9f62ba 883 {
6c95b8df
PA
884 gdb_assert (child_inf->vfork_parent == NULL);
885 gdb_assert (parent_inf->vfork_child == NULL);
886 child_inf->vfork_parent = parent_inf;
887 child_inf->pending_detach = 0;
888 parent_inf->vfork_child = child_inf;
889 parent_inf->pending_detach = detach_fork;
890 parent_inf->waiting_for_vfork_done = 0;
ac264b3b 891 }
2277426b 892 else if (detach_fork)
b84876c2 893 target_detach (NULL, 0);
4de4c07c 894
6c95b8df
PA
895 /* Note that the detach above makes PARENT_INF dangling. */
896
897 /* Add the child thread to the appropriate lists, and switch to
898 this new thread, before cloning the program space, and
899 informing the solib layer about this new process. */
900
9f0bdab8 901 inferior_ptid = ptid_build (child_pid, child_pid, 0);
2277426b
PA
902 add_thread (inferior_ptid);
903 lp = add_lwp (inferior_ptid);
904 lp->stopped = 1;
6c95b8df
PA
905 lp->resumed = 1;
906
907 /* If this is a vfork child, then the address-space is shared
908 with the parent. If we detached from the parent, then we can
909 reuse the parent's program/address spaces. */
910 if (has_vforked || detach_fork)
911 {
912 child_inf->pspace = parent_pspace;
913 child_inf->aspace = child_inf->pspace->aspace;
914 }
915 else
916 {
917 child_inf->aspace = new_address_space ();
918 child_inf->pspace = add_program_space (child_inf->aspace);
919 child_inf->removable = 1;
920 set_current_program_space (child_inf->pspace);
921 clone_program_space (child_inf->pspace, parent_pspace);
922
923 /* Let the shared library layer (solib-svr4) learn about
924 this new process, relocate the cloned exec, pull in
925 shared libraries, and install the solib event breakpoint.
926 If a "cloned-VM" event was propagated better throughout
927 the core, this wouldn't be required. */
268a4a75 928 solib_create_inferior_hook (0);
6c95b8df 929 }
ac264b3b 930
6c95b8df 931 /* Let the thread_db layer learn about this new process. */
ef29ce1a 932 check_for_thread_db ();
4de4c07c
DJ
933 }
934
7feb7d06 935 restore_child_signals_mask (&prev_mask);
4de4c07c
DJ
936 return 0;
937}
938
4de4c07c 939\f
77b06cd7 940static int
6d8fd2b7 941linux_child_insert_fork_catchpoint (int pid)
4de4c07c 942{
77b06cd7 943 return !linux_supports_tracefork (pid);
3993f6b1
DJ
944}
945
eb73ad13
PA
946static int
947linux_child_remove_fork_catchpoint (int pid)
948{
949 return 0;
950}
951
77b06cd7 952static int
6d8fd2b7 953linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 954{
77b06cd7 955 return !linux_supports_tracefork (pid);
3993f6b1
DJ
956}
957
eb73ad13
PA
958static int
959linux_child_remove_vfork_catchpoint (int pid)
960{
961 return 0;
962}
963
77b06cd7 964static int
6d8fd2b7 965linux_child_insert_exec_catchpoint (int pid)
3993f6b1 966{
77b06cd7 967 return !linux_supports_tracefork (pid);
3993f6b1
DJ
968}
969
eb73ad13
PA
970static int
971linux_child_remove_exec_catchpoint (int pid)
972{
973 return 0;
974}
975
a96d9b2e
SDJ
976static int
977linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
978 int table_size, int *table)
979{
77b06cd7
TJB
980 if (!linux_supports_tracesysgood (pid))
981 return 1;
982
a96d9b2e
SDJ
983 /* On GNU/Linux, we ignore the arguments. It means that we only
984 enable the syscall catchpoints, but do not disable them.
77b06cd7 985
a96d9b2e
SDJ
986 Also, we do not use the `table' information because we do not
987 filter system calls here. We let GDB do the logic for us. */
988 return 0;
989}
990
d6b0e80f
AC
991/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
992 are processes sharing the same VM space. A multi-threaded process
993 is basically a group of such processes. However, such a grouping
994 is almost entirely a user-space issue; the kernel doesn't enforce
995 such a grouping at all (this might change in the future). In
996 general, we'll rely on the threads library (i.e. the GNU/Linux
997 Threads library) to provide such a grouping.
998
 999 It is perfectly possible to write a multi-threaded application
1000 without the assistance of a threads library, by using the clone
1001 system call directly. This module should be able to give some
1002 rudimentary support for debugging such applications if developers
1003 specify the CLONE_PTRACE flag in the clone system call, and are
1004 using the Linux kernel 2.4 or above.
1005
1006 Note that there are some peculiarities in GNU/Linux that affect
1007 this code:
1008
1009 - In general one should specify the __WCLONE flag to waitpid in
1010 order to make it report events for any of the cloned processes
1011 (and leave it out for the initial process). However, if a cloned
1012 process has exited the exit status is only reported if the
1013 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
1014 we cannot use it since GDB must work on older systems too.
1015
1016 - When a traced, cloned process exits and is waited for by the
1017 debugger, the kernel reassigns it to the original parent and
1018 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
1019 library doesn't notice this, which leads to the "zombie problem":
 1020 When debugged, a multi-threaded process that spawns a lot of
1021 threads will run out of processes, even if the threads exit,
1022 because the "zombies" stay around. */
1023
1024/* List of known LWPs. */
9f0bdab8 1025struct lwp_info *lwp_list;
d6b0e80f
AC
1026\f
1027
d6b0e80f
AC
1028/* Original signal mask. */
1029static sigset_t normal_mask;
1030
1031/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1032 _initialize_linux_nat. */
1033static sigset_t suspend_mask;
1034
7feb7d06
PA
1035/* Signals to block to make that sigsuspend work. */
1036static sigset_t blocked_mask;
1037
1038/* SIGCHLD action. */
1039struct sigaction sigchld_action;
b84876c2 1040
7feb7d06
PA
1041/* Block child signals (SIGCHLD and linux threads signals), and store
1042 the previous mask in PREV_MASK. */
84e46146 1043
7feb7d06
PA
1044static void
1045block_child_signals (sigset_t *prev_mask)
1046{
1047 /* Make sure SIGCHLD is blocked. */
1048 if (!sigismember (&blocked_mask, SIGCHLD))
1049 sigaddset (&blocked_mask, SIGCHLD);
1050
1051 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1052}
1053
1054/* Restore child signals mask, previously returned by
1055 block_child_signals. */
1056
1057static void
1058restore_child_signals_mask (sigset_t *prev_mask)
1059{
1060 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1061}
2455069d
UW
1062
1063/* Mask of signals to pass directly to the inferior. */
1064static sigset_t pass_mask;
1065
1066/* Update signals to pass to the inferior. */
1067static void
1068linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1069{
1070 int signo;
1071
1072 sigemptyset (&pass_mask);
1073
1074 for (signo = 1; signo < NSIG; signo++)
1075 {
1076 int target_signo = target_signal_from_host (signo);
1077 if (target_signo < numsigs && pass_signals[target_signo])
1078 sigaddset (&pass_mask, signo);
1079 }
1080}
1081
d6b0e80f
AC
1082\f
1083
1084/* Prototypes for local functions. */
1085static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 1086static int linux_thread_alive (ptid_t ptid);
6d8fd2b7 1087static char *linux_child_pid_to_exec_file (int pid);
710151dd 1088
d6b0e80f
AC
1089\f
1090/* Convert wait status STATUS to a string. Used for printing debug
1091 messages only. */
1092
1093static char *
1094status_to_str (int status)
1095{
1096 static char buf[64];
1097
1098 if (WIFSTOPPED (status))
206aa767 1099 {
ca2163eb 1100 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
206aa767
DE
1101 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1102 strsignal (SIGTRAP));
1103 else
1104 snprintf (buf, sizeof (buf), "%s (stopped)",
1105 strsignal (WSTOPSIG (status)));
1106 }
d6b0e80f
AC
1107 else if (WIFSIGNALED (status))
1108 snprintf (buf, sizeof (buf), "%s (terminated)",
ba9b2ec3 1109 strsignal (WTERMSIG (status)));
d6b0e80f
AC
1110 else
1111 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1112
1113 return buf;
1114}
1115
d90e17a7
PA
 1116/* Remove all LWPs belonging to PID from the lwp list. */
1117
1118static void
1119purge_lwp_list (int pid)
1120{
1121 struct lwp_info *lp, *lpprev, *lpnext;
1122
1123 lpprev = NULL;
1124
1125 for (lp = lwp_list; lp; lp = lpnext)
1126 {
1127 lpnext = lp->next;
1128
1129 if (ptid_get_pid (lp->ptid) == pid)
1130 {
1131 if (lp == lwp_list)
1132 lwp_list = lp->next;
1133 else
1134 lpprev->next = lp->next;
1135
1136 xfree (lp);
1137 }
1138 else
1139 lpprev = lp;
1140 }
1141}
1142
1143/* Return the number of known LWPs in the tgid given by PID. */
1144
1145static int
1146num_lwps (int pid)
1147{
1148 int count = 0;
1149 struct lwp_info *lp;
1150
1151 for (lp = lwp_list; lp; lp = lp->next)
1152 if (ptid_get_pid (lp->ptid) == pid)
1153 count++;
1154
1155 return count;
d6b0e80f
AC
1156}
1157
f973ed9c 1158/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
1159 structure describing the new LWP. The LWP should already be stopped
1160 (with an exception for the very first LWP). */
d6b0e80f
AC
1161
1162static struct lwp_info *
1163add_lwp (ptid_t ptid)
1164{
1165 struct lwp_info *lp;
1166
1167 gdb_assert (is_lwp (ptid));
1168
1169 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1170
1171 memset (lp, 0, sizeof (struct lwp_info));
1172
1173 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1174
1175 lp->ptid = ptid;
dc146f7c 1176 lp->core = -1;
d6b0e80f
AC
1177
1178 lp->next = lwp_list;
1179 lwp_list = lp;
d6b0e80f 1180
d90e17a7 1181 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
9f0bdab8
DJ
1182 linux_nat_new_thread (ptid);
1183
d6b0e80f
AC
1184 return lp;
1185}
1186
1187/* Remove the LWP specified by PID from the list. */
1188
1189static void
1190delete_lwp (ptid_t ptid)
1191{
1192 struct lwp_info *lp, *lpprev;
1193
1194 lpprev = NULL;
1195
1196 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1197 if (ptid_equal (lp->ptid, ptid))
1198 break;
1199
1200 if (!lp)
1201 return;
1202
d6b0e80f
AC
1203 if (lpprev)
1204 lpprev->next = lp->next;
1205 else
1206 lwp_list = lp->next;
1207
1208 xfree (lp);
1209}
1210
1211/* Return a pointer to the structure describing the LWP corresponding
1212 to PID. If no corresponding LWP could be found, return NULL. */
1213
1214static struct lwp_info *
1215find_lwp_pid (ptid_t ptid)
1216{
1217 struct lwp_info *lp;
1218 int lwp;
1219
1220 if (is_lwp (ptid))
1221 lwp = GET_LWP (ptid);
1222 else
1223 lwp = GET_PID (ptid);
1224
1225 for (lp = lwp_list; lp; lp = lp->next)
1226 if (lwp == GET_LWP (lp->ptid))
1227 return lp;
1228
1229 return NULL;
1230}
1231
1232/* Call CALLBACK with its second argument set to DATA for every LWP in
1233 the list. If CALLBACK returns 1 for a particular LWP, return a
1234 pointer to the structure describing that LWP immediately.
1235 Otherwise return NULL. */
1236
1237struct lwp_info *
d90e17a7
PA
1238iterate_over_lwps (ptid_t filter,
1239 int (*callback) (struct lwp_info *, void *),
1240 void *data)
d6b0e80f
AC
1241{
1242 struct lwp_info *lp, *lpnext;
1243
1244 for (lp = lwp_list; lp; lp = lpnext)
1245 {
1246 lpnext = lp->next;
d90e17a7
PA
1247
1248 if (ptid_match (lp->ptid, filter))
1249 {
1250 if ((*callback) (lp, data))
1251 return lp;
1252 }
d6b0e80f
AC
1253 }
1254
1255 return NULL;
1256}
1257
2277426b
PA
1258/* Update our internal state when changing from one checkpoint to
1259 another indicated by NEW_PTID. We can only switch single-threaded
1260 applications, so we only create one new LWP, and the previous list
1261 is discarded. */
f973ed9c
DJ
1262
1263void
1264linux_nat_switch_fork (ptid_t new_ptid)
1265{
1266 struct lwp_info *lp;
1267
2277426b
PA
1268 purge_lwp_list (GET_PID (inferior_ptid));
1269
f973ed9c
DJ
1270 lp = add_lwp (new_ptid);
1271 lp->stopped = 1;
e26af52f 1272
2277426b
PA
1273 /* This changes the thread's ptid while preserving the gdb thread
1274 num. Also changes the inferior pid, while preserving the
1275 inferior num. */
1276 thread_change_ptid (inferior_ptid, new_ptid);
1277
1278 /* We've just told GDB core that the thread changed target id, but,
1279 in fact, it really is a different thread, with different register
1280 contents. */
1281 registers_changed ();
e26af52f
DJ
1282}
1283
e26af52f
DJ
1284/* Handle the exit of a single thread LP. */
1285
1286static void
1287exit_lwp (struct lwp_info *lp)
1288{
e09875d4 1289 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1290
1291 if (th)
e26af52f 1292 {
17faa917
DJ
1293 if (print_thread_events)
1294 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1295
4f8d22e3 1296 delete_thread (lp->ptid);
e26af52f
DJ
1297 }
1298
1299 delete_lwp (lp->ptid);
1300}
1301
4d062f1a
PA
1302/* Return an lwp's tgid, found in `/proc/PID/status'. */
1303
1304int
1305linux_proc_get_tgid (int lwpid)
1306{
1307 FILE *status_file;
1308 char buf[100];
1309 int tgid = -1;
1310
1311 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) lwpid);
1312 status_file = fopen (buf, "r");
1313 if (status_file != NULL)
1314 {
1315 while (fgets (buf, sizeof (buf), status_file))
1316 {
1317 if (strncmp (buf, "Tgid:", 5) == 0)
1318 {
1319 tgid = strtoul (buf + strlen ("Tgid:"), NULL, 10);
1320 break;
1321 }
1322 }
1323
1324 fclose (status_file);
1325 }
1326
1327 return tgid;
1328}
1329
a0ef4274
DJ
1330/* Detect `T (stopped)' in `/proc/PID/status'.
1331 Other states including `T (tracing stop)' are reported as false. */
1332
1333static int
1334pid_is_stopped (pid_t pid)
1335{
1336 FILE *status_file;
1337 char buf[100];
1338 int retval = 0;
1339
1340 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1341 status_file = fopen (buf, "r");
1342 if (status_file != NULL)
1343 {
1344 int have_state = 0;
1345
1346 while (fgets (buf, sizeof (buf), status_file))
1347 {
1348 if (strncmp (buf, "State:", 6) == 0)
1349 {
1350 have_state = 1;
1351 break;
1352 }
1353 }
1354 if (have_state && strstr (buf, "T (stopped)") != NULL)
1355 retval = 1;
1356 fclose (status_file);
1357 }
1358 return retval;
1359}
1360
1361/* Wait for the LWP specified by LP, which we have just attached to.
1362 Returns a wait status for that LWP, to cache. */
1363
1364static int
1365linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1366 int *signalled)
1367{
1368 pid_t new_pid, pid = GET_LWP (ptid);
1369 int status;
1370
1371 if (pid_is_stopped (pid))
1372 {
1373 if (debug_linux_nat)
1374 fprintf_unfiltered (gdb_stdlog,
1375 "LNPAW: Attaching to a stopped process\n");
1376
1377 /* The process is definitely stopped. It is in a job control
1378 stop, unless the kernel predates the TASK_STOPPED /
1379 TASK_TRACED distinction, in which case it might be in a
1380 ptrace stop. Make sure it is in a ptrace stop; from there we
1381 can kill it, signal it, et cetera.
1382
1383 First make sure there is a pending SIGSTOP. Since we are
1384 already attached, the process can not transition from stopped
1385 to running without a PTRACE_CONT; so we know this signal will
1386 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1387 probably already in the queue (unless this kernel is old
1388 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1389 is not an RT signal, it can only be queued once. */
1390 kill_lwp (pid, SIGSTOP);
1391
1392 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1393 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1394 ptrace (PTRACE_CONT, pid, 0, 0);
1395 }
1396
1397 /* Make sure the initial process is stopped. The user-level threads
1398 layer might want to poke around in the inferior, and that won't
1399 work if things haven't stabilized yet. */
1400 new_pid = my_waitpid (pid, &status, 0);
1401 if (new_pid == -1 && errno == ECHILD)
1402 {
1403 if (first)
1404 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1405
1406 /* Try again with __WCLONE to check cloned processes. */
1407 new_pid = my_waitpid (pid, &status, __WCLONE);
1408 *cloned = 1;
1409 }
1410
dacc9cb2
PP
1411 gdb_assert (pid == new_pid);
1412
1413 if (!WIFSTOPPED (status))
1414 {
1415 /* The pid we tried to attach has apparently just exited. */
1416 if (debug_linux_nat)
1417 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1418 pid, status_to_str (status));
1419 return status;
1420 }
a0ef4274
DJ
1421
1422 if (WSTOPSIG (status) != SIGSTOP)
1423 {
1424 *signalled = 1;
1425 if (debug_linux_nat)
1426 fprintf_unfiltered (gdb_stdlog,
1427 "LNPAW: Received %s after attaching\n",
1428 status_to_str (status));
1429 }
1430
1431 return status;
1432}
1433
1434/* Attach to the LWP specified by PID. Return 0 if successful or -1
1435 if the new LWP could not be attached. */
d6b0e80f 1436
9ee57c33 1437int
93815fbf 1438lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1439{
9ee57c33 1440 struct lwp_info *lp;
7feb7d06 1441 sigset_t prev_mask;
d6b0e80f
AC
1442
1443 gdb_assert (is_lwp (ptid));
1444
7feb7d06 1445 block_child_signals (&prev_mask);
d6b0e80f 1446
9ee57c33 1447 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1448
1449 /* We assume that we're already attached to any LWP that has an id
1450 equal to the overall process id, and to any LWP that is already
1451 in our list of LWPs. If we're not seeing exit events from threads
1452 and we've had PID wraparound since we last tried to stop all threads,
1453 this assumption might be wrong; fortunately, this is very unlikely
1454 to happen. */
9ee57c33 1455 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1456 {
a0ef4274 1457 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1458
1459 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1460 {
1461 /* If we fail to attach to the thread, issue a warning,
1462 but continue. One way this can happen is if thread
e9efe249 1463 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1464 bug may place threads in the thread list and then fail
1465 to create them. */
1466 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1467 safe_strerror (errno));
7feb7d06 1468 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1469 return -1;
1470 }
1471
d6b0e80f
AC
1472 if (debug_linux_nat)
1473 fprintf_unfiltered (gdb_stdlog,
1474 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1475 target_pid_to_str (ptid));
1476
a0ef4274 1477 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2
PP
1478 if (!WIFSTOPPED (status))
1479 return -1;
1480
a0ef4274
DJ
1481 lp = add_lwp (ptid);
1482 lp->stopped = 1;
1483 lp->cloned = cloned;
1484 lp->signalled = signalled;
1485 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1486 {
a0ef4274
DJ
1487 lp->resumed = 1;
1488 lp->status = status;
d6b0e80f
AC
1489 }
1490
a0ef4274 1491 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1492
1493 if (debug_linux_nat)
1494 {
1495 fprintf_unfiltered (gdb_stdlog,
1496 "LLAL: waitpid %s received %s\n",
1497 target_pid_to_str (ptid),
1498 status_to_str (status));
1499 }
1500 }
1501 else
1502 {
1503 /* We assume that the LWP representing the original process is
1504 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
 1505 that the GNU/Linux ptrace layer uses to keep track of
1506 threads. Note that this won't have already been done since
1507 the main thread will have, we assume, been stopped by an
1508 attach from a different layer. */
9ee57c33
DJ
1509 if (lp == NULL)
1510 lp = add_lwp (ptid);
d6b0e80f
AC
1511 lp->stopped = 1;
1512 }
9ee57c33 1513
7feb7d06 1514 restore_child_signals_mask (&prev_mask);
9ee57c33 1515 return 0;
d6b0e80f
AC
1516}
1517
b84876c2 1518static void
136d6dae
VP
1519linux_nat_create_inferior (struct target_ops *ops,
1520 char *exec_file, char *allargs, char **env,
b84876c2
PA
1521 int from_tty)
1522{
10568435
JK
1523#ifdef HAVE_PERSONALITY
1524 int personality_orig = 0, personality_set = 0;
1525#endif /* HAVE_PERSONALITY */
b84876c2
PA
1526
1527 /* The fork_child mechanism is synchronous and calls target_wait, so
1528 we have to mask the async mode. */
1529
10568435
JK
1530#ifdef HAVE_PERSONALITY
1531 if (disable_randomization)
1532 {
1533 errno = 0;
1534 personality_orig = personality (0xffffffff);
1535 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1536 {
1537 personality_set = 1;
1538 personality (personality_orig | ADDR_NO_RANDOMIZE);
1539 }
1540 if (errno != 0 || (personality_set
1541 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1542 warning (_("Error disabling address space randomization: %s"),
1543 safe_strerror (errno));
1544 }
1545#endif /* HAVE_PERSONALITY */
1546
2455069d
UW
1547 /* Make sure we report all signals during startup. */
1548 linux_nat_pass_signals (0, NULL);
1549
136d6dae 1550 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1551
10568435
JK
1552#ifdef HAVE_PERSONALITY
1553 if (personality_set)
1554 {
1555 errno = 0;
1556 personality (personality_orig);
1557 if (errno != 0)
1558 warning (_("Error restoring address space randomization: %s"),
1559 safe_strerror (errno));
1560 }
1561#endif /* HAVE_PERSONALITY */
b84876c2
PA
1562}
1563
d6b0e80f 1564static void
136d6dae 1565linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1566{
1567 struct lwp_info *lp;
d6b0e80f 1568 int status;
af990527 1569 ptid_t ptid;
d6b0e80f 1570
2455069d
UW
1571 /* Make sure we report all signals during attach. */
1572 linux_nat_pass_signals (0, NULL);
1573
136d6dae 1574 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1575
af990527
PA
1576 /* The ptrace base target adds the main thread with (pid,0,0)
1577 format. Decorate it with lwp info. */
1578 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1579 thread_change_ptid (inferior_ptid, ptid);
1580
9f0bdab8 1581 /* Add the initial process as the first LWP to the list. */
af990527 1582 lp = add_lwp (ptid);
a0ef4274
DJ
1583
1584 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1585 &lp->signalled);
dacc9cb2
PP
1586 if (!WIFSTOPPED (status))
1587 {
1588 if (WIFEXITED (status))
1589 {
1590 int exit_code = WEXITSTATUS (status);
1591
1592 target_terminal_ours ();
1593 target_mourn_inferior ();
1594 if (exit_code == 0)
1595 error (_("Unable to attach: program exited normally."));
1596 else
1597 error (_("Unable to attach: program exited with code %d."),
1598 exit_code);
1599 }
1600 else if (WIFSIGNALED (status))
1601 {
1602 enum target_signal signo;
1603
1604 target_terminal_ours ();
1605 target_mourn_inferior ();
1606
1607 signo = target_signal_from_host (WTERMSIG (status));
1608 error (_("Unable to attach: program terminated with signal "
1609 "%s, %s."),
1610 target_signal_to_name (signo),
1611 target_signal_to_string (signo));
1612 }
1613
1614 internal_error (__FILE__, __LINE__,
1615 _("unexpected status %d for PID %ld"),
1616 status, (long) GET_LWP (ptid));
1617 }
1618
a0ef4274 1619 lp->stopped = 1;
9f0bdab8 1620
a0ef4274 1621 /* Save the wait status to report later. */
d6b0e80f 1622 lp->resumed = 1;
a0ef4274
DJ
1623 if (debug_linux_nat)
1624 fprintf_unfiltered (gdb_stdlog,
1625 "LNA: waitpid %ld, saving status %s\n",
1626 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1627
7feb7d06
PA
1628 lp->status = status;
1629
1630 if (target_can_async_p ())
1631 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1632}
1633
a0ef4274
DJ
1634/* Get pending status of LP. */
1635static int
1636get_pending_status (struct lwp_info *lp, int *status)
1637{
ca2163eb
PA
1638 enum target_signal signo = TARGET_SIGNAL_0;
1639
1640 /* If we paused threads momentarily, we may have stored pending
1641 events in lp->status or lp->waitstatus (see stop_wait_callback),
1642 and GDB core hasn't seen any signal for those threads.
1643 Otherwise, the last signal reported to the core is found in the
1644 thread object's stop_signal.
1645
1646 There's a corner case that isn't handled here at present. Only
1647 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1648 stop_signal make sense as a real signal to pass to the inferior.
1649 Some catchpoint related events, like
1650 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1651 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1652 those traps are debug API (ptrace in our case) related and
1653 induced; the inferior wouldn't see them if it wasn't being
1654 traced. Hence, we should never pass them to the inferior, even
1655 when set to pass state. Since this corner case isn't handled by
1656 infrun.c when proceeding with a signal, for consistency, neither
1657 do we handle it here (or elsewhere in the file we check for
1658 signal pass state). Normally SIGTRAP isn't set to pass state, so
1659 this is really a corner case. */
1660
1661 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1662 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1663 else if (lp->status)
1664 signo = target_signal_from_host (WSTOPSIG (lp->status));
1665 else if (non_stop && !is_executing (lp->ptid))
1666 {
1667 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1668
16c381f0 1669 signo = tp->suspend.stop_signal;
ca2163eb
PA
1670 }
1671 else if (!non_stop)
a0ef4274 1672 {
ca2163eb
PA
1673 struct target_waitstatus last;
1674 ptid_t last_ptid;
4c28f408 1675
ca2163eb 1676 get_last_target_status (&last_ptid, &last);
4c28f408 1677
ca2163eb
PA
1678 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1679 {
e09875d4 1680 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1681
16c381f0 1682 signo = tp->suspend.stop_signal;
4c28f408 1683 }
ca2163eb 1684 }
4c28f408 1685
ca2163eb 1686 *status = 0;
4c28f408 1687
ca2163eb
PA
1688 if (signo == TARGET_SIGNAL_0)
1689 {
1690 if (debug_linux_nat)
1691 fprintf_unfiltered (gdb_stdlog,
1692 "GPT: lwp %s has no pending signal\n",
1693 target_pid_to_str (lp->ptid));
1694 }
1695 else if (!signal_pass_state (signo))
1696 {
1697 if (debug_linux_nat)
3e43a32a
MS
1698 fprintf_unfiltered (gdb_stdlog,
1699 "GPT: lwp %s had signal %s, "
1700 "but it is in no pass state\n",
ca2163eb
PA
1701 target_pid_to_str (lp->ptid),
1702 target_signal_to_string (signo));
a0ef4274 1703 }
a0ef4274 1704 else
4c28f408 1705 {
ca2163eb
PA
1706 *status = W_STOPCODE (target_signal_to_host (signo));
1707
1708 if (debug_linux_nat)
1709 fprintf_unfiltered (gdb_stdlog,
1710 "GPT: lwp %s has pending signal %s\n",
1711 target_pid_to_str (lp->ptid),
1712 target_signal_to_string (signo));
4c28f408 1713 }
a0ef4274
DJ
1714
1715 return 0;
1716}
1717
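The W_STOPCODE call above packs the chosen signal back into a synthetic wait status so that a caller such as detach_callback can recover it later with WSTOPSIG. The standalone sketch below is not part of linux-nat.c: it uses a raw host signal number instead of GDB's target_signal values, and supplies the conventional fallback definition in case the C library does not expose W_STOPCODE.

#include <stdio.h>
#include <signal.h>
#include <sys/wait.h>

#ifndef W_STOPCODE
# define W_STOPCODE(sig) (((sig) << 8) | 0x7f)   /* conventional encoding */
#endif

int
main (void)
{
  /* Pretend SIGINT is the pending signal, as get_pending_status does
     with the host signal number.  */
  int status = W_STOPCODE (SIGINT);

  printf ("WIFSTOPPED = %d, WSTOPSIG = %d (SIGINT = %d)\n",
          WIFSTOPPED (status), WSTOPSIG (status), SIGINT);
  return 0;
}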
d6b0e80f
AC
1718static int
1719detach_callback (struct lwp_info *lp, void *data)
1720{
1721 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1722
1723 if (debug_linux_nat && lp->status)
1724 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1725 strsignal (WSTOPSIG (lp->status)),
1726 target_pid_to_str (lp->ptid));
1727
a0ef4274
DJ
1728 /* If there is a pending SIGSTOP, get rid of it. */
1729 if (lp->signalled)
d6b0e80f 1730 {
d6b0e80f
AC
1731 if (debug_linux_nat)
1732 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1733 "DC: Sending SIGCONT to %s\n",
1734 target_pid_to_str (lp->ptid));
d6b0e80f 1735
a0ef4274 1736 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1737 lp->signalled = 0;
d6b0e80f
AC
1738 }
1739
1740 /* We don't actually detach from the LWP that has an id equal to the
1741 overall process id just yet. */
1742 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1743 {
a0ef4274
DJ
1744 int status = 0;
1745
1746 /* Pass on any pending signal for this LWP. */
1747 get_pending_status (lp, &status);
1748
d6b0e80f
AC
1749 errno = 0;
1750 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1751 WSTOPSIG (status)) < 0)
8a3fe4f8 1752 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1753 safe_strerror (errno));
1754
1755 if (debug_linux_nat)
1756 fprintf_unfiltered (gdb_stdlog,
1757 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1758 target_pid_to_str (lp->ptid),
7feb7d06 1759 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1760
1761 delete_lwp (lp->ptid);
1762 }
1763
1764 return 0;
1765}
1766
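detach_callback relies on PTRACE_DETACH's last argument to hand the pending signal back to the thread as it is released. Below is a minimal, hedged illustration (a standalone program, not GDB code; no error checking; assumes default signal dispositions): the tracee is detached with SIGTERM injected in place of the SIGSTOP it had raised, so it terminates with that signal.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  int status;
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);          /* stop so the parent can act */
      pause ();                 /* never reached if SIGTERM is injected */
      _exit (0);
    }

  waitpid (child, &status, 0);  /* collect the SIGSTOP stop */

  /* Detach, delivering SIGTERM instead of the stopped signal.  */
  ptrace (PTRACE_DETACH, child, 0, (void *) (long) SIGTERM);

  waitpid (child, &status, 0);
  if (WIFSIGNALED (status))
    printf ("child terminated by signal %d\n", WTERMSIG (status));
  return 0;
}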
1767static void
136d6dae 1768linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1769{
b84876c2 1770 int pid;
a0ef4274 1771 int status;
d90e17a7
PA
1772 struct lwp_info *main_lwp;
1773
1774 pid = GET_PID (inferior_ptid);
a0ef4274 1775
b84876c2
PA
1776 if (target_can_async_p ())
1777 linux_nat_async (NULL, 0);
1778
4c28f408
PA
1779 /* Stop all threads before detaching. ptrace requires that the
1780 thread is stopped to successfully detach. */
d90e17a7 1781 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1782 /* ... and wait until all of them have reported back that
1783 they're no longer running. */
d90e17a7 1784 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1785
d90e17a7 1786 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1787
1788 /* Only the initial process should be left right now. */
d90e17a7
PA
1789 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1790
1791 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1792
a0ef4274
DJ
1793 /* Pass on any pending signal for the last LWP. */
1794 if ((args == NULL || *args == '\0')
d90e17a7 1795 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1796 && WIFSTOPPED (status))
1797 {
1798 /* Put the signal number in ARGS so that inf_ptrace_detach will
1799 pass it along with PTRACE_DETACH. */
1800 args = alloca (8);
1801 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1802 if (debug_linux_nat)
1803 fprintf_unfiltered (gdb_stdlog,
1804 "LND: Sending signal %s to %s\n",
1805 args,
1806 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1807 }
1808
d90e17a7 1809 delete_lwp (main_lwp->ptid);
b84876c2 1810
7a7d3353
PA
1811 if (forks_exist_p ())
1812 {
1813 /* Multi-fork case. The current inferior_ptid is being detached
1814 from, but there are other viable forks to debug. Detach from
1815 the current fork, and context-switch to the first
1816 available. */
1817 linux_fork_detach (args, from_tty);
1818
1819 if (non_stop && target_can_async_p ())
1820 target_async (inferior_event_handler, 0);
1821 }
1822 else
1823 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1824}
1825
1826/* Resume LP. */
1827
1828static int
1829resume_callback (struct lwp_info *lp, void *data)
1830{
6c95b8df
PA
1831 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1832
1833 if (lp->stopped && inf->vfork_child != NULL)
1834 {
1835 if (debug_linux_nat)
1836 fprintf_unfiltered (gdb_stdlog,
1837 "RC: Not resuming %s (vfork parent)\n",
1838 target_pid_to_str (lp->ptid));
1839 }
1840 else if (lp->stopped && lp->status == 0)
d6b0e80f 1841 {
d90e17a7
PA
1842 if (debug_linux_nat)
1843 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1844 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
d90e17a7
PA
1845 target_pid_to_str (lp->ptid));
1846
28439f5e
PA
1847 linux_ops->to_resume (linux_ops,
1848 pid_to_ptid (GET_LWP (lp->ptid)),
a289b8f6 1849 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1850 if (debug_linux_nat)
1851 fprintf_unfiltered (gdb_stdlog,
a289b8f6 1852 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
d6b0e80f
AC
1853 target_pid_to_str (lp->ptid));
1854 lp->stopped = 0;
a289b8f6 1855 lp->step = 0;
9f0bdab8 1856 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1857 lp->stopped_by_watchpoint = 0;
d6b0e80f 1858 }
57380f4e 1859 else if (lp->stopped && debug_linux_nat)
3e43a32a
MS
1860 fprintf_unfiltered (gdb_stdlog,
1861 "RC: Not resuming sibling %s (has pending)\n",
57380f4e
DJ
1862 target_pid_to_str (lp->ptid));
1863 else if (debug_linux_nat)
3e43a32a
MS
1864 fprintf_unfiltered (gdb_stdlog,
1865 "RC: Not resuming sibling %s (not stopped)\n",
57380f4e 1866 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1867
1868 return 0;
1869}
1870
1871static int
1872resume_clear_callback (struct lwp_info *lp, void *data)
1873{
1874 lp->resumed = 0;
1875 return 0;
1876}
1877
1878static int
1879resume_set_callback (struct lwp_info *lp, void *data)
1880{
1881 lp->resumed = 1;
1882 return 0;
1883}
1884
1885static void
28439f5e
PA
1886linux_nat_resume (struct target_ops *ops,
1887 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1888{
7feb7d06 1889 sigset_t prev_mask;
d6b0e80f 1890 struct lwp_info *lp;
d90e17a7 1891 int resume_many;
d6b0e80f 1892
76f50ad1
DJ
1893 if (debug_linux_nat)
1894 fprintf_unfiltered (gdb_stdlog,
1895 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1896 step ? "step" : "resume",
1897 target_pid_to_str (ptid),
423ec54c
JK
1898 (signo != TARGET_SIGNAL_0
1899 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1900 target_pid_to_str (inferior_ptid));
1901
7feb7d06 1902 block_child_signals (&prev_mask);
b84876c2 1903
d6b0e80f 1904 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1905 resume_many = (ptid_equal (minus_one_ptid, ptid)
1906 || ptid_is_pid (ptid));
4c28f408 1907
e3e9f5a2
PA
1908 /* Mark the lwps we're resuming as resumed. */
1909 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1910
d90e17a7
PA
1911 /* See if it's the current inferior that should be handled
1912 specially. */
1913 if (resume_many)
1914 lp = find_lwp_pid (inferior_ptid);
1915 else
1916 lp = find_lwp_pid (ptid);
9f0bdab8 1917 gdb_assert (lp != NULL);
d6b0e80f 1918
9f0bdab8
DJ
1919 /* Remember if we're stepping. */
1920 lp->step = step;
d6b0e80f 1921
9f0bdab8
DJ
1922 /* If we have a pending wait status for this thread, there is no
1923 point in resuming the process. But first make sure that
1924 linux_nat_wait won't preemptively handle the event - we
1925 should never take this short-circuit if we are going to
1926 leave LP running, since we have skipped resuming all the
1927 other threads. This bit of code needs to be synchronized
1928 with linux_nat_wait. */
76f50ad1 1929
9f0bdab8
DJ
1930 if (lp->status && WIFSTOPPED (lp->status))
1931 {
2455069d
UW
1932 if (!lp->step
1933 && WSTOPSIG (lp->status)
1934 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1935 {
9f0bdab8
DJ
1936 if (debug_linux_nat)
1937 fprintf_unfiltered (gdb_stdlog,
1938 "LLR: Not short circuiting for ignored "
1939 "status 0x%x\n", lp->status);
1940
d6b0e80f
AC
1941 /* FIXME: What should we do if we are supposed to continue
1942 this thread with a signal? */
1943 gdb_assert (signo == TARGET_SIGNAL_0);
2455069d 1944 signo = target_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1945 lp->status = 0;
1946 }
1947 }
76f50ad1 1948
6c95b8df 1949 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1950 {
1951 /* FIXME: What should we do if we are supposed to continue
1952 this thread with a signal? */
1953 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1954
9f0bdab8
DJ
1955 if (debug_linux_nat)
1956 fprintf_unfiltered (gdb_stdlog,
1957 "LLR: Short circuiting for status 0x%x\n",
1958 lp->status);
d6b0e80f 1959
7feb7d06
PA
1960 restore_child_signals_mask (&prev_mask);
1961 if (target_can_async_p ())
1962 {
1963 target_async (inferior_event_handler, 0);
1964 /* Tell the event loop we have something to process. */
1965 async_file_mark ();
1966 }
9f0bdab8 1967 return;
d6b0e80f
AC
1968 }
1969
9f0bdab8
DJ
1970 /* Mark LWP as not stopped to prevent it from being continued by
1971 resume_callback. */
1972 lp->stopped = 0;
1973
d90e17a7
PA
1974 if (resume_many)
1975 iterate_over_lwps (ptid, resume_callback, NULL);
1976
1977 /* Convert to something the lower layer understands. */
1978 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1979
28439f5e 1980 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 1981 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1982 lp->stopped_by_watchpoint = 0;
9f0bdab8 1983
d6b0e80f
AC
1984 if (debug_linux_nat)
1985 fprintf_unfiltered (gdb_stdlog,
1986 "LLR: %s %s, %s (resume event thread)\n",
1987 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1988 target_pid_to_str (ptid),
423ec54c
JK
1989 (signo != TARGET_SIGNAL_0
1990 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 1991
7feb7d06 1992 restore_child_signals_mask (&prev_mask);
b84876c2 1993 if (target_can_async_p ())
8ea051c5 1994 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1995}
1996
c5f62d5f 1997/* Send a signal to an LWP. */
d6b0e80f
AC
1998
1999static int
2000kill_lwp (int lwpid, int signo)
2001{
c5f62d5f
DE
2002 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2003 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2004
2005#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2006 {
2007 static int tkill_failed;
2008
2009 if (!tkill_failed)
2010 {
2011 int ret;
2012
2013 errno = 0;
2014 ret = syscall (__NR_tkill, lwpid, signo);
2015 if (errno != ENOSYS)
2016 return ret;
2017 tkill_failed = 1;
2018 }
2019 }
d6b0e80f
AC
2020#endif
2021
2022 return kill (lwpid, signo);
2023}
2024
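Plain kill targets a whole thread group, whereas tkill directs the signal at one specific LWP, which is what the SIGSTOP juggling in this file needs. The rough standalone sketch below shows the same try-tkill-then-fall-back-to-kill pattern; the helper name send_to_lwp is hypothetical, and it assumes a Linux <sys/syscall.h> that defines __NR_tkill.

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
send_to_lwp (pid_t lwpid, int signo)
{
#ifdef __NR_tkill
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, lwpid, signo);
  if (ret == 0 || errno != ENOSYS)
    return ret;                 /* tkill exists; its result stands */
#endif
  return kill (lwpid, signo);   /* old kernels: fall back to kill */
}

int
main (void)
{
  /* Signal 0 only checks that the target LWP exists.  */
  return send_to_lwp (getpid (), 0) == 0 ? 0 : 1;
}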
ca2163eb
PA
2025/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2026 event, check if the core is interested in it: if not, ignore the
2027 event, and keep waiting; otherwise, we need to toggle the LWP's
2028 syscall entry/exit status, since the ptrace event itself doesn't
2029 indicate it, and report the trap to higher layers. */
2030
2031static int
2032linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2033{
2034 struct target_waitstatus *ourstatus = &lp->waitstatus;
2035 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2036 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2037
2038 if (stopping)
2039 {
2040 /* If we're stopping threads, there's a SIGSTOP pending, which
2041 makes it so that the LWP reports an immediate syscall return,
2042 followed by the SIGSTOP. Skip seeing that "return" using
2043 PTRACE_CONT directly, and let stop_wait_callback collect the
2044 SIGSTOP. Later when the thread is resumed, a new syscall
2045 entry event will be reported. If we didn't do this (and returned 0), we'd
2046 leave a syscall entry pending, and our caller, by using
2047 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2048 itself. Later, when the user re-resumes this LWP, we'd see
2049 another syscall entry event and we'd mistake it for a return.
2050
2051 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2052 (leaving immediately with LWP->signalled set, without issuing
2053 a PTRACE_CONT), it would still be problematic to leave this
2054 syscall enter pending, as later when the thread is resumed,
2055 it would then see the same syscall exit mentioned above,
2056 followed by the delayed SIGSTOP, while the syscall didn't
2057 actually get to execute. It seems it would be even more
2058 confusing to the user. */
2059
2060 if (debug_linux_nat)
2061 fprintf_unfiltered (gdb_stdlog,
2062 "LHST: ignoring syscall %d "
2063 "for LWP %ld (stopping threads), "
2064 "resuming with PTRACE_CONT for SIGSTOP\n",
2065 syscall_number,
2066 GET_LWP (lp->ptid));
2067
2068 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2069 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2070 return 1;
2071 }
2072
2073 if (catch_syscall_enabled ())
2074 {
2075 /* Always update the entry/return state, even if this particular
2076 syscall isn't interesting to the core now. In async mode,
2077 the user could install a new catchpoint for this syscall
2078 between syscall enter/return, and we'll need to know to
2079 report a syscall return if that happens. */
2080 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2081 ? TARGET_WAITKIND_SYSCALL_RETURN
2082 : TARGET_WAITKIND_SYSCALL_ENTRY);
2083
2084 if (catching_syscall_number (syscall_number))
2085 {
2086 /* Alright, an event to report. */
2087 ourstatus->kind = lp->syscall_state;
2088 ourstatus->value.syscall_number = syscall_number;
2089
2090 if (debug_linux_nat)
2091 fprintf_unfiltered (gdb_stdlog,
2092 "LHST: stopping for %s of syscall %d"
2093 " for LWP %ld\n",
3e43a32a
MS
2094 lp->syscall_state
2095 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2096 ? "entry" : "return",
2097 syscall_number,
2098 GET_LWP (lp->ptid));
2099 return 0;
2100 }
2101
2102 if (debug_linux_nat)
2103 fprintf_unfiltered (gdb_stdlog,
2104 "LHST: ignoring %s of syscall %d "
2105 "for LWP %ld\n",
2106 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2107 ? "entry" : "return",
2108 syscall_number,
2109 GET_LWP (lp->ptid));
2110 }
2111 else
2112 {
2113 /* If we had been syscall tracing, and hence used PT_SYSCALL
2114 before on this LWP, it could happen that the user removes all
2115 syscall catchpoints before we get to process this event.
2116 There are two noteworthy issues here:
2117
2118 - When stopped at a syscall entry event, resuming with
2119 PT_STEP still resumes executing the syscall and reports a
2120 syscall return.
2121
2122 - Only PT_SYSCALL catches syscall enters. If we last
2123 single-stepped this thread, then this event can't be a
2124 syscall enter; it has to be a syscall exit.
2125
2126
2127 The points above mean that the next resume, be it PT_STEP or
2128 PT_CONTINUE, can not trigger a syscall trace event. */
2129 if (debug_linux_nat)
2130 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2131 "LHST: caught syscall event "
2132 "with no syscall catchpoints."
ca2163eb
PA
2133 " %d for LWP %ld, ignoring\n",
2134 syscall_number,
2135 GET_LWP (lp->ptid));
2136 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2137 }
2138
2139 /* The core isn't interested in this event. For efficiency, avoid
2140 stopping all threads only to have the core resume them all again.
2141 Since we're not stopping threads, if we're still syscall tracing
2142 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2143 subsequent syscall. Simply resume using the inf-ptrace layer,
2144 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2145
2146 /* Note that gdbarch_get_syscall_number may access registers, hence
2147 fill a regcache. */
2148 registers_changed ();
2149 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2150 lp->step, TARGET_SIGNAL_0);
2151 return 1;
2152}
2153
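The central point above is that a PTRACE_SYSCALL stop carries no entry-versus-exit flag; the tracer has to keep that parity itself, which is what lp->syscall_state does, and why an intervening PTRACE_CONT or single-step breaks the bookkeeping. Here is a bare-bones standalone tracer illustrating the parity toggle, purely as a sketch: it assumes a `true` executable on PATH and ignores signal stops and error handling.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  int status, entry = 0;
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      execlp ("true", "true", (char *) NULL);
      _exit (127);
    }

  waitpid (child, &status, 0);          /* stop after exec */
  while (1)
    {
      ptrace (PTRACE_SYSCALL, child, 0, 0);
      waitpid (child, &status, 0);
      if (WIFEXITED (status))
        break;

      entry = !entry;                   /* the tracer's own parity bit */
      printf ("syscall %s stop\n", entry ? "entry" : "exit");
    }
  return 0;
}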
3d799a95
DJ
2154/* Handle a GNU/Linux extended wait response. If we see a clone
2155 event, we need to add the new LWP to our list (and not report the
2156 trap to higher layers). This function returns non-zero if the
2157 event should be ignored and we should wait again. If STOPPING is
2158 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2159
2160static int
3d799a95
DJ
2161linux_handle_extended_wait (struct lwp_info *lp, int status,
2162 int stopping)
d6b0e80f 2163{
3d799a95
DJ
2164 int pid = GET_LWP (lp->ptid);
2165 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2166 int event = status >> 16;
d6b0e80f 2167
3d799a95
DJ
2168 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2169 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2170 {
3d799a95
DJ
2171 unsigned long new_pid;
2172 int ret;
2173
2174 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2175
3d799a95
DJ
2176 /* If we haven't already seen the new PID stop, wait for it now. */
2177 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2178 {
2179 /* The new child has a pending SIGSTOP. We can't affect it until it
2180 hits the SIGSTOP, but we're already attached. */
2181 ret = my_waitpid (new_pid, &status,
2182 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2183 if (ret == -1)
2184 perror_with_name (_("waiting for new child"));
2185 else if (ret != new_pid)
2186 internal_error (__FILE__, __LINE__,
2187 _("wait returned unexpected PID %d"), ret);
2188 else if (!WIFSTOPPED (status))
2189 internal_error (__FILE__, __LINE__,
2190 _("wait returned unexpected status 0x%x"), status);
2191 }
2192
3a3e9ee3 2193 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2194
2277426b
PA
2195 if (event == PTRACE_EVENT_FORK
2196 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2197 {
2277426b
PA
2198 /* Handle checkpointing by linux-fork.c here as a special
2199 case. We don't want the follow-fork-mode or 'catch fork'
2200 to interfere with this. */
2201
2202 /* This won't actually modify the breakpoint list, but will
2203 physically remove the breakpoints from the child. */
2204 detach_breakpoints (new_pid);
2205
2206 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2207 if (!find_fork_pid (new_pid))
2208 add_fork (new_pid);
2277426b
PA
2209
2210 /* Report as spurious, so that infrun doesn't want to follow
2211 this fork. We're actually doing an infcall in
2212 linux-fork.c. */
2213 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2214 linux_enable_event_reporting (pid_to_ptid (new_pid));
2215
2216 /* Report the stop to the core. */
2217 return 0;
2218 }
2219
3d799a95
DJ
2220 if (event == PTRACE_EVENT_FORK)
2221 ourstatus->kind = TARGET_WAITKIND_FORKED;
2222 else if (event == PTRACE_EVENT_VFORK)
2223 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2224 else
3d799a95 2225 {
78768c4a
JK
2226 struct lwp_info *new_lp;
2227
3d799a95 2228 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2229
d90e17a7 2230 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2231 new_lp->cloned = 1;
4c28f408 2232 new_lp->stopped = 1;
d6b0e80f 2233
3d799a95
DJ
2234 if (WSTOPSIG (status) != SIGSTOP)
2235 {
2236 /* This can happen if someone sends the new thread a signal
2237 with a lower number than SIGSTOP (e.g. SIGUSR1) before it
2238 gets a chance to run.
2239 This is an unlikely case, and harder to handle for
2240 fork / vfork than for clone, so we do not try - but
2241 we handle it for clone events here. We'll send
2242 the other signal on to the thread below. */
2243
2244 new_lp->signalled = 1;
2245 }
2246 else
2247 status = 0;
d6b0e80f 2248
4c28f408 2249 if (non_stop)
3d799a95 2250 {
4c28f408
PA
2251 /* Add the new thread to GDB's lists as soon as possible
2252 so that:
2253
2254 1) the frontend doesn't have to wait for a stop to
2255 display them, and,
2256
2257 2) we tag it with the correct running state. */
2258
2259 /* If the thread_db layer is active, let it know about
2260 this new thread, and add it to GDB's list. */
2261 if (!thread_db_attach_lwp (new_lp->ptid))
2262 {
2263 /* We're not using thread_db. Add it to GDB's
2264 list. */
2265 target_post_attach (GET_LWP (new_lp->ptid));
2266 add_thread (new_lp->ptid);
2267 }
2268
2269 if (!stopping)
2270 {
2271 set_running (new_lp->ptid, 1);
2272 set_executing (new_lp->ptid, 1);
2273 }
2274 }
2275
ca2163eb
PA
2276 /* Note the need to use the low target ops to resume, to
2277 handle resuming with PT_SYSCALL if we have syscall
2278 catchpoints. */
4c28f408
PA
2279 if (!stopping)
2280 {
423ec54c 2281 enum target_signal signo;
ca2163eb 2282
4c28f408 2283 new_lp->stopped = 0;
3d799a95 2284 new_lp->resumed = 1;
ca2163eb
PA
2285
2286 signo = (status
2287 ? target_signal_from_host (WSTOPSIG (status))
2288 : TARGET_SIGNAL_0);
2289
2290 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2291 0, signo);
3d799a95 2292 }
ad34eb2f
JK
2293 else
2294 {
2295 if (status != 0)
2296 {
2297 /* We created NEW_LP so it cannot yet contain STATUS. */
2298 gdb_assert (new_lp->status == 0);
2299
2300 /* Save the wait status to report later. */
2301 if (debug_linux_nat)
2302 fprintf_unfiltered (gdb_stdlog,
2303 "LHEW: waitpid of new LWP %ld, "
2304 "saving status %s\n",
2305 (long) GET_LWP (new_lp->ptid),
2306 status_to_str (status));
2307 new_lp->status = status;
2308 }
2309 }
d6b0e80f 2310
3d799a95
DJ
2311 if (debug_linux_nat)
2312 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2313 "LHEW: Got clone event "
2314 "from LWP %ld, resuming\n",
3d799a95 2315 GET_LWP (lp->ptid));
ca2163eb
PA
2316 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2317 0, TARGET_SIGNAL_0);
3d799a95
DJ
2318
2319 return 1;
2320 }
2321
2322 return 0;
d6b0e80f
AC
2323 }
2324
3d799a95
DJ
2325 if (event == PTRACE_EVENT_EXEC)
2326 {
a75724bc
PA
2327 if (debug_linux_nat)
2328 fprintf_unfiltered (gdb_stdlog,
2329 "LHEW: Got exec event from LWP %ld\n",
2330 GET_LWP (lp->ptid));
2331
3d799a95
DJ
2332 ourstatus->kind = TARGET_WAITKIND_EXECD;
2333 ourstatus->value.execd_pathname
6d8fd2b7 2334 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2335
6c95b8df
PA
2336 return 0;
2337 }
2338
2339 if (event == PTRACE_EVENT_VFORK_DONE)
2340 {
2341 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2342 {
6c95b8df 2343 if (debug_linux_nat)
3e43a32a
MS
2344 fprintf_unfiltered (gdb_stdlog,
2345 "LHEW: Got expected PTRACE_EVENT_"
2346 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2347 GET_LWP (lp->ptid));
3d799a95 2348
6c95b8df
PA
2349 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2350 return 0;
3d799a95
DJ
2351 }
2352
6c95b8df 2353 if (debug_linux_nat)
3e43a32a
MS
2354 fprintf_unfiltered (gdb_stdlog,
2355 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2356 "from LWP %ld: resuming\n",
6c95b8df
PA
2357 GET_LWP (lp->ptid));
2358 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2359 return 1;
3d799a95
DJ
2360 }
2361
2362 internal_error (__FILE__, __LINE__,
2363 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2364}
2365
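These extended events only arrive because PTRACE_SETOPTIONS asked for them, and the event code travels in the high bits of the wait status, which is what the `status >> 16` tests above extract. A minimal standalone illustration for the fork case follows; it is a sketch only, assumes a glibc <sys/ptrace.h> that defines PTRACE_O_TRACEFORK and PTRACE_EVENT_FORK, and omits error handling and cleanup of the new child.

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <unistd.h>

int
main (void)
{
  int status;
  pid_t child = fork ();

  if (child == 0)
    {
      ptrace (PTRACE_TRACEME, 0, 0, 0);
      raise (SIGSTOP);                  /* let the parent set options */
      if (fork () == 0)
        _exit (0);
      _exit (0);
    }

  waitpid (child, &status, 0);          /* the SIGSTOP stop */
  ptrace (PTRACE_SETOPTIONS, child, 0, (void *) PTRACE_O_TRACEFORK);
  ptrace (PTRACE_CONT, child, 0, 0);

  waitpid (child, &status, 0);
  if (WIFSTOPPED (status) && (status >> 16) == PTRACE_EVENT_FORK)
    {
      unsigned long new_pid;

      ptrace (PTRACE_GETEVENTMSG, child, 0, &new_pid);
      printf ("fork event: new child is %lu\n", new_pid);
    }
  return 0;
}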
2366/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2367 exited. */
2368
2369static int
2370wait_lwp (struct lwp_info *lp)
2371{
2372 pid_t pid;
2373 int status;
2374 int thread_dead = 0;
2375
2376 gdb_assert (!lp->stopped);
2377 gdb_assert (lp->status == 0);
2378
58aecb61 2379 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
2380 if (pid == -1 && errno == ECHILD)
2381 {
58aecb61 2382 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
2383 if (pid == -1 && errno == ECHILD)
2384 {
2385 /* The thread has previously exited. We need to delete it
2386 now because, for some vendor 2.4 kernels with NPTL
2387 support backported, there won't be an exit event unless
2388 it is the main thread. 2.6 kernels will report an exit
2389 event for each thread that exits, as expected. */
2390 thread_dead = 1;
2391 if (debug_linux_nat)
2392 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2393 target_pid_to_str (lp->ptid));
2394 }
2395 }
2396
2397 if (!thread_dead)
2398 {
2399 gdb_assert (pid == GET_LWP (lp->ptid));
2400
2401 if (debug_linux_nat)
2402 {
2403 fprintf_unfiltered (gdb_stdlog,
2404 "WL: waitpid %s received %s\n",
2405 target_pid_to_str (lp->ptid),
2406 status_to_str (status));
2407 }
2408 }
2409
2410 /* Check if the thread has exited. */
2411 if (WIFEXITED (status) || WIFSIGNALED (status))
2412 {
2413 thread_dead = 1;
2414 if (debug_linux_nat)
2415 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2416 target_pid_to_str (lp->ptid));
2417 }
2418
2419 if (thread_dead)
2420 {
e26af52f 2421 exit_lwp (lp);
d6b0e80f
AC
2422 return 0;
2423 }
2424
2425 gdb_assert (WIFSTOPPED (status));
2426
ca2163eb
PA
2427 /* Handle GNU/Linux's syscall SIGTRAPs. */
2428 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2429 {
2430 /* No longer need the sysgood bit. The ptrace event ends up
2431 recorded in lp->waitstatus if we care for it. We can carry
2432 on handling the event like a regular SIGTRAP from here
2433 on. */
2434 status = W_STOPCODE (SIGTRAP);
2435 if (linux_handle_syscall_trap (lp, 1))
2436 return wait_lwp (lp);
2437 }
2438
d6b0e80f
AC
2439 /* Handle GNU/Linux's extended waitstatus for trace events. */
2440 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2441 {
2442 if (debug_linux_nat)
2443 fprintf_unfiltered (gdb_stdlog,
2444 "WL: Handling extended status 0x%06x\n",
2445 status);
3d799a95 2446 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2447 return wait_lwp (lp);
2448 }
2449
2450 return status;
2451}
2452
9f0bdab8
DJ
2453/* Save the most recent siginfo for LP. This is currently only called
2454 for SIGTRAP; some ports use the si_addr field for
2455 target_stopped_data_address. In the future, it may also be used to
2456 restore the siginfo of requeued signals. */
2457
2458static void
2459save_siginfo (struct lwp_info *lp)
2460{
2461 errno = 0;
2462 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2463 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2464
2465 if (errno != 0)
2466 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2467}
2468
d6b0e80f
AC
2469/* Send a SIGSTOP to LP. */
2470
2471static int
2472stop_callback (struct lwp_info *lp, void *data)
2473{
2474 if (!lp->stopped && !lp->signalled)
2475 {
2476 int ret;
2477
2478 if (debug_linux_nat)
2479 {
2480 fprintf_unfiltered (gdb_stdlog,
2481 "SC: kill %s **<SIGSTOP>**\n",
2482 target_pid_to_str (lp->ptid));
2483 }
2484 errno = 0;
2485 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2486 if (debug_linux_nat)
2487 {
2488 fprintf_unfiltered (gdb_stdlog,
2489 "SC: lwp kill %d %s\n",
2490 ret,
2491 errno ? safe_strerror (errno) : "ERRNO-OK");
2492 }
2493
2494 lp->signalled = 1;
2495 gdb_assert (lp->status == 0);
2496 }
2497
2498 return 0;
2499}
2500
57380f4e 2501/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2502
2503static int
57380f4e
DJ
2504linux_nat_has_pending_sigint (int pid)
2505{
2506 sigset_t pending, blocked, ignored;
57380f4e
DJ
2507
2508 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2509
2510 if (sigismember (&pending, SIGINT)
2511 && !sigismember (&ignored, SIGINT))
2512 return 1;
2513
2514 return 0;
2515}
2516
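linux_proc_pending_signals (implemented elsewhere in GDB) obtains those signal sets from the tracee's /proc entry. As a rough idea of what that involves, the hedged sketch below reads the per-thread and shared pending masks from /proc/<pid>/status, where bit (sig - 1) of the hex mask means signal sig is pending; unlike the code above it does not consult the ignored mask, and it assumes the usual Linux "SigPnd:/ShdPnd:" line format.

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static int
proc_has_pending_sigint (int pid)
{
  char path[64], line[256];
  unsigned long long mask;
  FILE *f;
  int pending = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (sscanf (line, "SigPnd: %llx", &mask) == 1
        || sscanf (line, "ShdPnd: %llx", &mask) == 1)
      if (mask & (1ULL << (SIGINT - 1)))
        pending = 1;

  fclose (f);
  return pending;
}

int
main (void)
{
  printf ("SIGINT pending for self: %d\n",
          proc_has_pending_sigint ((int) getpid ()));
  return 0;
}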
2517/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2518
2519static int
2520set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2521{
57380f4e
DJ
2522 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2523 flag to consume the next one. */
2524 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2525 && WSTOPSIG (lp->status) == SIGINT)
2526 lp->status = 0;
2527 else
2528 lp->ignore_sigint = 1;
2529
2530 return 0;
2531}
2532
2533/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2534 This function is called after we know the LWP has stopped; if the LWP
2535 stopped before the expected SIGINT was delivered, then it will never have
2536 arrived. Also, if the signal was delivered to a shared queue and consumed
2537 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2538
57380f4e
DJ
2539static void
2540maybe_clear_ignore_sigint (struct lwp_info *lp)
2541{
2542 if (!lp->ignore_sigint)
2543 return;
2544
2545 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2546 {
2547 if (debug_linux_nat)
2548 fprintf_unfiltered (gdb_stdlog,
2549 "MCIS: Clearing bogus flag for %s\n",
2550 target_pid_to_str (lp->ptid));
2551 lp->ignore_sigint = 0;
2552 }
2553}
2554
ebec9a0f
PA
2555/* Fetch the possible triggered data watchpoint info and store it in
2556 LP.
2557
2558 On some archs, like x86, that use debug registers to set
2559 watchpoints, the way to know which watched address trapped is to
2560 check the register that is used to select which address to watch.
2561 The problem is that between setting the watchpoint
2562 and reading back which data address trapped, the user may change
2563 the set of watchpoints, and, as a consequence, GDB changes the
2564 debug registers in the inferior. To avoid reading back a stale
2565 stopped-data-address when that happens, we cache in LP the fact
2566 that a watchpoint trapped, and the corresponding data address, as
2567 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2568 registers meanwhile, we have the cached data we can rely on. */
2569
2570static void
2571save_sigtrap (struct lwp_info *lp)
2572{
2573 struct cleanup *old_chain;
2574
2575 if (linux_ops->to_stopped_by_watchpoint == NULL)
2576 {
2577 lp->stopped_by_watchpoint = 0;
2578 return;
2579 }
2580
2581 old_chain = save_inferior_ptid ();
2582 inferior_ptid = lp->ptid;
2583
2584 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2585
2586 if (lp->stopped_by_watchpoint)
2587 {
2588 if (linux_ops->to_stopped_data_address != NULL)
2589 lp->stopped_data_address_p =
2590 linux_ops->to_stopped_data_address (&current_target,
2591 &lp->stopped_data_address);
2592 else
2593 lp->stopped_data_address_p = 0;
2594 }
2595
2596 do_cleanups (old_chain);
2597}
2598
2599/* See save_sigtrap. */
2600
2601static int
2602linux_nat_stopped_by_watchpoint (void)
2603{
2604 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2605
2606 gdb_assert (lp != NULL);
2607
2608 return lp->stopped_by_watchpoint;
2609}
2610
2611static int
2612linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2613{
2614 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2615
2616 gdb_assert (lp != NULL);
2617
2618 *addr_p = lp->stopped_data_address;
2619
2620 return lp->stopped_data_address_p;
2621}
2622
26ab7092
JK
2623/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2624
2625static int
2626sigtrap_is_event (int status)
2627{
2628 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2629}
2630
2631/* SIGTRAP-like events recognizer. */
2632
2633static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2634
00390b84
JK
2635/* Check for SIGTRAP-like events in LP. */
2636
2637static int
2638linux_nat_lp_status_is_event (struct lwp_info *lp)
2639{
2640 /* We check for lp->waitstatus in addition to lp->status, because we can
2641 have pending process exits recorded in lp->status
2642 and W_EXITCODE(0,0) == 0. We should probably have an additional
2643 lp->status_p flag. */
2644
2645 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2646 && linux_nat_status_is_event (lp->status));
2647}
2648
26ab7092
JK
2649/* Set an alternative recognizer for SIGTRAP-like events. If
2650 breakpoint_inserted_here_p reports a breakpoint at the stop PC, then
2651 gdbarch_decr_pc_after_break will be applied. */
2652
2653void
2654linux_nat_set_status_is_event (struct target_ops *t,
2655 int (*status_is_event) (int status))
2656{
2657 linux_nat_status_is_event = status_is_event;
2658}
2659
57380f4e
DJ
2660/* Wait until LP is stopped. */
2661
2662static int
2663stop_wait_callback (struct lwp_info *lp, void *data)
2664{
6c95b8df
PA
2665 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2666
2667 /* If this is a vfork parent, bail out, it is not going to report
2668 any SIGSTOP until the vfork is done with. */
2669 if (inf->vfork_child != NULL)
2670 return 0;
2671
d6b0e80f
AC
2672 if (!lp->stopped)
2673 {
2674 int status;
2675
2676 status = wait_lwp (lp);
2677 if (status == 0)
2678 return 0;
2679
57380f4e
DJ
2680 if (lp->ignore_sigint && WIFSTOPPED (status)
2681 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2682 {
57380f4e 2683 lp->ignore_sigint = 0;
d6b0e80f
AC
2684
2685 errno = 0;
2686 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2687 if (debug_linux_nat)
2688 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2689 "PTRACE_CONT %s, 0, 0 (%s) "
2690 "(discarding SIGINT)\n",
d6b0e80f
AC
2691 target_pid_to_str (lp->ptid),
2692 errno ? safe_strerror (errno) : "OK");
2693
57380f4e 2694 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2695 }
2696
57380f4e
DJ
2697 maybe_clear_ignore_sigint (lp);
2698
d6b0e80f
AC
2699 if (WSTOPSIG (status) != SIGSTOP)
2700 {
26ab7092 2701 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2702 {
2703 /* If a LWP other than the LWP that we're reporting an
2704 event for has hit a GDB breakpoint (as opposed to
2705 some random trap signal), then just arrange for it to
2706 hit it again later. We don't keep the SIGTRAP status
2707 and don't forward the SIGTRAP signal to the LWP. We
2708 will handle the current event, eventually we will
2709 resume all LWPs, and this one will get its breakpoint
2710 trap again.
2711
2712 If we do not do this, then we run the risk that the
2713 user will delete or disable the breakpoint, but the
2714 thread will have already tripped on it. */
2715
9f0bdab8
DJ
2716 /* Save the trap's siginfo in case we need it later. */
2717 save_siginfo (lp);
2718
ebec9a0f
PA
2719 save_sigtrap (lp);
2720
1777feb0 2721 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2722 errno = 0;
2723 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2724 if (debug_linux_nat)
2725 {
2726 fprintf_unfiltered (gdb_stdlog,
2727 "PTRACE_CONT %s, 0, 0 (%s)\n",
2728 target_pid_to_str (lp->ptid),
2729 errno ? safe_strerror (errno) : "OK");
2730
2731 fprintf_unfiltered (gdb_stdlog,
2732 "SWC: Candidate SIGTRAP event in %s\n",
2733 target_pid_to_str (lp->ptid));
2734 }
710151dd 2735 /* Hold this event/waitstatus while we check to see if
1777feb0 2736 there are any more (we still want to get that SIGSTOP). */
57380f4e 2737 stop_wait_callback (lp, NULL);
710151dd 2738
7feb7d06
PA
2739 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2740 there's another event, throw it back into the
1777feb0 2741 queue. */
7feb7d06 2742 if (lp->status)
710151dd 2743 {
7feb7d06
PA
2744 if (debug_linux_nat)
2745 fprintf_unfiltered (gdb_stdlog,
2746 "SWC: kill %s, %s\n",
2747 target_pid_to_str (lp->ptid),
2748 status_to_str ((int) status));
2749 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2750 }
7feb7d06 2751
1777feb0 2752 /* Save the sigtrap event. */
7feb7d06 2753 lp->status = status;
d6b0e80f
AC
2754 return 0;
2755 }
2756 else
2757 {
2758 /* The thread was stopped with a signal other than
1777feb0 2759 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2760
2761 if (debug_linux_nat)
2762 {
2763 fprintf_unfiltered (gdb_stdlog,
2764 "SWC: Pending event %s in %s\n",
2765 status_to_str ((int) status),
2766 target_pid_to_str (lp->ptid));
2767 }
1777feb0 2768 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2769 errno = 0;
2770 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2771 if (debug_linux_nat)
2772 fprintf_unfiltered (gdb_stdlog,
2773 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2774 target_pid_to_str (lp->ptid),
2775 errno ? safe_strerror (errno) : "OK");
2776
2777 /* Hold this event/waitstatus while we check to see if
1777feb0 2778 there are any more (we still want to get that SIGSTOP). */
57380f4e 2779 stop_wait_callback (lp, NULL);
710151dd
PA
2780
2781 /* If the lp->status field is still empty, use it to
2782 hold this event. If not, then this event must be
2783 returned to the event queue of the LWP. */
7feb7d06 2784 if (lp->status)
d6b0e80f
AC
2785 {
2786 if (debug_linux_nat)
2787 {
2788 fprintf_unfiltered (gdb_stdlog,
2789 "SWC: kill %s, %s\n",
2790 target_pid_to_str (lp->ptid),
2791 status_to_str ((int) status));
2792 }
2793 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2794 }
710151dd
PA
2795 else
2796 lp->status = status;
d6b0e80f
AC
2797 return 0;
2798 }
2799 }
2800 else
2801 {
2802 /* We caught the SIGSTOP that we intended to catch, so
2803 there's no SIGSTOP pending. */
2804 lp->stopped = 1;
2805 lp->signalled = 0;
2806 }
2807 }
2808
2809 return 0;
2810}
2811
d6b0e80f
AC
2812/* Return non-zero if LP has a wait status pending. */
2813
2814static int
2815status_callback (struct lwp_info *lp, void *data)
2816{
2817 /* Only report a pending wait status if we pretend that this has
2818 indeed been resumed. */
ca2163eb
PA
2819 if (!lp->resumed)
2820 return 0;
2821
2822 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2823 {
2824 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2825 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2826 0', so a clean process exit can not be stored pending in
2827 lp->status, it is indistinguishable from
2828 no-pending-status. */
2829 return 1;
2830 }
2831
2832 if (lp->status != 0)
2833 return 1;
2834
2835 return 0;
d6b0e80f
AC
2836}
2837
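The point above about a clean exit being unrepresentable in lp->status is plain wait-status arithmetic. The tiny standalone check below makes it concrete; it supplies the conventional W_EXITCODE definition in case the C library does not expose that macro.

#include <stdio.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
# define W_EXITCODE(ret, sig) (((ret) << 8) | (sig))
#endif

int
main (void)
{
  int status = W_EXITCODE (0, 0);

  /* Prints 0 1 0: a clean exit encodes to the integer 0, so it cannot
     be parked in a field whose "no status" value is also 0.  */
  printf ("%d %d %d\n", status, WIFEXITED (status), WEXITSTATUS (status));
  return 0;
}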
2838/* Return non-zero if LP isn't stopped. */
2839
2840static int
2841running_callback (struct lwp_info *lp, void *data)
2842{
2843 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2844}
2845
2846/* Count the LWP's that have had events. */
2847
2848static int
2849count_events_callback (struct lwp_info *lp, void *data)
2850{
2851 int *count = data;
2852
2853 gdb_assert (count != NULL);
2854
e09490f1 2855 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2856 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2857 (*count)++;
2858
2859 return 0;
2860}
2861
2862/* Select the LWP (if any) that is currently being single-stepped. */
2863
2864static int
2865select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2866{
2867 if (lp->step && lp->status != 0)
2868 return 1;
2869 else
2870 return 0;
2871}
2872
2873/* Select the Nth LWP that has had a SIGTRAP event. */
2874
2875static int
2876select_event_lwp_callback (struct lwp_info *lp, void *data)
2877{
2878 int *selector = data;
2879
2880 gdb_assert (selector != NULL);
2881
1777feb0 2882 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2883 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2884 if ((*selector)-- == 0)
2885 return 1;
2886
2887 return 0;
2888}
2889
710151dd
PA
2890static int
2891cancel_breakpoint (struct lwp_info *lp)
2892{
2893 /* Arrange for a breakpoint to be hit again later. We don't keep
2894 the SIGTRAP status and don't forward the SIGTRAP signal to the
2895 LWP. We will handle the current event, eventually we will resume
2896 this LWP, and this breakpoint will trap again.
2897
2898 If we do not do this, then we run the risk that the user will
2899 delete or disable the breakpoint, but the LWP will have already
2900 tripped on it. */
2901
515630c5
UW
2902 struct regcache *regcache = get_thread_regcache (lp->ptid);
2903 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2904 CORE_ADDR pc;
2905
2906 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 2907 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2908 {
2909 if (debug_linux_nat)
2910 fprintf_unfiltered (gdb_stdlog,
2911 "CB: Push back breakpoint for %s\n",
2912 target_pid_to_str (lp->ptid));
2913
2914 /* Back up the PC if necessary. */
515630c5
UW
2915 if (gdbarch_decr_pc_after_break (gdbarch))
2916 regcache_write_pc (regcache, pc);
2917
710151dd
PA
2918 return 1;
2919 }
2920 return 0;
2921}
2922
d6b0e80f
AC
2923static int
2924cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2925{
2926 struct lwp_info *event_lp = data;
2927
2928 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2929 if (lp == event_lp)
2930 return 0;
2931
2932 /* If a LWP other than the LWP that we're reporting an event for has
2933 hit a GDB breakpoint (as opposed to some random trap signal),
2934 then just arrange for it to hit it again later. We don't keep
2935 the SIGTRAP status and don't forward the SIGTRAP signal to the
2936 LWP. We will handle the current event, eventually we will resume
2937 all LWPs, and this one will get its breakpoint trap again.
2938
2939 If we do not do this, then we run the risk that the user will
2940 delete or disable the breakpoint, but the LWP will have already
2941 tripped on it. */
2942
00390b84 2943 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
2944 && cancel_breakpoint (lp))
2945 /* Throw away the SIGTRAP. */
2946 lp->status = 0;
d6b0e80f
AC
2947
2948 return 0;
2949}
2950
2951/* Select one LWP out of those that have events pending. */
2952
2953static void
d90e17a7 2954select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2955{
2956 int num_events = 0;
2957 int random_selector;
2958 struct lwp_info *event_lp;
2959
ac264b3b 2960 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2961 (*orig_lp)->status = *status;
2962
2963 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
2964 event_lp = iterate_over_lwps (filter,
2965 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
2966 if (event_lp != NULL)
2967 {
2968 if (debug_linux_nat)
2969 fprintf_unfiltered (gdb_stdlog,
2970 "SEL: Select single-step %s\n",
2971 target_pid_to_str (event_lp->ptid));
2972 }
2973 else
2974 {
2975 /* No single-stepping LWP. Select one at random, out of those
2976 which have had SIGTRAP events. */
2977
2978 /* First see how many SIGTRAP events we have. */
d90e17a7 2979 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
2980
2981 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2982 random_selector = (int)
2983 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2984
2985 if (debug_linux_nat && num_events > 1)
2986 fprintf_unfiltered (gdb_stdlog,
2987 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2988 num_events, random_selector);
2989
d90e17a7
PA
2990 event_lp = iterate_over_lwps (filter,
2991 select_event_lwp_callback,
d6b0e80f
AC
2992 &random_selector);
2993 }
2994
2995 if (event_lp != NULL)
2996 {
2997 /* Switch the event LWP. */
2998 *orig_lp = event_lp;
2999 *status = event_lp->status;
3000 }
3001
3002 /* Flush the wait status for the event LWP. */
3003 (*orig_lp)->status = 0;
3004}
3005
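The selector expression above scales rand () by num_events / (RAND_MAX + 1.0), so every value in 0 .. num_events - 1 is reachable, nothing falls out of range, and the higher-order bits of rand () are used rather than the low-order bits a plain % would pick out. A quick standalone check of that mapping, with num_events fixed at 3 for illustration:

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  int counts[3] = { 0, 0, 0 };
  int i;

  srand (1);
  for (i = 0; i < 300000; i++)
    {
      /* Same formula as select_event_lwp, with num_events == 3.  */
      int pick = (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0));

      counts[pick]++;
    }

  printf ("%d %d %d\n", counts[0], counts[1], counts[2]);
  return 0;
}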
3006/* Return non-zero if LP has been resumed. */
3007
3008static int
3009resumed_callback (struct lwp_info *lp, void *data)
3010{
3011 return lp->resumed;
3012}
3013
d6b0e80f
AC
3014/* Stop an active thread, verify it still exists, then resume it. */
3015
3016static int
3017stop_and_resume_callback (struct lwp_info *lp, void *data)
3018{
3019 struct lwp_info *ptr;
3020
3021 if (!lp->stopped && !lp->signalled)
3022 {
3023 stop_callback (lp, NULL);
3024 stop_wait_callback (lp, NULL);
3025 /* Resume if the lwp still exists. */
3026 for (ptr = lwp_list; ptr; ptr = ptr->next)
3027 if (lp == ptr)
3028 {
3029 resume_callback (lp, NULL);
3030 resume_set_callback (lp, NULL);
3031 }
3032 }
3033 return 0;
3034}
3035
02f3fc28 3036/* Check if we should go on and pass this event to common code.
fa2c6a57 3037 Return the affected lwp if so, or NULL otherwise. */
02f3fc28
PA
3038static struct lwp_info *
3039linux_nat_filter_event (int lwpid, int status, int options)
3040{
3041 struct lwp_info *lp;
3042
3043 lp = find_lwp_pid (pid_to_ptid (lwpid));
3044
3045 /* Check for stop events reported by a process we didn't already
3046 know about - anything not already in our LWP list.
3047
3048 If we're expecting to receive stopped processes after
3049 fork, vfork, and clone events, then we'll just add the
3050 new one to our list and go back to waiting for the event
3051 to be reported - the stopped process might be returned
3052 from waitpid before or after the event is. */
3053 if (WIFSTOPPED (status) && !lp)
3054 {
3055 linux_record_stopped_pid (lwpid, status);
3056 return NULL;
3057 }
3058
3059 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3060 our list, i.e. not part of the current process. This can happen
fd62cb89 3061 if we detach from a program we originally forked and then it
02f3fc28
PA
3062 exits. */
3063 if (!WIFSTOPPED (status) && !lp)
3064 return NULL;
3065
3066 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
3067 CLONE_PTRACE processes which do not use the thread library -
3068 otherwise we wouldn't find the new LWP this way. That doesn't
3069 currently work, and the following code is currently unreachable
3070 due to the two blocks above. If it's fixed some day, this code
3071 should be broken out into a function so that we can also pick up
3072 LWPs from the new interface. */
3073 if (!lp)
3074 {
3075 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
3076 if (options & __WCLONE)
3077 lp->cloned = 1;
3078
3079 gdb_assert (WIFSTOPPED (status)
3080 && WSTOPSIG (status) == SIGSTOP);
3081 lp->signalled = 1;
3082
3083 if (!in_thread_list (inferior_ptid))
3084 {
3085 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
3086 GET_PID (inferior_ptid));
3087 add_thread (inferior_ptid);
3088 }
3089
3090 add_thread (lp->ptid);
3091 }
3092
ca2163eb
PA
3093 /* Handle GNU/Linux's syscall SIGTRAPs. */
3094 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3095 {
3096 /* No longer need the sysgood bit. The ptrace event ends up
3097 recorded in lp->waitstatus if we care for it. We can carry
3098 on handling the event like a regular SIGTRAP from here
3099 on. */
3100 status = W_STOPCODE (SIGTRAP);
3101 if (linux_handle_syscall_trap (lp, 0))
3102 return NULL;
3103 }
02f3fc28 3104
ca2163eb
PA
3105 /* Handle GNU/Linux's extended waitstatus for trace events. */
3106 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3107 {
3108 if (debug_linux_nat)
3109 fprintf_unfiltered (gdb_stdlog,
3110 "LLW: Handling extended status 0x%06x\n",
3111 status);
3112 if (linux_handle_extended_wait (lp, status, 0))
3113 return NULL;
3114 }
3115
26ab7092 3116 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3117 {
3118 /* Save the trap's siginfo in case we need it later. */
3119 save_siginfo (lp);
3120
3121 save_sigtrap (lp);
3122 }
ca2163eb 3123
02f3fc28 3124 /* Check if the thread has exited. */
d90e17a7
PA
3125 if ((WIFEXITED (status) || WIFSIGNALED (status))
3126 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3127 {
9db03742
JB
3128 /* If this is the main thread, we must stop all threads and verify
3129 if they are still alive. This is because in the nptl thread model
3130 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3131 other than the main thread. We only get the main thread exit
3132 signal once all child threads have already exited. If we
3133 stop all the threads and use the stop_wait_callback to check
3134 if they have exited we can determine whether this signal
3135 should be ignored or whether it means the end of the debugged
3136 application, regardless of which threading model is being
5d3b6af6 3137 used. */
02f3fc28
PA
3138 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3139 {
3140 lp->stopped = 1;
d90e17a7
PA
3141 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3142 stop_and_resume_callback, NULL);
02f3fc28
PA
3143 }
3144
3145 if (debug_linux_nat)
3146 fprintf_unfiltered (gdb_stdlog,
3147 "LLW: %s exited.\n",
3148 target_pid_to_str (lp->ptid));
3149
d90e17a7 3150 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3151 {
3152 /* If there is at least one more LWP, then the exit signal
3153 was not the end of the debugged application and should be
3154 ignored. */
3155 exit_lwp (lp);
3156 return NULL;
3157 }
02f3fc28
PA
3158 }
3159
3160 /* Check if the current LWP has previously exited. In the nptl
3161 thread model, LWPs other than the main thread do not issue
3162 signals when they exit so we must check whenever the thread has
3163 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3164 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3165 {
d90e17a7
PA
3166 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3167
02f3fc28
PA
3168 if (debug_linux_nat)
3169 fprintf_unfiltered (gdb_stdlog,
3170 "LLW: %s exited.\n",
3171 target_pid_to_str (lp->ptid));
3172
3173 exit_lwp (lp);
3174
3175 /* Make sure there is at least one thread running. */
d90e17a7 3176 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3177
3178 /* Discard the event. */
3179 return NULL;
3180 }
3181
3182 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3183 an attempt to stop an LWP. */
3184 if (lp->signalled
3185 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3186 {
3187 if (debug_linux_nat)
3188 fprintf_unfiltered (gdb_stdlog,
3189 "LLW: Delayed SIGSTOP caught for %s.\n",
3190 target_pid_to_str (lp->ptid));
3191
3192 /* This is a delayed SIGSTOP. */
3193 lp->signalled = 0;
3194
3195 registers_changed ();
3196
28439f5e 3197 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28
PA
3198 lp->step, TARGET_SIGNAL_0);
3199 if (debug_linux_nat)
3200 fprintf_unfiltered (gdb_stdlog,
3201 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3202 lp->step ?
3203 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3204 target_pid_to_str (lp->ptid));
3205
3206 lp->stopped = 0;
3207 gdb_assert (lp->resumed);
3208
3209 /* Discard the event. */
3210 return NULL;
3211 }
3212
57380f4e
DJ
3213 /* Make sure we don't report a SIGINT that we have already displayed
3214 for another thread. */
3215 if (lp->ignore_sigint
3216 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3217 {
3218 if (debug_linux_nat)
3219 fprintf_unfiltered (gdb_stdlog,
3220 "LLW: Delayed SIGINT caught for %s.\n",
3221 target_pid_to_str (lp->ptid));
3222
3223 /* This is a delayed SIGINT. */
3224 lp->ignore_sigint = 0;
3225
3226 registers_changed ();
28439f5e 3227 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3228 lp->step, TARGET_SIGNAL_0);
3229 if (debug_linux_nat)
3230 fprintf_unfiltered (gdb_stdlog,
3231 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3232 lp->step ?
3233 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3234 target_pid_to_str (lp->ptid));
3235
3236 lp->stopped = 0;
3237 gdb_assert (lp->resumed);
3238
3239 /* Discard the event. */
3240 return NULL;
3241 }
3242
02f3fc28
PA
3243 /* An interesting event. */
3244 gdb_assert (lp);
ca2163eb 3245 lp->status = status;
02f3fc28
PA
3246 return lp;
3247}
3248
d6b0e80f 3249static ptid_t
7feb7d06 3250linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3251 ptid_t ptid, struct target_waitstatus *ourstatus,
3252 int target_options)
d6b0e80f 3253{
7feb7d06 3254 static sigset_t prev_mask;
d6b0e80f
AC
3255 struct lwp_info *lp = NULL;
3256 int options = 0;
3257 int status = 0;
d90e17a7 3258 pid_t pid;
d6b0e80f 3259
b84876c2
PA
3260 if (debug_linux_nat_async)
3261 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3262
f973ed9c
DJ
3263 /* The first time we get here after starting a new inferior, we may
3264 not have added it to the LWP list yet - this is the earliest
3265 moment at which we know its PID. */
d90e17a7 3266 if (ptid_is_pid (inferior_ptid))
f973ed9c 3267 {
27c9d204
PA
3268 /* Upgrade the main thread's ptid. */
3269 thread_change_ptid (inferior_ptid,
3270 BUILD_LWP (GET_PID (inferior_ptid),
3271 GET_PID (inferior_ptid)));
3272
f973ed9c
DJ
3273 lp = add_lwp (inferior_ptid);
3274 lp->resumed = 1;
3275 }
3276
7feb7d06
PA
3277 /* Make sure SIGCHLD is blocked. */
3278 block_child_signals (&prev_mask);
d6b0e80f 3279
d90e17a7
PA
3280 if (ptid_equal (ptid, minus_one_ptid))
3281 pid = -1;
3282 else if (ptid_is_pid (ptid))
3283 /* A request to wait for a specific tgid. This is not possible
3284 with waitpid, so instead, we wait for any child, and leave
3285 children we're not interested in right now with a pending
3286 status to report later. */
3287 pid = -1;
3288 else
3289 pid = GET_LWP (ptid);
3290
d6b0e80f 3291retry:
d90e17a7
PA
3292 lp = NULL;
3293 status = 0;
d6b0e80f 3294
e3e9f5a2
PA
3295 /* Make sure that of those LWPs we want to get an event from, there
3296 is at least one LWP that has been resumed. If there's none, just
3297 bail out. The core may just be flushing asynchronously all
3298 events. */
3299 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3300 {
3301 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3302
3303 if (debug_linux_nat_async)
3304 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3305
3306 restore_child_signals_mask (&prev_mask);
3307 return minus_one_ptid;
3308 }
d6b0e80f
AC
3309
3310 /* First check if there is a LWP with a wait status pending. */
3311 if (pid == -1)
3312 {
3313 /* Any LWP that's been resumed will do. */
d90e17a7 3314 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3315 if (lp)
3316 {
ca2163eb 3317 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3318 fprintf_unfiltered (gdb_stdlog,
3319 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3320 status_to_str (lp->status),
d6b0e80f
AC
3321 target_pid_to_str (lp->ptid));
3322 }
3323
b84876c2 3324 /* But if we don't find one, we'll have to wait, and check both
7feb7d06
PA
3325 cloned and uncloned processes. We start with the cloned
3326 processes. */
d6b0e80f
AC
3327 options = __WCLONE | WNOHANG;
3328 }
3329 else if (is_lwp (ptid))
3330 {
3331 if (debug_linux_nat)
3332 fprintf_unfiltered (gdb_stdlog,
3333 "LLW: Waiting for specific LWP %s.\n",
3334 target_pid_to_str (ptid));
3335
3336 /* We have a specific LWP to check. */
3337 lp = find_lwp_pid (ptid);
3338 gdb_assert (lp);
d6b0e80f 3339
ca2163eb 3340 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3341 fprintf_unfiltered (gdb_stdlog,
3342 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3343 status_to_str (lp->status),
d6b0e80f
AC
3344 target_pid_to_str (lp->ptid));
3345
3346 /* If we have to wait, take into account whether PID is a cloned
3347 process or not. And we have to convert it to something that
3348 the layer beneath us can understand. */
3349 options = lp->cloned ? __WCLONE : 0;
3350 pid = GET_LWP (ptid);
d90e17a7
PA
3351
3352 /* We check for lp->waitstatus in addition to lp->status,
3353 because we can have pending process exits recorded in
3354 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3355 an additional lp->status_p flag. */
ca2163eb 3356 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3357 lp = NULL;
d6b0e80f
AC
3358 }
3359
d90e17a7 3360 if (lp && lp->signalled)
d6b0e80f
AC
3361 {
3362 /* A pending SIGSTOP may interfere with the normal stream of
3363 events. In a typical case where interference is a problem,
3364 we have a SIGSTOP signal pending for LWP A while
3365 single-stepping it, encounter an event in LWP B, and take the
3366 pending SIGSTOP while trying to stop LWP A. After processing
3367 the event in LWP B, LWP A is continued, and we'll never see
3368 the SIGTRAP associated with the last time we were
3369 single-stepping LWP A. */
3370
3371 /* Resume the thread. It should halt immediately returning the
3372 pending SIGSTOP. */
3373 registers_changed ();
28439f5e 3374 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3375 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3376 if (debug_linux_nat)
3377 fprintf_unfiltered (gdb_stdlog,
3378 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3379 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3380 target_pid_to_str (lp->ptid));
3381 lp->stopped = 0;
3382 gdb_assert (lp->resumed);
3383
ca2163eb
PA
3384 /* Catch the pending SIGSTOP. */
3385 status = lp->status;
3386 lp->status = 0;
3387
d6b0e80f 3388 stop_wait_callback (lp, NULL);
ca2163eb
PA
3389
3390 /* If the lp->status field isn't empty, we caught another signal
3391 while flushing the SIGSTOP. Return it back to the event
3392 queue of the LWP, as we already have an event to handle. */
3393 if (lp->status)
3394 {
3395 if (debug_linux_nat)
3396 fprintf_unfiltered (gdb_stdlog,
3397 "LLW: kill %s, %s\n",
3398 target_pid_to_str (lp->ptid),
3399 status_to_str (lp->status));
3400 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3401 }
3402
3403 lp->status = status;
d6b0e80f
AC
3404 }
3405
b84876c2
PA
3406 if (!target_can_async_p ())
3407 {
3408 /* Causes SIGINT to be passed on to the attached process. */
3409 set_sigint_trap ();
b84876c2 3410 }
d6b0e80f 3411
47608cb1
PA
3412 /* Translate generic target_wait options into waitpid options. */
3413 if (target_options & TARGET_WNOHANG)
3414 options |= WNOHANG;
7feb7d06 3415
d90e17a7 3416 while (lp == NULL)
d6b0e80f
AC
3417 {
3418 pid_t lwpid;
3419
7feb7d06 3420 lwpid = my_waitpid (pid, &status, options);
b84876c2 3421
d6b0e80f
AC
3422 if (lwpid > 0)
3423 {
3424 gdb_assert (pid == -1 || lwpid == pid);
3425
3426 if (debug_linux_nat)
3427 {
3428 fprintf_unfiltered (gdb_stdlog,
3429 "LLW: waitpid %ld received %s\n",
3430 (long) lwpid, status_to_str (status));
3431 }
3432
02f3fc28 3433 lp = linux_nat_filter_event (lwpid, status, options);
d90e17a7 3434
33355866
JK
3435 /* STATUS is now no longer valid, use LP->STATUS instead. */
3436 status = 0;
3437
d90e17a7
PA
3438 if (lp
3439 && ptid_is_pid (ptid)
3440 && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
d6b0e80f 3441 {
e3e9f5a2
PA
3442 gdb_assert (lp->resumed);
3443
d90e17a7 3444 if (debug_linux_nat)
3e43a32a
MS
3445 fprintf (stderr,
3446 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3447 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3448
ca2163eb 3449 if (WIFSTOPPED (lp->status))
d90e17a7 3450 {
ca2163eb 3451 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3452 {
e3e9f5a2
PA
3453 /* Cancel breakpoint hits. The breakpoint may
3454 be removed before we fetch events from this
3455 process to report to the core. It is best
3456 not to assume the moribund breakpoints
 3457 heuristic always handles these cases --- too many
 3458 events could go through to the core before this one
 3459 is handled. All-stop
3460 always cancels breakpoint hits in all
3461 threads. */
3462 if (non_stop
00390b84 3463 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3464 && cancel_breakpoint (lp))
3465 {
3466 /* Throw away the SIGTRAP. */
3467 lp->status = 0;
3468
3469 if (debug_linux_nat)
3470 fprintf (stderr,
3e43a32a
MS
3471 "LLW: LWP %ld hit a breakpoint while"
3472 " waiting for another process;"
3473 " cancelled it\n",
e3e9f5a2
PA
3474 ptid_get_lwp (lp->ptid));
3475 }
3476 lp->stopped = 1;
d90e17a7
PA
3477 }
3478 else
3479 {
3480 lp->stopped = 1;
3481 lp->signalled = 0;
3482 }
3483 }
33355866 3484 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3485 {
3486 if (debug_linux_nat)
3e43a32a
MS
3487 fprintf (stderr,
3488 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3489 ptid_get_lwp (lp->ptid));
3490
3491 /* This was the last lwp in the process. Since
3492 events are serialized to GDB core, and we can't
3493 report this one right now, but GDB core and the
3494 other target layers will want to be notified
3495 about the exit code/signal, leave the status
3496 pending for the next time we're able to report
3497 it. */
d90e17a7
PA
3498
3499 /* Prevent trying to stop this thread again. We'll
3500 never try to resume it because it has a pending
3501 status. */
3502 lp->stopped = 1;
3503
 3504 /* Dead LWPs aren't expected to report a pending
 3505 SIGSTOP. */
3506 lp->signalled = 0;
3507
3508 /* Store the pending event in the waitstatus as
3509 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3510 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3511 }
3512
3513 /* Keep looking. */
3514 lp = NULL;
d6b0e80f
AC
3515 continue;
3516 }
3517
d90e17a7
PA
3518 if (lp)
3519 break;
3520 else
3521 {
3522 if (pid == -1)
3523 {
 3524 /* waitpid did return something. Start over. */
3525 options |= __WCLONE;
3526 }
3527 continue;
3528 }
d6b0e80f
AC
3529 }
3530
3531 if (pid == -1)
3532 {
3533 /* Alternate between checking cloned and uncloned processes. */
3534 options ^= __WCLONE;
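 /* Worked example (for illustration): the loop above started
 with __WCLONE set, so the first toggle here clears it and the
 next waitpid polls non-clone children; the following toggle
 sets it again for clone children. Whenever __WCLONE is set
 again after a toggle, both kinds have been checked once, which
 is what the test below relies on. */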
3535
b84876c2
PA
3536 /* And every time we have checked both:
3537 In async mode, return to event loop;
3538 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 3539 if (options & __WCLONE)
b84876c2 3540 {
47608cb1 3541 if (target_options & TARGET_WNOHANG)
b84876c2
PA
3542 {
3543 /* No interesting event. */
3544 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3545
b84876c2
PA
3546 if (debug_linux_nat_async)
3547 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3548
7feb7d06 3549 restore_child_signals_mask (&prev_mask);
b84876c2
PA
3550 return minus_one_ptid;
3551 }
3552
3553 sigsuspend (&suspend_mask);
3554 }
d6b0e80f 3555 }
28736962
PA
3556 else if (target_options & TARGET_WNOHANG)
3557 {
3558 /* No interesting event for PID yet. */
3559 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3560
3561 if (debug_linux_nat_async)
3562 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3563
3564 restore_child_signals_mask (&prev_mask);
3565 return minus_one_ptid;
3566 }
d6b0e80f
AC
3567
3568 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3569 gdb_assert (lp == NULL);
d6b0e80f
AC
3570 }
3571
b84876c2 3572 if (!target_can_async_p ())
d26b5354 3573 clear_sigint_trap ();
d6b0e80f
AC
3574
3575 gdb_assert (lp);
3576
ca2163eb
PA
3577 status = lp->status;
3578 lp->status = 0;
3579
d6b0e80f
AC
3580 /* Don't report signals that GDB isn't interested in, such as
3581 signals that are neither printed nor stopped upon. Stopping all
3582 threads can be a bit time-consuming so if we want decent
3583 performance with heavily multi-threaded programs, especially when
3584 they're using a high frequency timer, we'd better avoid it if we
3585 can. */
3586
3587 if (WIFSTOPPED (status))
3588 {
423ec54c 3589 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b0e80f 3590
2455069d
UW
3591 /* When using hardware single-step, we need to report every signal.
3592 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3593 if (!lp->step
2455069d 3594 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3595 {
 3596 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3597 here? It is not clear we should. GDB may not expect
3598 other threads to run. On the other hand, not resuming
3599 newly attached threads may cause an unwanted delay in
3600 getting them running. */
3601 registers_changed ();
28439f5e 3602 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3603 lp->step, signo);
d6b0e80f
AC
3604 if (debug_linux_nat)
3605 fprintf_unfiltered (gdb_stdlog,
3606 "LLW: %s %s, %s (preempt 'handle')\n",
3607 lp->step ?
3608 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3609 target_pid_to_str (lp->ptid),
423ec54c
JK
3610 (signo != TARGET_SIGNAL_0
3611 ? strsignal (target_signal_to_host (signo))
3612 : "0"));
d6b0e80f 3613 lp->stopped = 0;
d6b0e80f
AC
3614 goto retry;
3615 }
3616
1ad15515 3617 if (!non_stop)
d6b0e80f 3618 {
1ad15515
PA
3619 /* Only do the below in all-stop, as we currently use SIGINT
3620 to implement target_stop (see linux_nat_stop) in
3621 non-stop. */
3622 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3623 {
3624 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3625 forwarded to the entire process group, that is, all LWPs
3626 will receive it - unless they're using CLONE_THREAD to
3627 share signals. Since we only want to report it once, we
3628 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3629 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3630 set_ignore_sigint, NULL);
1ad15515
PA
3631 lp->ignore_sigint = 0;
3632 }
3633 else
3634 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3635 }
3636 }
3637
3638 /* This LWP is stopped now. */
3639 lp->stopped = 1;
3640
3641 if (debug_linux_nat)
3642 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3643 status_to_str (status), target_pid_to_str (lp->ptid));
3644
4c28f408
PA
3645 if (!non_stop)
3646 {
 3647 /* Now stop all other LWPs ... */
d90e17a7 3648 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3649
3650 /* ... and wait until all of them have reported back that
3651 they're no longer running. */
d90e17a7 3652 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3653
3654 /* If we're not waiting for a specific LWP, choose an event LWP
3655 from among those that have had events. Giving equal priority
3656 to all LWPs that have had events helps prevent
3657 starvation. */
3658 if (pid == -1)
d90e17a7 3659 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3660
e3e9f5a2
PA
3661 /* Now that we've selected our final event LWP, cancel any
3662 breakpoints in other LWPs that have hit a GDB breakpoint.
3663 See the comment in cancel_breakpoints_callback to find out
3664 why. */
3665 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3666
3667 /* In all-stop, from the core's perspective, all LWPs are now
3668 stopped until a new resume action is sent over. */
3669 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3670 }
3671 else
3672 lp->resumed = 0;
d6b0e80f 3673
26ab7092 3674 if (linux_nat_status_is_event (status))
d6b0e80f 3675 {
d6b0e80f
AC
3676 if (debug_linux_nat)
3677 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3678 "LLW: trap ptid is %s.\n",
3679 target_pid_to_str (lp->ptid));
d6b0e80f 3680 }
d6b0e80f
AC
3681
3682 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3683 {
3684 *ourstatus = lp->waitstatus;
3685 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3686 }
3687 else
3688 store_waitstatus (ourstatus, status);
3689
b84876c2
PA
3690 if (debug_linux_nat_async)
3691 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3692
7feb7d06 3693 restore_child_signals_mask (&prev_mask);
1e225492
JK
3694
3695 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3696 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3697 lp->core = -1;
3698 else
3699 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3700
f973ed9c 3701 return lp->ptid;
d6b0e80f
AC
3702}
3703
e3e9f5a2
PA
3704/* Resume LWPs that are currently stopped without any pending status
3705 to report, but are resumed from the core's perspective. */
3706
3707static int
3708resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3709{
3710 ptid_t *wait_ptid_p = data;
3711
3712 if (lp->stopped
3713 && lp->resumed
3714 && lp->status == 0
3715 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3716 {
3717 gdb_assert (is_executing (lp->ptid));
3718
3719 /* Don't bother if there's a breakpoint at PC that we'd hit
3720 immediately, and we're not waiting for this LWP. */
3721 if (!ptid_match (lp->ptid, *wait_ptid_p))
3722 {
3723 struct regcache *regcache = get_thread_regcache (lp->ptid);
3724 CORE_ADDR pc = regcache_read_pc (regcache);
3725
3726 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3727 return 0;
3728 }
3729
3730 if (debug_linux_nat)
3731 fprintf_unfiltered (gdb_stdlog,
3732 "RSRL: resuming stopped-resumed LWP %s\n",
3733 target_pid_to_str (lp->ptid));
3734
3735 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3736 lp->step, TARGET_SIGNAL_0);
3737 lp->stopped = 0;
3738 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3739 lp->stopped_by_watchpoint = 0;
3740 }
3741
3742 return 0;
3743}
3744
7feb7d06
PA
3745static ptid_t
3746linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3747 ptid_t ptid, struct target_waitstatus *ourstatus,
3748 int target_options)
7feb7d06
PA
3749{
3750 ptid_t event_ptid;
3751
3752 if (debug_linux_nat)
3e43a32a
MS
3753 fprintf_unfiltered (gdb_stdlog,
3754 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
3755
3756 /* Flush the async file first. */
3757 if (target_can_async_p ())
3758 async_file_flush ();
3759
e3e9f5a2
PA
3760 /* Resume LWPs that are currently stopped without any pending status
3761 to report, but are resumed from the core's perspective. LWPs get
3762 in this state if we find them stopping at a time we're not
3763 interested in reporting the event (target_wait on a
3764 specific_process, for example, see linux_nat_wait_1), and
3765 meanwhile the event became uninteresting. Don't bother resuming
3766 LWPs we're not going to wait for if they'd stop immediately. */
3767 if (non_stop)
3768 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3769
47608cb1 3770 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3771
3772 /* If we requested any event, and something came out, assume there
3773 may be more. If we requested a specific lwp or process, also
3774 assume there may be more. */
3775 if (target_can_async_p ()
3776 && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3777 || !ptid_equal (ptid, minus_one_ptid)))
3778 async_file_mark ();
3779
3780 /* Get ready for the next event. */
3781 if (target_can_async_p ())
3782 target_async (inferior_event_handler, 0);
3783
3784 return event_ptid;
3785}
3786
d6b0e80f
AC
3787static int
3788kill_callback (struct lwp_info *lp, void *data)
3789{
3790 errno = 0;
3791 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3792 if (debug_linux_nat)
3793 fprintf_unfiltered (gdb_stdlog,
3794 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3795 target_pid_to_str (lp->ptid),
3796 errno ? safe_strerror (errno) : "OK");
3797
3798 return 0;
3799}
3800
3801static int
3802kill_wait_callback (struct lwp_info *lp, void *data)
3803{
3804 pid_t pid;
3805
 3806 /* We must make sure that there are no pending events (delayed
 3807 SIGSTOPs, pending SIGTRAPs, etc.) left over, so that the current
 3808 program doesn't interfere with any following debugging session. */
3809
3810 /* For cloned processes we must check both with __WCLONE and
3811 without, since the exit status of a cloned process isn't reported
3812 with __WCLONE. */
3813 if (lp->cloned)
3814 {
3815 do
3816 {
58aecb61 3817 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3818 if (pid != (pid_t) -1)
d6b0e80f 3819 {
e85a822c
DJ
3820 if (debug_linux_nat)
3821 fprintf_unfiltered (gdb_stdlog,
3822 "KWC: wait %s received unknown.\n",
3823 target_pid_to_str (lp->ptid));
3824 /* The Linux kernel sometimes fails to kill a thread
3825 completely after PTRACE_KILL; that goes from the stop
3826 point in do_fork out to the one in
 3827 get_signal_to_deliver and waits again. So kill it
3828 again. */
3829 kill_callback (lp, NULL);
d6b0e80f
AC
3830 }
3831 }
3832 while (pid == GET_LWP (lp->ptid));
3833
3834 gdb_assert (pid == -1 && errno == ECHILD);
3835 }
3836
3837 do
3838 {
58aecb61 3839 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3840 if (pid != (pid_t) -1)
d6b0e80f 3841 {
e85a822c
DJ
3842 if (debug_linux_nat)
3843 fprintf_unfiltered (gdb_stdlog,
 3844 "KWC: wait %s received unknown.\n",
3845 target_pid_to_str (lp->ptid));
3846 /* See the call to kill_callback above. */
3847 kill_callback (lp, NULL);
d6b0e80f
AC
3848 }
3849 }
3850 while (pid == GET_LWP (lp->ptid));
3851
3852 gdb_assert (pid == -1 && errno == ECHILD);
3853 return 0;
3854}
3855
3856static void
7d85a9c0 3857linux_nat_kill (struct target_ops *ops)
d6b0e80f 3858{
f973ed9c
DJ
3859 struct target_waitstatus last;
3860 ptid_t last_ptid;
3861 int status;
d6b0e80f 3862
f973ed9c
DJ
3863 /* If we're stopped while forking and we haven't followed yet,
3864 kill the other task. We need to do this first because the
3865 parent will be sleeping if this is a vfork. */
d6b0e80f 3866
f973ed9c 3867 get_last_target_status (&last_ptid, &last);
d6b0e80f 3868
f973ed9c
DJ
3869 if (last.kind == TARGET_WAITKIND_FORKED
3870 || last.kind == TARGET_WAITKIND_VFORKED)
3871 {
3a3e9ee3 3872 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3873 wait (&status);
3874 }
3875
3876 if (forks_exist_p ())
7feb7d06 3877 linux_fork_killall ();
f973ed9c
DJ
3878 else
3879 {
d90e17a7 3880 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3881
4c28f408
PA
3882 /* Stop all threads before killing them, since ptrace requires
3883 that the thread is stopped to sucessfully PTRACE_KILL. */
d90e17a7 3884 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3885 /* ... and wait until all of them have reported back that
3886 they're no longer running. */
d90e17a7 3887 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3888
f973ed9c 3889 /* Kill all LWP's ... */
d90e17a7 3890 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3891
3892 /* ... and wait until we've flushed all events. */
d90e17a7 3893 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3894 }
3895
3896 target_mourn_inferior ();
d6b0e80f
AC
3897}
3898
3899static void
136d6dae 3900linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3901{
d90e17a7 3902 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 3903
f973ed9c 3904 if (! forks_exist_p ())
d90e17a7
PA
3905 /* Normal case, no other forks available. */
3906 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3907 else
3908 /* Multi-fork case. The current inferior_ptid has exited, but
3909 there are other viable forks to debug. Delete the exiting
3910 one and context-switch to the first available. */
3911 linux_fork_mourn_inferior ();
d6b0e80f
AC
3912}
3913
5b009018
PA
3914/* Convert a native/host siginfo object, into/from the siginfo in the
3915 layout of the inferiors' architecture. */
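/* (As the fallback memcpy below implies, DIRECTION == 1 copies
 INF_SIGINFO into SIGINFO - inferior layout to native - while any
 other value copies SIGINFO into INF_SIGINFO - native to inferior
 layout.) */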
3916
3917static void
3918siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3919{
3920 int done = 0;
3921
3922 if (linux_nat_siginfo_fixup != NULL)
3923 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3924
3925 /* If there was no callback, or the callback didn't do anything,
3926 then just do a straight memcpy. */
3927 if (!done)
3928 {
3929 if (direction == 1)
3930 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3931 else
3932 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3933 }
3934}
3935
4aa995e1
PA
3936static LONGEST
3937linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3938 const char *annex, gdb_byte *readbuf,
3939 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3940{
4aa995e1
PA
3941 int pid;
3942 struct siginfo siginfo;
5b009018 3943 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
3944
3945 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3946 gdb_assert (readbuf || writebuf);
3947
3948 pid = GET_LWP (inferior_ptid);
3949 if (pid == 0)
3950 pid = GET_PID (inferior_ptid);
3951
3952 if (offset > sizeof (siginfo))
3953 return -1;
3954
3955 errno = 0;
3956 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3957 if (errno != 0)
3958 return -1;
3959
5b009018
PA
3960 /* When GDB is built as a 64-bit application, ptrace writes into
3961 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3962 inferior with a 64-bit GDB should look the same as debugging it
3963 with a 32-bit GDB, we need to convert it. GDB core always sees
3964 the converted layout, so any read/write will have to be done
3965 post-conversion. */
3966 siginfo_fixup (&siginfo, inf_siginfo, 0);
3967
4aa995e1
PA
3968 if (offset + len > sizeof (siginfo))
3969 len = sizeof (siginfo) - offset;
3970
3971 if (readbuf != NULL)
5b009018 3972 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3973 else
3974 {
5b009018
PA
3975 memcpy (inf_siginfo + offset, writebuf, len);
3976
3977 /* Convert back to ptrace layout before flushing it out. */
3978 siginfo_fixup (&siginfo, inf_siginfo, 1);
3979
4aa995e1
PA
3980 errno = 0;
3981 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3982 if (errno != 0)
3983 return -1;
3984 }
3985
3986 return len;
3987}
3988
10d6c8cd
DJ
3989static LONGEST
3990linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3991 const char *annex, gdb_byte *readbuf,
3992 const gdb_byte *writebuf,
3993 ULONGEST offset, LONGEST len)
d6b0e80f 3994{
4aa995e1 3995 struct cleanup *old_chain;
10d6c8cd 3996 LONGEST xfer;
d6b0e80f 3997
4aa995e1
PA
3998 if (object == TARGET_OBJECT_SIGNAL_INFO)
3999 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4000 offset, len);
4001
c35b1492
PA
4002 /* The target is connected but no live inferior is selected. Pass
4003 this request down to a lower stratum (e.g., the executable
4004 file). */
4005 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4006 return 0;
4007
4aa995e1
PA
4008 old_chain = save_inferior_ptid ();
4009
d6b0e80f
AC
4010 if (is_lwp (inferior_ptid))
4011 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4012
10d6c8cd
DJ
4013 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4014 offset, len);
d6b0e80f
AC
4015
4016 do_cleanups (old_chain);
4017 return xfer;
4018}
4019
4020static int
28439f5e 4021linux_thread_alive (ptid_t ptid)
d6b0e80f 4022{
8c6a60d1 4023 int err, tmp_errno;
4c28f408 4024
d6b0e80f
AC
4025 gdb_assert (is_lwp (ptid));
4026
4c28f408
PA
4027 /* Send signal 0 instead of anything ptrace, because ptracing a
4028 running thread errors out claiming that the thread doesn't
4029 exist. */
4030 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4031 tmp_errno = errno;
d6b0e80f
AC
4032 if (debug_linux_nat)
4033 fprintf_unfiltered (gdb_stdlog,
4c28f408 4034 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4035 target_pid_to_str (ptid),
8c6a60d1 4036 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4037
4c28f408 4038 if (err != 0)
d6b0e80f
AC
4039 return 0;
4040
4041 return 1;
4042}
4043
28439f5e
PA
4044static int
4045linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4046{
4047 return linux_thread_alive (ptid);
4048}
4049
d6b0e80f 4050static char *
117de6a9 4051linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4052{
4053 static char buf[64];
4054
a0ef4274 4055 if (is_lwp (ptid)
d90e17a7
PA
4056 && (GET_PID (ptid) != GET_LWP (ptid)
4057 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4058 {
4059 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4060 return buf;
4061 }
4062
4063 return normal_pid_to_str (ptid);
4064}
4065
4694da01
TT
4066static char *
4067linux_nat_thread_name (struct thread_info *thr)
4068{
4069 int pid = ptid_get_pid (thr->ptid);
4070 long lwp = ptid_get_lwp (thr->ptid);
4071#define FORMAT "/proc/%d/task/%ld/comm"
4072 char buf[sizeof (FORMAT) + 30];
4073 FILE *comm_file;
4074 char *result = NULL;
4075
4076 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4077 comm_file = fopen (buf, "r");
4078 if (comm_file)
4079 {
4080 /* Not exported by the kernel, so we define it here. */
4081#define COMM_LEN 16
4082 static char line[COMM_LEN + 1];
4083
4084 if (fgets (line, sizeof (line), comm_file))
4085 {
4086 char *nl = strchr (line, '\n');
4087
4088 if (nl)
4089 *nl = '\0';
4090 if (*line != '\0')
4091 result = line;
4092 }
4093
4094 fclose (comm_file);
4095 }
4096
4097#undef COMM_LEN
4098#undef FORMAT
4099
4100 return result;
4101}
4102
dba24537
AC
4103/* Accepts an integer PID; returns a string representing a file that
4104 can be opened to get the symbols for the child process. */
4105
6d8fd2b7
UW
4106static char *
4107linux_child_pid_to_exec_file (int pid)
dba24537
AC
4108{
4109 char *name1, *name2;
4110
4111 name1 = xmalloc (MAXPATHLEN);
4112 name2 = xmalloc (MAXPATHLEN);
4113 make_cleanup (xfree, name1);
4114 make_cleanup (xfree, name2);
4115 memset (name2, 0, MAXPATHLEN);
4116
4117 sprintf (name1, "/proc/%d/exe", pid);
4118 if (readlink (name1, name2, MAXPATHLEN) > 0)
4119 return name2;
4120 else
4121 return name1;
4122}
4123
4124/* Service function for corefiles and info proc. */
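/* For illustration, a typical /proc/PID/maps line that the fscanf
 pattern below is meant to match looks like

 00400000-0040b000 r-xp 00000000 08:01 1234567   /bin/cat

 i.e. start and end addresses, permissions, file offset, device,
 inode and an optional filename (the values shown are made up). */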
4125
4126static int
4127read_mapping (FILE *mapfile,
4128 long long *addr,
4129 long long *endaddr,
4130 char *permissions,
4131 long long *offset,
4132 char *device, long long *inode, char *filename)
4133{
4134 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4135 addr, endaddr, permissions, offset, device, inode);
4136
2e14c2ea
MS
4137 filename[0] = '\0';
4138 if (ret > 0 && ret != EOF)
dba24537
AC
4139 {
4140 /* Eat everything up to EOL for the filename. This will prevent
4141 weird filenames (such as one with embedded whitespace) from
4142 confusing this code. It also makes this code more robust in
4143 respect to annotations the kernel may add after the filename.
4144
4145 Note the filename is used for informational purposes
4146 only. */
4147 ret += fscanf (mapfile, "%[^\n]\n", filename);
4148 }
2e14c2ea 4149
dba24537
AC
4150 return (ret != 0 && ret != EOF);
4151}
4152
4153/* Fills the "to_find_memory_regions" target vector. Lists the memory
4154 regions in the inferior for a corefile. */
4155
4156static int
b8edc417 4157linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
dba24537 4158{
89ecc4f5 4159 int pid = PIDGET (inferior_ptid);
dba24537
AC
4160 char mapsfilename[MAXPATHLEN];
4161 FILE *mapsfile;
4162 long long addr, endaddr, size, offset, inode;
4163 char permissions[8], device[8], filename[MAXPATHLEN];
4164 int read, write, exec;
7c8a8b04 4165 struct cleanup *cleanup;
dba24537
AC
4166
4167 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 4168 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 4169 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 4170 error (_("Could not open %s."), mapsfilename);
7c8a8b04 4171 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
4172
4173 if (info_verbose)
4174 fprintf_filtered (gdb_stdout,
4175 "Reading memory regions from %s\n", mapsfilename);
4176
4177 /* Now iterate until end-of-file. */
4178 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4179 &offset, &device[0], &inode, &filename[0]))
4180 {
4181 size = endaddr - addr;
4182
4183 /* Get the segment's permissions. */
4184 read = (strchr (permissions, 'r') != 0);
4185 write = (strchr (permissions, 'w') != 0);
4186 exec = (strchr (permissions, 'x') != 0);
4187
4188 if (info_verbose)
4189 {
4190 fprintf_filtered (gdb_stdout,
2244ba2e
PM
4191 "Save segment, %s bytes at %s (%c%c%c)",
4192 plongest (size), paddress (target_gdbarch, addr),
dba24537
AC
4193 read ? 'r' : ' ',
4194 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 4195 if (filename[0])
dba24537
AC
4196 fprintf_filtered (gdb_stdout, " for %s", filename);
4197 fprintf_filtered (gdb_stdout, "\n");
4198 }
4199
4200 /* Invoke the callback function to create the corefile
4201 segment. */
4202 func (addr, size, read, write, exec, obfd);
4203 }
7c8a8b04 4204 do_cleanups (cleanup);
dba24537
AC
4205 return 0;
4206}
4207
2020b7ab
PA
4208static int
4209find_signalled_thread (struct thread_info *info, void *data)
4210{
16c381f0 4211 if (info->suspend.stop_signal != TARGET_SIGNAL_0
2020b7ab
PA
4212 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4213 return 1;
4214
4215 return 0;
4216}
4217
4218static enum target_signal
4219find_stop_signal (void)
4220{
4221 struct thread_info *info =
4222 iterate_over_threads (find_signalled_thread, NULL);
4223
4224 if (info)
16c381f0 4225 return info->suspend.stop_signal;
2020b7ab
PA
4226 else
4227 return TARGET_SIGNAL_0;
4228}
4229
dba24537
AC
4230/* Records the thread's register state for the corefile note
4231 section. */
4232
4233static char *
4234linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4235 char *note_data, int *note_size,
4236 enum target_signal stop_signal)
dba24537 4237{
dba24537 4238 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4239 struct gdbarch *gdbarch = target_gdbarch;
4240 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4241 const struct regset *regset;
55e969c1 4242 int core_regset_p;
594f7785 4243 struct cleanup *old_chain;
17ea7499
CES
4244 struct core_regset_section *sect_list;
4245 char *gdb_regset;
594f7785
UW
4246
4247 old_chain = save_inferior_ptid ();
4248 inferior_ptid = ptid;
4249 target_fetch_registers (regcache, -1);
4250 do_cleanups (old_chain);
4f844a66
DM
4251
4252 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4253 sect_list = gdbarch_core_regset_sections (gdbarch);
4254
17ea7499
CES
4255 /* The loop below uses the new struct core_regset_section, which stores
4256 the supported section names and sizes for the core file. Note that
4257 note PRSTATUS needs to be treated specially. But the other notes are
4258 structurally the same, so they can benefit from the new struct. */
4259 if (core_regset_p && sect_list != NULL)
4260 while (sect_list->sect_name != NULL)
4261 {
17ea7499
CES
4262 regset = gdbarch_regset_from_core_section (gdbarch,
4263 sect_list->sect_name,
4264 sect_list->size);
4265 gdb_assert (regset && regset->collect_regset);
4266 gdb_regset = xmalloc (sect_list->size);
4267 regset->collect_regset (regset, regcache, -1,
4268 gdb_regset, sect_list->size);
2f2241f1
UW
4269
4270 if (strcmp (sect_list->sect_name, ".reg") == 0)
4271 note_data = (char *) elfcore_write_prstatus
4272 (obfd, note_data, note_size,
857d11d0
JK
4273 lwp, target_signal_to_host (stop_signal),
4274 gdb_regset);
2f2241f1
UW
4275 else
4276 note_data = (char *) elfcore_write_register_note
4277 (obfd, note_data, note_size,
4278 sect_list->sect_name, gdb_regset,
4279 sect_list->size);
17ea7499
CES
4280 xfree (gdb_regset);
4281 sect_list++;
4282 }
dba24537 4283
17ea7499
CES
 4284 /* For architectures that do not have the struct core_regset_section
4285 implemented, we use the old method. When all the architectures have
4286 the new support, the code below should be deleted. */
4f844a66 4287 else
17ea7499 4288 {
2f2241f1
UW
4289 gdb_gregset_t gregs;
4290 gdb_fpregset_t fpregs;
4291
4292 if (core_regset_p
4293 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3e43a32a
MS
4294 sizeof (gregs)))
4295 != NULL && regset->collect_regset != NULL)
2f2241f1
UW
4296 regset->collect_regset (regset, regcache, -1,
4297 &gregs, sizeof (gregs));
4298 else
4299 fill_gregset (regcache, &gregs, -1);
4300
857d11d0
JK
4301 note_data = (char *) elfcore_write_prstatus
4302 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4303 &gregs);
2f2241f1 4304
17ea7499
CES
4305 if (core_regset_p
4306 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3e43a32a
MS
4307 sizeof (fpregs)))
4308 != NULL && regset->collect_regset != NULL)
17ea7499
CES
4309 regset->collect_regset (regset, regcache, -1,
4310 &fpregs, sizeof (fpregs));
4311 else
4312 fill_fpregset (regcache, &fpregs, -1);
4313
4314 note_data = (char *) elfcore_write_prfpreg (obfd,
4315 note_data,
4316 note_size,
4317 &fpregs, sizeof (fpregs));
4318 }
4f844a66 4319
dba24537
AC
4320 return note_data;
4321}
4322
4323struct linux_nat_corefile_thread_data
4324{
4325 bfd *obfd;
4326 char *note_data;
4327 int *note_size;
4328 int num_notes;
2020b7ab 4329 enum target_signal stop_signal;
dba24537
AC
4330};
4331
4332/* Called by gdbthread.c once per thread. Records the thread's
4333 register state for the corefile note section. */
4334
4335static int
4336linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4337{
4338 struct linux_nat_corefile_thread_data *args = data;
dba24537 4339
dba24537
AC
4340 args->note_data = linux_nat_do_thread_registers (args->obfd,
4341 ti->ptid,
4342 args->note_data,
2020b7ab
PA
4343 args->note_size,
4344 args->stop_signal);
dba24537 4345 args->num_notes++;
56be3814 4346
dba24537
AC
4347 return 0;
4348}
4349
efcbbd14
UW
4350/* Enumerate spufs IDs for process PID. */
4351
4352static void
4353iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4354{
4355 char path[128];
4356 DIR *dir;
4357 struct dirent *entry;
4358
4359 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4360 dir = opendir (path);
4361 if (!dir)
4362 return;
4363
4364 rewinddir (dir);
4365 while ((entry = readdir (dir)) != NULL)
4366 {
4367 struct stat st;
4368 struct statfs stfs;
4369 int fd;
4370
4371 fd = atoi (entry->d_name);
4372 if (!fd)
4373 continue;
4374
4375 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4376 if (stat (path, &st) != 0)
4377 continue;
4378 if (!S_ISDIR (st.st_mode))
4379 continue;
4380
4381 if (statfs (path, &stfs) != 0)
4382 continue;
4383 if (stfs.f_type != SPUFS_MAGIC)
4384 continue;
4385
4386 callback (data, fd);
4387 }
4388
4389 closedir (dir);
4390}
4391
4392/* Generate corefile notes for SPU contexts. */
4393
4394struct linux_spu_corefile_data
4395{
4396 bfd *obfd;
4397 char *note_data;
4398 int *note_size;
4399};
4400
4401static void
4402linux_spu_corefile_callback (void *data, int fd)
4403{
4404 struct linux_spu_corefile_data *args = data;
4405 int i;
4406
4407 static const char *spu_files[] =
4408 {
4409 "object-id",
4410 "mem",
4411 "regs",
4412 "fpcr",
4413 "lslr",
4414 "decr",
4415 "decr_status",
4416 "signal1",
4417 "signal1_type",
4418 "signal2",
4419 "signal2_type",
4420 "event_mask",
4421 "event_status",
4422 "mbox_info",
4423 "ibox_info",
4424 "wbox_info",
4425 "dma_info",
4426 "proxydma_info",
4427 };
4428
4429 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4430 {
4431 char annex[32], note_name[32];
4432 gdb_byte *spu_data;
4433 LONGEST spu_len;
4434
4435 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4436 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4437 annex, &spu_data);
4438 if (spu_len > 0)
4439 {
4440 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4441 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4442 args->note_size, note_name,
4443 NT_SPU, spu_data, spu_len);
4444 xfree (spu_data);
4445 }
4446 }
4447}
4448
4449static char *
4450linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4451{
4452 struct linux_spu_corefile_data args;
e0881a8e 4453
efcbbd14
UW
4454 args.obfd = obfd;
4455 args.note_data = note_data;
4456 args.note_size = note_size;
4457
4458 iterate_over_spus (PIDGET (inferior_ptid),
4459 linux_spu_corefile_callback, &args);
4460
4461 return args.note_data;
4462}
4463
dba24537
AC
4464/* Fills the "to_make_corefile_note" target vector. Builds the note
4465 section for a corefile, and returns it in a malloc buffer. */
4466
4467static char *
4468linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4469{
4470 struct linux_nat_corefile_thread_data thread_args;
d99148ef 4471 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4472 char fname[16] = { '\0' };
d99148ef 4473 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4474 char psargs[80] = { '\0' };
4475 char *note_data = NULL;
d90e17a7 4476 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4477 gdb_byte *auxv;
dba24537
AC
4478 int auxv_len;
4479
4480 if (get_exec_file (0))
4481 {
9f37bbcc 4482 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
dba24537
AC
4483 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4484 if (get_inferior_args ())
4485 {
d99148ef
JK
4486 char *string_end;
4487 char *psargs_end = psargs + sizeof (psargs);
4488
 4489 /* linux_elfcore_write_prpsinfo () handles strings that are not
 4490 zero-terminated fine. */
4491 string_end = memchr (psargs, 0, sizeof (psargs));
4492 if (string_end != NULL)
4493 {
4494 *string_end++ = ' ';
4495 strncpy (string_end, get_inferior_args (),
4496 psargs_end - string_end);
4497 }
dba24537
AC
4498 }
4499 note_data = (char *) elfcore_write_prpsinfo (obfd,
4500 note_data,
4501 note_size, fname, psargs);
4502 }
4503
4504 /* Dump information for threads. */
4505 thread_args.obfd = obfd;
4506 thread_args.note_data = note_data;
4507 thread_args.note_size = note_size;
4508 thread_args.num_notes = 0;
2020b7ab 4509 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4510 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4511 gdb_assert (thread_args.num_notes != 0);
4512 note_data = thread_args.note_data;
dba24537 4513
13547ab6
DJ
4514 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4515 NULL, &auxv);
dba24537
AC
4516 if (auxv_len > 0)
4517 {
4518 note_data = elfcore_write_note (obfd, note_data, note_size,
4519 "CORE", NT_AUXV, auxv, auxv_len);
4520 xfree (auxv);
4521 }
4522
efcbbd14
UW
4523 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4524
dba24537
AC
4525 make_cleanup (xfree, note_data);
4526 return note_data;
4527}
4528
4529/* Implement the "info proc" command. */
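/* Judging from the argument parsing below, typical invocations are
 "info proc", "info proc PID", and "info proc mappings" / "status" /
 "stat" / "cmd" / "exe" / "cwd" / "all". */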
4530
4531static void
4532linux_nat_info_proc_cmd (char *args, int from_tty)
4533{
89ecc4f5
DE
4534 /* A long is used for pid instead of an int to avoid a loss of precision
4535 compiler warning from the output of strtoul. */
4536 long pid = PIDGET (inferior_ptid);
dba24537
AC
4537 FILE *procfile;
4538 char **argv = NULL;
4539 char buffer[MAXPATHLEN];
4540 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4541 int cmdline_f = 1;
4542 int cwd_f = 1;
4543 int exe_f = 1;
4544 int mappings_f = 0;
dba24537
AC
4545 int status_f = 0;
4546 int stat_f = 0;
4547 int all = 0;
4548 struct stat dummy;
4549
4550 if (args)
4551 {
4552 /* Break up 'args' into an argv array. */
d1a41061
PP
4553 argv = gdb_buildargv (args);
4554 make_cleanup_freeargv (argv);
dba24537
AC
4555 }
4556 while (argv != NULL && *argv != NULL)
4557 {
4558 if (isdigit (argv[0][0]))
4559 {
4560 pid = strtoul (argv[0], NULL, 10);
4561 }
4562 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4563 {
4564 mappings_f = 1;
4565 }
4566 else if (strcmp (argv[0], "status") == 0)
4567 {
4568 status_f = 1;
4569 }
4570 else if (strcmp (argv[0], "stat") == 0)
4571 {
4572 stat_f = 1;
4573 }
4574 else if (strcmp (argv[0], "cmd") == 0)
4575 {
4576 cmdline_f = 1;
4577 }
4578 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4579 {
4580 exe_f = 1;
4581 }
4582 else if (strcmp (argv[0], "cwd") == 0)
4583 {
4584 cwd_f = 1;
4585 }
4586 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4587 {
4588 all = 1;
4589 }
4590 else
4591 {
1777feb0 4592 /* [...] (future options here). */
dba24537
AC
4593 }
4594 argv++;
4595 }
4596 if (pid == 0)
8a3fe4f8 4597 error (_("No current process: you must name one."));
dba24537 4598
89ecc4f5 4599 sprintf (fname1, "/proc/%ld", pid);
dba24537 4600 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4601 error (_("No /proc directory: '%s'"), fname1);
dba24537 4602
89ecc4f5 4603 printf_filtered (_("process %ld\n"), pid);
dba24537
AC
4604 if (cmdline_f || all)
4605 {
89ecc4f5 4606 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4607 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4608 {
7c8a8b04 4609 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4610
bf1d7d9c
JB
4611 if (fgets (buffer, sizeof (buffer), procfile))
4612 printf_filtered ("cmdline = '%s'\n", buffer);
4613 else
4614 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4615 do_cleanups (cleanup);
dba24537
AC
4616 }
4617 else
8a3fe4f8 4618 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4619 }
4620 if (cwd_f || all)
4621 {
89ecc4f5 4622 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4623 memset (fname2, 0, sizeof (fname2));
4624 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4625 printf_filtered ("cwd = '%s'\n", fname2);
4626 else
8a3fe4f8 4627 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4628 }
4629 if (exe_f || all)
4630 {
89ecc4f5 4631 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4632 memset (fname2, 0, sizeof (fname2));
4633 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4634 printf_filtered ("exe = '%s'\n", fname2);
4635 else
8a3fe4f8 4636 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4637 }
4638 if (mappings_f || all)
4639 {
89ecc4f5 4640 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4641 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4642 {
4643 long long addr, endaddr, size, offset, inode;
4644 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4645 struct cleanup *cleanup;
dba24537 4646
7c8a8b04 4647 cleanup = make_cleanup_fclose (procfile);
a3f17187 4648 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4649 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4650 {
4651 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4652 "Start Addr",
4653 " End Addr",
4654 " Size", " Offset", "objfile");
4655 }
4656 else
4657 {
4658 printf_filtered (" %18s %18s %10s %10s %7s\n",
4659 "Start Addr",
4660 " End Addr",
4661 " Size", " Offset", "objfile");
4662 }
4663
4664 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4665 &offset, &device[0], &inode, &filename[0]))
4666 {
4667 size = endaddr - addr;
4668
4669 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4670 calls here (and possibly above) should be abstracted
4671 out into their own functions? Andrew suggests using
4672 a generic local_address_string instead to print out
4673 the addresses; that makes sense to me, too. */
4674
a97b0ac8 4675 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4676 {
4677 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4678 (unsigned long) addr, /* FIXME: pr_addr */
4679 (unsigned long) endaddr,
4680 (int) size,
4681 (unsigned int) offset,
4682 filename[0] ? filename : "");
4683 }
4684 else
4685 {
4686 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4687 (unsigned long) addr, /* FIXME: pr_addr */
4688 (unsigned long) endaddr,
4689 (int) size,
4690 (unsigned int) offset,
4691 filename[0] ? filename : "");
4692 }
4693 }
4694
7c8a8b04 4695 do_cleanups (cleanup);
dba24537
AC
4696 }
4697 else
8a3fe4f8 4698 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4699 }
4700 if (status_f || all)
4701 {
89ecc4f5 4702 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4703 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4704 {
7c8a8b04 4705 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4706
dba24537
AC
4707 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4708 puts_filtered (buffer);
7c8a8b04 4709 do_cleanups (cleanup);
dba24537
AC
4710 }
4711 else
8a3fe4f8 4712 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4713 }
4714 if (stat_f || all)
4715 {
89ecc4f5 4716 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4717 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4718 {
4719 int itmp;
4720 char ctmp;
a25694b4 4721 long ltmp;
7c8a8b04 4722 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4723
4724 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4725 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4726 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4727 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4728 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4729 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4730 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4731 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4732 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4733 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4734 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4735 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4736 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4737 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4738 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4739 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4740 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4741 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4742 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4743 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4744 (unsigned long) ltmp);
4745 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4746 printf_filtered (_("Minor faults, children: %lu\n"),
4747 (unsigned long) ltmp);
4748 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4749 printf_filtered (_("Major faults (memory page faults): %lu\n"),
4750 (unsigned long) ltmp);
4751 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4752 printf_filtered (_("Major faults, children: %lu\n"),
4753 (unsigned long) ltmp);
4754 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4755 printf_filtered (_("utime: %ld\n"), ltmp);
4756 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4757 printf_filtered (_("stime: %ld\n"), ltmp);
4758 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4759 printf_filtered (_("utime, children: %ld\n"), ltmp);
4760 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4761 printf_filtered (_("stime, children: %ld\n"), ltmp);
4762 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4763 printf_filtered (_("jiffies remaining in current "
4764 "time slice: %ld\n"), ltmp);
a25694b4
AS
4765 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4766 printf_filtered (_("'nice' value: %ld\n"), ltmp);
4767 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4768 printf_filtered (_("jiffies until next timeout: %lu\n"),
4769 (unsigned long) ltmp);
4770 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4771 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4772 (unsigned long) ltmp);
4773 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4774 printf_filtered (_("start time (jiffies since "
4775 "system boot): %ld\n"), ltmp);
a25694b4
AS
4776 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4777 printf_filtered (_("Virtual memory size: %lu\n"),
4778 (unsigned long) ltmp);
4779 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3e43a32a
MS
4780 printf_filtered (_("Resident set size: %lu\n"),
4781 (unsigned long) ltmp);
a25694b4
AS
4782 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4783 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4784 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4785 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4786 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4787 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4788 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4789 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3e43a32a
MS
4790#if 0 /* Don't know how architecture-dependent the rest is...
4791 Anyway the signal bitmap info is available from "status". */
1777feb0 4792 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4793 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
1777feb0 4794 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4
AS
4795 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4796 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4797 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4798 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4799 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4800 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4801 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4802 if (fscanf (procfile, "%ld ", &ltmp) > 0)
 4803 printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
1777feb0 4804 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4805 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 4806#endif
7c8a8b04 4807 do_cleanups (cleanup);
dba24537
AC
4808 }
4809 else
8a3fe4f8 4810 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4811 }
4812}
4813
10d6c8cd
DJ
4814/* Implement the to_xfer_partial interface for memory reads using the /proc
4815 filesystem. Because we can use a single read() call for /proc, this
4816 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4817 but it doesn't support writes. */
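/* (For a rough sense of the difference: PTRACE_PEEKTEXT transfers one
 word per syscall, so reading N bytes costs about N / sizeof (long)
 ptrace calls, while /proc/PID/mem needs a single pread64/read.) */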
4818
4819static LONGEST
4820linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4821 const char *annex, gdb_byte *readbuf,
4822 const gdb_byte *writebuf,
4823 ULONGEST offset, LONGEST len)
dba24537 4824{
10d6c8cd
DJ
4825 LONGEST ret;
4826 int fd;
dba24537
AC
4827 char filename[64];
4828
10d6c8cd 4829 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4830 return 0;
4831
4832 /* Don't bother for one word. */
4833 if (len < 3 * sizeof (long))
4834 return 0;
4835
4836 /* We could keep this file open and cache it - possibly one per
4837 thread. That requires some juggling, but is even faster. */
4838 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4839 fd = open (filename, O_RDONLY | O_LARGEFILE);
4840 if (fd == -1)
4841 return 0;
4842
4843 /* If pread64 is available, use it. It's faster if the kernel
4844 supports it (only one syscall), and it's 64-bit safe even on
4845 32-bit platforms (for instance, SPARC debugging a SPARC64
4846 application). */
4847#ifdef HAVE_PREAD64
10d6c8cd 4848 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4849#else
10d6c8cd 4850 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4851#endif
4852 ret = 0;
4853 else
4854 ret = len;
4855
4856 close (fd);
4857 return ret;
4858}
4859
efcbbd14
UW
4860
4861/* Enumerate spufs IDs for process PID. */
4862static LONGEST
4863spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4864{
4865 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4866 LONGEST pos = 0;
4867 LONGEST written = 0;
4868 char path[128];
4869 DIR *dir;
4870 struct dirent *entry;
4871
4872 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4873 dir = opendir (path);
4874 if (!dir)
4875 return -1;
4876
4877 rewinddir (dir);
4878 while ((entry = readdir (dir)) != NULL)
4879 {
4880 struct stat st;
4881 struct statfs stfs;
4882 int fd;
4883
4884 fd = atoi (entry->d_name);
4885 if (!fd)
4886 continue;
4887
4888 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4889 if (stat (path, &st) != 0)
4890 continue;
4891 if (!S_ISDIR (st.st_mode))
4892 continue;
4893
4894 if (statfs (path, &stfs) != 0)
4895 continue;
4896 if (stfs.f_type != SPUFS_MAGIC)
4897 continue;
4898
4899 if (pos >= offset && pos + 4 <= offset + len)
4900 {
4901 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4902 written += 4;
4903 }
4904 pos += 4;
4905 }
4906
4907 closedir (dir);
4908 return written;
4909}
4910
4911/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4912 object type, using the /proc file system. */
4913static LONGEST
4914linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4915 const char *annex, gdb_byte *readbuf,
4916 const gdb_byte *writebuf,
4917 ULONGEST offset, LONGEST len)
4918{
4919 char buf[128];
4920 int fd = 0;
4921 int ret = -1;
4922 int pid = PIDGET (inferior_ptid);
4923
4924 if (!annex)
4925 {
4926 if (!readbuf)
4927 return -1;
4928 else
4929 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4930 }
4931
4932 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4933 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4934 if (fd <= 0)
4935 return -1;
4936
4937 if (offset != 0
4938 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4939 {
4940 close (fd);
4941 return 0;
4942 }
4943
4944 if (writebuf)
4945 ret = write (fd, writebuf, (size_t) len);
4946 else if (readbuf)
4947 ret = read (fd, readbuf, (size_t) len);
4948
4949 close (fd);
4950 return ret;
4951}
4952
4953
dba24537
AC
4954/* Parse LINE as a signal set and add its set bits to SIGS. */
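/* (LINE is the hex bitmap following e.g. "SigPnd:" in
 /proc/PID/status. For illustration, "0000000000000002\n" sets just
 signal 2 (SIGINT): each hex digit covers four signal numbers,
 starting with the lowest-numbered signals in the rightmost digit.) */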
4955
4956static void
4957add_line_to_sigset (const char *line, sigset_t *sigs)
4958{
4959 int len = strlen (line) - 1;
4960 const char *p;
4961 int signum;
4962
4963 if (line[len] != '\n')
8a3fe4f8 4964 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4965
4966 p = line;
4967 signum = len * 4;
4968 while (len-- > 0)
4969 {
4970 int digit;
4971
4972 if (*p >= '0' && *p <= '9')
4973 digit = *p - '0';
4974 else if (*p >= 'a' && *p <= 'f')
4975 digit = *p - 'a' + 10;
4976 else
8a3fe4f8 4977 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4978
4979 signum -= 4;
4980
4981 if (digit & 1)
4982 sigaddset (sigs, signum + 1);
4983 if (digit & 2)
4984 sigaddset (sigs, signum + 2);
4985 if (digit & 4)
4986 sigaddset (sigs, signum + 3);
4987 if (digit & 8)
4988 sigaddset (sigs, signum + 4);
4989
4990 p++;
4991 }
4992}
4993
4994/* Find process PID's pending signals from /proc/pid/status and set
4995 SIGS to match. */
4996
4997void
3e43a32a
MS
4998linux_proc_pending_signals (int pid, sigset_t *pending,
4999 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
5000{
5001 FILE *procfile;
5002 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 5003 struct cleanup *cleanup;
dba24537
AC
5004
5005 sigemptyset (pending);
5006 sigemptyset (blocked);
5007 sigemptyset (ignored);
5008 sprintf (fname, "/proc/%d/status", pid);
5009 procfile = fopen (fname, "r");
5010 if (procfile == NULL)
8a3fe4f8 5011 error (_("Could not open %s"), fname);
7c8a8b04 5012 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
5013
5014 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5015 {
5016 /* Normal queued signals are on the SigPnd line in the status
5017 file. However, 2.6 kernels also have a "shared" pending
5018 queue for delivering signals to a thread group, so check for
5019 a ShdPnd line also.
5020
5021 Unfortunately some Red Hat kernels include the shared pending
5022 queue but not the ShdPnd status field. */
5023
5024 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5025 add_line_to_sigset (buffer + 8, pending);
5026 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5027 add_line_to_sigset (buffer + 8, pending);
5028 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5029 add_line_to_sigset (buffer + 8, blocked);
5030 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5031 add_line_to_sigset (buffer + 8, ignored);
5032 }
5033
7c8a8b04 5034 do_cleanups (cleanup);
dba24537
AC
5035}
5036
07e059b5
VP
5037static LONGEST
5038linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
5039 const char *annex, gdb_byte *readbuf,
5040 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5
VP
5041{
5042 /* We make the process list snapshot when the object starts to be
5043 read. */
5044 static const char *buf;
5045 static LONGEST len_avail = -1;
5046 static struct obstack obstack;
5047
5048 DIR *dirp;
5049
5050 gdb_assert (object == TARGET_OBJECT_OSDATA);
5051
a61408f8
SS
5052 if (!annex)
5053 {
5054 if (offset == 0)
5055 {
5056 if (len_avail != -1 && len_avail != 0)
5057 obstack_free (&obstack, NULL);
5058 len_avail = 0;
5059 buf = NULL;
5060 obstack_init (&obstack);
5061 obstack_grow_str (&obstack, "<osdata type=\"types\">\n");
5062
3e43a32a 5063 obstack_xml_printf (&obstack,
a61408f8
SS
5064 "<item>"
5065 "<column name=\"Type\">processes</column>"
3e43a32a
MS
5066 "<column name=\"Description\">"
5067 "Listing of all processes</column>"
a61408f8
SS
5068 "</item>");
5069
5070 obstack_grow_str0 (&obstack, "</osdata>\n");
5071 buf = obstack_finish (&obstack);
5072 len_avail = strlen (buf);
5073 }
5074
5075 if (offset >= len_avail)
5076 {
5077 /* Done. Get rid of the obstack. */
5078 obstack_free (&obstack, NULL);
5079 buf = NULL;
5080 len_avail = 0;
5081 return 0;
5082 }
5083
5084 if (len > len_avail - offset)
5085 len = len_avail - offset;
5086 memcpy (readbuf, buf + offset, len);
5087
5088 return len;
5089 }
5090
07e059b5
VP
5091 if (strcmp (annex, "processes") != 0)
5092 return 0;
5093
5094 gdb_assert (readbuf && !writebuf);
5095
5096 if (offset == 0)
5097 {
5098 if (len_avail != -1 && len_avail != 0)
e0881a8e 5099 obstack_free (&obstack, NULL);
07e059b5
VP
5100 len_avail = 0;
5101 buf = NULL;
5102 obstack_init (&obstack);
5103 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
5104
5105 dirp = opendir ("/proc");
5106 if (dirp)
e0881a8e
MS
5107 {
5108 struct dirent *dp;
5109
5110 while ((dp = readdir (dirp)) != NULL)
5111 {
5112 struct stat statbuf;
5113 char procentry[sizeof ("/proc/4294967295")];
5114
5115 if (!isdigit (dp->d_name[0])
5116 || NAMELEN (dp) > sizeof ("4294967295") - 1)
5117 continue;
5118
5119 sprintf (procentry, "/proc/%s", dp->d_name);
5120 if (stat (procentry, &statbuf) == 0
5121 && S_ISDIR (statbuf.st_mode))
5122 {
5123 char *pathname;
5124 FILE *f;
5125 char cmd[MAXPATHLEN + 1];
5126 struct passwd *entry;
5127
5128 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
5129 entry = getpwuid (statbuf.st_uid);
5130
5131 if ((f = fopen (pathname, "r")) != NULL)
5132 {
5eee517d 5133 size_t length = fread (cmd, 1, sizeof (cmd) - 1, f);
e0881a8e 5134
5eee517d 5135 if (length > 0)
e0881a8e
MS
5136 {
5137 int i;
5138
5eee517d 5139 for (i = 0; i < length; i++)
e0881a8e
MS
5140 if (cmd[i] == '\0')
5141 cmd[i] = ' ';
5eee517d 5142 cmd[length] = '\0';
e0881a8e
MS
5143
5144 obstack_xml_printf (
5145 &obstack,
5146 "<item>"
5147 "<column name=\"pid\">%s</column>"
5148 "<column name=\"user\">%s</column>"
5149 "<column name=\"command\">%s</column>"
5150 "</item>",
5151 dp->d_name,
5152 entry ? entry->pw_name : "?",
5153 cmd);
5154 }
5155 fclose (f);
5156 }
5157
5158 xfree (pathname);
5159 }
5160 }
5161
5162 closedir (dirp);
5163 }
07e059b5
VP
5164
5165 obstack_grow_str0 (&obstack, "</osdata>\n");
5166 buf = obstack_finish (&obstack);
5167 len_avail = strlen (buf);
5168 }
5169
5170 if (offset >= len_avail)
5171 {
5172 /* Done. Get rid of the obstack. */
5173 obstack_free (&obstack, NULL);
5174 buf = NULL;
5175 len_avail = 0;
5176 return 0;
5177 }
5178
5179 if (len > len_avail - offset)
5180 len = len_avail - offset;
5181 memcpy (readbuf, buf + offset, len);
5182
5183 return len;
5184}
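/* Editor's note: an illustrative, hedged sketch -- not part of
   linux-nat.c.  It redoes the /proc scan from linux_nat_xfer_osdata
   above as a stand-alone routine: numeric directory names under /proc
   are treated as pids, the owner is resolved with getpwuid, and the
   NUL-separated cmdline is flattened into one printable string.  All
   names here are made up for the example and error handling is
   minimal.  */
#include <ctype.h>
#include <dirent.h>
#include <pwd.h>
#include <stdio.h>
#include <sys/stat.h>

static void
list_processes_sketch (void)
{
  DIR *dirp = opendir ("/proc");
  struct dirent *dp;

  if (dirp == NULL)
    return;

  while ((dp = readdir (dirp)) != NULL)
    {
      char path[64], cmd[1024];
      struct stat st;
      struct passwd *pw;
      FILE *f;
      size_t len, i;

      if (!isdigit ((unsigned char) dp->d_name[0]))
        continue;

      snprintf (path, sizeof (path), "/proc/%s", dp->d_name);
      if (stat (path, &st) != 0 || !S_ISDIR (st.st_mode))
        continue;

      /* The owner of the /proc/<pid> directory is the process owner.  */
      pw = getpwuid (st.st_uid);

      snprintf (path, sizeof (path), "/proc/%s/cmdline", dp->d_name);
      f = fopen (path, "r");
      if (f == NULL)
        continue;

      len = fread (cmd, 1, sizeof (cmd) - 1, f);
      fclose (f);

      /* cmdline separates arguments with NULs; turn them into spaces.  */
      for (i = 0; i < len; i++)
        if (cmd[i] == '\0')
          cmd[i] = ' ';
      cmd[len] = '\0';

      printf ("%s\t%s\t%s\n", dp->d_name, pw ? pw->pw_name : "?", cmd);
    }

  closedir (dirp);
}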
5185
10d6c8cd
DJ
5186static LONGEST
5187linux_xfer_partial (struct target_ops *ops, enum target_object object,
5188 const char *annex, gdb_byte *readbuf,
5189 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5190{
5191 LONGEST xfer;
5192
5193 if (object == TARGET_OBJECT_AUXV)
9f2982ff 5194 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
5195 offset, len);
5196
07e059b5
VP
5197 if (object == TARGET_OBJECT_OSDATA)
5198 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5199 offset, len);
5200
efcbbd14
UW
5201 if (object == TARGET_OBJECT_SPU)
5202 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5203 offset, len);
5204
8f313923
JK
5205 /* GDB calculates all the addresses in the possibly larger width of the
5206 address. The address width needs to be masked before its final use - either
5207 by linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5208
5209 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5210
5211 if (object == TARGET_OBJECT_MEMORY)
5212 {
5213 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5214
5215 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5216 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5217 }
5218
10d6c8cd
DJ
5219 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5220 offset, len);
5221 if (xfer != 0)
5222 return xfer;
5223
5224 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5225 offset, len);
5226}
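/* Editor's note: an illustrative, hedged sketch -- not part of
   linux-nat.c.  It shows the effect of the TARGET_OBJECT_MEMORY
   address masking above for a hypothetical 32-bit inferior
   (addr_bit == 32) debugged by a 64-bit GDB: a sign-extended upper
   half is stripped before the address reaches the lower layers.  */
#include <stdint.h>

static uint64_t
mask_to_addr_bit_sketch (uint64_t offset, int addr_bit)
{
  /* Guard the shift exactly as the code above does, to avoid
     undefined behaviour when addr_bit equals the host width.  */
  if (addr_bit < (int) (sizeof (uint64_t) * 8))
    offset &= ((uint64_t) 1 << addr_bit) - 1;
  return offset;
}

/* For example, mask_to_addr_bit_sketch (0xffffffff80001000, 32)
   yields 0x80001000.  */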
5227
e9efe249 5228/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
5229 it with local methods. */
5230
910122bf
UW
5231static void
5232linux_target_install_ops (struct target_ops *t)
10d6c8cd 5233{
6d8fd2b7 5234 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 5235 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 5236 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 5237 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 5238 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 5239 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 5240 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 5241 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 5242 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
5243 t->to_post_attach = linux_child_post_attach;
5244 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
5245 t->to_find_memory_regions = linux_nat_find_memory_regions;
5246 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5247
5248 super_xfer_partial = t->to_xfer_partial;
5249 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
5250}
5251
5252struct target_ops *
5253linux_target (void)
5254{
5255 struct target_ops *t;
5256
5257 t = inf_ptrace_target ();
5258 linux_target_install_ops (t);
5259
5260 return t;
5261}
5262
5263struct target_ops *
7714d83a 5264linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
5265{
5266 struct target_ops *t;
5267
5268 t = inf_ptrace_trad_target (register_u_offset);
5269 linux_target_install_ops (t);
10d6c8cd 5270
10d6c8cd
DJ
5271 return t;
5272}
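/* Editor's note: an illustrative, hedged sketch -- not part of
   linux-nat.c.  A typical architecture-specific native file obtains
   the prototype target from linux_target () (or linux_trad_target),
   installs its own register access methods, and then registers the
   result with linux_nat_add_target, which is defined further down in
   this file.  The initializer name below is hypothetical.  */
#include "defs.h"
#include "target.h"
#include "linux-nat.h"

void
_initialize_arch_linux_nat_sketch (void)
{
  struct target_ops *t = linux_target ();

  /* ... install architecture-specific to_fetch_registers and
     to_store_registers methods on T here ...  */

  linux_nat_add_target (t);
}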
5273
b84876c2
PA
5274/* target_is_async_p implementation. */
5275
5276static int
5277linux_nat_is_async_p (void)
5278{
5279 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5280 it explicitly with the "set target-async" command.
b84876c2 5281 Someday, Linux will always be async. */
c6ebd6cf 5282 if (!target_async_permitted)
b84876c2
PA
5283 return 0;
5284
d90e17a7
PA
5285 /* See target.h/target_async_mask. */
5286 return linux_nat_async_mask_value;
b84876c2
PA
5287}
5288
5289/* target_can_async_p implementation. */
5290
5291static int
5292linux_nat_can_async_p (void)
5293{
5294 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5295 it explicitly with the "set target-async" command.
b84876c2 5296 Someday, Linux will always be async. */
c6ebd6cf 5297 if (!target_async_permitted)
b84876c2
PA
5298 return 0;
5299
5300 /* See target.h/target_async_mask. */
5301 return linux_nat_async_mask_value;
5302}
5303
9908b566
VP
5304static int
5305linux_nat_supports_non_stop (void)
5306{
5307 return 1;
5308}
5309
d90e17a7
PA
5310/* True if we want to support multi-process. To be removed when GDB
5311 supports multi-exec. */
5312
2277426b 5313int linux_multi_process = 1;
d90e17a7
PA
5314
5315static int
5316linux_nat_supports_multi_process (void)
5317{
5318 return linux_multi_process;
5319}
5320
b84876c2
PA
5321/* target_async_mask implementation. */
5322
5323static int
7feb7d06 5324linux_nat_async_mask (int new_mask)
b84876c2 5325{
7feb7d06 5326 int curr_mask = linux_nat_async_mask_value;
b84876c2 5327
7feb7d06 5328 if (curr_mask != new_mask)
b84876c2 5329 {
7feb7d06 5330 if (new_mask == 0)
b84876c2
PA
5331 {
5332 linux_nat_async (NULL, 0);
7feb7d06 5333 linux_nat_async_mask_value = new_mask;
b84876c2
PA
5334 }
5335 else
5336 {
7feb7d06 5337 linux_nat_async_mask_value = new_mask;
84e46146 5338
7feb7d06
PA
5339 /* If we're going out of async-mask in all-stop, then the
5340 inferior is stopped. The next resume will call
5341 target_async. In non-stop, the target event source
5342 should be always registered in the event loop. Do so
5343 now. */
5344 if (non_stop)
5345 linux_nat_async (inferior_event_handler, 0);
b84876c2
PA
5346 }
5347 }
5348
7feb7d06 5349 return curr_mask;
b84876c2
PA
5350}
5351
5352static int async_terminal_is_ours = 1;
5353
5354/* target_terminal_inferior implementation. */
5355
5356static void
5357linux_nat_terminal_inferior (void)
5358{
5359 if (!target_is_async_p ())
5360 {
5361 /* Async mode is disabled. */
5362 terminal_inferior ();
5363 return;
5364 }
5365
b84876c2
PA
5366 terminal_inferior ();
5367
d9d2d8b6 5368 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5369 if (!async_terminal_is_ours)
5370 return;
5371
5372 delete_file_handler (input_fd);
5373 async_terminal_is_ours = 0;
5374 set_sigint_trap ();
5375}
5376
5377/* target_terminal_ours implementation. */
5378
2c0b251b 5379static void
b84876c2
PA
5380linux_nat_terminal_ours (void)
5381{
5382 if (!target_is_async_p ())
5383 {
5384 /* Async mode is disabled. */
5385 terminal_ours ();
5386 return;
5387 }
5388
5389 /* GDB should never give the terminal to the inferior if the
5390 inferior is running in the background (run&, continue&, etc.),
5391 but claiming it back sure should. */
5392 terminal_ours ();
5393
b84876c2
PA
5394 if (async_terminal_is_ours)
5395 return;
5396
5397 clear_sigint_trap ();
5398 add_file_handler (input_fd, stdin_event_handler, 0);
5399 async_terminal_is_ours = 1;
5400}
5401
5402static void (*async_client_callback) (enum inferior_event_type event_type,
5403 void *context);
5404static void *async_client_context;
5405
7feb7d06
PA
5406/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5407 it lets us notice when any child changes state and notify the
5408 event-loop; it also allows us to use sigsuspend in linux_nat_wait_1
5409 above to wait for the arrival of a SIGCHLD. */
5410
b84876c2 5411static void
7feb7d06 5412sigchld_handler (int signo)
b84876c2 5413{
7feb7d06
PA
5414 int old_errno = errno;
5415
5416 if (debug_linux_nat_async)
5417 fprintf_unfiltered (gdb_stdlog, "sigchld\n");
5418
5419 if (signo == SIGCHLD
5420 && linux_nat_event_pipe[0] != -1)
5421 async_file_mark (); /* Let the event loop know that there are
5422 events to handle. */
5423
5424 errno = old_errno;
5425}
5426
5427/* Callback registered with the target events file descriptor. */
5428
5429static void
5430handle_target_event (int error, gdb_client_data client_data)
5431{
5432 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5433}
5434
5435/* Create/destroy the target events pipe. Returns previous state. */
5436
5437static int
5438linux_async_pipe (int enable)
5439{
5440 int previous = (linux_nat_event_pipe[0] != -1);
5441
5442 if (previous != enable)
5443 {
5444 sigset_t prev_mask;
5445
5446 block_child_signals (&prev_mask);
5447
5448 if (enable)
5449 {
5450 if (pipe (linux_nat_event_pipe) == -1)
5451 internal_error (__FILE__, __LINE__,
5452 "creating event pipe failed.");
5453
5454 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5455 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5456 }
5457 else
5458 {
5459 close (linux_nat_event_pipe[0]);
5460 close (linux_nat_event_pipe[1]);
5461 linux_nat_event_pipe[0] = -1;
5462 linux_nat_event_pipe[1] = -1;
5463 }
5464
5465 restore_child_signals_mask (&prev_mask);
5466 }
5467
5468 return previous;
b84876c2
PA
5469}
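/* Editor's note: an illustrative, hedged sketch -- not part of
   linux-nat.c.  It shows the "self-pipe" pattern used by
   linux_async_pipe and sigchld_handler in isolation: the signal
   handler writes one byte into a non-blocking pipe, and an event
   loop that watches the read end wakes up, so the real work happens
   outside signal context.  The helper names are made up and no GDB
   event-loop calls are used.  */
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe_sketch[2] = { -1, -1 };

static void
sigchld_sketch (int signo)
{
  int saved_errno = errno;

  /* write is async-signal-safe; the byte just means "events pending".  */
  (void) write (event_pipe_sketch[1], "+", 1);

  errno = saved_errno;
}

static int
event_pipe_init_sketch (void)
{
  if (pipe (event_pipe_sketch) == -1)
    return -1;

  fcntl (event_pipe_sketch[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe_sketch[1], F_SETFL, O_NONBLOCK);

  signal (SIGCHLD, sigchld_sketch);

  /* Hand the read end to a select/poll based event loop.  */
  return event_pipe_sketch[0];
}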
5470
5471/* target_async implementation. */
5472
5473static void
5474linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5475 void *context), void *context)
5476{
c6ebd6cf 5477 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
5478 internal_error (__FILE__, __LINE__,
5479 "Calling target_async when async is masked");
5480
5481 if (callback != NULL)
5482 {
5483 async_client_callback = callback;
5484 async_client_context = context;
7feb7d06
PA
5485 if (!linux_async_pipe (1))
5486 {
5487 add_file_handler (linux_nat_event_pipe[0],
5488 handle_target_event, NULL);
5489 /* There may be pending events to handle. Tell the event loop
5490 to poll them. */
5491 async_file_mark ();
5492 }
b84876c2
PA
5493 }
5494 else
5495 {
5496 async_client_callback = callback;
5497 async_client_context = context;
b84876c2 5498 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5499 linux_async_pipe (0);
b84876c2
PA
5500 }
5501 return;
5502}
5503
252fbfc8
PA
5504/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5505 event came out. */
5506
4c28f408 5507static int
252fbfc8 5508linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5509{
d90e17a7 5510 if (!lwp->stopped)
252fbfc8 5511 {
d90e17a7 5512 ptid_t ptid = lwp->ptid;
252fbfc8 5513
d90e17a7
PA
5514 if (debug_linux_nat)
5515 fprintf_unfiltered (gdb_stdlog,
5516 "LNSL: running -> suspending %s\n",
5517 target_pid_to_str (lwp->ptid));
252fbfc8 5518
252fbfc8 5519
d90e17a7
PA
5520 stop_callback (lwp, NULL);
5521 stop_wait_callback (lwp, NULL);
252fbfc8 5522
d90e17a7
PA
5523 /* If the lwp exits while we try to stop it, there's nothing
5524 else to do. */
5525 lwp = find_lwp_pid (ptid);
5526 if (lwp == NULL)
5527 return 0;
252fbfc8 5528
d90e17a7
PA
5529 /* If we didn't collect any signal other than SIGSTOP while
5530 stopping the LWP, push a SIGNAL_0 event. In either case, the
5531 event-loop will end up calling target_wait which will collect
5532 these. */
5533 if (lwp->status == 0)
5534 lwp->status = W_STOPCODE (0);
5535 async_file_mark ();
5536 }
5537 else
5538 {
5539 /* Already known to be stopped; do nothing. */
252fbfc8 5540
d90e17a7
PA
5541 if (debug_linux_nat)
5542 {
e09875d4 5543 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5544 fprintf_unfiltered (gdb_stdlog,
5545 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5546 target_pid_to_str (lwp->ptid));
5547 else
3e43a32a
MS
5548 fprintf_unfiltered (gdb_stdlog,
5549 "LNSL: already stopped/no "
5550 "stop_requested yet %s\n",
d90e17a7 5551 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5552 }
5553 }
4c28f408
PA
5554 return 0;
5555}
5556
5557static void
5558linux_nat_stop (ptid_t ptid)
5559{
5560 if (non_stop)
d90e17a7 5561 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5562 else
5563 linux_ops->to_stop (ptid);
5564}
5565
d90e17a7
PA
5566static void
5567linux_nat_close (int quitting)
5568{
5569 /* Unregister from the event loop. */
5570 if (target_is_async_p ())
5571 target_async (NULL, 0);
5572
5573 /* Reset the async mask. */
5574 linux_nat_async_mask_value = 1;
5575
5576 if (linux_ops->to_close)
5577 linux_ops->to_close (quitting);
5578}
5579
c0694254
PA
5580/* When requests are passed down from the linux-nat layer to the
5581 single-threaded inf-ptrace layer, ptids of the (lwpid,0,0) form are
5582 used. The address space pointer is stored in the inferior object,
5583 but the common code that is passed such a ptid can't tell whether
5584 the lwpid is a "main" process id or not (it assumes so). We reverse
5585 look up the "main" process id from the lwp here. */
5586
5587struct address_space *
5588linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5589{
5590 struct lwp_info *lwp;
5591 struct inferior *inf;
5592 int pid;
5593
5594 pid = GET_LWP (ptid);
5595 if (GET_LWP (ptid) == 0)
5596 {
5597 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5598 tgid. */
5599 lwp = find_lwp_pid (ptid);
5600 pid = GET_PID (lwp->ptid);
5601 }
5602 else
5603 {
5604 /* A (pid,lwpid,0) ptid. */
5605 pid = GET_PID (ptid);
5606 }
5607
5608 inf = find_inferior_pid (pid);
5609 gdb_assert (inf != NULL);
5610 return inf->aspace;
5611}
5612
dc146f7c
VP
5613int
5614linux_nat_core_of_thread_1 (ptid_t ptid)
5615{
5616 struct cleanup *back_to;
5617 char *filename;
5618 FILE *f;
5619 char *content = NULL;
5620 char *p;
5621 char *ts = 0;
5622 int content_read = 0;
5623 int i;
5624 int core;
5625
5626 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5627 GET_PID (ptid), GET_LWP (ptid));
5628 back_to = make_cleanup (xfree, filename);
5629
5630 f = fopen (filename, "r");
5631 if (!f)
5632 {
5633 do_cleanups (back_to);
5634 return -1;
5635 }
5636
5637 make_cleanup_fclose (f);
5638
5639 for (;;)
5640 {
5641 int n;
e0881a8e 5642
dc146f7c
VP
5643 content = xrealloc (content, content_read + 1024);
5644 n = fread (content + content_read, 1, 1024, f);
5645 content_read += n;
5646 if (n < 1024)
5647 {
5648 content[content_read] = '\0';
5649 break;
5650 }
5651 }
5652
5653 make_cleanup (xfree, content);
5654
5655 p = strchr (content, '(');
ca2a87a0
JK
5656
5657 /* Skip ")". */
5658 if (p != NULL)
5659 p = strchr (p, ')');
5660 if (p != NULL)
5661 p++;
dc146f7c
VP
5662
5663 /* If the first field after the program name has index 0, then the core
5664 number is the field with index 36. There's no constant for that anywhere. */
ca2a87a0
JK
5665 if (p != NULL)
5666 p = strtok_r (p, " ", &ts);
5667 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5668 p = strtok_r (NULL, " ", &ts);
5669
ca2a87a0 5670 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5671 core = -1;
5672
5673 do_cleanups (back_to);
5674
5675 return core;
5676}
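/* Editor's note: an illustrative, hedged sketch -- not part of
   linux-nat.c.  It reduces the parse above to its essentials: read
   /proc/<pid>/task/<lwp>/stat, skip past the ')' that ends the
   command name (which may itself contain spaces), and take the field
   at index 36 after it as the processor number.  Unlike the code
   above, a single read of up to 1 KiB is assumed to be enough.  */
#include <stdio.h>
#include <string.h>

static int
core_of_lwp_sketch (int pid, long lwp)
{
  char path[64], buf[1024];
  char *p, *save = NULL;
  FILE *f;
  size_t n;
  int i, core;

  snprintf (path, sizeof (path), "/proc/%d/task/%ld/stat", pid, lwp);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;

  n = fread (buf, 1, sizeof (buf) - 1, f);
  fclose (f);
  buf[n] = '\0';

  /* Skip the "pid (comm" prefix; the command name may contain spaces,
     so search for the closing parenthesis instead of tokenizing.  */
  p = strchr (buf, ')');
  if (p == NULL)
    return -1;

  p = strtok_r (p + 1, " ", &save);
  for (i = 0; p != NULL && i != 36; ++i)
    p = strtok_r (NULL, " ", &save);

  if (p == NULL || sscanf (p, "%d", &core) != 1)
    return -1;

  return core;
}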
5677
5678/* Return the cached value of the processor core for thread PTID. */
5679
5680int
5681linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5682{
5683 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5684
dc146f7c
VP
5685 if (info)
5686 return info->core;
5687 return -1;
5688}
5689
f973ed9c
DJ
5690void
5691linux_nat_add_target (struct target_ops *t)
5692{
f973ed9c
DJ
5693 /* Save the provided single-threaded target. We save this in a separate
5694 variable because another target we've inherited from (e.g. inf-ptrace)
5695 may have saved a pointer to T; we want to use it for the final
5696 process stratum target. */
5697 linux_ops_saved = *t;
5698 linux_ops = &linux_ops_saved;
5699
5700 /* Override some methods for multithreading. */
b84876c2 5701 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5702 t->to_attach = linux_nat_attach;
5703 t->to_detach = linux_nat_detach;
5704 t->to_resume = linux_nat_resume;
5705 t->to_wait = linux_nat_wait;
2455069d 5706 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
5707 t->to_xfer_partial = linux_nat_xfer_partial;
5708 t->to_kill = linux_nat_kill;
5709 t->to_mourn_inferior = linux_nat_mourn_inferior;
5710 t->to_thread_alive = linux_nat_thread_alive;
5711 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 5712 t->to_thread_name = linux_nat_thread_name;
f973ed9c 5713 t->to_has_thread_control = tc_schedlock;
c0694254 5714 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5715 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5716 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5717
b84876c2
PA
5718 t->to_can_async_p = linux_nat_can_async_p;
5719 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5720 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
5721 t->to_async = linux_nat_async;
5722 t->to_async_mask = linux_nat_async_mask;
5723 t->to_terminal_inferior = linux_nat_terminal_inferior;
5724 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5725 t->to_close = linux_nat_close;
b84876c2 5726
4c28f408
PA
5727 /* Methods for non-stop support. */
5728 t->to_stop = linux_nat_stop;
5729
d90e17a7
PA
5730 t->to_supports_multi_process = linux_nat_supports_multi_process;
5731
dc146f7c
VP
5732 t->to_core_of_thread = linux_nat_core_of_thread;
5733
f973ed9c
DJ
5734 /* We don't change the stratum; this target will sit at
5735 process_stratum and thread_db will set at thread_stratum. This
5736 is a little strange, since this is a multi-threaded-capable
5737 target, but we want to be on the stack below thread_db, and we
5738 also want to be used for single-threaded processes. */
5739
5740 add_target (t);
f973ed9c
DJ
5741}
5742
9f0bdab8
DJ
5743/* Register a method to call whenever a new thread is attached. */
5744void
5745linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
5746{
5747 /* Save the pointer. We only support a single registered instance
5748 of the GNU/Linux native target, so we do not need to map this to
5749 T. */
5750 linux_nat_new_thread = new_thread;
5751}
5752
5b009018
PA
5753/* Register a method that converts a siginfo object between the layout
5754 that ptrace returns, and the layout in the architecture of the
5755 inferior. */
5756void
5757linux_nat_set_siginfo_fixup (struct target_ops *t,
5758 int (*siginfo_fixup) (struct siginfo *,
5759 gdb_byte *,
5760 int))
5761{
5762 /* Save the pointer. */
5763 linux_nat_siginfo_fixup = siginfo_fixup;
5764}
5765
9f0bdab8
DJ
5766/* Return the saved siginfo associated with PTID. */
5767struct siginfo *
5768linux_nat_get_siginfo (ptid_t ptid)
5769{
5770 struct lwp_info *lp = find_lwp_pid (ptid);
5771
5772 gdb_assert (lp != NULL);
5773
5774 return &lp->siginfo;
5775}
5776
2c0b251b
PA
5777/* Provide a prototype to silence -Wmissing-prototypes. */
5778extern initialize_file_ftype _initialize_linux_nat;
5779
d6b0e80f
AC
5780void
5781_initialize_linux_nat (void)
5782{
1bedd215
AC
5783 add_info ("proc", linux_nat_info_proc_cmd, _("\
5784Show /proc process information about any running process.\n\
dba24537
AC
5785Specify any process id, or use the program being debugged by default.\n\
5786Specify any of the following keywords for detailed info:\n\
5787 mappings -- list of mapped memory regions.\n\
5788 stat -- list a bunch of random process info.\n\
5789 status -- list a different bunch of random process info.\n\
1bedd215 5790 all -- list all available /proc info."));
d6b0e80f 5791
b84876c2
PA
5792 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5793 &debug_linux_nat, _("\
5794Set debugging of GNU/Linux lwp module."), _("\
5795Show debugging of GNU/Linux lwp module."), _("\
5796Enables printf debugging output."),
5797 NULL,
5798 show_debug_linux_nat,
5799 &setdebuglist, &showdebuglist);
5800
5801 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
5802 &debug_linux_nat_async, _("\
5803Set debugging of GNU/Linux async lwp module."), _("\
5804Show debugging of GNU/Linux async lwp module."), _("\
5805Enables printf debugging output."),
5806 NULL,
5807 show_debug_linux_nat_async,
5808 &setdebuglist, &showdebuglist);
5809
b84876c2 5810 /* Save this mask as the default. */
d6b0e80f
AC
5811 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5812
7feb7d06
PA
5813 /* Install a SIGCHLD handler. */
5814 sigchld_action.sa_handler = sigchld_handler;
5815 sigemptyset (&sigchld_action.sa_mask);
5816 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5817
5818 /* Make it the default. */
7feb7d06 5819 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5820
5821 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5822 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5823 sigdelset (&suspend_mask, SIGCHLD);
5824
7feb7d06 5825 sigemptyset (&blocked_mask);
10568435
JK
5826
5827 add_setshow_boolean_cmd ("disable-randomization", class_support,
5828 &disable_randomization, _("\
5829Set disabling of debuggee's virtual address space randomization."), _("\
5830Show disabling of debuggee's virtual address space randomization."), _("\
5831When this mode is on (which is the default), randomization of the virtual\n\
5832address space is disabled. Standalone programs run with the randomization\n\
5833enabled by default on some platforms."),
5834 &set_disable_randomization,
5835 &show_disable_randomization,
5836 &setlist, &showlist);
d6b0e80f
AC
5837}
5838\f
5839
5840/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5841 the GNU/Linux Threads library and therefore doesn't really belong
5842 here. */
5843
5844/* Read variable NAME in the target and return its value if found.
5845 Otherwise return zero. It is assumed that the type of the variable
5846 is `int'. */
5847
5848static int
5849get_signo (const char *name)
5850{
5851 struct minimal_symbol *ms;
5852 int signo;
5853
5854 ms = lookup_minimal_symbol (name, NULL, NULL);
5855 if (ms == NULL)
5856 return 0;
5857
8e70166d 5858 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5859 sizeof (signo)) != 0)
5860 return 0;
5861
5862 return signo;
5863}
5864
5865/* Return the set of signals used by the threads library in *SET. */
5866
5867void
5868lin_thread_get_thread_signals (sigset_t *set)
5869{
5870 struct sigaction action;
5871 int restart, cancel;
5872
b84876c2 5873 sigemptyset (&blocked_mask);
d6b0e80f
AC
5874 sigemptyset (set);
5875
5876 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5877 cancel = get_signo ("__pthread_sig_cancel");
5878
5879 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5880 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5881 not provide any way for the debugger to query the signal numbers -
5882 fortunately they don't change! */
5883
d6b0e80f 5884 if (restart == 0)
17fbb0bd 5885 restart = __SIGRTMIN;
d6b0e80f 5886
d6b0e80f 5887 if (cancel == 0)
17fbb0bd 5888 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5889
5890 sigaddset (set, restart);
5891 sigaddset (set, cancel);
5892
5893 /* The GNU/Linux Threads library makes terminating threads send a
5894 special "cancel" signal instead of SIGCHLD. Make sure we catch
5895 those (to prevent them from terminating GDB itself, which is
5896 likely to be their default action) and treat them the same way as
5897 SIGCHLD. */
5898
5899 action.sa_handler = sigchld_handler;
5900 sigemptyset (&action.sa_mask);
58aecb61 5901 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5902 sigaction (cancel, &action, NULL);
5903
5904 /* We block the "cancel" signal throughout this code ... */
5905 sigaddset (&blocked_mask, cancel);
5906 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5907
5908 /* ... except during a sigsuspend. */
5909 sigdelset (&suspend_mask, cancel);
5910}