/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
   2011 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>   /* for MAXPATHLEN */
#include <sys/procfs.h>  /* for elf_gregset etc.  */
#include "elf-bfd.h"     /* for elfcore_write_* */
#include "gregset.h"     /* for gregset */
#include "gdbcore.h"     /* for get_exec_file */
#include <ctype.h>       /* for isdigit */
#include "gdbthread.h"   /* for struct thread_info etc.  */
#include "gdb_stat.h"    /* for struct stat */
#include <fcntl.h>       /* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
#include "linux-osdata.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite adequate.
Prior to version 2.4, Linux can either wait for an event in the main thread,
or in secondary threads.  (2.4 has the __WALL flag).  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid to get
an event in the main process, if any.  Second, we use non-blocking waitpid
with the __WCLONE flag to check for events in cloned processes.  If nothing
is found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it
means something happened to a child process -- and SIGCHLD will be delivered
both for events in the main debugged process and in cloned processes.  As
soon as we know there's an event, we get back to calling non-blocking waitpid
with and without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */
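
/* For illustration only (not part of the original sources): a minimal,
   self-contained sketch of the self-pipe trick described above.  The
   example_* names are hypothetical; the real code below uses
   linux_nat_event_pipe, async_file_mark and sigchld_action.  Kept
   inside #if 0 so it is never compiled.  */
#if 0
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static int example_event_pipe[2];

/* Async-signal-safe SIGCHLD handler: just poke the write end of the
   pipe so that the event loop's select/poll on the read end wakes
   up.  */

static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;
  ssize_t ret;

  do
    ret = write (example_event_pipe[1], "+", 1);
  while (ret == -1 && errno == EINTR);

  errno = saved_errno;
}

/* Create the pipe and install the handler.  */

static void
example_install_sigchld_handler (void)
{
  struct sigaction sa;

  if (pipe (example_event_pipe) != 0)
    return;

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = example_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);
}
#endif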

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (struct siginfo *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

\f
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
           status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          restore_child_signals_mask (&prev_mask);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed "
                   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected "
                   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: "
                       "failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
  linux_enable_tracesysgood (ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
        {
          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep the breakpoints list in sync.  */
              remove_breakpoints_pid (GET_PID (inferior_ptid));
            }

          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from "
                                "child process %d.\n",
                                child_pid);
            }

          ptrace (PTRACE_DETACH, child_pid, 0, 0);
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_pid);

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = ptid_build (child_pid, child_pid, 0);
          add_thread (inferior_ptid);
          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->last_resume_kind = resume_stop;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (solib-svr4) learn about
                 this new process, relocate the cloned exec, pull in
                 shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct lwp_info *parent_lp;
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
          gdb_assert (linux_supports_tracefork_flag >= 0);

          if (linux_supports_tracevforkdone (0))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);
              parent_lp->stopped = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypasses actually
                 resuming the inferior.  */
              parent_lp->status = 0;
              parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              parent_lp->stopped = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_can_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          if (has_vforked)
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "vfork to child process %d.\n"),
                              parent_pid, child_pid);
          else
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "fork to child process %d.\n"),
                              parent_pid, child_pid);
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (solib-svr4) learn about
             this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
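
/* For illustration only (not part of the original sources): a minimal
   sketch of the dual waitpid pattern implied by the first bullet
   above, polling the initial process first and then cloned LWPs
   (which are only reported when __WCLONE is given).  The name
   example_poll_lwp_event is hypothetical; my_waitpid above is the
   real wrapper used throughout this file.  Kept inside #if 0 so it is
   never compiled.  */
#if 0
static int
example_poll_lwp_event (int pid, int *statusp)
{
  int ret;

  /* First poll for an event from the initial (non-cloned) process.  */
  ret = my_waitpid (pid, statusp, WNOHANG);
  if (ret != 0)
    return ret;

  /* Nothing there; poll for events from cloned LWPs.  */
  return my_waitpid (pid, statusp, __WCLONE | WNOHANG);
}
#endif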

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = target_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
                  strsignal (SIGTRAP));
      else
        snprintf (buf, sizeof (buf), "%s (stopped)",
                  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          xfree (lp);
        }
      else
        lpprev = lp;
    }
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  xfree (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   int (*callback) (struct lwp_info *, void *),
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data))
            return lp;
        }
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      while (fgets (buf, sizeof (buf), status_file))
        {
          if (strncmp (buf, "State:", 6) == 0)
            {
              have_state = 1;
              break;
            }
        }
      if (have_state && strstr (buf, "T (stopped)") != NULL)
        retval = 1;
      fclose (status_file);
    }
  return retval;
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process can not transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
         is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
         (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
                            pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
        {
          if (linux_supports_tracefork_flag)
            {
              /* If we haven't stopped all threads when we get here,
                 we may have seen a thread listed in thread_db's list,
                 but not processed the PTRACE_EVENT_CLONE yet.  If
                 that's the case, ignore this new thread, and let
                 normal event handling discover it later.  */
              if (in_pid_list_p (stopped_pids, lwpid))
                {
                  /* We've already seen this thread stop, but we
                     haven't seen the PTRACE_EVENT_CLONE extended
                     event yet.  */
                  restore_child_signals_mask (&prev_mask);
                  return 0;
                }
              else
                {
                  int new_pid;
                  int status;

                  /* See if we've got a stop for this new child
                     pending.  If so, we're already attached.  */
                  new_pid = my_waitpid (lwpid, &status, WNOHANG);
                  if (new_pid == -1 && errno == ECHILD)
                    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
                  if (new_pid != -1)
                    {
                      if (WIFSTOPPED (status))
                        add_to_pid_list (&stopped_pids, lwpid, status);

                      restore_child_signals_mask (&prev_mask);
                      return 1;
                    }
                }
            }

          /* If we fail to attach to the thread, issue a warning,
             but continue.  One way this can happen is if thread
             creation is interrupted; as of Linux kernel 2.6.19, a
             bug may place threads in the thread list and then fail
             to create them.  */
          warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
                   safe_strerror (errno));
          restore_child_signals_mask (&prev_mask);
          return -1;
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
                            target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
        {
          restore_child_signals_mask (&prev_mask);
          return 1;
        }

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
        {
          lp->resumed = 1;
          lp->status = status;
        }

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "LLAL: waitpid %s received %s\n",
                              target_pid_to_str (ptid),
                              status_to_str (status));
        }
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
        lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  restore_child_signals_mask (&prev_mask);
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
                           char *exec_file, char *allargs, char **env,
                           int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning (_("Error disabling address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning (_("Error restoring address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_attach (ops, args, from_tty);

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
                                       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
        {
          int exit_code = WEXITSTATUS (status);

          target_terminal_ours ();
          target_mourn_inferior ();
          if (exit_code == 0)
            error (_("Unable to attach: program exited normally."));
          else
            error (_("Unable to attach: program exited with code %d."),
                   exit_code);
        }
      else if (WIFSIGNALED (status))
        {
          enum target_signal signo;

          target_terminal_ours ();
          target_mourn_inferior ();

          signo = target_signal_from_host (WTERMSIG (status));
          error (_("Unable to attach: program terminated with signal "
                   "%s, %s."),
                 target_signal_to_name (signo),
                 target_signal_to_string (signo));
        }

      internal_error (__FILE__, __LINE__,
                      _("unexpected status %d for PID %ld"),
                      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LNA: waitpid %ld, saving status %s\n",
                        (long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

a0ef4274
DJ
1613/* Get pending status of LP. */
1614static int
1615get_pending_status (struct lwp_info *lp, int *status)
1616{
ca2163eb
PA
1617 enum target_signal signo = TARGET_SIGNAL_0;
1618
1619 /* If we paused threads momentarily, we may have stored pending
1620 events in lp->status or lp->waitstatus (see stop_wait_callback),
1621 and GDB core hasn't seen any signal for those threads.
1622 Otherwise, the last signal reported to the core is found in the
1623 thread object's stop_signal.
1624
1625 There's a corner case that isn't handled here at present. Only
1626 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1627 stop_signal make sense as a real signal to pass to the inferior.
1628 Some catchpoint related events, like
1629 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1630 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1631 those traps are debug API (ptrace in our case) related and
1632 induced; the inferior wouldn't see them if it wasn't being
1633 traced. Hence, we should never pass them to the inferior, even
1634 when set to pass state. Since this corner case isn't handled by
1635 infrun.c when proceeding with a signal, for consistency, neither
1636 do we handle it here (or elsewhere in the file we check for
1637 signal pass state). Normally SIGTRAP isn't set to pass state, so
1638 this is really a corner case. */
1639
1640 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1641 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1642 else if (lp->status)
1643 signo = target_signal_from_host (WSTOPSIG (lp->status));
1644 else if (non_stop && !is_executing (lp->ptid))
1645 {
1646 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1647
16c381f0 1648 signo = tp->suspend.stop_signal;
ca2163eb
PA
1649 }
1650 else if (!non_stop)
a0ef4274 1651 {
ca2163eb
PA
1652 struct target_waitstatus last;
1653 ptid_t last_ptid;
4c28f408 1654
ca2163eb 1655 get_last_target_status (&last_ptid, &last);
4c28f408 1656
ca2163eb
PA
1657 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1658 {
e09875d4 1659 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1660
16c381f0 1661 signo = tp->suspend.stop_signal;
4c28f408 1662 }
ca2163eb 1663 }
4c28f408 1664
ca2163eb 1665 *status = 0;
4c28f408 1666
ca2163eb
PA
1667 if (signo == TARGET_SIGNAL_0)
1668 {
1669 if (debug_linux_nat)
1670 fprintf_unfiltered (gdb_stdlog,
1671 "GPT: lwp %s has no pending signal\n",
1672 target_pid_to_str (lp->ptid));
1673 }
1674 else if (!signal_pass_state (signo))
1675 {
1676 if (debug_linux_nat)
3e43a32a
MS
1677 fprintf_unfiltered (gdb_stdlog,
1678 "GPT: lwp %s had signal %s, "
1679 "but it is in no pass state\n",
ca2163eb
PA
1680 target_pid_to_str (lp->ptid),
1681 target_signal_to_string (signo));
a0ef4274 1682 }
a0ef4274 1683 else
4c28f408 1684 {
ca2163eb
PA
1685 *status = W_STOPCODE (target_signal_to_host (signo));
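 /* Note (descriptive comment, not from the original sources): the
 W_STOPCODE above re-encodes the host signal number in the same
 "stopped by signal" layout that waitpid reports, so WIFSTOPPED and
 WSTOPSIG work on *STATUS just as they would on a status read back
 from the kernel. */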
1686
1687 if (debug_linux_nat)
1688 fprintf_unfiltered (gdb_stdlog,
1689 "GPT: lwp %s has pending signal %s\n",
1690 target_pid_to_str (lp->ptid),
1691 target_signal_to_string (signo));
4c28f408 1692 }
a0ef4274
DJ
1693
1694 return 0;
1695}
1696
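/* iterate_over_lwps callback used by linux_nat_detach: cancel any
   SIGSTOP we sent ourselves by sending SIGCONT, then PTRACE_DETACH
   every LWP except the one whose lwp id equals the process id,
   forwarding any pending signal.  That last (main) LWP is detached
   later by linux_nat_detach itself.  */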
d6b0e80f
AC
1697static int
1698detach_callback (struct lwp_info *lp, void *data)
1699{
1700 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1701
1702 if (debug_linux_nat && lp->status)
1703 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1704 strsignal (WSTOPSIG (lp->status)),
1705 target_pid_to_str (lp->ptid));
1706
a0ef4274
DJ
1707 /* If there is a pending SIGSTOP, get rid of it. */
1708 if (lp->signalled)
d6b0e80f 1709 {
d6b0e80f
AC
1710 if (debug_linux_nat)
1711 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1712 "DC: Sending SIGCONT to %s\n",
1713 target_pid_to_str (lp->ptid));
d6b0e80f 1714
a0ef4274 1715 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1716 lp->signalled = 0;
d6b0e80f
AC
1717 }
1718
1719 /* We don't actually detach from the LWP that has an id equal to the
1720 overall process id just yet. */
1721 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1722 {
a0ef4274
DJ
1723 int status = 0;
1724
1725 /* Pass on any pending signal for this LWP. */
1726 get_pending_status (lp, &status);
1727
d6b0e80f
AC
1728 errno = 0;
1729 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1730 WSTOPSIG (status)) < 0)
8a3fe4f8 1731 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1732 safe_strerror (errno));
1733
1734 if (debug_linux_nat)
1735 fprintf_unfiltered (gdb_stdlog,
1736 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1737 target_pid_to_str (lp->ptid),
7feb7d06 1738 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1739
1740 delete_lwp (lp->ptid);
1741 }
1742
1743 return 0;
1744}
1745
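/* target_ops "to_detach" implementation.  Stop every LWP, detach all
   but the main one via detach_callback, arrange for the main LWP's
   pending signal (if any) to be passed down through ARGS, and finally
   hand the last detach to either linux-fork.c (if other forks or
   checkpoints remain) or the inf-ptrace layer.  */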
1746static void
136d6dae 1747linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1748{
b84876c2 1749 int pid;
a0ef4274 1750 int status;
d90e17a7
PA
1751 struct lwp_info *main_lwp;
1752
1753 pid = GET_PID (inferior_ptid);
a0ef4274 1754
b84876c2
PA
1755 if (target_can_async_p ())
1756 linux_nat_async (NULL, 0);
1757
4c28f408
PA
1758 /* Stop all threads before detaching. ptrace requires that the
 1759 thread is stopped to successfully detach. */
d90e17a7 1760 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1761 /* ... and wait until all of them have reported back that
1762 they're no longer running. */
d90e17a7 1763 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1764
d90e17a7 1765 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1766
1767 /* Only the initial process should be left right now. */
d90e17a7
PA
1768 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1769
1770 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1771
a0ef4274
DJ
1772 /* Pass on any pending signal for the last LWP. */
1773 if ((args == NULL || *args == '\0')
d90e17a7 1774 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1775 && WIFSTOPPED (status))
1776 {
1777 /* Put the signal number in ARGS so that inf_ptrace_detach will
1778 pass it along with PTRACE_DETACH. */
1779 args = alloca (8);
1780 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1781 if (debug_linux_nat)
1782 fprintf_unfiltered (gdb_stdlog,
1783 "LND: Sending signal %s to %s\n",
1784 args,
1785 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1786 }
1787
d90e17a7 1788 delete_lwp (main_lwp->ptid);
b84876c2 1789
7a7d3353
PA
1790 if (forks_exist_p ())
1791 {
1792 /* Multi-fork case. The current inferior_ptid is being detached
1793 from, but there are other viable forks to debug. Detach from
1794 the current fork, and context-switch to the first
1795 available. */
1796 linux_fork_detach (args, from_tty);
1797
1798 if (non_stop && target_can_async_p ())
1799 target_async (inferior_event_handler, 0);
1800 }
1801 else
1802 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1803}
1804
1805/* Resume LP. */
1806
25289eb2
PA
1807static void
1808resume_lwp (struct lwp_info *lp, int step)
d6b0e80f 1809{
25289eb2 1810 if (lp->stopped)
6c95b8df 1811 {
25289eb2
PA
1812 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1813
1814 if (inf->vfork_child != NULL)
1815 {
1816 if (debug_linux_nat)
1817 fprintf_unfiltered (gdb_stdlog,
1818 "RC: Not resuming %s (vfork parent)\n",
1819 target_pid_to_str (lp->ptid));
1820 }
1821 else if (lp->status == 0
1822 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1823 {
1824 if (debug_linux_nat)
1825 fprintf_unfiltered (gdb_stdlog,
1826 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1827 target_pid_to_str (lp->ptid));
1828
1829 linux_ops->to_resume (linux_ops,
1830 pid_to_ptid (GET_LWP (lp->ptid)),
1831 step, TARGET_SIGNAL_0);
25289eb2
PA
1832 lp->stopped = 0;
1833 lp->step = step;
1834 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1835 lp->stopped_by_watchpoint = 0;
1836 }
1837 else
1838 {
1839 if (debug_linux_nat)
1840 fprintf_unfiltered (gdb_stdlog,
1841 "RC: Not resuming sibling %s (has pending)\n",
1842 target_pid_to_str (lp->ptid));
1843 }
6c95b8df 1844 }
25289eb2 1845 else
d6b0e80f 1846 {
d90e17a7
PA
1847 if (debug_linux_nat)
1848 fprintf_unfiltered (gdb_stdlog,
25289eb2 1849 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1850 target_pid_to_str (lp->ptid));
d6b0e80f 1851 }
25289eb2 1852}
d6b0e80f 1853
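/* iterate_over_lwps callback: resume LP without stepping and without
   a signal, provided it is stopped and has nothing pending (see
   resume_lwp above).  */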
25289eb2
PA
1854static int
1855resume_callback (struct lwp_info *lp, void *data)
1856{
1857 resume_lwp (lp, 0);
d6b0e80f
AC
1858 return 0;
1859}
1860
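/* iterate_over_lwps callback: mark LP as not resumed and record that
   the core last wanted it stopped.  */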
1861static int
1862resume_clear_callback (struct lwp_info *lp, void *data)
1863{
1864 lp->resumed = 0;
25289eb2 1865 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1866 return 0;
1867}
1868
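/* iterate_over_lwps callback: mark LP as resumed and record that the
   core last wanted it running.  */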
1869static int
1870resume_set_callback (struct lwp_info *lp, void *data)
1871{
1872 lp->resumed = 1;
25289eb2 1873 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1874 return 0;
1875}
1876
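/* target_ops "to_resume" implementation.  Resume the LWPs selected by
   PTID with SIGNO, single-stepping the event thread if STEP is set.
   If the event thread already has an interesting event pending, leave
   the inferior untouched and let linux_nat_wait report that pending
   event instead.  */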
1877static void
28439f5e
PA
1878linux_nat_resume (struct target_ops *ops,
1879 ptid_t ptid, int step, enum target_signal signo)
d6b0e80f 1880{
7feb7d06 1881 sigset_t prev_mask;
d6b0e80f 1882 struct lwp_info *lp;
d90e17a7 1883 int resume_many;
d6b0e80f 1884
76f50ad1
DJ
1885 if (debug_linux_nat)
1886 fprintf_unfiltered (gdb_stdlog,
1887 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1888 step ? "step" : "resume",
1889 target_pid_to_str (ptid),
423ec54c
JK
1890 (signo != TARGET_SIGNAL_0
1891 ? strsignal (target_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1892 target_pid_to_str (inferior_ptid));
1893
7feb7d06 1894 block_child_signals (&prev_mask);
b84876c2 1895
d6b0e80f 1896 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1897 resume_many = (ptid_equal (minus_one_ptid, ptid)
1898 || ptid_is_pid (ptid));
4c28f408 1899
e3e9f5a2
PA
1900 /* Mark the lwps we're resuming as resumed. */
1901 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1902
d90e17a7
PA
1903 /* See if it's the current inferior that should be handled
1904 specially. */
1905 if (resume_many)
1906 lp = find_lwp_pid (inferior_ptid);
1907 else
1908 lp = find_lwp_pid (ptid);
9f0bdab8 1909 gdb_assert (lp != NULL);
d6b0e80f 1910
9f0bdab8
DJ
1911 /* Remember if we're stepping. */
1912 lp->step = step;
25289eb2 1913 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1914
9f0bdab8
DJ
1915 /* If we have a pending wait status for this thread, there is no
1916 point in resuming the process. But first make sure that
1917 linux_nat_wait won't preemptively handle the event - we
1918 should never take this short-circuit if we are going to
1919 leave LP running, since we have skipped resuming all the
1920 other threads. This bit of code needs to be synchronized
1921 with linux_nat_wait. */
76f50ad1 1922
9f0bdab8
DJ
1923 if (lp->status && WIFSTOPPED (lp->status))
1924 {
2455069d
UW
1925 if (!lp->step
1926 && WSTOPSIG (lp->status)
1927 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1928 {
9f0bdab8
DJ
1929 if (debug_linux_nat)
1930 fprintf_unfiltered (gdb_stdlog,
1931 "LLR: Not short circuiting for ignored "
1932 "status 0x%x\n", lp->status);
1933
d6b0e80f
AC
1934 /* FIXME: What should we do if we are supposed to continue
1935 this thread with a signal? */
1936 gdb_assert (signo == TARGET_SIGNAL_0);
2455069d 1937 signo = target_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1938 lp->status = 0;
1939 }
1940 }
76f50ad1 1941
6c95b8df 1942 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
1943 {
1944 /* FIXME: What should we do if we are supposed to continue
1945 this thread with a signal? */
1946 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1947
9f0bdab8
DJ
1948 if (debug_linux_nat)
1949 fprintf_unfiltered (gdb_stdlog,
1950 "LLR: Short circuiting for status 0x%x\n",
1951 lp->status);
d6b0e80f 1952
7feb7d06
PA
1953 restore_child_signals_mask (&prev_mask);
1954 if (target_can_async_p ())
1955 {
1956 target_async (inferior_event_handler, 0);
1957 /* Tell the event loop we have something to process. */
1958 async_file_mark ();
1959 }
9f0bdab8 1960 return;
d6b0e80f
AC
1961 }
1962
9f0bdab8
DJ
1963 /* Mark LWP as not stopped to prevent it from being continued by
1964 resume_callback. */
1965 lp->stopped = 0;
1966
d90e17a7
PA
1967 if (resume_many)
1968 iterate_over_lwps (ptid, resume_callback, NULL);
1969
1970 /* Convert to something the lower layer understands. */
1971 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1972
28439f5e 1973 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 1974 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 1975 lp->stopped_by_watchpoint = 0;
9f0bdab8 1976
d6b0e80f
AC
1977 if (debug_linux_nat)
1978 fprintf_unfiltered (gdb_stdlog,
1979 "LLR: %s %s, %s (resume event thread)\n",
1980 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1981 target_pid_to_str (ptid),
423ec54c
JK
1982 (signo != TARGET_SIGNAL_0
1983 ? strsignal (target_signal_to_host (signo)) : "0"));
b84876c2 1984
7feb7d06 1985 restore_child_signals_mask (&prev_mask);
b84876c2 1986 if (target_can_async_p ())
8ea051c5 1987 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1988}
1989
c5f62d5f 1990/* Send a signal to an LWP. */
d6b0e80f
AC
1991
1992static int
1993kill_lwp (int lwpid, int signo)
1994{
c5f62d5f
DE
1995 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1996 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
1997
1998#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
1999 {
2000 static int tkill_failed;
2001
2002 if (!tkill_failed)
2003 {
2004 int ret;
2005
2006 errno = 0;
2007 ret = syscall (__NR_tkill, lwpid, signo);
2008 if (errno != ENOSYS)
2009 return ret;
2010 tkill_failed = 1;
2011 }
2012 }
d6b0e80f
AC
2013#endif
2014
2015 return kill (lwpid, signo);
2016}
2017
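/* Usage note: tkill delivers the signal to the single thread with
   kernel id LWPID, while kill(2) targets a whole thread group; on old
   LinuxThreads systems every thread has its own pid, so falling back
   to kill is safe there.  Callers in this file typically invoke it as
   "kill_lwp (GET_LWP (lp->ptid), SIGSTOP)" (see stop_callback).  */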
ca2163eb
PA
2018/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2019 event, check if the core is interested in it: if not, ignore the
2020 event, and keep waiting; otherwise, we need to toggle the LWP's
2021 syscall entry/exit status, since the ptrace event itself doesn't
2022 indicate it, and report the trap to higher layers. */
2023
2024static int
2025linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2026{
2027 struct target_waitstatus *ourstatus = &lp->waitstatus;
2028 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2029 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2030
2031 if (stopping)
2032 {
2033 /* If we're stopping threads, there's a SIGSTOP pending, which
2034 makes it so that the LWP reports an immediate syscall return,
2035 followed by the SIGSTOP. Skip seeing that "return" using
2036 PTRACE_CONT directly, and let stop_wait_callback collect the
 2037 SIGSTOP. Later, when the thread is resumed, a new syscall
 2038 entry event will be reported. If we didn't do this (and returned 0), we'd
2039 leave a syscall entry pending, and our caller, by using
2040 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2041 itself. Later, when the user re-resumes this LWP, we'd see
2042 another syscall entry event and we'd mistake it for a return.
2043
2044 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2045 (leaving immediately with LWP->signalled set, without issuing
2046 a PTRACE_CONT), it would still be problematic to leave this
2047 syscall enter pending, as later when the thread is resumed,
2048 it would then see the same syscall exit mentioned above,
2049 followed by the delayed SIGSTOP, while the syscall didn't
2050 actually get to execute. It seems it would be even more
2051 confusing to the user. */
2052
2053 if (debug_linux_nat)
2054 fprintf_unfiltered (gdb_stdlog,
2055 "LHST: ignoring syscall %d "
2056 "for LWP %ld (stopping threads), "
2057 "resuming with PTRACE_CONT for SIGSTOP\n",
2058 syscall_number,
2059 GET_LWP (lp->ptid));
2060
2061 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2062 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2063 return 1;
2064 }
2065
2066 if (catch_syscall_enabled ())
2067 {
2068 /* Always update the entry/return state, even if this particular
2069 syscall isn't interesting to the core now. In async mode,
2070 the user could install a new catchpoint for this syscall
2071 between syscall enter/return, and we'll need to know to
2072 report a syscall return if that happens. */
2073 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2074 ? TARGET_WAITKIND_SYSCALL_RETURN
2075 : TARGET_WAITKIND_SYSCALL_ENTRY);
2076
2077 if (catching_syscall_number (syscall_number))
2078 {
2079 /* Alright, an event to report. */
2080 ourstatus->kind = lp->syscall_state;
2081 ourstatus->value.syscall_number = syscall_number;
2082
2083 if (debug_linux_nat)
2084 fprintf_unfiltered (gdb_stdlog,
2085 "LHST: stopping for %s of syscall %d"
2086 " for LWP %ld\n",
3e43a32a
MS
2087 lp->syscall_state
2088 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2089 ? "entry" : "return",
2090 syscall_number,
2091 GET_LWP (lp->ptid));
2092 return 0;
2093 }
2094
2095 if (debug_linux_nat)
2096 fprintf_unfiltered (gdb_stdlog,
2097 "LHST: ignoring %s of syscall %d "
2098 "for LWP %ld\n",
2099 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2100 ? "entry" : "return",
2101 syscall_number,
2102 GET_LWP (lp->ptid));
2103 }
2104 else
2105 {
2106 /* If we had been syscall tracing, and hence used PT_SYSCALL
2107 before on this LWP, it could happen that the user removes all
2108 syscall catchpoints before we get to process this event.
2109 There are two noteworthy issues here:
2110
2111 - When stopped at a syscall entry event, resuming with
2112 PT_STEP still resumes executing the syscall and reports a
2113 syscall return.
2114
 2115 - Only PT_SYSCALL catches syscall enters. So if we last
 2116 single-stepped this thread, then this event can't be a
 2117 syscall enter; having just single-stepped, this event
 2118 has to be a syscall exit.
2119
2120 The points above mean that the next resume, be it PT_STEP or
2121 PT_CONTINUE, can not trigger a syscall trace event. */
2122 if (debug_linux_nat)
2123 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2124 "LHST: caught syscall event "
2125 "with no syscall catchpoints."
ca2163eb
PA
2126 " %d for LWP %ld, ignoring\n",
2127 syscall_number,
2128 GET_LWP (lp->ptid));
2129 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2130 }
2131
2132 /* The core isn't interested in this event. For efficiency, avoid
2133 stopping all threads only to have the core resume them all again.
2134 Since we're not stopping threads, if we're still syscall tracing
2135 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2136 subsequent syscall. Simply resume using the inf-ptrace layer,
2137 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2138
2139 /* Note that gdbarch_get_syscall_number may access registers, hence
2140 fill a regcache. */
2141 registers_changed ();
2142 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2143 lp->step, TARGET_SIGNAL_0);
2144 return 1;
2145}
2146
3d799a95
DJ
2147/* Handle a GNU/Linux extended wait response. If we see a clone
2148 event, we need to add the new LWP to our list (and not report the
2149 trap to higher layers). This function returns non-zero if the
2150 event should be ignored and we should wait again. If STOPPING is
2151 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2152
2153static int
3d799a95
DJ
2154linux_handle_extended_wait (struct lwp_info *lp, int status,
2155 int stopping)
d6b0e80f 2156{
3d799a95
DJ
2157 int pid = GET_LWP (lp->ptid);
2158 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2159 int event = status >> 16;
d6b0e80f 2160
3d799a95
DJ
2161 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2162 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2163 {
3d799a95
DJ
2164 unsigned long new_pid;
2165 int ret;
2166
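 /* For fork/vfork/clone events, PTRACE_GETEVENTMSG retrieves the
 kernel id of the new child (or new thread) into NEW_PID. */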
2167 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2168
3d799a95
DJ
2169 /* If we haven't already seen the new PID stop, wait for it now. */
2170 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2171 {
2172 /* The new child has a pending SIGSTOP. We can't affect it until it
2173 hits the SIGSTOP, but we're already attached. */
2174 ret = my_waitpid (new_pid, &status,
2175 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2176 if (ret == -1)
2177 perror_with_name (_("waiting for new child"));
2178 else if (ret != new_pid)
2179 internal_error (__FILE__, __LINE__,
2180 _("wait returned unexpected PID %d"), ret);
2181 else if (!WIFSTOPPED (status))
2182 internal_error (__FILE__, __LINE__,
2183 _("wait returned unexpected status 0x%x"), status);
2184 }
2185
3a3e9ee3 2186 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2187
2277426b
PA
2188 if (event == PTRACE_EVENT_FORK
2189 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2190 {
2277426b
PA
2191 /* Handle checkpointing by linux-fork.c here as a special
2192 case. We don't want the follow-fork-mode or 'catch fork'
2193 to interfere with this. */
2194
2195 /* This won't actually modify the breakpoint list, but will
2196 physically remove the breakpoints from the child. */
2197 detach_breakpoints (new_pid);
2198
2199 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2200 if (!find_fork_pid (new_pid))
2201 add_fork (new_pid);
2277426b
PA
2202
2203 /* Report as spurious, so that infrun doesn't want to follow
2204 this fork. We're actually doing an infcall in
2205 linux-fork.c. */
2206 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2207 linux_enable_event_reporting (pid_to_ptid (new_pid));
2208
2209 /* Report the stop to the core. */
2210 return 0;
2211 }
2212
3d799a95
DJ
2213 if (event == PTRACE_EVENT_FORK)
2214 ourstatus->kind = TARGET_WAITKIND_FORKED;
2215 else if (event == PTRACE_EVENT_VFORK)
2216 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2217 else
3d799a95 2218 {
78768c4a
JK
2219 struct lwp_info *new_lp;
2220
3d799a95 2221 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2222
d90e17a7 2223 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2224 new_lp->cloned = 1;
4c28f408 2225 new_lp->stopped = 1;
d6b0e80f 2226
3d799a95
DJ
2227 if (WSTOPSIG (status) != SIGSTOP)
2228 {
 2229 /* This can happen if someone starts sending signals with
 2230 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
 2231 thread before it gets a chance to run.
2232 This is an unlikely case, and harder to handle for
2233 fork / vfork than for clone, so we do not try - but
2234 we handle it for clone events here. We'll send
2235 the other signal on to the thread below. */
2236
2237 new_lp->signalled = 1;
2238 }
2239 else
2240 status = 0;
d6b0e80f 2241
4c28f408 2242 if (non_stop)
3d799a95 2243 {
4c28f408
PA
2244 /* Add the new thread to GDB's lists as soon as possible
2245 so that:
2246
2247 1) the frontend doesn't have to wait for a stop to
2248 display them, and,
2249
2250 2) we tag it with the correct running state. */
2251
2252 /* If the thread_db layer is active, let it know about
2253 this new thread, and add it to GDB's list. */
2254 if (!thread_db_attach_lwp (new_lp->ptid))
2255 {
2256 /* We're not using thread_db. Add it to GDB's
2257 list. */
2258 target_post_attach (GET_LWP (new_lp->ptid));
2259 add_thread (new_lp->ptid);
2260 }
2261
2262 if (!stopping)
2263 {
2264 set_running (new_lp->ptid, 1);
2265 set_executing (new_lp->ptid, 1);
2266 }
2267 }
2268
ca2163eb
PA
2269 /* Note the need to use the low target ops to resume, to
2270 handle resuming with PT_SYSCALL if we have syscall
2271 catchpoints. */
4c28f408
PA
2272 if (!stopping)
2273 {
423ec54c 2274 enum target_signal signo;
ca2163eb 2275
4c28f408 2276 new_lp->stopped = 0;
3d799a95 2277 new_lp->resumed = 1;
25289eb2 2278 new_lp->last_resume_kind = resume_continue;
ca2163eb
PA
2279
2280 signo = (status
2281 ? target_signal_from_host (WSTOPSIG (status))
2282 : TARGET_SIGNAL_0);
2283
2284 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2285 0, signo);
3d799a95 2286 }
ad34eb2f
JK
2287 else
2288 {
2289 if (status != 0)
2290 {
2291 /* We created NEW_LP so it cannot yet contain STATUS. */
2292 gdb_assert (new_lp->status == 0);
2293
2294 /* Save the wait status to report later. */
2295 if (debug_linux_nat)
2296 fprintf_unfiltered (gdb_stdlog,
2297 "LHEW: waitpid of new LWP %ld, "
2298 "saving status %s\n",
2299 (long) GET_LWP (new_lp->ptid),
2300 status_to_str (status));
2301 new_lp->status = status;
2302 }
2303 }
d6b0e80f 2304
3d799a95
DJ
2305 if (debug_linux_nat)
2306 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2307 "LHEW: Got clone event "
2308 "from LWP %ld, resuming\n",
3d799a95 2309 GET_LWP (lp->ptid));
ca2163eb
PA
2310 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2311 0, TARGET_SIGNAL_0);
3d799a95
DJ
2312
2313 return 1;
2314 }
2315
2316 return 0;
d6b0e80f
AC
2317 }
2318
3d799a95
DJ
2319 if (event == PTRACE_EVENT_EXEC)
2320 {
a75724bc
PA
2321 if (debug_linux_nat)
2322 fprintf_unfiltered (gdb_stdlog,
2323 "LHEW: Got exec event from LWP %ld\n",
2324 GET_LWP (lp->ptid));
2325
3d799a95
DJ
2326 ourstatus->kind = TARGET_WAITKIND_EXECD;
2327 ourstatus->value.execd_pathname
6d8fd2b7 2328 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2329
6c95b8df
PA
2330 return 0;
2331 }
2332
2333 if (event == PTRACE_EVENT_VFORK_DONE)
2334 {
2335 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2336 {
6c95b8df 2337 if (debug_linux_nat)
3e43a32a
MS
2338 fprintf_unfiltered (gdb_stdlog,
2339 "LHEW: Got expected PTRACE_EVENT_"
2340 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2341 GET_LWP (lp->ptid));
3d799a95 2342
6c95b8df
PA
2343 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2344 return 0;
3d799a95
DJ
2345 }
2346
6c95b8df 2347 if (debug_linux_nat)
3e43a32a
MS
2348 fprintf_unfiltered (gdb_stdlog,
2349 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2350 "from LWP %ld: resuming\n",
6c95b8df
PA
2351 GET_LWP (lp->ptid));
2352 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2353 return 1;
3d799a95
DJ
2354 }
2355
2356 internal_error (__FILE__, __LINE__,
2357 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2358}
2359
432b4d03
JK
2360/* Return non-zero if LWP is a zombie. */
2361
2362static int
2363linux_lwp_is_zombie (long lwp)
2364{
2365 char buffer[MAXPATHLEN];
2366 FILE *procfile;
2367 int retval = 0;
2368
07e78767 2369 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
432b4d03
JK
2370 procfile = fopen (buffer, "r");
2371 if (procfile == NULL)
2372 {
2373 warning (_("unable to open /proc file '%s'"), buffer);
2374 return 0;
2375 }
2376 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2377 if (strcmp (buffer, "State:\tZ (zombie)\n") == 0)
2378 {
2379 retval = 1;
2380 break;
2381 }
2382 fclose (procfile);
2383
2384 return retval;
2385}
2386
d6b0e80f
AC
2387/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2388 exited. */
2389
2390static int
2391wait_lwp (struct lwp_info *lp)
2392{
2393 pid_t pid;
432b4d03 2394 int status = 0;
d6b0e80f 2395 int thread_dead = 0;
432b4d03 2396 sigset_t prev_mask;
d6b0e80f
AC
2397
2398 gdb_assert (!lp->stopped);
2399 gdb_assert (lp->status == 0);
2400
432b4d03
JK
2401 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2402 block_child_signals (&prev_mask);
2403
2404 for (;;)
d6b0e80f 2405 {
432b4d03
JK
2406 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2407 was right and we should just call sigsuspend. */
2408
2409 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
d6b0e80f 2410 if (pid == -1 && errno == ECHILD)
432b4d03 2411 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2412 if (pid == -1 && errno == ECHILD)
2413 {
2414 /* The thread has previously exited. We need to delete it
2415 now because, for some vendor 2.4 kernels with NPTL
2416 support backported, there won't be an exit event unless
2417 it is the main thread. 2.6 kernels will report an exit
2418 event for each thread that exits, as expected. */
2419 thread_dead = 1;
2420 if (debug_linux_nat)
2421 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2422 target_pid_to_str (lp->ptid));
2423 }
432b4d03
JK
2424 if (pid != 0)
2425 break;
2426
2427 /* Bugs 10970, 12702.
2428 Thread group leader may have exited in which case we'll lock up in
2429 waitpid if there are other threads, even if they are all zombies too.
2430 Basically, we're not supposed to use waitpid this way.
2431 __WCLONE is not applicable for the leader so we can't use that.
2432 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2433 process; it gets ESRCH both for the zombie and for running processes.
2434
2435 As a workaround, check if we're waiting for the thread group leader and
2436 if it's a zombie, and avoid calling waitpid if it is.
2437
 2438 This is racy: what if the tgl becomes a zombie right after we check?
 2439 Therefore always use WNOHANG with sigsuspend - it is equivalent to
 2440 a blocking waitpid, but linux_lwp_is_zombie is safe this way. */
2441
2442 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2443 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
d6b0e80f 2444 {
d6b0e80f
AC
2445 thread_dead = 1;
2446 if (debug_linux_nat)
432b4d03
JK
2447 fprintf_unfiltered (gdb_stdlog,
2448 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2449 target_pid_to_str (lp->ptid));
432b4d03 2450 break;
d6b0e80f 2451 }
432b4d03
JK
2452
2453 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2454 get invoked despite our caller had them intentionally blocked by
2455 block_child_signals. This is sensitive only to the loop of
2456 linux_nat_wait_1 and there if we get called my_waitpid gets called
2457 again before it gets to sigsuspend so we can safely let the handlers
2458 get executed here. */
2459
2460 sigsuspend (&suspend_mask);
2461 }
2462
2463 restore_child_signals_mask (&prev_mask);
2464
d6b0e80f
AC
2465 if (!thread_dead)
2466 {
2467 gdb_assert (pid == GET_LWP (lp->ptid));
2468
2469 if (debug_linux_nat)
2470 {
2471 fprintf_unfiltered (gdb_stdlog,
2472 "WL: waitpid %s received %s\n",
2473 target_pid_to_str (lp->ptid),
2474 status_to_str (status));
2475 }
d6b0e80f 2476
a9f4bb21
PA
2477 /* Check if the thread has exited. */
2478 if (WIFEXITED (status) || WIFSIGNALED (status))
2479 {
2480 thread_dead = 1;
2481 if (debug_linux_nat)
2482 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2483 target_pid_to_str (lp->ptid));
2484 }
d6b0e80f
AC
2485 }
2486
2487 if (thread_dead)
2488 {
e26af52f 2489 exit_lwp (lp);
d6b0e80f
AC
2490 return 0;
2491 }
2492
2493 gdb_assert (WIFSTOPPED (status));
2494
ca2163eb
PA
2495 /* Handle GNU/Linux's syscall SIGTRAPs. */
2496 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2497 {
2498 /* No longer need the sysgood bit. The ptrace event ends up
2499 recorded in lp->waitstatus if we care for it. We can carry
2500 on handling the event like a regular SIGTRAP from here
2501 on. */
2502 status = W_STOPCODE (SIGTRAP);
2503 if (linux_handle_syscall_trap (lp, 1))
2504 return wait_lwp (lp);
2505 }
2506
d6b0e80f
AC
2507 /* Handle GNU/Linux's extended waitstatus for trace events. */
2508 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2509 {
2510 if (debug_linux_nat)
2511 fprintf_unfiltered (gdb_stdlog,
2512 "WL: Handling extended status 0x%06x\n",
2513 status);
3d799a95 2514 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2515 return wait_lwp (lp);
2516 }
2517
2518 return status;
2519}
2520
9f0bdab8
DJ
2521/* Save the most recent siginfo for LP. This is currently only called
2522 for SIGTRAP; some ports use the si_addr field for
2523 target_stopped_data_address. In the future, it may also be used to
2524 restore the siginfo of requeued signals. */
2525
2526static void
2527save_siginfo (struct lwp_info *lp)
2528{
2529 errno = 0;
2530 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2531 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2532
2533 if (errno != 0)
2534 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2535}
2536
d6b0e80f
AC
2537/* Send a SIGSTOP to LP. */
2538
2539static int
2540stop_callback (struct lwp_info *lp, void *data)
2541{
2542 if (!lp->stopped && !lp->signalled)
2543 {
2544 int ret;
2545
2546 if (debug_linux_nat)
2547 {
2548 fprintf_unfiltered (gdb_stdlog,
2549 "SC: kill %s **<SIGSTOP>**\n",
2550 target_pid_to_str (lp->ptid));
2551 }
2552 errno = 0;
2553 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2554 if (debug_linux_nat)
2555 {
2556 fprintf_unfiltered (gdb_stdlog,
2557 "SC: lwp kill %d %s\n",
2558 ret,
2559 errno ? safe_strerror (errno) : "ERRNO-OK");
2560 }
2561
2562 lp->signalled = 1;
2563 gdb_assert (lp->status == 0);
2564 }
2565
2566 return 0;
2567}
2568
57380f4e 2569/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2570
2571static int
57380f4e
DJ
2572linux_nat_has_pending_sigint (int pid)
2573{
2574 sigset_t pending, blocked, ignored;
57380f4e
DJ
2575
2576 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2577
2578 if (sigismember (&pending, SIGINT)
2579 && !sigismember (&ignored, SIGINT))
2580 return 1;
2581
2582 return 0;
2583}
2584
2585/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2586
2587static int
2588set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2589{
57380f4e
DJ
2590 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2591 flag to consume the next one. */
2592 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2593 && WSTOPSIG (lp->status) == SIGINT)
2594 lp->status = 0;
2595 else
2596 lp->ignore_sigint = 1;
2597
2598 return 0;
2599}
2600
2601/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2602 This function is called after we know the LWP has stopped; if the LWP
2603 stopped before the expected SIGINT was delivered, then it will never have
2604 arrived. Also, if the signal was delivered to a shared queue and consumed
2605 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2606
57380f4e
DJ
2607static void
2608maybe_clear_ignore_sigint (struct lwp_info *lp)
2609{
2610 if (!lp->ignore_sigint)
2611 return;
2612
2613 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2614 {
2615 if (debug_linux_nat)
2616 fprintf_unfiltered (gdb_stdlog,
2617 "MCIS: Clearing bogus flag for %s\n",
2618 target_pid_to_str (lp->ptid));
2619 lp->ignore_sigint = 0;
2620 }
2621}
2622
ebec9a0f
PA
2623/* Fetch the possible triggered data watchpoint info and store it in
2624 LP.
2625
2626 On some archs, like x86, that use debug registers to set
2627 watchpoints, it's possible that the way to know which watched
2628 address trapped, is to check the register that is used to select
2629 which address to watch. Problem is, between setting the watchpoint
2630 and reading back which data address trapped, the user may change
2631 the set of watchpoints, and, as a consequence, GDB changes the
2632 debug registers in the inferior. To avoid reading back a stale
2633 stopped-data-address when that happens, we cache in LP the fact
2634 that a watchpoint trapped, and the corresponding data address, as
2635 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2636 registers meanwhile, we have the cached data we can rely on. */
2637
2638static void
2639save_sigtrap (struct lwp_info *lp)
2640{
2641 struct cleanup *old_chain;
2642
2643 if (linux_ops->to_stopped_by_watchpoint == NULL)
2644 {
2645 lp->stopped_by_watchpoint = 0;
2646 return;
2647 }
2648
2649 old_chain = save_inferior_ptid ();
2650 inferior_ptid = lp->ptid;
2651
2652 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2653
2654 if (lp->stopped_by_watchpoint)
2655 {
2656 if (linux_ops->to_stopped_data_address != NULL)
2657 lp->stopped_data_address_p =
2658 linux_ops->to_stopped_data_address (&current_target,
2659 &lp->stopped_data_address);
2660 else
2661 lp->stopped_data_address_p = 0;
2662 }
2663
2664 do_cleanups (old_chain);
2665}
2666
2667/* See save_sigtrap. */
2668
2669static int
2670linux_nat_stopped_by_watchpoint (void)
2671{
2672 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2673
2674 gdb_assert (lp != NULL);
2675
2676 return lp->stopped_by_watchpoint;
2677}
2678
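/* See save_sigtrap.  Store the cached data address of the last
   watchpoint hit in *ADDR_P; the return value says whether that
   address is actually valid.  */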
2679static int
2680linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2681{
2682 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2683
2684 gdb_assert (lp != NULL);
2685
2686 *addr_p = lp->stopped_data_address;
2687
2688 return lp->stopped_data_address_p;
2689}
2690
26ab7092
JK
2691/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2692
2693static int
2694sigtrap_is_event (int status)
2695{
2696 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2697}
2698
2699/* SIGTRAP-like events recognizer. */
2700
2701static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2702
00390b84
JK
2703/* Check for SIGTRAP-like events in LP. */
2704
2705static int
2706linux_nat_lp_status_is_event (struct lwp_info *lp)
2707{
2708 /* We check for lp->waitstatus in addition to lp->status, because we can
2709 have pending process exits recorded in lp->status
2710 and W_EXITCODE(0,0) == 0. We should probably have an additional
2711 lp->status_p flag. */
2712
2713 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2714 && linux_nat_status_is_event (lp->status));
2715}
2716
26ab7092
JK
2717/* Set alternative SIGTRAP-like events recognizer. If
2718 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2719 applied. */
2720
2721void
2722linux_nat_set_status_is_event (struct target_ops *t,
2723 int (*status_is_event) (int status))
2724{
2725 linux_nat_status_is_event = status_is_event;
2726}
2727
57380f4e
DJ
2728/* Wait until LP is stopped. */
2729
2730static int
2731stop_wait_callback (struct lwp_info *lp, void *data)
2732{
6c95b8df
PA
2733 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2734
2735 /* If this is a vfork parent, bail out, it is not going to report
2736 any SIGSTOP until the vfork is done with. */
2737 if (inf->vfork_child != NULL)
2738 return 0;
2739
d6b0e80f
AC
2740 if (!lp->stopped)
2741 {
2742 int status;
2743
2744 status = wait_lwp (lp);
2745 if (status == 0)
2746 return 0;
2747
57380f4e
DJ
2748 if (lp->ignore_sigint && WIFSTOPPED (status)
2749 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2750 {
57380f4e 2751 lp->ignore_sigint = 0;
d6b0e80f
AC
2752
2753 errno = 0;
2754 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2755 if (debug_linux_nat)
2756 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2757 "PTRACE_CONT %s, 0, 0 (%s) "
2758 "(discarding SIGINT)\n",
d6b0e80f
AC
2759 target_pid_to_str (lp->ptid),
2760 errno ? safe_strerror (errno) : "OK");
2761
57380f4e 2762 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2763 }
2764
57380f4e
DJ
2765 maybe_clear_ignore_sigint (lp);
2766
d6b0e80f
AC
2767 if (WSTOPSIG (status) != SIGSTOP)
2768 {
26ab7092 2769 if (linux_nat_status_is_event (status))
d6b0e80f
AC
2770 {
2771 /* If a LWP other than the LWP that we're reporting an
2772 event for has hit a GDB breakpoint (as opposed to
2773 some random trap signal), then just arrange for it to
2774 hit it again later. We don't keep the SIGTRAP status
2775 and don't forward the SIGTRAP signal to the LWP. We
2776 will handle the current event, eventually we will
2777 resume all LWPs, and this one will get its breakpoint
2778 trap again.
2779
2780 If we do not do this, then we run the risk that the
2781 user will delete or disable the breakpoint, but the
2782 thread will have already tripped on it. */
2783
9f0bdab8
DJ
2784 /* Save the trap's siginfo in case we need it later. */
2785 save_siginfo (lp);
2786
ebec9a0f
PA
2787 save_sigtrap (lp);
2788
1777feb0 2789 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2790 errno = 0;
2791 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2792 if (debug_linux_nat)
2793 {
2794 fprintf_unfiltered (gdb_stdlog,
2795 "PTRACE_CONT %s, 0, 0 (%s)\n",
2796 target_pid_to_str (lp->ptid),
2797 errno ? safe_strerror (errno) : "OK");
2798
2799 fprintf_unfiltered (gdb_stdlog,
2800 "SWC: Candidate SIGTRAP event in %s\n",
2801 target_pid_to_str (lp->ptid));
2802 }
710151dd 2803 /* Hold this event/waitstatus while we check to see if
1777feb0 2804 there are any more (we still want to get that SIGSTOP). */
57380f4e 2805 stop_wait_callback (lp, NULL);
710151dd 2806
7feb7d06
PA
2807 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2808 there's another event, throw it back into the
1777feb0 2809 queue. */
7feb7d06 2810 if (lp->status)
710151dd 2811 {
7feb7d06
PA
2812 if (debug_linux_nat)
2813 fprintf_unfiltered (gdb_stdlog,
2814 "SWC: kill %s, %s\n",
2815 target_pid_to_str (lp->ptid),
2816 status_to_str ((int) status));
2817 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2818 }
7feb7d06 2819
1777feb0 2820 /* Save the sigtrap event. */
7feb7d06 2821 lp->status = status;
d6b0e80f
AC
2822 return 0;
2823 }
2824 else
2825 {
2826 /* The thread was stopped with a signal other than
1777feb0 2827 SIGSTOP, and didn't accidentally trip a breakpoint. */
d6b0e80f
AC
2828
2829 if (debug_linux_nat)
2830 {
2831 fprintf_unfiltered (gdb_stdlog,
2832 "SWC: Pending event %s in %s\n",
2833 status_to_str ((int) status),
2834 target_pid_to_str (lp->ptid));
2835 }
1777feb0 2836 /* Now resume this LWP and get the SIGSTOP event. */
d6b0e80f
AC
2837 errno = 0;
2838 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2839 if (debug_linux_nat)
2840 fprintf_unfiltered (gdb_stdlog,
2841 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2842 target_pid_to_str (lp->ptid),
2843 errno ? safe_strerror (errno) : "OK");
2844
2845 /* Hold this event/waitstatus while we check to see if
1777feb0 2846 there are any more (we still want to get that SIGSTOP). */
57380f4e 2847 stop_wait_callback (lp, NULL);
710151dd
PA
2848
2849 /* If the lp->status field is still empty, use it to
2850 hold this event. If not, then this event must be
2851 returned to the event queue of the LWP. */
7feb7d06 2852 if (lp->status)
d6b0e80f
AC
2853 {
2854 if (debug_linux_nat)
2855 {
2856 fprintf_unfiltered (gdb_stdlog,
2857 "SWC: kill %s, %s\n",
2858 target_pid_to_str (lp->ptid),
2859 status_to_str ((int) status));
2860 }
2861 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2862 }
710151dd
PA
2863 else
2864 lp->status = status;
d6b0e80f
AC
2865 return 0;
2866 }
2867 }
2868 else
2869 {
2870 /* We caught the SIGSTOP that we intended to catch, so
2871 there's no SIGSTOP pending. */
2872 lp->stopped = 1;
2873 lp->signalled = 0;
2874 }
2875 }
2876
2877 return 0;
2878}
2879
d6b0e80f
AC
2880/* Return non-zero if LP has a wait status pending. */
2881
2882static int
2883status_callback (struct lwp_info *lp, void *data)
2884{
2885 /* Only report a pending wait status if we pretend that this has
2886 indeed been resumed. */
ca2163eb
PA
2887 if (!lp->resumed)
2888 return 0;
2889
2890 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2891 {
2892 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2893 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2894 0', so a clean process exit can not be stored pending in
2895 lp->status, it is indistinguishable from
2896 no-pending-status. */
2897 return 1;
2898 }
2899
2900 if (lp->status != 0)
2901 return 1;
2902
2903 return 0;
d6b0e80f
AC
2904}
2905
2906/* Return non-zero if LP isn't stopped. */
2907
2908static int
2909running_callback (struct lwp_info *lp, void *data)
2910{
25289eb2
PA
2911 return (!lp->stopped
2912 || ((lp->status != 0
2913 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2914 && lp->resumed));
d6b0e80f
AC
2915}
2916
2917/* Count the LWP's that have had events. */
2918
2919static int
2920count_events_callback (struct lwp_info *lp, void *data)
2921{
2922 int *count = data;
2923
2924 gdb_assert (count != NULL);
2925
e09490f1 2926 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2927 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2928 (*count)++;
2929
2930 return 0;
2931}
2932
2933/* Select the LWP (if any) that is currently being single-stepped. */
2934
2935static int
2936select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2937{
25289eb2
PA
2938 if (lp->last_resume_kind == resume_step
2939 && lp->status != 0)
d6b0e80f
AC
2940 return 1;
2941 else
2942 return 0;
2943}
2944
2945/* Select the Nth LWP that has had a SIGTRAP event. */
2946
2947static int
2948select_event_lwp_callback (struct lwp_info *lp, void *data)
2949{
2950 int *selector = data;
2951
2952 gdb_assert (selector != NULL);
2953
1777feb0 2954 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2955 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2956 if ((*selector)-- == 0)
2957 return 1;
2958
2959 return 0;
2960}
2961
710151dd
PA
2962static int
2963cancel_breakpoint (struct lwp_info *lp)
2964{
2965 /* Arrange for a breakpoint to be hit again later. We don't keep
2966 the SIGTRAP status and don't forward the SIGTRAP signal to the
2967 LWP. We will handle the current event, eventually we will resume
2968 this LWP, and this breakpoint will trap again.
2969
2970 If we do not do this, then we run the risk that the user will
2971 delete or disable the breakpoint, but the LWP will have already
2972 tripped on it. */
2973
515630c5
UW
2974 struct regcache *regcache = get_thread_regcache (lp->ptid);
2975 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2976 CORE_ADDR pc;
2977
2978 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 2979 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
2980 {
2981 if (debug_linux_nat)
2982 fprintf_unfiltered (gdb_stdlog,
2983 "CB: Push back breakpoint for %s\n",
2984 target_pid_to_str (lp->ptid));
2985
2986 /* Back up the PC if necessary. */
515630c5
UW
2987 if (gdbarch_decr_pc_after_break (gdbarch))
2988 regcache_write_pc (regcache, pc);
2989
710151dd
PA
2990 return 1;
2991 }
2992 return 0;
2993}
2994
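/* iterate_over_lwps callback: for every LWP except EVENT_LP (passed
   in DATA), if it stopped at a GDB breakpoint, back up its PC and
   discard the SIGTRAP so the breakpoint is simply re-hit later.  */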
d6b0e80f
AC
2995static int
2996cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2997{
2998 struct lwp_info *event_lp = data;
2999
3000 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3001 if (lp == event_lp)
3002 return 0;
3003
3004 /* If a LWP other than the LWP that we're reporting an event for has
3005 hit a GDB breakpoint (as opposed to some random trap signal),
3006 then just arrange for it to hit it again later. We don't keep
3007 the SIGTRAP status and don't forward the SIGTRAP signal to the
3008 LWP. We will handle the current event, eventually we will resume
3009 all LWPs, and this one will get its breakpoint trap again.
3010
3011 If we do not do this, then we run the risk that the user will
3012 delete or disable the breakpoint, but the LWP will have already
3013 tripped on it. */
3014
00390b84 3015 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
3016 && cancel_breakpoint (lp))
3017 /* Throw away the SIGTRAP. */
3018 lp->status = 0;
d6b0e80f
AC
3019
3020 return 0;
3021}
3022
3023/* Select one LWP out of those that have events pending. */
3024
3025static void
d90e17a7 3026select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
3027{
3028 int num_events = 0;
3029 int random_selector;
3030 struct lwp_info *event_lp;
3031
ac264b3b 3032 /* Record the wait status for the original LWP. */
d6b0e80f
AC
3033 (*orig_lp)->status = *status;
3034
3035 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
3036 event_lp = iterate_over_lwps (filter,
3037 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
3038 if (event_lp != NULL)
3039 {
3040 if (debug_linux_nat)
3041 fprintf_unfiltered (gdb_stdlog,
3042 "SEL: Select single-step %s\n",
3043 target_pid_to_str (event_lp->ptid));
3044 }
3045 else
3046 {
3047 /* No single-stepping LWP. Select one at random, out of those
3048 which have had SIGTRAP events. */
3049
3050 /* First see how many SIGTRAP events we have. */
d90e17a7 3051 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
3052
3053 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3054 random_selector = (int)
3055 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
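 /* rand () is uniform over [0, RAND_MAX], so the expression above
 yields an index in [0, num_events - 1], giving each LWP with a
 pending SIGTRAP (roughly) the same chance of being picked. */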
3056
3057 if (debug_linux_nat && num_events > 1)
3058 fprintf_unfiltered (gdb_stdlog,
3059 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3060 num_events, random_selector);
3061
d90e17a7
PA
3062 event_lp = iterate_over_lwps (filter,
3063 select_event_lwp_callback,
d6b0e80f
AC
3064 &random_selector);
3065 }
3066
3067 if (event_lp != NULL)
3068 {
3069 /* Switch the event LWP. */
3070 *orig_lp = event_lp;
3071 *status = event_lp->status;
3072 }
3073
3074 /* Flush the wait status for the event LWP. */
3075 (*orig_lp)->status = 0;
3076}
3077
3078/* Return non-zero if LP has been resumed. */
3079
3080static int
3081resumed_callback (struct lwp_info *lp, void *data)
3082{
3083 return lp->resumed;
3084}
3085
d6b0e80f
AC
3086/* Stop an active thread, verify it still exists, then resume it. */
3087
3088static int
3089stop_and_resume_callback (struct lwp_info *lp, void *data)
3090{
25289eb2 3091 if (!lp->stopped)
d6b0e80f 3092 {
25289eb2
PA
3093 enum resume_kind last_resume_kind = lp->last_resume_kind;
3094 ptid_t ptid = lp->ptid;
3095
d6b0e80f
AC
3096 stop_callback (lp, NULL);
3097 stop_wait_callback (lp, NULL);
25289eb2
PA
3098
3099 /* Resume if the lwp still exists, and the core wanted it
3100 running. */
3101 if (last_resume_kind != resume_stop)
3102 {
3103 lp = find_lwp_pid (ptid);
3104 if (lp)
3105 resume_lwp (lp, lp->step);
3106 }
d6b0e80f
AC
3107 }
3108 return 0;
3109}
3110
02f3fc28 3111/* Check if we should go on and pass this event to common code.
fa2c6a57 3112 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
3113static struct lwp_info *
3114linux_nat_filter_event (int lwpid, int status, int options)
3115{
3116 struct lwp_info *lp;
3117
3118 lp = find_lwp_pid (pid_to_ptid (lwpid));
3119
3120 /* Check for stop events reported by a process we didn't already
3121 know about - anything not already in our LWP list.
3122
3123 If we're expecting to receive stopped processes after
3124 fork, vfork, and clone events, then we'll just add the
3125 new one to our list and go back to waiting for the event
3126 to be reported - the stopped process might be returned
3127 from waitpid before or after the event is. */
3128 if (WIFSTOPPED (status) && !lp)
3129 {
84636d28 3130 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3131 return NULL;
3132 }
3133
3134 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3135 our list, i.e. not part of the current process. This can happen
fd62cb89 3136 if we detach from a program we originally forked and then it
02f3fc28
PA
3137 exits. */
3138 if (!WIFSTOPPED (status) && !lp)
3139 return NULL;
3140
3141 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
3142 CLONE_PTRACE processes which do not use the thread library -
3143 otherwise we wouldn't find the new LWP this way. That doesn't
3144 currently work, and the following code is currently unreachable
3145 due to the two blocks above. If it's fixed some day, this code
3146 should be broken out into a function so that we can also pick up
3147 LWPs from the new interface. */
3148 if (!lp)
3149 {
3150 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
3151 if (options & __WCLONE)
3152 lp->cloned = 1;
3153
3154 gdb_assert (WIFSTOPPED (status)
3155 && WSTOPSIG (status) == SIGSTOP);
3156 lp->signalled = 1;
3157
3158 if (!in_thread_list (inferior_ptid))
3159 {
3160 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
3161 GET_PID (inferior_ptid));
3162 add_thread (inferior_ptid);
3163 }
3164
3165 add_thread (lp->ptid);
3166 }
3167
ca2163eb
PA
3168 /* Handle GNU/Linux's syscall SIGTRAPs. */
3169 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3170 {
3171 /* No longer need the sysgood bit. The ptrace event ends up
3172 recorded in lp->waitstatus if we care for it. We can carry
3173 on handling the event like a regular SIGTRAP from here
3174 on. */
3175 status = W_STOPCODE (SIGTRAP);
3176 if (linux_handle_syscall_trap (lp, 0))
3177 return NULL;
3178 }
02f3fc28 3179
ca2163eb
PA
3180 /* Handle GNU/Linux's extended waitstatus for trace events. */
3181 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3182 {
3183 if (debug_linux_nat)
3184 fprintf_unfiltered (gdb_stdlog,
3185 "LLW: Handling extended status 0x%06x\n",
3186 status);
3187 if (linux_handle_extended_wait (lp, status, 0))
3188 return NULL;
3189 }
3190
26ab7092 3191 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3192 {
3193 /* Save the trap's siginfo in case we need it later. */
3194 save_siginfo (lp);
3195
3196 save_sigtrap (lp);
3197 }
ca2163eb 3198
02f3fc28 3199 /* Check if the thread has exited. */
d90e17a7
PA
3200 if ((WIFEXITED (status) || WIFSIGNALED (status))
3201 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3202 {
9db03742
JB
3203 /* If this is the main thread, we must stop all threads and verify
3204 if they are still alive. This is because in the nptl thread model
3205 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3206 other than the main thread. We only get the main thread exit
3207 signal once all child threads have already exited. If we
3208 stop all the threads and use the stop_wait_callback to check
3209 if they have exited we can determine whether this signal
3210 should be ignored or whether it means the end of the debugged
3211 application, regardless of which threading model is being
5d3b6af6 3212 used. */
02f3fc28
PA
3213 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3214 {
3215 lp->stopped = 1;
d90e17a7
PA
3216 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3217 stop_and_resume_callback, NULL);
02f3fc28
PA
3218 }
3219
3220 if (debug_linux_nat)
3221 fprintf_unfiltered (gdb_stdlog,
3222 "LLW: %s exited.\n",
3223 target_pid_to_str (lp->ptid));
3224
d90e17a7 3225 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3226 {
3227 /* If there is at least one more LWP, then the exit signal
3228 was not the end of the debugged application and should be
3229 ignored. */
3230 exit_lwp (lp);
3231 return NULL;
3232 }
02f3fc28
PA
3233 }
3234
3235 /* Check if the current LWP has previously exited. In the nptl
3236 thread model, LWPs other than the main thread do not issue
3237 signals when they exit so we must check whenever the thread has
3238 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3239 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3240 {
d90e17a7
PA
3241 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3242
02f3fc28
PA
3243 if (debug_linux_nat)
3244 fprintf_unfiltered (gdb_stdlog,
3245 "LLW: %s exited.\n",
3246 target_pid_to_str (lp->ptid));
3247
3248 exit_lwp (lp);
3249
3250 /* Make sure there is at least one thread running. */
d90e17a7 3251 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3252
3253 /* Discard the event. */
3254 return NULL;
3255 }
3256
3257 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3258 an attempt to stop an LWP. */
3259 if (lp->signalled
3260 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3261 {
3262 if (debug_linux_nat)
3263 fprintf_unfiltered (gdb_stdlog,
3264 "LLW: Delayed SIGSTOP caught for %s.\n",
3265 target_pid_to_str (lp->ptid));
3266
02f3fc28
PA
3267 lp->signalled = 0;
3268
25289eb2
PA
3269 if (lp->last_resume_kind != resume_stop)
3270 {
3271 /* This is a delayed SIGSTOP. */
02f3fc28 3272
25289eb2
PA
3273 registers_changed ();
3274
3275 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
02f3fc28 3276 lp->step, TARGET_SIGNAL_0);
25289eb2
PA
3277 if (debug_linux_nat)
3278 fprintf_unfiltered (gdb_stdlog,
3279 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3280 lp->step ?
3281 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3282 target_pid_to_str (lp->ptid));
02f3fc28 3283
25289eb2
PA
3284 lp->stopped = 0;
3285 gdb_assert (lp->resumed);
02f3fc28 3286
25289eb2
PA
3287 /* Discard the event. */
3288 return NULL;
3289 }
02f3fc28
PA
3290 }
3291
57380f4e
DJ
3292 /* Make sure we don't report a SIGINT that we have already displayed
3293 for another thread. */
3294 if (lp->ignore_sigint
3295 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3296 {
3297 if (debug_linux_nat)
3298 fprintf_unfiltered (gdb_stdlog,
3299 "LLW: Delayed SIGINT caught for %s.\n",
3300 target_pid_to_str (lp->ptid));
3301
3302 /* This is a delayed SIGINT. */
3303 lp->ignore_sigint = 0;
3304
3305 registers_changed ();
28439f5e 3306 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
57380f4e
DJ
3307 lp->step, TARGET_SIGNAL_0);
3308 if (debug_linux_nat)
3309 fprintf_unfiltered (gdb_stdlog,
3310 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3311 lp->step ?
3312 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3313 target_pid_to_str (lp->ptid));
3314
3315 lp->stopped = 0;
3316 gdb_assert (lp->resumed);
3317
3318 /* Discard the event. */
3319 return NULL;
3320 }
3321
02f3fc28
PA
3322 /* An interesting event. */
3323 gdb_assert (lp);
ca2163eb 3324 lp->status = status;
02f3fc28
PA
3325 return lp;
3326}
3327
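/* The workhorse behind the target's to_wait method: wait for an event
   in the LWPs selected by PTID (with SIGCHLD blocked around the
   waitpid loop), filter out events GDB does not need to see, and fill
   OURSTATUS for the LWP whose ptid is returned.  */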
d6b0e80f 3328static ptid_t
7feb7d06 3329linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3330 ptid_t ptid, struct target_waitstatus *ourstatus,
3331 int target_options)
d6b0e80f 3332{
7feb7d06 3333 static sigset_t prev_mask;
4b60df3d 3334 enum resume_kind last_resume_kind;
d6b0e80f
AC
3335 struct lwp_info *lp = NULL;
3336 int options = 0;
3337 int status = 0;
d90e17a7 3338 pid_t pid;
d6b0e80f 3339
01124a23 3340 if (debug_linux_nat)
b84876c2
PA
3341 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3342
f973ed9c
DJ
3343 /* The first time we get here after starting a new inferior, we may
3344 not have added it to the LWP list yet - this is the earliest
3345 moment at which we know its PID. */
d90e17a7 3346 if (ptid_is_pid (inferior_ptid))
f973ed9c 3347 {
27c9d204
PA
3348 /* Upgrade the main thread's ptid. */
3349 thread_change_ptid (inferior_ptid,
3350 BUILD_LWP (GET_PID (inferior_ptid),
3351 GET_PID (inferior_ptid)));
3352
f973ed9c
DJ
3353 lp = add_lwp (inferior_ptid);
3354 lp->resumed = 1;
3355 }
3356
7feb7d06
PA
3357 /* Make sure SIGCHLD is blocked. */
3358 block_child_signals (&prev_mask);
d6b0e80f 3359
d90e17a7
PA
3360 if (ptid_equal (ptid, minus_one_ptid))
3361 pid = -1;
3362 else if (ptid_is_pid (ptid))
3363 /* A request to wait for a specific tgid. This is not possible
3364 with waitpid, so instead, we wait for any child, and leave
3365 children we're not interested in right now with a pending
3366 status to report later. */
3367 pid = -1;
3368 else
3369 pid = GET_LWP (ptid);
3370
d6b0e80f 3371retry:
d90e17a7
PA
3372 lp = NULL;
3373 status = 0;
d6b0e80f 3374
e3e9f5a2
PA
3375 /* Make sure that of those LWPs we want to get an event from, there
3376 is at least one LWP that has been resumed. If there's none, just
3377 bail out. The core may just be flushing asynchronously all
3378 events. */
3379 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3380 {
3381 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3382
01124a23 3383 if (debug_linux_nat)
e3e9f5a2
PA
3384 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3385
3386 restore_child_signals_mask (&prev_mask);
3387 return minus_one_ptid;
3388 }
d6b0e80f
AC
3389
3390 /* First check if there is a LWP with a wait status pending. */
3391 if (pid == -1)
3392 {
3393 /* Any LWP that's been resumed will do. */
d90e17a7 3394 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3395 if (lp)
3396 {
ca2163eb 3397 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3398 fprintf_unfiltered (gdb_stdlog,
3399 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3400 status_to_str (lp->status),
d6b0e80f
AC
3401 target_pid_to_str (lp->ptid));
3402 }
3403
b84876c2 3404 /* But if we don't find one, we'll have to wait, and check both
7feb7d06
PA
3405 cloned and uncloned processes. We start with the cloned
3406 processes. */
d6b0e80f
AC
3407 options = __WCLONE | WNOHANG;
3408 }
3409 else if (is_lwp (ptid))
3410 {
3411 if (debug_linux_nat)
3412 fprintf_unfiltered (gdb_stdlog,
3413 "LLW: Waiting for specific LWP %s.\n",
3414 target_pid_to_str (ptid));
3415
3416 /* We have a specific LWP to check. */
3417 lp = find_lwp_pid (ptid);
3418 gdb_assert (lp);
d6b0e80f 3419
ca2163eb 3420 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3421 fprintf_unfiltered (gdb_stdlog,
3422 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3423 status_to_str (lp->status),
d6b0e80f
AC
3424 target_pid_to_str (lp->ptid));
3425
3426 /* If we have to wait, take into account whether PID is a cloned
3427 process or not. And we have to convert it to something that
3428 the layer beneath us can understand. */
3429 options = lp->cloned ? __WCLONE : 0;
3430 pid = GET_LWP (ptid);
d90e17a7
PA
3431
3432 /* We check for lp->waitstatus in addition to lp->status,
3433 because we can have pending process exits recorded in
3434 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3435 an additional lp->status_p flag. */
ca2163eb 3436 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3437 lp = NULL;
d6b0e80f
AC
3438 }
3439
25289eb2 3440 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
d6b0e80f
AC
3441 {
3442 /* A pending SIGSTOP may interfere with the normal stream of
3443 events. In a typical case where interference is a problem,
3444 we have a SIGSTOP signal pending for LWP A while
3445 single-stepping it, encounter an event in LWP B, and take the
3446 pending SIGSTOP while trying to stop LWP A. After processing
3447 the event in LWP B, LWP A is continued, and we'll never see
3448 the SIGTRAP associated with the last time we were
3449 single-stepping LWP A. */
3450
3451 /* Resume the thread. It should halt immediately returning the
3452 pending SIGSTOP. */
3453 registers_changed ();
28439f5e 3454 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3455 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
3456 if (debug_linux_nat)
3457 fprintf_unfiltered (gdb_stdlog,
3458 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3459 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3460 target_pid_to_str (lp->ptid));
3461 lp->stopped = 0;
3462 gdb_assert (lp->resumed);
3463
ca2163eb
PA
3464 /* Catch the pending SIGSTOP. */
3465 status = lp->status;
3466 lp->status = 0;
3467
d6b0e80f 3468 stop_wait_callback (lp, NULL);
ca2163eb
PA
3469
3470 /* If the lp->status field isn't empty, we caught another signal
3471 while flushing the SIGSTOP. Return it back to the event
3472 queue of the LWP, as we already have an event to handle. */
3473 if (lp->status)
3474 {
3475 if (debug_linux_nat)
3476 fprintf_unfiltered (gdb_stdlog,
3477 "LLW: kill %s, %s\n",
3478 target_pid_to_str (lp->ptid),
3479 status_to_str (lp->status));
3480 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3481 }
3482
3483 lp->status = status;
d6b0e80f
AC
3484 }
3485
b84876c2
PA
3486 if (!target_can_async_p ())
3487 {
3488 /* Causes SIGINT to be passed on to the attached process. */
3489 set_sigint_trap ();
b84876c2 3490 }
d6b0e80f 3491
47608cb1
PA
3492 /* Translate generic target_wait options into waitpid options. */
3493 if (target_options & TARGET_WNOHANG)
3494 options |= WNOHANG;
7feb7d06 3495
d90e17a7 3496 while (lp == NULL)
d6b0e80f
AC
3497 {
3498 pid_t lwpid;
3499
7feb7d06 3500 lwpid = my_waitpid (pid, &status, options);
b84876c2 3501
d6b0e80f
AC
3502 if (lwpid > 0)
3503 {
3504 gdb_assert (pid == -1 || lwpid == pid);
3505
3506 if (debug_linux_nat)
3507 {
3508 fprintf_unfiltered (gdb_stdlog,
3509 "LLW: waitpid %ld received %s\n",
3510 (long) lwpid, status_to_str (status));
3511 }
3512
02f3fc28 3513 lp = linux_nat_filter_event (lwpid, status, options);
d90e17a7 3514
33355866
JK
3515 /* STATUS is now no longer valid, use LP->STATUS instead. */
3516 status = 0;
3517
d90e17a7
PA
3518 if (lp
3519 && ptid_is_pid (ptid)
3520 && ptid_get_pid (lp->ptid) != ptid_get_pid (ptid))
d6b0e80f 3521 {
e3e9f5a2
PA
3522 gdb_assert (lp->resumed);
3523
d90e17a7 3524 if (debug_linux_nat)
3e43a32a
MS
3525 fprintf (stderr,
3526 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3527 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3528
ca2163eb 3529 if (WIFSTOPPED (lp->status))
d90e17a7 3530 {
ca2163eb 3531 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3532 {
e3e9f5a2
PA
3533 /* Cancel breakpoint hits. The breakpoint may
3534 be removed before we fetch events from this
3535 process to report to the core. It is best
3536 not to assume the moribund breakpoints
3537 heuristic always handles these cases --- it
3538 could be too many events go through to the
3539 core before this one is handled. All-stop
3540 always cancels breakpoint hits in all
3541 threads. */
3542 if (non_stop
00390b84 3543 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3544 && cancel_breakpoint (lp))
3545 {
3546 /* Throw away the SIGTRAP. */
3547 lp->status = 0;
3548
3549 if (debug_linux_nat)
3550 fprintf (stderr,
3e43a32a
MS
3551 "LLW: LWP %ld hit a breakpoint while"
3552 " waiting for another process;"
3553 " cancelled it\n",
e3e9f5a2
PA
3554 ptid_get_lwp (lp->ptid));
3555 }
3556 lp->stopped = 1;
d90e17a7
PA
3557 }
3558 else
3559 {
3560 lp->stopped = 1;
3561 lp->signalled = 0;
3562 }
3563 }
33355866 3564 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3565 {
3566 if (debug_linux_nat)
3e43a32a
MS
3567 fprintf (stderr,
3568 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3569 ptid_get_lwp (lp->ptid));
3570
 3571	      /* This was the last lwp in the process.  Events
 3572	      are serialized to the GDB core and we can't report
 3573	      this one right now; but since the core and the
 3574	      other target layers will want to be notified about
 3575	      the exit code/signal, leave the status pending for
 3576	      the next time we're able to report
 3577	      it. */
d90e17a7
PA
3578
3579 /* Prevent trying to stop this thread again. We'll
3580 never try to resume it because it has a pending
3581 status. */
3582 lp->stopped = 1;
3583
 3584	      /* Dead LWPs aren't expected to report a pending
 3585	      SIGSTOP. */
3586 lp->signalled = 0;
3587
3588 /* Store the pending event in the waitstatus as
3589 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3590 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3591 }
3592
3593 /* Keep looking. */
3594 lp = NULL;
d6b0e80f
AC
3595 continue;
3596 }
3597
d90e17a7
PA
3598 if (lp)
3599 break;
3600 else
3601 {
3602 if (pid == -1)
3603 {
3604 /* waitpid did return something. Restart over. */
3605 options |= __WCLONE;
3606 }
3607 continue;
3608 }
d6b0e80f
AC
3609 }
3610
3611 if (pid == -1)
3612 {
3613 /* Alternate between checking cloned and uncloned processes. */
3614 options ^= __WCLONE;
3615
b84876c2
PA
3616 /* And every time we have checked both:
3617 In async mode, return to event loop;
3618 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 3619 if (options & __WCLONE)
b84876c2 3620 {
47608cb1 3621 if (target_options & TARGET_WNOHANG)
b84876c2
PA
3622 {
3623 /* No interesting event. */
3624 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3625
01124a23 3626 if (debug_linux_nat)
b84876c2
PA
3627 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3628
7feb7d06 3629 restore_child_signals_mask (&prev_mask);
b84876c2
PA
3630 return minus_one_ptid;
3631 }
3632
3633 sigsuspend (&suspend_mask);
3634 }
d6b0e80f 3635 }
28736962
PA
3636 else if (target_options & TARGET_WNOHANG)
3637 {
3638 /* No interesting event for PID yet. */
3639 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3640
01124a23 3641 if (debug_linux_nat)
28736962
PA
3642 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3643
3644 restore_child_signals_mask (&prev_mask);
3645 return minus_one_ptid;
3646 }
d6b0e80f
AC
3647
3648 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3649 gdb_assert (lp == NULL);
d6b0e80f
AC
3650 }
3651
b84876c2 3652 if (!target_can_async_p ())
d26b5354 3653 clear_sigint_trap ();
d6b0e80f
AC
3654
3655 gdb_assert (lp);
3656
ca2163eb
PA
3657 status = lp->status;
3658 lp->status = 0;
3659
d6b0e80f
AC
3660 /* Don't report signals that GDB isn't interested in, such as
3661 signals that are neither printed nor stopped upon. Stopping all
3662 threads can be a bit time-consuming so if we want decent
3663 performance with heavily multi-threaded programs, especially when
3664 they're using a high frequency timer, we'd better avoid it if we
3665 can. */
3666
3667 if (WIFSTOPPED (status))
3668 {
423ec54c 3669 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
d6b0e80f 3670
2455069d
UW
3671 /* When using hardware single-step, we need to report every signal.
3672 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3673 if (!lp->step
2455069d 3674 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3675 {
 3676	  /* FIXME: kettenis/2001-06-06: Should we resume all threads
3677 here? It is not clear we should. GDB may not expect
3678 other threads to run. On the other hand, not resuming
3679 newly attached threads may cause an unwanted delay in
3680 getting them running. */
3681 registers_changed ();
28439f5e 3682 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3683 lp->step, signo);
d6b0e80f
AC
3684 if (debug_linux_nat)
3685 fprintf_unfiltered (gdb_stdlog,
3686 "LLW: %s %s, %s (preempt 'handle')\n",
3687 lp->step ?
3688 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3689 target_pid_to_str (lp->ptid),
423ec54c
JK
3690 (signo != TARGET_SIGNAL_0
3691 ? strsignal (target_signal_to_host (signo))
3692 : "0"));
d6b0e80f 3693 lp->stopped = 0;
d6b0e80f
AC
3694 goto retry;
3695 }
3696
1ad15515 3697 if (!non_stop)
d6b0e80f 3698 {
1ad15515
PA
3699 /* Only do the below in all-stop, as we currently use SIGINT
3700 to implement target_stop (see linux_nat_stop) in
3701 non-stop. */
3702 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3703 {
3704 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3705 forwarded to the entire process group, that is, all LWPs
3706 will receive it - unless they're using CLONE_THREAD to
3707 share signals. Since we only want to report it once, we
3708 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3709 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3710 set_ignore_sigint, NULL);
1ad15515
PA
3711 lp->ignore_sigint = 0;
3712 }
3713 else
3714 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3715 }
3716 }
3717
3718 /* This LWP is stopped now. */
3719 lp->stopped = 1;
3720
3721 if (debug_linux_nat)
3722 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3723 status_to_str (status), target_pid_to_str (lp->ptid));
3724
4c28f408
PA
3725 if (!non_stop)
3726 {
3727 /* Now stop all other LWP's ... */
d90e17a7 3728 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3729
3730 /* ... and wait until all of them have reported back that
3731 they're no longer running. */
d90e17a7 3732 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3733
3734 /* If we're not waiting for a specific LWP, choose an event LWP
3735 from among those that have had events. Giving equal priority
3736 to all LWPs that have had events helps prevent
3737 starvation. */
3738 if (pid == -1)
d90e17a7 3739 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3740
e3e9f5a2
PA
3741 /* Now that we've selected our final event LWP, cancel any
3742 breakpoints in other LWPs that have hit a GDB breakpoint.
3743 See the comment in cancel_breakpoints_callback to find out
3744 why. */
3745 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3746
4b60df3d
PA
3747 /* We'll need this to determine whether to report a SIGSTOP as
 3748	 TARGET_SIGNAL_0.  Need to take a copy because
3749 resume_clear_callback clears it. */
3750 last_resume_kind = lp->last_resume_kind;
3751
e3e9f5a2
PA
3752 /* In all-stop, from the core's perspective, all LWPs are now
3753 stopped until a new resume action is sent over. */
3754 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3755 }
3756 else
25289eb2 3757 {
4b60df3d
PA
3758 /* See above. */
3759 last_resume_kind = lp->last_resume_kind;
3760 resume_clear_callback (lp, NULL);
25289eb2 3761 }
d6b0e80f 3762
26ab7092 3763 if (linux_nat_status_is_event (status))
d6b0e80f 3764 {
d6b0e80f
AC
3765 if (debug_linux_nat)
3766 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3767 "LLW: trap ptid is %s.\n",
3768 target_pid_to_str (lp->ptid));
d6b0e80f 3769 }
d6b0e80f
AC
3770
3771 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3772 {
3773 *ourstatus = lp->waitstatus;
3774 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3775 }
3776 else
3777 store_waitstatus (ourstatus, status);
3778
01124a23 3779 if (debug_linux_nat)
b84876c2
PA
3780 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3781
7feb7d06 3782 restore_child_signals_mask (&prev_mask);
1e225492 3783
4b60df3d 3784 if (last_resume_kind == resume_stop
25289eb2
PA
3785 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3786 && WSTOPSIG (status) == SIGSTOP)
3787 {
3788 /* A thread that has been requested to stop by GDB with
3789 target_stop, and it stopped cleanly, so report as SIG0. The
3790 use of SIGSTOP is an implementation detail. */
3791 ourstatus->value.sig = TARGET_SIGNAL_0;
3792 }
3793
1e225492
JK
3794 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3795 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3796 lp->core = -1;
3797 else
3798 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3799
f973ed9c 3800 return lp->ptid;
d6b0e80f
AC
3801}
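/* Illustrative, standalone sketch (not the real implementation) of the
   wait strategy linux_nat_wait_1 uses above: poll with non-blocking
   waitpid, alternating between cloned (__WCLONE) and non-cloned
   children, and only block in sigsuspend once both flavors have been
   tried.  SUSPEND_MASK is a stand-in for the mask set up elsewhere in
   this file, and a SIGCHLD handler is assumed to be installed so that
   sigsuspend returns when a child changes state.  */

#include <signal.h>
#include <sys/wait.h>

#ifndef __WCLONE
#define __WCLONE 0x80000000
#endif

static pid_t
wait_any_lwp_sketch (int *statusp, sigset_t *suspend_mask)
{
  int options = __WCLONE | WNOHANG;

  for (;;)
    {
      pid_t lwpid = waitpid (-1, statusp, options);

      if (lwpid > 0)
        return lwpid;           /* Got an event.  */

      /* Alternate between checking cloned and uncloned children.  */
      options ^= __WCLONE;

      /* Once both flavors have been polled with nothing to report,
         sleep until the next SIGCHLD.  */
      if (options & __WCLONE)
        sigsuspend (suspend_mask);
    }
}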
3802
e3e9f5a2
PA
3803/* Resume LWPs that are currently stopped without any pending status
3804 to report, but are resumed from the core's perspective. */
3805
3806static int
3807resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3808{
3809 ptid_t *wait_ptid_p = data;
3810
3811 if (lp->stopped
3812 && lp->resumed
3813 && lp->status == 0
3814 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3815 {
3816 gdb_assert (is_executing (lp->ptid));
3817
3818 /* Don't bother if there's a breakpoint at PC that we'd hit
3819 immediately, and we're not waiting for this LWP. */
3820 if (!ptid_match (lp->ptid, *wait_ptid_p))
3821 {
3822 struct regcache *regcache = get_thread_regcache (lp->ptid);
3823 CORE_ADDR pc = regcache_read_pc (regcache);
3824
3825 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3826 return 0;
3827 }
3828
3829 if (debug_linux_nat)
3830 fprintf_unfiltered (gdb_stdlog,
3831 "RSRL: resuming stopped-resumed LWP %s\n",
3832 target_pid_to_str (lp->ptid));
3833
3834 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3835 lp->step, TARGET_SIGNAL_0);
3836 lp->stopped = 0;
3837 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3838 lp->stopped_by_watchpoint = 0;
3839 }
3840
3841 return 0;
3842}
3843
7feb7d06
PA
3844static ptid_t
3845linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3846 ptid_t ptid, struct target_waitstatus *ourstatus,
3847 int target_options)
7feb7d06
PA
3848{
3849 ptid_t event_ptid;
3850
3851 if (debug_linux_nat)
3e43a32a
MS
3852 fprintf_unfiltered (gdb_stdlog,
3853 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
3854
3855 /* Flush the async file first. */
3856 if (target_can_async_p ())
3857 async_file_flush ();
3858
e3e9f5a2
PA
3859 /* Resume LWPs that are currently stopped without any pending status
3860 to report, but are resumed from the core's perspective. LWPs get
3861 in this state if we find them stopping at a time we're not
3862 interested in reporting the event (target_wait on a
3863 specific_process, for example, see linux_nat_wait_1), and
3864 meanwhile the event became uninteresting. Don't bother resuming
3865 LWPs we're not going to wait for if they'd stop immediately. */
3866 if (non_stop)
3867 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3868
47608cb1 3869 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3870
3871 /* If we requested any event, and something came out, assume there
3872 may be more. If we requested a specific lwp or process, also
3873 assume there may be more. */
3874 if (target_can_async_p ()
3875 && (ourstatus->kind != TARGET_WAITKIND_IGNORE
3876 || !ptid_equal (ptid, minus_one_ptid)))
3877 async_file_mark ();
3878
3879 /* Get ready for the next event. */
3880 if (target_can_async_p ())
3881 target_async (inferior_event_handler, 0);
3882
3883 return event_ptid;
3884}
3885
d6b0e80f
AC
3886static int
3887kill_callback (struct lwp_info *lp, void *data)
3888{
ed731959
JK
3889 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3890
3891 errno = 0;
3892 kill (GET_LWP (lp->ptid), SIGKILL);
3893 if (debug_linux_nat)
3894 fprintf_unfiltered (gdb_stdlog,
3895 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3896 target_pid_to_str (lp->ptid),
3897 errno ? safe_strerror (errno) : "OK");
3898
3899 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3900
d6b0e80f
AC
3901 errno = 0;
3902 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3903 if (debug_linux_nat)
3904 fprintf_unfiltered (gdb_stdlog,
3905 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3906 target_pid_to_str (lp->ptid),
3907 errno ? safe_strerror (errno) : "OK");
3908
3909 return 0;
3910}
3911
3912static int
3913kill_wait_callback (struct lwp_info *lp, void *data)
3914{
3915 pid_t pid;
3916
3917 /* We must make sure that there are no pending events (delayed
 3918	     SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3919 program doesn't interfere with any following debugging session. */
3920
3921 /* For cloned processes we must check both with __WCLONE and
3922 without, since the exit status of a cloned process isn't reported
3923 with __WCLONE. */
3924 if (lp->cloned)
3925 {
3926 do
3927 {
58aecb61 3928 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3929 if (pid != (pid_t) -1)
d6b0e80f 3930 {
e85a822c
DJ
3931 if (debug_linux_nat)
3932 fprintf_unfiltered (gdb_stdlog,
3933 "KWC: wait %s received unknown.\n",
3934 target_pid_to_str (lp->ptid));
3935 /* The Linux kernel sometimes fails to kill a thread
3936 completely after PTRACE_KILL; that goes from the stop
3937 point in do_fork out to the one in
 3938	       get_signal_to_deliver and waits again.  So kill it
3939 again. */
3940 kill_callback (lp, NULL);
d6b0e80f
AC
3941 }
3942 }
3943 while (pid == GET_LWP (lp->ptid));
3944
3945 gdb_assert (pid == -1 && errno == ECHILD);
3946 }
3947
3948 do
3949 {
58aecb61 3950 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3951 if (pid != (pid_t) -1)
d6b0e80f 3952 {
e85a822c
DJ
3953 if (debug_linux_nat)
3954 fprintf_unfiltered (gdb_stdlog,
3955 "KWC: wait %s received unk.\n",
3956 target_pid_to_str (lp->ptid));
3957 /* See the call to kill_callback above. */
3958 kill_callback (lp, NULL);
d6b0e80f
AC
3959 }
3960 }
3961 while (pid == GET_LWP (lp->ptid));
3962
3963 gdb_assert (pid == -1 && errno == ECHILD);
3964 return 0;
3965}
3966
3967static void
7d85a9c0 3968linux_nat_kill (struct target_ops *ops)
d6b0e80f 3969{
f973ed9c
DJ
3970 struct target_waitstatus last;
3971 ptid_t last_ptid;
3972 int status;
d6b0e80f 3973
f973ed9c
DJ
3974 /* If we're stopped while forking and we haven't followed yet,
3975 kill the other task. We need to do this first because the
3976 parent will be sleeping if this is a vfork. */
d6b0e80f 3977
f973ed9c 3978 get_last_target_status (&last_ptid, &last);
d6b0e80f 3979
f973ed9c
DJ
3980 if (last.kind == TARGET_WAITKIND_FORKED
3981 || last.kind == TARGET_WAITKIND_VFORKED)
3982 {
3a3e9ee3 3983 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3984 wait (&status);
3985 }
3986
3987 if (forks_exist_p ())
7feb7d06 3988 linux_fork_killall ();
f973ed9c
DJ
3989 else
3990 {
d90e17a7 3991 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3992
4c28f408
PA
3993 /* Stop all threads before killing them, since ptrace requires
 3994	 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3995 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3996 /* ... and wait until all of them have reported back that
3997 they're no longer running. */
d90e17a7 3998 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3999
f973ed9c 4000 /* Kill all LWP's ... */
d90e17a7 4001 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
4002
4003 /* ... and wait until we've flushed all events. */
d90e17a7 4004 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
4005 }
4006
4007 target_mourn_inferior ();
d6b0e80f
AC
4008}
4009
4010static void
136d6dae 4011linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 4012{
d90e17a7 4013 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 4014
f973ed9c 4015 if (! forks_exist_p ())
d90e17a7
PA
4016 /* Normal case, no other forks available. */
4017 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
4018 else
4019 /* Multi-fork case. The current inferior_ptid has exited, but
4020 there are other viable forks to debug. Delete the exiting
4021 one and context-switch to the first available. */
4022 linux_fork_mourn_inferior ();
d6b0e80f
AC
4023}
4024
5b009018
PA
4025/* Convert a native/host siginfo object, into/from the siginfo in the
4026 layout of the inferiors' architecture. */
4027
4028static void
4029siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4030{
4031 int done = 0;
4032
4033 if (linux_nat_siginfo_fixup != NULL)
4034 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4035
4036 /* If there was no callback, or the callback didn't do anything,
4037 then just do a straight memcpy. */
4038 if (!done)
4039 {
4040 if (direction == 1)
4041 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4042 else
4043 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4044 }
4045}
4046
4aa995e1
PA
4047static LONGEST
4048linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4049 const char *annex, gdb_byte *readbuf,
4050 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4051{
4aa995e1
PA
4052 int pid;
4053 struct siginfo siginfo;
5b009018 4054 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4aa995e1
PA
4055
4056 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4057 gdb_assert (readbuf || writebuf);
4058
4059 pid = GET_LWP (inferior_ptid);
4060 if (pid == 0)
4061 pid = GET_PID (inferior_ptid);
4062
4063 if (offset > sizeof (siginfo))
4064 return -1;
4065
4066 errno = 0;
4067 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4068 if (errno != 0)
4069 return -1;
4070
5b009018
PA
4071 /* When GDB is built as a 64-bit application, ptrace writes into
4072 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4073 inferior with a 64-bit GDB should look the same as debugging it
4074 with a 32-bit GDB, we need to convert it. GDB core always sees
4075 the converted layout, so any read/write will have to be done
4076 post-conversion. */
4077 siginfo_fixup (&siginfo, inf_siginfo, 0);
4078
4aa995e1
PA
4079 if (offset + len > sizeof (siginfo))
4080 len = sizeof (siginfo) - offset;
4081
4082 if (readbuf != NULL)
5b009018 4083 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4084 else
4085 {
5b009018
PA
4086 memcpy (inf_siginfo + offset, writebuf, len);
4087
4088 /* Convert back to ptrace layout before flushing it out. */
4089 siginfo_fixup (&siginfo, inf_siginfo, 1);
4090
4aa995e1
PA
4091 errno = 0;
4092 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4093 if (errno != 0)
4094 return -1;
4095 }
4096
4097 return len;
4098}
4099
10d6c8cd
DJ
4100static LONGEST
4101linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4102 const char *annex, gdb_byte *readbuf,
4103 const gdb_byte *writebuf,
4104 ULONGEST offset, LONGEST len)
d6b0e80f 4105{
4aa995e1 4106 struct cleanup *old_chain;
10d6c8cd 4107 LONGEST xfer;
d6b0e80f 4108
4aa995e1
PA
4109 if (object == TARGET_OBJECT_SIGNAL_INFO)
4110 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4111 offset, len);
4112
c35b1492
PA
4113 /* The target is connected but no live inferior is selected. Pass
4114 this request down to a lower stratum (e.g., the executable
4115 file). */
4116 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4117 return 0;
4118
4aa995e1
PA
4119 old_chain = save_inferior_ptid ();
4120
d6b0e80f
AC
4121 if (is_lwp (inferior_ptid))
4122 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4123
10d6c8cd
DJ
4124 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4125 offset, len);
d6b0e80f
AC
4126
4127 do_cleanups (old_chain);
4128 return xfer;
4129}
4130
4131static int
28439f5e 4132linux_thread_alive (ptid_t ptid)
d6b0e80f 4133{
8c6a60d1 4134 int err, tmp_errno;
4c28f408 4135
d6b0e80f
AC
4136 gdb_assert (is_lwp (ptid));
4137
4c28f408
PA
4138 /* Send signal 0 instead of anything ptrace, because ptracing a
4139 running thread errors out claiming that the thread doesn't
4140 exist. */
4141 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4142 tmp_errno = errno;
d6b0e80f
AC
4143 if (debug_linux_nat)
4144 fprintf_unfiltered (gdb_stdlog,
4c28f408 4145 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4146 target_pid_to_str (ptid),
8c6a60d1 4147 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4148
4c28f408 4149 if (err != 0)
d6b0e80f
AC
4150 return 0;
4151
4152 return 1;
4153}
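/* Illustrative, standalone sketch (not the kill_lwp used above):
   probing whether an LWP still exists by sending it signal 0, the same
   idea linux_thread_alive relies on.  Assumes a Linux kernel with the
   tgkill syscall and a libc exposing SYS_tgkill via <sys/syscall.h>.  */

#include <sys/types.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
lwp_is_alive_sketch (pid_t tgid, pid_t tid)
{
  /* Signal 0 performs only the existence/permission checks; nothing is
     delivered.  As in linux_thread_alive above, any error is treated
     as "not alive".  */
  return syscall (SYS_tgkill, (long) tgid, (long) tid, 0) == 0;
}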
4154
28439f5e
PA
4155static int
4156linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4157{
4158 return linux_thread_alive (ptid);
4159}
4160
d6b0e80f 4161static char *
117de6a9 4162linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4163{
4164 static char buf[64];
4165
a0ef4274 4166 if (is_lwp (ptid)
d90e17a7
PA
4167 && (GET_PID (ptid) != GET_LWP (ptid)
4168 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4169 {
4170 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4171 return buf;
4172 }
4173
4174 return normal_pid_to_str (ptid);
4175}
4176
4694da01
TT
4177static char *
4178linux_nat_thread_name (struct thread_info *thr)
4179{
4180 int pid = ptid_get_pid (thr->ptid);
4181 long lwp = ptid_get_lwp (thr->ptid);
4182#define FORMAT "/proc/%d/task/%ld/comm"
4183 char buf[sizeof (FORMAT) + 30];
4184 FILE *comm_file;
4185 char *result = NULL;
4186
4187 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4188 comm_file = fopen (buf, "r");
4189 if (comm_file)
4190 {
4191 /* Not exported by the kernel, so we define it here. */
4192#define COMM_LEN 16
4193 static char line[COMM_LEN + 1];
4194
4195 if (fgets (line, sizeof (line), comm_file))
4196 {
4197 char *nl = strchr (line, '\n');
4198
4199 if (nl)
4200 *nl = '\0';
4201 if (*line != '\0')
4202 result = line;
4203 }
4204
4205 fclose (comm_file);
4206 }
4207
4208#undef COMM_LEN
4209#undef FORMAT
4210
4211 return result;
4212}
4213
dba24537
AC
4214/* Accepts an integer PID; Returns a string representing a file that
4215 can be opened to get the symbols for the child process. */
4216
6d8fd2b7
UW
4217static char *
4218linux_child_pid_to_exec_file (int pid)
dba24537
AC
4219{
4220 char *name1, *name2;
4221
4222 name1 = xmalloc (MAXPATHLEN);
4223 name2 = xmalloc (MAXPATHLEN);
4224 make_cleanup (xfree, name1);
4225 make_cleanup (xfree, name2);
4226 memset (name2, 0, MAXPATHLEN);
4227
4228 sprintf (name1, "/proc/%d/exe", pid);
4229 if (readlink (name1, name2, MAXPATHLEN) > 0)
4230 return name2;
4231 else
4232 return name1;
4233}
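/* Illustrative, standalone sketch (not the function above): resolving
   /proc/<pid>/exe with readlink.  readlink does not NUL-terminate, so
   the returned length is used explicitly here instead of relying on a
   pre-zeroed buffer the way linux_child_pid_to_exec_file does.  */

#include <stdio.h>
#include <unistd.h>

static int
exe_path_sketch (int pid, char *buf, size_t bufsize)
{
  char link[64];
  ssize_t len;

  snprintf (link, sizeof (link), "/proc/%d/exe", pid);
  len = readlink (link, buf, bufsize - 1);
  if (len < 0)
    return -1;

  buf[len] = '\0';
  return 0;
}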
4234
4235/* Service function for corefiles and info proc. */
4236
4237static int
4238read_mapping (FILE *mapfile,
4239 long long *addr,
4240 long long *endaddr,
4241 char *permissions,
4242 long long *offset,
4243 char *device, long long *inode, char *filename)
4244{
4245 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4246 addr, endaddr, permissions, offset, device, inode);
4247
2e14c2ea
MS
4248 filename[0] = '\0';
4249 if (ret > 0 && ret != EOF)
dba24537
AC
4250 {
4251 /* Eat everything up to EOL for the filename. This will prevent
4252 weird filenames (such as one with embedded whitespace) from
4253 confusing this code. It also makes this code more robust in
4254 respect to annotations the kernel may add after the filename.
4255
4256 Note the filename is used for informational purposes
4257 only. */
4258 ret += fscanf (mapfile, "%[^\n]\n", filename);
4259 }
2e14c2ea 4260
dba24537
AC
4261 return (ret != 0 && ret != EOF);
4262}
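/* Example of the input read_mapping parses: one line of
   /proc/<pid>/maps looks like

     08048000-0804c000 r-xp 00000000 08:01 1311       /bin/cat

   which yields addr = 0x8048000, endaddr = 0x804c000, permissions =
   "r-xp", offset = 0, device = "08:01", plus the inode field (read
   with the same %llx conversion) and the trailing filename
   ("/bin/cat"); the filename may be absent for anonymous mappings.
   A minimal usage sketch of the function above (buffer sizes here are
   hypothetical):  */

static void
dump_mappings_sketch (FILE *mapsfile)
{
  long long addr, endaddr, offset, inode;
  char perms[8], dev[8], filename[4096];

  while (read_mapping (mapsfile, &addr, &endaddr, perms,
                       &offset, dev, &inode, filename))
    printf ("%llx-%llx %s %s\n", addr, endaddr, perms,
            filename[0] ? filename : "(anonymous)");
}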
4263
4264/* Fills the "to_find_memory_regions" target vector. Lists the memory
4265 regions in the inferior for a corefile. */
4266
4267static int
b8edc417 4268linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
dba24537 4269{
89ecc4f5 4270 int pid = PIDGET (inferior_ptid);
dba24537
AC
4271 char mapsfilename[MAXPATHLEN];
4272 FILE *mapsfile;
4273 long long addr, endaddr, size, offset, inode;
4274 char permissions[8], device[8], filename[MAXPATHLEN];
4275 int read, write, exec;
7c8a8b04 4276 struct cleanup *cleanup;
dba24537
AC
4277
4278 /* Compose the filename for the /proc memory map, and open it. */
89ecc4f5 4279 sprintf (mapsfilename, "/proc/%d/maps", pid);
dba24537 4280 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 4281 error (_("Could not open %s."), mapsfilename);
7c8a8b04 4282 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
4283
4284 if (info_verbose)
4285 fprintf_filtered (gdb_stdout,
4286 "Reading memory regions from %s\n", mapsfilename);
4287
4288 /* Now iterate until end-of-file. */
4289 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4290 &offset, &device[0], &inode, &filename[0]))
4291 {
4292 size = endaddr - addr;
4293
4294 /* Get the segment's permissions. */
4295 read = (strchr (permissions, 'r') != 0);
4296 write = (strchr (permissions, 'w') != 0);
4297 exec = (strchr (permissions, 'x') != 0);
4298
4299 if (info_verbose)
4300 {
4301 fprintf_filtered (gdb_stdout,
2244ba2e
PM
4302 "Save segment, %s bytes at %s (%c%c%c)",
4303 plongest (size), paddress (target_gdbarch, addr),
dba24537
AC
4304 read ? 'r' : ' ',
4305 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 4306 if (filename[0])
dba24537
AC
4307 fprintf_filtered (gdb_stdout, " for %s", filename);
4308 fprintf_filtered (gdb_stdout, "\n");
4309 }
4310
4311 /* Invoke the callback function to create the corefile
4312 segment. */
4313 func (addr, size, read, write, exec, obfd);
4314 }
7c8a8b04 4315 do_cleanups (cleanup);
dba24537
AC
4316 return 0;
4317}
4318
2020b7ab
PA
4319static int
4320find_signalled_thread (struct thread_info *info, void *data)
4321{
16c381f0 4322 if (info->suspend.stop_signal != TARGET_SIGNAL_0
2020b7ab
PA
4323 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4324 return 1;
4325
4326 return 0;
4327}
4328
4329static enum target_signal
4330find_stop_signal (void)
4331{
4332 struct thread_info *info =
4333 iterate_over_threads (find_signalled_thread, NULL);
4334
4335 if (info)
16c381f0 4336 return info->suspend.stop_signal;
2020b7ab
PA
4337 else
4338 return TARGET_SIGNAL_0;
4339}
4340
dba24537
AC
4341/* Records the thread's register state for the corefile note
4342 section. */
4343
4344static char *
4345linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
4346 char *note_data, int *note_size,
4347 enum target_signal stop_signal)
dba24537 4348{
dba24537 4349 unsigned long lwp = ptid_get_lwp (ptid);
c2250ad1
UW
4350 struct gdbarch *gdbarch = target_gdbarch;
4351 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4f844a66 4352 const struct regset *regset;
55e969c1 4353 int core_regset_p;
594f7785 4354 struct cleanup *old_chain;
17ea7499
CES
4355 struct core_regset_section *sect_list;
4356 char *gdb_regset;
594f7785
UW
4357
4358 old_chain = save_inferior_ptid ();
4359 inferior_ptid = ptid;
4360 target_fetch_registers (regcache, -1);
4361 do_cleanups (old_chain);
4f844a66
DM
4362
4363 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
4364 sect_list = gdbarch_core_regset_sections (gdbarch);
4365
17ea7499
CES
4366 /* The loop below uses the new struct core_regset_section, which stores
4367 the supported section names and sizes for the core file. Note that
4368 note PRSTATUS needs to be treated specially. But the other notes are
4369 structurally the same, so they can benefit from the new struct. */
4370 if (core_regset_p && sect_list != NULL)
4371 while (sect_list->sect_name != NULL)
4372 {
17ea7499
CES
4373 regset = gdbarch_regset_from_core_section (gdbarch,
4374 sect_list->sect_name,
4375 sect_list->size);
4376 gdb_assert (regset && regset->collect_regset);
4377 gdb_regset = xmalloc (sect_list->size);
4378 regset->collect_regset (regset, regcache, -1,
4379 gdb_regset, sect_list->size);
2f2241f1
UW
4380
4381 if (strcmp (sect_list->sect_name, ".reg") == 0)
4382 note_data = (char *) elfcore_write_prstatus
4383 (obfd, note_data, note_size,
857d11d0
JK
4384 lwp, target_signal_to_host (stop_signal),
4385 gdb_regset);
2f2241f1
UW
4386 else
4387 note_data = (char *) elfcore_write_register_note
4388 (obfd, note_data, note_size,
4389 sect_list->sect_name, gdb_regset,
4390 sect_list->size);
17ea7499
CES
4391 xfree (gdb_regset);
4392 sect_list++;
4393 }
dba24537 4394
17ea7499
CES
 4395  /* For architectures that do not have the struct core_regset_section
4396 implemented, we use the old method. When all the architectures have
4397 the new support, the code below should be deleted. */
4f844a66 4398 else
17ea7499 4399 {
2f2241f1
UW
4400 gdb_gregset_t gregs;
4401 gdb_fpregset_t fpregs;
4402
4403 if (core_regset_p
4404 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3e43a32a
MS
4405 sizeof (gregs)))
4406 != NULL && regset->collect_regset != NULL)
2f2241f1
UW
4407 regset->collect_regset (regset, regcache, -1,
4408 &gregs, sizeof (gregs));
4409 else
4410 fill_gregset (regcache, &gregs, -1);
4411
857d11d0
JK
4412 note_data = (char *) elfcore_write_prstatus
4413 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4414 &gregs);
2f2241f1 4415
17ea7499
CES
4416 if (core_regset_p
4417 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3e43a32a
MS
4418 sizeof (fpregs)))
4419 != NULL && regset->collect_regset != NULL)
17ea7499
CES
4420 regset->collect_regset (regset, regcache, -1,
4421 &fpregs, sizeof (fpregs));
4422 else
4423 fill_fpregset (regcache, &fpregs, -1);
4424
4425 note_data = (char *) elfcore_write_prfpreg (obfd,
4426 note_data,
4427 note_size,
4428 &fpregs, sizeof (fpregs));
4429 }
4f844a66 4430
dba24537
AC
4431 return note_data;
4432}
4433
4434struct linux_nat_corefile_thread_data
4435{
4436 bfd *obfd;
4437 char *note_data;
4438 int *note_size;
4439 int num_notes;
2020b7ab 4440 enum target_signal stop_signal;
dba24537
AC
4441};
4442
4443/* Called by gdbthread.c once per thread. Records the thread's
4444 register state for the corefile note section. */
4445
4446static int
4447linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4448{
4449 struct linux_nat_corefile_thread_data *args = data;
dba24537 4450
dba24537
AC
4451 args->note_data = linux_nat_do_thread_registers (args->obfd,
4452 ti->ptid,
4453 args->note_data,
2020b7ab
PA
4454 args->note_size,
4455 args->stop_signal);
dba24537 4456 args->num_notes++;
56be3814 4457
dba24537
AC
4458 return 0;
4459}
4460
efcbbd14
UW
4461/* Enumerate spufs IDs for process PID. */
4462
4463static void
4464iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4465{
4466 char path[128];
4467 DIR *dir;
4468 struct dirent *entry;
4469
4470 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4471 dir = opendir (path);
4472 if (!dir)
4473 return;
4474
4475 rewinddir (dir);
4476 while ((entry = readdir (dir)) != NULL)
4477 {
4478 struct stat st;
4479 struct statfs stfs;
4480 int fd;
4481
4482 fd = atoi (entry->d_name);
4483 if (!fd)
4484 continue;
4485
4486 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4487 if (stat (path, &st) != 0)
4488 continue;
4489 if (!S_ISDIR (st.st_mode))
4490 continue;
4491
4492 if (statfs (path, &stfs) != 0)
4493 continue;
4494 if (stfs.f_type != SPUFS_MAGIC)
4495 continue;
4496
4497 callback (data, fd);
4498 }
4499
4500 closedir (dir);
4501}
4502
4503/* Generate corefile notes for SPU contexts. */
4504
4505struct linux_spu_corefile_data
4506{
4507 bfd *obfd;
4508 char *note_data;
4509 int *note_size;
4510};
4511
4512static void
4513linux_spu_corefile_callback (void *data, int fd)
4514{
4515 struct linux_spu_corefile_data *args = data;
4516 int i;
4517
4518 static const char *spu_files[] =
4519 {
4520 "object-id",
4521 "mem",
4522 "regs",
4523 "fpcr",
4524 "lslr",
4525 "decr",
4526 "decr_status",
4527 "signal1",
4528 "signal1_type",
4529 "signal2",
4530 "signal2_type",
4531 "event_mask",
4532 "event_status",
4533 "mbox_info",
4534 "ibox_info",
4535 "wbox_info",
4536 "dma_info",
4537 "proxydma_info",
4538 };
4539
4540 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4541 {
4542 char annex[32], note_name[32];
4543 gdb_byte *spu_data;
4544 LONGEST spu_len;
4545
4546 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4547 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4548 annex, &spu_data);
4549 if (spu_len > 0)
4550 {
4551 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4552 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4553 args->note_size, note_name,
4554 NT_SPU, spu_data, spu_len);
4555 xfree (spu_data);
4556 }
4557 }
4558}
4559
4560static char *
4561linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4562{
4563 struct linux_spu_corefile_data args;
e0881a8e 4564
efcbbd14
UW
4565 args.obfd = obfd;
4566 args.note_data = note_data;
4567 args.note_size = note_size;
4568
4569 iterate_over_spus (PIDGET (inferior_ptid),
4570 linux_spu_corefile_callback, &args);
4571
4572 return args.note_data;
4573}
4574
dba24537
AC
4575/* Fills the "to_make_corefile_note" target vector. Builds the note
4576 section for a corefile, and returns it in a malloc buffer. */
4577
4578static char *
4579linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4580{
4581 struct linux_nat_corefile_thread_data thread_args;
d99148ef 4582 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 4583 char fname[16] = { '\0' };
d99148ef 4584 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
4585 char psargs[80] = { '\0' };
4586 char *note_data = NULL;
d90e17a7 4587 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
c6826062 4588 gdb_byte *auxv;
dba24537
AC
4589 int auxv_len;
4590
4591 if (get_exec_file (0))
4592 {
9f37bbcc 4593 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
dba24537
AC
4594 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4595 if (get_inferior_args ())
4596 {
d99148ef
JK
4597 char *string_end;
4598 char *psargs_end = psargs + sizeof (psargs);
4599
4600 /* linux_elfcore_write_prpsinfo () handles zero unterminated
4601 strings fine. */
4602 string_end = memchr (psargs, 0, sizeof (psargs));
4603 if (string_end != NULL)
4604 {
4605 *string_end++ = ' ';
4606 strncpy (string_end, get_inferior_args (),
4607 psargs_end - string_end);
4608 }
dba24537
AC
4609 }
4610 note_data = (char *) elfcore_write_prpsinfo (obfd,
4611 note_data,
4612 note_size, fname, psargs);
4613 }
4614
4615 /* Dump information for threads. */
4616 thread_args.obfd = obfd;
4617 thread_args.note_data = note_data;
4618 thread_args.note_size = note_size;
4619 thread_args.num_notes = 0;
2020b7ab 4620 thread_args.stop_signal = find_stop_signal ();
d90e17a7 4621 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
4622 gdb_assert (thread_args.num_notes != 0);
4623 note_data = thread_args.note_data;
dba24537 4624
13547ab6
DJ
4625 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4626 NULL, &auxv);
dba24537
AC
4627 if (auxv_len > 0)
4628 {
4629 note_data = elfcore_write_note (obfd, note_data, note_size,
4630 "CORE", NT_AUXV, auxv, auxv_len);
4631 xfree (auxv);
4632 }
4633
efcbbd14
UW
4634 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4635
dba24537
AC
4636 make_cleanup (xfree, note_data);
4637 return note_data;
4638}
4639
4640/* Implement the "info proc" command. */
4641
4642static void
4643linux_nat_info_proc_cmd (char *args, int from_tty)
4644{
89ecc4f5
DE
4645 /* A long is used for pid instead of an int to avoid a loss of precision
4646 compiler warning from the output of strtoul. */
4647 long pid = PIDGET (inferior_ptid);
dba24537
AC
4648 FILE *procfile;
4649 char **argv = NULL;
4650 char buffer[MAXPATHLEN];
4651 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
4652 int cmdline_f = 1;
4653 int cwd_f = 1;
4654 int exe_f = 1;
4655 int mappings_f = 0;
dba24537
AC
4656 int status_f = 0;
4657 int stat_f = 0;
4658 int all = 0;
4659 struct stat dummy;
4660
4661 if (args)
4662 {
4663 /* Break up 'args' into an argv array. */
d1a41061
PP
4664 argv = gdb_buildargv (args);
4665 make_cleanup_freeargv (argv);
dba24537
AC
4666 }
4667 while (argv != NULL && *argv != NULL)
4668 {
4669 if (isdigit (argv[0][0]))
4670 {
4671 pid = strtoul (argv[0], NULL, 10);
4672 }
4673 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
4674 {
4675 mappings_f = 1;
4676 }
4677 else if (strcmp (argv[0], "status") == 0)
4678 {
4679 status_f = 1;
4680 }
4681 else if (strcmp (argv[0], "stat") == 0)
4682 {
4683 stat_f = 1;
4684 }
4685 else if (strcmp (argv[0], "cmd") == 0)
4686 {
4687 cmdline_f = 1;
4688 }
4689 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
4690 {
4691 exe_f = 1;
4692 }
4693 else if (strcmp (argv[0], "cwd") == 0)
4694 {
4695 cwd_f = 1;
4696 }
4697 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
4698 {
4699 all = 1;
4700 }
4701 else
4702 {
1777feb0 4703 /* [...] (future options here). */
dba24537
AC
4704 }
4705 argv++;
4706 }
4707 if (pid == 0)
8a3fe4f8 4708 error (_("No current process: you must name one."));
dba24537 4709
89ecc4f5 4710 sprintf (fname1, "/proc/%ld", pid);
dba24537 4711 if (stat (fname1, &dummy) != 0)
8a3fe4f8 4712 error (_("No /proc directory: '%s'"), fname1);
dba24537 4713
89ecc4f5 4714 printf_filtered (_("process %ld\n"), pid);
dba24537
AC
4715 if (cmdline_f || all)
4716 {
89ecc4f5 4717 sprintf (fname1, "/proc/%ld/cmdline", pid);
d5d6fca5 4718 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4719 {
7c8a8b04 4720 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4721
bf1d7d9c
JB
4722 if (fgets (buffer, sizeof (buffer), procfile))
4723 printf_filtered ("cmdline = '%s'\n", buffer);
4724 else
4725 warning (_("unable to read '%s'"), fname1);
7c8a8b04 4726 do_cleanups (cleanup);
dba24537
AC
4727 }
4728 else
8a3fe4f8 4729 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4730 }
4731 if (cwd_f || all)
4732 {
89ecc4f5 4733 sprintf (fname1, "/proc/%ld/cwd", pid);
dba24537
AC
4734 memset (fname2, 0, sizeof (fname2));
4735 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4736 printf_filtered ("cwd = '%s'\n", fname2);
4737 else
8a3fe4f8 4738 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4739 }
4740 if (exe_f || all)
4741 {
89ecc4f5 4742 sprintf (fname1, "/proc/%ld/exe", pid);
dba24537
AC
4743 memset (fname2, 0, sizeof (fname2));
4744 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
4745 printf_filtered ("exe = '%s'\n", fname2);
4746 else
8a3fe4f8 4747 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
4748 }
4749 if (mappings_f || all)
4750 {
89ecc4f5 4751 sprintf (fname1, "/proc/%ld/maps", pid);
d5d6fca5 4752 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4753 {
4754 long long addr, endaddr, size, offset, inode;
4755 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 4756 struct cleanup *cleanup;
dba24537 4757
7c8a8b04 4758 cleanup = make_cleanup_fclose (procfile);
a3f17187 4759 printf_filtered (_("Mapped address spaces:\n\n"));
a97b0ac8 4760 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4761 {
4762 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
4763 "Start Addr",
4764 " End Addr",
4765 " Size", " Offset", "objfile");
4766 }
4767 else
4768 {
4769 printf_filtered (" %18s %18s %10s %10s %7s\n",
4770 "Start Addr",
4771 " End Addr",
4772 " Size", " Offset", "objfile");
4773 }
4774
4775 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
4776 &offset, &device[0], &inode, &filename[0]))
4777 {
4778 size = endaddr - addr;
4779
4780 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
4781 calls here (and possibly above) should be abstracted
4782 out into their own functions? Andrew suggests using
4783 a generic local_address_string instead to print out
4784 the addresses; that makes sense to me, too. */
4785
a97b0ac8 4786 if (gdbarch_addr_bit (target_gdbarch) == 32)
dba24537
AC
4787 {
4788 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
4789 (unsigned long) addr, /* FIXME: pr_addr */
4790 (unsigned long) endaddr,
4791 (int) size,
4792 (unsigned int) offset,
4793 filename[0] ? filename : "");
4794 }
4795 else
4796 {
4797 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
4798 (unsigned long) addr, /* FIXME: pr_addr */
4799 (unsigned long) endaddr,
4800 (int) size,
4801 (unsigned int) offset,
4802 filename[0] ? filename : "");
4803 }
4804 }
4805
7c8a8b04 4806 do_cleanups (cleanup);
dba24537
AC
4807 }
4808 else
8a3fe4f8 4809 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4810 }
4811 if (status_f || all)
4812 {
89ecc4f5 4813 sprintf (fname1, "/proc/%ld/status", pid);
d5d6fca5 4814 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 4815 {
7c8a8b04 4816 struct cleanup *cleanup = make_cleanup_fclose (procfile);
e0881a8e 4817
dba24537
AC
4818 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
4819 puts_filtered (buffer);
7c8a8b04 4820 do_cleanups (cleanup);
dba24537
AC
4821 }
4822 else
8a3fe4f8 4823 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4824 }
4825 if (stat_f || all)
4826 {
89ecc4f5 4827 sprintf (fname1, "/proc/%ld/stat", pid);
d5d6fca5 4828 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
4829 {
4830 int itmp;
4831 char ctmp;
a25694b4 4832 long ltmp;
7c8a8b04 4833 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4834
4835 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4836 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 4837 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 4838 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 4839 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 4840 printf_filtered (_("State: %c\n"), ctmp);
dba24537 4841 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4842 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 4843 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4844 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 4845 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4846 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 4847 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4848 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 4849 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 4850 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
4851 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4852 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
4853 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4854 printf_filtered (_("Minor faults (no memory page): %lu\n"),
4855 (unsigned long) ltmp);
4856 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4857 printf_filtered (_("Minor faults, children: %lu\n"),
4858 (unsigned long) ltmp);
4859 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4860 printf_filtered (_("Major faults (memory page faults): %lu\n"),
4861 (unsigned long) ltmp);
4862 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4863 printf_filtered (_("Major faults, children: %lu\n"),
4864 (unsigned long) ltmp);
4865 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4866 printf_filtered (_("utime: %ld\n"), ltmp);
4867 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4868 printf_filtered (_("stime: %ld\n"), ltmp);
4869 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4870 printf_filtered (_("utime, children: %ld\n"), ltmp);
4871 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4872 printf_filtered (_("stime, children: %ld\n"), ltmp);
4873 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4874 printf_filtered (_("jiffies remaining in current "
4875 "time slice: %ld\n"), ltmp);
a25694b4
AS
4876 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4877 printf_filtered (_("'nice' value: %ld\n"), ltmp);
4878 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4879 printf_filtered (_("jiffies until next timeout: %lu\n"),
4880 (unsigned long) ltmp);
4881 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4882 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
4883 (unsigned long) ltmp);
4884 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3e43a32a
MS
4885 printf_filtered (_("start time (jiffies since "
4886 "system boot): %ld\n"), ltmp);
a25694b4
AS
4887 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4888 printf_filtered (_("Virtual memory size: %lu\n"),
4889 (unsigned long) ltmp);
4890 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3e43a32a
MS
4891 printf_filtered (_("Resident set size: %lu\n"),
4892 (unsigned long) ltmp);
a25694b4
AS
4893 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4894 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
4895 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4896 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
4897 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4898 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
4899 if (fscanf (procfile, "%lu ", &ltmp) > 0)
4900 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3e43a32a
MS
4901#if 0 /* Don't know how architecture-dependent the rest is...
4902 Anyway the signal bitmap info is available from "status". */
1777feb0 4903 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4904 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
1777feb0 4905 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4
AS
4906 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
4907 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4908 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
4909 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4910 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
4911 if (fscanf (procfile, "%ld ", &ltmp) > 0)
4912 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
4913 if (fscanf (procfile, "%ld ", &ltmp) > 0)
 4914	    printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
1777feb0 4915 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
a25694b4 4916 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 4917#endif
7c8a8b04 4918 do_cleanups (cleanup);
dba24537
AC
4919 }
4920 else
8a3fe4f8 4921 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
4922 }
4923}
4924
10d6c8cd
DJ
4925/* Implement the to_xfer_partial interface for memory reads using the /proc
4926 filesystem. Because we can use a single read() call for /proc, this
4927 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4928 but it doesn't support writes. */
4929
4930static LONGEST
4931linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4932 const char *annex, gdb_byte *readbuf,
4933 const gdb_byte *writebuf,
4934 ULONGEST offset, LONGEST len)
dba24537 4935{
10d6c8cd
DJ
4936 LONGEST ret;
4937 int fd;
dba24537
AC
4938 char filename[64];
4939
10d6c8cd 4940 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4941 return 0;
4942
4943 /* Don't bother for one word. */
4944 if (len < 3 * sizeof (long))
4945 return 0;
4946
4947 /* We could keep this file open and cache it - possibly one per
4948 thread. That requires some juggling, but is even faster. */
4949 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4950 fd = open (filename, O_RDONLY | O_LARGEFILE);
4951 if (fd == -1)
4952 return 0;
4953
4954 /* If pread64 is available, use it. It's faster if the kernel
4955 supports it (only one syscall), and it's 64-bit safe even on
4956 32-bit platforms (for instance, SPARC debugging a SPARC64
4957 application). */
4958#ifdef HAVE_PREAD64
10d6c8cd 4959 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4960#else
10d6c8cd 4961 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4962#endif
4963 ret = 0;
4964 else
4965 ret = len;
4966
4967 close (fd);
4968 return ret;
4969}
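/* Illustrative, standalone sketch (not the function above): reading
   inferior memory through /proc/<pid>/mem with a single pread64 call,
   which is the fast path linux_proc_xfer_partial relies on.  The
   target process must be ptrace-stopped for the read to succeed, and
   _LARGEFILE64_SOURCE is assumed to be defined before the includes so
   that pread64/off64_t are visible.  */

#define _LARGEFILE64_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

static ssize_t
read_inferior_mem_sketch (int pid, unsigned long long addr,
                          void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd < 0)
    return -1;

  /* One syscall, 64-bit safe even on 32-bit hosts.  */
  n = pread64 (fd, buf, len, (off64_t) addr);
  close (fd);
  return n;
}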
4970
efcbbd14
UW
4971
4972/* Enumerate spufs IDs for process PID. */
4973static LONGEST
4974spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4975{
4976 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4977 LONGEST pos = 0;
4978 LONGEST written = 0;
4979 char path[128];
4980 DIR *dir;
4981 struct dirent *entry;
4982
4983 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4984 dir = opendir (path);
4985 if (!dir)
4986 return -1;
4987
4988 rewinddir (dir);
4989 while ((entry = readdir (dir)) != NULL)
4990 {
4991 struct stat st;
4992 struct statfs stfs;
4993 int fd;
4994
4995 fd = atoi (entry->d_name);
4996 if (!fd)
4997 continue;
4998
4999 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
5000 if (stat (path, &st) != 0)
5001 continue;
5002 if (!S_ISDIR (st.st_mode))
5003 continue;
5004
5005 if (statfs (path, &stfs) != 0)
5006 continue;
5007 if (stfs.f_type != SPUFS_MAGIC)
5008 continue;
5009
5010 if (pos >= offset && pos + 4 <= offset + len)
5011 {
5012 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
5013 written += 4;
5014 }
5015 pos += 4;
5016 }
5017
5018 closedir (dir);
5019 return written;
5020}
5021
5022/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
5023 object type, using the /proc file system. */
5024static LONGEST
5025linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
5026 const char *annex, gdb_byte *readbuf,
5027 const gdb_byte *writebuf,
5028 ULONGEST offset, LONGEST len)
5029{
5030 char buf[128];
5031 int fd = 0;
5032 int ret = -1;
5033 int pid = PIDGET (inferior_ptid);
5034
5035 if (!annex)
5036 {
5037 if (!readbuf)
5038 return -1;
5039 else
5040 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
5041 }
5042
5043 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
5044 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
5045 if (fd <= 0)
5046 return -1;
5047
5048 if (offset != 0
5049 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
5050 {
5051 close (fd);
5052 return 0;
5053 }
5054
5055 if (writebuf)
5056 ret = write (fd, writebuf, (size_t) len);
5057 else if (readbuf)
5058 ret = read (fd, readbuf, (size_t) len);
5059
5060 close (fd);
5061 return ret;
5062}
5063
5064
dba24537
AC
5065/* Parse LINE as a signal set and add its set bits to SIGS. */
5066
5067static void
5068add_line_to_sigset (const char *line, sigset_t *sigs)
5069{
5070 int len = strlen (line) - 1;
5071 const char *p;
5072 int signum;
5073
5074 if (line[len] != '\n')
8a3fe4f8 5075 error (_("Could not parse signal set: %s"), line);
dba24537
AC
5076
5077 p = line;
5078 signum = len * 4;
5079 while (len-- > 0)
5080 {
5081 int digit;
5082
5083 if (*p >= '0' && *p <= '9')
5084 digit = *p - '0';
5085 else if (*p >= 'a' && *p <= 'f')
5086 digit = *p - 'a' + 10;
5087 else
8a3fe4f8 5088 error (_("Could not parse signal set: %s"), line);
dba24537
AC
5089
5090 signum -= 4;
5091
5092 if (digit & 1)
5093 sigaddset (sigs, signum + 1);
5094 if (digit & 2)
5095 sigaddset (sigs, signum + 2);
5096 if (digit & 4)
5097 sigaddset (sigs, signum + 3);
5098 if (digit & 8)
5099 sigaddset (sigs, signum + 4);
5100
5101 p++;
5102 }
5103}
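/* Worked example for add_line_to_sigset above: in a 64-bit mask such as

     SigPnd:	0000000000010000

   each hex digit covers four signal numbers, least-significant digit
   last.  The '1' sits in the digit covering signals 17-20 and has only
   its low bit set, so exactly signal 17 is added to the set (SIGCHLD
   on most Linux targets).  A minimal usage sketch:  */

static void
sigset_decode_sketch (void)
{
  sigset_t set;

  sigemptyset (&set);
  add_line_to_sigset ("0000000000010000\n", &set);

  /* Prints 1 where SIGCHLD is signal 17 (e.g. x86 GNU/Linux).  */
  printf ("%d\n", sigismember (&set, SIGCHLD));
}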
5104
5105/* Find process PID's pending signals from /proc/pid/status and set
5106 SIGS to match. */
5107
5108void
3e43a32a
MS
5109linux_proc_pending_signals (int pid, sigset_t *pending,
5110 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
5111{
5112 FILE *procfile;
5113 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 5114 struct cleanup *cleanup;
dba24537
AC
5115
5116 sigemptyset (pending);
5117 sigemptyset (blocked);
5118 sigemptyset (ignored);
5119 sprintf (fname, "/proc/%d/status", pid);
5120 procfile = fopen (fname, "r");
5121 if (procfile == NULL)
8a3fe4f8 5122 error (_("Could not open %s"), fname);
7c8a8b04 5123 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
5124
5125 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5126 {
5127 /* Normal queued signals are on the SigPnd line in the status
5128 file. However, 2.6 kernels also have a "shared" pending
5129 queue for delivering signals to a thread group, so check for
5130 a ShdPnd line also.
5131
5132 Unfortunately some Red Hat kernels include the shared pending
5133 queue but not the ShdPnd status field. */
5134
5135 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5136 add_line_to_sigset (buffer + 8, pending);
5137 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5138 add_line_to_sigset (buffer + 8, pending);
5139 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5140 add_line_to_sigset (buffer + 8, blocked);
5141 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5142 add_line_to_sigset (buffer + 8, ignored);
5143 }
5144
7c8a8b04 5145 do_cleanups (cleanup);
dba24537
AC
5146}
5147
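/* An illustrative sketch of a caller: inside GDB one might test
   whether a particular signal is queued for a process roughly like
   this (the pid 1234 and the choice of SIGINT are arbitrary example
   values):

     sigset_t pending, blocked, ignored;

     linux_proc_pending_signals (1234, &pending, &blocked, &ignored);
     if (sigismember (&pending, SIGINT))
       printf_unfiltered ("SIGINT is pending\n");
*/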
07e059b5
VP
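/* Transfer OS-specific data.  The ANNEX names the osdata table being
   read -- for example "processes", which backs "info os processes" --
   and the actual work is delegated to the shared
   linux_common_xfer_osdata routine.  */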
5148static LONGEST
5149linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
5150 const char *annex, gdb_byte *readbuf,
5151 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5 5152{
07e059b5
VP
5153 gdb_assert (object == TARGET_OBJECT_OSDATA);
5154
d26e3629 5155 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
5156}
5157
10d6c8cd
DJ
5158static LONGEST
5159linux_xfer_partial (struct target_ops *ops, enum target_object object,
5160 const char *annex, gdb_byte *readbuf,
5161 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5162{
5163 LONGEST xfer;
5164
5165 if (object == TARGET_OBJECT_AUXV)
9f2982ff 5166 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
5167 offset, len);
5168
07e059b5
VP
5169 if (object == TARGET_OBJECT_OSDATA)
5170 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5171 offset, len);
5172
efcbbd14
UW
5173 if (object == TARGET_OBJECT_SPU)
5174 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5175 offset, len);
5176
8f313923
JK
5177 /* GDB calculates all addresses in the possibly larger address width.
5178 Address width needs to be masked before its final use - either by
5179 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5180
5181 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
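  /* For instance (illustrative values): with ADDR_BIT equal to 32 the
     mask computed below is 0xffffffff, so a sign-extended address such
     as 0xffffffff80001000 is reduced to 0x80001000 before it is passed
     down.  */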
5182
5183 if (object == TARGET_OBJECT_MEMORY)
5184 {
5185 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5186
5187 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5188 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5189 }
5190
10d6c8cd
DJ
5191 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5192 offset, len);
5193 if (xfer != 0)
5194 return xfer;
5195
5196 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5197 offset, len);
5198}
5199
e9efe249 5200/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
5201 it with local methods. */
5202
910122bf
UW
5203static void
5204linux_target_install_ops (struct target_ops *t)
10d6c8cd 5205{
6d8fd2b7 5206 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 5207 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 5208 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 5209 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 5210 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 5211 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 5212 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 5213 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 5214 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
5215 t->to_post_attach = linux_child_post_attach;
5216 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
5217 t->to_find_memory_regions = linux_nat_find_memory_regions;
5218 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5219
5220 super_xfer_partial = t->to_xfer_partial;
5221 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
5222}
5223
5224struct target_ops *
5225linux_target (void)
5226{
5227 struct target_ops *t;
5228
5229 t = inf_ptrace_target ();
5230 linux_target_install_ops (t);
5231
5232 return t;
5233}
5234
5235struct target_ops *
7714d83a 5236linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
5237{
5238 struct target_ops *t;
5239
5240 t = inf_ptrace_trad_target (register_u_offset);
5241 linux_target_install_ops (t);
10d6c8cd 5242
10d6c8cd
DJ
5243 return t;
5244}
5245
b84876c2
PA
5246/* target_is_async_p implementation. */
5247
5248static int
5249linux_nat_is_async_p (void)
5250{
5251 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5252 it explicitly with the "set target-async" command.
b84876c2 5253 Someday, linux will always be async. */
3dd5b83d 5254 return target_async_permitted;
b84876c2
PA
5255}
5256
5257/* target_can_async_p implementation. */
5258
5259static int
5260linux_nat_can_async_p (void)
5261{
5262 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 5263 it explicitly with the "set target-async" command.
b84876c2 5264 Someday, linux will always be async. */
3dd5b83d 5265 return target_async_permitted;
b84876c2
PA
5266}
5267
9908b566
VP
5268static int
5269linux_nat_supports_non_stop (void)
5270{
5271 return 1;
5272}
5273
d90e17a7
PA
5274/* True if we want to support multi-process. To be removed when GDB
5275 supports multi-exec. */
5276
2277426b 5277int linux_multi_process = 1;
d90e17a7
PA
5278
5279static int
5280linux_nat_supports_multi_process (void)
5281{
5282 return linux_multi_process;
5283}
5284
03583c20
UW
5285static int
5286linux_nat_supports_disable_randomization (void)
5287{
5288#ifdef HAVE_PERSONALITY
5289 return 1;
5290#else
5291 return 0;
5292#endif
5293}
5294
b84876c2
PA
5295static int async_terminal_is_ours = 1;
5296
5297/* target_terminal_inferior implementation. */
5298
5299static void
5300linux_nat_terminal_inferior (void)
5301{
5302 if (!target_is_async_p ())
5303 {
5304 /* Async mode is disabled. */
5305 terminal_inferior ();
5306 return;
5307 }
5308
b84876c2
PA
5309 terminal_inferior ();
5310
d9d2d8b6 5311 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
5312 if (!async_terminal_is_ours)
5313 return;
5314
5315 delete_file_handler (input_fd);
5316 async_terminal_is_ours = 0;
5317 set_sigint_trap ();
5318}
5319
5320/* target_terminal_ours implementation. */
5321
2c0b251b 5322static void
b84876c2
PA
5323linux_nat_terminal_ours (void)
5324{
5325 if (!target_is_async_p ())
5326 {
5327 /* Async mode is disabled. */
5328 terminal_ours ();
5329 return;
5330 }
5331
5332 /* GDB should never give the terminal to the inferior if the
5333 inferior is running in the background (run&, continue&, etc.),
5334 but claiming it back sure should. */
5335 terminal_ours ();
5336
b84876c2
PA
5337 if (async_terminal_is_ours)
5338 return;
5339
5340 clear_sigint_trap ();
5341 add_file_handler (input_fd, stdin_event_handler, 0);
5342 async_terminal_is_ours = 1;
5343}
5344
5345static void (*async_client_callback) (enum inferior_event_type event_type,
5346 void *context);
5347static void *async_client_context;
5348
7feb7d06
PA
5349/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5350 it lets us notice when any child changes state and notify the
5351 event loop; in sync mode, it makes the sigsuspend in linux_nat_wait_1
5352 above return when a SIGCHLD arrives. */
5353
b84876c2 5354static void
7feb7d06 5355sigchld_handler (int signo)
b84876c2 5356{
7feb7d06
PA
5357 int old_errno = errno;
5358
01124a23
DE
5359 if (debug_linux_nat)
5360 ui_file_write_async_safe (gdb_stdlog,
5361 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
5362
5363 if (signo == SIGCHLD
5364 && linux_nat_event_pipe[0] != -1)
5365 async_file_mark (); /* Let the event loop know that there are
5366 events to handle. */
5367
5368 errno = old_errno;
5369}
5370
5371/* Callback registered with the target events file descriptor. */
5372
5373static void
5374handle_target_event (int error, gdb_client_data client_data)
5375{
5376 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5377}
5378
5379/* Create/destroy the target events pipe. Returns previous state. */
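/* This is the usual self-pipe pattern: sigchld_handler above calls
   async_file_mark to put a byte in the write end from signal context,
   and linux_nat_async registers the read end with the event loop, so
   a SIGCHLD delivered at any point wakes the loop up.  Both ends are
   made non-blocking below so that neither the handler nor the code
   draining the pipe can block on it.  */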
5380
5381static int
5382linux_async_pipe (int enable)
5383{
5384 int previous = (linux_nat_event_pipe[0] != -1);
5385
5386 if (previous != enable)
5387 {
5388 sigset_t prev_mask;
5389
5390 block_child_signals (&prev_mask);
5391
5392 if (enable)
5393 {
5394 if (pipe (linux_nat_event_pipe) == -1)
5395 internal_error (__FILE__, __LINE__,
5396 "creating event pipe failed.");
5397
5398 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5399 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5400 }
5401 else
5402 {
5403 close (linux_nat_event_pipe[0]);
5404 close (linux_nat_event_pipe[1]);
5405 linux_nat_event_pipe[0] = -1;
5406 linux_nat_event_pipe[1] = -1;
5407 }
5408
5409 restore_child_signals_mask (&prev_mask);
5410 }
5411
5412 return previous;
b84876c2
PA
5413}
5414
5415/* target_async implementation. */
5416
5417static void
5418linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5419 void *context), void *context)
5420{
b84876c2
PA
5421 if (callback != NULL)
5422 {
5423 async_client_callback = callback;
5424 async_client_context = context;
7feb7d06
PA
5425 if (!linux_async_pipe (1))
5426 {
5427 add_file_handler (linux_nat_event_pipe[0],
5428 handle_target_event, NULL);
5429 /* There may be pending events to handle. Tell the event loop
5430 to poll them. */
5431 async_file_mark ();
5432 }
b84876c2
PA
5433 }
5434 else
5435 {
5436 async_client_callback = callback;
5437 async_client_context = context;
b84876c2 5438 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 5439 linux_async_pipe (0);
b84876c2
PA
5440 }
5441 return;
5442}
5443
252fbfc8
PA
5444/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5445 event came out. */
5446
4c28f408 5447static int
252fbfc8 5448linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 5449{
d90e17a7 5450 if (!lwp->stopped)
252fbfc8 5451 {
d90e17a7 5452 ptid_t ptid = lwp->ptid;
252fbfc8 5453
d90e17a7
PA
5454 if (debug_linux_nat)
5455 fprintf_unfiltered (gdb_stdlog,
5456 "LNSL: running -> suspending %s\n",
5457 target_pid_to_str (lwp->ptid));
252fbfc8 5458
252fbfc8 5459
25289eb2
PA
5460 if (lwp->last_resume_kind == resume_stop)
5461 {
5462 if (debug_linux_nat)
5463 fprintf_unfiltered (gdb_stdlog,
5464 "linux-nat: already stopping LWP %ld at "
5465 "GDB's request\n",
5466 ptid_get_lwp (lwp->ptid));
5467 return 0;
5468 }
252fbfc8 5469
25289eb2
PA
5470 stop_callback (lwp, NULL);
5471 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
5472 }
5473 else
5474 {
5475 /* Already known to be stopped; do nothing. */
252fbfc8 5476
d90e17a7
PA
5477 if (debug_linux_nat)
5478 {
e09875d4 5479 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5480 fprintf_unfiltered (gdb_stdlog,
5481 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5482 target_pid_to_str (lwp->ptid));
5483 else
3e43a32a
MS
5484 fprintf_unfiltered (gdb_stdlog,
5485 "LNSL: already stopped/no "
5486 "stop_requested yet %s\n",
d90e17a7 5487 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5488 }
5489 }
4c28f408
PA
5490 return 0;
5491}
5492
5493static void
5494linux_nat_stop (ptid_t ptid)
5495{
5496 if (non_stop)
d90e17a7 5497 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5498 else
5499 linux_ops->to_stop (ptid);
5500}
5501
d90e17a7
PA
5502static void
5503linux_nat_close (int quitting)
5504{
5505 /* Unregister from the event loop. */
5506 if (target_is_async_p ())
5507 target_async (NULL, 0);
5508
d90e17a7
PA
5509 if (linux_ops->to_close)
5510 linux_ops->to_close (quitting);
5511}
5512
c0694254
PA
5513/* When requests are passed down from the linux-nat layer to the
5514 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5515 used. The address space pointer is stored in the inferior object,
5516 but the common code that is passed such ptid can't tell whether
5517 lwpid is a "main" process id or not (it assumes so). We reverse
5518 look up the "main" process id from the lwp here. */
5519
5520struct address_space *
5521linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5522{
5523 struct lwp_info *lwp;
5524 struct inferior *inf;
5525 int pid;
5526
5527 pid = GET_LWP (ptid);
5528 if (GET_LWP (ptid) == 0)
5529 {
5530 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5531 tgid. */
5532 lwp = find_lwp_pid (ptid);
5533 pid = GET_PID (lwp->ptid);
5534 }
5535 else
5536 {
5537 /* A (pid,lwpid,0) ptid. */
5538 pid = GET_PID (ptid);
5539 }
5540
5541 inf = find_inferior_pid (pid);
5542 gdb_assert (inf != NULL);
5543 return inf->aspace;
5544}
5545
dc146f7c
VP
5546int
5547linux_nat_core_of_thread_1 (ptid_t ptid)
5548{
5549 struct cleanup *back_to;
5550 char *filename;
5551 FILE *f;
5552 char *content = NULL;
5553 char *p;
5554 char *ts = 0;
5555 int content_read = 0;
5556 int i;
5557 int core;
5558
5559 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5560 GET_PID (ptid), GET_LWP (ptid));
5561 back_to = make_cleanup (xfree, filename);
5562
5563 f = fopen (filename, "r");
5564 if (!f)
5565 {
5566 do_cleanups (back_to);
5567 return -1;
5568 }
5569
5570 make_cleanup_fclose (f);
5571
5572 for (;;)
5573 {
5574 int n;
e0881a8e 5575
dc146f7c
VP
5576 content = xrealloc (content, content_read + 1024);
5577 n = fread (content + content_read, 1, 1024, f);
5578 content_read += n;
5579 if (n < 1024)
5580 {
5581 content[content_read] = '\0';
5582 break;
5583 }
5584 }
5585
5586 make_cleanup (xfree, content);
5587
5588 p = strchr (content, '(');
ca2a87a0
JK
5589
5590 /* Skip ")". */
5591 if (p != NULL)
5592 p = strchr (p, ')');
5593 if (p != NULL)
5594 p++;
dc146f7c
VP
5595
5596 /* Counting the first field after the program name as index 0, the core
5597 number is the field with index 36. There's no constant for that anywhere. */
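  /* In proc(5) terms, where the pid counts as field 1, this is field
     39, "processor" -- the CPU the task last ran on.  */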
ca2a87a0
JK
5598 if (p != NULL)
5599 p = strtok_r (p, " ", &ts);
5600 for (i = 0; p != NULL && i != 36; ++i)
dc146f7c
VP
5601 p = strtok_r (NULL, " ", &ts);
5602
ca2a87a0 5603 if (p == NULL || sscanf (p, "%d", &core) == 0)
dc146f7c
VP
5604 core = -1;
5605
5606 do_cleanups (back_to);
5607
5608 return core;
5609}
5610
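/* An illustrative, self-contained sketch (plain libc, not GDB code)
   of the same /proc stat parse, run against the calling process's own
   entry: skip past the ")" that closes the command name, then take
   the whitespace-separated field with index 36.

     #include <stdio.h>
     #include <string.h>

     int
     main (void)
     {
       char buf[4096];
       char *p, *ts = NULL;
       size_t n;
       int i, core = -1;
       FILE *f = fopen ("/proc/self/stat", "r");

       if (f == NULL)
         return 1;
       n = fread (buf, 1, sizeof buf - 1, f);
       fclose (f);
       buf[n] = '\0';

       p = strrchr (buf, ')');
       if (p != NULL)
         p = strtok_r (p + 1, " ", &ts);
       for (i = 0; p != NULL && i != 36; ++i)
         p = strtok_r (NULL, " ", &ts);

       if (p == NULL || sscanf (p, "%d", &core) != 1)
         core = -1;
       printf ("last ran on CPU %d\n", core);
       return 0;
     }

   Unlike the code above it uses strrchr to find the last ")", which
   also copes with a command name that itself contains parentheses.  */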
5611/* Return the cached value of the processor core for thread PTID. */
5612
5613int
5614linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5615{
5616 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5617
dc146f7c
VP
5618 if (info)
5619 return info->core;
5620 return -1;
5621}
5622
f973ed9c
DJ
5623void
5624linux_nat_add_target (struct target_ops *t)
5625{
f973ed9c
DJ
5626 /* Save the provided single-threaded target. We save this in a separate
5627 variable because another target we've inherited from (e.g. inf-ptrace)
5628 may have saved a pointer to T; we want to use it for the final
5629 process stratum target. */
5630 linux_ops_saved = *t;
5631 linux_ops = &linux_ops_saved;
5632
5633 /* Override some methods for multithreading. */
b84876c2 5634 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
5635 t->to_attach = linux_nat_attach;
5636 t->to_detach = linux_nat_detach;
5637 t->to_resume = linux_nat_resume;
5638 t->to_wait = linux_nat_wait;
2455069d 5639 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
5640 t->to_xfer_partial = linux_nat_xfer_partial;
5641 t->to_kill = linux_nat_kill;
5642 t->to_mourn_inferior = linux_nat_mourn_inferior;
5643 t->to_thread_alive = linux_nat_thread_alive;
5644 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 5645 t->to_thread_name = linux_nat_thread_name;
f973ed9c 5646 t->to_has_thread_control = tc_schedlock;
c0694254 5647 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
5648 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5649 t->to_stopped_data_address = linux_nat_stopped_data_address;
f973ed9c 5650
b84876c2
PA
5651 t->to_can_async_p = linux_nat_can_async_p;
5652 t->to_is_async_p = linux_nat_is_async_p;
9908b566 5653 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 5654 t->to_async = linux_nat_async;
b84876c2
PA
5655 t->to_terminal_inferior = linux_nat_terminal_inferior;
5656 t->to_terminal_ours = linux_nat_terminal_ours;
d90e17a7 5657 t->to_close = linux_nat_close;
b84876c2 5658
4c28f408
PA
5659 /* Methods for non-stop support. */
5660 t->to_stop = linux_nat_stop;
5661
d90e17a7
PA
5662 t->to_supports_multi_process = linux_nat_supports_multi_process;
5663
03583c20
UW
5664 t->to_supports_disable_randomization
5665 = linux_nat_supports_disable_randomization;
5666
dc146f7c
VP
5667 t->to_core_of_thread = linux_nat_core_of_thread;
5668
f973ed9c
DJ
5669 /* We don't change the stratum; this target will sit at
5670 process_stratum and thread_db will sit at thread_stratum. This
5671 is a little strange, since this is a multi-threaded-capable
5672 target, but we want to be on the stack below thread_db, and we
5673 also want to be used for single-threaded processes. */
5674
5675 add_target (t);
f973ed9c
DJ
5676}
5677
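/* An illustrative sketch of how an architecture-specific native file
   typically uses the hooks above: build the prototype target,
   override the register access methods, and register the result.
   The "foo" identifiers are placeholders, not real GDB symbols.

     void
     _initialize_foo_linux_nat (void)
     {
       struct target_ops *t;

       t = linux_target ();
       t->to_fetch_registers = foo_fetch_inferior_registers;
       t->to_store_registers = foo_store_inferior_registers;
       linux_nat_add_target (t);
       linux_nat_set_new_thread (t, foo_new_thread);
     }
*/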
9f0bdab8
DJ
5678/* Register a method to call whenever a new thread is attached. */
5679void
5680linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
5681{
5682 /* Save the pointer. We only support a single registered instance
5683 of the GNU/Linux native target, so we do not need to map this to
5684 T. */
5685 linux_nat_new_thread = new_thread;
5686}
5687
5b009018
PA
5688/* Register a method that converts a siginfo object between the layout
5689 that ptrace returns, and the layout in the architecture of the
5690 inferior. */
5691void
5692linux_nat_set_siginfo_fixup (struct target_ops *t,
5693 int (*siginfo_fixup) (struct siginfo *,
5694 gdb_byte *,
5695 int))
5696{
5697 /* Save the pointer. */
5698 linux_nat_siginfo_fixup = siginfo_fixup;
5699}
5700
9f0bdab8
DJ
5701/* Return the saved siginfo associated with PTID. */
5702struct siginfo *
5703linux_nat_get_siginfo (ptid_t ptid)
5704{
5705 struct lwp_info *lp = find_lwp_pid (ptid);
5706
5707 gdb_assert (lp != NULL);
5708
5709 return &lp->siginfo;
5710}
5711
2c0b251b
PA
5712/* Provide a prototype to silence -Wmissing-prototypes. */
5713extern initialize_file_ftype _initialize_linux_nat;
5714
d6b0e80f
AC
5715void
5716_initialize_linux_nat (void)
5717{
1bedd215
AC
5718 add_info ("proc", linux_nat_info_proc_cmd, _("\
5719Show /proc process information about any running process.\n\
dba24537
AC
5720Specify any process id, or use the program being debugged by default.\n\
5721Specify any of the following keywords for detailed info:\n\
5722 mappings -- list of mapped memory regions.\n\
5723 stat -- list a bunch of random process info.\n\
5724 status -- list a different bunch of random process info.\n\
1bedd215 5725 all -- list all available /proc info."));
d6b0e80f 5726
b84876c2
PA
5727 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5728 &debug_linux_nat, _("\
5729Set debugging of GNU/Linux lwp module."), _("\
5730Show debugging of GNU/Linux lwp module."), _("\
5731Enables printf debugging output."),
5732 NULL,
5733 show_debug_linux_nat,
5734 &setdebuglist, &showdebuglist);
5735
b84876c2 5736 /* Save this mask as the default. */
d6b0e80f
AC
5737 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5738
7feb7d06
PA
5739 /* Install a SIGCHLD handler. */
5740 sigchld_action.sa_handler = sigchld_handler;
5741 sigemptyset (&sigchld_action.sa_mask);
5742 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
5743
5744 /* Make it the default. */
7feb7d06 5745 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
5746
5747 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5748 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5749 sigdelset (&suspend_mask, SIGCHLD);
5750
7feb7d06 5751 sigemptyset (&blocked_mask);
d6b0e80f
AC
5752}
5753\f
5754
5755/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5756 the GNU/Linux Threads library and therefore doesn't really belong
5757 here. */
5758
5759/* Read variable NAME in the target and return its value if found.
5760 Otherwise return zero. It is assumed that the type of the variable
5761 is `int'. */
5762
5763static int
5764get_signo (const char *name)
5765{
5766 struct minimal_symbol *ms;
5767 int signo;
5768
5769 ms = lookup_minimal_symbol (name, NULL, NULL);
5770 if (ms == NULL)
5771 return 0;
5772
8e70166d 5773 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5774 sizeof (signo)) != 0)
5775 return 0;
5776
5777 return signo;
5778}
5779
5780/* Return the set of signals used by the threads library in *SET. */
5781
5782void
5783lin_thread_get_thread_signals (sigset_t *set)
5784{
5785 struct sigaction action;
5786 int restart, cancel;
5787
b84876c2 5788 sigemptyset (&blocked_mask);
d6b0e80f
AC
5789 sigemptyset (set);
5790
5791 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5792 cancel = get_signo ("__pthread_sig_cancel");
5793
5794 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5795 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5796 not provide any way for the debugger to query the signal numbers -
5797 fortunately they don't change! */
5798
d6b0e80f 5799 if (restart == 0)
17fbb0bd 5800 restart = __SIGRTMIN;
d6b0e80f 5801
d6b0e80f 5802 if (cancel == 0)
17fbb0bd 5803 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5804
5805 sigaddset (set, restart);
5806 sigaddset (set, cancel);
5807
5808 /* The GNU/Linux Threads library makes terminating threads send a
5809 special "cancel" signal instead of SIGCHLD. Make sure we catch
5810 those (to prevent them from terminating GDB itself, which is
5811 likely to be their default action) and treat them the same way as
5812 SIGCHLD. */
5813
5814 action.sa_handler = sigchld_handler;
5815 sigemptyset (&action.sa_mask);
58aecb61 5816 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5817 sigaction (cancel, &action, NULL);
5818
5819 /* We block the "cancel" signal throughout this code ... */
5820 sigaddset (&blocked_mask, cancel);
5821 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5822
5823 /* ... except during a sigsuspend. */
5824 sigdelset (&suspend_mask, cancel);
5825}