gdb/linux-nat.c
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

20#include "defs.h"
21#include "inferior.h"
22#include "target.h"
d6b0e80f 23#include "gdb_string.h"
3993f6b1 24#include "gdb_wait.h"
d6b0e80f
AC
25#include "gdb_assert.h"
26#ifdef HAVE_TKILL_SYSCALL
27#include <unistd.h>
28#include <sys/syscall.h>
29#endif
3993f6b1 30#include <sys/ptrace.h>
0274a8ce 31#include "linux-nat.h"
af96c192 32#include "linux-ptrace.h"
13da1c97 33#include "linux-procfs.h"
ac264b3b 34#include "linux-fork.h"
d6b0e80f
AC
35#include "gdbthread.h"
36#include "gdbcmd.h"
37#include "regcache.h"
4f844a66 38#include "regset.h"
dab06dbe 39#include "inf-child.h"
10d6c8cd
DJ
40#include "inf-ptrace.h"
41#include "auxv.h"
dba24537 42#include <sys/param.h> /* for MAXPATHLEN */
1777feb0 43#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
44#include "elf-bfd.h" /* for elfcore_write_* */
45#include "gregset.h" /* for gregset */
46#include "gdbcore.h" /* for get_exec_file */
47#include <ctype.h> /* for isdigit */
1777feb0 48#include "gdbthread.h" /* for struct thread_info etc. */
dba24537
AC
49#include "gdb_stat.h" /* for struct stat */
50#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
51#include "inf-loop.h"
52#include "event-loop.h"
53#include "event-top.h"
07e059b5
VP
54#include <pwd.h>
55#include <sys/types.h>
56#include "gdb_dirent.h"
57#include "xml-support.h"
191c4426 58#include "terminal.h"
efcbbd14 59#include <sys/vfs.h>
6c95b8df 60#include "solib.h"
d26e3629 61#include "linux-osdata.h"
6432734d 62#include "linux-tdep.h"
7dcd53a0 63#include "symfile.h"
5808517f
YQ
64#include "agent.h"
65#include "tracepoint.h"
87b0bb13
JK
66#include "exceptions.h"
67#include "linux-ptrace.h"
68#include "buffer.h"
efcbbd14
UW
69
70#ifndef SPUFS_MAGIC
71#define SPUFS_MAGIC 0x23c9b64e
72#endif
dba24537 73
10568435
JK
#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior
to version 2.4, Linux can either wait for events in the main thread, or in
secondary threads.  (2.4 has the __WALL flag).  So, if we use blocking
waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid to
get an event in the main process, if any.  Second, we use non-blocking
waitpid with the __WCLONE flag to check for events in cloned processes.  If
nothing is found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD
arrives, it means something happened to a child process -- and SIGCHLD will
be delivered both for events in the main debugged process and in cloned
processes.  As soon as we know there's an event, we get back to calling
non-blocking waitpid with and without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it can not be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

static int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

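/* Editor-added illustrative sketch (not part of GDB): how a SIGCHLD
   handler would use the pipe above to implement the self-pipe trick
   described in the header comment.  read(2) and write(2) are
   async-signal-safe, so marking the pipe is all the handler needs to
   do; the event loop's select/poll on linux_nat_event_pipe[0] then
   wakes up and the real waitpid work happens outside signal context.  */
#if 0
static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;

  async_file_mark ();
  errno = old_errno;
}
#endif
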
static void linux_nat_async (void (*callback)
			     (enum inferior_event_type event_type,
			      void *context),
			     void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed "
		   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected "
		   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: "
		       "failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
		 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
	 child execs or exits.  If we don't let the child run, then
	 the parent stays blocked.  If we're telling the parent to run
	 in the foreground, the user will not be able to ctrl-c to get
	 back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  struct cleanup *old_chain;

	  /* Before detaching from the child, remove all breakpoints
	     from it.  If we forked, then this has already been taken
	     care of by infrun.c.  If we vforked however, any
	     breakpoint inserted in the parent is visible in the
	     child, even those added while stopped in a vfork
	     catchpoint.  This will remove the breakpoints from the
	     parent also, but they'll be reinserted below.  */
	  if (has_vforked)
	    {
	      /* Keep the breakpoints list in sync.  */
	      remove_breakpoints_pid (GET_PID (inferior_ptid));
	    }

	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from "
				"child process %d.\n",
				child_pid);
	    }

	  old_chain = save_inferior_ptid ();
	  inferior_ptid = ptid_build (child_pid, child_pid, 0);

	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  /* CHILD_LP has new PID, therefore linux_nat_new_thread is not
	     called for it.  See i386_inferior_data_get for the Linux
	     kernel specifics.  Ensure linux_nat_prepare_to_resume will
	     reset the hardware debug registers.  It is done by the
	     linux_nat_new_thread call, which is being skipped in add_lwp
	     above for the first lwp of a pid.  */
	  gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
	  if (linux_nat_new_thread != NULL)
	    linux_nat_new_thread (child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);
	  ptrace (PTRACE_DETACH, child_pid, 0, 0);

	  do_cleanups (old_chain);
	}
      else
	{
	  struct inferior *parent_inf, *child_inf;
	  struct cleanup *old_chain;

	  /* Add process to GDB's tables.  */
	  child_inf = add_inferior (child_pid);

	  parent_inf = current_inferior ();
	  child_inf->attach_flag = parent_inf->attach_flag;
	  copy_terminal_info (child_inf, parent_inf);

	  old_chain = save_inferior_ptid ();
	  save_current_program_space ();

	  inferior_ptid = ptid_build (child_pid, child_pid, 0);
	  add_thread (inferior_ptid);
	  child_lp = add_lwp (inferior_ptid);
	  child_lp->stopped = 1;
	  child_lp->last_resume_kind = resume_stop;
	  child_inf->symfile_flags = SYMFILE_NO_READ;

	  /* If this is a vfork child, then the address-space is
	     shared with the parent.  */
	  if (has_vforked)
	    {
	      child_inf->pspace = parent_inf->pspace;
	      child_inf->aspace = parent_inf->aspace;

	      /* The parent will be frozen until the child is done
		 with the shared region.  Keep track of the
		 parent.  */
	      child_inf->vfork_parent = parent_inf;
	      child_inf->pending_detach = 0;
	      parent_inf->vfork_child = child_inf;
	      parent_inf->pending_detach = 0;
	    }
	  else
	    {
	      child_inf->aspace = new_address_space ();
	      child_inf->pspace = add_program_space (child_inf->aspace);
	      child_inf->removable = 1;
	      set_current_program_space (child_inf->pspace);
	      clone_program_space (child_inf->pspace, parent_inf->pspace);

	      /* Let the shared library layer (solib-svr4) learn about
		 this new process, relocate the cloned exec, pull in
		 shared libraries, and install the solib event
		 breakpoint.  If a "cloned-VM" event was propagated
		 better throughout the core, this wouldn't be
		 required.  */
	      solib_create_inferior_hook (0);
	    }

	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();

	  do_cleanups (old_chain);
	}

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;
	  struct inferior *parent_inf;

	  parent_inf = current_inferior ();

	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
	  parent_inf->waiting_for_vfork_done = detach_fork;
	  parent_inf->pspace->breakpoints_not_allowed = detach_fork;

	  parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
	  gdb_assert (linux_supports_tracefork_flag >= 0);

	  if (linux_supports_tracevforkdone (0))
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_can_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  if (has_vforked)
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"vfork to child process %d.\n"),
			      parent_pid, child_pid);
	  else
	    fprintf_filtered (gdb_stdlog,
			      _("Attaching after process %d "
				"fork to child process %d.\n"),
			      parent_pid, child_pid);
	}

      /* Add the new inferior first, so that the target_detach below
	 doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
	 child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */

      if (has_vforked)
	{
	  gdb_assert (child_inf->vfork_parent == NULL);
	  gdb_assert (parent_inf->vfork_child == NULL);
	  child_inf->vfork_parent = parent_inf;
	  child_inf->pending_detach = 0;
	  parent_inf->vfork_child = child_inf;
	  parent_inf->pending_detach = detach_fork;
	  parent_inf->waiting_for_vfork_done = 0;
	}
      else if (detach_fork)
	target_detach (NULL, 0);

      /* Note that the detach above makes PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
	 this new thread, before cloning the program space, and
	 informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
	 with the parent.  If we detached from the parent, then we can
	 reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
	{
	  child_inf->pspace = parent_pspace;
	  child_inf->aspace = child_inf->pspace->aspace;
	}
      else
	{
	  child_inf->aspace = new_address_space ();
	  child_inf->pspace = add_program_space (child_inf->aspace);
	  child_inf->removable = 1;
	  child_inf->symfile_flags = SYMFILE_NO_READ;
	  set_current_program_space (child_inf->pspace);
	  clone_program_space (child_inf->pspace, parent_pspace);

	  /* Let the shared library layer (solib-svr4) learn about
	     this new process, relocate the cloned exec, pull in
	     shared libraries, and install the solib event breakpoint.
	     If a "cloned-VM" event was propagated better throughout
	     the core, this wouldn't be required.  */
	  solib_create_inferior_hook (0);
	}

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
   order to make it report events for any of the cloned processes
   (and leave it out for the initial process).  However, if a cloned
   process has exited the exit status is only reported if the
   __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
   we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
   debugger, the kernel reassigns it to the original parent and
   keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
   library doesn't notice this, which leads to the "zombie problem":
   When debugged, a multi-threaded process that spawns a lot of
   threads will run out of processes, even if the threads exit,
   because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
	snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
		  strsignal (SIGTRAP));
      else
	snprintf (buf, sizeof (buf), "%s (stopped)",
		  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  Don't do this for the first
     thread though.  If we're spawning a child ("run"), the thread
     executes the shell wrapper first, and we shouldn't touch it until
     it execs the program we want to debug.  For "attach", it'd be
     okay to call the callback, but it's not necessary, because
     watchpoints can't yet have been inserted into the inferior.  */
  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   int (*callback) (struct lwp_info *, void *),
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data))
	    return lp;
	}
    }

  return NULL;
}

/* Iterate as iterate_over_lwps does, except that when forking off a
   child, CALLBACK is called with CALLBACK_DATA specifically and only
   for that new child PID.  */

void
linux_nat_iterate_watchpoint_lwps
  (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
{
  int inferior_pid = ptid_get_pid (inferior_ptid);
  struct inferior *inf = current_inferior ();

  if (inf->pid == inferior_pid)
    {
      /* Iterate all the threads of the current inferior.  Without specifying
	 INFERIOR_PID it would iterate all threads of all inferiors, which is
	 inappropriate for watchpoints.  */

      iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
    }
  else
    {
      /* Detaching a new child PID temporarily present in INFERIOR_PID.  */

      struct lwp_info *child_lp;
      struct cleanup *old_chain;
      pid_t child_pid = GET_PID (inferior_ptid);
      ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);

      gdb_assert (!is_lwp (inferior_ptid));
      gdb_assert (find_lwp_pid (child_ptid) == NULL);
      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
      old_chain = make_cleanup (delete_lwp_cleanup, child_lp);

      callback (child_lp, callback_data);

      do_cleanups (old_chain);
    }
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork_flag)
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  restore_child_signals_mask (&prev_mask);
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);

		      restore_child_signals_mask (&prev_mask);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  restore_child_signals_mask (&prev_mask);
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	{
	  restore_child_signals_mask (&prev_mask);
	  return 1;
	}

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  restore_child_signals_mask (&prev_mask);
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_warnings (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      throw_error (ex.error, "%s%s", buffer_s, message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

a0ef4274
DJ
1705/* Get pending status of LP. */
1706static int
1707get_pending_status (struct lwp_info *lp, int *status)
1708{
a493e3e2 1709 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1710
1711 /* If we paused threads momentarily, we may have stored pending
1712 events in lp->status or lp->waitstatus (see stop_wait_callback),
1713 and GDB core hasn't seen any signal for those threads.
1714 Otherwise, the last signal reported to the core is found in the
1715 thread object's stop_signal.
1716
1717 There's a corner case that isn't handled here at present. Only
1718 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1719 stop_signal make sense as a real signal to pass to the inferior.
1720 Some catchpoint related events, like
1721 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1722 to GDB_SIGNAL_TRAP when the catchpoint triggers. But,
ca2163eb
PA
1723 those traps are debug API (ptrace in our case) related and
1724 induced; the inferior wouldn't see them if it wasn't being
1725 traced. Hence, we should never pass them to the inferior, even
1726 when set to pass state. Since this corner case isn't handled by
1727 infrun.c when proceeding with a signal, for consistency, neither
1728 do we handle it here (or elsewhere in the file we check for
1729 signal pass state). Normally SIGTRAP isn't set to pass state, so
1730 this is really a corner case. */
1731
1732 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1733 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1734 else if (lp->status)
2ea28649 1735 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
ca2163eb
PA
1736 else if (non_stop && !is_executing (lp->ptid))
1737 {
1738 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1739
16c381f0 1740 signo = tp->suspend.stop_signal;
ca2163eb
PA
1741 }
1742 else if (!non_stop)
a0ef4274 1743 {
ca2163eb
PA
1744 struct target_waitstatus last;
1745 ptid_t last_ptid;
4c28f408 1746
ca2163eb 1747 get_last_target_status (&last_ptid, &last);
4c28f408 1748
ca2163eb
PA
1749 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1750 {
e09875d4 1751 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1752
16c381f0 1753 signo = tp->suspend.stop_signal;
4c28f408 1754 }
ca2163eb 1755 }
4c28f408 1756
ca2163eb 1757 *status = 0;
4c28f408 1758
a493e3e2 1759 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1760 {
1761 if (debug_linux_nat)
1762 fprintf_unfiltered (gdb_stdlog,
1763 "GPT: lwp %s has no pending signal\n",
1764 target_pid_to_str (lp->ptid));
1765 }
1766 else if (!signal_pass_state (signo))
1767 {
1768 if (debug_linux_nat)
3e43a32a
MS
1769 fprintf_unfiltered (gdb_stdlog,
1770 "GPT: lwp %s had signal %s, "
1771 "but it is in no pass state\n",
ca2163eb 1772 target_pid_to_str (lp->ptid),
2ea28649 1773 gdb_signal_to_string (signo));
a0ef4274 1774 }
a0ef4274 1775 else
4c28f408 1776 {
2ea28649 1777 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1778
1779 if (debug_linux_nat)
1780 fprintf_unfiltered (gdb_stdlog,
1781 "GPT: lwp %s has pending signal %s\n",
1782 target_pid_to_str (lp->ptid),
2ea28649 1783 gdb_signal_to_string (signo));
4c28f408 1784 }
a0ef4274
DJ
1785
1786 return 0;
1787}
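/* get_pending_status hands the signal back re-packed as a waitpid
   style status via W_STOPCODE, so callers such as detach_callback can
   apply the usual WIFSTOPPED/WSTOPSIG macros to it.  A hedged sketch
   of that round trip (illustrative only, assuming the glibc
   definition W_STOPCODE(sig) == ((sig) << 8 | 0x7f)):  */
#if 0
#include <sys/wait.h>
#include <signal.h>
#include <assert.h>

static void
example_w_stopcode_round_trip (void)
{
  int status = W_STOPCODE (SIGUSR1);

  assert (WIFSTOPPED (status));
  assert (WSTOPSIG (status) == SIGUSR1);
}
#endif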
1788
d6b0e80f
AC
1789static int
1790detach_callback (struct lwp_info *lp, void *data)
1791{
1792 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1793
1794 if (debug_linux_nat && lp->status)
1795 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1796 strsignal (WSTOPSIG (lp->status)),
1797 target_pid_to_str (lp->ptid));
1798
a0ef4274
DJ
1799 /* If there is a pending SIGSTOP, get rid of it. */
1800 if (lp->signalled)
d6b0e80f 1801 {
d6b0e80f
AC
1802 if (debug_linux_nat)
1803 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1804 "DC: Sending SIGCONT to %s\n",
1805 target_pid_to_str (lp->ptid));
d6b0e80f 1806
a0ef4274 1807 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1808 lp->signalled = 0;
d6b0e80f
AC
1809 }
1810
1811 /* We don't actually detach from the LWP that has an id equal to the
1812 overall process id just yet. */
1813 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1814 {
a0ef4274
DJ
1815 int status = 0;
1816
1817 /* Pass on any pending signal for this LWP. */
1818 get_pending_status (lp, &status);
1819
7b50312a
PA
1820 if (linux_nat_prepare_to_resume != NULL)
1821 linux_nat_prepare_to_resume (lp);
d6b0e80f
AC
1822 errno = 0;
1823 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1824 WSTOPSIG (status)) < 0)
8a3fe4f8 1825 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1826 safe_strerror (errno));
1827
1828 if (debug_linux_nat)
1829 fprintf_unfiltered (gdb_stdlog,
1830 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1831 target_pid_to_str (lp->ptid),
7feb7d06 1832 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1833
1834 delete_lwp (lp->ptid);
1835 }
1836
1837 return 0;
1838}
1839
1840static void
136d6dae 1841linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1842{
b84876c2 1843 int pid;
a0ef4274 1844 int status;
d90e17a7
PA
1845 struct lwp_info *main_lwp;
1846
1847 pid = GET_PID (inferior_ptid);
a0ef4274 1848
b84876c2
PA
1849 if (target_can_async_p ())
1850 linux_nat_async (NULL, 0);
1851
4c28f408
PA
1852 /* Stop all threads before detaching. ptrace requires that the
 1853 thread is stopped to successfully detach. */
d90e17a7 1854 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1855 /* ... and wait until all of them have reported back that
1856 they're no longer running. */
d90e17a7 1857 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1858
d90e17a7 1859 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1860
1861 /* Only the initial process should be left right now. */
d90e17a7
PA
1862 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1863
1864 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1865
a0ef4274
DJ
1866 /* Pass on any pending signal for the last LWP. */
1867 if ((args == NULL || *args == '\0')
d90e17a7 1868 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1869 && WIFSTOPPED (status))
1870 {
1871 /* Put the signal number in ARGS so that inf_ptrace_detach will
1872 pass it along with PTRACE_DETACH. */
1873 args = alloca (8);
1874 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1875 if (debug_linux_nat)
1876 fprintf_unfiltered (gdb_stdlog,
1877 "LND: Sending signal %s to %s\n",
1878 args,
1879 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1880 }
1881
7b50312a
PA
1882 if (linux_nat_prepare_to_resume != NULL)
1883 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1884 delete_lwp (main_lwp->ptid);
b84876c2 1885
7a7d3353
PA
1886 if (forks_exist_p ())
1887 {
1888 /* Multi-fork case. The current inferior_ptid is being detached
1889 from, but there are other viable forks to debug. Detach from
1890 the current fork, and context-switch to the first
1891 available. */
1892 linux_fork_detach (args, from_tty);
1893
1894 if (non_stop && target_can_async_p ())
1895 target_async (inferior_event_handler, 0);
1896 }
1897 else
1898 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1899}
1900
1901/* Resume LP. */
1902
25289eb2 1903static void
e5ef252a 1904resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1905{
25289eb2 1906 if (lp->stopped)
6c95b8df 1907 {
25289eb2
PA
1908 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1909
1910 if (inf->vfork_child != NULL)
1911 {
1912 if (debug_linux_nat)
1913 fprintf_unfiltered (gdb_stdlog,
1914 "RC: Not resuming %s (vfork parent)\n",
1915 target_pid_to_str (lp->ptid));
1916 }
1917 else if (lp->status == 0
1918 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1919 {
1920 if (debug_linux_nat)
1921 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1922 "RC: Resuming sibling %s, %s, %s\n",
1923 target_pid_to_str (lp->ptid),
1924 (signo != GDB_SIGNAL_0
1925 ? strsignal (gdb_signal_to_host (signo))
1926 : "0"),
1927 step ? "step" : "resume");
25289eb2 1928
7b50312a
PA
1929 if (linux_nat_prepare_to_resume != NULL)
1930 linux_nat_prepare_to_resume (lp);
25289eb2
PA
1931 linux_ops->to_resume (linux_ops,
1932 pid_to_ptid (GET_LWP (lp->ptid)),
e5ef252a 1933 step, signo);
25289eb2
PA
1934 lp->stopped = 0;
1935 lp->step = step;
1936 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1937 lp->stopped_by_watchpoint = 0;
1938 }
1939 else
1940 {
1941 if (debug_linux_nat)
1942 fprintf_unfiltered (gdb_stdlog,
1943 "RC: Not resuming sibling %s (has pending)\n",
1944 target_pid_to_str (lp->ptid));
1945 }
6c95b8df 1946 }
25289eb2 1947 else
d6b0e80f 1948 {
d90e17a7
PA
1949 if (debug_linux_nat)
1950 fprintf_unfiltered (gdb_stdlog,
25289eb2 1951 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1952 target_pid_to_str (lp->ptid));
d6b0e80f 1953 }
25289eb2 1954}
d6b0e80f 1955
e5ef252a
PA
1956/* Resume LWP, with the last stop signal, if it is in pass state. */
1957
25289eb2 1958static int
e5ef252a 1959linux_nat_resume_callback (struct lwp_info *lp, void *data)
25289eb2 1960{
e5ef252a
PA
1961 enum gdb_signal signo = GDB_SIGNAL_0;
1962
1963 if (lp->stopped)
1964 {
1965 struct thread_info *thread;
1966
1967 thread = find_thread_ptid (lp->ptid);
1968 if (thread != NULL)
1969 {
1970 if (signal_pass_state (thread->suspend.stop_signal))
1971 signo = thread->suspend.stop_signal;
1972 thread->suspend.stop_signal = GDB_SIGNAL_0;
1973 }
1974 }
1975
1976 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1977 return 0;
1978}
1979
1980static int
1981resume_clear_callback (struct lwp_info *lp, void *data)
1982{
1983 lp->resumed = 0;
25289eb2 1984 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1985 return 0;
1986}
1987
1988static int
1989resume_set_callback (struct lwp_info *lp, void *data)
1990{
1991 lp->resumed = 1;
25289eb2 1992 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1993 return 0;
1994}
1995
1996static void
28439f5e 1997linux_nat_resume (struct target_ops *ops,
2ea28649 1998 ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f 1999{
7feb7d06 2000 sigset_t prev_mask;
d6b0e80f 2001 struct lwp_info *lp;
d90e17a7 2002 int resume_many;
d6b0e80f 2003
76f50ad1
DJ
2004 if (debug_linux_nat)
2005 fprintf_unfiltered (gdb_stdlog,
2006 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
2007 step ? "step" : "resume",
2008 target_pid_to_str (ptid),
a493e3e2 2009 (signo != GDB_SIGNAL_0
2ea28649 2010 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
2011 target_pid_to_str (inferior_ptid));
2012
7feb7d06 2013 block_child_signals (&prev_mask);
b84876c2 2014
d6b0e80f 2015 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
2016 resume_many = (ptid_equal (minus_one_ptid, ptid)
2017 || ptid_is_pid (ptid));
4c28f408 2018
e3e9f5a2
PA
2019 /* Mark the lwps we're resuming as resumed. */
2020 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 2021
d90e17a7
PA
2022 /* See if it's the current inferior that should be handled
2023 specially. */
2024 if (resume_many)
2025 lp = find_lwp_pid (inferior_ptid);
2026 else
2027 lp = find_lwp_pid (ptid);
9f0bdab8 2028 gdb_assert (lp != NULL);
d6b0e80f 2029
9f0bdab8
DJ
2030 /* Remember if we're stepping. */
2031 lp->step = step;
25289eb2 2032 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 2033
9f0bdab8
DJ
2034 /* If we have a pending wait status for this thread, there is no
2035 point in resuming the process. But first make sure that
2036 linux_nat_wait won't preemptively handle the event - we
2037 should never take this short-circuit if we are going to
2038 leave LP running, since we have skipped resuming all the
2039 other threads. This bit of code needs to be synchronized
2040 with linux_nat_wait. */
76f50ad1 2041
9f0bdab8
DJ
2042 if (lp->status && WIFSTOPPED (lp->status))
2043 {
2455069d
UW
2044 if (!lp->step
2045 && WSTOPSIG (lp->status)
2046 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 2047 {
9f0bdab8
DJ
2048 if (debug_linux_nat)
2049 fprintf_unfiltered (gdb_stdlog,
2050 "LLR: Not short circuiting for ignored "
2051 "status 0x%x\n", lp->status);
2052
d6b0e80f
AC
2053 /* FIXME: What should we do if we are supposed to continue
2054 this thread with a signal? */
a493e3e2 2055 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 2056 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
2057 lp->status = 0;
2058 }
2059 }
76f50ad1 2060
6c95b8df 2061 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
2062 {
2063 /* FIXME: What should we do if we are supposed to continue
2064 this thread with a signal? */
a493e3e2 2065 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 2066
9f0bdab8
DJ
2067 if (debug_linux_nat)
2068 fprintf_unfiltered (gdb_stdlog,
2069 "LLR: Short circuiting for status 0x%x\n",
2070 lp->status);
d6b0e80f 2071
7feb7d06
PA
2072 restore_child_signals_mask (&prev_mask);
2073 if (target_can_async_p ())
2074 {
2075 target_async (inferior_event_handler, 0);
2076 /* Tell the event loop we have something to process. */
2077 async_file_mark ();
2078 }
9f0bdab8 2079 return;
d6b0e80f
AC
2080 }
2081
9f0bdab8 2082 /* Mark LWP as not stopped to prevent it from being continued by
e5ef252a 2083 linux_nat_resume_callback. */
9f0bdab8
DJ
2084 lp->stopped = 0;
2085
d90e17a7 2086 if (resume_many)
e5ef252a 2087 iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);
d90e17a7
PA
2088
2089 /* Convert to something the lower layer understands. */
2090 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 2091
7b50312a
PA
2092 if (linux_nat_prepare_to_resume != NULL)
2093 linux_nat_prepare_to_resume (lp);
28439f5e 2094 linux_ops->to_resume (linux_ops, ptid, step, signo);
9f0bdab8 2095 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
ebec9a0f 2096 lp->stopped_by_watchpoint = 0;
9f0bdab8 2097
d6b0e80f
AC
2098 if (debug_linux_nat)
2099 fprintf_unfiltered (gdb_stdlog,
2100 "LLR: %s %s, %s (resume event thread)\n",
2101 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2102 target_pid_to_str (ptid),
a493e3e2 2103 (signo != GDB_SIGNAL_0
2ea28649 2104 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 2105
7feb7d06 2106 restore_child_signals_mask (&prev_mask);
b84876c2 2107 if (target_can_async_p ())
8ea051c5 2108 target_async (inferior_event_handler, 0);
d6b0e80f
AC
2109}
2110
c5f62d5f 2111/* Send a signal to an LWP. */
d6b0e80f
AC
2112
2113static int
2114kill_lwp (int lwpid, int signo)
2115{
c5f62d5f
DE
2116 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2117 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2118
2119#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2120 {
2121 static int tkill_failed;
2122
2123 if (!tkill_failed)
2124 {
2125 int ret;
2126
2127 errno = 0;
2128 ret = syscall (__NR_tkill, lwpid, signo);
2129 if (errno != ENOSYS)
2130 return ret;
2131 tkill_failed = 1;
2132 }
2133 }
d6b0e80f
AC
2134#endif
2135
2136 return kill (lwpid, signo);
2137}
2138
ca2163eb
PA
2139/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2140 event, check if the core is interested in it: if not, ignore the
2141 event, and keep waiting; otherwise, we need to toggle the LWP's
2142 syscall entry/exit status, since the ptrace event itself doesn't
2143 indicate it, and report the trap to higher layers. */
2144
2145static int
2146linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2147{
2148 struct target_waitstatus *ourstatus = &lp->waitstatus;
2149 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2150 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2151
2152 if (stopping)
2153 {
2154 /* If we're stopping threads, there's a SIGSTOP pending, which
2155 makes it so that the LWP reports an immediate syscall return,
2156 followed by the SIGSTOP. Skip seeing that "return" using
2157 PTRACE_CONT directly, and let stop_wait_callback collect the
 2158 SIGSTOP. Later, when the thread is resumed, a new syscall
 2159 entry event is reported. If we didn't do this (and returned 0), we'd
2160 leave a syscall entry pending, and our caller, by using
2161 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2162 itself. Later, when the user re-resumes this LWP, we'd see
2163 another syscall entry event and we'd mistake it for a return.
2164
2165 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2166 (leaving immediately with LWP->signalled set, without issuing
2167 a PTRACE_CONT), it would still be problematic to leave this
2168 syscall enter pending, as later when the thread is resumed,
2169 it would then see the same syscall exit mentioned above,
2170 followed by the delayed SIGSTOP, while the syscall didn't
2171 actually get to execute. It seems it would be even more
2172 confusing to the user. */
2173
2174 if (debug_linux_nat)
2175 fprintf_unfiltered (gdb_stdlog,
2176 "LHST: ignoring syscall %d "
2177 "for LWP %ld (stopping threads), "
2178 "resuming with PTRACE_CONT for SIGSTOP\n",
2179 syscall_number,
2180 GET_LWP (lp->ptid));
2181
2182 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2183 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2184 return 1;
2185 }
2186
2187 if (catch_syscall_enabled ())
2188 {
2189 /* Always update the entry/return state, even if this particular
2190 syscall isn't interesting to the core now. In async mode,
2191 the user could install a new catchpoint for this syscall
2192 between syscall enter/return, and we'll need to know to
2193 report a syscall return if that happens. */
2194 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2195 ? TARGET_WAITKIND_SYSCALL_RETURN
2196 : TARGET_WAITKIND_SYSCALL_ENTRY);
2197
2198 if (catching_syscall_number (syscall_number))
2199 {
2200 /* Alright, an event to report. */
2201 ourstatus->kind = lp->syscall_state;
2202 ourstatus->value.syscall_number = syscall_number;
2203
2204 if (debug_linux_nat)
2205 fprintf_unfiltered (gdb_stdlog,
2206 "LHST: stopping for %s of syscall %d"
2207 " for LWP %ld\n",
3e43a32a
MS
2208 lp->syscall_state
2209 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2210 ? "entry" : "return",
2211 syscall_number,
2212 GET_LWP (lp->ptid));
2213 return 0;
2214 }
2215
2216 if (debug_linux_nat)
2217 fprintf_unfiltered (gdb_stdlog,
2218 "LHST: ignoring %s of syscall %d "
2219 "for LWP %ld\n",
2220 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2221 ? "entry" : "return",
2222 syscall_number,
2223 GET_LWP (lp->ptid));
2224 }
2225 else
2226 {
2227 /* If we had been syscall tracing, and hence used PT_SYSCALL
2228 before on this LWP, it could happen that the user removes all
2229 syscall catchpoints before we get to process this event.
2230 There are two noteworthy issues here:
2231
2232 - When stopped at a syscall entry event, resuming with
2233 PT_STEP still resumes executing the syscall and reports a
2234 syscall return.
2235
 2236 - Only PT_SYSCALL catches syscall enters. If we last
 2237 single-stepped this thread, then this event can't be a
 2238 syscall enter; it has to be a syscall exit.
 2239
2240
2241 The points above mean that the next resume, be it PT_STEP or
2242 PT_CONTINUE, can not trigger a syscall trace event. */
2243 if (debug_linux_nat)
2244 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2245 "LHST: caught syscall event "
2246 "with no syscall catchpoints."
ca2163eb
PA
2247 " %d for LWP %ld, ignoring\n",
2248 syscall_number,
2249 GET_LWP (lp->ptid));
2250 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2251 }
2252
2253 /* The core isn't interested in this event. For efficiency, avoid
2254 stopping all threads only to have the core resume them all again.
2255 Since we're not stopping threads, if we're still syscall tracing
2256 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2257 subsequent syscall. Simply resume using the inf-ptrace layer,
2258 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2259
2260 /* Note that gdbarch_get_syscall_number may access registers, hence
2261 fill a regcache. */
2262 registers_changed ();
7b50312a
PA
2263 if (linux_nat_prepare_to_resume != NULL)
2264 linux_nat_prepare_to_resume (lp);
ca2163eb 2265 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 2266 lp->step, GDB_SIGNAL_0);
ca2163eb
PA
2267 return 1;
2268}
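/* The entry/return toggling above exists because PTRACE_SYSCALL stops
   the tracee twice per system call and the kernel does not tell the
   tracer which stop is which; the tracer has to keep the parity
   itself, which is what lp->syscall_state does.  A rough standalone
   sketch of that pattern (illustrative only; it assumes CHILD is
   already ptrace-attached and stopped, and omits error handling and
   the PTRACE_O_TRACESYSGOOD setup):  */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

static void
example_trace_syscalls (pid_t child)
{
  int status, in_syscall = 0;

  while (ptrace (PTRACE_SYSCALL, child, 0, 0) == 0
	 && waitpid (child, &status, 0) == child
	 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    {
      in_syscall = !in_syscall;
      printf ("syscall %s\n", in_syscall ? "entry" : "exit");
    }
}
#endif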
2269
3d799a95
DJ
2270/* Handle a GNU/Linux extended wait response. If we see a clone
2271 event, we need to add the new LWP to our list (and not report the
2272 trap to higher layers). This function returns non-zero if the
2273 event should be ignored and we should wait again. If STOPPING is
2274 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2275
2276static int
3d799a95
DJ
2277linux_handle_extended_wait (struct lwp_info *lp, int status,
2278 int stopping)
d6b0e80f 2279{
3d799a95
DJ
2280 int pid = GET_LWP (lp->ptid);
2281 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2282 int event = status >> 16;
d6b0e80f 2283
3d799a95
DJ
2284 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2285 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2286 {
3d799a95
DJ
2287 unsigned long new_pid;
2288 int ret;
2289
2290 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2291
3d799a95
DJ
2292 /* If we haven't already seen the new PID stop, wait for it now. */
2293 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2294 {
2295 /* The new child has a pending SIGSTOP. We can't affect it until it
2296 hits the SIGSTOP, but we're already attached. */
2297 ret = my_waitpid (new_pid, &status,
2298 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2299 if (ret == -1)
2300 perror_with_name (_("waiting for new child"));
2301 else if (ret != new_pid)
2302 internal_error (__FILE__, __LINE__,
2303 _("wait returned unexpected PID %d"), ret);
2304 else if (!WIFSTOPPED (status))
2305 internal_error (__FILE__, __LINE__,
2306 _("wait returned unexpected status 0x%x"), status);
2307 }
2308
3a3e9ee3 2309 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2310
2277426b
PA
2311 if (event == PTRACE_EVENT_FORK
2312 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2313 {
2277426b
PA
2314 /* Handle checkpointing by linux-fork.c here as a special
2315 case. We don't want the follow-fork-mode or 'catch fork'
2316 to interfere with this. */
2317
2318 /* This won't actually modify the breakpoint list, but will
2319 physically remove the breakpoints from the child. */
2320 detach_breakpoints (new_pid);
2321
2322 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2323 if (!find_fork_pid (new_pid))
2324 add_fork (new_pid);
2277426b
PA
2325
2326 /* Report as spurious, so that infrun doesn't want to follow
2327 this fork. We're actually doing an infcall in
2328 linux-fork.c. */
2329 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2330 linux_enable_event_reporting (pid_to_ptid (new_pid));
2331
2332 /* Report the stop to the core. */
2333 return 0;
2334 }
2335
3d799a95
DJ
2336 if (event == PTRACE_EVENT_FORK)
2337 ourstatus->kind = TARGET_WAITKIND_FORKED;
2338 else if (event == PTRACE_EVENT_VFORK)
2339 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2340 else
3d799a95 2341 {
78768c4a
JK
2342 struct lwp_info *new_lp;
2343
3d799a95 2344 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2345
3c4d7e12
PA
2346 if (debug_linux_nat)
2347 fprintf_unfiltered (gdb_stdlog,
2348 "LHEW: Got clone event "
2349 "from LWP %d, new child is LWP %ld\n",
2350 pid, new_pid);
2351
d90e17a7 2352 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2353 new_lp->cloned = 1;
4c28f408 2354 new_lp->stopped = 1;
d6b0e80f 2355
3d799a95
DJ
2356 if (WSTOPSIG (status) != SIGSTOP)
2357 {
 2358 /* This can happen if someone starts sending signals with a
 2359 number lower than SIGSTOP (e.g. SIGUSR1) to the new thread
 2360 before it gets a chance to run.
2361 This is an unlikely case, and harder to handle for
2362 fork / vfork than for clone, so we do not try - but
2363 we handle it for clone events here. We'll send
2364 the other signal on to the thread below. */
2365
2366 new_lp->signalled = 1;
2367 }
2368 else
79395f92
PA
2369 {
2370 struct thread_info *tp;
2371
2372 /* When we stop for an event in some other thread, and
2373 pull the thread list just as this thread has cloned,
2374 we'll have seen the new thread in the thread_db list
2375 before handling the CLONE event (glibc's
2376 pthread_create adds the new thread to the thread list
2377 before clone'ing, and has the kernel fill in the
2378 thread's tid on the clone call with
2379 CLONE_PARENT_SETTID). If that happened, and the core
2380 had requested the new thread to stop, we'll have
2381 killed it with SIGSTOP. But since SIGSTOP is not an
2382 RT signal, it can only be queued once. We need to be
2383 careful to not resume the LWP if we wanted it to
2384 stop. In that case, we'll leave the SIGSTOP pending.
a493e3e2 2385 It will later be reported as GDB_SIGNAL_0. */
79395f92
PA
2386 tp = find_thread_ptid (new_lp->ptid);
2387 if (tp != NULL && tp->stop_requested)
2388 new_lp->last_resume_kind = resume_stop;
2389 else
2390 status = 0;
2391 }
d6b0e80f 2392
4c28f408 2393 if (non_stop)
3d799a95 2394 {
4c28f408
PA
2395 /* Add the new thread to GDB's lists as soon as possible
2396 so that:
2397
2398 1) the frontend doesn't have to wait for a stop to
2399 display them, and,
2400
2401 2) we tag it with the correct running state. */
2402
2403 /* If the thread_db layer is active, let it know about
2404 this new thread, and add it to GDB's list. */
2405 if (!thread_db_attach_lwp (new_lp->ptid))
2406 {
2407 /* We're not using thread_db. Add it to GDB's
2408 list. */
2409 target_post_attach (GET_LWP (new_lp->ptid));
2410 add_thread (new_lp->ptid);
2411 }
2412
2413 if (!stopping)
2414 {
2415 set_running (new_lp->ptid, 1);
2416 set_executing (new_lp->ptid, 1);
e21ffe51
PA
2417 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2418 resume_stop. */
2419 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2420 }
2421 }
2422
79395f92
PA
2423 if (status != 0)
2424 {
2425 /* We created NEW_LP so it cannot yet contain STATUS. */
2426 gdb_assert (new_lp->status == 0);
2427
2428 /* Save the wait status to report later. */
2429 if (debug_linux_nat)
2430 fprintf_unfiltered (gdb_stdlog,
2431 "LHEW: waitpid of new LWP %ld, "
2432 "saving status %s\n",
2433 (long) GET_LWP (new_lp->ptid),
2434 status_to_str (status));
2435 new_lp->status = status;
2436 }
2437
ca2163eb
PA
2438 /* Note the need to use the low target ops to resume, to
2439 handle resuming with PT_SYSCALL if we have syscall
2440 catchpoints. */
4c28f408
PA
2441 if (!stopping)
2442 {
3d799a95 2443 new_lp->resumed = 1;
ca2163eb 2444
79395f92 2445 if (status == 0)
ad34eb2f 2446 {
e21ffe51 2447 gdb_assert (new_lp->last_resume_kind == resume_continue);
ad34eb2f
JK
2448 if (debug_linux_nat)
2449 fprintf_unfiltered (gdb_stdlog,
79395f92
PA
2450 "LHEW: resuming new LWP %ld\n",
2451 GET_LWP (new_lp->ptid));
7b50312a
PA
2452 if (linux_nat_prepare_to_resume != NULL)
2453 linux_nat_prepare_to_resume (new_lp);
79395f92 2454 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
a493e3e2 2455 0, GDB_SIGNAL_0);
79395f92 2456 new_lp->stopped = 0;
ad34eb2f
JK
2457 }
2458 }
d6b0e80f 2459
3d799a95
DJ
2460 if (debug_linux_nat)
2461 fprintf_unfiltered (gdb_stdlog,
3c4d7e12 2462 "LHEW: resuming parent LWP %d\n", pid);
7b50312a
PA
2463 if (linux_nat_prepare_to_resume != NULL)
2464 linux_nat_prepare_to_resume (lp);
ca2163eb 2465 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 2466 0, GDB_SIGNAL_0);
3d799a95
DJ
2467
2468 return 1;
2469 }
2470
2471 return 0;
d6b0e80f
AC
2472 }
2473
3d799a95
DJ
2474 if (event == PTRACE_EVENT_EXEC)
2475 {
a75724bc
PA
2476 if (debug_linux_nat)
2477 fprintf_unfiltered (gdb_stdlog,
2478 "LHEW: Got exec event from LWP %ld\n",
2479 GET_LWP (lp->ptid));
2480
3d799a95
DJ
2481 ourstatus->kind = TARGET_WAITKIND_EXECD;
2482 ourstatus->value.execd_pathname
6d8fd2b7 2483 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2484
6c95b8df
PA
2485 return 0;
2486 }
2487
2488 if (event == PTRACE_EVENT_VFORK_DONE)
2489 {
2490 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2491 {
6c95b8df 2492 if (debug_linux_nat)
3e43a32a
MS
2493 fprintf_unfiltered (gdb_stdlog,
2494 "LHEW: Got expected PTRACE_EVENT_"
2495 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2496 GET_LWP (lp->ptid));
3d799a95 2497
6c95b8df
PA
2498 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2499 return 0;
3d799a95
DJ
2500 }
2501
6c95b8df 2502 if (debug_linux_nat)
3e43a32a
MS
2503 fprintf_unfiltered (gdb_stdlog,
2504 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2505 "from LWP %ld: resuming\n",
6c95b8df
PA
2506 GET_LWP (lp->ptid));
2507 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2508 return 1;
3d799a95
DJ
2509 }
2510
2511 internal_error (__FILE__, __LINE__,
2512 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2513}
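/* For reference, the extended events handled above only arrive if
   they were requested with PTRACE_SETOPTIONS; the event code then
   shows up in bits 16 and up of the wait status (hence the
   "status >> 16" tests) and the new child's pid is fetched with
   PTRACE_GETEVENTMSG.  A hedged sketch of that setup (illustrative
   only; it assumes a kernel/libc exposing PTRACE_O_TRACECLONE and
   friends in <sys/ptrace.h>, and omits error handling):  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>

static void
example_handle_clone_event (pid_t pid)
{
  int status;
  unsigned long new_pid;

  ptrace (PTRACE_SETOPTIONS, pid, 0,
	  PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACECLONE);
  ptrace (PTRACE_CONT, pid, 0, 0);
  waitpid (pid, &status, 0);

  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) == PTRACE_EVENT_CLONE)
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
}
#endif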
2514
2515/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2516 exited. */
2517
2518static int
2519wait_lwp (struct lwp_info *lp)
2520{
2521 pid_t pid;
432b4d03 2522 int status = 0;
d6b0e80f 2523 int thread_dead = 0;
432b4d03 2524 sigset_t prev_mask;
d6b0e80f
AC
2525
2526 gdb_assert (!lp->stopped);
2527 gdb_assert (lp->status == 0);
2528
432b4d03
JK
2529 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2530 block_child_signals (&prev_mask);
2531
2532 for (;;)
d6b0e80f 2533 {
432b4d03
JK
2534 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2535 was right and we should just call sigsuspend. */
2536
2537 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
d6b0e80f 2538 if (pid == -1 && errno == ECHILD)
432b4d03 2539 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2540 if (pid == -1 && errno == ECHILD)
2541 {
2542 /* The thread has previously exited. We need to delete it
2543 now because, for some vendor 2.4 kernels with NPTL
2544 support backported, there won't be an exit event unless
2545 it is the main thread. 2.6 kernels will report an exit
2546 event for each thread that exits, as expected. */
2547 thread_dead = 1;
2548 if (debug_linux_nat)
2549 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2550 target_pid_to_str (lp->ptid));
2551 }
432b4d03
JK
2552 if (pid != 0)
2553 break;
2554
2555 /* Bugs 10970, 12702.
2556 Thread group leader may have exited in which case we'll lock up in
2557 waitpid if there are other threads, even if they are all zombies too.
2558 Basically, we're not supposed to use waitpid this way.
2559 __WCLONE is not applicable for the leader so we can't use that.
2560 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2561 process; it gets ESRCH both for the zombie and for running processes.
2562
2563 As a workaround, check if we're waiting for the thread group leader and
2564 if it's a zombie, and avoid calling waitpid if it is.
2565
2566 This is racy, what if the tgl becomes a zombie right after we check?
2567 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2568 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03
JK
2569
2570 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
5f572dec 2571 && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
d6b0e80f 2572 {
d6b0e80f
AC
2573 thread_dead = 1;
2574 if (debug_linux_nat)
432b4d03
JK
2575 fprintf_unfiltered (gdb_stdlog,
2576 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2577 target_pid_to_str (lp->ptid));
432b4d03 2578 break;
d6b0e80f 2579 }
432b4d03
JK
2580
2581 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2582 get invoked despite our caller had them intentionally blocked by
2583 block_child_signals. This is sensitive only to the loop of
2584 linux_nat_wait_1 and there if we get called my_waitpid gets called
2585 again before it gets to sigsuspend so we can safely let the handlers
2586 get executed here. */
2587
2588 sigsuspend (&suspend_mask);
2589 }
2590
2591 restore_child_signals_mask (&prev_mask);
2592
d6b0e80f
AC
2593 if (!thread_dead)
2594 {
2595 gdb_assert (pid == GET_LWP (lp->ptid));
2596
2597 if (debug_linux_nat)
2598 {
2599 fprintf_unfiltered (gdb_stdlog,
2600 "WL: waitpid %s received %s\n",
2601 target_pid_to_str (lp->ptid),
2602 status_to_str (status));
2603 }
d6b0e80f 2604
a9f4bb21
PA
2605 /* Check if the thread has exited. */
2606 if (WIFEXITED (status) || WIFSIGNALED (status))
2607 {
2608 thread_dead = 1;
2609 if (debug_linux_nat)
2610 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2611 target_pid_to_str (lp->ptid));
2612 }
d6b0e80f
AC
2613 }
2614
2615 if (thread_dead)
2616 {
e26af52f 2617 exit_lwp (lp);
d6b0e80f
AC
2618 return 0;
2619 }
2620
2621 gdb_assert (WIFSTOPPED (status));
2622
ca2163eb
PA
2623 /* Handle GNU/Linux's syscall SIGTRAPs. */
2624 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2625 {
2626 /* No longer need the sysgood bit. The ptrace event ends up
2627 recorded in lp->waitstatus if we care for it. We can carry
2628 on handling the event like a regular SIGTRAP from here
2629 on. */
2630 status = W_STOPCODE (SIGTRAP);
2631 if (linux_handle_syscall_trap (lp, 1))
2632 return wait_lwp (lp);
2633 }
2634
d6b0e80f
AC
2635 /* Handle GNU/Linux's extended waitstatus for trace events. */
2636 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2637 {
2638 if (debug_linux_nat)
2639 fprintf_unfiltered (gdb_stdlog,
2640 "WL: Handling extended status 0x%06x\n",
2641 status);
3d799a95 2642 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2643 return wait_lwp (lp);
2644 }
2645
2646 return status;
2647}
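/* The zombie-leader workaround above depends on being able to ask
   /proc whether a thread is already a zombie, i.e. reading
   /proc/<pid>/status and looking at its "State:" line.  The sketch
   below is illustrative only and is not necessarily how
   linux_proc_pid_is_zombie is actually implemented.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
example_pid_is_zombie (long pid)
{
  char path[64], line[128];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%ld/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	zombie = (strchr (line, 'Z') != NULL);
	break;
      }

  fclose (f);
  return zombie;
}
#endif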
2648
9f0bdab8
DJ
2649/* Save the most recent siginfo for LP. This is currently only called
2650 for SIGTRAP; some ports use the si_addr field for
2651 target_stopped_data_address. In the future, it may also be used to
2652 restore the siginfo of requeued signals. */
2653
2654static void
2655save_siginfo (struct lwp_info *lp)
2656{
2657 errno = 0;
2658 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2659 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2660
2661 if (errno != 0)
2662 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2663}
2664
d6b0e80f
AC
2665/* Send a SIGSTOP to LP. */
2666
2667static int
2668stop_callback (struct lwp_info *lp, void *data)
2669{
2670 if (!lp->stopped && !lp->signalled)
2671 {
2672 int ret;
2673
2674 if (debug_linux_nat)
2675 {
2676 fprintf_unfiltered (gdb_stdlog,
2677 "SC: kill %s **<SIGSTOP>**\n",
2678 target_pid_to_str (lp->ptid));
2679 }
2680 errno = 0;
2681 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2682 if (debug_linux_nat)
2683 {
2684 fprintf_unfiltered (gdb_stdlog,
2685 "SC: lwp kill %d %s\n",
2686 ret,
2687 errno ? safe_strerror (errno) : "ERRNO-OK");
2688 }
2689
2690 lp->signalled = 1;
2691 gdb_assert (lp->status == 0);
2692 }
2693
2694 return 0;
2695}
2696
7b50312a
PA
2697/* Request a stop on LWP. */
2698
2699void
2700linux_stop_lwp (struct lwp_info *lwp)
2701{
2702 stop_callback (lwp, NULL);
2703}
2704
57380f4e 2705/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2706
2707static int
57380f4e
DJ
2708linux_nat_has_pending_sigint (int pid)
2709{
2710 sigset_t pending, blocked, ignored;
57380f4e
DJ
2711
2712 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2713
2714 if (sigismember (&pending, SIGINT)
2715 && !sigismember (&ignored, SIGINT))
2716 return 1;
2717
2718 return 0;
2719}
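/* linux_proc_pending_signals, used above, obtains the pending,
   blocked and ignored sets from the SigPnd/ShdPnd, SigBlk and SigIgn
   lines of /proc/<pid>/status, which are hexadecimal signal masks
   (bit N-1 set means signal N is in the set).  An illustrative sketch
   of decoding one such mask and testing a single signal (not the
   actual implementation):  */
#if 0
#include <signal.h>
#include <stdlib.h>

static int
example_mask_has_signal (const char *hex_mask, int signo)
{
  unsigned long long mask = strtoull (hex_mask, NULL, 16);

  return (mask & (1ULL << (signo - 1))) != 0;
}

/* e.g. a "SigPnd: 0000000000000002" line means SIGINT (2) is pending:
   example_mask_has_signal ("0000000000000002", SIGINT) == 1.  */
#endif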
2720
2721/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2722
2723static int
2724set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2725{
57380f4e
DJ
2726 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2727 flag to consume the next one. */
2728 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2729 && WSTOPSIG (lp->status) == SIGINT)
2730 lp->status = 0;
2731 else
2732 lp->ignore_sigint = 1;
2733
2734 return 0;
2735}
2736
2737/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2738 This function is called after we know the LWP has stopped; if the LWP
2739 stopped before the expected SIGINT was delivered, then it will never have
2740 arrived. Also, if the signal was delivered to a shared queue and consumed
2741 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2742
57380f4e
DJ
2743static void
2744maybe_clear_ignore_sigint (struct lwp_info *lp)
2745{
2746 if (!lp->ignore_sigint)
2747 return;
2748
2749 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2750 {
2751 if (debug_linux_nat)
2752 fprintf_unfiltered (gdb_stdlog,
2753 "MCIS: Clearing bogus flag for %s\n",
2754 target_pid_to_str (lp->ptid));
2755 lp->ignore_sigint = 0;
2756 }
2757}
2758
ebec9a0f
PA
2759/* Fetch the possible triggered data watchpoint info and store it in
2760 LP.
2761
2762 On some archs, like x86, that use debug registers to set
2763 watchpoints, it's possible that the way to know which watched
2764 address trapped, is to check the register that is used to select
2765 which address to watch. Problem is, between setting the watchpoint
2766 and reading back which data address trapped, the user may change
2767 the set of watchpoints, and, as a consequence, GDB changes the
2768 debug registers in the inferior. To avoid reading back a stale
2769 stopped-data-address when that happens, we cache in LP the fact
2770 that a watchpoint trapped, and the corresponding data address, as
2771 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2772 registers meanwhile, we have the cached data we can rely on. */
2773
2774static void
2775save_sigtrap (struct lwp_info *lp)
2776{
2777 struct cleanup *old_chain;
2778
2779 if (linux_ops->to_stopped_by_watchpoint == NULL)
2780 {
2781 lp->stopped_by_watchpoint = 0;
2782 return;
2783 }
2784
2785 old_chain = save_inferior_ptid ();
2786 inferior_ptid = lp->ptid;
2787
2788 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2789
2790 if (lp->stopped_by_watchpoint)
2791 {
2792 if (linux_ops->to_stopped_data_address != NULL)
2793 lp->stopped_data_address_p =
2794 linux_ops->to_stopped_data_address (&current_target,
2795 &lp->stopped_data_address);
2796 else
2797 lp->stopped_data_address_p = 0;
2798 }
2799
2800 do_cleanups (old_chain);
2801}
2802
2803/* See save_sigtrap. */
2804
2805static int
2806linux_nat_stopped_by_watchpoint (void)
2807{
2808 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2809
2810 gdb_assert (lp != NULL);
2811
2812 return lp->stopped_by_watchpoint;
2813}
2814
2815static int
2816linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2817{
2818 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2819
2820 gdb_assert (lp != NULL);
2821
2822 *addr_p = lp->stopped_data_address;
2823
2824 return lp->stopped_data_address_p;
2825}
2826
26ab7092
JK
2827/* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2828
2829static int
2830sigtrap_is_event (int status)
2831{
2832 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2833}
2834
2835/* SIGTRAP-like events recognizer. */
2836
2837static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2838
00390b84
JK
2839/* Check for SIGTRAP-like events in LP. */
2840
2841static int
2842linux_nat_lp_status_is_event (struct lwp_info *lp)
2843{
2844 /* We check for lp->waitstatus in addition to lp->status, because we can
2845 have pending process exits recorded in lp->status
2846 and W_EXITCODE(0,0) == 0. We should probably have an additional
2847 lp->status_p flag. */
2848
2849 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2850 && linux_nat_status_is_event (lp->status));
2851}
2852
26ab7092
JK
2853/* Set alternative SIGTRAP-like events recognizer. If
2854 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2855 applied. */
2856
2857void
2858linux_nat_set_status_is_event (struct target_ops *t,
2859 int (*status_is_event) (int status))
2860{
2861 linux_nat_status_is_event = status_is_event;
2862}
2863
57380f4e
DJ
2864/* Wait until LP is stopped. */
2865
2866static int
2867stop_wait_callback (struct lwp_info *lp, void *data)
2868{
6c95b8df
PA
2869 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2870
2871 /* If this is a vfork parent, bail out, it is not going to report
2872 any SIGSTOP until the vfork is done with. */
2873 if (inf->vfork_child != NULL)
2874 return 0;
2875
d6b0e80f
AC
2876 if (!lp->stopped)
2877 {
2878 int status;
2879
2880 status = wait_lwp (lp);
2881 if (status == 0)
2882 return 0;
2883
57380f4e
DJ
2884 if (lp->ignore_sigint && WIFSTOPPED (status)
2885 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2886 {
57380f4e 2887 lp->ignore_sigint = 0;
d6b0e80f
AC
2888
2889 errno = 0;
2890 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2891 if (debug_linux_nat)
2892 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2893 "PTRACE_CONT %s, 0, 0 (%s) "
2894 "(discarding SIGINT)\n",
d6b0e80f
AC
2895 target_pid_to_str (lp->ptid),
2896 errno ? safe_strerror (errno) : "OK");
2897
57380f4e 2898 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2899 }
2900
57380f4e
DJ
2901 maybe_clear_ignore_sigint (lp);
2902
d6b0e80f
AC
2903 if (WSTOPSIG (status) != SIGSTOP)
2904 {
e5ef252a 2905 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2906
e5ef252a
PA
2907 /* Save the trap's siginfo in case we need it later. */
2908 save_siginfo (lp);
d6b0e80f 2909
e5ef252a
PA
2910 save_sigtrap (lp);
2911
2912 if (debug_linux_nat)
2913 fprintf_unfiltered (gdb_stdlog,
2914 "SWC: Pending event %s in %s\n",
2915 status_to_str ((int) status),
2916 target_pid_to_str (lp->ptid));
2917
2918 /* Save the sigtrap event. */
2919 lp->status = status;
2920 gdb_assert (!lp->stopped);
2921 gdb_assert (lp->signalled);
2922 lp->stopped = 1;
d6b0e80f
AC
2923 }
2924 else
2925 {
2926 /* We caught the SIGSTOP that we intended to catch, so
2927 there's no SIGSTOP pending. */
e5ef252a
PA
2928
2929 if (debug_linux_nat)
2930 fprintf_unfiltered (gdb_stdlog,
2931 "SWC: Delayed SIGSTOP caught for %s.\n",
2932 target_pid_to_str (lp->ptid));
2933
d6b0e80f 2934 lp->stopped = 1;
e5ef252a
PA
2935
2936 /* Reset SIGNALLED only after the stop_wait_callback call
2937 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2938 lp->signalled = 0;
2939 }
2940 }
2941
2942 return 0;
2943}
2944
d6b0e80f
AC
2945/* Return non-zero if LP has a wait status pending. */
2946
2947static int
2948status_callback (struct lwp_info *lp, void *data)
2949{
2950 /* Only report a pending wait status if we pretend that this has
2951 indeed been resumed. */
ca2163eb
PA
2952 if (!lp->resumed)
2953 return 0;
2954
2955 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2956 {
2957 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2958 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2959 0', so a clean process exit can not be stored pending in
2960 lp->status, it is indistinguishable from
2961 no-pending-status. */
2962 return 1;
2963 }
2964
2965 if (lp->status != 0)
2966 return 1;
2967
2968 return 0;
d6b0e80f
AC
2969}
2970
2971/* Return non-zero if LP isn't stopped. */
2972
2973static int
2974running_callback (struct lwp_info *lp, void *data)
2975{
25289eb2
PA
2976 return (!lp->stopped
2977 || ((lp->status != 0
2978 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2979 && lp->resumed));
d6b0e80f
AC
2980}
2981
2982/* Count the LWP's that have had events. */
2983
2984static int
2985count_events_callback (struct lwp_info *lp, void *data)
2986{
2987 int *count = data;
2988
2989 gdb_assert (count != NULL);
2990
e09490f1 2991 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2992 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2993 (*count)++;
2994
2995 return 0;
2996}
2997
2998/* Select the LWP (if any) that is currently being single-stepped. */
2999
3000static int
3001select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3002{
25289eb2
PA
3003 if (lp->last_resume_kind == resume_step
3004 && lp->status != 0)
d6b0e80f
AC
3005 return 1;
3006 else
3007 return 0;
3008}
3009
3010/* Select the Nth LWP that has had a SIGTRAP event. */
3011
3012static int
3013select_event_lwp_callback (struct lwp_info *lp, void *data)
3014{
3015 int *selector = data;
3016
3017 gdb_assert (selector != NULL);
3018
1777feb0 3019 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 3020 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
3021 if ((*selector)-- == 0)
3022 return 1;
3023
3024 return 0;
3025}
3026
710151dd
PA
3027static int
3028cancel_breakpoint (struct lwp_info *lp)
3029{
3030 /* Arrange for a breakpoint to be hit again later. We don't keep
3031 the SIGTRAP status and don't forward the SIGTRAP signal to the
3032 LWP. We will handle the current event, eventually we will resume
3033 this LWP, and this breakpoint will trap again.
3034
3035 If we do not do this, then we run the risk that the user will
3036 delete or disable the breakpoint, but the LWP will have already
3037 tripped on it. */
3038
515630c5
UW
3039 struct regcache *regcache = get_thread_regcache (lp->ptid);
3040 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3041 CORE_ADDR pc;
3042
3043 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 3044 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
3045 {
3046 if (debug_linux_nat)
3047 fprintf_unfiltered (gdb_stdlog,
3048 "CB: Push back breakpoint for %s\n",
3049 target_pid_to_str (lp->ptid));
3050
3051 /* Back up the PC if necessary. */
515630c5
UW
3052 if (gdbarch_decr_pc_after_break (gdbarch))
3053 regcache_write_pc (regcache, pc);
3054
710151dd
PA
3055 return 1;
3056 }
3057 return 0;
3058}
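/* The PC adjustment in cancel_breakpoint exists because on some
   targets (x86's one-byte int3, for instance) the kernel reports the
   breakpoint SIGTRAP with the PC already past the trap instruction.
   A worked example under that assumption (illustrative only):

     breakpoint planted at    0x400123
     PC reported by SIGTRAP   0x400124  (= 0x400123 + decr_pc_after_break)
     address to test/rewind   0x400124 - 1 = 0x400123

   which is exactly the "regcache_read_pc () - gdbarch_decr_pc_after_break"
   and "regcache_write_pc (regcache, pc)" sequence above.  */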
3059
d6b0e80f
AC
3060static int
3061cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3062{
3063 struct lwp_info *event_lp = data;
3064
3065 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3066 if (lp == event_lp)
3067 return 0;
3068
3069 /* If a LWP other than the LWP that we're reporting an event for has
3070 hit a GDB breakpoint (as opposed to some random trap signal),
3071 then just arrange for it to hit it again later. We don't keep
3072 the SIGTRAP status and don't forward the SIGTRAP signal to the
3073 LWP. We will handle the current event, eventually we will resume
3074 all LWPs, and this one will get its breakpoint trap again.
3075
3076 If we do not do this, then we run the risk that the user will
3077 delete or disable the breakpoint, but the LWP will have already
3078 tripped on it. */
3079
00390b84 3080 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
3081 && cancel_breakpoint (lp))
3082 /* Throw away the SIGTRAP. */
3083 lp->status = 0;
d6b0e80f
AC
3084
3085 return 0;
3086}
3087
3088/* Select one LWP out of those that have events pending. */
3089
3090static void
d90e17a7 3091select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
3092{
3093 int num_events = 0;
3094 int random_selector;
3095 struct lwp_info *event_lp;
3096
ac264b3b 3097 /* Record the wait status for the original LWP. */
d6b0e80f
AC
3098 (*orig_lp)->status = *status;
3099
3100 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
3101 event_lp = iterate_over_lwps (filter,
3102 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
3103 if (event_lp != NULL)
3104 {
3105 if (debug_linux_nat)
3106 fprintf_unfiltered (gdb_stdlog,
3107 "SEL: Select single-step %s\n",
3108 target_pid_to_str (event_lp->ptid));
3109 }
3110 else
3111 {
3112 /* No single-stepping LWP. Select one at random, out of those
3113 which have had SIGTRAP events. */
3114
3115 /* First see how many SIGTRAP events we have. */
d90e17a7 3116 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
3117
3118 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3119 random_selector = (int)
3120 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3121
3122 if (debug_linux_nat && num_events > 1)
3123 fprintf_unfiltered (gdb_stdlog,
3124 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3125 num_events, random_selector);
3126
d90e17a7
PA
3127 event_lp = iterate_over_lwps (filter,
3128 select_event_lwp_callback,
d6b0e80f
AC
3129 &random_selector);
3130 }
3131
3132 if (event_lp != NULL)
3133 {
3134 /* Switch the event LWP. */
3135 *orig_lp = event_lp;
3136 *status = event_lp->status;
3137 }
3138
3139 /* Flush the wait status for the event LWP. */
3140 (*orig_lp)->status = 0;
3141}
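/* The random_selector computation above maps rand() onto the range
   [0, num_events) roughly uniformly: rand() / (RAND_MAX + 1.0) lies
   in [0, 1), so multiplying by num_events and truncating yields an
   integer in [0, num_events).  A trivial sketch of the same idiom
   (illustrative only):  */
#if 0
#include <stdlib.h>

static int
example_pick_uniform (int n)
{
  /* Returns a value in [0, n) for n > 0.  */
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif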
3142
3143/* Return non-zero if LP has been resumed. */
3144
3145static int
3146resumed_callback (struct lwp_info *lp, void *data)
3147{
3148 return lp->resumed;
3149}
3150
12d9289a
PA
3151/* Stop an active thread, verify it still exists, then resume it. If
3152 the thread ends up with a pending status, then it is not resumed,
3153 and *DATA (really a pointer to int), is set. */
d6b0e80f
AC
3154
3155static int
3156stop_and_resume_callback (struct lwp_info *lp, void *data)
3157{
12d9289a
PA
3158 int *new_pending_p = data;
3159
25289eb2 3160 if (!lp->stopped)
d6b0e80f 3161 {
25289eb2
PA
3162 ptid_t ptid = lp->ptid;
3163
d6b0e80f
AC
3164 stop_callback (lp, NULL);
3165 stop_wait_callback (lp, NULL);
25289eb2
PA
3166
3167 /* Resume if the lwp still exists, and the core wanted it
3168 running. */
12d9289a
PA
3169 lp = find_lwp_pid (ptid);
3170 if (lp != NULL)
25289eb2 3171 {
12d9289a
PA
3172 if (lp->last_resume_kind == resume_stop
3173 && lp->status == 0)
3174 {
3175 /* The core wanted the LWP to stop. Even if it stopped
3176 cleanly (with SIGSTOP), leave the event pending. */
3177 if (debug_linux_nat)
3178 fprintf_unfiltered (gdb_stdlog,
3179 "SARC: core wanted LWP %ld stopped "
3180 "(leaving SIGSTOP pending)\n",
3181 GET_LWP (lp->ptid));
3182 lp->status = W_STOPCODE (SIGSTOP);
3183 }
3184
3185 if (lp->status == 0)
3186 {
3187 if (debug_linux_nat)
3188 fprintf_unfiltered (gdb_stdlog,
3189 "SARC: re-resuming LWP %ld\n",
3190 GET_LWP (lp->ptid));
e5ef252a 3191 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
3192 }
3193 else
3194 {
3195 if (debug_linux_nat)
3196 fprintf_unfiltered (gdb_stdlog,
3197 "SARC: not re-resuming LWP %ld "
3198 "(has pending)\n",
3199 GET_LWP (lp->ptid));
3200 if (new_pending_p)
3201 *new_pending_p = 1;
3202 }
25289eb2 3203 }
d6b0e80f
AC
3204 }
3205 return 0;
3206}
3207
02f3fc28 3208/* Check if we should go on and pass this event to common code.
12d9289a
PA
3209 Return the affected lwp if we are, or NULL otherwise. If we stop
3210 all lwps temporarily, we may end up with new pending events in some
3211 other lwp. In that case set *NEW_PENDING_P to true. */
3212
02f3fc28 3213static struct lwp_info *
0e5bf2a8 3214linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
02f3fc28
PA
3215{
3216 struct lwp_info *lp;
3217
12d9289a
PA
3218 *new_pending_p = 0;
3219
02f3fc28
PA
3220 lp = find_lwp_pid (pid_to_ptid (lwpid));
3221
3222 /* Check for stop events reported by a process we didn't already
3223 know about - anything not already in our LWP list.
3224
3225 If we're expecting to receive stopped processes after
3226 fork, vfork, and clone events, then we'll just add the
3227 new one to our list and go back to waiting for the event
3228 to be reported - the stopped process might be returned
0e5bf2a8
PA
3229 from waitpid before or after the event is.
3230
3231 But note the case of a non-leader thread exec'ing after the
3232 leader having exited, and gone from our lists. The non-leader
3233 thread changes its tid to the tgid. */
3234
3235 if (WIFSTOPPED (status) && lp == NULL
3236 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3237 {
3238 /* A multi-thread exec after we had seen the leader exiting. */
3239 if (debug_linux_nat)
3240 fprintf_unfiltered (gdb_stdlog,
3241 "LLW: Re-adding thread group leader LWP %d.\n",
3242 lwpid);
3243
3244 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3245 lp->stopped = 1;
3246 lp->resumed = 1;
3247 add_thread (lp->ptid);
3248 }
3249
02f3fc28
PA
3250 if (WIFSTOPPED (status) && !lp)
3251 {
84636d28 3252 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3253 return NULL;
3254 }
3255
3256 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3257 our list, i.e. not part of the current process. This can happen
fd62cb89 3258 if we detach from a program we originally forked and then it
02f3fc28
PA
3259 exits. */
3260 if (!WIFSTOPPED (status) && !lp)
3261 return NULL;
3262
ca2163eb
PA
3263 /* Handle GNU/Linux's syscall SIGTRAPs. */
3264 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3265 {
3266 /* No longer need the sysgood bit. The ptrace event ends up
3267 recorded in lp->waitstatus if we care for it. We can carry
3268 on handling the event like a regular SIGTRAP from here
3269 on. */
3270 status = W_STOPCODE (SIGTRAP);
3271 if (linux_handle_syscall_trap (lp, 0))
3272 return NULL;
3273 }
02f3fc28 3274
ca2163eb
PA
3275 /* Handle GNU/Linux's extended waitstatus for trace events. */
3276 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3277 {
3278 if (debug_linux_nat)
3279 fprintf_unfiltered (gdb_stdlog,
3280 "LLW: Handling extended status 0x%06x\n",
3281 status);
3282 if (linux_handle_extended_wait (lp, status, 0))
3283 return NULL;
3284 }
3285
26ab7092 3286 if (linux_nat_status_is_event (status))
ebec9a0f
PA
3287 {
3288 /* Save the trap's siginfo in case we need it later. */
3289 save_siginfo (lp);
3290
3291 save_sigtrap (lp);
3292 }
ca2163eb 3293
02f3fc28 3294 /* Check if the thread has exited. */
d90e17a7
PA
3295 if ((WIFEXITED (status) || WIFSIGNALED (status))
3296 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3297 {
9db03742
JB
3298 /* If this is the main thread, we must stop all threads and verify
3299 if they are still alive. This is because in the nptl thread model
3300 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3301 other than the main thread. We only get the main thread exit
3302 signal once all child threads have already exited. If we
3303 stop all the threads and use the stop_wait_callback to check
3304 if they have exited we can determine whether this signal
3305 should be ignored or whether it means the end of the debugged
3306 application, regardless of which threading model is being
5d3b6af6 3307 used. */
02f3fc28
PA
3308 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3309 {
3310 lp->stopped = 1;
d90e17a7 3311 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
12d9289a 3312 stop_and_resume_callback, new_pending_p);
02f3fc28
PA
3313 }
3314
3315 if (debug_linux_nat)
3316 fprintf_unfiltered (gdb_stdlog,
3317 "LLW: %s exited.\n",
3318 target_pid_to_str (lp->ptid));
3319
d90e17a7 3320 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3321 {
3322 /* If there is at least one more LWP, then the exit signal
3323 was not the end of the debugged application and should be
3324 ignored. */
3325 exit_lwp (lp);
3326 return NULL;
3327 }
02f3fc28
PA
3328 }
3329
3330 /* Check if the current LWP has previously exited. In the nptl
3331 thread model, LWPs other than the main thread do not issue
3332 signals when they exit so we must check whenever the thread has
3333 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3334 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3335 {
d90e17a7
PA
3336 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3337
02f3fc28
PA
3338 if (debug_linux_nat)
3339 fprintf_unfiltered (gdb_stdlog,
3340 "LLW: %s exited.\n",
3341 target_pid_to_str (lp->ptid));
3342
3343 exit_lwp (lp);
3344
3345 /* Make sure there is at least one thread running. */
d90e17a7 3346 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3347
3348 /* Discard the event. */
3349 return NULL;
3350 }
3351
3352 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3353 an attempt to stop an LWP. */
3354 if (lp->signalled
3355 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3356 {
3357 if (debug_linux_nat)
3358 fprintf_unfiltered (gdb_stdlog,
3359 "LLW: Delayed SIGSTOP caught for %s.\n",
3360 target_pid_to_str (lp->ptid));
3361
02f3fc28
PA
3362 lp->signalled = 0;
3363
25289eb2
PA
3364 if (lp->last_resume_kind != resume_stop)
3365 {
3366 /* This is a delayed SIGSTOP. */
02f3fc28 3367
25289eb2
PA
3368 registers_changed ();
3369
7b50312a
PA
3370 if (linux_nat_prepare_to_resume != NULL)
3371 linux_nat_prepare_to_resume (lp);
25289eb2 3372 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3373 lp->step, GDB_SIGNAL_0);
25289eb2
PA
3374 if (debug_linux_nat)
3375 fprintf_unfiltered (gdb_stdlog,
3376 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3377 lp->step ?
3378 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3379 target_pid_to_str (lp->ptid));
02f3fc28 3380
25289eb2
PA
3381 lp->stopped = 0;
3382 gdb_assert (lp->resumed);
02f3fc28 3383
25289eb2
PA
3384 /* Discard the event. */
3385 return NULL;
3386 }
02f3fc28
PA
3387 }
3388
57380f4e
DJ
3389 /* Make sure we don't report a SIGINT that we have already displayed
3390 for another thread. */
3391 if (lp->ignore_sigint
3392 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3393 {
3394 if (debug_linux_nat)
3395 fprintf_unfiltered (gdb_stdlog,
3396 "LLW: Delayed SIGINT caught for %s.\n",
3397 target_pid_to_str (lp->ptid));
3398
3399 /* This is a delayed SIGINT. */
3400 lp->ignore_sigint = 0;
3401
3402 registers_changed ();
7b50312a
PA
3403 if (linux_nat_prepare_to_resume != NULL)
3404 linux_nat_prepare_to_resume (lp);
28439f5e 3405 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3406 lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3407 if (debug_linux_nat)
3408 fprintf_unfiltered (gdb_stdlog,
3409 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3410 lp->step ?
3411 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3412 target_pid_to_str (lp->ptid));
3413
3414 lp->stopped = 0;
3415 gdb_assert (lp->resumed);
3416
3417 /* Discard the event. */
3418 return NULL;
3419 }
3420
02f3fc28
PA
3421 /* An interesting event. */
3422 gdb_assert (lp);
ca2163eb 3423 lp->status = status;
02f3fc28
PA
3424 return lp;
3425}
3426
0e5bf2a8
PA
3427/* Detect zombie thread group leaders, and "exit" them. We can't reap
3428 their exits until all other threads in the group have exited. */
3429
3430static void
3431check_zombie_leaders (void)
3432{
3433 struct inferior *inf;
3434
3435 ALL_INFERIORS (inf)
3436 {
3437 struct lwp_info *leader_lp;
3438
3439 if (inf->pid == 0)
3440 continue;
3441
3442 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3443 if (leader_lp != NULL
3444 /* Check if there are other threads in the group, as we may
3445 have raced with the inferior simply exiting. */
3446 && num_lwps (inf->pid) > 1
5f572dec 3447 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3448 {
3449 if (debug_linux_nat)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "CZL: Thread group leader %d zombie "
3452 "(it exited, or another thread execd).\n",
3453 inf->pid);
3454
3455 /* A leader zombie can mean one of two things:
3456
3457 - It exited, and there's an exit status pending to be
3458 reported, or only the leader exited (not the whole
3459 program). In the latter case, we can't waitpid the
3460 leader's exit status until all other threads are gone.
3461
3462 - There are 3 or more threads in the group, and a thread
3463 other than the leader exec'd. On an exec, the Linux
3464 kernel destroys all other threads (except the execing
3465 one) in the thread group, and resets the execing thread's
3466 tid to the tgid. No exit notification is sent for the
3467 execing thread -- from the ptracer's perspective, it
3468 appears as though the execing thread just vanishes.
3469 Until we reap all other threads except the leader and the
3470 execing thread, the leader will be zombie, and the
3471 execing thread will be in `D (disc sleep)'. As soon as
3472 all other threads are reaped, the execing thread changes
3473 its tid to the tgid, and the previous (zombie) leader
3474 vanishes, giving place to the "new" leader. We could try
3475 distinguishing the exit and exec cases, by waiting once
3476 more, and seeing if something comes out, but it doesn't
3477 sound useful. The previous leader _does_ go away, and
3478 we'll re-add the new one once we see the exec event
3479 (which is just the same as what would happen if the
3480 previous leader did exit voluntarily before some other
3481 thread execs). */
3482
3483 if (debug_linux_nat)
3484 fprintf_unfiltered (gdb_stdlog,
3485 "CZL: Thread group leader %d vanished.\n",
3486 inf->pid);
3487 exit_lwp (leader_lp);
3488 }
3489 }
3490}
3491
d6b0e80f 3492static ptid_t
7feb7d06 3493linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3494 ptid_t ptid, struct target_waitstatus *ourstatus,
3495 int target_options)
d6b0e80f 3496{
7feb7d06 3497 static sigset_t prev_mask;
4b60df3d 3498 enum resume_kind last_resume_kind;
12d9289a 3499 struct lwp_info *lp;
12d9289a 3500 int status;
d6b0e80f 3501
01124a23 3502 if (debug_linux_nat)
b84876c2
PA
3503 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3504
f973ed9c
DJ
3505 /* The first time we get here after starting a new inferior, we may
3506 not have added it to the LWP list yet - this is the earliest
3507 moment at which we know its PID. */
d90e17a7 3508 if (ptid_is_pid (inferior_ptid))
f973ed9c 3509 {
27c9d204
PA
3510 /* Upgrade the main thread's ptid. */
3511 thread_change_ptid (inferior_ptid,
3512 BUILD_LWP (GET_PID (inferior_ptid),
3513 GET_PID (inferior_ptid)));
3514
f973ed9c
DJ
3515 lp = add_lwp (inferior_ptid);
3516 lp->resumed = 1;
3517 }
3518
7feb7d06
PA
3519 /* Make sure SIGCHLD is blocked. */
3520 block_child_signals (&prev_mask);
d6b0e80f
AC
3521
3522retry:
d90e17a7
PA
3523 lp = NULL;
3524 status = 0;
d6b0e80f
AC
3525
3526 /* First check if there is a LWP with a wait status pending. */
0e5bf2a8 3527 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d6b0e80f 3528 {
0e5bf2a8 3529 /* Any LWP in the PTID group that's been resumed will do. */
d90e17a7 3530 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3531 if (lp)
3532 {
ca2163eb 3533 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3534 fprintf_unfiltered (gdb_stdlog,
3535 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3536 status_to_str (lp->status),
d6b0e80f
AC
3537 target_pid_to_str (lp->ptid));
3538 }
d6b0e80f
AC
3539 }
3540 else if (is_lwp (ptid))
3541 {
3542 if (debug_linux_nat)
3543 fprintf_unfiltered (gdb_stdlog,
3544 "LLW: Waiting for specific LWP %s.\n",
3545 target_pid_to_str (ptid));
3546
3547 /* We have a specific LWP to check. */
3548 lp = find_lwp_pid (ptid);
3549 gdb_assert (lp);
d6b0e80f 3550
ca2163eb 3551 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3552 fprintf_unfiltered (gdb_stdlog,
3553 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3554 status_to_str (lp->status),
d6b0e80f
AC
3555 target_pid_to_str (lp->ptid));
3556
d90e17a7
PA
3557 /* We check for lp->waitstatus in addition to lp->status,
3558 because we can have pending process exits recorded in
3559 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3560 an additional lp->status_p flag. */
ca2163eb 3561 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3562 lp = NULL;
d6b0e80f
AC
3563 }
3564
b84876c2
PA
3565 if (!target_can_async_p ())
3566 {
3567 /* Causes SIGINT to be passed on to the attached process. */
3568 set_sigint_trap ();
b84876c2 3569 }
d6b0e80f 3570
0e5bf2a8 3571 /* But if we don't find a pending event, we'll have to wait. */
7feb7d06 3572
d90e17a7 3573 while (lp == NULL)
d6b0e80f
AC
3574 {
3575 pid_t lwpid;
3576
0e5bf2a8
PA
3577 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3578 quirks:
3579
3580 - If the thread group leader exits while other threads in the
3581 thread group still exist, waitpid(TGID, ...) hangs. That
3582 waitpid won't return an exit status until the other threads
3583 in the group are reaped.
3584
3585 - When a non-leader thread execs, that thread just vanishes
3586 without reporting an exit (so we'd hang if we waited for it
3587 explicitly in that case). The exec event is reported to
3588 the TGID pid. */
3589
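 /* A "clone" child (one that does not deliver SIGCHLD on exit) is
    only reported when __WCLONE is given, while ordinary children are
    only reported without it, so poll with both flag combinations. */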
3590 errno = 0;
3591 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3592 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3593 lwpid = my_waitpid (-1, &status, WNOHANG);
3594
3595 if (debug_linux_nat)
3596 fprintf_unfiltered (gdb_stdlog,
3597 "LNW: waitpid(-1, ...) returned %d, %s\n",
3598 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3599
d6b0e80f
AC
3600 if (lwpid > 0)
3601 {
12d9289a
PA
3602 /* If this is true, then we paused LWPs momentarily, and may
3603 now have pending events to handle. */
3604 int new_pending;
3605
d6b0e80f
AC
3606 if (debug_linux_nat)
3607 {
3608 fprintf_unfiltered (gdb_stdlog,
3609 "LLW: waitpid %ld received %s\n",
3610 (long) lwpid, status_to_str (status));
3611 }
3612
0e5bf2a8 3613 lp = linux_nat_filter_event (lwpid, status, &new_pending);
d90e17a7 3614
33355866
JK
3615 /* STATUS is now no longer valid, use LP->STATUS instead. */
3616 status = 0;
3617
0e5bf2a8 3618 if (lp && !ptid_match (lp->ptid, ptid))
d6b0e80f 3619 {
e3e9f5a2
PA
3620 gdb_assert (lp->resumed);
3621
d90e17a7 3622 if (debug_linux_nat)
3e43a32a
MS
3623 fprintf (stderr,
3624 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3625 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3626
ca2163eb 3627 if (WIFSTOPPED (lp->status))
d90e17a7 3628 {
ca2163eb 3629 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3630 {
e3e9f5a2
PA
3631 /* Cancel breakpoint hits. The breakpoint may
3632 be removed before we fetch events from this
3633 process to report to the core. It is best
3634 not to assume the moribund breakpoints
3635 heuristic always handles these cases --- it
3636 could be too many events go through to the
3637 core before this one is handled. All-stop
3638 always cancels breakpoint hits in all
3639 threads. */
3640 if (non_stop
00390b84 3641 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3642 && cancel_breakpoint (lp))
3643 {
3644 /* Throw away the SIGTRAP. */
3645 lp->status = 0;
3646
3647 if (debug_linux_nat)
3648 fprintf (stderr,
3e43a32a
MS
3649 "LLW: LWP %ld hit a breakpoint while"
3650 " waiting for another process;"
3651 " cancelled it\n",
e3e9f5a2
PA
3652 ptid_get_lwp (lp->ptid));
3653 }
3654 lp->stopped = 1;
d90e17a7
PA
3655 }
3656 else
3657 {
3658 lp->stopped = 1;
3659 lp->signalled = 0;
3660 }
3661 }
33355866 3662 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3663 {
3664 if (debug_linux_nat)
3e43a32a
MS
3665 fprintf (stderr,
3666 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3667 ptid_get_lwp (lp->ptid));
3668
3669 /* This was the last lwp in the process. Since
3670 events are serialized to GDB core, and we can't
3671 report this one right now, but GDB core and the
3672 other target layers will want to be notified
3673 about the exit code/signal, leave the status
3674 pending for the next time we're able to report
3675 it. */
d90e17a7
PA
3676
3677 /* Prevent trying to stop this thread again. We'll
3678 never try to resume it because it has a pending
3679 status. */
3680 lp->stopped = 1;
3681
3682 /* Dead LWPs aren't expected to report a pending
3683 SIGSTOP. */
3684 lp->signalled = 0;
3685
3686 /* Store the pending event in the waitstatus as
3687 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3688 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3689 }
3690
3691 /* Keep looking. */
3692 lp = NULL;
d6b0e80f
AC
3693 }
3694
0e5bf2a8 3695 if (new_pending)
d90e17a7 3696 {
0e5bf2a8
PA
3697 /* Some LWP now has a pending event. Go all the way
3698 back to check it. */
3699 goto retry;
3700 }
12d9289a 3701
0e5bf2a8
PA
3702 if (lp)
3703 {
3704 /* We got an event to report to the core. */
3705 break;
d90e17a7 3706 }
0e5bf2a8
PA
3707
3708 /* Retry until nothing comes out of waitpid. A single
3709 SIGCHLD can indicate more than one child stopped. */
3710 continue;
d6b0e80f
AC
3711 }
3712
0e5bf2a8
PA
3713 /* Check for zombie thread group leaders. Those can't be reaped
3714 until all other threads in the thread group are. */
3715 check_zombie_leaders ();
d6b0e80f 3716
0e5bf2a8
PA
3717 /* If there are no resumed children left, bail. We'd be stuck
3718 forever in the sigsuspend call below otherwise. */
3719 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3720 {
3721 if (debug_linux_nat)
3722 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3723
0e5bf2a8 3724 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3725
0e5bf2a8
PA
3726 if (!target_can_async_p ())
3727 clear_sigint_trap ();
b84876c2 3728
0e5bf2a8
PA
3729 restore_child_signals_mask (&prev_mask);
3730 return minus_one_ptid;
d6b0e80f 3731 }
28736962 3732
0e5bf2a8
PA
3733 /* No interesting event to report to the core. */
3734
3735 if (target_options & TARGET_WNOHANG)
3736 {
01124a23 3737 if (debug_linux_nat)
28736962
PA
3738 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3739
0e5bf2a8 3740 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3741 restore_child_signals_mask (&prev_mask);
3742 return minus_one_ptid;
3743 }
d6b0e80f
AC
3744
3745 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3746 gdb_assert (lp == NULL);
0e5bf2a8
PA
3747
3748 /* Block until we get an event reported with SIGCHLD. */
3749 sigsuspend (&suspend_mask);
d6b0e80f
AC
3750 }
3751
b84876c2 3752 if (!target_can_async_p ())
d26b5354 3753 clear_sigint_trap ();
d6b0e80f
AC
3754
3755 gdb_assert (lp);
3756
ca2163eb
PA
3757 status = lp->status;
3758 lp->status = 0;
3759
d6b0e80f
AC
3760 /* Don't report signals that GDB isn't interested in, such as
3761 signals that are neither printed nor stopped upon. Stopping all
3762 threads can be a bit time-consuming so if we want decent
3763 performance with heavily multi-threaded programs, especially when
3764 they're using a high frequency timer, we'd better avoid it if we
3765 can. */
3766
3767 if (WIFSTOPPED (status))
3768 {
2ea28649 3769 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
d6b0e80f 3770
2455069d
UW
3771 /* When using hardware single-step, we need to report every signal.
3772 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3773 if (!lp->step
2455069d 3774 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3775 {
3776 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3777 here? It is not clear we should. GDB may not expect
3778 other threads to run. On the other hand, not resuming
3779 newly attached threads may cause an unwanted delay in
3780 getting them running. */
3781 registers_changed ();
7b50312a
PA
3782 if (linux_nat_prepare_to_resume != NULL)
3783 linux_nat_prepare_to_resume (lp);
28439f5e 3784 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3785 lp->step, signo);
d6b0e80f
AC
3786 if (debug_linux_nat)
3787 fprintf_unfiltered (gdb_stdlog,
3788 "LLW: %s %s, %s (preempt 'handle')\n",
3789 lp->step ?
3790 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3791 target_pid_to_str (lp->ptid),
a493e3e2 3792 (signo != GDB_SIGNAL_0
2ea28649 3793 ? strsignal (gdb_signal_to_host (signo))
423ec54c 3794 : "0"));
d6b0e80f 3795 lp->stopped = 0;
d6b0e80f
AC
3796 goto retry;
3797 }
3798
1ad15515 3799 if (!non_stop)
d6b0e80f 3800 {
1ad15515
PA
3801 /* Only do the below in all-stop, as we currently use SIGINT
3802 to implement target_stop (see linux_nat_stop) in
3803 non-stop. */
a493e3e2 3804 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
1ad15515
PA
3805 {
3806 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3807 forwarded to the entire process group, that is, all LWPs
3808 will receive it - unless they're using CLONE_THREAD to
3809 share signals. Since we only want to report it once, we
3810 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3811 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3812 set_ignore_sigint, NULL);
1ad15515
PA
3813 lp->ignore_sigint = 0;
3814 }
3815 else
3816 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3817 }
3818 }
3819
3820 /* This LWP is stopped now. */
3821 lp->stopped = 1;
3822
3823 if (debug_linux_nat)
3824 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3825 status_to_str (status), target_pid_to_str (lp->ptid));
3826
4c28f408
PA
3827 if (!non_stop)
3828 {
3829 /* Now stop all other LWP's ... */
d90e17a7 3830 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3831
3832 /* ... and wait until all of them have reported back that
3833 they're no longer running. */
d90e17a7 3834 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3835
3836 /* If we're not waiting for a specific LWP, choose an event LWP
3837 from among those that have had events. Giving equal priority
3838 to all LWPs that have had events helps prevent
3839 starvation. */
0e5bf2a8 3840 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d90e17a7 3841 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3842
e3e9f5a2
PA
3843 /* Now that we've selected our final event LWP, cancel any
3844 breakpoints in other LWPs that have hit a GDB breakpoint.
3845 See the comment in cancel_breakpoints_callback to find out
3846 why. */
3847 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3848
4b60df3d
PA
3849 /* We'll need this to determine whether to report a SIGSTOP as
3850 GDB_SIGNAL_0. Need to take a copy because
3851 resume_clear_callback clears it. */
3852 last_resume_kind = lp->last_resume_kind;
3853
e3e9f5a2
PA
3854 /* In all-stop, from the core's perspective, all LWPs are now
3855 stopped until a new resume action is sent over. */
3856 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3857 }
3858 else
25289eb2 3859 {
4b60df3d
PA
3860 /* See above. */
3861 last_resume_kind = lp->last_resume_kind;
3862 resume_clear_callback (lp, NULL);
25289eb2 3863 }
d6b0e80f 3864
26ab7092 3865 if (linux_nat_status_is_event (status))
d6b0e80f 3866 {
d6b0e80f
AC
3867 if (debug_linux_nat)
3868 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3869 "LLW: trap ptid is %s.\n",
3870 target_pid_to_str (lp->ptid));
d6b0e80f 3871 }
d6b0e80f
AC
3872
3873 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3874 {
3875 *ourstatus = lp->waitstatus;
3876 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3877 }
3878 else
3879 store_waitstatus (ourstatus, status);
3880
01124a23 3881 if (debug_linux_nat)
b84876c2
PA
3882 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3883
7feb7d06 3884 restore_child_signals_mask (&prev_mask);
1e225492 3885
4b60df3d 3886 if (last_resume_kind == resume_stop
25289eb2
PA
3887 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3888 && WSTOPSIG (status) == SIGSTOP)
3889 {
3890 /* A thread that has been requested to stop by GDB with
3891 target_stop, and it stopped cleanly, so report as SIG0. The
3892 use of SIGSTOP is an implementation detail. */
a493e3e2 3893 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3894 }
3895
1e225492
JK
3896 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3897 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3898 lp->core = -1;
3899 else
2e794194 3900 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3901
f973ed9c 3902 return lp->ptid;
d6b0e80f
AC
3903}
3904
e3e9f5a2
PA
3905/* Resume LWPs that are currently stopped without any pending status
3906 to report, but are resumed from the core's perspective. */
3907
3908static int
3909resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3910{
3911 ptid_t *wait_ptid_p = data;
3912
3913 if (lp->stopped
3914 && lp->resumed
3915 && lp->status == 0
3916 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3917 {
336060f3
PA
3918 struct regcache *regcache = get_thread_regcache (lp->ptid);
3919 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3920 CORE_ADDR pc = regcache_read_pc (regcache);
3921
e3e9f5a2
PA
3922 gdb_assert (is_executing (lp->ptid));
3923
3924 /* Don't bother if there's a breakpoint at PC that we'd hit
3925 immediately, and we're not waiting for this LWP. */
3926 if (!ptid_match (lp->ptid, *wait_ptid_p))
3927 {
e3e9f5a2
PA
3928 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3929 return 0;
3930 }
3931
3932 if (debug_linux_nat)
3933 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
3934 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3935 target_pid_to_str (lp->ptid),
3936 paddress (gdbarch, pc),
3937 lp->step);
e3e9f5a2 3938
336060f3 3939 registers_changed ();
7b50312a
PA
3940 if (linux_nat_prepare_to_resume != NULL)
3941 linux_nat_prepare_to_resume (lp);
e3e9f5a2 3942 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3943 lp->step, GDB_SIGNAL_0);
e3e9f5a2
PA
3944 lp->stopped = 0;
3945 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3946 lp->stopped_by_watchpoint = 0;
3947 }
3948
3949 return 0;
3950}
3951
7feb7d06
PA
3952static ptid_t
3953linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3954 ptid_t ptid, struct target_waitstatus *ourstatus,
3955 int target_options)
7feb7d06
PA
3956{
3957 ptid_t event_ptid;
3958
3959 if (debug_linux_nat)
3e43a32a
MS
3960 fprintf_unfiltered (gdb_stdlog,
3961 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
7feb7d06
PA
3962
3963 /* Flush the async file first. */
3964 if (target_can_async_p ())
3965 async_file_flush ();
3966
e3e9f5a2
PA
3967 /* Resume LWPs that are currently stopped without any pending status
3968 to report, but are resumed from the core's perspective. LWPs get
3969 in this state if we find them stopping at a time we're not
3970 interested in reporting the event (target_wait on a
3971 specific_process, for example, see linux_nat_wait_1), and
3972 meanwhile the event became uninteresting. Don't bother resuming
3973 LWPs we're not going to wait for if they'd stop immediately. */
3974 if (non_stop)
3975 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3976
47608cb1 3977 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3978
3979 /* If we requested any event, and something came out, assume there
3980 may be more. If we requested a specific lwp or process, also
3981 assume there may be more. */
3982 if (target_can_async_p ()
6953d224
PA
3983 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3984 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3985 || !ptid_equal (ptid, minus_one_ptid)))
3986 async_file_mark ();
3987
3988 /* Get ready for the next event. */
3989 if (target_can_async_p ())
3990 target_async (inferior_event_handler, 0);
3991
3992 return event_ptid;
3993}
3994
d6b0e80f
AC
3995static int
3996kill_callback (struct lwp_info *lp, void *data)
3997{
ed731959
JK
3998 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3999
4000 errno = 0;
4001 kill (GET_LWP (lp->ptid), SIGKILL);
4002 if (debug_linux_nat)
4003 fprintf_unfiltered (gdb_stdlog,
4004 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4005 target_pid_to_str (lp->ptid),
4006 errno ? safe_strerror (errno) : "OK");
4007
4008 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4009
d6b0e80f
AC
4010 errno = 0;
4011 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4012 if (debug_linux_nat)
4013 fprintf_unfiltered (gdb_stdlog,
4014 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4015 target_pid_to_str (lp->ptid),
4016 errno ? safe_strerror (errno) : "OK");
4017
4018 return 0;
4019}
4020
4021static int
4022kill_wait_callback (struct lwp_info *lp, void *data)
4023{
4024 pid_t pid;
4025
4026 /* We must make sure that there are no pending events (delayed
4027 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4028 program doesn't interfere with any following debugging session. */
4029
4030 /* For cloned processes we must check both with __WCLONE and
4031 without, since the exit status of a cloned process isn't reported
4032 with __WCLONE. */
4033 if (lp->cloned)
4034 {
4035 do
4036 {
58aecb61 4037 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 4038 if (pid != (pid_t) -1)
d6b0e80f 4039 {
e85a822c
DJ
4040 if (debug_linux_nat)
4041 fprintf_unfiltered (gdb_stdlog,
4042 "KWC: wait %s received unknown.\n",
4043 target_pid_to_str (lp->ptid));
4044 /* The Linux kernel sometimes fails to kill a thread
4045 completely after PTRACE_KILL; the thread goes from the stop
4046 point in do_fork out to the one in
4047 get_signal_to_deliver and waits again. So kill it
4048 again. */
4049 kill_callback (lp, NULL);
d6b0e80f
AC
4050 }
4051 }
4052 while (pid == GET_LWP (lp->ptid));
4053
4054 gdb_assert (pid == -1 && errno == ECHILD);
4055 }
4056
4057 do
4058 {
58aecb61 4059 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 4060 if (pid != (pid_t) -1)
d6b0e80f 4061 {
e85a822c
DJ
4062 if (debug_linux_nat)
4063 fprintf_unfiltered (gdb_stdlog,
4064 "KWC: wait %s received unk.\n",
4065 target_pid_to_str (lp->ptid));
4066 /* See the call to kill_callback above. */
4067 kill_callback (lp, NULL);
d6b0e80f
AC
4068 }
4069 }
4070 while (pid == GET_LWP (lp->ptid));
4071
4072 gdb_assert (pid == -1 && errno == ECHILD);
4073 return 0;
4074}
4075
4076static void
7d85a9c0 4077linux_nat_kill (struct target_ops *ops)
d6b0e80f 4078{
f973ed9c
DJ
4079 struct target_waitstatus last;
4080 ptid_t last_ptid;
4081 int status;
d6b0e80f 4082
f973ed9c
DJ
4083 /* If we're stopped while forking and we haven't followed yet,
4084 kill the other task. We need to do this first because the
4085 parent will be sleeping if this is a vfork. */
d6b0e80f 4086
f973ed9c 4087 get_last_target_status (&last_ptid, &last);
d6b0e80f 4088
f973ed9c
DJ
4089 if (last.kind == TARGET_WAITKIND_FORKED
4090 || last.kind == TARGET_WAITKIND_VFORKED)
4091 {
3a3e9ee3 4092 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
4093 wait (&status);
4094 }
4095
4096 if (forks_exist_p ())
7feb7d06 4097 linux_fork_killall ();
f973ed9c
DJ
4098 else
4099 {
d90e17a7 4100 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 4101
4c28f408
PA
4102 /* Stop all threads before killing them, since ptrace requires
4103 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 4104 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
4105 /* ... and wait until all of them have reported back that
4106 they're no longer running. */
d90e17a7 4107 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 4108
f973ed9c 4109 /* Kill all LWP's ... */
d90e17a7 4110 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
4111
4112 /* ... and wait until we've flushed all events. */
d90e17a7 4113 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
4114 }
4115
4116 target_mourn_inferior ();
d6b0e80f
AC
4117}
4118
4119static void
136d6dae 4120linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 4121{
d90e17a7 4122 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 4123
f973ed9c 4124 if (! forks_exist_p ())
d90e17a7
PA
4125 /* Normal case, no other forks available. */
4126 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
4127 else
4128 /* Multi-fork case. The current inferior_ptid has exited, but
4129 there are other viable forks to debug. Delete the exiting
4130 one and context-switch to the first available. */
4131 linux_fork_mourn_inferior ();
d6b0e80f
AC
4132}
4133
5b009018
PA
4134/* Convert a native/host siginfo object, into/from the siginfo in the
4135 layout of the inferiors' architecture. */
4136
4137static void
a5362b9a 4138siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
4139{
4140 int done = 0;
4141
4142 if (linux_nat_siginfo_fixup != NULL)
4143 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4144
4145 /* If there was no callback, or the callback didn't do anything,
4146 then just do a straight memcpy. */
4147 if (!done)
4148 {
4149 if (direction == 1)
a5362b9a 4150 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 4151 else
a5362b9a 4152 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
4153 }
4154}
4155
4aa995e1
PA
4156static LONGEST
4157linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4158 const char *annex, gdb_byte *readbuf,
4159 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4160{
4aa995e1 4161 int pid;
a5362b9a
TS
4162 siginfo_t siginfo;
4163 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
4164
4165 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4166 gdb_assert (readbuf || writebuf);
4167
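 /* inferior_ptid may still be a plain process ptid with no LWP id
    filled in; fall back to the process id for the ptrace call. */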
4168 pid = GET_LWP (inferior_ptid);
4169 if (pid == 0)
4170 pid = GET_PID (inferior_ptid);
4171
4172 if (offset > sizeof (siginfo))
4173 return -1;
4174
4175 errno = 0;
4176 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4177 if (errno != 0)
4178 return -1;
4179
5b009018
PA
4180 /* When GDB is built as a 64-bit application, ptrace writes into
4181 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4182 inferior with a 64-bit GDB should look the same as debugging it
4183 with a 32-bit GDB, we need to convert it. GDB core always sees
4184 the converted layout, so any read/write will have to be done
4185 post-conversion. */
4186 siginfo_fixup (&siginfo, inf_siginfo, 0);
4187
4aa995e1
PA
4188 if (offset + len > sizeof (siginfo))
4189 len = sizeof (siginfo) - offset;
4190
4191 if (readbuf != NULL)
5b009018 4192 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4193 else
4194 {
5b009018
PA
4195 memcpy (inf_siginfo + offset, writebuf, len);
4196
4197 /* Convert back to ptrace layout before flushing it out. */
4198 siginfo_fixup (&siginfo, inf_siginfo, 1);
4199
4aa995e1
PA
4200 errno = 0;
4201 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4202 if (errno != 0)
4203 return -1;
4204 }
4205
4206 return len;
4207}
4208
10d6c8cd
DJ
4209static LONGEST
4210linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4211 const char *annex, gdb_byte *readbuf,
4212 const gdb_byte *writebuf,
4213 ULONGEST offset, LONGEST len)
d6b0e80f 4214{
4aa995e1 4215 struct cleanup *old_chain;
10d6c8cd 4216 LONGEST xfer;
d6b0e80f 4217
4aa995e1
PA
4218 if (object == TARGET_OBJECT_SIGNAL_INFO)
4219 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4220 offset, len);
4221
c35b1492
PA
4222 /* The target is connected but no live inferior is selected. Pass
4223 this request down to a lower stratum (e.g., the executable
4224 file). */
4225 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4226 return 0;
4227
4aa995e1
PA
4228 old_chain = save_inferior_ptid ();
4229
d6b0e80f
AC
4230 if (is_lwp (inferior_ptid))
4231 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4232
10d6c8cd
DJ
4233 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4234 offset, len);
d6b0e80f
AC
4235
4236 do_cleanups (old_chain);
4237 return xfer;
4238}
4239
4240static int
28439f5e 4241linux_thread_alive (ptid_t ptid)
d6b0e80f 4242{
8c6a60d1 4243 int err, tmp_errno;
4c28f408 4244
d6b0e80f
AC
4245 gdb_assert (is_lwp (ptid));
4246
4c28f408
PA
4247 /* Send signal 0 instead of anything ptrace, because ptracing a
4248 running thread errors out claiming that the thread doesn't
4249 exist. */
4250 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4251 tmp_errno = errno;
d6b0e80f
AC
4252 if (debug_linux_nat)
4253 fprintf_unfiltered (gdb_stdlog,
4c28f408 4254 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4255 target_pid_to_str (ptid),
8c6a60d1 4256 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4257
4c28f408 4258 if (err != 0)
d6b0e80f
AC
4259 return 0;
4260
4261 return 1;
4262}
4263
28439f5e
PA
4264static int
4265linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4266{
4267 return linux_thread_alive (ptid);
4268}
4269
d6b0e80f 4270static char *
117de6a9 4271linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4272{
4273 static char buf[64];
4274
a0ef4274 4275 if (is_lwp (ptid)
d90e17a7
PA
4276 && (GET_PID (ptid) != GET_LWP (ptid)
4277 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4278 {
4279 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4280 return buf;
4281 }
4282
4283 return normal_pid_to_str (ptid);
4284}
4285
4694da01
TT
4286static char *
4287linux_nat_thread_name (struct thread_info *thr)
4288{
4289 int pid = ptid_get_pid (thr->ptid);
4290 long lwp = ptid_get_lwp (thr->ptid);
4291#define FORMAT "/proc/%d/task/%ld/comm"
4292 char buf[sizeof (FORMAT) + 30];
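 /* sizeof (FORMAT) accounts for the format string itself; the extra
    30 bytes leave room for the expanded decimal PID and LWP. */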
4293 FILE *comm_file;
4294 char *result = NULL;
4295
4296 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4297 comm_file = fopen (buf, "r");
4298 if (comm_file)
4299 {
4300 /* Not exported by the kernel, so we define it here. */
4301#define COMM_LEN 16
4302 static char line[COMM_LEN + 1];
4303
4304 if (fgets (line, sizeof (line), comm_file))
4305 {
4306 char *nl = strchr (line, '\n');
4307
4308 if (nl)
4309 *nl = '\0';
4310 if (*line != '\0')
4311 result = line;
4312 }
4313
4314 fclose (comm_file);
4315 }
4316
4317#undef COMM_LEN
4318#undef FORMAT
4319
4320 return result;
4321}
4322
dba24537
AC
4323/* Accepts an integer PID; Returns a string representing a file that
4324 can be opened to get the symbols for the child process. */
4325
6d8fd2b7
UW
4326static char *
4327linux_child_pid_to_exec_file (int pid)
dba24537
AC
4328{
4329 char *name1, *name2;
4330
4331 name1 = xmalloc (MAXPATHLEN);
4332 name2 = xmalloc (MAXPATHLEN);
4333 make_cleanup (xfree, name1);
4334 make_cleanup (xfree, name2);
4335 memset (name2, 0, MAXPATHLEN);
4336
4337 sprintf (name1, "/proc/%d/exe", pid);
4338 if (readlink (name1, name2, MAXPATHLEN) > 0)
4339 return name2;
4340 else
4341 return name1;
4342}
4343
dba24537
AC
4344/* Records the thread's register state for the corefile note
4345 section. */
4346
4347static char *
6432734d
UW
4348linux_nat_collect_thread_registers (const struct regcache *regcache,
4349 ptid_t ptid, bfd *obfd,
4350 char *note_data, int *note_size,
2ea28649 4351 enum gdb_signal stop_signal)
dba24537 4352{
6432734d 4353 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 4354 const struct regset *regset;
55e969c1 4355 int core_regset_p;
6432734d
UW
4356 gdb_gregset_t gregs;
4357 gdb_fpregset_t fpregs;
4f844a66
DM
4358
4359 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
dba24537 4360
6432734d
UW
4361 if (core_regset_p
4362 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4363 sizeof (gregs)))
4364 != NULL && regset->collect_regset != NULL)
4365 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4f844a66 4366 else
6432734d 4367 fill_gregset (regcache, &gregs, -1);
2f2241f1 4368
6432734d
UW
4369 note_data = (char *) elfcore_write_prstatus
4370 (obfd, note_data, note_size, ptid_get_lwp (ptid),
2ea28649 4371 gdb_signal_to_host (stop_signal), &gregs);
2f2241f1 4372
6432734d
UW
4373 if (core_regset_p
4374 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4375 sizeof (fpregs)))
3e43a32a 4376 != NULL && regset->collect_regset != NULL)
6432734d
UW
4377 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4378 else
4379 fill_fpregset (regcache, &fpregs, -1);
17ea7499 4380
6432734d
UW
4381 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4382 &fpregs, sizeof (fpregs));
4f844a66 4383
dba24537
AC
4384 return note_data;
4385}
4386
dba24537
AC
4387/* Fills the "to_make_corefile_note" target vector. Builds the note
4388 section for a corefile, and returns it in a malloc buffer. */
4389
4390static char *
4391linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4392{
6432734d
UW
4393 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4394 converted to gdbarch_core_regset_sections, this function can go away. */
4395 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4396 linux_nat_collect_thread_registers);
dba24537
AC
4397}
4398
10d6c8cd
DJ
4399/* Implement the to_xfer_partial interface for memory reads using the /proc
4400 filesystem. Because we can use a single read() call for /proc, this
4401 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4402 but it doesn't support writes. */
4403
4404static LONGEST
4405linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4406 const char *annex, gdb_byte *readbuf,
4407 const gdb_byte *writebuf,
4408 ULONGEST offset, LONGEST len)
dba24537 4409{
10d6c8cd
DJ
4410 LONGEST ret;
4411 int fd;
dba24537
AC
4412 char filename[64];
4413
10d6c8cd 4414 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4415 return 0;
4416
4417 /* Don't bother for one word. */
4418 if (len < 3 * sizeof (long))
4419 return 0;
4420
4421 /* We could keep this file open and cache it - possibly one per
4422 thread. That requires some juggling, but is even faster. */
4423 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4424 fd = open (filename, O_RDONLY | O_LARGEFILE);
4425 if (fd == -1)
4426 return 0;
4427
4428 /* If pread64 is available, use it. It's faster if the kernel
4429 supports it (only one syscall), and it's 64-bit safe even on
4430 32-bit platforms (for instance, SPARC debugging a SPARC64
4431 application). */
4432#ifdef HAVE_PREAD64
10d6c8cd 4433 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4434#else
10d6c8cd 4435 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4436#endif
4437 ret = 0;
4438 else
4439 ret = len;
4440
4441 close (fd);
4442 return ret;
4443}
4444
efcbbd14
UW
4445
4446/* Enumerate spufs IDs for process PID. */
4447static LONGEST
4448spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4449{
4450 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4451 LONGEST pos = 0;
4452 LONGEST written = 0;
4453 char path[128];
4454 DIR *dir;
4455 struct dirent *entry;
4456
4457 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4458 dir = opendir (path);
4459 if (!dir)
4460 return -1;
4461
4462 rewinddir (dir);
4463 while ((entry = readdir (dir)) != NULL)
4464 {
4465 struct stat st;
4466 struct statfs stfs;
4467 int fd;
4468
4469 fd = atoi (entry->d_name);
4470 if (!fd)
4471 continue;
4472
4473 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4474 if (stat (path, &st) != 0)
4475 continue;
4476 if (!S_ISDIR (st.st_mode))
4477 continue;
4478
4479 if (statfs (path, &stfs) != 0)
4480 continue;
4481 if (stfs.f_type != SPUFS_MAGIC)
4482 continue;
4483
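 /* Each spufs context contributes one 4-byte ID to the object; copy
    it only if it falls entirely inside the requested
    [offset, offset + len) window. */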
4484 if (pos >= offset && pos + 4 <= offset + len)
4485 {
4486 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4487 written += 4;
4488 }
4489 pos += 4;
4490 }
4491
4492 closedir (dir);
4493 return written;
4494}
4495
4496/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4497 object type, using the /proc file system. */
4498static LONGEST
4499linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4500 const char *annex, gdb_byte *readbuf,
4501 const gdb_byte *writebuf,
4502 ULONGEST offset, LONGEST len)
4503{
4504 char buf[128];
4505 int fd = 0;
4506 int ret = -1;
4507 int pid = PIDGET (inferior_ptid);
4508
4509 if (!annex)
4510 {
4511 if (!readbuf)
4512 return -1;
4513 else
4514 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4515 }
4516
4517 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4518 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4519 if (fd <= 0)
4520 return -1;
4521
4522 if (offset != 0
4523 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4524 {
4525 close (fd);
4526 return 0;
4527 }
4528
4529 if (writebuf)
4530 ret = write (fd, writebuf, (size_t) len);
4531 else if (readbuf)
4532 ret = read (fd, readbuf, (size_t) len);
4533
4534 close (fd);
4535 return ret;
4536}
4537
4538
dba24537
AC
4539/* Parse LINE as a signal set and add its set bits to SIGS. */
4540
4541static void
4542add_line_to_sigset (const char *line, sigset_t *sigs)
4543{
4544 int len = strlen (line) - 1;
4545 const char *p;
4546 int signum;
4547
4548 if (line[len] != '\n')
8a3fe4f8 4549 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4550
4551 p = line;
4552 signum = len * 4;
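 /* The set is printed as a hex string with the most significant digit
    first; each hex digit encodes four consecutive signal numbers, so
    start at the highest number and work downwards. */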
4553 while (len-- > 0)
4554 {
4555 int digit;
4556
4557 if (*p >= '0' && *p <= '9')
4558 digit = *p - '0';
4559 else if (*p >= 'a' && *p <= 'f')
4560 digit = *p - 'a' + 10;
4561 else
8a3fe4f8 4562 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4563
4564 signum -= 4;
4565
4566 if (digit & 1)
4567 sigaddset (sigs, signum + 1);
4568 if (digit & 2)
4569 sigaddset (sigs, signum + 2);
4570 if (digit & 4)
4571 sigaddset (sigs, signum + 3);
4572 if (digit & 8)
4573 sigaddset (sigs, signum + 4);
4574
4575 p++;
4576 }
4577}
4578
4579/* Find process PID's pending signals from /proc/pid/status and set
4580 PENDING, BLOCKED, and IGNORED to match. */
4581
4582void
3e43a32a
MS
4583linux_proc_pending_signals (int pid, sigset_t *pending,
4584 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4585{
4586 FILE *procfile;
4587 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 4588 struct cleanup *cleanup;
dba24537
AC
4589
4590 sigemptyset (pending);
4591 sigemptyset (blocked);
4592 sigemptyset (ignored);
4593 sprintf (fname, "/proc/%d/status", pid);
4594 procfile = fopen (fname, "r");
4595 if (procfile == NULL)
8a3fe4f8 4596 error (_("Could not open %s"), fname);
7c8a8b04 4597 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4598
4599 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4600 {
4601 /* Normal queued signals are on the SigPnd line in the status
4602 file. However, 2.6 kernels also have a "shared" pending
4603 queue for delivering signals to a thread group, so check for
4604 a ShdPnd line also.
4605
4606 Unfortunately some Red Hat kernels include the shared pending
4607 queue but not the ShdPnd status field. */
4608
4609 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4610 add_line_to_sigset (buffer + 8, pending);
4611 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4612 add_line_to_sigset (buffer + 8, pending);
4613 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4614 add_line_to_sigset (buffer + 8, blocked);
4615 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4616 add_line_to_sigset (buffer + 8, ignored);
4617 }
4618
7c8a8b04 4619 do_cleanups (cleanup);
dba24537
AC
4620}
4621
07e059b5
VP
4622static LONGEST
4623linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
4624 const char *annex, gdb_byte *readbuf,
4625 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5 4626{
07e059b5
VP
4627 gdb_assert (object == TARGET_OBJECT_OSDATA);
4628
d26e3629 4629 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4630}
4631
10d6c8cd
DJ
4632static LONGEST
4633linux_xfer_partial (struct target_ops *ops, enum target_object object,
4634 const char *annex, gdb_byte *readbuf,
4635 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4636{
4637 LONGEST xfer;
4638
4639 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4640 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
4641 offset, len);
4642
07e059b5
VP
4643 if (object == TARGET_OBJECT_OSDATA)
4644 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4645 offset, len);
4646
efcbbd14
UW
4647 if (object == TARGET_OBJECT_SPU)
4648 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4649 offset, len);
4650
8f313923
JK
4651 /* GDB calculates all the addresses in the possibly larger width of the address.
4652 Address width needs to be masked before its final use - either by
4653 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4654
4655 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4656
4657 if (object == TARGET_OBJECT_MEMORY)
4658 {
4659 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4660
4661 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4662 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4663 }
4664
10d6c8cd
DJ
4665 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4666 offset, len);
4667 if (xfer != 0)
4668 return xfer;
4669
4670 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4671 offset, len);
4672}
4673
5808517f
YQ
4674static void
4675cleanup_target_stop (void *arg)
4676{
4677 ptid_t *ptid = (ptid_t *) arg;
4678
4679 gdb_assert (arg != NULL);
4680
4681 /* Unpause all */
a493e3e2 4682 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4683}
4684
4685static VEC(static_tracepoint_marker_p) *
4686linux_child_static_tracepoint_markers_by_strid (const char *strid)
4687{
4688 char s[IPA_CMD_BUF_SIZE];
4689 struct cleanup *old_chain;
4690 int pid = ptid_get_pid (inferior_ptid);
4691 VEC(static_tracepoint_marker_p) *markers = NULL;
4692 struct static_tracepoint_marker *marker = NULL;
4693 char *p = s;
4694 ptid_t ptid = ptid_build (pid, 0, 0);
4695
4696 /* Pause all */
4697 target_stop (ptid);
4698
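 /* qTfSTM asks the in-process agent for the first batch of static
    tracepoint markers; replies starting with 'm' carry comma-separated
    marker definitions, and qTsSTM below fetches the following
    batches. */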
4699 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4700 s[sizeof ("qTfSTM")] = 0;
4701
42476b70 4702 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4703
4704 old_chain = make_cleanup (free_current_marker, &marker);
4705 make_cleanup (cleanup_target_stop, &ptid);
4706
4707 while (*p++ == 'm')
4708 {
4709 if (marker == NULL)
4710 marker = XCNEW (struct static_tracepoint_marker);
4711
4712 do
4713 {
4714 parse_static_tracepoint_marker_definition (p, &p, marker);
4715
4716 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4717 {
4718 VEC_safe_push (static_tracepoint_marker_p,
4719 markers, marker);
4720 marker = NULL;
4721 }
4722 else
4723 {
4724 release_static_tracepoint_marker (marker);
4725 memset (marker, 0, sizeof (*marker));
4726 }
4727 }
4728 while (*p++ == ','); /* comma-separated list */
4729
4730 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4731 s[sizeof ("qTsSTM")] = 0;
42476b70 4732 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4733 p = s;
4734 }
4735
4736 do_cleanups (old_chain);
4737
4738 return markers;
4739}
4740
e9efe249 4741/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4742 it with local methods. */
4743
910122bf
UW
4744static void
4745linux_target_install_ops (struct target_ops *t)
10d6c8cd 4746{
6d8fd2b7 4747 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4748 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4749 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4750 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4751 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4752 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4753 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4754 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4755 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4756 t->to_post_attach = linux_child_post_attach;
4757 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4758 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4759
4760 super_xfer_partial = t->to_xfer_partial;
4761 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4762
4763 t->to_static_tracepoint_markers_by_strid
4764 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4765}
4766
4767struct target_ops *
4768linux_target (void)
4769{
4770 struct target_ops *t;
4771
4772 t = inf_ptrace_target ();
4773 linux_target_install_ops (t);
4774
4775 return t;
4776}
4777
4778struct target_ops *
7714d83a 4779linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4780{
4781 struct target_ops *t;
4782
4783 t = inf_ptrace_trad_target (register_u_offset);
4784 linux_target_install_ops (t);
10d6c8cd 4785
10d6c8cd
DJ
4786 return t;
4787}
4788
b84876c2
PA
4789/* target_is_async_p implementation. */
4790
4791static int
4792linux_nat_is_async_p (void)
4793{
4794 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4795 it explicitly with the "set target-async" command.
b84876c2 4796 Someday, linux will always be async. */
3dd5b83d 4797 return target_async_permitted;
b84876c2
PA
4798}
4799
4800/* target_can_async_p implementation. */
4801
4802static int
4803linux_nat_can_async_p (void)
4804{
4805 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4806 it explicitly with the "set target-async" command.
b84876c2 4807 Someday, linux will always be async. */
3dd5b83d 4808 return target_async_permitted;
b84876c2
PA
4809}
4810
9908b566
VP
4811static int
4812linux_nat_supports_non_stop (void)
4813{
4814 return 1;
4815}
4816
d90e17a7
PA
4817/* True if we want to support multi-process. To be removed when GDB
4818 supports multi-exec. */
4819
2277426b 4820int linux_multi_process = 1;
d90e17a7
PA
4821
4822static int
4823linux_nat_supports_multi_process (void)
4824{
4825 return linux_multi_process;
4826}
4827
03583c20
UW
4828static int
4829linux_nat_supports_disable_randomization (void)
4830{
4831#ifdef HAVE_PERSONALITY
4832 return 1;
4833#else
4834 return 0;
4835#endif
4836}
4837
b84876c2
PA
4838static int async_terminal_is_ours = 1;
4839
4840/* target_terminal_inferior implementation. */
4841
4842static void
4843linux_nat_terminal_inferior (void)
4844{
4845 if (!target_is_async_p ())
4846 {
4847 /* Async mode is disabled. */
4848 terminal_inferior ();
4849 return;
4850 }
4851
b84876c2
PA
4852 terminal_inferior ();
4853
d9d2d8b6 4854 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4855 if (!async_terminal_is_ours)
4856 return;
4857
4858 delete_file_handler (input_fd);
4859 async_terminal_is_ours = 0;
4860 set_sigint_trap ();
4861}
4862
4863/* target_terminal_ours implementation. */
4864
2c0b251b 4865static void
b84876c2
PA
4866linux_nat_terminal_ours (void)
4867{
4868 if (!target_is_async_p ())
4869 {
4870 /* Async mode is disabled. */
4871 terminal_ours ();
4872 return;
4873 }
4874
4875 /* GDB should never give the terminal to the inferior if the
4876 inferior is running in the background (run&, continue&, etc.),
4877 but claiming it sure should. */
4878 terminal_ours ();
4879
b84876c2
PA
4880 if (async_terminal_is_ours)
4881 return;
4882
4883 clear_sigint_trap ();
4884 add_file_handler (input_fd, stdin_event_handler, 0);
4885 async_terminal_is_ours = 1;
4886}
4887
4888static void (*async_client_callback) (enum inferior_event_type event_type,
4889 void *context);
4890static void *async_client_context;
4891
7feb7d06
PA
4892/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4893 it lets us notice when any child changes state and notify the
4894 event loop; and it allows us to use sigsuspend in linux_nat_wait_1
4895 above to wait for the arrival of a SIGCHLD. */
4896
b84876c2 4897static void
7feb7d06 4898sigchld_handler (int signo)
b84876c2 4899{
7feb7d06
PA
4900 int old_errno = errno;
4901
01124a23
DE
4902 if (debug_linux_nat)
4903 ui_file_write_async_safe (gdb_stdlog,
4904 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4905
4906 if (signo == SIGCHLD
4907 && linux_nat_event_pipe[0] != -1)
4908 async_file_mark (); /* Let the event loop know that there are
4909 events to handle. */
4910
4911 errno = old_errno;
4912}
4913
4914/* Callback registered with the target events file descriptor. */
4915
4916static void
4917handle_target_event (int error, gdb_client_data client_data)
4918{
4919 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4920}
4921
4922/* Create/destroy the target events pipe. Returns previous state. */
4923
4924static int
4925linux_async_pipe (int enable)
4926{
4927 int previous = (linux_nat_event_pipe[0] != -1);
4928
4929 if (previous != enable)
4930 {
4931 sigset_t prev_mask;
4932
4933 block_child_signals (&prev_mask);
4934
4935 if (enable)
4936 {
4937 if (pipe (linux_nat_event_pipe) == -1)
4938 internal_error (__FILE__, __LINE__,
4939 "creating event pipe failed.");
4940
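 /* Make both ends non-blocking so that marking the pipe from the
    SIGCHLD handler and draining it from the event loop never
    block. */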
4941 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4942 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4943 }
4944 else
4945 {
4946 close (linux_nat_event_pipe[0]);
4947 close (linux_nat_event_pipe[1]);
4948 linux_nat_event_pipe[0] = -1;
4949 linux_nat_event_pipe[1] = -1;
4950 }
4951
4952 restore_child_signals_mask (&prev_mask);
4953 }
4954
4955 return previous;
b84876c2
PA
4956}
4957
4958/* target_async implementation. */
4959
4960static void
4961linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4962 void *context), void *context)
4963{
b84876c2
PA
4964 if (callback != NULL)
4965 {
4966 async_client_callback = callback;
4967 async_client_context = context;
7feb7d06
PA
4968 if (!linux_async_pipe (1))
4969 {
4970 add_file_handler (linux_nat_event_pipe[0],
4971 handle_target_event, NULL);
4972 /* There may be pending events to handle. Tell the event loop
4973 to poll them. */
4974 async_file_mark ();
4975 }
b84876c2
PA
4976 }
4977 else
4978 {
4979 async_client_callback = callback;
4980 async_client_context = context;
b84876c2 4981 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4982 linux_async_pipe (0);
b84876c2
PA
4983 }
4984 return;
4985}
4986
a493e3e2 4987/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4988 event came out. */
4989
4c28f408 4990static int
252fbfc8 4991linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4992{
d90e17a7 4993 if (!lwp->stopped)
252fbfc8 4994 {
d90e17a7 4995 ptid_t ptid = lwp->ptid;
252fbfc8 4996
d90e17a7
PA
4997 if (debug_linux_nat)
4998 fprintf_unfiltered (gdb_stdlog,
4999 "LNSL: running -> suspending %s\n",
5000 target_pid_to_str (lwp->ptid));
252fbfc8 5001
252fbfc8 5002
25289eb2
PA
5003 if (lwp->last_resume_kind == resume_stop)
5004 {
5005 if (debug_linux_nat)
5006 fprintf_unfiltered (gdb_stdlog,
5007 "linux-nat: already stopping LWP %ld at "
5008 "GDB's request\n",
5009 ptid_get_lwp (lwp->ptid));
5010 return 0;
5011 }
252fbfc8 5012
25289eb2
PA
5013 stop_callback (lwp, NULL);
5014 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
5015 }
5016 else
5017 {
5018 /* Already known to be stopped; do nothing. */
252fbfc8 5019
d90e17a7
PA
5020 if (debug_linux_nat)
5021 {
e09875d4 5022 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5023 fprintf_unfiltered (gdb_stdlog,
5024 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5025 target_pid_to_str (lwp->ptid));
5026 else
3e43a32a
MS
5027 fprintf_unfiltered (gdb_stdlog,
5028 "LNSL: already stopped/no "
5029 "stop_requested yet %s\n",
d90e17a7 5030 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5031 }
5032 }
4c28f408
PA
5033 return 0;
5034}
5035
5036static void
5037linux_nat_stop (ptid_t ptid)
5038{
5039 if (non_stop)
d90e17a7 5040 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5041 else
5042 linux_ops->to_stop (ptid);
5043}

static void
linux_nat_close (int quitting)
{
  /* Unregister from the event loop.  */
  if (linux_nat_is_async_p ())
    linux_nat_async (NULL, 0);

  if (linux_ops->to_close)
    linux_ops->to_close (quitting);
}

/* When requests are passed down from the linux-nat layer to the
   single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
   used.  The address space pointer is stored in the inferior object,
   but the common code that is passed such a ptid can't tell whether
   lwpid is a "main" process id or not (it assumes so).  We reverse
   look up the "main" process id from the lwp here.  */

static struct address_space *
linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
{
  struct lwp_info *lwp;
  struct inferior *inf;
  int pid;

  pid = GET_LWP (ptid);
  if (GET_LWP (ptid) == 0)
    {
      /* An (lwpid,0,0) ptid.  Look up the lwp object to get at the
         tgid.  */
      lwp = find_lwp_pid (ptid);
      pid = GET_PID (lwp->ptid);
    }
  else
    {
      /* A (pid,lwpid,0) ptid.  */
      pid = GET_PID (ptid);
    }

  inf = find_inferior_pid (pid);
  gdb_assert (inf != NULL);
  return inf->aspace;
}
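
/* Worked example (hypothetical numbers): for a process whose tgid is
   1000 with an extra thread whose lwpid is 1001, the single-threaded
   layer below may hand us the bare ptid (1001, 0, 0).  find_lwp_pid
   maps that back to the lwp's full ptid (1000, 1001, 0), so the
   "main" process id recovered above is 1000 and the address space
   returned is that of inferior 1000.  A ptid that already carries an
   lwp field, such as (1000, 1001, 0), takes the else branch
   directly.  */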

/* Return the cached value of the processor core for thread PTID.  */

static int
linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
{
  struct lwp_info *info = find_lwp_pid (ptid);

  if (info)
    return info->core;
  return -1;
}

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
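
/* Illustrative sketch (hypothetical, not part of this file): an
   architecture-specific native file typically builds on this layer by
   taking the basic ptrace target from linux_target (), overriding the
   register access methods, and handing the result to
   linux_nat_add_target.  The foo_* names below are placeholders for
   the architecture's own functions:

       void
       _initialize_foo_linux_nat (void)
       {
         struct target_ops *t = linux_target ();

         t->to_fetch_registers = foo_linux_fetch_inferior_registers;
         t->to_store_registers = foo_linux_store_inferior_registers;

         linux_nat_add_target (t);
         linux_nat_set_new_thread (t, foo_linux_new_thread);
       }

   linux_nat_add_target then layers the multi-threading, non-stop and
   async methods above on top of whatever T already provides.  */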

/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t,
                          void (*new_thread) (struct lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}

/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
                             int (*siginfo_fixup) (siginfo_t *,
                                                   gdb_byte *,
                                                   int))
{
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}
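
/* Sketch of a fixup callback (illustrative; the usual motivation is a
   64-bit debugger controlling a 32-bit inferior, where the two siginfo
   layouts differ).  Every name below other than the parameter types is
   a placeholder, and the meaning of DIRECTION is only the convention
   assumed here:

       static int
       foo_linux_siginfo_fixup (siginfo_t *native, gdb_byte *inf,
                                int direction)
       {
         if (!foo_inferior_needs_conversion ())
           return 0;

         if (direction == 0)
           foo_siginfo_native_to_inferior (native, inf);
         else
           foo_siginfo_inferior_to_native (inf, native);
         return 1;
       }

   Returning nonzero tells the caller that the conversion was done;
   returning zero typically means the layouts match and a straight
   copy suffices.  */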

/* Register a method to call prior to resuming a thread.  */

void
linux_nat_set_prepare_to_resume (struct target_ops *t,
                                 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}

/* Return the saved siginfo associated with PTID.  */
siginfo_t *
linux_nat_get_siginfo (ptid_t ptid)
{
  struct lwp_info *lp = find_lwp_pid (ptid);

  gdb_assert (lp != NULL);

  return &lp->siginfo;
}
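
/* Usage sketch (illustrative): architecture code that needs the raw
   signal details of the last stop can fetch them like this, assuming
   the thread of interest is inferior_ptid and ADDR is some CORE_ADDR
   variable:

       siginfo_t *sip = linux_nat_get_siginfo (inferior_ptid);

       if (sip->si_signo == SIGTRAP)
         addr = (CORE_ADDR) (uintptr_t) sip->si_addr;

   si_signo, si_code and si_addr are standard siginfo_t members; which
   of them carry useful information for a given stop depends on the
   architecture and on si_code.  */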

/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

void
_initialize_linux_nat (void)
{
  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
                            &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
                            NULL,
                            show_debug_linux_nat,
                            &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}
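
/* Illustrative note: suspend_mask is computed above as the current
   signal mask with SIGCHLD deleted.  Passing it to sigsuspend, e.g.

       sigsuspend (&suspend_mask);

   atomically unblocks SIGCHLD only for the duration of the sleep, so
   the SIGCHLD handler installed here can wake the wait while SIGCHLD
   stays blocked everywhere else.  */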
\f

/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
                          sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}
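
/* Worked example (typical glibc on GNU/Linux; exact numbers can vary):
   when the inferior does not define __pthread_sig_restart and
   __pthread_sig_cancel, the defaults above resolve to the first two
   real-time signals, i.e. restart = __SIGRTMIN (usually 32) and
   cancel = __SIGRTMIN + 1 (usually 33), which is what LinuxThreads
   picks when RT signals are available.  */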