gdb/linux-nat.c
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

20#include "defs.h"
21#include "inferior.h"
22#include "target.h"
d6b0e80f 23#include "gdb_string.h"
3993f6b1 24#include "gdb_wait.h"
d6b0e80f
AC
25#include "gdb_assert.h"
26#ifdef HAVE_TKILL_SYSCALL
27#include <unistd.h>
28#include <sys/syscall.h>
29#endif
3993f6b1 30#include <sys/ptrace.h>
0274a8ce 31#include "linux-nat.h"
af96c192 32#include "linux-ptrace.h"
13da1c97 33#include "linux-procfs.h"
ac264b3b 34#include "linux-fork.h"
d6b0e80f
AC
35#include "gdbthread.h"
36#include "gdbcmd.h"
37#include "regcache.h"
4f844a66 38#include "regset.h"
dab06dbe 39#include "inf-child.h"
10d6c8cd
DJ
40#include "inf-ptrace.h"
41#include "auxv.h"
dba24537 42#include <sys/param.h> /* for MAXPATHLEN */
1777feb0 43#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
44#include "elf-bfd.h" /* for elfcore_write_* */
45#include "gregset.h" /* for gregset */
46#include "gdbcore.h" /* for get_exec_file */
47#include <ctype.h> /* for isdigit */
1777feb0 48#include "gdbthread.h" /* for struct thread_info etc. */
dba24537
AC
49#include "gdb_stat.h" /* for struct stat */
50#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
51#include "inf-loop.h"
52#include "event-loop.h"
53#include "event-top.h"
07e059b5
VP
54#include <pwd.h>
55#include <sys/types.h>
56#include "gdb_dirent.h"
57#include "xml-support.h"
191c4426 58#include "terminal.h"
efcbbd14 59#include <sys/vfs.h>
6c95b8df 60#include "solib.h"
d26e3629 61#include "linux-osdata.h"
6432734d 62#include "linux-tdep.h"
7dcd53a0 63#include "symfile.h"
5808517f
YQ
64#include "agent.h"
65#include "tracepoint.h"
87b0bb13
JK
66#include "exceptions.h"
67#include "linux-ptrace.h"
68#include "buffer.h"
6ecd4729 69#include "target-descriptions.h"
efcbbd14
UW
70
71#ifndef SPUFS_MAGIC
72#define SPUFS_MAGIC 0x23c9b64e
73#endif
dba24537 74
10568435
JK
75#ifdef HAVE_PERSONALITY
76# include <sys/personality.h>
77# if !HAVE_DECL_ADDR_NO_RANDOMIZE
78# define ADDR_NO_RANDOMIZE 0x0040000
79# endif
80#endif /* HAVE_PERSONALITY */
81
/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.
Prior to version 2.4, Linux can either wait for events in the main
thread, or in secondary threads.  (2.4 has the __WALL flag).  So, if we
use blocking waitpid, we might miss an event.  The solution is to use
non-blocking waitpid, together with sigsuspend.  First, we use
non-blocking waitpid to get an event in the main process, if any.
Second, we use non-blocking waitpid with the __WCLONE flag to check for
events in cloned processes.  If nothing is found, we use sigsuspend to
wait for SIGCHLD.  When SIGCHLD arrives, it means something happened to
a child process -- and SIGCHLD will be delivered both for events in the
main debugged process and in cloned processes.  As soon as we know
there's an event, we get back to calling non-blocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to be
delivered, so that we can intercept it.  SIGSTOP's advantage is that it
cannot be blocked.  A disadvantage is that it is not a real-time
signal, so it can only be queued once; we do not keep track of other
sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */
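
/* The scheme above can be hard to visualize from prose alone.  The
   following is a minimal, illustrative sketch of the sync-mode wait
   loop it describes; it is NOT the real implementation (that lives in
   linux_nat_wait and its helpers further below), and the function
   name "sync_wait_sketch" is purely hypothetical.  */
#if 0
static int
sync_wait_sketch (sigset_t *suspend_mask_p)
{
  int status;
  pid_t pid;

  for (;;)
    {
      /* Non-blocking check for an event in the main process...  */
      pid = waitpid (-1, &status, WNOHANG);
      if (pid > 0)
	return pid;

      /* ... and in cloned processes (threads).  */
      pid = waitpid (-1, &status, __WCLONE | WNOHANG);
      if (pid > 0)
	return pid;

      /* Nothing yet: sleep until SIGCHLD.  SIGCHLD is blocked outside
	 this call, so one that arrived in between is already pending
	 and sigsuspend returns immediately.  */
      sigsuspend (suspend_mask_p);
    }
}
#endif
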
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

10d6c8cd
DJ
179/* The single-threaded native GNU/Linux target_ops. We save a pointer for
180 the use of the multi-threaded target. */
181static struct target_ops *linux_ops;
f973ed9c 182static struct target_ops linux_ops_saved;
10d6c8cd 183
9f0bdab8 184/* The method to call, if any, when a new thread is attached. */
7b50312a
PA
185static void (*linux_nat_new_thread) (struct lwp_info *);
186
187/* Hook to call prior to resuming a thread. */
188static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
9f0bdab8 189
5b009018
PA
190/* The method to call, if any, when the siginfo object needs to be
191 converted between the layout returned by ptrace, and the layout in
192 the architecture of the inferior. */
a5362b9a 193static int (*linux_nat_siginfo_fixup) (siginfo_t *,
5b009018
PA
194 gdb_byte *,
195 int);
196
ac264b3b
MS
197/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
198 Called by our to_xfer_partial. */
199static LONGEST (*super_xfer_partial) (struct target_ops *,
200 enum target_object,
201 const char *, gdb_byte *,
202 const gdb_byte *,
10d6c8cd
DJ
203 ULONGEST, LONGEST);
204
ccce17b0 205static unsigned int debug_linux_nat;
920d2a44
AC
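/* "show" hook for the debug_linux_nat setting above: report whether
   debugging output for the GNU/Linux LWP module is enabled.  */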
206static void
207show_debug_linux_nat (struct ui_file *file, int from_tty,
208 struct cmd_list_element *c, const char *value)
209{
210 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
211 value);
212}
d6b0e80f 213
ae087d01
DJ
214struct simple_pid_list
215{
216 int pid;
3d799a95 217 int status;
ae087d01
DJ
218 struct simple_pid_list *next;
219};
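/* PIDs for which we have seen a stop reported by waitpid but have not
   yet otherwise processed (e.g. new threads whose PTRACE_EVENT_CLONE
   is still pending); see lin_lwp_attach_lwp below.  */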
220struct simple_pid_list *stopped_pids;
221
3993f6b1
DJ
222/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
223 can not be used, 1 if it can. */
224
225static int linux_supports_tracefork_flag = -1;
226
3e43a32a
MS
227/* This variable is a tri-state flag: -1 for unknown, 0 if
228 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
a96d9b2e
SDJ
229
230static int linux_supports_tracesysgood_flag = -1;
231
9016a515
DJ
232/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
233 PTRACE_O_TRACEVFORKDONE. */
234
235static int linux_supports_tracevforkdone_flag = -1;
236
a96d9b2e
SDJ
237/* Stores the current used ptrace() options. */
238static int current_ptrace_options = 0;
239
3dd5b83d
PA
240/* Async mode support. */
241
b84876c2
PA
242/* The read/write ends of the pipe registered as waitable file in the
243 event loop. */
244static int linux_nat_event_pipe[2] = { -1, -1 };
245
7feb7d06 246/* Flush the event pipe. */
b84876c2 247
7feb7d06
PA
248static void
249async_file_flush (void)
b84876c2 250{
7feb7d06
PA
251 int ret;
252 char buf;
b84876c2 253
7feb7d06 254 do
b84876c2 255 {
7feb7d06 256 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 257 }
7feb7d06 258 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
259}
260
7feb7d06
PA
261/* Put something (anything, doesn't matter what, or how much) in event
262 pipe, so that the select/poll in the event-loop realizes we have
263 something to process. */
252fbfc8 264
b84876c2 265static void
7feb7d06 266async_file_mark (void)
b84876c2 267{
7feb7d06 268 int ret;
b84876c2 269
7feb7d06
PA
270 /* It doesn't really matter what the pipe contains, as long we end
271 up with something in it. Might as well flush the previous
272 left-overs. */
273 async_file_flush ();
b84876c2 274
7feb7d06 275 do
b84876c2 276 {
7feb7d06 277 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 278 }
7feb7d06 279 while (ret == -1 && errno == EINTR);
b84876c2 280
7feb7d06
PA
281 /* Ignore EAGAIN. If the pipe is full, the event loop will already
282 be awakened anyway. */
b84876c2
PA
283}
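
/* For reference, a minimal sketch of the SIGCHLD side of the
   self-pipe trick described in the file comment.  The real handler is
   installed elsewhere in this file (not shown in this excerpt); the
   name "example_sigchld_handler" is hypothetical.  */
#if 0
static void
example_sigchld_handler (int signo)
{
  /* Only async-signal-safe calls are allowed in a signal handler;
     write(2), which async_file_mark boils down to, qualifies.
     Marking the pipe wakes up the select/poll in the event loop.  */
  async_file_mark ();
}
#endif
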
284
7feb7d06 285static void linux_nat_async (void (*callback)
3e43a32a
MS
286 (enum inferior_event_type event_type,
287 void *context),
7feb7d06 288 void *context);
7feb7d06
PA
289static int kill_lwp (int lwpid, int signo);
290
291static int stop_callback (struct lwp_info *lp, void *data);
292
293static void block_child_signals (sigset_t *prev_mask);
294static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
295
296struct lwp_info;
297static struct lwp_info *add_lwp (ptid_t ptid);
298static void purge_lwp_list (int pid);
4403d8e9 299static void delete_lwp (ptid_t ptid);
2277426b
PA
300static struct lwp_info *find_lwp_pid (ptid_t ptid);
301
ae087d01
DJ
302\f
303/* Trivial list manipulation functions to keep track of a list of
304 new stopped processes. */
305static void
3d799a95 306add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
307{
308 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 309
ae087d01 310 new_pid->pid = pid;
3d799a95 311 new_pid->status = status;
ae087d01
DJ
312 new_pid->next = *listp;
313 *listp = new_pid;
314}
315
84636d28
PA
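/* Return non-zero if PID is present in LIST.  */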
316static int
317in_pid_list_p (struct simple_pid_list *list, int pid)
318{
319 struct simple_pid_list *p;
320
321 for (p = list; p != NULL; p = p->next)
322 if (p->pid == pid)
323 return 1;
324 return 0;
325}
326
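/* If PID is in *LISTP, remove it, store its cached wait status in
   *STATUSP, and return 1.  Otherwise return 0 and leave *STATUSP
   untouched.  */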
ae087d01 327static int
46a96992 328pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
329{
330 struct simple_pid_list **p;
331
332 for (p = listp; *p != NULL; p = &(*p)->next)
333 if ((*p)->pid == pid)
334 {
335 struct simple_pid_list *next = (*p)->next;
e0881a8e 336
46a96992 337 *statusp = (*p)->status;
ae087d01
DJ
338 xfree (*p);
339 *p = next;
340 return 1;
341 }
342 return 0;
343}
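
/* Usage sketch for the helpers above, with hypothetical values: a stop
   observed early for LWP 1234 is cached, then later claimed.  */
#if 0
static void
simple_pid_list_example (void)
{
  int status;

  add_to_pid_list (&stopped_pids, 1234, W_STOPCODE (SIGSTOP));
  if (in_pid_list_p (stopped_pids, 1234)
      && pull_pid_from_list (&stopped_pids, 1234, &status))
    {
      /* STATUS now holds the wait status cached for LWP 1234.  */
    }
}
#endif
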
344
3993f6b1
DJ
345\f
346/* A helper function for linux_test_for_tracefork, called after fork (). */
347
348static void
349linux_tracefork_child (void)
350{
3993f6b1
DJ
351 ptrace (PTRACE_TRACEME, 0, 0, 0);
352 kill (getpid (), SIGSTOP);
353 fork ();
48bb3cce 354 _exit (0);
3993f6b1
DJ
355}
356
7feb7d06 357/* Wrapper function for waitpid which handles EINTR. */
b957e937
DJ
358
359static int
46a96992 360my_waitpid (int pid, int *statusp, int flags)
b957e937
DJ
361{
362 int ret;
b84876c2 363
b957e937
DJ
364 do
365 {
46a96992 366 ret = waitpid (pid, statusp, flags);
b957e937
DJ
367 }
368 while (ret == -1 && errno == EINTR);
369
370 return ret;
371}
372
373/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
374
375 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
376 we know that the feature is not available. This may change the tracing
377 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
378
379 However, if it succeeds, we don't know for sure that the feature is
380 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 381 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
382 fork tracing, and let it fork. If the process exits, we assume that we
383 can't use TRACEFORK; if we get the fork notification, and we can extract
384 the new child's PID, then we assume that we can. */
3993f6b1
DJ
385
386static void
b957e937 387linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
388{
389 int child_pid, ret, status;
390 long second_pid;
7feb7d06 391 sigset_t prev_mask;
4c28f408 392
7feb7d06
PA
393 /* We don't want those ptrace calls to be interrupted. */
394 block_child_signals (&prev_mask);
3993f6b1 395
b957e937
DJ
396 linux_supports_tracefork_flag = 0;
397 linux_supports_tracevforkdone_flag = 0;
398
399 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
400 if (ret != 0)
7feb7d06
PA
401 {
402 restore_child_signals_mask (&prev_mask);
403 return;
404 }
b957e937 405
3993f6b1
DJ
406 child_pid = fork ();
407 if (child_pid == -1)
e2e0b3e5 408 perror_with_name (("fork"));
3993f6b1
DJ
409
410 if (child_pid == 0)
411 linux_tracefork_child ();
412
b957e937 413 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 414 if (ret == -1)
e2e0b3e5 415 perror_with_name (("waitpid"));
3993f6b1 416 else if (ret != child_pid)
8a3fe4f8 417 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 418 if (! WIFSTOPPED (status))
3e43a32a
MS
419 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
420 status);
3993f6b1 421
3993f6b1
DJ
422 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
423 if (ret != 0)
424 {
b957e937
DJ
425 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
426 if (ret != 0)
427 {
8a3fe4f8 428 warning (_("linux_test_for_tracefork: failed to kill child"));
7feb7d06 429 restore_child_signals_mask (&prev_mask);
b957e937
DJ
430 return;
431 }
432
433 ret = my_waitpid (child_pid, &status, 0);
434 if (ret != child_pid)
3e43a32a
MS
435 warning (_("linux_test_for_tracefork: failed "
436 "to wait for killed child"));
b957e937 437 else if (!WIFSIGNALED (status))
3e43a32a
MS
438 warning (_("linux_test_for_tracefork: unexpected "
439 "wait status 0x%x from killed child"), status);
b957e937 440
7feb7d06 441 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
442 return;
443 }
444
9016a515
DJ
445 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
446 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
447 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
448 linux_supports_tracevforkdone_flag = (ret == 0);
449
b957e937
DJ
450 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
451 if (ret != 0)
8a3fe4f8 452 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
453
454 ret = my_waitpid (child_pid, &status, 0);
455
3993f6b1
DJ
456 if (ret == child_pid && WIFSTOPPED (status)
457 && status >> 16 == PTRACE_EVENT_FORK)
458 {
459 second_pid = 0;
460 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
461 if (ret == 0 && second_pid != 0)
462 {
463 int second_status;
464
465 linux_supports_tracefork_flag = 1;
b957e937
DJ
466 my_waitpid (second_pid, &second_status, 0);
467 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
468 if (ret != 0)
3e43a32a
MS
469 warning (_("linux_test_for_tracefork: "
470 "failed to kill second child"));
97725dc4 471 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
472 }
473 }
b957e937 474 else
8a3fe4f8
AC
475 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
476 "(%d, status 0x%x)"), ret, status);
3993f6b1 477
b957e937
DJ
478 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
479 if (ret != 0)
8a3fe4f8 480 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 481 my_waitpid (child_pid, &status, 0);
4c28f408 482
7feb7d06 483 restore_child_signals_mask (&prev_mask);
3993f6b1
DJ
484}
485
a96d9b2e
SDJ
486/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
487
488 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
489 we know that the feature is not available. This may change the tracing
490 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
491
492static void
493linux_test_for_tracesysgood (int original_pid)
494{
495 int ret;
496 sigset_t prev_mask;
497
498 /* We don't want those ptrace calls to be interrupted. */
499 block_child_signals (&prev_mask);
500
501 linux_supports_tracesysgood_flag = 0;
502
503 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
504 if (ret != 0)
505 goto out;
506
507 linux_supports_tracesysgood_flag = 1;
508out:
509 restore_child_signals_mask (&prev_mask);
510}
511
/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */
514
515static int
516linux_supports_tracesysgood (int pid)
517{
518 if (linux_supports_tracesysgood_flag == -1)
519 linux_test_for_tracesysgood (pid);
520 return linux_supports_tracesysgood_flag;
521}
522
3993f6b1
DJ
523/* Return non-zero iff we have tracefork functionality available.
524 This function also sets linux_supports_tracefork_flag. */
525
526static int
b957e937 527linux_supports_tracefork (int pid)
3993f6b1
DJ
528{
529 if (linux_supports_tracefork_flag == -1)
b957e937 530 linux_test_for_tracefork (pid);
3993f6b1
DJ
531 return linux_supports_tracefork_flag;
532}
533
9016a515 534static int
b957e937 535linux_supports_tracevforkdone (int pid)
9016a515
DJ
536{
537 if (linux_supports_tracefork_flag == -1)
b957e937 538 linux_test_for_tracefork (pid);
9016a515
DJ
539 return linux_supports_tracevforkdone_flag;
540}
541
a96d9b2e
SDJ
542static void
543linux_enable_tracesysgood (ptid_t ptid)
544{
545 int pid = ptid_get_lwp (ptid);
546
547 if (pid == 0)
548 pid = ptid_get_pid (ptid);
549
550 if (linux_supports_tracesysgood (pid) == 0)
551 return;
552
553 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
554
555 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
556}
557
3993f6b1 558\f
4de4c07c
DJ
559void
560linux_enable_event_reporting (ptid_t ptid)
561{
d3587048 562 int pid = ptid_get_lwp (ptid);
4de4c07c 563
d3587048
DJ
564 if (pid == 0)
565 pid = ptid_get_pid (ptid);
566
b957e937 567 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
568 return;
569
a96d9b2e
SDJ
570 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
571 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
572
b957e937 573 if (linux_supports_tracevforkdone (pid))
a96d9b2e 574 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
9016a515
DJ
575
576 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
577 read-only process state. */
4de4c07c 578
a96d9b2e 579 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
4de4c07c
DJ
580}
581
6d8fd2b7
UW
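/* target_ops hook called after attaching to PID: enable extended
   ptrace event reporting and syscall tracing for the new inferior.  */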
582static void
583linux_child_post_attach (int pid)
4de4c07c
DJ
584{
585 linux_enable_event_reporting (pid_to_ptid (pid));
a96d9b2e 586 linux_enable_tracesysgood (pid_to_ptid (pid));
aa7c7447 587 linux_ptrace_init_warnings ();
4de4c07c
DJ
588}
589
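/* target_ops hook called after starting a fresh inferior: same setup
   as linux_child_post_attach, keyed by PTID.  */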
10d6c8cd 590static void
4de4c07c
DJ
591linux_child_post_startup_inferior (ptid_t ptid)
592{
593 linux_enable_event_reporting (ptid);
a96d9b2e 594 linux_enable_tracesysgood (ptid);
aa7c7447 595 linux_ptrace_init_warnings ();
4de4c07c
DJ
596}
597
4403d8e9
JK
598/* Return the number of known LWPs in the tgid given by PID. */
599
600static int
601num_lwps (int pid)
602{
603 int count = 0;
604 struct lwp_info *lp;
605
606 for (lp = lwp_list; lp; lp = lp->next)
607 if (ptid_get_pid (lp->ptid) == pid)
608 count++;
609
610 return count;
611}
612
613/* Call delete_lwp with prototype compatible for make_cleanup. */
614
615static void
616delete_lwp_cleanup (void *lp_voidp)
617{
618 struct lwp_info *lp = lp_voidp;
619
620 delete_lwp (lp->ptid);
621}
622
6d8fd2b7
UW
623static int
624linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 625{
7feb7d06 626 sigset_t prev_mask;
9016a515 627 int has_vforked;
4de4c07c
DJ
628 int parent_pid, child_pid;
629
7feb7d06 630 block_child_signals (&prev_mask);
b84876c2 631
e58b0e63
PA
632 has_vforked = (inferior_thread ()->pending_follow.kind
633 == TARGET_WAITKIND_VFORKED);
634 parent_pid = ptid_get_lwp (inferior_ptid);
d3587048 635 if (parent_pid == 0)
e58b0e63
PA
636 parent_pid = ptid_get_pid (inferior_ptid);
637 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
4de4c07c 638
2277426b
PA
639 if (!detach_fork)
640 linux_enable_event_reporting (pid_to_ptid (child_pid));
641
6c95b8df
PA
642 if (has_vforked
643 && !non_stop /* Non-stop always resumes both branches. */
644 && (!target_is_async_p () || sync_execution)
645 && !(follow_child || detach_fork || sched_multi))
646 {
647 /* The parent stays blocked inside the vfork syscall until the
648 child execs or exits. If we don't let the child run, then
649 the parent stays blocked. If we're telling the parent to run
650 in the foreground, the user will not be able to ctrl-c to get
651 back the terminal, effectively hanging the debug session. */
ac74f770
MS
652 fprintf_filtered (gdb_stderr, _("\
653Can not resume the parent process over vfork in the foreground while\n\
654holding the child stopped. Try \"set detach-on-fork\" or \
655\"set schedule-multiple\".\n"));
656 /* FIXME output string > 80 columns. */
6c95b8df
PA
657 return 1;
658 }
659
4de4c07c
DJ
660 if (! follow_child)
661 {
6c95b8df 662 struct lwp_info *child_lp = NULL;
4de4c07c 663
1777feb0 664 /* We're already attached to the parent, by default. */
4de4c07c 665
ac264b3b
MS
666 /* Detach new forked process? */
667 if (detach_fork)
f75c00e4 668 {
4403d8e9
JK
669 struct cleanup *old_chain;
670
6c95b8df
PA
671 /* Before detaching from the child, remove all breakpoints
672 from it. If we forked, then this has already been taken
673 care of by infrun.c. If we vforked however, any
674 breakpoint inserted in the parent is visible in the
675 child, even those added while stopped in a vfork
676 catchpoint. This will remove the breakpoints from the
677 parent also, but they'll be reinserted below. */
678 if (has_vforked)
679 {
680 /* keep breakpoints list in sync. */
681 remove_breakpoints_pid (GET_PID (inferior_ptid));
682 }
683
e85a822c 684 if (info_verbose || debug_linux_nat)
ac264b3b
MS
685 {
686 target_terminal_ours ();
687 fprintf_filtered (gdb_stdlog,
3e43a32a
MS
688 "Detaching after fork from "
689 "child process %d.\n",
ac264b3b
MS
690 child_pid);
691 }
4de4c07c 692
4403d8e9
JK
693 old_chain = save_inferior_ptid ();
694 inferior_ptid = ptid_build (child_pid, child_pid, 0);
695
696 child_lp = add_lwp (inferior_ptid);
697 child_lp->stopped = 1;
698 child_lp->last_resume_kind = resume_stop;
699 make_cleanup (delete_lwp_cleanup, child_lp);
700
701 /* CHILD_LP has new PID, therefore linux_nat_new_thread is not called for it.
702 See i386_inferior_data_get for the Linux kernel specifics.
703 Ensure linux_nat_prepare_to_resume will reset the hardware debug
704 registers. It is done by the linux_nat_new_thread call, which is
705 being skipped in add_lwp above for the first lwp of a pid. */
706 gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
707 if (linux_nat_new_thread != NULL)
708 linux_nat_new_thread (child_lp);
709
710 if (linux_nat_prepare_to_resume != NULL)
711 linux_nat_prepare_to_resume (child_lp);
ac264b3b 712 ptrace (PTRACE_DETACH, child_pid, 0, 0);
4403d8e9
JK
713
714 do_cleanups (old_chain);
ac264b3b
MS
715 }
716 else
717 {
77435e4c 718 struct inferior *parent_inf, *child_inf;
2277426b 719 struct cleanup *old_chain;
7f9f62ba
PA
720
721 /* Add process to GDB's tables. */
77435e4c
PA
722 child_inf = add_inferior (child_pid);
723
e58b0e63 724 parent_inf = current_inferior ();
77435e4c 725 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 726 copy_terminal_info (child_inf, parent_inf);
6ecd4729
PA
727 child_inf->gdbarch = parent_inf->gdbarch;
728 copy_inferior_target_desc_info (child_inf, parent_inf);
7f9f62ba 729
2277426b 730 old_chain = save_inferior_ptid ();
6c95b8df 731 save_current_program_space ();
2277426b
PA
732
733 inferior_ptid = ptid_build (child_pid, child_pid, 0);
734 add_thread (inferior_ptid);
6c95b8df
PA
735 child_lp = add_lwp (inferior_ptid);
736 child_lp->stopped = 1;
25289eb2 737 child_lp->last_resume_kind = resume_stop;
7dcd53a0 738 child_inf->symfile_flags = SYMFILE_NO_READ;
2277426b 739
6c95b8df
PA
740 /* If this is a vfork child, then the address-space is
741 shared with the parent. */
742 if (has_vforked)
743 {
744 child_inf->pspace = parent_inf->pspace;
745 child_inf->aspace = parent_inf->aspace;
746
747 /* The parent will be frozen until the child is done
748 with the shared region. Keep track of the
749 parent. */
750 child_inf->vfork_parent = parent_inf;
751 child_inf->pending_detach = 0;
752 parent_inf->vfork_child = child_inf;
753 parent_inf->pending_detach = 0;
754 }
755 else
756 {
757 child_inf->aspace = new_address_space ();
758 child_inf->pspace = add_program_space (child_inf->aspace);
759 child_inf->removable = 1;
760 set_current_program_space (child_inf->pspace);
761 clone_program_space (child_inf->pspace, parent_inf->pspace);
762
763 /* Let the shared library layer (solib-svr4) learn about
764 this new process, relocate the cloned exec, pull in
765 shared libraries, and install the solib event
766 breakpoint. If a "cloned-VM" event was propagated
767 better throughout the core, this wouldn't be
768 required. */
268a4a75 769 solib_create_inferior_hook (0);
6c95b8df
PA
770 }
771
772 /* Let the thread_db layer learn about this new process. */
2277426b
PA
773 check_for_thread_db ();
774
775 do_cleanups (old_chain);
ac264b3b 776 }
9016a515
DJ
777
778 if (has_vforked)
779 {
3ced3da4 780 struct lwp_info *parent_lp;
6c95b8df
PA
781 struct inferior *parent_inf;
782
783 parent_inf = current_inferior ();
784
	  /* If we detached from the child, then we have to be careful
	     to not insert breakpoints in the parent until the child
	     is done with the shared memory region.  However, if we're
	     staying attached to the child, then we can and should
	     insert breakpoints, so that we can debug it.  A
	     subsequent child exec or exit is enough to know when the
	     child stops using the parent's address space.  */
792 parent_inf->waiting_for_vfork_done = detach_fork;
56710373 793 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
6c95b8df 794
3ced3da4 795 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
b957e937 796 gdb_assert (linux_supports_tracefork_flag >= 0);
3ced3da4 797
b957e937 798 if (linux_supports_tracevforkdone (0))
9016a515 799 {
6c95b8df
PA
800 if (debug_linux_nat)
801 fprintf_unfiltered (gdb_stdlog,
802 "LCFF: waiting for VFORK_DONE on %d\n",
803 parent_pid);
3ced3da4 804 parent_lp->stopped = 1;
9016a515 805
6c95b8df
PA
806 /* We'll handle the VFORK_DONE event like any other
807 event, in target_wait. */
9016a515
DJ
808 }
809 else
810 {
811 /* We can't insert breakpoints until the child has
812 finished with the shared memory region. We need to
813 wait until that happens. Ideal would be to just
814 call:
815 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
816 - waitpid (parent_pid, &status, __WALL);
817 However, most architectures can't handle a syscall
818 being traced on the way out if it wasn't traced on
819 the way in.
820
821 We might also think to loop, continuing the child
822 until it exits or gets a SIGTRAP. One problem is
823 that the child might call ptrace with PTRACE_TRACEME.
824
825 There's no simple and reliable way to figure out when
826 the vforked child will be done with its copy of the
827 shared memory. We could step it out of the syscall,
828 two instructions, let it go, and then single-step the
829 parent once. When we have hardware single-step, this
830 would work; with software single-step it could still
831 be made to work but we'd have to be able to insert
832 single-step breakpoints in the child, and we'd have
833 to insert -just- the single-step breakpoint in the
834 parent. Very awkward.
835
836 In the end, the best we can do is to make sure it
837 runs for a little while. Hopefully it will be out of
838 range of any breakpoints we reinsert. Usually this
839 is only the single-step breakpoint at vfork's return
840 point. */
841
6c95b8df
PA
842 if (debug_linux_nat)
843 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
844 "LCFF: no VFORK_DONE "
845 "support, sleeping a bit\n");
6c95b8df 846
9016a515 847 usleep (10000);
9016a515 848
6c95b8df
PA
849 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
850 and leave it pending. The next linux_nat_resume call
851 will notice a pending event, and bypasses actually
852 resuming the inferior. */
3ced3da4
PA
853 parent_lp->status = 0;
854 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
855 parent_lp->stopped = 1;
6c95b8df
PA
856
857 /* If we're in async mode, need to tell the event loop
858 there's something here to process. */
859 if (target_can_async_p ())
860 async_file_mark ();
861 }
9016a515 862 }
4de4c07c 863 }
3993f6b1 864 else
4de4c07c 865 {
77435e4c 866 struct inferior *parent_inf, *child_inf;
3ced3da4 867 struct lwp_info *child_lp;
6c95b8df 868 struct program_space *parent_pspace;
4de4c07c 869
e85a822c 870 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
871 {
872 target_terminal_ours ();
6c95b8df 873 if (has_vforked)
3e43a32a
MS
874 fprintf_filtered (gdb_stdlog,
875 _("Attaching after process %d "
876 "vfork to child process %d.\n"),
6c95b8df
PA
877 parent_pid, child_pid);
878 else
3e43a32a
MS
879 fprintf_filtered (gdb_stdlog,
880 _("Attaching after process %d "
881 "fork to child process %d.\n"),
6c95b8df 882 parent_pid, child_pid);
f75c00e4 883 }
4de4c07c 884
7a7d3353
PA
885 /* Add the new inferior first, so that the target_detach below
886 doesn't unpush the target. */
887
77435e4c
PA
888 child_inf = add_inferior (child_pid);
889
e58b0e63 890 parent_inf = current_inferior ();
77435e4c 891 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 892 copy_terminal_info (child_inf, parent_inf);
6ecd4729
PA
893 child_inf->gdbarch = parent_inf->gdbarch;
894 copy_inferior_target_desc_info (child_inf, parent_inf);
7a7d3353 895
6c95b8df 896 parent_pspace = parent_inf->pspace;
9016a515 897
6c95b8df
PA
      /* If we're vforking, we want to hold on to the parent until
	 the child exits or execs.  At child exec or exit time we can
	 remove the old breakpoints from the parent and detach or
	 resume debugging it.  Otherwise, detach the parent now; we'll
	 want to reuse its program/address spaces, but we can't set
	 them to the child before removing breakpoints from the
	 parent, otherwise, the breakpoints module could decide to
	 remove breakpoints from the wrong process (since they'd be
	 assigned to the same address space).  */
9016a515
DJ
907
908 if (has_vforked)
7f9f62ba 909 {
6c95b8df
PA
910 gdb_assert (child_inf->vfork_parent == NULL);
911 gdb_assert (parent_inf->vfork_child == NULL);
912 child_inf->vfork_parent = parent_inf;
913 child_inf->pending_detach = 0;
914 parent_inf->vfork_child = child_inf;
915 parent_inf->pending_detach = detach_fork;
916 parent_inf->waiting_for_vfork_done = 0;
ac264b3b 917 }
2277426b 918 else if (detach_fork)
b84876c2 919 target_detach (NULL, 0);
4de4c07c 920
6c95b8df
PA
921 /* Note that the detach above makes PARENT_INF dangling. */
922
923 /* Add the child thread to the appropriate lists, and switch to
924 this new thread, before cloning the program space, and
925 informing the solib layer about this new process. */
926
9f0bdab8 927 inferior_ptid = ptid_build (child_pid, child_pid, 0);
2277426b 928 add_thread (inferior_ptid);
3ced3da4
PA
929 child_lp = add_lwp (inferior_ptid);
930 child_lp->stopped = 1;
25289eb2 931 child_lp->last_resume_kind = resume_stop;
6c95b8df
PA
932
933 /* If this is a vfork child, then the address-space is shared
934 with the parent. If we detached from the parent, then we can
935 reuse the parent's program/address spaces. */
936 if (has_vforked || detach_fork)
937 {
938 child_inf->pspace = parent_pspace;
939 child_inf->aspace = child_inf->pspace->aspace;
940 }
941 else
942 {
943 child_inf->aspace = new_address_space ();
944 child_inf->pspace = add_program_space (child_inf->aspace);
945 child_inf->removable = 1;
7dcd53a0 946 child_inf->symfile_flags = SYMFILE_NO_READ;
6c95b8df
PA
947 set_current_program_space (child_inf->pspace);
948 clone_program_space (child_inf->pspace, parent_pspace);
949
950 /* Let the shared library layer (solib-svr4) learn about
951 this new process, relocate the cloned exec, pull in
952 shared libraries, and install the solib event breakpoint.
953 If a "cloned-VM" event was propagated better throughout
954 the core, this wouldn't be required. */
268a4a75 955 solib_create_inferior_hook (0);
6c95b8df 956 }
ac264b3b 957
6c95b8df 958 /* Let the thread_db layer learn about this new process. */
ef29ce1a 959 check_for_thread_db ();
4de4c07c
DJ
960 }
961
7feb7d06 962 restore_child_signals_mask (&prev_mask);
4de4c07c
DJ
963 return 0;
964}
965
4de4c07c 966\f
77b06cd7 967static int
6d8fd2b7 968linux_child_insert_fork_catchpoint (int pid)
4de4c07c 969{
77b06cd7 970 return !linux_supports_tracefork (pid);
3993f6b1
DJ
971}
972
eb73ad13
PA
973static int
974linux_child_remove_fork_catchpoint (int pid)
975{
976 return 0;
977}
978
77b06cd7 979static int
6d8fd2b7 980linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 981{
77b06cd7 982 return !linux_supports_tracefork (pid);
3993f6b1
DJ
983}
984
eb73ad13
PA
985static int
986linux_child_remove_vfork_catchpoint (int pid)
987{
988 return 0;
989}
990
77b06cd7 991static int
6d8fd2b7 992linux_child_insert_exec_catchpoint (int pid)
3993f6b1 993{
77b06cd7 994 return !linux_supports_tracefork (pid);
3993f6b1
DJ
995}
996
eb73ad13
PA
997static int
998linux_child_remove_exec_catchpoint (int pid)
999{
1000 return 0;
1001}
1002
a96d9b2e
SDJ
1003static int
1004linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
1005 int table_size, int *table)
1006{
77b06cd7
TJB
1007 if (!linux_supports_tracesysgood (pid))
1008 return 1;
1009
a96d9b2e
SDJ
1010 /* On GNU/Linux, we ignore the arguments. It means that we only
1011 enable the syscall catchpoints, but do not disable them.
77b06cd7 1012
a96d9b2e
SDJ
1013 Also, we do not use the `table' information because we do not
1014 filter system calls here. We let GDB do the logic for us. */
1015 return 0;
1016}
1017
/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugging a multi-threaded process that spawns a lot of
     threads, GDB will run out of processes even if the threads exit,
     because the "zombies" stay around.  */
1050
1051/* List of known LWPs. */
9f0bdab8 1052struct lwp_info *lwp_list;
d6b0e80f
AC
1053\f
1054
d6b0e80f
AC
1055/* Original signal mask. */
1056static sigset_t normal_mask;
1057
1058/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1059 _initialize_linux_nat. */
1060static sigset_t suspend_mask;
1061
7feb7d06
PA
/* Signals to block to make sigsuspend work.  */
1063static sigset_t blocked_mask;
1064
1065/* SIGCHLD action. */
1066struct sigaction sigchld_action;
b84876c2 1067
7feb7d06
PA
1068/* Block child signals (SIGCHLD and linux threads signals), and store
1069 the previous mask in PREV_MASK. */
84e46146 1070
7feb7d06
PA
1071static void
1072block_child_signals (sigset_t *prev_mask)
1073{
1074 /* Make sure SIGCHLD is blocked. */
1075 if (!sigismember (&blocked_mask, SIGCHLD))
1076 sigaddset (&blocked_mask, SIGCHLD);
1077
1078 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1079}
1080
1081/* Restore child signals mask, previously returned by
1082 block_child_signals. */
1083
1084static void
1085restore_child_signals_mask (sigset_t *prev_mask)
1086{
1087 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1088}
2455069d
UW
1089
1090/* Mask of signals to pass directly to the inferior. */
1091static sigset_t pass_mask;
1092
1093/* Update signals to pass to the inferior. */
1094static void
1095linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1096{
1097 int signo;
1098
1099 sigemptyset (&pass_mask);
1100
1101 for (signo = 1; signo < NSIG; signo++)
1102 {
2ea28649 1103 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
1104 if (target_signo < numsigs && pass_signals[target_signo])
1105 sigaddset (&pass_mask, signo);
1106 }
1107}
1108
d6b0e80f
AC
1109\f
1110
1111/* Prototypes for local functions. */
1112static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 1113static int linux_thread_alive (ptid_t ptid);
6d8fd2b7 1114static char *linux_child_pid_to_exec_file (int pid);
710151dd 1115
d6b0e80f
AC
1116\f
1117/* Convert wait status STATUS to a string. Used for printing debug
1118 messages only. */
1119
1120static char *
1121status_to_str (int status)
1122{
1123 static char buf[64];
1124
1125 if (WIFSTOPPED (status))
206aa767 1126 {
ca2163eb 1127 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
206aa767
DE
1128 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1129 strsignal (SIGTRAP));
1130 else
1131 snprintf (buf, sizeof (buf), "%s (stopped)",
1132 strsignal (WSTOPSIG (status)));
1133 }
d6b0e80f
AC
1134 else if (WIFSIGNALED (status))
1135 snprintf (buf, sizeof (buf), "%s (terminated)",
ba9b2ec3 1136 strsignal (WTERMSIG (status)));
d6b0e80f
AC
1137 else
1138 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1139
1140 return buf;
1141}
1142
7b50312a
PA
1143/* Destroy and free LP. */
1144
1145static void
1146lwp_free (struct lwp_info *lp)
1147{
1148 xfree (lp->arch_private);
1149 xfree (lp);
1150}
1151
d90e17a7
PA
/* Remove all LWPs belonging to PID from the lwp list.  */
1153
1154static void
1155purge_lwp_list (int pid)
1156{
1157 struct lwp_info *lp, *lpprev, *lpnext;
1158
1159 lpprev = NULL;
1160
1161 for (lp = lwp_list; lp; lp = lpnext)
1162 {
1163 lpnext = lp->next;
1164
1165 if (ptid_get_pid (lp->ptid) == pid)
1166 {
1167 if (lp == lwp_list)
1168 lwp_list = lp->next;
1169 else
1170 lpprev->next = lp->next;
1171
7b50312a 1172 lwp_free (lp);
d90e17a7
PA
1173 }
1174 else
1175 lpprev = lp;
1176 }
1177}
1178
f973ed9c 1179/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
1180 structure describing the new LWP. The LWP should already be stopped
1181 (with an exception for the very first LWP). */
d6b0e80f
AC
1182
1183static struct lwp_info *
1184add_lwp (ptid_t ptid)
1185{
1186 struct lwp_info *lp;
1187
1188 gdb_assert (is_lwp (ptid));
1189
1190 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1191
1192 memset (lp, 0, sizeof (struct lwp_info));
1193
25289eb2 1194 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1195 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1196
1197 lp->ptid = ptid;
dc146f7c 1198 lp->core = -1;
d6b0e80f
AC
1199
1200 lp->next = lwp_list;
1201 lwp_list = lp;
d6b0e80f 1202
6e012a6c
PA
1203 /* Let the arch specific bits know about this new thread. Current
1204 clients of this callback take the opportunity to install
1205 watchpoints in the new thread. Don't do this for the first
1206 thread though. If we're spawning a child ("run"), the thread
1207 executes the shell wrapper first, and we shouldn't touch it until
1208 it execs the program we want to debug. For "attach", it'd be
1209 okay to call the callback, but it's not necessary, because
1210 watchpoints can't yet have been inserted into the inferior. */
1211 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
7b50312a 1212 linux_nat_new_thread (lp);
9f0bdab8 1213
d6b0e80f
AC
1214 return lp;
1215}
1216
1217/* Remove the LWP specified by PID from the list. */
1218
1219static void
1220delete_lwp (ptid_t ptid)
1221{
1222 struct lwp_info *lp, *lpprev;
1223
1224 lpprev = NULL;
1225
1226 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1227 if (ptid_equal (lp->ptid, ptid))
1228 break;
1229
1230 if (!lp)
1231 return;
1232
d6b0e80f
AC
1233 if (lpprev)
1234 lpprev->next = lp->next;
1235 else
1236 lwp_list = lp->next;
1237
7b50312a 1238 lwp_free (lp);
d6b0e80f
AC
1239}
1240
1241/* Return a pointer to the structure describing the LWP corresponding
1242 to PID. If no corresponding LWP could be found, return NULL. */
1243
1244static struct lwp_info *
1245find_lwp_pid (ptid_t ptid)
1246{
1247 struct lwp_info *lp;
1248 int lwp;
1249
1250 if (is_lwp (ptid))
1251 lwp = GET_LWP (ptid);
1252 else
1253 lwp = GET_PID (ptid);
1254
1255 for (lp = lwp_list; lp; lp = lp->next)
1256 if (lwp == GET_LWP (lp->ptid))
1257 return lp;
1258
1259 return NULL;
1260}
1261
1262/* Call CALLBACK with its second argument set to DATA for every LWP in
1263 the list. If CALLBACK returns 1 for a particular LWP, return a
1264 pointer to the structure describing that LWP immediately.
1265 Otherwise return NULL. */
1266
1267struct lwp_info *
d90e17a7
PA
1268iterate_over_lwps (ptid_t filter,
1269 int (*callback) (struct lwp_info *, void *),
1270 void *data)
d6b0e80f
AC
1271{
1272 struct lwp_info *lp, *lpnext;
1273
1274 for (lp = lwp_list; lp; lp = lpnext)
1275 {
1276 lpnext = lp->next;
d90e17a7
PA
1277
1278 if (ptid_match (lp->ptid, filter))
1279 {
1280 if ((*callback) (lp, data))
1281 return lp;
1282 }
d6b0e80f
AC
1283 }
1284
1285 return NULL;
1286}
1287
4403d8e9
JK
/* Iterate like iterate_over_lwps does, except that when forking off a
   child, call CALLBACK with CALLBACK_DATA only for that new child PID.  */
1290
1291void
1292linux_nat_iterate_watchpoint_lwps
1293 (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
1294{
1295 int inferior_pid = ptid_get_pid (inferior_ptid);
1296 struct inferior *inf = current_inferior ();
1297
1298 if (inf->pid == inferior_pid)
1299 {
1300 /* Iterate all the threads of the current inferior. Without specifying
1301 INFERIOR_PID it would iterate all threads of all inferiors, which is
1302 inappropriate for watchpoints. */
1303
1304 iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
1305 }
1306 else
1307 {
1308 /* Detaching a new child PID temporarily present in INFERIOR_PID. */
1309
1310 struct lwp_info *child_lp;
1311 struct cleanup *old_chain;
1312 pid_t child_pid = GET_PID (inferior_ptid);
1313 ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);
1314
4403d8e9
JK
1315 gdb_assert (find_lwp_pid (child_ptid) == NULL);
1316 child_lp = add_lwp (child_ptid);
1317 child_lp->stopped = 1;
1318 child_lp->last_resume_kind = resume_stop;
1319 old_chain = make_cleanup (delete_lwp_cleanup, child_lp);
1320
1321 callback (child_lp, callback_data);
1322
1323 do_cleanups (old_chain);
1324 }
1325}
1326
2277426b
PA
1327/* Update our internal state when changing from one checkpoint to
1328 another indicated by NEW_PTID. We can only switch single-threaded
1329 applications, so we only create one new LWP, and the previous list
1330 is discarded. */
f973ed9c
DJ
1331
1332void
1333linux_nat_switch_fork (ptid_t new_ptid)
1334{
1335 struct lwp_info *lp;
1336
2277426b
PA
1337 purge_lwp_list (GET_PID (inferior_ptid));
1338
f973ed9c
DJ
1339 lp = add_lwp (new_ptid);
1340 lp->stopped = 1;
e26af52f 1341
2277426b
PA
1342 /* This changes the thread's ptid while preserving the gdb thread
1343 num. Also changes the inferior pid, while preserving the
1344 inferior num. */
1345 thread_change_ptid (inferior_ptid, new_ptid);
1346
1347 /* We've just told GDB core that the thread changed target id, but,
1348 in fact, it really is a different thread, with different register
1349 contents. */
1350 registers_changed ();
e26af52f
DJ
1351}
1352
e26af52f
DJ
1353/* Handle the exit of a single thread LP. */
1354
1355static void
1356exit_lwp (struct lwp_info *lp)
1357{
e09875d4 1358 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1359
1360 if (th)
e26af52f 1361 {
17faa917
DJ
1362 if (print_thread_events)
1363 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1364
4f8d22e3 1365 delete_thread (lp->ptid);
e26af52f
DJ
1366 }
1367
1368 delete_lwp (lp->ptid);
1369}
1370
a0ef4274
DJ
1371/* Wait for the LWP specified by LP, which we have just attached to.
1372 Returns a wait status for that LWP, to cache. */
1373
1374static int
1375linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1376 int *signalled)
1377{
1378 pid_t new_pid, pid = GET_LWP (ptid);
1379 int status;
1380
644cebc9 1381 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
1382 {
1383 if (debug_linux_nat)
1384 fprintf_unfiltered (gdb_stdlog,
1385 "LNPAW: Attaching to a stopped process\n");
1386
1387 /* The process is definitely stopped. It is in a job control
1388 stop, unless the kernel predates the TASK_STOPPED /
1389 TASK_TRACED distinction, in which case it might be in a
1390 ptrace stop. Make sure it is in a ptrace stop; from there we
1391 can kill it, signal it, et cetera.
1392
1393 First make sure there is a pending SIGSTOP. Since we are
1394 already attached, the process can not transition from stopped
1395 to running without a PTRACE_CONT; so we know this signal will
1396 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1397 probably already in the queue (unless this kernel is old
1398 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1399 is not an RT signal, it can only be queued once. */
1400 kill_lwp (pid, SIGSTOP);
1401
1402 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1403 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1404 ptrace (PTRACE_CONT, pid, 0, 0);
1405 }
1406
1407 /* Make sure the initial process is stopped. The user-level threads
1408 layer might want to poke around in the inferior, and that won't
1409 work if things haven't stabilized yet. */
1410 new_pid = my_waitpid (pid, &status, 0);
1411 if (new_pid == -1 && errno == ECHILD)
1412 {
1413 if (first)
1414 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1415
1416 /* Try again with __WCLONE to check cloned processes. */
1417 new_pid = my_waitpid (pid, &status, __WCLONE);
1418 *cloned = 1;
1419 }
1420
dacc9cb2
PP
1421 gdb_assert (pid == new_pid);
1422
1423 if (!WIFSTOPPED (status))
1424 {
1425 /* The pid we tried to attach has apparently just exited. */
1426 if (debug_linux_nat)
1427 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1428 pid, status_to_str (status));
1429 return status;
1430 }
a0ef4274
DJ
1431
1432 if (WSTOPSIG (status) != SIGSTOP)
1433 {
1434 *signalled = 1;
1435 if (debug_linux_nat)
1436 fprintf_unfiltered (gdb_stdlog,
1437 "LNPAW: Received %s after attaching\n",
1438 status_to_str (status));
1439 }
1440
1441 return status;
1442}
1443
84636d28
PA
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already
   auto-attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */
d6b0e80f 1449
9ee57c33 1450int
93815fbf 1451lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1452{
9ee57c33 1453 struct lwp_info *lp;
7feb7d06 1454 sigset_t prev_mask;
84636d28 1455 int lwpid;
d6b0e80f
AC
1456
1457 gdb_assert (is_lwp (ptid));
1458
7feb7d06 1459 block_child_signals (&prev_mask);
d6b0e80f 1460
9ee57c33 1461 lp = find_lwp_pid (ptid);
84636d28 1462 lwpid = GET_LWP (ptid);
d6b0e80f
AC
1463
1464 /* We assume that we're already attached to any LWP that has an id
1465 equal to the overall process id, and to any LWP that is already
1466 in our list of LWPs. If we're not seeing exit events from threads
1467 and we've had PID wraparound since we last tried to stop all threads,
1468 this assumption might be wrong; fortunately, this is very unlikely
1469 to happen. */
84636d28 1470 if (lwpid != GET_PID (ptid) && lp == NULL)
d6b0e80f 1471 {
a0ef4274 1472 int status, cloned = 0, signalled = 0;
d6b0e80f 1473
84636d28 1474 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
9ee57c33 1475 {
84636d28
PA
1476 if (linux_supports_tracefork_flag)
1477 {
1478 /* If we haven't stopped all threads when we get here,
1479 we may have seen a thread listed in thread_db's list,
1480 but not processed the PTRACE_EVENT_CLONE yet. If
1481 that's the case, ignore this new thread, and let
1482 normal event handling discover it later. */
1483 if (in_pid_list_p (stopped_pids, lwpid))
1484 {
1485 /* We've already seen this thread stop, but we
1486 haven't seen the PTRACE_EVENT_CLONE extended
1487 event yet. */
1488 restore_child_signals_mask (&prev_mask);
1489 return 0;
1490 }
1491 else
1492 {
1493 int new_pid;
1494 int status;
1495
1496 /* See if we've got a stop for this new child
1497 pending. If so, we're already attached. */
1498 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1499 if (new_pid == -1 && errno == ECHILD)
1500 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1501 if (new_pid != -1)
1502 {
1503 if (WIFSTOPPED (status))
1504 add_to_pid_list (&stopped_pids, lwpid, status);
1505
1506 restore_child_signals_mask (&prev_mask);
1507 return 1;
1508 }
1509 }
1510 }
1511
9ee57c33
DJ
1512 /* If we fail to attach to the thread, issue a warning,
1513 but continue. One way this can happen is if thread
e9efe249 1514 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1515 bug may place threads in the thread list and then fail
1516 to create them. */
1517 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1518 safe_strerror (errno));
7feb7d06 1519 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1520 return -1;
1521 }
1522
d6b0e80f
AC
1523 if (debug_linux_nat)
1524 fprintf_unfiltered (gdb_stdlog,
1525 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1526 target_pid_to_str (ptid));
1527
a0ef4274 1528 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1529 if (!WIFSTOPPED (status))
673c2bbe
DE
1530 {
1531 restore_child_signals_mask (&prev_mask);
f687d035 1532 return 1;
673c2bbe 1533 }
dacc9cb2 1534
a0ef4274
DJ
1535 lp = add_lwp (ptid);
1536 lp->stopped = 1;
1537 lp->cloned = cloned;
1538 lp->signalled = signalled;
1539 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1540 {
a0ef4274
DJ
1541 lp->resumed = 1;
1542 lp->status = status;
d6b0e80f
AC
1543 }
1544
a0ef4274 1545 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1546
1547 if (debug_linux_nat)
1548 {
1549 fprintf_unfiltered (gdb_stdlog,
1550 "LLAL: waitpid %s received %s\n",
1551 target_pid_to_str (ptid),
1552 status_to_str (status));
1553 }
1554 }
1555 else
1556 {
1557 /* We assume that the LWP representing the original process is
1558 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1559 that the GNU/linux ptrace layer uses to keep track of
1560 threads. Note that this won't have already been done since
1561 the main thread will have, we assume, been stopped by an
1562 attach from a different layer. */
9ee57c33
DJ
1563 if (lp == NULL)
1564 lp = add_lwp (ptid);
d6b0e80f
AC
1565 lp->stopped = 1;
1566 }
9ee57c33 1567
25289eb2 1568 lp->last_resume_kind = resume_stop;
7feb7d06 1569 restore_child_signals_mask (&prev_mask);
9ee57c33 1570 return 0;
d6b0e80f
AC
1571}
1572
b84876c2 1573static void
136d6dae
VP
1574linux_nat_create_inferior (struct target_ops *ops,
1575 char *exec_file, char *allargs, char **env,
b84876c2
PA
1576 int from_tty)
1577{
10568435
JK
1578#ifdef HAVE_PERSONALITY
1579 int personality_orig = 0, personality_set = 0;
1580#endif /* HAVE_PERSONALITY */
b84876c2
PA
1581
1582 /* The fork_child mechanism is synchronous and calls target_wait, so
1583 we have to mask the async mode. */
1584
10568435
JK
1585#ifdef HAVE_PERSONALITY
1586 if (disable_randomization)
1587 {
1588 errno = 0;
1589 personality_orig = personality (0xffffffff);
1590 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1591 {
1592 personality_set = 1;
1593 personality (personality_orig | ADDR_NO_RANDOMIZE);
1594 }
1595 if (errno != 0 || (personality_set
1596 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1597 warning (_("Error disabling address space randomization: %s"),
1598 safe_strerror (errno));
1599 }
1600#endif /* HAVE_PERSONALITY */
1601
2455069d
UW
1602 /* Make sure we report all signals during startup. */
1603 linux_nat_pass_signals (0, NULL);
1604
136d6dae 1605 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1606
10568435
JK
1607#ifdef HAVE_PERSONALITY
1608 if (personality_set)
1609 {
1610 errno = 0;
1611 personality (personality_orig);
1612 if (errno != 0)
1613 warning (_("Error restoring address space randomization: %s"),
1614 safe_strerror (errno));
1615 }
1616#endif /* HAVE_PERSONALITY */
b84876c2
PA
1617}
1618
d6b0e80f 1619static void
136d6dae 1620linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1621{
1622 struct lwp_info *lp;
d6b0e80f 1623 int status;
af990527 1624 ptid_t ptid;
87b0bb13 1625 volatile struct gdb_exception ex;
d6b0e80f 1626
2455069d
UW
1627 /* Make sure we report all signals during attach. */
1628 linux_nat_pass_signals (0, NULL);
1629
87b0bb13
JK
1630 TRY_CATCH (ex, RETURN_MASK_ERROR)
1631 {
1632 linux_ops->to_attach (ops, args, from_tty);
1633 }
1634 if (ex.reason < 0)
1635 {
1636 pid_t pid = parse_pid_to_attach (args);
1637 struct buffer buffer;
1638 char *message, *buffer_s;
1639
1640 message = xstrdup (ex.message);
1641 make_cleanup (xfree, message);
1642
1643 buffer_init (&buffer);
1644 linux_ptrace_attach_warnings (pid, &buffer);
1645
1646 buffer_grow_str0 (&buffer, "");
1647 buffer_s = buffer_finish (&buffer);
1648 make_cleanup (xfree, buffer_s);
1649
1650 throw_error (ex.error, "%s%s", buffer_s, message);
1651 }
d6b0e80f 1652
af990527
PA
1653 /* The ptrace base target adds the main thread with (pid,0,0)
1654 format. Decorate it with lwp info. */
1655 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1656 thread_change_ptid (inferior_ptid, ptid);
1657
9f0bdab8 1658 /* Add the initial process as the first LWP to the list. */
af990527 1659 lp = add_lwp (ptid);
a0ef4274
DJ
1660
1661 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1662 &lp->signalled);
dacc9cb2
PP
1663 if (!WIFSTOPPED (status))
1664 {
1665 if (WIFEXITED (status))
1666 {
1667 int exit_code = WEXITSTATUS (status);
1668
1669 target_terminal_ours ();
1670 target_mourn_inferior ();
1671 if (exit_code == 0)
1672 error (_("Unable to attach: program exited normally."));
1673 else
1674 error (_("Unable to attach: program exited with code %d."),
1675 exit_code);
1676 }
1677 else if (WIFSIGNALED (status))
1678 {
2ea28649 1679 enum gdb_signal signo;
dacc9cb2
PP
1680
1681 target_terminal_ours ();
1682 target_mourn_inferior ();
1683
2ea28649 1684 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1685 error (_("Unable to attach: program terminated with signal "
1686 "%s, %s."),
2ea28649
PA
1687 gdb_signal_to_name (signo),
1688 gdb_signal_to_string (signo));
dacc9cb2
PP
1689 }
1690
1691 internal_error (__FILE__, __LINE__,
1692 _("unexpected status %d for PID %ld"),
1693 status, (long) GET_LWP (ptid));
1694 }
1695
a0ef4274 1696 lp->stopped = 1;
9f0bdab8 1697
a0ef4274 1698 /* Save the wait status to report later. */
d6b0e80f 1699 lp->resumed = 1;
a0ef4274
DJ
1700 if (debug_linux_nat)
1701 fprintf_unfiltered (gdb_stdlog,
1702 "LNA: waitpid %ld, saving status %s\n",
1703 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1704
7feb7d06
PA
1705 lp->status = status;
1706
1707 if (target_can_async_p ())
1708 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1709}
1710
a0ef4274
DJ
1711/* Get pending status of LP. */
1712static int
1713get_pending_status (struct lwp_info *lp, int *status)
1714{
a493e3e2 1715 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1716
1717 /* If we paused threads momentarily, we may have stored pending
1718 events in lp->status or lp->waitstatus (see stop_wait_callback),
1719 and GDB core hasn't seen any signal for those threads.
1720 Otherwise, the last signal reported to the core is found in the
1721 thread object's stop_signal.
1722
1723 There's a corner case that isn't handled here at present. Only
1724 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1725 stop_signal make sense as a real signal to pass to the inferior.
1726 Some catchpoint related events, like
1727 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
 a493e3e2 1728 	 to GDB_SIGNAL_TRAP when the catchpoint triggers. But,
ca2163eb
PA
1729 those traps are debug API (ptrace in our case) related and
1730 induced; the inferior wouldn't see them if it wasn't being
1731 traced. Hence, we should never pass them to the inferior, even
1732 when set to pass state. Since this corner case isn't handled by
1733 infrun.c when proceeding with a signal, for consistency, neither
1734 do we handle it here (or elsewhere in the file we check for
1735 signal pass state). Normally SIGTRAP isn't set to pass state, so
1736 this is really a corner case. */
1737
1738 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1739 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1740 else if (lp->status)
2ea28649 1741 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
ca2163eb
PA
1742 else if (non_stop && !is_executing (lp->ptid))
1743 {
1744 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1745
16c381f0 1746 signo = tp->suspend.stop_signal;
ca2163eb
PA
1747 }
1748 else if (!non_stop)
a0ef4274 1749 {
ca2163eb
PA
1750 struct target_waitstatus last;
1751 ptid_t last_ptid;
4c28f408 1752
ca2163eb 1753 get_last_target_status (&last_ptid, &last);
4c28f408 1754
ca2163eb
PA
1755 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1756 {
e09875d4 1757 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1758
16c381f0 1759 signo = tp->suspend.stop_signal;
4c28f408 1760 }
ca2163eb 1761 }
4c28f408 1762
ca2163eb 1763 *status = 0;
4c28f408 1764
a493e3e2 1765 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1766 {
1767 if (debug_linux_nat)
1768 fprintf_unfiltered (gdb_stdlog,
1769 "GPT: lwp %s has no pending signal\n",
1770 target_pid_to_str (lp->ptid));
1771 }
1772 else if (!signal_pass_state (signo))
1773 {
1774 if (debug_linux_nat)
3e43a32a
MS
1775 fprintf_unfiltered (gdb_stdlog,
1776 "GPT: lwp %s had signal %s, "
1777 "but it is in no pass state\n",
ca2163eb 1778 target_pid_to_str (lp->ptid),
2ea28649 1779 gdb_signal_to_string (signo));
a0ef4274 1780 }
a0ef4274 1781 else
4c28f408 1782 {
2ea28649 1783 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1784
1785 if (debug_linux_nat)
1786 fprintf_unfiltered (gdb_stdlog,
1787 "GPT: lwp %s has pending signal %s\n",
1788 target_pid_to_str (lp->ptid),
2ea28649 1789 gdb_signal_to_string (signo));
4c28f408 1790 }
a0ef4274
DJ
1791
1792 return 0;
1793}
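
/* For illustration, the W_STOPCODE macro used by get_pending_status above
   rebuilds a waitpid-style "stopped" status word from a signal number; with
   glibc's encoding:

       W_STOPCODE (sig)              // ((sig) << 8) | 0x7f
       WIFSTOPPED (W_STOPCODE (sig)) // non-zero
       WSTOPSIG (W_STOPCODE (sig))   // sig

   so the status handed back to callers can be treated exactly like one
   returned by waitpid.  */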
1794
d6b0e80f
AC
1795static int
1796detach_callback (struct lwp_info *lp, void *data)
1797{
1798 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1799
1800 if (debug_linux_nat && lp->status)
1801 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1802 strsignal (WSTOPSIG (lp->status)),
1803 target_pid_to_str (lp->ptid));
1804
a0ef4274
DJ
1805 /* If there is a pending SIGSTOP, get rid of it. */
1806 if (lp->signalled)
d6b0e80f 1807 {
d6b0e80f
AC
1808 if (debug_linux_nat)
1809 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1810 "DC: Sending SIGCONT to %s\n",
1811 target_pid_to_str (lp->ptid));
d6b0e80f 1812
a0ef4274 1813 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1814 lp->signalled = 0;
d6b0e80f
AC
1815 }
1816
1817 /* We don't actually detach from the LWP that has an id equal to the
1818 overall process id just yet. */
1819 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1820 {
a0ef4274
DJ
1821 int status = 0;
1822
1823 /* Pass on any pending signal for this LWP. */
1824 get_pending_status (lp, &status);
1825
7b50312a
PA
1826 if (linux_nat_prepare_to_resume != NULL)
1827 linux_nat_prepare_to_resume (lp);
d6b0e80f
AC
1828 errno = 0;
1829 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1830 WSTOPSIG (status)) < 0)
8a3fe4f8 1831 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1832 safe_strerror (errno));
1833
1834 if (debug_linux_nat)
1835 fprintf_unfiltered (gdb_stdlog,
1836 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1837 target_pid_to_str (lp->ptid),
7feb7d06 1838 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1839
1840 delete_lwp (lp->ptid);
1841 }
1842
1843 return 0;
1844}
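
/* For illustration, the data argument of PTRACE_DETACH is a signal number,
   so the detach above both releases the LWP and delivers any pending signal
   as the thread resumes.  Assuming a thread id LWPID with a pending SIGUSR1,
   a bare-bones equivalent would be:

       ptrace (PTRACE_DETACH, lwpid, 0, SIGUSR1);

   which is why get_pending_status is consulted just before the
   PTRACE_DETACH call.  */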
1845
1846static void
136d6dae 1847linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1848{
b84876c2 1849 int pid;
a0ef4274 1850 int status;
d90e17a7
PA
1851 struct lwp_info *main_lwp;
1852
1853 pid = GET_PID (inferior_ptid);
a0ef4274 1854
ae5e0686
MK
1855 /* Don't unregister from the event loop, as there may be other
1856 inferiors running. */
b84876c2 1857
4c28f408
PA
1858 /* Stop all threads before detaching. ptrace requires that the
 1859 	 thread is stopped to successfully detach. */
d90e17a7 1860 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1861 /* ... and wait until all of them have reported back that
1862 they're no longer running. */
d90e17a7 1863 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1864
d90e17a7 1865 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1866
1867 /* Only the initial process should be left right now. */
d90e17a7
PA
1868 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1869
1870 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1871
a0ef4274
DJ
1872 /* Pass on any pending signal for the last LWP. */
1873 if ((args == NULL || *args == '\0')
d90e17a7 1874 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1875 && WIFSTOPPED (status))
1876 {
1877 /* Put the signal number in ARGS so that inf_ptrace_detach will
1878 pass it along with PTRACE_DETACH. */
1879 args = alloca (8);
1880 sprintf (args, "%d", (int) WSTOPSIG (status));
ddabfc73
TT
1881 if (debug_linux_nat)
1882 fprintf_unfiltered (gdb_stdlog,
1883 "LND: Sending signal %s to %s\n",
1884 args,
1885 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1886 }
1887
7b50312a
PA
1888 if (linux_nat_prepare_to_resume != NULL)
1889 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1890 delete_lwp (main_lwp->ptid);
b84876c2 1891
7a7d3353
PA
1892 if (forks_exist_p ())
1893 {
1894 /* Multi-fork case. The current inferior_ptid is being detached
1895 from, but there are other viable forks to debug. Detach from
1896 the current fork, and context-switch to the first
1897 available. */
1898 linux_fork_detach (args, from_tty);
7a7d3353
PA
1899 }
1900 else
1901 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1902}
1903
1904/* Resume LP. */
1905
25289eb2 1906static void
e5ef252a 1907resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1908{
25289eb2 1909 if (lp->stopped)
6c95b8df 1910 {
25289eb2
PA
1911 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1912
1913 if (inf->vfork_child != NULL)
1914 {
1915 if (debug_linux_nat)
1916 fprintf_unfiltered (gdb_stdlog,
1917 "RC: Not resuming %s (vfork parent)\n",
1918 target_pid_to_str (lp->ptid));
1919 }
1920 else if (lp->status == 0
1921 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1922 {
1923 if (debug_linux_nat)
1924 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1925 "RC: Resuming sibling %s, %s, %s\n",
1926 target_pid_to_str (lp->ptid),
1927 (signo != GDB_SIGNAL_0
1928 ? strsignal (gdb_signal_to_host (signo))
1929 : "0"),
1930 step ? "step" : "resume");
25289eb2 1931
7b50312a
PA
1932 if (linux_nat_prepare_to_resume != NULL)
1933 linux_nat_prepare_to_resume (lp);
25289eb2
PA
1934 linux_ops->to_resume (linux_ops,
1935 pid_to_ptid (GET_LWP (lp->ptid)),
e5ef252a 1936 step, signo);
25289eb2
PA
1937 lp->stopped = 0;
1938 lp->step = step;
25289eb2
PA
1939 lp->stopped_by_watchpoint = 0;
1940 }
1941 else
1942 {
1943 if (debug_linux_nat)
1944 fprintf_unfiltered (gdb_stdlog,
1945 "RC: Not resuming sibling %s (has pending)\n",
1946 target_pid_to_str (lp->ptid));
1947 }
6c95b8df 1948 }
25289eb2 1949 else
d6b0e80f 1950 {
d90e17a7
PA
1951 if (debug_linux_nat)
1952 fprintf_unfiltered (gdb_stdlog,
25289eb2 1953 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1954 target_pid_to_str (lp->ptid));
d6b0e80f 1955 }
25289eb2 1956}
d6b0e80f 1957
e5ef252a
PA
1958/* Resume LWP, with the last stop signal, if it is in pass state. */
1959
25289eb2 1960static int
e5ef252a 1961linux_nat_resume_callback (struct lwp_info *lp, void *data)
25289eb2 1962{
e5ef252a
PA
1963 enum gdb_signal signo = GDB_SIGNAL_0;
1964
1965 if (lp->stopped)
1966 {
1967 struct thread_info *thread;
1968
1969 thread = find_thread_ptid (lp->ptid);
1970 if (thread != NULL)
1971 {
1972 if (signal_pass_state (thread->suspend.stop_signal))
1973 signo = thread->suspend.stop_signal;
1974 thread->suspend.stop_signal = GDB_SIGNAL_0;
1975 }
1976 }
1977
1978 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1979 return 0;
1980}
1981
1982static int
1983resume_clear_callback (struct lwp_info *lp, void *data)
1984{
1985 lp->resumed = 0;
25289eb2 1986 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1987 return 0;
1988}
1989
1990static int
1991resume_set_callback (struct lwp_info *lp, void *data)
1992{
1993 lp->resumed = 1;
25289eb2 1994 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1995 return 0;
1996}
1997
1998static void
28439f5e 1999linux_nat_resume (struct target_ops *ops,
2ea28649 2000 ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f 2001{
7feb7d06 2002 sigset_t prev_mask;
d6b0e80f 2003 struct lwp_info *lp;
d90e17a7 2004 int resume_many;
d6b0e80f 2005
76f50ad1
DJ
2006 if (debug_linux_nat)
2007 fprintf_unfiltered (gdb_stdlog,
2008 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
2009 step ? "step" : "resume",
2010 target_pid_to_str (ptid),
a493e3e2 2011 (signo != GDB_SIGNAL_0
2ea28649 2012 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
2013 target_pid_to_str (inferior_ptid));
2014
7feb7d06 2015 block_child_signals (&prev_mask);
b84876c2 2016
d6b0e80f 2017 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
2018 resume_many = (ptid_equal (minus_one_ptid, ptid)
2019 || ptid_is_pid (ptid));
4c28f408 2020
e3e9f5a2
PA
2021 /* Mark the lwps we're resuming as resumed. */
2022 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 2023
d90e17a7
PA
2024 /* See if it's the current inferior that should be handled
2025 specially. */
2026 if (resume_many)
2027 lp = find_lwp_pid (inferior_ptid);
2028 else
2029 lp = find_lwp_pid (ptid);
9f0bdab8 2030 gdb_assert (lp != NULL);
d6b0e80f 2031
9f0bdab8
DJ
2032 /* Remember if we're stepping. */
2033 lp->step = step;
25289eb2 2034 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 2035
9f0bdab8
DJ
2036 /* If we have a pending wait status for this thread, there is no
2037 point in resuming the process. But first make sure that
2038 linux_nat_wait won't preemptively handle the event - we
2039 should never take this short-circuit if we are going to
2040 leave LP running, since we have skipped resuming all the
2041 other threads. This bit of code needs to be synchronized
2042 with linux_nat_wait. */
76f50ad1 2043
9f0bdab8
DJ
2044 if (lp->status && WIFSTOPPED (lp->status))
2045 {
2455069d
UW
2046 if (!lp->step
2047 && WSTOPSIG (lp->status)
2048 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 2049 {
9f0bdab8
DJ
2050 if (debug_linux_nat)
2051 fprintf_unfiltered (gdb_stdlog,
2052 "LLR: Not short circuiting for ignored "
2053 "status 0x%x\n", lp->status);
2054
d6b0e80f
AC
2055 /* FIXME: What should we do if we are supposed to continue
2056 this thread with a signal? */
a493e3e2 2057 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 2058 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
2059 lp->status = 0;
2060 }
2061 }
76f50ad1 2062
6c95b8df 2063 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
9f0bdab8
DJ
2064 {
2065 /* FIXME: What should we do if we are supposed to continue
2066 this thread with a signal? */
a493e3e2 2067 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 2068
9f0bdab8
DJ
2069 if (debug_linux_nat)
2070 fprintf_unfiltered (gdb_stdlog,
2071 "LLR: Short circuiting for status 0x%x\n",
2072 lp->status);
d6b0e80f 2073
7feb7d06
PA
2074 restore_child_signals_mask (&prev_mask);
2075 if (target_can_async_p ())
2076 {
2077 target_async (inferior_event_handler, 0);
2078 /* Tell the event loop we have something to process. */
2079 async_file_mark ();
2080 }
9f0bdab8 2081 return;
d6b0e80f
AC
2082 }
2083
9f0bdab8 2084 /* Mark LWP as not stopped to prevent it from being continued by
e5ef252a 2085 linux_nat_resume_callback. */
9f0bdab8
DJ
2086 lp->stopped = 0;
2087
d90e17a7 2088 if (resume_many)
e5ef252a 2089 iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);
d90e17a7
PA
2090
2091 /* Convert to something the lower layer understands. */
2092 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 2093
7b50312a
PA
2094 if (linux_nat_prepare_to_resume != NULL)
2095 linux_nat_prepare_to_resume (lp);
28439f5e 2096 linux_ops->to_resume (linux_ops, ptid, step, signo);
ebec9a0f 2097 lp->stopped_by_watchpoint = 0;
9f0bdab8 2098
d6b0e80f
AC
2099 if (debug_linux_nat)
2100 fprintf_unfiltered (gdb_stdlog,
2101 "LLR: %s %s, %s (resume event thread)\n",
2102 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2103 target_pid_to_str (ptid),
a493e3e2 2104 (signo != GDB_SIGNAL_0
2ea28649 2105 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 2106
7feb7d06 2107 restore_child_signals_mask (&prev_mask);
b84876c2 2108 if (target_can_async_p ())
8ea051c5 2109 target_async (inferior_event_handler, 0);
d6b0e80f
AC
2110}
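
/* In summary, linux_nat_resume above has a short-circuit: if the selected
   LWP already has a pending wait status (lp->status or lp->waitstatus),
   nothing is resumed via ptrace at all; in async mode async_file_mark is
   called instead, so the event loop immediately re-enters linux_nat_wait
   and reports the stashed event.  Only otherwise are sibling LWPs resumed
   through linux_nat_resume_callback and the event thread through the low
   target's to_resume.  */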
2111
c5f62d5f 2112/* Send a signal to an LWP. */
d6b0e80f
AC
2113
2114static int
2115kill_lwp (int lwpid, int signo)
2116{
c5f62d5f
DE
2117 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2118 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
2119
2120#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
2121 {
2122 static int tkill_failed;
2123
2124 if (!tkill_failed)
2125 {
2126 int ret;
2127
2128 errno = 0;
2129 ret = syscall (__NR_tkill, lwpid, signo);
2130 if (errno != ENOSYS)
2131 return ret;
2132 tkill_failed = 1;
2133 }
2134 }
d6b0e80f
AC
2135#endif
2136
2137 return kill (lwpid, signo);
2138}
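
/* For illustration, the distinction above matters because kill(2) addresses
   a whole thread group while tkill(2) addresses one specific kernel thread.
   A rough sketch of the same fallback chain, assuming a thread id LWPID:

       #include <sys/syscall.h>
       #include <signal.h>
       #include <errno.h>

       errno = 0;
       if (syscall (SYS_tkill, lwpid, signo) == -1 && errno == ENOSYS)
         kill (lwpid, signo);   // pre-NPTL: LWP ids double as process ids

   With NPTL, tkill guarantees the signal lands on the intended LWP rather
   than on an arbitrary thread of the process.  */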
2139
ca2163eb
PA
2140/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2141 event, check if the core is interested in it: if not, ignore the
2142 event, and keep waiting; otherwise, we need to toggle the LWP's
2143 syscall entry/exit status, since the ptrace event itself doesn't
2144 indicate it, and report the trap to higher layers. */
2145
2146static int
2147linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2148{
2149 struct target_waitstatus *ourstatus = &lp->waitstatus;
2150 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2151 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2152
2153 if (stopping)
2154 {
2155 /* If we're stopping threads, there's a SIGSTOP pending, which
2156 makes it so that the LWP reports an immediate syscall return,
2157 followed by the SIGSTOP. Skip seeing that "return" using
2158 PTRACE_CONT directly, and let stop_wait_callback collect the
 2159 	 SIGSTOP. Later when the thread is resumed, a new syscall
 2160 	 entry event is reported. If we didn't do this (and returned 0), we'd
2161 leave a syscall entry pending, and our caller, by using
2162 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2163 itself. Later, when the user re-resumes this LWP, we'd see
2164 another syscall entry event and we'd mistake it for a return.
2165
2166 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2167 (leaving immediately with LWP->signalled set, without issuing
2168 a PTRACE_CONT), it would still be problematic to leave this
2169 syscall enter pending, as later when the thread is resumed,
2170 it would then see the same syscall exit mentioned above,
2171 followed by the delayed SIGSTOP, while the syscall didn't
2172 actually get to execute. It seems it would be even more
2173 confusing to the user. */
2174
2175 if (debug_linux_nat)
2176 fprintf_unfiltered (gdb_stdlog,
2177 "LHST: ignoring syscall %d "
2178 "for LWP %ld (stopping threads), "
2179 "resuming with PTRACE_CONT for SIGSTOP\n",
2180 syscall_number,
2181 GET_LWP (lp->ptid));
2182
2183 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2184 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2185 return 1;
2186 }
2187
2188 if (catch_syscall_enabled ())
2189 {
2190 /* Always update the entry/return state, even if this particular
2191 syscall isn't interesting to the core now. In async mode,
2192 the user could install a new catchpoint for this syscall
2193 between syscall enter/return, and we'll need to know to
2194 report a syscall return if that happens. */
2195 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2196 ? TARGET_WAITKIND_SYSCALL_RETURN
2197 : TARGET_WAITKIND_SYSCALL_ENTRY);
2198
2199 if (catching_syscall_number (syscall_number))
2200 {
2201 /* Alright, an event to report. */
2202 ourstatus->kind = lp->syscall_state;
2203 ourstatus->value.syscall_number = syscall_number;
2204
2205 if (debug_linux_nat)
2206 fprintf_unfiltered (gdb_stdlog,
2207 "LHST: stopping for %s of syscall %d"
2208 " for LWP %ld\n",
3e43a32a
MS
2209 lp->syscall_state
2210 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
2211 ? "entry" : "return",
2212 syscall_number,
2213 GET_LWP (lp->ptid));
2214 return 0;
2215 }
2216
2217 if (debug_linux_nat)
2218 fprintf_unfiltered (gdb_stdlog,
2219 "LHST: ignoring %s of syscall %d "
2220 "for LWP %ld\n",
2221 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2222 ? "entry" : "return",
2223 syscall_number,
2224 GET_LWP (lp->ptid));
2225 }
2226 else
2227 {
2228 /* If we had been syscall tracing, and hence used PT_SYSCALL
2229 before on this LWP, it could happen that the user removes all
2230 syscall catchpoints before we get to process this event.
2231 There are two noteworthy issues here:
2232
2233 - When stopped at a syscall entry event, resuming with
2234 PT_STEP still resumes executing the syscall and reports a
2235 syscall return.
2236
 2237 	 - Only PT_SYSCALL catches syscall enters. If we last
 2238 	 single-stepped this thread, then this event can't be a
 2239 	 syscall enter; it must instead be the corresponding
 2240 	 syscall exit.
2241
2242 The points above mean that the next resume, be it PT_STEP or
2243 PT_CONTINUE, can not trigger a syscall trace event. */
2244 if (debug_linux_nat)
2245 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2246 "LHST: caught syscall event "
2247 "with no syscall catchpoints."
ca2163eb
PA
2248 " %d for LWP %ld, ignoring\n",
2249 syscall_number,
2250 GET_LWP (lp->ptid));
2251 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2252 }
2253
2254 /* The core isn't interested in this event. For efficiency, avoid
2255 stopping all threads only to have the core resume them all again.
2256 Since we're not stopping threads, if we're still syscall tracing
2257 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2258 subsequent syscall. Simply resume using the inf-ptrace layer,
2259 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2260
2261 /* Note that gdbarch_get_syscall_number may access registers, hence
2262 fill a regcache. */
2263 registers_changed ();
7b50312a
PA
2264 if (linux_nat_prepare_to_resume != NULL)
2265 linux_nat_prepare_to_resume (lp);
ca2163eb 2266 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 2267 lp->step, GDB_SIGNAL_0);
ca2163eb
PA
2268 return 1;
2269}
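
/* For illustration, syscall stops are told apart from breakpoint traps by
   their signal number.  With the PTRACE_O_TRACESYSGOOD option set, the
   kernel reports syscall-entry and syscall-exit stops with
   WSTOPSIG == SIGTRAP | 0x80 rather than plain SIGTRAP, and SYSCALL_SIGTRAP
   is assumed here to stand for that value; the callers below therefore do,
   in essence:

       if (WIFSTOPPED (status) && WSTOPSIG (status) == (SIGTRAP | 0x80))
         ... treat it as a syscall entry/exit, not a breakpoint ...
*/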
2270
3d799a95
DJ
2271/* Handle a GNU/Linux extended wait response. If we see a clone
2272 event, we need to add the new LWP to our list (and not report the
2273 trap to higher layers). This function returns non-zero if the
2274 event should be ignored and we should wait again. If STOPPING is
2275 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
2276
2277static int
3d799a95
DJ
2278linux_handle_extended_wait (struct lwp_info *lp, int status,
2279 int stopping)
d6b0e80f 2280{
3d799a95
DJ
2281 int pid = GET_LWP (lp->ptid);
2282 struct target_waitstatus *ourstatus = &lp->waitstatus;
3d799a95 2283 int event = status >> 16;
d6b0e80f 2284
3d799a95
DJ
2285 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2286 || event == PTRACE_EVENT_CLONE)
d6b0e80f 2287 {
3d799a95
DJ
2288 unsigned long new_pid;
2289 int ret;
2290
2291 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 2292
3d799a95
DJ
2293 /* If we haven't already seen the new PID stop, wait for it now. */
2294 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2295 {
2296 /* The new child has a pending SIGSTOP. We can't affect it until it
2297 hits the SIGSTOP, but we're already attached. */
2298 ret = my_waitpid (new_pid, &status,
2299 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2300 if (ret == -1)
2301 perror_with_name (_("waiting for new child"));
2302 else if (ret != new_pid)
2303 internal_error (__FILE__, __LINE__,
2304 _("wait returned unexpected PID %d"), ret);
2305 else if (!WIFSTOPPED (status))
2306 internal_error (__FILE__, __LINE__,
2307 _("wait returned unexpected status 0x%x"), status);
2308 }
2309
3a3e9ee3 2310 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 2311
2277426b
PA
2312 if (event == PTRACE_EVENT_FORK
2313 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2314 {
2277426b
PA
2315 /* Handle checkpointing by linux-fork.c here as a special
2316 case. We don't want the follow-fork-mode or 'catch fork'
2317 to interfere with this. */
2318
2319 /* This won't actually modify the breakpoint list, but will
2320 physically remove the breakpoints from the child. */
d80ee84f 2321 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
2322
2323 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2324 if (!find_fork_pid (new_pid))
2325 add_fork (new_pid);
2277426b
PA
2326
2327 /* Report as spurious, so that infrun doesn't want to follow
2328 this fork. We're actually doing an infcall in
2329 linux-fork.c. */
2330 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2331 linux_enable_event_reporting (pid_to_ptid (new_pid));
2332
2333 /* Report the stop to the core. */
2334 return 0;
2335 }
2336
3d799a95
DJ
2337 if (event == PTRACE_EVENT_FORK)
2338 ourstatus->kind = TARGET_WAITKIND_FORKED;
2339 else if (event == PTRACE_EVENT_VFORK)
2340 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2341 else
3d799a95 2342 {
78768c4a
JK
2343 struct lwp_info *new_lp;
2344
3d799a95 2345 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2346
3c4d7e12
PA
2347 if (debug_linux_nat)
2348 fprintf_unfiltered (gdb_stdlog,
2349 "LHEW: Got clone event "
2350 "from LWP %d, new child is LWP %ld\n",
2351 pid, new_pid);
2352
d90e17a7 2353 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
3d799a95 2354 new_lp->cloned = 1;
4c28f408 2355 new_lp->stopped = 1;
d6b0e80f 2356
3d799a95
DJ
2357 if (WSTOPSIG (status) != SIGSTOP)
2358 {
 2359 	 /* This can happen if someone starts sending signals with
 2360 	 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
 2361 	 thread before it gets a chance to run.
2362 This is an unlikely case, and harder to handle for
2363 fork / vfork than for clone, so we do not try - but
2364 we handle it for clone events here. We'll send
2365 the other signal on to the thread below. */
2366
2367 new_lp->signalled = 1;
2368 }
2369 else
79395f92
PA
2370 {
2371 struct thread_info *tp;
2372
2373 /* When we stop for an event in some other thread, and
2374 pull the thread list just as this thread has cloned,
2375 we'll have seen the new thread in the thread_db list
2376 before handling the CLONE event (glibc's
2377 pthread_create adds the new thread to the thread list
2378 before clone'ing, and has the kernel fill in the
2379 thread's tid on the clone call with
2380 CLONE_PARENT_SETTID). If that happened, and the core
2381 had requested the new thread to stop, we'll have
2382 killed it with SIGSTOP. But since SIGSTOP is not an
2383 RT signal, it can only be queued once. We need to be
2384 careful to not resume the LWP if we wanted it to
2385 stop. In that case, we'll leave the SIGSTOP pending.
a493e3e2 2386 It will later be reported as GDB_SIGNAL_0. */
79395f92
PA
2387 tp = find_thread_ptid (new_lp->ptid);
2388 if (tp != NULL && tp->stop_requested)
2389 new_lp->last_resume_kind = resume_stop;
2390 else
2391 status = 0;
2392 }
d6b0e80f 2393
4c28f408 2394 if (non_stop)
3d799a95 2395 {
4c28f408
PA
2396 /* Add the new thread to GDB's lists as soon as possible
2397 so that:
2398
2399 1) the frontend doesn't have to wait for a stop to
2400 display them, and,
2401
2402 2) we tag it with the correct running state. */
2403
2404 /* If the thread_db layer is active, let it know about
2405 this new thread, and add it to GDB's list. */
2406 if (!thread_db_attach_lwp (new_lp->ptid))
2407 {
2408 /* We're not using thread_db. Add it to GDB's
2409 list. */
2410 target_post_attach (GET_LWP (new_lp->ptid));
2411 add_thread (new_lp->ptid);
2412 }
2413
2414 if (!stopping)
2415 {
2416 set_running (new_lp->ptid, 1);
2417 set_executing (new_lp->ptid, 1);
e21ffe51
PA
2418 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2419 resume_stop. */
2420 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2421 }
2422 }
2423
79395f92
PA
2424 if (status != 0)
2425 {
2426 /* We created NEW_LP so it cannot yet contain STATUS. */
2427 gdb_assert (new_lp->status == 0);
2428
2429 /* Save the wait status to report later. */
2430 if (debug_linux_nat)
2431 fprintf_unfiltered (gdb_stdlog,
2432 "LHEW: waitpid of new LWP %ld, "
2433 "saving status %s\n",
2434 (long) GET_LWP (new_lp->ptid),
2435 status_to_str (status));
2436 new_lp->status = status;
2437 }
2438
ca2163eb
PA
2439 /* Note the need to use the low target ops to resume, to
2440 handle resuming with PT_SYSCALL if we have syscall
2441 catchpoints. */
4c28f408
PA
2442 if (!stopping)
2443 {
3d799a95 2444 new_lp->resumed = 1;
ca2163eb 2445
79395f92 2446 if (status == 0)
ad34eb2f 2447 {
e21ffe51 2448 gdb_assert (new_lp->last_resume_kind == resume_continue);
ad34eb2f
JK
2449 if (debug_linux_nat)
2450 fprintf_unfiltered (gdb_stdlog,
79395f92
PA
2451 "LHEW: resuming new LWP %ld\n",
2452 GET_LWP (new_lp->ptid));
7b50312a
PA
2453 if (linux_nat_prepare_to_resume != NULL)
2454 linux_nat_prepare_to_resume (new_lp);
79395f92 2455 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
a493e3e2 2456 0, GDB_SIGNAL_0);
79395f92 2457 new_lp->stopped = 0;
ad34eb2f
JK
2458 }
2459 }
d6b0e80f 2460
3d799a95
DJ
2461 if (debug_linux_nat)
2462 fprintf_unfiltered (gdb_stdlog,
3c4d7e12 2463 "LHEW: resuming parent LWP %d\n", pid);
7b50312a
PA
2464 if (linux_nat_prepare_to_resume != NULL)
2465 linux_nat_prepare_to_resume (lp);
ca2163eb 2466 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 2467 0, GDB_SIGNAL_0);
3d799a95
DJ
2468
2469 return 1;
2470 }
2471
2472 return 0;
d6b0e80f
AC
2473 }
2474
3d799a95
DJ
2475 if (event == PTRACE_EVENT_EXEC)
2476 {
a75724bc
PA
2477 if (debug_linux_nat)
2478 fprintf_unfiltered (gdb_stdlog,
2479 "LHEW: Got exec event from LWP %ld\n",
2480 GET_LWP (lp->ptid));
2481
3d799a95
DJ
2482 ourstatus->kind = TARGET_WAITKIND_EXECD;
2483 ourstatus->value.execd_pathname
6d8fd2b7 2484 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95 2485
6c95b8df
PA
2486 return 0;
2487 }
2488
2489 if (event == PTRACE_EVENT_VFORK_DONE)
2490 {
2491 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2492 {
6c95b8df 2493 if (debug_linux_nat)
3e43a32a
MS
2494 fprintf_unfiltered (gdb_stdlog,
2495 "LHEW: Got expected PTRACE_EVENT_"
2496 "VFORK_DONE from LWP %ld: stopping\n",
6c95b8df 2497 GET_LWP (lp->ptid));
3d799a95 2498
6c95b8df
PA
2499 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2500 return 0;
3d799a95
DJ
2501 }
2502
6c95b8df 2503 if (debug_linux_nat)
3e43a32a
MS
2504 fprintf_unfiltered (gdb_stdlog,
2505 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2506 "from LWP %ld: resuming\n",
6c95b8df
PA
2507 GET_LWP (lp->ptid));
2508 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2509 return 1;
3d799a95
DJ
2510 }
2511
2512 internal_error (__FILE__, __LINE__,
2513 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2514}
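
/* For illustration, the extended events handled above are encoded by the
   kernel in the high bits of the waitpid status once the
   PTRACE_O_TRACEFORK/VFORK/CLONE/EXEC options are enabled, which is why
   callers test "status >> 16 != 0".  Schematically:

       status == W_STOPCODE (SIGTRAP) | (PTRACE_EVENT_CLONE << 16)
       WIFSTOPPED (status)   // non-zero
       WSTOPSIG (status)     // SIGTRAP
       status >> 16          // PTRACE_EVENT_CLONE

   The new child's pid is not in the status at all; it is fetched separately
   with PTRACE_GETEVENTMSG, as linux_handle_extended_wait does above.  */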
2515
2516/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2517 exited. */
2518
2519static int
2520wait_lwp (struct lwp_info *lp)
2521{
2522 pid_t pid;
432b4d03 2523 int status = 0;
d6b0e80f 2524 int thread_dead = 0;
432b4d03 2525 sigset_t prev_mask;
d6b0e80f
AC
2526
2527 gdb_assert (!lp->stopped);
2528 gdb_assert (lp->status == 0);
2529
432b4d03
JK
2530 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2531 block_child_signals (&prev_mask);
2532
2533 for (;;)
d6b0e80f 2534 {
432b4d03
JK
2535 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2536 was right and we should just call sigsuspend. */
2537
2538 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
d6b0e80f 2539 if (pid == -1 && errno == ECHILD)
432b4d03 2540 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2541 if (pid == -1 && errno == ECHILD)
2542 {
2543 /* The thread has previously exited. We need to delete it
2544 now because, for some vendor 2.4 kernels with NPTL
2545 support backported, there won't be an exit event unless
2546 it is the main thread. 2.6 kernels will report an exit
2547 event for each thread that exits, as expected. */
2548 thread_dead = 1;
2549 if (debug_linux_nat)
2550 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2551 target_pid_to_str (lp->ptid));
2552 }
432b4d03
JK
2553 if (pid != 0)
2554 break;
2555
2556 /* Bugs 10970, 12702.
2557 Thread group leader may have exited in which case we'll lock up in
2558 waitpid if there are other threads, even if they are all zombies too.
2559 Basically, we're not supposed to use waitpid this way.
2560 __WCLONE is not applicable for the leader so we can't use that.
2561 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2562 process; it gets ESRCH both for the zombie and for running processes.
2563
2564 As a workaround, check if we're waiting for the thread group leader and
2565 if it's a zombie, and avoid calling waitpid if it is.
2566
 2567 	 This is racy: what if the tgl becomes a zombie right after we check?
 2568 	 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
 5f572dec 2569 	 blocking waitpid, but linux_proc_pid_is_zombie can be checked safely. */
432b4d03
JK
2570
2571 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
5f572dec 2572 && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
d6b0e80f 2573 {
d6b0e80f
AC
2574 thread_dead = 1;
2575 if (debug_linux_nat)
432b4d03
JK
2576 fprintf_unfiltered (gdb_stdlog,
2577 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2578 target_pid_to_str (lp->ptid));
432b4d03 2579 break;
d6b0e80f 2580 }
432b4d03
JK
2581
2582 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2583 get invoked despite our caller had them intentionally blocked by
2584 block_child_signals. This is sensitive only to the loop of
2585 linux_nat_wait_1 and there if we get called my_waitpid gets called
2586 again before it gets to sigsuspend so we can safely let the handlers
2587 get executed here. */
2588
2589 sigsuspend (&suspend_mask);
2590 }
2591
2592 restore_child_signals_mask (&prev_mask);
2593
d6b0e80f
AC
2594 if (!thread_dead)
2595 {
2596 gdb_assert (pid == GET_LWP (lp->ptid));
2597
2598 if (debug_linux_nat)
2599 {
2600 fprintf_unfiltered (gdb_stdlog,
2601 "WL: waitpid %s received %s\n",
2602 target_pid_to_str (lp->ptid),
2603 status_to_str (status));
2604 }
d6b0e80f 2605
a9f4bb21
PA
2606 /* Check if the thread has exited. */
2607 if (WIFEXITED (status) || WIFSIGNALED (status))
2608 {
2609 thread_dead = 1;
2610 if (debug_linux_nat)
2611 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2612 target_pid_to_str (lp->ptid));
2613 }
d6b0e80f
AC
2614 }
2615
2616 if (thread_dead)
2617 {
e26af52f 2618 exit_lwp (lp);
d6b0e80f
AC
2619 return 0;
2620 }
2621
2622 gdb_assert (WIFSTOPPED (status));
2623
ca2163eb
PA
2624 /* Handle GNU/Linux's syscall SIGTRAPs. */
2625 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2626 {
2627 /* No longer need the sysgood bit. The ptrace event ends up
2628 recorded in lp->waitstatus if we care for it. We can carry
2629 on handling the event like a regular SIGTRAP from here
2630 on. */
2631 status = W_STOPCODE (SIGTRAP);
2632 if (linux_handle_syscall_trap (lp, 1))
2633 return wait_lwp (lp);
2634 }
2635
d6b0e80f
AC
2636 /* Handle GNU/Linux's extended waitstatus for trace events. */
2637 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2638 {
2639 if (debug_linux_nat)
2640 fprintf_unfiltered (gdb_stdlog,
2641 "WL: Handling extended status 0x%06x\n",
2642 status);
3d799a95 2643 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2644 return wait_lwp (lp);
2645 }
2646
2647 return status;
2648}
2649
2650/* Send a SIGSTOP to LP. */
2651
2652static int
2653stop_callback (struct lwp_info *lp, void *data)
2654{
2655 if (!lp->stopped && !lp->signalled)
2656 {
2657 int ret;
2658
2659 if (debug_linux_nat)
2660 {
2661 fprintf_unfiltered (gdb_stdlog,
2662 "SC: kill %s **<SIGSTOP>**\n",
2663 target_pid_to_str (lp->ptid));
2664 }
2665 errno = 0;
2666 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2667 if (debug_linux_nat)
2668 {
2669 fprintf_unfiltered (gdb_stdlog,
2670 "SC: lwp kill %d %s\n",
2671 ret,
2672 errno ? safe_strerror (errno) : "ERRNO-OK");
2673 }
2674
2675 lp->signalled = 1;
2676 gdb_assert (lp->status == 0);
2677 }
2678
2679 return 0;
2680}
2681
7b50312a
PA
2682/* Request a stop on LWP. */
2683
2684void
2685linux_stop_lwp (struct lwp_info *lwp)
2686{
2687 stop_callback (lwp, NULL);
2688}
2689
57380f4e 2690/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2691
2692static int
57380f4e
DJ
2693linux_nat_has_pending_sigint (int pid)
2694{
2695 sigset_t pending, blocked, ignored;
57380f4e
DJ
2696
2697 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2698
2699 if (sigismember (&pending, SIGINT)
2700 && !sigismember (&ignored, SIGINT))
2701 return 1;
2702
2703 return 0;
2704}
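
/* For illustration, linux_proc_pending_signals is assumed to derive these
   sets from /proc/<pid>/status, whose SigPnd/ShdPnd, SigBlk and SigIgn
   lines are hexadecimal signal masks, e.g.:

       SigPnd: 0000000000000002     // bit 1 set: SIGINT (signal 2) pending

   so "a pending, not ignored SIGINT" reduces to the two sigismember checks
   above on the parsed masks.  */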
2705
2706/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2707
2708static int
2709set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2710{
57380f4e
DJ
2711 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2712 flag to consume the next one. */
2713 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2714 && WSTOPSIG (lp->status) == SIGINT)
2715 lp->status = 0;
2716 else
2717 lp->ignore_sigint = 1;
2718
2719 return 0;
2720}
2721
2722/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2723 This function is called after we know the LWP has stopped; if the LWP
2724 stopped before the expected SIGINT was delivered, then it will never have
2725 arrived. Also, if the signal was delivered to a shared queue and consumed
2726 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2727
57380f4e
DJ
2728static void
2729maybe_clear_ignore_sigint (struct lwp_info *lp)
2730{
2731 if (!lp->ignore_sigint)
2732 return;
2733
2734 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2735 {
2736 if (debug_linux_nat)
2737 fprintf_unfiltered (gdb_stdlog,
2738 "MCIS: Clearing bogus flag for %s\n",
2739 target_pid_to_str (lp->ptid));
2740 lp->ignore_sigint = 0;
2741 }
2742}
2743
ebec9a0f
PA
2744/* Fetch the possible triggered data watchpoint info and store it in
2745 LP.
2746
2747 On some archs, like x86, that use debug registers to set
2748 watchpoints, it's possible that the way to know which watched
 2749 	 address trapped is to check the register that is used to select
2750 which address to watch. Problem is, between setting the watchpoint
2751 and reading back which data address trapped, the user may change
2752 the set of watchpoints, and, as a consequence, GDB changes the
2753 debug registers in the inferior. To avoid reading back a stale
2754 stopped-data-address when that happens, we cache in LP the fact
2755 that a watchpoint trapped, and the corresponding data address, as
2756 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2757 registers meanwhile, we have the cached data we can rely on. */
2758
2759static void
2760save_sigtrap (struct lwp_info *lp)
2761{
2762 struct cleanup *old_chain;
2763
2764 if (linux_ops->to_stopped_by_watchpoint == NULL)
2765 {
2766 lp->stopped_by_watchpoint = 0;
2767 return;
2768 }
2769
2770 old_chain = save_inferior_ptid ();
2771 inferior_ptid = lp->ptid;
2772
2773 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2774
2775 if (lp->stopped_by_watchpoint)
2776 {
2777 if (linux_ops->to_stopped_data_address != NULL)
2778 lp->stopped_data_address_p =
2779 linux_ops->to_stopped_data_address (&current_target,
2780 &lp->stopped_data_address);
2781 else
2782 lp->stopped_data_address_p = 0;
2783 }
2784
2785 do_cleanups (old_chain);
2786}
2787
2788/* See save_sigtrap. */
2789
2790static int
2791linux_nat_stopped_by_watchpoint (void)
2792{
2793 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2794
2795 gdb_assert (lp != NULL);
2796
2797 return lp->stopped_by_watchpoint;
2798}
2799
2800static int
2801linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2802{
2803 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2804
2805 gdb_assert (lp != NULL);
2806
2807 *addr_p = lp->stopped_data_address;
2808
2809 return lp->stopped_data_address_p;
2810}
2811
26ab7092
JK
 2812/* Commonly any breakpoint / watchpoint generates only SIGTRAP. */
2813
2814static int
2815sigtrap_is_event (int status)
2816{
2817 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2818}
2819
2820/* SIGTRAP-like events recognizer. */
2821
2822static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2823
00390b84
JK
2824/* Check for SIGTRAP-like events in LP. */
2825
2826static int
2827linux_nat_lp_status_is_event (struct lwp_info *lp)
2828{
2829 /* We check for lp->waitstatus in addition to lp->status, because we can
2830 have pending process exits recorded in lp->status
2831 and W_EXITCODE(0,0) == 0. We should probably have an additional
2832 lp->status_p flag. */
2833
2834 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2835 && linux_nat_status_is_event (lp->status));
2836}
2837
26ab7092
JK
 2838/* Set alternative SIGTRAP-like events recognizer. If
 2839 breakpoint_inserted_here_p reports a breakpoint there, then
 2840 gdbarch_decr_pc_after_break will be applied. */
2841
2842void
2843linux_nat_set_status_is_event (struct target_ops *t,
2844 int (*status_is_event) (int status))
2845{
2846 linux_nat_status_is_event = status_is_event;
2847}
2848
57380f4e
DJ
2849/* Wait until LP is stopped. */
2850
2851static int
2852stop_wait_callback (struct lwp_info *lp, void *data)
2853{
6c95b8df
PA
2854 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2855
2856 /* If this is a vfork parent, bail out, it is not going to report
2857 any SIGSTOP until the vfork is done with. */
2858 if (inf->vfork_child != NULL)
2859 return 0;
2860
d6b0e80f
AC
2861 if (!lp->stopped)
2862 {
2863 int status;
2864
2865 status = wait_lwp (lp);
2866 if (status == 0)
2867 return 0;
2868
57380f4e
DJ
2869 if (lp->ignore_sigint && WIFSTOPPED (status)
2870 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2871 {
57380f4e 2872 lp->ignore_sigint = 0;
d6b0e80f
AC
2873
2874 errno = 0;
2875 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2876 if (debug_linux_nat)
2877 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2878 "PTRACE_CONT %s, 0, 0 (%s) "
2879 "(discarding SIGINT)\n",
d6b0e80f
AC
2880 target_pid_to_str (lp->ptid),
2881 errno ? safe_strerror (errno) : "OK");
2882
57380f4e 2883 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2884 }
2885
57380f4e
DJ
2886 maybe_clear_ignore_sigint (lp);
2887
d6b0e80f
AC
2888 if (WSTOPSIG (status) != SIGSTOP)
2889 {
e5ef252a 2890 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2891
e5ef252a
PA
2892 save_sigtrap (lp);
2893
2894 if (debug_linux_nat)
2895 fprintf_unfiltered (gdb_stdlog,
2896 "SWC: Pending event %s in %s\n",
2897 status_to_str ((int) status),
2898 target_pid_to_str (lp->ptid));
2899
2900 /* Save the sigtrap event. */
2901 lp->status = status;
2902 gdb_assert (!lp->stopped);
2903 gdb_assert (lp->signalled);
2904 lp->stopped = 1;
d6b0e80f
AC
2905 }
2906 else
2907 {
2908 /* We caught the SIGSTOP that we intended to catch, so
2909 there's no SIGSTOP pending. */
e5ef252a
PA
2910
2911 if (debug_linux_nat)
2912 fprintf_unfiltered (gdb_stdlog,
2913 "SWC: Delayed SIGSTOP caught for %s.\n",
2914 target_pid_to_str (lp->ptid));
2915
d6b0e80f 2916 lp->stopped = 1;
e5ef252a
PA
2917
2918 /* Reset SIGNALLED only after the stop_wait_callback call
2919 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2920 lp->signalled = 0;
2921 }
2922 }
2923
2924 return 0;
2925}
2926
d6b0e80f
AC
2927/* Return non-zero if LP has a wait status pending. */
2928
2929static int
2930status_callback (struct lwp_info *lp, void *data)
2931{
2932 /* Only report a pending wait status if we pretend that this has
2933 indeed been resumed. */
ca2163eb
PA
2934 if (!lp->resumed)
2935 return 0;
2936
2937 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2938 {
2939 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2940 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
 2941 	 0', so a clean process exit cannot be stored pending in
 2942 	 lp->status; it is indistinguishable from
2943 no-pending-status. */
2944 return 1;
2945 }
2946
2947 if (lp->status != 0)
2948 return 1;
2949
2950 return 0;
d6b0e80f
AC
2951}
2952
2953/* Return non-zero if LP isn't stopped. */
2954
2955static int
2956running_callback (struct lwp_info *lp, void *data)
2957{
25289eb2
PA
2958 return (!lp->stopped
2959 || ((lp->status != 0
2960 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2961 && lp->resumed));
d6b0e80f
AC
2962}
2963
2964/* Count the LWP's that have had events. */
2965
2966static int
2967count_events_callback (struct lwp_info *lp, void *data)
2968{
2969 int *count = data;
2970
2971 gdb_assert (count != NULL);
2972
e09490f1 2973 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2974 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2975 (*count)++;
2976
2977 return 0;
2978}
2979
2980/* Select the LWP (if any) that is currently being single-stepped. */
2981
2982static int
2983select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2984{
25289eb2
PA
2985 if (lp->last_resume_kind == resume_step
2986 && lp->status != 0)
d6b0e80f
AC
2987 return 1;
2988 else
2989 return 0;
2990}
2991
2992/* Select the Nth LWP that has had a SIGTRAP event. */
2993
2994static int
2995select_event_lwp_callback (struct lwp_info *lp, void *data)
2996{
2997 int *selector = data;
2998
2999 gdb_assert (selector != NULL);
3000
1777feb0 3001 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 3002 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
3003 if ((*selector)-- == 0)
3004 return 1;
3005
3006 return 0;
3007}
3008
710151dd
PA
3009static int
3010cancel_breakpoint (struct lwp_info *lp)
3011{
3012 /* Arrange for a breakpoint to be hit again later. We don't keep
3013 the SIGTRAP status and don't forward the SIGTRAP signal to the
 3014 	 LWP. We will handle the current event; eventually we will resume
3015 this LWP, and this breakpoint will trap again.
3016
3017 If we do not do this, then we run the risk that the user will
3018 delete or disable the breakpoint, but the LWP will have already
3019 tripped on it. */
3020
515630c5
UW
3021 struct regcache *regcache = get_thread_regcache (lp->ptid);
3022 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3023 CORE_ADDR pc;
3024
3025 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 3026 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
3027 {
3028 if (debug_linux_nat)
3029 fprintf_unfiltered (gdb_stdlog,
3030 "CB: Push back breakpoint for %s\n",
3031 target_pid_to_str (lp->ptid));
3032
3033 /* Back up the PC if necessary. */
515630c5
UW
3034 if (gdbarch_decr_pc_after_break (gdbarch))
3035 regcache_write_pc (regcache, pc);
3036
710151dd
PA
3037 return 1;
3038 }
3039 return 0;
3040}
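
/* For illustration, gdbarch_decr_pc_after_break compensates for targets
   where the breakpoint trap reports a PC past the breakpoint instruction.
   On x86, for example, the int3 instruction is one byte long and the
   reported PC points just after it, so the adjustment in cancel_breakpoint
   amounts to:

       pc = regcache_read_pc (regcache) - 1;   // x86: decr_pc_after_break == 1

   which lands back on the breakpoint address that
   breakpoint_inserted_here_p is then asked about.  */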
3041
d6b0e80f
AC
3042static int
3043cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3044{
3045 struct lwp_info *event_lp = data;
3046
3047 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3048 if (lp == event_lp)
3049 return 0;
3050
3051 /* If a LWP other than the LWP that we're reporting an event for has
3052 hit a GDB breakpoint (as opposed to some random trap signal),
3053 then just arrange for it to hit it again later. We don't keep
3054 the SIGTRAP status and don't forward the SIGTRAP signal to the
 3055 	 LWP. We will handle the current event; eventually we will resume
3056 all LWPs, and this one will get its breakpoint trap again.
3057
3058 If we do not do this, then we run the risk that the user will
3059 delete or disable the breakpoint, but the LWP will have already
3060 tripped on it. */
3061
00390b84 3062 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
3063 && cancel_breakpoint (lp))
3064 /* Throw away the SIGTRAP. */
3065 lp->status = 0;
d6b0e80f
AC
3066
3067 return 0;
3068}
3069
3070/* Select one LWP out of those that have events pending. */
3071
3072static void
d90e17a7 3073select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
3074{
3075 int num_events = 0;
3076 int random_selector;
3077 struct lwp_info *event_lp;
3078
ac264b3b 3079 /* Record the wait status for the original LWP. */
d6b0e80f
AC
3080 (*orig_lp)->status = *status;
3081
3082 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
3083 event_lp = iterate_over_lwps (filter,
3084 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
3085 if (event_lp != NULL)
3086 {
3087 if (debug_linux_nat)
3088 fprintf_unfiltered (gdb_stdlog,
3089 "SEL: Select single-step %s\n",
3090 target_pid_to_str (event_lp->ptid));
3091 }
3092 else
3093 {
3094 /* No single-stepping LWP. Select one at random, out of those
3095 which have had SIGTRAP events. */
3096
3097 /* First see how many SIGTRAP events we have. */
d90e17a7 3098 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
3099
3100 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3101 random_selector = (int)
3102 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3103
3104 if (debug_linux_nat && num_events > 1)
3105 fprintf_unfiltered (gdb_stdlog,
3106 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3107 num_events, random_selector);
3108
d90e17a7
PA
3109 event_lp = iterate_over_lwps (filter,
3110 select_event_lwp_callback,
d6b0e80f
AC
3111 &random_selector);
3112 }
3113
3114 if (event_lp != NULL)
3115 {
3116 /* Switch the event LWP. */
3117 *orig_lp = event_lp;
3118 *status = event_lp->status;
3119 }
3120
3121 /* Flush the wait status for the event LWP. */
3122 (*orig_lp)->status = 0;
3123}
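
/* For illustration, the random selection above uses the classic scaling
   idiom

       random_selector = (int) ((num_events * (double) rand ())
                                / (RAND_MAX + 1.0));

   which maps rand()'s range [0, RAND_MAX] uniformly onto the integers
   0 .. num_events - 1, so every LWP with a pending SIGTRAP has roughly the
   same chance of having its event reported first.  This keeps one thread
   from starving the others when several hit breakpoints at the same
   time.  */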
3124
3125/* Return non-zero if LP has been resumed. */
3126
3127static int
3128resumed_callback (struct lwp_info *lp, void *data)
3129{
3130 return lp->resumed;
3131}
3132
12d9289a
PA
3133/* Stop an active thread, verify it still exists, then resume it. If
3134 the thread ends up with a pending status, then it is not resumed,
 3135 	 and *DATA (really a pointer to int) is set. */
d6b0e80f
AC
3136
3137static int
3138stop_and_resume_callback (struct lwp_info *lp, void *data)
3139{
12d9289a
PA
3140 int *new_pending_p = data;
3141
25289eb2 3142 if (!lp->stopped)
d6b0e80f 3143 {
25289eb2
PA
3144 ptid_t ptid = lp->ptid;
3145
d6b0e80f
AC
3146 stop_callback (lp, NULL);
3147 stop_wait_callback (lp, NULL);
25289eb2
PA
3148
3149 /* Resume if the lwp still exists, and the core wanted it
3150 running. */
12d9289a
PA
3151 lp = find_lwp_pid (ptid);
3152 if (lp != NULL)
25289eb2 3153 {
12d9289a
PA
3154 if (lp->last_resume_kind == resume_stop
3155 && lp->status == 0)
3156 {
3157 /* The core wanted the LWP to stop. Even if it stopped
3158 cleanly (with SIGSTOP), leave the event pending. */
3159 if (debug_linux_nat)
3160 fprintf_unfiltered (gdb_stdlog,
3161 "SARC: core wanted LWP %ld stopped "
3162 "(leaving SIGSTOP pending)\n",
3163 GET_LWP (lp->ptid));
3164 lp->status = W_STOPCODE (SIGSTOP);
3165 }
3166
3167 if (lp->status == 0)
3168 {
3169 if (debug_linux_nat)
3170 fprintf_unfiltered (gdb_stdlog,
3171 "SARC: re-resuming LWP %ld\n",
3172 GET_LWP (lp->ptid));
e5ef252a 3173 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
3174 }
3175 else
3176 {
3177 if (debug_linux_nat)
3178 fprintf_unfiltered (gdb_stdlog,
3179 "SARC: not re-resuming LWP %ld "
3180 "(has pending)\n",
3181 GET_LWP (lp->ptid));
3182 if (new_pending_p)
3183 *new_pending_p = 1;
3184 }
25289eb2 3185 }
d6b0e80f
AC
3186 }
3187 return 0;
3188}
3189
02f3fc28 3190/* Check if we should go on and pass this event to common code.
12d9289a
PA
3191 Return the affected lwp if we are, or NULL otherwise. If we stop
3192 all lwps temporarily, we may end up with new pending events in some
3193 other lwp. In that case set *NEW_PENDING_P to true. */
3194
02f3fc28 3195static struct lwp_info *
0e5bf2a8 3196linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
02f3fc28
PA
3197{
3198 struct lwp_info *lp;
3199
12d9289a
PA
3200 *new_pending_p = 0;
3201
02f3fc28
PA
3202 lp = find_lwp_pid (pid_to_ptid (lwpid));
3203
3204 /* Check for stop events reported by a process we didn't already
3205 know about - anything not already in our LWP list.
3206
3207 If we're expecting to receive stopped processes after
3208 fork, vfork, and clone events, then we'll just add the
3209 new one to our list and go back to waiting for the event
3210 to be reported - the stopped process might be returned
0e5bf2a8
PA
3211 from waitpid before or after the event is.
3212
3213 But note the case of a non-leader thread exec'ing after the
 3214 	 leader has exited and gone from our lists. The non-leader
3215 thread changes its tid to the tgid. */
3216
3217 if (WIFSTOPPED (status) && lp == NULL
3218 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3219 {
3220 /* A multi-thread exec after we had seen the leader exiting. */
3221 if (debug_linux_nat)
3222 fprintf_unfiltered (gdb_stdlog,
3223 "LLW: Re-adding thread group leader LWP %d.\n",
3224 lwpid);
3225
3226 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3227 lp->stopped = 1;
3228 lp->resumed = 1;
3229 add_thread (lp->ptid);
3230 }
3231
02f3fc28
PA
3232 if (WIFSTOPPED (status) && !lp)
3233 {
84636d28 3234 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3235 return NULL;
3236 }
3237
3238 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3239 our list, i.e. not part of the current process. This can happen
fd62cb89 3240 if we detach from a program we originally forked and then it
02f3fc28
PA
3241 exits. */
3242 if (!WIFSTOPPED (status) && !lp)
3243 return NULL;
3244
ca2163eb
PA
3245 /* Handle GNU/Linux's syscall SIGTRAPs. */
3246 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3247 {
3248 /* No longer need the sysgood bit. The ptrace event ends up
3249 recorded in lp->waitstatus if we care for it. We can carry
3250 on handling the event like a regular SIGTRAP from here
3251 on. */
3252 status = W_STOPCODE (SIGTRAP);
3253 if (linux_handle_syscall_trap (lp, 0))
3254 return NULL;
3255 }
02f3fc28 3256
ca2163eb
PA
3257 /* Handle GNU/Linux's extended waitstatus for trace events. */
3258 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3259 {
3260 if (debug_linux_nat)
3261 fprintf_unfiltered (gdb_stdlog,
3262 "LLW: Handling extended status 0x%06x\n",
3263 status);
3264 if (linux_handle_extended_wait (lp, status, 0))
3265 return NULL;
3266 }
3267
26ab7092 3268 if (linux_nat_status_is_event (status))
da559b09 3269 save_sigtrap (lp);
ca2163eb 3270
02f3fc28 3271 /* Check if the thread has exited. */
d90e17a7
PA
3272 if ((WIFEXITED (status) || WIFSIGNALED (status))
3273 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3274 {
9db03742
JB
3275 /* If this is the main thread, we must stop all threads and verify
3276 if they are still alive. This is because in the nptl thread model
3277 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3278 other than the main thread. We only get the main thread exit
3279 signal once all child threads have already exited. If we
3280 stop all the threads and use the stop_wait_callback to check
3281 if they have exited we can determine whether this signal
3282 should be ignored or whether it means the end of the debugged
3283 application, regardless of which threading model is being
5d3b6af6 3284 used. */
02f3fc28
PA
3285 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3286 {
3287 lp->stopped = 1;
d90e17a7 3288 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
12d9289a 3289 stop_and_resume_callback, new_pending_p);
02f3fc28
PA
3290 }
3291
3292 if (debug_linux_nat)
3293 fprintf_unfiltered (gdb_stdlog,
3294 "LLW: %s exited.\n",
3295 target_pid_to_str (lp->ptid));
3296
d90e17a7 3297 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3298 {
3299 /* If there is at least one more LWP, then the exit signal
3300 was not the end of the debugged application and should be
3301 ignored. */
3302 exit_lwp (lp);
3303 return NULL;
3304 }
02f3fc28
PA
3305 }
3306
3307 /* Check if the current LWP has previously exited. In the nptl
3308 thread model, LWPs other than the main thread do not issue
3309 signals when they exit so we must check whenever the thread has
3310 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3311 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3312 {
d90e17a7
PA
3313 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3314
02f3fc28
PA
3315 if (debug_linux_nat)
3316 fprintf_unfiltered (gdb_stdlog,
3317 "LLW: %s exited.\n",
3318 target_pid_to_str (lp->ptid));
3319
3320 exit_lwp (lp);
3321
3322 /* Make sure there is at least one thread running. */
d90e17a7 3323 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3324
3325 /* Discard the event. */
3326 return NULL;
3327 }
3328
3329 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3330 an attempt to stop an LWP. */
3331 if (lp->signalled
3332 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3333 {
3334 if (debug_linux_nat)
3335 fprintf_unfiltered (gdb_stdlog,
3336 "LLW: Delayed SIGSTOP caught for %s.\n",
3337 target_pid_to_str (lp->ptid));
3338
02f3fc28
PA
3339 lp->signalled = 0;
3340
25289eb2
PA
3341 if (lp->last_resume_kind != resume_stop)
3342 {
3343 /* This is a delayed SIGSTOP. */
02f3fc28 3344
25289eb2
PA
3345 registers_changed ();
3346
7b50312a
PA
3347 if (linux_nat_prepare_to_resume != NULL)
3348 linux_nat_prepare_to_resume (lp);
25289eb2 3349 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3350 lp->step, GDB_SIGNAL_0);
25289eb2
PA
3351 if (debug_linux_nat)
3352 fprintf_unfiltered (gdb_stdlog,
3353 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3354 lp->step ?
3355 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3356 target_pid_to_str (lp->ptid));
02f3fc28 3357
25289eb2
PA
3358 lp->stopped = 0;
3359 gdb_assert (lp->resumed);
02f3fc28 3360
25289eb2
PA
3361 /* Discard the event. */
3362 return NULL;
3363 }
02f3fc28
PA
3364 }
3365
57380f4e
DJ
3366 /* Make sure we don't report a SIGINT that we have already displayed
3367 for another thread. */
3368 if (lp->ignore_sigint
3369 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3370 {
3371 if (debug_linux_nat)
3372 fprintf_unfiltered (gdb_stdlog,
3373 "LLW: Delayed SIGINT caught for %s.\n",
3374 target_pid_to_str (lp->ptid));
3375
3376 /* This is a delayed SIGINT. */
3377 lp->ignore_sigint = 0;
3378
3379 registers_changed ();
7b50312a
PA
3380 if (linux_nat_prepare_to_resume != NULL)
3381 linux_nat_prepare_to_resume (lp);
28439f5e 3382 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3383 lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3384 if (debug_linux_nat)
3385 fprintf_unfiltered (gdb_stdlog,
3386 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3387 lp->step ?
3388 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3389 target_pid_to_str (lp->ptid));
3390
3391 lp->stopped = 0;
3392 gdb_assert (lp->resumed);
3393
3394 /* Discard the event. */
3395 return NULL;
3396 }
3397
02f3fc28
PA
3398 /* An interesting event. */
3399 gdb_assert (lp);
ca2163eb 3400 lp->status = status;
02f3fc28
PA
3401 return lp;
3402}
3403
0e5bf2a8
PA
3404/* Detect zombie thread group leaders, and "exit" them. We can't reap
3405 their exits until all other threads in the group have exited. */
3406
3407static void
3408check_zombie_leaders (void)
3409{
3410 struct inferior *inf;
3411
3412 ALL_INFERIORS (inf)
3413 {
3414 struct lwp_info *leader_lp;
3415
3416 if (inf->pid == 0)
3417 continue;
3418
3419 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3420 if (leader_lp != NULL
3421 /* Check if there are other threads in the group, as we may
3422 have raced with the inferior simply exiting. */
3423 && num_lwps (inf->pid) > 1
5f572dec 3424 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3425 {
3426 if (debug_linux_nat)
3427 fprintf_unfiltered (gdb_stdlog,
3428 "CZL: Thread group leader %d zombie "
3429 "(it exited, or another thread execd).\n",
3430 inf->pid);
3431
3432 /* A leader zombie can mean one of two things:
3433
3434 - It exited, and there's an exit status pending
3435 available, or only the leader exited (not the whole
3436 program). In the latter case, we can't waitpid the
3437 leader's exit status until all other threads are gone.
3438
3439 - There are 3 or more threads in the group, and a thread
3440 other than the leader exec'd. On an exec, the Linux
3441 kernel destroys all other threads (except the execing
3442 one) in the thread group, and resets the execing thread's
3443 tid to the tgid. No exit notification is sent for the
3444 execing thread -- from the ptracer's perspective, it
3445 appears as though the execing thread just vanishes.
3446 Until we reap all other threads except the leader and the
3447 execing thread, the leader will be zombie, and the
3448 execing thread will be in `D (disc sleep)'. As soon as
3449 all other threads are reaped, the execing thread changes
3450 its tid to the tgid, and the previous (zombie) leader
3451 vanishes, giving place to the "new" leader. We could try
3452 distinguishing the exit and exec cases, by waiting once
3453 more, and seeing if something comes out, but it doesn't
3454 sound useful. The previous leader _does_ go away, and
3455 we'll re-add the new one once we see the exec event
3456 (which is just the same as what would happen if the
3457 previous leader did exit voluntarily before some other
3458 thread execs). */
3459
3460 if (debug_linux_nat)
3461 fprintf_unfiltered (gdb_stdlog,
3462 "CZL: Thread group leader %d vanished.\n",
3463 inf->pid);
3464 exit_lwp (leader_lp);
3465 }
3466 }
3467}
3468
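/* Illustrative sketch, not part of GDB: a zombie check such as
   linux_proc_pid_is_zombie above presumably boils down to reading
   /proc/PID/status and testing whether the "State:" field reports
   'Z'.  A minimal standalone version, using only standard libc
   (example_pid_is_zombie is a hypothetical name):  */

#include <stdio.h>
#include <string.h>

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The field looks like "State:\tZ (zombie)".  */
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	zombie = (*p == 'Z');
	break;
      }

  fclose (f);
  return zombie;
}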
d6b0e80f 3469static ptid_t
7feb7d06 3470linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3471 ptid_t ptid, struct target_waitstatus *ourstatus,
3472 int target_options)
d6b0e80f 3473{
7feb7d06 3474 static sigset_t prev_mask;
4b60df3d 3475 enum resume_kind last_resume_kind;
12d9289a 3476 struct lwp_info *lp;
12d9289a 3477 int status;
d6b0e80f 3478
01124a23 3479 if (debug_linux_nat)
b84876c2
PA
3480 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3481
f973ed9c
DJ
3482 /* The first time we get here after starting a new inferior, we may
3483 not have added it to the LWP list yet - this is the earliest
3484 moment at which we know its PID. */
d90e17a7 3485 if (ptid_is_pid (inferior_ptid))
f973ed9c 3486 {
27c9d204
PA
3487 /* Upgrade the main thread's ptid. */
3488 thread_change_ptid (inferior_ptid,
3489 BUILD_LWP (GET_PID (inferior_ptid),
3490 GET_PID (inferior_ptid)));
3491
f973ed9c
DJ
3492 lp = add_lwp (inferior_ptid);
3493 lp->resumed = 1;
3494 }
3495
7feb7d06
PA
3496 /* Make sure SIGCHLD is blocked. */
3497 block_child_signals (&prev_mask);
d6b0e80f
AC
3498
3499retry:
d90e17a7
PA
3500 lp = NULL;
3501 status = 0;
d6b0e80f
AC
3502
3503 /* First check if there is a LWP with a wait status pending. */
0e5bf2a8 3504 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d6b0e80f 3505 {
0e5bf2a8 3506 /* Any LWP in the PTID group that's been resumed will do. */
d90e17a7 3507 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3508 if (lp)
3509 {
ca2163eb 3510 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3511 fprintf_unfiltered (gdb_stdlog,
3512 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3513 status_to_str (lp->status),
d6b0e80f
AC
3514 target_pid_to_str (lp->ptid));
3515 }
d6b0e80f
AC
3516 }
3517 else if (is_lwp (ptid))
3518 {
3519 if (debug_linux_nat)
3520 fprintf_unfiltered (gdb_stdlog,
3521 "LLW: Waiting for specific LWP %s.\n",
3522 target_pid_to_str (ptid));
3523
3524 /* We have a specific LWP to check. */
3525 lp = find_lwp_pid (ptid);
3526 gdb_assert (lp);
d6b0e80f 3527
ca2163eb 3528 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3529 fprintf_unfiltered (gdb_stdlog,
3530 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3531 status_to_str (lp->status),
d6b0e80f
AC
3532 target_pid_to_str (lp->ptid));
3533
d90e17a7
PA
3534 /* We check for lp->waitstatus in addition to lp->status,
3535 because we can have pending process exits recorded in
3536 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3537 an additional lp->status_p flag. */
ca2163eb 3538 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3539 lp = NULL;
d6b0e80f
AC
3540 }
3541
b84876c2
PA
3542 if (!target_can_async_p ())
3543 {
3544 /* Causes SIGINT to be passed on to the attached process. */
3545 set_sigint_trap ();
b84876c2 3546 }
d6b0e80f 3547
0e5bf2a8 3548 /* But if we don't find a pending event, we'll have to wait. */
7feb7d06 3549
d90e17a7 3550 while (lp == NULL)
d6b0e80f
AC
3551 {
3552 pid_t lwpid;
3553
0e5bf2a8
PA
3554 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3555 quirks:
3556
3557 - If the thread group leader exits while other threads in the
3558 thread group still exist, waitpid(TGID, ...) hangs. That
3559 waitpid won't return an exit status until the other threads
3560 in the group are reaped.
3561
3562 - When a non-leader thread execs, that thread just vanishes
3563 without reporting an exit (so we'd hang if we waited for it
3564 explicitly in that case). The exec event is reported to
3565 the TGID pid. */
3566
3567 errno = 0;
3568 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3569 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3570 lwpid = my_waitpid (-1, &status, WNOHANG);
3571
3572 if (debug_linux_nat)
3573 fprintf_unfiltered (gdb_stdlog,
3574 "LNW: waitpid(-1, ...) returned %d, %s\n",
3575 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3576
d6b0e80f
AC
3577 if (lwpid > 0)
3578 {
12d9289a
PA
3579 /* If this is true, then we paused LWPs momentarily, and may
3580 now have pending events to handle. */
3581 int new_pending;
3582
d6b0e80f
AC
3583 if (debug_linux_nat)
3584 {
3585 fprintf_unfiltered (gdb_stdlog,
3586 "LLW: waitpid %ld received %s\n",
3587 (long) lwpid, status_to_str (status));
3588 }
3589
0e5bf2a8 3590 lp = linux_nat_filter_event (lwpid, status, &new_pending);
d90e17a7 3591
33355866
JK
3592 /* STATUS is now no longer valid, use LP->STATUS instead. */
3593 status = 0;
3594
0e5bf2a8 3595 if (lp && !ptid_match (lp->ptid, ptid))
d6b0e80f 3596 {
e3e9f5a2
PA
3597 gdb_assert (lp->resumed);
3598
d90e17a7 3599 if (debug_linux_nat)
3e43a32a
MS
3600 fprintf (stderr,
3601 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3602 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3603
ca2163eb 3604 if (WIFSTOPPED (lp->status))
d90e17a7 3605 {
ca2163eb 3606 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3607 {
e3e9f5a2
PA
3608 /* Cancel breakpoint hits. The breakpoint may
3609 be removed before we fetch events from this
3610 process to report to the core. It is best
3611 not to assume the moribund breakpoints
3612 heuristic always handles these cases --- it
3613 could be that too many events go through to the
3614 core before this one is handled. All-stop
3615 always cancels breakpoint hits in all
3616 threads. */
3617 if (non_stop
00390b84 3618 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3619 && cancel_breakpoint (lp))
3620 {
3621 /* Throw away the SIGTRAP. */
3622 lp->status = 0;
3623
3624 if (debug_linux_nat)
3625 fprintf (stderr,
3e43a32a
MS
3626 "LLW: LWP %ld hit a breakpoint while"
3627 " waiting for another process;"
3628 " cancelled it\n",
e3e9f5a2
PA
3629 ptid_get_lwp (lp->ptid));
3630 }
3631 lp->stopped = 1;
d90e17a7
PA
3632 }
3633 else
3634 {
3635 lp->stopped = 1;
3636 lp->signalled = 0;
3637 }
3638 }
33355866 3639 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3640 {
3641 if (debug_linux_nat)
3e43a32a
MS
3642 fprintf (stderr,
3643 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3644 ptid_get_lwp (lp->ptid));
3645
3646 /* This was the last lwp in the process. Since
3647 events are serialized to GDB core, and we can't
3648 report this one right now, but GDB core and the
3649 other target layers will want to be notified
3650 about the exit code/signal, leave the status
3651 pending for the next time we're able to report
3652 it. */
d90e17a7
PA
3653
3654 /* Prevent trying to stop this thread again. We'll
3655 never try to resume it because it has a pending
3656 status. */
3657 lp->stopped = 1;
3658
3659 /* Dead LWPs aren't expected to report a pending
3660 SIGSTOP. */
3661 lp->signalled = 0;
3662
3663 /* Store the pending event in the waitstatus as
3664 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3665 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3666 }
3667
3668 /* Keep looking. */
3669 lp = NULL;
d6b0e80f
AC
3670 }
3671
0e5bf2a8 3672 if (new_pending)
d90e17a7 3673 {
0e5bf2a8
PA
3674 /* Some LWP now has a pending event. Go all the way
3675 back to check it. */
3676 goto retry;
3677 }
12d9289a 3678
0e5bf2a8
PA
3679 if (lp)
3680 {
3681 /* We got an event to report to the core. */
3682 break;
d90e17a7 3683 }
0e5bf2a8
PA
3684
3685 /* Retry until nothing comes out of waitpid. A single
3686 SIGCHLD can indicate more than one child stopped. */
3687 continue;
d6b0e80f
AC
3688 }
3689
0e5bf2a8
PA
3690 /* Check for zombie thread group leaders. Those can't be reaped
3691 until all other threads in the thread group are. */
3692 check_zombie_leaders ();
d6b0e80f 3693
0e5bf2a8
PA
3694 /* If there are no resumed children left, bail. We'd be stuck
3695 forever in the sigsuspend call below otherwise. */
3696 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3697 {
3698 if (debug_linux_nat)
3699 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3700
0e5bf2a8 3701 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3702
0e5bf2a8
PA
3703 if (!target_can_async_p ())
3704 clear_sigint_trap ();
b84876c2 3705
0e5bf2a8
PA
3706 restore_child_signals_mask (&prev_mask);
3707 return minus_one_ptid;
d6b0e80f 3708 }
28736962 3709
0e5bf2a8
PA
3710 /* No interesting event to report to the core. */
3711
3712 if (target_options & TARGET_WNOHANG)
3713 {
01124a23 3714 if (debug_linux_nat)
28736962
PA
3715 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3716
0e5bf2a8 3717 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3718 restore_child_signals_mask (&prev_mask);
3719 return minus_one_ptid;
3720 }
d6b0e80f
AC
3721
3722 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3723 gdb_assert (lp == NULL);
0e5bf2a8
PA
3724
3725 /* Block until we get an event reported with SIGCHLD. */
3726 sigsuspend (&suspend_mask);
d6b0e80f
AC
3727 }
3728
b84876c2 3729 if (!target_can_async_p ())
d26b5354 3730 clear_sigint_trap ();
d6b0e80f
AC
3731
3732 gdb_assert (lp);
3733
ca2163eb
PA
3734 status = lp->status;
3735 lp->status = 0;
3736
d6b0e80f
AC
3737 /* Don't report signals that GDB isn't interested in, such as
3738 signals that are neither printed nor stopped upon. Stopping all
3739 threads can be a bit time-consuming so if we want decent
3740 performance with heavily multi-threaded programs, especially when
3741 they're using a high frequency timer, we'd better avoid it if we
3742 can. */
3743
3744 if (WIFSTOPPED (status))
3745 {
2ea28649 3746 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
d6b0e80f 3747
2455069d
UW
3748 /* When using hardware single-step, we need to report every signal.
3749 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3750 if (!lp->step
2455069d 3751 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3752 {
3753 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3754 here? It is not clear we should. GDB may not expect
3755 other threads to run. On the other hand, not resuming
3756 newly attached threads may cause an unwanted delay in
3757 getting them running. */
3758 registers_changed ();
7b50312a
PA
3759 if (linux_nat_prepare_to_resume != NULL)
3760 linux_nat_prepare_to_resume (lp);
28439f5e 3761 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3762 lp->step, signo);
d6b0e80f
AC
3763 if (debug_linux_nat)
3764 fprintf_unfiltered (gdb_stdlog,
3765 "LLW: %s %s, %s (preempt 'handle')\n",
3766 lp->step ?
3767 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3768 target_pid_to_str (lp->ptid),
a493e3e2 3769 (signo != GDB_SIGNAL_0
2ea28649 3770 ? strsignal (gdb_signal_to_host (signo))
423ec54c 3771 : "0"));
d6b0e80f 3772 lp->stopped = 0;
d6b0e80f
AC
3773 goto retry;
3774 }
3775
1ad15515 3776 if (!non_stop)
d6b0e80f 3777 {
1ad15515
PA
3778 /* Only do the below in all-stop, as we currently use SIGINT
3779 to implement target_stop (see linux_nat_stop) in
3780 non-stop. */
a493e3e2 3781 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
1ad15515
PA
3782 {
3783 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3784 forwarded to the entire process group, that is, all LWPs
3785 will receive it - unless they're using CLONE_THREAD to
3786 share signals. Since we only want to report it once, we
3787 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3788 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3789 set_ignore_sigint, NULL);
1ad15515
PA
3790 lp->ignore_sigint = 0;
3791 }
3792 else
3793 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3794 }
3795 }
3796
3797 /* This LWP is stopped now. */
3798 lp->stopped = 1;
3799
3800 if (debug_linux_nat)
3801 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3802 status_to_str (status), target_pid_to_str (lp->ptid));
3803
4c28f408
PA
3804 if (!non_stop)
3805 {
3806 /* Now stop all other LWP's ... */
d90e17a7 3807 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3808
3809 /* ... and wait until all of them have reported back that
3810 they're no longer running. */
d90e17a7 3811 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3812
3813 /* If we're not waiting for a specific LWP, choose an event LWP
3814 from among those that have had events. Giving equal priority
3815 to all LWPs that have had events helps prevent
3816 starvation. */
0e5bf2a8 3817 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d90e17a7 3818 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3819
e3e9f5a2
PA
3820 /* Now that we've selected our final event LWP, cancel any
3821 breakpoints in other LWPs that have hit a GDB breakpoint.
3822 See the comment in cancel_breakpoints_callback to find out
3823 why. */
3824 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3825
4b60df3d
PA
3826 /* We'll need this to determine whether to report a SIGSTOP as
3827 GDB_SIGNAL_0. Need to take a copy because
3828 resume_clear_callback clears it. */
3829 last_resume_kind = lp->last_resume_kind;
3830
e3e9f5a2
PA
3831 /* In all-stop, from the core's perspective, all LWPs are now
3832 stopped until a new resume action is sent over. */
3833 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3834 }
3835 else
25289eb2 3836 {
4b60df3d
PA
3837 /* See above. */
3838 last_resume_kind = lp->last_resume_kind;
3839 resume_clear_callback (lp, NULL);
25289eb2 3840 }
d6b0e80f 3841
26ab7092 3842 if (linux_nat_status_is_event (status))
d6b0e80f 3843 {
d6b0e80f
AC
3844 if (debug_linux_nat)
3845 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3846 "LLW: trap ptid is %s.\n",
3847 target_pid_to_str (lp->ptid));
d6b0e80f 3848 }
d6b0e80f
AC
3849
3850 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3851 {
3852 *ourstatus = lp->waitstatus;
3853 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3854 }
3855 else
3856 store_waitstatus (ourstatus, status);
3857
01124a23 3858 if (debug_linux_nat)
b84876c2
PA
3859 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3860
7feb7d06 3861 restore_child_signals_mask (&prev_mask);
1e225492 3862
4b60df3d 3863 if (last_resume_kind == resume_stop
25289eb2
PA
3864 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3865 && WSTOPSIG (status) == SIGSTOP)
3866 {
3867 /* This thread was requested to stop by GDB with
3868 target_stop, and it stopped cleanly, so report it as SIG0. The
3869 use of SIGSTOP is an implementation detail. */
a493e3e2 3870 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3871 }
3872
1e225492
JK
3873 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3874 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3875 lp->core = -1;
3876 else
2e794194 3877 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3878
f973ed9c 3879 return lp->ptid;
d6b0e80f
AC
3880}
3881
e3e9f5a2
PA
3882/* Resume LWPs that are currently stopped without any pending status
3883 to report, but are resumed from the core's perspective. */
3884
3885static int
3886resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3887{
3888 ptid_t *wait_ptid_p = data;
3889
3890 if (lp->stopped
3891 && lp->resumed
3892 && lp->status == 0
3893 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3894 {
336060f3
PA
3895 struct regcache *regcache = get_thread_regcache (lp->ptid);
3896 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3897 CORE_ADDR pc = regcache_read_pc (regcache);
3898
e3e9f5a2
PA
3899 gdb_assert (is_executing (lp->ptid));
3900
3901 /* Don't bother if there's a breakpoint at PC that we'd hit
3902 immediately, and we're not waiting for this LWP. */
3903 if (!ptid_match (lp->ptid, *wait_ptid_p))
3904 {
e3e9f5a2
PA
3905 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3906 return 0;
3907 }
3908
3909 if (debug_linux_nat)
3910 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
3911 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3912 target_pid_to_str (lp->ptid),
3913 paddress (gdbarch, pc),
3914 lp->step);
e3e9f5a2 3915
336060f3 3916 registers_changed ();
7b50312a
PA
3917 if (linux_nat_prepare_to_resume != NULL)
3918 linux_nat_prepare_to_resume (lp);
e3e9f5a2 3919 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3920 lp->step, GDB_SIGNAL_0);
e3e9f5a2 3921 lp->stopped = 0;
e3e9f5a2
PA
3922 lp->stopped_by_watchpoint = 0;
3923 }
3924
3925 return 0;
3926}
3927
7feb7d06
PA
3928static ptid_t
3929linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3930 ptid_t ptid, struct target_waitstatus *ourstatus,
3931 int target_options)
7feb7d06
PA
3932{
3933 ptid_t event_ptid;
3934
3935 if (debug_linux_nat)
09826ec5
PA
3936 {
3937 char *options_string;
3938
3939 options_string = target_options_to_string (target_options);
3940 fprintf_unfiltered (gdb_stdlog,
3941 "linux_nat_wait: [%s], [%s]\n",
3942 target_pid_to_str (ptid),
3943 options_string);
3944 xfree (options_string);
3945 }
7feb7d06
PA
3946
3947 /* Flush the async file first. */
3948 if (target_can_async_p ())
3949 async_file_flush ();
3950
e3e9f5a2
PA
3951 /* Resume LWPs that are currently stopped without any pending status
3952 to report, but are resumed from the core's perspective. LWPs get
3953 in this state if we find them stopping at a time we're not
3954 interested in reporting the event (target_wait on a
3955 specific_process, for example, see linux_nat_wait_1), and
3956 meanwhile the event became uninteresting. Don't bother resuming
3957 LWPs we're not going to wait for if they'd stop immediately. */
3958 if (non_stop)
3959 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3960
47608cb1 3961 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3962
3963 /* If we requested any event, and something came out, assume there
3964 may be more. If we requested a specific lwp or process, also
3965 assume there may be more. */
3966 if (target_can_async_p ()
6953d224
PA
3967 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3968 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3969 || !ptid_equal (ptid, minus_one_ptid)))
3970 async_file_mark ();
3971
3972 /* Get ready for the next event. */
3973 if (target_can_async_p ())
3974 target_async (inferior_event_handler, 0);
3975
3976 return event_ptid;
3977}
3978
d6b0e80f
AC
3979static int
3980kill_callback (struct lwp_info *lp, void *data)
3981{
ed731959
JK
3982 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3983
3984 errno = 0;
3985 kill (GET_LWP (lp->ptid), SIGKILL);
3986 if (debug_linux_nat)
3987 fprintf_unfiltered (gdb_stdlog,
3988 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3989 target_pid_to_str (lp->ptid),
3990 errno ? safe_strerror (errno) : "OK");
3991
3992 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3993
d6b0e80f
AC
3994 errno = 0;
3995 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3996 if (debug_linux_nat)
3997 fprintf_unfiltered (gdb_stdlog,
3998 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3999 target_pid_to_str (lp->ptid),
4000 errno ? safe_strerror (errno) : "OK");
4001
4002 return 0;
4003}
4004
4005static int
4006kill_wait_callback (struct lwp_info *lp, void *data)
4007{
4008 pid_t pid;
4009
4010 /* We must make sure that there are no pending events (delayed
4011 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4012 program doesn't interfere with any following debugging session. */
4013
4014 /* For cloned processes we must check both with __WCLONE and
4015 without, since the exit status of a cloned process isn't reported
4016 with __WCLONE. */
4017 if (lp->cloned)
4018 {
4019 do
4020 {
58aecb61 4021 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 4022 if (pid != (pid_t) -1)
d6b0e80f 4023 {
e85a822c
DJ
4024 if (debug_linux_nat)
4025 fprintf_unfiltered (gdb_stdlog,
4026 "KWC: wait %s received unknown.\n",
4027 target_pid_to_str (lp->ptid));
4028 /* The Linux kernel sometimes fails to kill a thread
4029 completely after PTRACE_KILL; that goes from the stop
4030 point in do_fork out to the one in
4031 get_signal_to_deliver and waits again. So kill it
4032 again. */
4033 kill_callback (lp, NULL);
d6b0e80f
AC
4034 }
4035 }
4036 while (pid == GET_LWP (lp->ptid));
4037
4038 gdb_assert (pid == -1 && errno == ECHILD);
4039 }
4040
4041 do
4042 {
58aecb61 4043 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 4044 if (pid != (pid_t) -1)
d6b0e80f 4045 {
e85a822c
DJ
4046 if (debug_linux_nat)
4047 fprintf_unfiltered (gdb_stdlog,
4048 "KWC: wait %s received unknown.\n",
4049 target_pid_to_str (lp->ptid));
4050 /* See the call to kill_callback above. */
4051 kill_callback (lp, NULL);
d6b0e80f
AC
4052 }
4053 }
4054 while (pid == GET_LWP (lp->ptid));
4055
4056 gdb_assert (pid == -1 && errno == ECHILD);
4057 return 0;
4058}
4059
4060static void
7d85a9c0 4061linux_nat_kill (struct target_ops *ops)
d6b0e80f 4062{
f973ed9c
DJ
4063 struct target_waitstatus last;
4064 ptid_t last_ptid;
4065 int status;
d6b0e80f 4066
f973ed9c
DJ
4067 /* If we're stopped while forking and we haven't followed yet,
4068 kill the other task. We need to do this first because the
4069 parent will be sleeping if this is a vfork. */
d6b0e80f 4070
f973ed9c 4071 get_last_target_status (&last_ptid, &last);
d6b0e80f 4072
f973ed9c
DJ
4073 if (last.kind == TARGET_WAITKIND_FORKED
4074 || last.kind == TARGET_WAITKIND_VFORKED)
4075 {
3a3e9ee3 4076 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
4077 wait (&status);
4078 }
4079
4080 if (forks_exist_p ())
7feb7d06 4081 linux_fork_killall ();
f973ed9c
DJ
4082 else
4083 {
d90e17a7 4084 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 4085
4c28f408
PA
4086 /* Stop all threads before killing them, since ptrace requires
4087 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 4088 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
4089 /* ... and wait until all of them have reported back that
4090 they're no longer running. */
d90e17a7 4091 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 4092
f973ed9c 4093 /* Kill all LWP's ... */
d90e17a7 4094 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
4095
4096 /* ... and wait until we've flushed all events. */
d90e17a7 4097 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
4098 }
4099
4100 target_mourn_inferior ();
d6b0e80f
AC
4101}
4102
4103static void
136d6dae 4104linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 4105{
d90e17a7 4106 purge_lwp_list (ptid_get_pid (inferior_ptid));
d6b0e80f 4107
f973ed9c 4108 if (! forks_exist_p ())
d90e17a7
PA
4109 /* Normal case, no other forks available. */
4110 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
4111 else
4112 /* Multi-fork case. The current inferior_ptid has exited, but
4113 there are other viable forks to debug. Delete the exiting
4114 one and context-switch to the first available. */
4115 linux_fork_mourn_inferior ();
d6b0e80f
AC
4116}
4117
5b009018
PA
4118/* Convert a native/host siginfo object, into/from the siginfo in the
4119 layout of the inferiors' architecture. */
4120
4121static void
a5362b9a 4122siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
4123{
4124 int done = 0;
4125
4126 if (linux_nat_siginfo_fixup != NULL)
4127 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4128
4129 /* If there was no callback, or the callback didn't do anything,
4130 then just do a straight memcpy. */
4131 if (!done)
4132 {
4133 if (direction == 1)
a5362b9a 4134 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 4135 else
a5362b9a 4136 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
4137 }
4138}
4139
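/* Illustrative sketch, not part of GDB: the general shape of an
   architecture-specific callback as registered with
   linux_nat_set_siginfo_fixup (see the end of this file).  The
   struct and function names are hypothetical; a real implementation
   (e.g. a 64-bit GDB debugging 32-bit code) converts every field the
   architecture cares about, not just the few shown here.  */

#include <stdint.h>

/* How a 32-bit inferior might lay out the fields GDB reads most often.  */
typedef struct
{
  int si_signo;
  int si_errno;
  int si_code;
  unsigned int si_addr;		/* A 32-bit pointer.  */
} example_compat_siginfo_t;

static int
example_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
{
  example_compat_siginfo_t *compat = (example_compat_siginfo_t *) inf;

  if (direction == 0)
    {
      /* Native (64-bit) layout -> inferior (32-bit) layout.  */
      compat->si_signo = native->si_signo;
      compat->si_errno = native->si_errno;
      compat->si_code = native->si_code;
      compat->si_addr = (unsigned int) (uintptr_t) native->si_addr;
    }
  else
    {
      /* Inferior layout -> native layout.  */
      native->si_signo = compat->si_signo;
      native->si_errno = compat->si_errno;
      native->si_code = compat->si_code;
      native->si_addr = (void *) (uintptr_t) compat->si_addr;
    }

  return 1;	/* Nonzero tells siginfo_fixup the conversion was done.  */
}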
4aa995e1
PA
4140static LONGEST
4141linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4142 const char *annex, gdb_byte *readbuf,
4143 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4144{
4aa995e1 4145 int pid;
a5362b9a
TS
4146 siginfo_t siginfo;
4147 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
4148
4149 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4150 gdb_assert (readbuf || writebuf);
4151
4152 pid = GET_LWP (inferior_ptid);
4153 if (pid == 0)
4154 pid = GET_PID (inferior_ptid);
4155
4156 if (offset > sizeof (siginfo))
4157 return -1;
4158
4159 errno = 0;
4160 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4161 if (errno != 0)
4162 return -1;
4163
5b009018
PA
4164 /* When GDB is built as a 64-bit application, ptrace writes into
4165 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4166 inferior with a 64-bit GDB should look the same as debugging it
4167 with a 32-bit GDB, we need to convert it. GDB core always sees
4168 the converted layout, so any read/write will have to be done
4169 post-conversion. */
4170 siginfo_fixup (&siginfo, inf_siginfo, 0);
4171
4aa995e1
PA
4172 if (offset + len > sizeof (siginfo))
4173 len = sizeof (siginfo) - offset;
4174
4175 if (readbuf != NULL)
5b009018 4176 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4177 else
4178 {
5b009018
PA
4179 memcpy (inf_siginfo + offset, writebuf, len);
4180
4181 /* Convert back to ptrace layout before flushing it out. */
4182 siginfo_fixup (&siginfo, inf_siginfo, 1);
4183
4aa995e1
PA
4184 errno = 0;
4185 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4186 if (errno != 0)
4187 return -1;
4188 }
4189
4190 return len;
4191}
4192
10d6c8cd
DJ
4193static LONGEST
4194linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4195 const char *annex, gdb_byte *readbuf,
4196 const gdb_byte *writebuf,
4197 ULONGEST offset, LONGEST len)
d6b0e80f 4198{
4aa995e1 4199 struct cleanup *old_chain;
10d6c8cd 4200 LONGEST xfer;
d6b0e80f 4201
4aa995e1
PA
4202 if (object == TARGET_OBJECT_SIGNAL_INFO)
4203 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4204 offset, len);
4205
c35b1492
PA
4206 /* The target is connected but no live inferior is selected. Pass
4207 this request down to a lower stratum (e.g., the executable
4208 file). */
4209 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4210 return 0;
4211
4aa995e1
PA
4212 old_chain = save_inferior_ptid ();
4213
d6b0e80f
AC
4214 if (is_lwp (inferior_ptid))
4215 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4216
10d6c8cd
DJ
4217 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4218 offset, len);
d6b0e80f
AC
4219
4220 do_cleanups (old_chain);
4221 return xfer;
4222}
4223
4224static int
28439f5e 4225linux_thread_alive (ptid_t ptid)
d6b0e80f 4226{
8c6a60d1 4227 int err, tmp_errno;
4c28f408 4228
d6b0e80f
AC
4229 gdb_assert (is_lwp (ptid));
4230
4c28f408
PA
4231 /* Send signal 0 instead of using ptrace, because ptracing a
4232 running thread errors out claiming that the thread doesn't
4233 exist. */
4234 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4235 tmp_errno = errno;
d6b0e80f
AC
4236 if (debug_linux_nat)
4237 fprintf_unfiltered (gdb_stdlog,
4c28f408 4238 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4239 target_pid_to_str (ptid),
8c6a60d1 4240 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4241
4c28f408 4242 if (err != 0)
d6b0e80f
AC
4243 return 0;
4244
4245 return 1;
4246}
4247
28439f5e
PA
4248static int
4249linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4250{
4251 return linux_thread_alive (ptid);
4252}
4253
d6b0e80f 4254static char *
117de6a9 4255linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4256{
4257 static char buf[64];
4258
a0ef4274 4259 if (is_lwp (ptid)
d90e17a7
PA
4260 && (GET_PID (ptid) != GET_LWP (ptid)
4261 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4262 {
4263 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4264 return buf;
4265 }
4266
4267 return normal_pid_to_str (ptid);
4268}
4269
4694da01
TT
4270static char *
4271linux_nat_thread_name (struct thread_info *thr)
4272{
4273 int pid = ptid_get_pid (thr->ptid);
4274 long lwp = ptid_get_lwp (thr->ptid);
4275#define FORMAT "/proc/%d/task/%ld/comm"
4276 char buf[sizeof (FORMAT) + 30];
4277 FILE *comm_file;
4278 char *result = NULL;
4279
4280 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4281 comm_file = fopen (buf, "r");
4282 if (comm_file)
4283 {
4284 /* Not exported by the kernel, so we define it here. */
4285#define COMM_LEN 16
4286 static char line[COMM_LEN + 1];
4287
4288 if (fgets (line, sizeof (line), comm_file))
4289 {
4290 char *nl = strchr (line, '\n');
4291
4292 if (nl)
4293 *nl = '\0';
4294 if (*line != '\0')
4295 result = line;
4296 }
4297
4298 fclose (comm_file);
4299 }
4300
4301#undef COMM_LEN
4302#undef FORMAT
4303
4304 return result;
4305}
4306
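/* Illustrative aside, not part of GDB: the 16-byte COMM_LEN above
   matches the kernel's TASK_COMM_LEN.  What comes back from
   /proc/PID/task/TID/comm is typically whatever the inferior set on
   itself, e.g. via prctl, as in this standalone sketch
   (example_name_current_thread is a hypothetical name):  */

#include <sys/prctl.h>

static void
example_name_current_thread (void)
{
  /* The kernel truncates the name to 15 characters plus the NUL.  */
  prctl (PR_SET_NAME, "worker-thread", 0, 0, 0);
}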
dba24537
AC
4307/* Accepts an integer PID; Returns a string representing a file that
4308 can be opened to get the symbols for the child process. */
4309
6d8fd2b7
UW
4310static char *
4311linux_child_pid_to_exec_file (int pid)
dba24537
AC
4312{
4313 char *name1, *name2;
4314
4315 name1 = xmalloc (MAXPATHLEN);
4316 name2 = xmalloc (MAXPATHLEN);
4317 make_cleanup (xfree, name1);
4318 make_cleanup (xfree, name2);
4319 memset (name2, 0, MAXPATHLEN);
4320
4321 sprintf (name1, "/proc/%d/exe", pid);
4322 if (readlink (name1, name2, MAXPATHLEN) > 0)
4323 return name2;
4324 else
4325 return name1;
4326}
4327
dba24537
AC
4328/* Records the thread's register state for the corefile note
4329 section. */
4330
4331static char *
6432734d
UW
4332linux_nat_collect_thread_registers (const struct regcache *regcache,
4333 ptid_t ptid, bfd *obfd,
4334 char *note_data, int *note_size,
2ea28649 4335 enum gdb_signal stop_signal)
dba24537 4336{
6432734d 4337 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 4338 const struct regset *regset;
55e969c1 4339 int core_regset_p;
6432734d
UW
4340 gdb_gregset_t gregs;
4341 gdb_fpregset_t fpregs;
4f844a66
DM
4342
4343 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
dba24537 4344
6432734d
UW
4345 if (core_regset_p
4346 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4347 sizeof (gregs)))
4348 != NULL && regset->collect_regset != NULL)
4349 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4f844a66 4350 else
6432734d 4351 fill_gregset (regcache, &gregs, -1);
2f2241f1 4352
6432734d
UW
4353 note_data = (char *) elfcore_write_prstatus
4354 (obfd, note_data, note_size, ptid_get_lwp (ptid),
2ea28649 4355 gdb_signal_to_host (stop_signal), &gregs);
2f2241f1 4356
6432734d
UW
4357 if (core_regset_p
4358 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4359 sizeof (fpregs)))
3e43a32a 4360 != NULL && regset->collect_regset != NULL)
6432734d
UW
4361 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4362 else
4363 fill_fpregset (regcache, &fpregs, -1);
17ea7499 4364
6432734d
UW
4365 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4366 &fpregs, sizeof (fpregs));
4f844a66 4367
dba24537
AC
4368 return note_data;
4369}
4370
dba24537
AC
4371/* Fills the "to_make_corefile_note" target vector. Builds the note
4372 section for a corefile, and returns it in a malloc buffer. */
4373
4374static char *
4375linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4376{
6432734d
UW
4377 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4378 converted to gdbarch_core_regset_sections, this function can go away. */
4379 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4380 linux_nat_collect_thread_registers);
dba24537
AC
4381}
4382
10d6c8cd
DJ
4383/* Implement the to_xfer_partial interface for memory reads using the /proc
4384 filesystem. Because we can use a single read() call for /proc, this
4385 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4386 but it doesn't support writes. */
4387
4388static LONGEST
4389linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4390 const char *annex, gdb_byte *readbuf,
4391 const gdb_byte *writebuf,
4392 ULONGEST offset, LONGEST len)
dba24537 4393{
10d6c8cd
DJ
4394 LONGEST ret;
4395 int fd;
dba24537
AC
4396 char filename[64];
4397
10d6c8cd 4398 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4399 return 0;
4400
4401 /* Don't bother for one word. */
4402 if (len < 3 * sizeof (long))
4403 return 0;
4404
4405 /* We could keep this file open and cache it - possibly one per
4406 thread. That requires some juggling, but is even faster. */
4407 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4408 fd = open (filename, O_RDONLY | O_LARGEFILE);
4409 if (fd == -1)
4410 return 0;
4411
4412 /* If pread64 is available, use it. It's faster if the kernel
4413 supports it (only one syscall), and it's 64-bit safe even on
4414 32-bit platforms (for instance, SPARC debugging a SPARC64
4415 application). */
4416#ifdef HAVE_PREAD64
10d6c8cd 4417 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4418#else
10d6c8cd 4419 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4420#endif
4421 ret = 0;
4422 else
4423 ret = len;
4424
4425 close (fd);
4426 return ret;
4427}
4428
efcbbd14
UW
4429
4430/* Enumerate spufs IDs for process PID. */
4431static LONGEST
4432spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4433{
4434 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4435 LONGEST pos = 0;
4436 LONGEST written = 0;
4437 char path[128];
4438 DIR *dir;
4439 struct dirent *entry;
4440
4441 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4442 dir = opendir (path);
4443 if (!dir)
4444 return -1;
4445
4446 rewinddir (dir);
4447 while ((entry = readdir (dir)) != NULL)
4448 {
4449 struct stat st;
4450 struct statfs stfs;
4451 int fd;
4452
4453 fd = atoi (entry->d_name);
4454 if (!fd)
4455 continue;
4456
4457 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4458 if (stat (path, &st) != 0)
4459 continue;
4460 if (!S_ISDIR (st.st_mode))
4461 continue;
4462
4463 if (statfs (path, &stfs) != 0)
4464 continue;
4465 if (stfs.f_type != SPUFS_MAGIC)
4466 continue;
4467
4468 if (pos >= offset && pos + 4 <= offset + len)
4469 {
4470 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4471 written += 4;
4472 }
4473 pos += 4;
4474 }
4475
4476 closedir (dir);
4477 return written;
4478}
4479
4480/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4481 object type, using the /proc file system. */
4482static LONGEST
4483linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4484 const char *annex, gdb_byte *readbuf,
4485 const gdb_byte *writebuf,
4486 ULONGEST offset, LONGEST len)
4487{
4488 char buf[128];
4489 int fd = 0;
4490 int ret = -1;
4491 int pid = PIDGET (inferior_ptid);
4492
4493 if (!annex)
4494 {
4495 if (!readbuf)
4496 return -1;
4497 else
4498 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4499 }
4500
4501 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4502 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4503 if (fd <= 0)
4504 return -1;
4505
4506 if (offset != 0
4507 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4508 {
4509 close (fd);
4510 return 0;
4511 }
4512
4513 if (writebuf)
4514 ret = write (fd, writebuf, (size_t) len);
4515 else if (readbuf)
4516 ret = read (fd, readbuf, (size_t) len);
4517
4518 close (fd);
4519 return ret;
4520}
4521
4522
dba24537
AC
4523/* Parse LINE as a signal set and add its set bits to SIGS. */
4524
4525static void
4526add_line_to_sigset (const char *line, sigset_t *sigs)
4527{
4528 int len = strlen (line) - 1;
4529 const char *p;
4530 int signum;
4531
4532 if (line[len] != '\n')
8a3fe4f8 4533 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4534
4535 p = line;
4536 signum = len * 4;
4537 while (len-- > 0)
4538 {
4539 int digit;
4540
4541 if (*p >= '0' && *p <= '9')
4542 digit = *p - '0';
4543 else if (*p >= 'a' && *p <= 'f')
4544 digit = *p - 'a' + 10;
4545 else
8a3fe4f8 4546 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4547
4548 signum -= 4;
4549
4550 if (digit & 1)
4551 sigaddset (sigs, signum + 1);
4552 if (digit & 2)
4553 sigaddset (sigs, signum + 2);
4554 if (digit & 4)
4555 sigaddset (sigs, signum + 3);
4556 if (digit & 8)
4557 sigaddset (sigs, signum + 4);
4558
4559 p++;
4560 }
4561}
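/* Worked example (illustrative, not from a real trace): for a status
   line such as

     SigPnd:	0000000000000102

   each hex digit encodes four signals, with the rightmost digit
   covering signals 1-4.  The trailing '2' (binary 0010) therefore
   marks signal 2 (SIGINT) as pending, and the '1' two digits further
   left (binary 0001, covering signals 9-12) marks signal 9
   (SIGKILL).  */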
4562
4563/* Find process PID's pending signals from /proc/pid/status and set
4564 SIGS to match. */
4565
4566void
3e43a32a
MS
4567linux_proc_pending_signals (int pid, sigset_t *pending,
4568 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4569{
4570 FILE *procfile;
4571 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 4572 struct cleanup *cleanup;
dba24537
AC
4573
4574 sigemptyset (pending);
4575 sigemptyset (blocked);
4576 sigemptyset (ignored);
4577 sprintf (fname, "/proc/%d/status", pid);
4578 procfile = fopen (fname, "r");
4579 if (procfile == NULL)
8a3fe4f8 4580 error (_("Could not open %s"), fname);
7c8a8b04 4581 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4582
4583 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4584 {
4585 /* Normal queued signals are on the SigPnd line in the status
4586 file. However, 2.6 kernels also have a "shared" pending
4587 queue for delivering signals to a thread group, so check for
4588 a ShdPnd line also.
4589
4590 Unfortunately some Red Hat kernels include the shared pending
4591 queue but not the ShdPnd status field. */
4592
4593 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4594 add_line_to_sigset (buffer + 8, pending);
4595 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4596 add_line_to_sigset (buffer + 8, pending);
4597 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4598 add_line_to_sigset (buffer + 8, blocked);
4599 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4600 add_line_to_sigset (buffer + 8, ignored);
4601 }
4602
7c8a8b04 4603 do_cleanups (cleanup);
dba24537
AC
4604}
4605
07e059b5
VP
4606static LONGEST
4607linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
4608 const char *annex, gdb_byte *readbuf,
4609 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5 4610{
07e059b5
VP
4611 gdb_assert (object == TARGET_OBJECT_OSDATA);
4612
d26e3629 4613 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4614}
4615
10d6c8cd
DJ
4616static LONGEST
4617linux_xfer_partial (struct target_ops *ops, enum target_object object,
4618 const char *annex, gdb_byte *readbuf,
4619 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4620{
4621 LONGEST xfer;
4622
4623 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4624 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
4625 offset, len);
4626
07e059b5
VP
4627 if (object == TARGET_OBJECT_OSDATA)
4628 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4629 offset, len);
4630
efcbbd14
UW
4631 if (object == TARGET_OBJECT_SPU)
4632 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4633 offset, len);
4634
8f313923
JK
4635 /* GDB calculates all the addresses in the possibly larger width of the address.
4636 Address width needs to be masked before its final use - either by
4637 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4638
4639 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4640
4641 if (object == TARGET_OBJECT_MEMORY)
4642 {
4643 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4644
4645 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4646 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4647 }
4648
10d6c8cd
DJ
4649 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4650 offset, len);
4651 if (xfer != 0)
4652 return xfer;
4653
4654 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4655 offset, len);
4656}
4657
5808517f
YQ
4658static void
4659cleanup_target_stop (void *arg)
4660{
4661 ptid_t *ptid = (ptid_t *) arg;
4662
4663 gdb_assert (arg != NULL);
4664
4665 /* Unpause all */
a493e3e2 4666 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4667}
4668
4669static VEC(static_tracepoint_marker_p) *
4670linux_child_static_tracepoint_markers_by_strid (const char *strid)
4671{
4672 char s[IPA_CMD_BUF_SIZE];
4673 struct cleanup *old_chain;
4674 int pid = ptid_get_pid (inferior_ptid);
4675 VEC(static_tracepoint_marker_p) *markers = NULL;
4676 struct static_tracepoint_marker *marker = NULL;
4677 char *p = s;
4678 ptid_t ptid = ptid_build (pid, 0, 0);
4679
4680 /* Pause all */
4681 target_stop (ptid);
4682
4683 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4684 s[sizeof ("qTfSTM")] = 0;
4685
42476b70 4686 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4687
4688 old_chain = make_cleanup (free_current_marker, &marker);
4689 make_cleanup (cleanup_target_stop, &ptid);
4690
4691 while (*p++ == 'm')
4692 {
4693 if (marker == NULL)
4694 marker = XCNEW (struct static_tracepoint_marker);
4695
4696 do
4697 {
4698 parse_static_tracepoint_marker_definition (p, &p, marker);
4699
4700 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4701 {
4702 VEC_safe_push (static_tracepoint_marker_p,
4703 markers, marker);
4704 marker = NULL;
4705 }
4706 else
4707 {
4708 release_static_tracepoint_marker (marker);
4709 memset (marker, 0, sizeof (*marker));
4710 }
4711 }
4712 while (*p++ == ','); /* comma-separated list */
4713
4714 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4715 s[sizeof ("qTsSTM")] = 0;
42476b70 4716 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4717 p = s;
4718 }
4719
4720 do_cleanups (old_chain);
4721
4722 return markers;
4723}
4724
e9efe249 4725/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4726 it with local methods. */
4727
910122bf
UW
4728static void
4729linux_target_install_ops (struct target_ops *t)
10d6c8cd 4730{
6d8fd2b7 4731 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4732 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4733 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4734 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4735 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4736 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4737 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4738 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4739 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4740 t->to_post_attach = linux_child_post_attach;
4741 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4742 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4743
4744 super_xfer_partial = t->to_xfer_partial;
4745 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4746
4747 t->to_static_tracepoint_markers_by_strid
4748 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4749}
4750
4751struct target_ops *
4752linux_target (void)
4753{
4754 struct target_ops *t;
4755
4756 t = inf_ptrace_target ();
4757 linux_target_install_ops (t);
4758
4759 return t;
4760}
4761
4762struct target_ops *
7714d83a 4763linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4764{
4765 struct target_ops *t;
4766
4767 t = inf_ptrace_trad_target (register_u_offset);
4768 linux_target_install_ops (t);
10d6c8cd 4769
10d6c8cd
DJ
4770 return t;
4771}
4772
b84876c2
PA
4773/* target_is_async_p implementation. */
4774
4775static int
4776linux_nat_is_async_p (void)
4777{
4778 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4779 it explicitly with the "set target-async" command.
b84876c2 4780 Someday, linux will always be async. */
3dd5b83d 4781 return target_async_permitted;
b84876c2
PA
4782}
4783
4784/* target_can_async_p implementation. */
4785
4786static int
4787linux_nat_can_async_p (void)
4788{
4789 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4790 it explicitly with the "set target-async" command.
b84876c2 4791 Someday, linux will always be async. */
3dd5b83d 4792 return target_async_permitted;
b84876c2
PA
4793}
4794
9908b566
VP
4795static int
4796linux_nat_supports_non_stop (void)
4797{
4798 return 1;
4799}
4800
d90e17a7
PA
4801/* True if we want to support multi-process. To be removed when GDB
4802 supports multi-exec. */
4803
2277426b 4804int linux_multi_process = 1;
d90e17a7
PA
4805
4806static int
4807linux_nat_supports_multi_process (void)
4808{
4809 return linux_multi_process;
4810}
4811
03583c20
UW
4812static int
4813linux_nat_supports_disable_randomization (void)
4814{
4815#ifdef HAVE_PERSONALITY
4816 return 1;
4817#else
4818 return 0;
4819#endif
4820}
4821
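/* Illustrative sketch, not part of this excerpt: when HAVE_PERSONALITY
   is available, randomization is presumably disabled in the
   create-inferior path (earlier in this file) by OR-ing
   ADDR_NO_RANDOMIZE into the child's current personality before the
   exec, roughly like this standalone snippet (error handling omitted;
   example_disable_aslr_then_exec is a hypothetical name):  */

#ifdef HAVE_PERSONALITY
#include <sys/personality.h>
#include <unistd.h>

static void
example_disable_aslr_then_exec (char *const argv[])
{
  int persona = personality (0xffffffff);	/* Query the current value.  */

  if (persona != -1)
    personality (persona | ADDR_NO_RANDOMIZE);

  execv (argv[0], argv);
}
#endif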
b84876c2
PA
4822static int async_terminal_is_ours = 1;
4823
4824/* target_terminal_inferior implementation. */
4825
4826static void
4827linux_nat_terminal_inferior (void)
4828{
4829 if (!target_is_async_p ())
4830 {
4831 /* Async mode is disabled. */
4832 terminal_inferior ();
4833 return;
4834 }
4835
b84876c2
PA
4836 terminal_inferior ();
4837
d9d2d8b6 4838 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4839 if (!async_terminal_is_ours)
4840 return;
4841
4842 delete_file_handler (input_fd);
4843 async_terminal_is_ours = 0;
4844 set_sigint_trap ();
4845}
4846
4847/* target_terminal_ours implementation. */
4848
2c0b251b 4849static void
b84876c2
PA
4850linux_nat_terminal_ours (void)
4851{
4852 if (!target_is_async_p ())
4853 {
4854 /* Async mode is disabled. */
4855 terminal_ours ();
4856 return;
4857 }
4858
4859 /* GDB should never give the terminal to the inferior if the
4860 inferior is running in the background (run&, continue&, etc.),
4861 but claiming it sure should. */
4862 terminal_ours ();
4863
b84876c2
PA
4864 if (async_terminal_is_ours)
4865 return;
4866
4867 clear_sigint_trap ();
4868 add_file_handler (input_fd, stdin_event_handler, 0);
4869 async_terminal_is_ours = 1;
4870}
4871
4872static void (*async_client_callback) (enum inferior_event_type event_type,
4873 void *context);
4874static void *async_client_context;
4875
7feb7d06
PA
4876/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4877 it lets us notice when any child changes state and notify the
4878 event loop; and it allows us to use sigsuspend in linux_nat_wait_1
4879 above to wait for the arrival of a SIGCHLD. */
4880
b84876c2 4881static void
7feb7d06 4882sigchld_handler (int signo)
b84876c2 4883{
7feb7d06
PA
4884 int old_errno = errno;
4885
01124a23
DE
4886 if (debug_linux_nat)
4887 ui_file_write_async_safe (gdb_stdlog,
4888 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4889
4890 if (signo == SIGCHLD
4891 && linux_nat_event_pipe[0] != -1)
4892 async_file_mark (); /* Let the event loop know that there are
4893 events to handle. */
4894
4895 errno = old_errno;
4896}
4897
4898/* Callback registered with the target events file descriptor. */
4899
4900static void
4901handle_target_event (int error, gdb_client_data client_data)
4902{
4903 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4904}
4905
4906/* Create/destroy the target events pipe. Returns previous state. */
4907
4908static int
4909linux_async_pipe (int enable)
4910{
4911 int previous = (linux_nat_event_pipe[0] != -1);
4912
4913 if (previous != enable)
4914 {
4915 sigset_t prev_mask;
4916
4917 block_child_signals (&prev_mask);
4918
4919 if (enable)
4920 {
4921 if (pipe (linux_nat_event_pipe) == -1)
4922 internal_error (__FILE__, __LINE__,
4923 "creating event pipe failed.");
4924
4925 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4926 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4927 }
4928 else
4929 {
4930 close (linux_nat_event_pipe[0]);
4931 close (linux_nat_event_pipe[1]);
4932 linux_nat_event_pipe[0] = -1;
4933 linux_nat_event_pipe[1] = -1;
4934 }
4935
4936 restore_child_signals_mask (&prev_mask);
4937 }
4938
4939 return previous;
b84876c2
PA
4940}
4941
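/* Illustrative sketch, not part of GDB: sigchld_handler plus
   linux_async_pipe above implement the classic "self-pipe trick".
   The signal handler restricts itself to async-signal-safe work (a
   one-byte write to a non-blocking pipe), and the event loop watches
   the read end.  A minimal standalone version of the pattern (all
   example_* names are hypothetical):  */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int example_pipe[2];

static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;

  /* write is async-signal-safe; one byte is enough to wake the event
     loop, and O_NONBLOCK keeps a full pipe from ever blocking us.  */
  (void) write (example_pipe[1], "+", 1);
  errno = old_errno;
}

static void
example_setup (void)
{
  pipe (example_pipe);
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, example_sigchld_handler);

  /* The event loop then polls example_pipe[0] for readability, drains
     it, and calls waitpid with WNOHANG to collect the events.  */
}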
4942/* target_async implementation. */
4943
4944static void
4945linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4946 void *context), void *context)
4947{
b84876c2
PA
4948 if (callback != NULL)
4949 {
4950 async_client_callback = callback;
4951 async_client_context = context;
7feb7d06
PA
4952 if (!linux_async_pipe (1))
4953 {
4954 add_file_handler (linux_nat_event_pipe[0],
4955 handle_target_event, NULL);
4956 /* There may be pending events to handle. Tell the event loop
4957 to poll them. */
4958 async_file_mark ();
4959 }
b84876c2
PA
4960 }
4961 else
4962 {
4963 async_client_callback = callback;
4964 async_client_context = context;
b84876c2 4965 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4966 linux_async_pipe (0);
b84876c2
PA
4967 }
4968 return;
4969}
4970
a493e3e2 4971/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4972 event came out. */
4973
4c28f408 4974static int
252fbfc8 4975linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4976{
d90e17a7 4977 if (!lwp->stopped)
252fbfc8 4978 {
d90e17a7 4979 ptid_t ptid = lwp->ptid;
252fbfc8 4980
d90e17a7
PA
4981 if (debug_linux_nat)
4982 fprintf_unfiltered (gdb_stdlog,
4983 "LNSL: running -> suspending %s\n",
4984 target_pid_to_str (lwp->ptid));
252fbfc8 4985
252fbfc8 4986
25289eb2
PA
4987 if (lwp->last_resume_kind == resume_stop)
4988 {
4989 if (debug_linux_nat)
4990 fprintf_unfiltered (gdb_stdlog,
4991 "linux-nat: already stopping LWP %ld at "
4992 "GDB's request\n",
4993 ptid_get_lwp (lwp->ptid));
4994 return 0;
4995 }
252fbfc8 4996
25289eb2
PA
4997 stop_callback (lwp, NULL);
4998 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4999 }
5000 else
5001 {
5002 /* Already known to be stopped; do nothing. */
252fbfc8 5003
d90e17a7
PA
5004 if (debug_linux_nat)
5005 {
e09875d4 5006 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
5007 fprintf_unfiltered (gdb_stdlog,
5008 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
5009 target_pid_to_str (lwp->ptid));
5010 else
3e43a32a
MS
5011 fprintf_unfiltered (gdb_stdlog,
5012 "LNSL: already stopped/no "
5013 "stop_requested yet %s\n",
d90e17a7 5014 target_pid_to_str (lwp->ptid));
252fbfc8
PA
5015 }
5016 }
4c28f408
PA
5017 return 0;
5018}
5019
5020static void
5021linux_nat_stop (ptid_t ptid)
5022{
5023 if (non_stop)
d90e17a7 5024 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5025 else
5026 linux_ops->to_stop (ptid);
5027}
5028
d90e17a7
PA
5029static void
5030linux_nat_close (int quitting)
5031{
5032 /* Unregister from the event loop. */
305436e0
PA
5033 if (linux_nat_is_async_p ())
5034 linux_nat_async (NULL, 0);
d90e17a7 5035
d90e17a7
PA
5036 if (linux_ops->to_close)
5037 linux_ops->to_close (quitting);
5038}
5039
c0694254
PA
5040/* When requests are passed down from the linux-nat layer to the
5041 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5042 used. The address space pointer is stored in the inferior object,
5043 but the common code that is passed such ptid can't tell whether
5044 lwpid is a "main" process id or not (it assumes so). We reverse
5045 look up the "main" process id from the lwp here. */
5046
70221824 5047static struct address_space *
c0694254
PA
5048linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5049{
5050 struct lwp_info *lwp;
5051 struct inferior *inf;
5052 int pid;
5053
5054 pid = GET_LWP (ptid);
5055 if (GET_LWP (ptid) == 0)
5056 {
5057 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5058 tgid. */
5059 lwp = find_lwp_pid (ptid);
5060 pid = GET_PID (lwp->ptid);
5061 }
5062 else
5063 {
5064 /* A (pid,lwpid,0) ptid. */
5065 pid = GET_PID (ptid);
5066 }
5067
5068 inf = find_inferior_pid (pid);
5069 gdb_assert (inf != NULL);
5070 return inf->aspace;
5071}
5072
/* Return the cached value of the processor core for thread PTID.  */

static int
linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
{
  struct lwp_info *info = find_lwp_pid (ptid);

  if (info)
    return info->core;
  return -1;
}

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will sit at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
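
/* A minimal registration sketch (illustrative only; the my_arch_*
   names are assumptions, not functions defined in this file).  An
   architecture-specific native file builds its target vector on the
   inf-ptrace layer, installs its own register access methods, and
   only then hands the result to linux_nat_add_target, so that the
   multi-threading overrides above land on top:

     void
     _initialize_my_arch_linux_nat (void)
     {
       struct target_ops *t;

       t = linux_target ();
       t->to_fetch_registers = my_arch_linux_fetch_registers;
       t->to_store_registers = my_arch_linux_store_registers;
       linux_nat_add_target (t);
     }
*/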

/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t,
			  void (*new_thread) (struct lwp_info *))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}
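
/* Illustrative sketch (my_arch_new_thread is an assumed name, not
   defined here): a backend that keeps per-LWP state, such as
   mirrored debug registers, would typically register its hook right
   after calling linux_nat_add_target:

     static void
     my_arch_new_thread (struct lwp_info *lp)
     {
       ...initialize per-LWP state for LP...
     }

     linux_nat_set_new_thread (t, my_arch_new_thread);
*/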

/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (siginfo_t *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}
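
/* Sketch of the expected callback shape (the my_arch_* names and the
   exact meaning of the direction values are illustrative
   assumptions): the fixup receives the siginfo in ptrace's layout, a
   buffer in the inferior's layout, and a direction flag selecting
   which way to convert.  It returns nonzero if it performed a
   conversion, or zero to tell the caller that both layouts match and
   a plain copy suffices:

     static int
     my_arch_siginfo_fixup (siginfo_t *native, gdb_byte *inf, int direction)
     {
       if (!my_arch_layouts_differ_p ())
	 return 0;

       if (direction == 0)
	 my_arch_native_to_inferior (inf, native);
       else
	 my_arch_inferior_to_native (native, inf);
       return 1;
     }
*/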

/* Register a method to call prior to resuming a thread.  */

void
linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}
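
/* Illustrative sketch (assumed names, not defined here): a typical
   hook flushes lazily-cached per-LWP state to the kernel just before
   the LWP is set running again, e.g. stale debug registers:

     static void
     my_arch_prepare_to_resume (struct lwp_info *lp)
     {
       if (my_arch_debug_regs_stale_p (lp))
	 my_arch_write_debug_regs (lp);
     }

     linux_nat_set_prepare_to_resume (t, my_arch_prepare_to_resume);
*/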
5173
f865ee35
JK
5174/* See linux-nat.h. */
5175
5176int
5177linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 5178{
da559b09 5179 int pid;
9f0bdab8 5180
da559b09
JK
5181 pid = GET_LWP (ptid);
5182 if (pid == 0)
5183 pid = GET_PID (ptid);
f865ee35 5184
da559b09
JK
5185 errno = 0;
5186 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
5187 if (errno != 0)
5188 {
5189 memset (siginfo, 0, sizeof (*siginfo));
5190 return 0;
5191 }
f865ee35 5192 return 1;
9f0bdab8
DJ
5193}
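
/* Illustrative caller sketch (handle_segv_details is an assumed
   name): since this function zeroes *SIGINFO and returns 0 when
   PTRACE_GETSIGINFO fails, callers should test the return value
   before trusting the contents:

     siginfo_t siginfo;

     if (linux_nat_get_siginfo (inferior_ptid, &siginfo)
	 && siginfo.si_signo == SIGSEGV)
       handle_segv_details (&siginfo);
*/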

/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

void
_initialize_linux_nat (void)
{
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}
\f

/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
			  sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}