Enable --gc-sections test cases for aarch64*-*-*
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
28e7fd62 3 Copyright (C) 2001-2013 Free Software Foundation, Inc.
3993f6b1
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
19
20#include "defs.h"
21#include "inferior.h"
22#include "target.h"
d6b0e80f 23#include "gdb_string.h"
3993f6b1 24#include "gdb_wait.h"
d6b0e80f
AC
25#include "gdb_assert.h"
26#ifdef HAVE_TKILL_SYSCALL
27#include <unistd.h>
28#include <sys/syscall.h>
29#endif
3993f6b1 30#include <sys/ptrace.h>
0274a8ce 31#include "linux-nat.h"
af96c192 32#include "linux-ptrace.h"
13da1c97 33#include "linux-procfs.h"
ac264b3b 34#include "linux-fork.h"
d6b0e80f
AC
35#include "gdbthread.h"
36#include "gdbcmd.h"
37#include "regcache.h"
4f844a66 38#include "regset.h"
dab06dbe 39#include "inf-child.h"
10d6c8cd
DJ
40#include "inf-ptrace.h"
41#include "auxv.h"
dba24537 42#include <sys/param.h> /* for MAXPATHLEN */
1777feb0 43#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
44#include "elf-bfd.h" /* for elfcore_write_* */
45#include "gregset.h" /* for gregset */
46#include "gdbcore.h" /* for get_exec_file */
47#include <ctype.h> /* for isdigit */
1777feb0 48#include "gdbthread.h" /* for struct thread_info etc. */
dba24537
AC
49#include "gdb_stat.h" /* for struct stat */
50#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
51#include "inf-loop.h"
52#include "event-loop.h"
53#include "event-top.h"
07e059b5
VP
54#include <pwd.h>
55#include <sys/types.h>
56#include "gdb_dirent.h"
57#include "xml-support.h"
191c4426 58#include "terminal.h"
efcbbd14 59#include <sys/vfs.h>
6c95b8df 60#include "solib.h"
d26e3629 61#include "linux-osdata.h"
6432734d 62#include "linux-tdep.h"
7dcd53a0 63#include "symfile.h"
5808517f
YQ
64#include "agent.h"
65#include "tracepoint.h"
87b0bb13
JK
66#include "exceptions.h"
67#include "linux-ptrace.h"
68#include "buffer.h"
6ecd4729 69#include "target-descriptions.h"
efcbbd14
UW
70
71#ifndef SPUFS_MAGIC
72#define SPUFS_MAGIC 0x23c9b64e
73#endif
dba24537 74
10568435
JK
75#ifdef HAVE_PERSONALITY
76# include <sys/personality.h>
77# if !HAVE_DECL_ADDR_NO_RANDOMIZE
78# define ADDR_NO_RANDOMIZE 0x0040000
79# endif
80#endif /* HAVE_PERSONALITY */
81
1777feb0 82/* This comment documents high-level logic of this file.
8a77dff3
VP
83
84Waiting for events in sync mode
85===============================
86
87When waiting for an event in a specific thread, we just use waitpid, passing
88the specific pid, and not passing WNOHANG.
89
1777feb0 90When waiting for an event in all threads, waitpid is not quite good. Prior to
8a77dff3 91version 2.4, Linux can either wait for event in main thread, or in secondary
1777feb0 92threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
8a77dff3
VP
93miss an event. The solution is to use non-blocking waitpid, together with
94sigsuspend. First, we use non-blocking waitpid to get an event in the main
process, if any.  Second, we use non-blocking waitpid with the __WCLONE
8a77dff3
VP
96flag to check for events in cloned processes. If nothing is found, we use
97sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
98happened to a child process -- and SIGCHLD will be delivered both for events
99in main debugged process and in cloned processes. As soon as we know there's
3e43a32a
MS
100an event, we get back to calling nonblocking waitpid with and without
__WCLONE.
8a77dff3
VP
102
103Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
1777feb0 104so that we don't miss a signal. If SIGCHLD arrives in between, when it's
8a77dff3
VP
105blocked, the signal becomes pending and sigsuspend immediately
106notices it and returns.
107
108Waiting for events in async mode
109================================
110
7feb7d06
PA
111In async mode, GDB should always be ready to handle both user input
112and target events, so neither blocking waitpid nor sigsuspend are
113viable options. Instead, we should asynchronously notify the GDB main
114event loop whenever there's an unprocessed event from the target. We
115detect asynchronous target events by handling SIGCHLD signals. To
116notify the event loop about target events, the self-pipe trick is used
117--- a pipe is registered as waitable event source in the event loop,
118the event loop select/poll's on the read end of this pipe (as well on
119other event sources, e.g., stdin), and the SIGCHLD handler writes a
120byte to this pipe. This is more portable than relying on
121pselect/ppoll, since on kernels that lack those syscalls, libc
122emulates them with select/poll+sigprocmask, and that is racy
123(a.k.a. plain broken).
124
125Obviously, if we fail to notify the event loop if there's a target
126event, it's bad. OTOH, if we notify the event loop when there's no
127event from the target, linux_nat_wait will detect that there's no real
128event to report, and return event of type TARGET_WAITKIND_IGNORE.
129This is mostly harmless, but it will waste time and is better avoided.
130
131The main design point is that every time GDB is outside linux-nat.c,
132we have a SIGCHLD handler installed that is called when something
133happens to the target and notifies the GDB event loop. Whenever GDB
134core decides to handle the event, and calls into linux-nat.c, we
135process things as in sync mode, except that the we never block in
136sigsuspend.
137
138While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
140return quickly. E.g., in all-stop mode, before reporting to the core
141that an LWP hit a breakpoint, all LWPs are stopped by sending them
142SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
143Note that this is different from blocking indefinitely waiting for the
144next event --- here, we're already handling an event.
8a77dff3
VP
145
146Use of signals
147==============
148
149We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
150signal is not entirely significant; we just need for a signal to be delivered,
151so that we can intercept it. SIGSTOP's advantage is that it can not be
152blocked. A disadvantage is that it is not a real-time signal, so it can only
153be queued once; we do not keep track of other sources of SIGSTOP.
154
155Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
156use them, because they have special behavior when the signal is generated -
157not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
158kills the entire thread group.
159
160A delivered SIGSTOP would stop the entire thread group, not just the thread we
161tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
162cancel it (by PTRACE_CONT without passing SIGSTOP).
163
164We could use a real-time signal instead. This would solve those problems; we
165could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
166But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
167generates it, and there are races with trying to find a signal that is not
168blocked. */
a0ef4274 169
dba24537
AC
170#ifndef O_LARGEFILE
171#define O_LARGEFILE 0
172#endif
0274a8ce 173
ca2163eb
PA
174/* Unlike other extended result codes, WSTOPSIG (status) on
175 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
176 instead SIGTRAP with bit 7 set. */
177#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
178
10d6c8cd
DJ
179/* The single-threaded native GNU/Linux target_ops. We save a pointer for
180 the use of the multi-threaded target. */
181static struct target_ops *linux_ops;
f973ed9c 182static struct target_ops linux_ops_saved;
10d6c8cd 183
9f0bdab8 184/* The method to call, if any, when a new thread is attached. */
7b50312a
PA
185static void (*linux_nat_new_thread) (struct lwp_info *);
186
26cb8b7c
PA
187/* The method to call, if any, when a new fork is attached. */
188static linux_nat_new_fork_ftype *linux_nat_new_fork;
189
190/* The method to call, if any, when a process is no longer
191 attached. */
192static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
193
7b50312a
PA
194/* Hook to call prior to resuming a thread. */
195static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
9f0bdab8 196
5b009018
PA
197/* The method to call, if any, when the siginfo object needs to be
198 converted between the layout returned by ptrace, and the layout in
199 the architecture of the inferior. */
a5362b9a 200static int (*linux_nat_siginfo_fixup) (siginfo_t *,
5b009018
PA
201 gdb_byte *,
202 int);
203
ac264b3b
MS
204/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
205 Called by our to_xfer_partial. */
206static LONGEST (*super_xfer_partial) (struct target_ops *,
207 enum target_object,
208 const char *, gdb_byte *,
209 const gdb_byte *,
10d6c8cd
DJ
210 ULONGEST, LONGEST);
211
ccce17b0 212static unsigned int debug_linux_nat;
920d2a44
AC
/* "show" callback for the debug_linux_nat setting: print its current
   value (VALUE is the string form of the setting) to FILE.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
d6b0e80f 220
ae087d01
DJ
/* Trivial singly-linked list node recording a process that has
   stopped, together with the wait status that reported the stop.  */
struct simple_pid_list
{
  int pid;			/* Process (or LWP) id.  */
  int status;			/* Status as returned by waitpid.  */
  struct simple_pid_list *next;	/* Next node, or NULL.  */
};

/* Head of the list of stopped pids; manipulated with add_to_pid_list
   and pull_pid_from_list below.  */
struct simple_pid_list *stopped_pids;
228
3993f6b1
DJ
229/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
230 can not be used, 1 if it can. */
231
232static int linux_supports_tracefork_flag = -1;
233
3e43a32a
MS
234/* This variable is a tri-state flag: -1 for unknown, 0 if
235 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
a96d9b2e
SDJ
236
237static int linux_supports_tracesysgood_flag = -1;
238
9016a515
DJ
239/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
240 PTRACE_O_TRACEVFORKDONE. */
241
242static int linux_supports_tracevforkdone_flag = -1;
243
a96d9b2e
SDJ
244/* Stores the current used ptrace() options. */
245static int current_ptrace_options = 0;
246
3dd5b83d
PA
247/* Async mode support. */
248
b84876c2
PA
249/* The read/write ends of the pipe registered as waitable file in the
250 event loop. */
251static int linux_nat_event_pipe[2] = { -1, -1 };
252
7feb7d06 253/* Flush the event pipe. */
b84876c2 254
7feb7d06
PA
255static void
256async_file_flush (void)
b84876c2 257{
7feb7d06
PA
258 int ret;
259 char buf;
b84876c2 260
7feb7d06 261 do
b84876c2 262 {
7feb7d06 263 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 264 }
7feb7d06 265 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
266}
267
7feb7d06
PA
268/* Put something (anything, doesn't matter what, or how much) in event
269 pipe, so that the select/poll in the event-loop realizes we have
270 something to process. */
252fbfc8 271
b84876c2 272static void
7feb7d06 273async_file_mark (void)
b84876c2 274{
7feb7d06 275 int ret;
b84876c2 276
7feb7d06
PA
277 /* It doesn't really matter what the pipe contains, as long we end
278 up with something in it. Might as well flush the previous
279 left-overs. */
280 async_file_flush ();
b84876c2 281
7feb7d06 282 do
b84876c2 283 {
7feb7d06 284 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 285 }
7feb7d06 286 while (ret == -1 && errno == EINTR);
b84876c2 287
7feb7d06
PA
288 /* Ignore EAGAIN. If the pipe is full, the event loop will already
289 be awakened anyway. */
b84876c2
PA
290}
291
7feb7d06 292static void linux_nat_async (void (*callback)
3e43a32a
MS
293 (enum inferior_event_type event_type,
294 void *context),
7feb7d06 295 void *context);
7feb7d06
PA
296static int kill_lwp (int lwpid, int signo);
297
298static int stop_callback (struct lwp_info *lp, void *data);
299
300static void block_child_signals (sigset_t *prev_mask);
301static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
302
303struct lwp_info;
304static struct lwp_info *add_lwp (ptid_t ptid);
305static void purge_lwp_list (int pid);
4403d8e9 306static void delete_lwp (ptid_t ptid);
2277426b
PA
307static struct lwp_info *find_lwp_pid (ptid_t ptid);
308
ae087d01
DJ
309\f
310/* Trivial list manipulation functions to keep track of a list of
311 new stopped processes. */
312static void
3d799a95 313add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
314{
315 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
e0881a8e 316
ae087d01 317 new_pid->pid = pid;
3d799a95 318 new_pid->status = status;
ae087d01
DJ
319 new_pid->next = *listp;
320 *listp = new_pid;
321}
322
84636d28
PA
323static int
324in_pid_list_p (struct simple_pid_list *list, int pid)
325{
326 struct simple_pid_list *p;
327
328 for (p = list; p != NULL; p = p->next)
329 if (p->pid == pid)
330 return 1;
331 return 0;
332}
333
ae087d01 334static int
46a96992 335pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
336{
337 struct simple_pid_list **p;
338
339 for (p = listp; *p != NULL; p = &(*p)->next)
340 if ((*p)->pid == pid)
341 {
342 struct simple_pid_list *next = (*p)->next;
e0881a8e 343
46a96992 344 *statusp = (*p)->status;
ae087d01
DJ
345 xfree (*p);
346 *p = next;
347 return 1;
348 }
349 return 0;
350}
351
3993f6b1
DJ
352\f
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the new child: request tracing, stop so the parent can set
   ptrace options, fork once (to trigger a fork event if the option
   took effect), then exit.  */

static void
linux_tracefork_child (void)
{
  /* Let the parent trace and control this process.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  /* Stop here so the parent can apply PTRACE_SETOPTIONS.  */
  kill (getpid (), SIGSTOP);
  /* If PTRACE_O_TRACEFORK is in effect, this fork is reported to the
     parent as a PTRACE_EVENT_FORK stop.  */
  fork ();
  _exit (0);
}
363
/* Wrapper around waitpid that transparently retries when the call is
   interrupted by a signal (EINTR).  Returns whatever waitpid finally
   returns; *STATUSP and FLAGS are passed through unchanged.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int result;

  while ((result = waitpid (pid, statusp, flags)) == -1 && errno == EINTR)
    ;

  return result;
}
379
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.

   Side effects: sets linux_supports_tracefork_flag and
   linux_supports_tracevforkdone_flag.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  /* Pessimistic defaults until proven otherwise.  */
  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* Quick test on the existing inferior: if the kernel rejects the
     option outright, the feature is definitely absent.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Create a dedicated test child (see linux_tracefork_child).  */
  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  /* Wait for the child's self-sent SIGSTOP.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  /* Now try to set the fork-tracing option on the stopped child.  If
     even this fails, clean up the child and report no support.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      /* Reap the killed child.  */
      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed "
		   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected "
		   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run; it will fork and (if the option really works)
     stop with a PTRACE_EVENT_FORK notification.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* A fork event is encoded in the upper half of the wait status.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  /* We saw the event AND could extract the grandchild's pid:
	     the feature is usable.  */
	  linux_supports_tracefork_flag = 1;

	  /* Reap and kill the traced grandchild too.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: "
		       "failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  /* Kill and reap the test child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}
492
a96d9b2e
SDJ
493/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
494
495 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
496 we know that the feature is not available. This may change the tracing
497 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
498
499static void
500linux_test_for_tracesysgood (int original_pid)
501{
502 int ret;
503 sigset_t prev_mask;
504
505 /* We don't want those ptrace calls to be interrupted. */
506 block_child_signals (&prev_mask);
507
508 linux_supports_tracesysgood_flag = 0;
509
510 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
511 if (ret != 0)
512 goto out;
513
514 linux_supports_tracesysgood_flag = 1;
515out:
516 restore_child_signals_mask (&prev_mask);
517}
518
/* Determine whether the PTRACE_O_TRACESYSGOOD option is available,
   probing with PID on the first call.  This function also sets
   linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  /* -1 means "not probed yet"; probe once and cache the answer.  */
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}
529
3993f6b1
DJ
/* Return non-zero iff we have tracefork functionality available.
   The first call probes the kernel using PID; the result is cached in
   linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}
540
/* Return non-zero iff PTRACE_O_TRACEVFORKDONE is available.  The
   probe is linux_test_for_tracefork, which sets both the tracefork
   and the tracevforkdone flags, so we key off the tracefork flag to
   decide whether probing has happened yet.  */

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}
548
a96d9b2e
SDJ
/* Enable the PTRACE_O_TRACESYSGOOD option for PTID (preferring its
   LWP id, falling back to its process id), if the kernel supports it.
   The option is also recorded in current_ptrace_options.  */

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  /* An LWP id of 0 means PTID only carries a process id.  */
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}
564
3993f6b1 565\f
4de4c07c
DJ
566void
567linux_enable_event_reporting (ptid_t ptid)
568{
d3587048 569 int pid = ptid_get_lwp (ptid);
4de4c07c 570
d3587048
DJ
571 if (pid == 0)
572 pid = ptid_get_pid (ptid);
573
b957e937 574 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
575 return;
576
a96d9b2e
SDJ
577 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
578 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
579
b957e937 580 if (linux_supports_tracevforkdone (pid))
a96d9b2e 581 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
9016a515
DJ
582
583 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
584 read-only process state. */
4de4c07c 585
a96d9b2e 586 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
4de4c07c
DJ
587}
588
6d8fd2b7
UW
/* Called after attaching to PID: turn on ptrace event reporting and
   syscall tracing for it, and emit any one-time warnings about known
   kernel ptrace deficiencies.  */

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
  linux_ptrace_init_warnings ();
}
596
/* Called after starting a new inferior identified by PTID: turn on
   ptrace event reporting and syscall tracing for it, and emit any
   one-time warnings about known kernel ptrace deficiencies.  */

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
  linux_ptrace_init_warnings ();
}
604
4403d8e9
JK
605/* Return the number of known LWPs in the tgid given by PID. */
606
607static int
608num_lwps (int pid)
609{
610 int count = 0;
611 struct lwp_info *lp;
612
613 for (lp = lwp_list; lp; lp = lp->next)
614 if (ptid_get_pid (lp->ptid) == pid)
615 count++;
616
617 return count;
618}
619
/* Call delete_lwp with a prototype compatible with make_cleanup.
   LP_VOIDP is really a struct lwp_info pointer.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}
629
6d8fd2b7
UW
630static int
631linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 632{
7feb7d06 633 sigset_t prev_mask;
9016a515 634 int has_vforked;
4de4c07c
DJ
635 int parent_pid, child_pid;
636
7feb7d06 637 block_child_signals (&prev_mask);
b84876c2 638
e58b0e63
PA
639 has_vforked = (inferior_thread ()->pending_follow.kind
640 == TARGET_WAITKIND_VFORKED);
641 parent_pid = ptid_get_lwp (inferior_ptid);
d3587048 642 if (parent_pid == 0)
e58b0e63
PA
643 parent_pid = ptid_get_pid (inferior_ptid);
644 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
4de4c07c 645
2277426b
PA
646 if (!detach_fork)
647 linux_enable_event_reporting (pid_to_ptid (child_pid));
648
6c95b8df
PA
649 if (has_vforked
650 && !non_stop /* Non-stop always resumes both branches. */
651 && (!target_is_async_p () || sync_execution)
652 && !(follow_child || detach_fork || sched_multi))
653 {
654 /* The parent stays blocked inside the vfork syscall until the
655 child execs or exits. If we don't let the child run, then
656 the parent stays blocked. If we're telling the parent to run
657 in the foreground, the user will not be able to ctrl-c to get
658 back the terminal, effectively hanging the debug session. */
ac74f770
MS
659 fprintf_filtered (gdb_stderr, _("\
660Can not resume the parent process over vfork in the foreground while\n\
661holding the child stopped. Try \"set detach-on-fork\" or \
662\"set schedule-multiple\".\n"));
663 /* FIXME output string > 80 columns. */
6c95b8df
PA
664 return 1;
665 }
666
4de4c07c
DJ
667 if (! follow_child)
668 {
6c95b8df 669 struct lwp_info *child_lp = NULL;
4de4c07c 670
1777feb0 671 /* We're already attached to the parent, by default. */
4de4c07c 672
ac264b3b
MS
673 /* Detach new forked process? */
674 if (detach_fork)
f75c00e4 675 {
4403d8e9
JK
676 struct cleanup *old_chain;
677
6c95b8df
PA
678 /* Before detaching from the child, remove all breakpoints
679 from it. If we forked, then this has already been taken
680 care of by infrun.c. If we vforked however, any
681 breakpoint inserted in the parent is visible in the
682 child, even those added while stopped in a vfork
683 catchpoint. This will remove the breakpoints from the
684 parent also, but they'll be reinserted below. */
685 if (has_vforked)
686 {
687 /* keep breakpoints list in sync. */
688 remove_breakpoints_pid (GET_PID (inferior_ptid));
689 }
690
e85a822c 691 if (info_verbose || debug_linux_nat)
ac264b3b
MS
692 {
693 target_terminal_ours ();
694 fprintf_filtered (gdb_stdlog,
3e43a32a
MS
695 "Detaching after fork from "
696 "child process %d.\n",
ac264b3b
MS
697 child_pid);
698 }
4de4c07c 699
4403d8e9
JK
700 old_chain = save_inferior_ptid ();
701 inferior_ptid = ptid_build (child_pid, child_pid, 0);
702
703 child_lp = add_lwp (inferior_ptid);
704 child_lp->stopped = 1;
705 child_lp->last_resume_kind = resume_stop;
706 make_cleanup (delete_lwp_cleanup, child_lp);
707
4403d8e9
JK
708 if (linux_nat_prepare_to_resume != NULL)
709 linux_nat_prepare_to_resume (child_lp);
ac264b3b 710 ptrace (PTRACE_DETACH, child_pid, 0, 0);
4403d8e9
JK
711
712 do_cleanups (old_chain);
ac264b3b
MS
713 }
714 else
715 {
77435e4c 716 struct inferior *parent_inf, *child_inf;
2277426b 717 struct cleanup *old_chain;
7f9f62ba
PA
718
719 /* Add process to GDB's tables. */
77435e4c
PA
720 child_inf = add_inferior (child_pid);
721
e58b0e63 722 parent_inf = current_inferior ();
77435e4c 723 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 724 copy_terminal_info (child_inf, parent_inf);
6ecd4729
PA
725 child_inf->gdbarch = parent_inf->gdbarch;
726 copy_inferior_target_desc_info (child_inf, parent_inf);
7f9f62ba 727
2277426b 728 old_chain = save_inferior_ptid ();
6c95b8df 729 save_current_program_space ();
2277426b
PA
730
731 inferior_ptid = ptid_build (child_pid, child_pid, 0);
732 add_thread (inferior_ptid);
6c95b8df
PA
733 child_lp = add_lwp (inferior_ptid);
734 child_lp->stopped = 1;
25289eb2 735 child_lp->last_resume_kind = resume_stop;
7dcd53a0 736 child_inf->symfile_flags = SYMFILE_NO_READ;
2277426b 737
6c95b8df
PA
738 /* If this is a vfork child, then the address-space is
739 shared with the parent. */
740 if (has_vforked)
741 {
742 child_inf->pspace = parent_inf->pspace;
743 child_inf->aspace = parent_inf->aspace;
744
745 /* The parent will be frozen until the child is done
746 with the shared region. Keep track of the
747 parent. */
748 child_inf->vfork_parent = parent_inf;
749 child_inf->pending_detach = 0;
750 parent_inf->vfork_child = child_inf;
751 parent_inf->pending_detach = 0;
752 }
753 else
754 {
755 child_inf->aspace = new_address_space ();
756 child_inf->pspace = add_program_space (child_inf->aspace);
757 child_inf->removable = 1;
758 set_current_program_space (child_inf->pspace);
759 clone_program_space (child_inf->pspace, parent_inf->pspace);
760
761 /* Let the shared library layer (solib-svr4) learn about
762 this new process, relocate the cloned exec, pull in
763 shared libraries, and install the solib event
764 breakpoint. If a "cloned-VM" event was propagated
765 better throughout the core, this wouldn't be
766 required. */
268a4a75 767 solib_create_inferior_hook (0);
6c95b8df
PA
768 }
769
770 /* Let the thread_db layer learn about this new process. */
2277426b
PA
771 check_for_thread_db ();
772
773 do_cleanups (old_chain);
ac264b3b 774 }
9016a515
DJ
775
776 if (has_vforked)
777 {
3ced3da4 778 struct lwp_info *parent_lp;
6c95b8df
PA
779 struct inferior *parent_inf;
780
781 parent_inf = current_inferior ();
782
783 /* If we detached from the child, then we have to be careful
784 to not insert breakpoints in the parent until the child
785 is done with the shared memory region. However, if we're
786 staying attached to the child, then we can and should
787 insert breakpoints, so that we can debug it. A
788 subsequent child exec or exit is enough to know when does
789 the child stops using the parent's address space. */
790 parent_inf->waiting_for_vfork_done = detach_fork;
56710373 791 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
6c95b8df 792
3ced3da4 793 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
b957e937 794 gdb_assert (linux_supports_tracefork_flag >= 0);
3ced3da4 795
b957e937 796 if (linux_supports_tracevforkdone (0))
9016a515 797 {
6c95b8df
PA
798 if (debug_linux_nat)
799 fprintf_unfiltered (gdb_stdlog,
800 "LCFF: waiting for VFORK_DONE on %d\n",
801 parent_pid);
3ced3da4 802 parent_lp->stopped = 1;
9016a515 803
6c95b8df
PA
804 /* We'll handle the VFORK_DONE event like any other
805 event, in target_wait. */
9016a515
DJ
806 }
807 else
808 {
809 /* We can't insert breakpoints until the child has
810 finished with the shared memory region. We need to
811 wait until that happens. Ideal would be to just
812 call:
813 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
814 - waitpid (parent_pid, &status, __WALL);
815 However, most architectures can't handle a syscall
816 being traced on the way out if it wasn't traced on
817 the way in.
818
819 We might also think to loop, continuing the child
820 until it exits or gets a SIGTRAP. One problem is
821 that the child might call ptrace with PTRACE_TRACEME.
822
823 There's no simple and reliable way to figure out when
824 the vforked child will be done with its copy of the
825 shared memory. We could step it out of the syscall,
826 two instructions, let it go, and then single-step the
827 parent once. When we have hardware single-step, this
828 would work; with software single-step it could still
829 be made to work but we'd have to be able to insert
830 single-step breakpoints in the child, and we'd have
831 to insert -just- the single-step breakpoint in the
832 parent. Very awkward.
833
834 In the end, the best we can do is to make sure it
835 runs for a little while. Hopefully it will be out of
836 range of any breakpoints we reinsert. Usually this
837 is only the single-step breakpoint at vfork's return
838 point. */
839
6c95b8df
PA
840 if (debug_linux_nat)
841 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
842 "LCFF: no VFORK_DONE "
843 "support, sleeping a bit\n");
6c95b8df 844
9016a515 845 usleep (10000);
9016a515 846
6c95b8df
PA
847 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
848 and leave it pending. The next linux_nat_resume call
849 will notice a pending event, and bypasses actually
850 resuming the inferior. */
3ced3da4
PA
851 parent_lp->status = 0;
852 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
853 parent_lp->stopped = 1;
6c95b8df
PA
854
855 /* If we're in async mode, need to tell the event loop
856 there's something here to process. */
857 if (target_can_async_p ())
858 async_file_mark ();
859 }
9016a515 860 }
4de4c07c 861 }
3993f6b1 862 else
4de4c07c 863 {
77435e4c 864 struct inferior *parent_inf, *child_inf;
3ced3da4 865 struct lwp_info *child_lp;
6c95b8df 866 struct program_space *parent_pspace;
4de4c07c 867
e85a822c 868 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
869 {
870 target_terminal_ours ();
6c95b8df 871 if (has_vforked)
3e43a32a
MS
872 fprintf_filtered (gdb_stdlog,
873 _("Attaching after process %d "
874 "vfork to child process %d.\n"),
6c95b8df
PA
875 parent_pid, child_pid);
876 else
3e43a32a
MS
877 fprintf_filtered (gdb_stdlog,
878 _("Attaching after process %d "
879 "fork to child process %d.\n"),
6c95b8df 880 parent_pid, child_pid);
f75c00e4 881 }
4de4c07c 882
7a7d3353
PA
883 /* Add the new inferior first, so that the target_detach below
884 doesn't unpush the target. */
885
77435e4c
PA
886 child_inf = add_inferior (child_pid);
887
e58b0e63 888 parent_inf = current_inferior ();
77435e4c 889 child_inf->attach_flag = parent_inf->attach_flag;
191c4426 890 copy_terminal_info (child_inf, parent_inf);
6ecd4729
PA
891 child_inf->gdbarch = parent_inf->gdbarch;
892 copy_inferior_target_desc_info (child_inf, parent_inf);
7a7d3353 893
6c95b8df 894 parent_pspace = parent_inf->pspace;
9016a515 895
6c95b8df
PA
896 /* If we're vforking, we want to hold on to the parent until the
897 child exits or execs. At child exec or exit time we can
898 remove the old breakpoints from the parent and detach or
899 resume debugging it. Otherwise, detach the parent now; we'll
900 want to reuse it's program/address spaces, but we can't set
901 them to the child before removing breakpoints from the
902 parent, otherwise, the breakpoints module could decide to
903 remove breakpoints from the wrong process (since they'd be
904 assigned to the same address space). */
9016a515
DJ
905
906 if (has_vforked)
7f9f62ba 907 {
6c95b8df
PA
908 gdb_assert (child_inf->vfork_parent == NULL);
909 gdb_assert (parent_inf->vfork_child == NULL);
910 child_inf->vfork_parent = parent_inf;
911 child_inf->pending_detach = 0;
912 parent_inf->vfork_child = child_inf;
913 parent_inf->pending_detach = detach_fork;
914 parent_inf->waiting_for_vfork_done = 0;
ac264b3b 915 }
2277426b 916 else if (detach_fork)
b84876c2 917 target_detach (NULL, 0);
4de4c07c 918
6c95b8df
PA
919 /* Note that the detach above makes PARENT_INF dangling. */
920
921 /* Add the child thread to the appropriate lists, and switch to
922 this new thread, before cloning the program space, and
923 informing the solib layer about this new process. */
924
9f0bdab8 925 inferior_ptid = ptid_build (child_pid, child_pid, 0);
2277426b 926 add_thread (inferior_ptid);
3ced3da4
PA
927 child_lp = add_lwp (inferior_ptid);
928 child_lp->stopped = 1;
25289eb2 929 child_lp->last_resume_kind = resume_stop;
6c95b8df
PA
930
931 /* If this is a vfork child, then the address-space is shared
932 with the parent. If we detached from the parent, then we can
933 reuse the parent's program/address spaces. */
934 if (has_vforked || detach_fork)
935 {
936 child_inf->pspace = parent_pspace;
937 child_inf->aspace = child_inf->pspace->aspace;
938 }
939 else
940 {
941 child_inf->aspace = new_address_space ();
942 child_inf->pspace = add_program_space (child_inf->aspace);
943 child_inf->removable = 1;
7dcd53a0 944 child_inf->symfile_flags = SYMFILE_NO_READ;
6c95b8df
PA
945 set_current_program_space (child_inf->pspace);
946 clone_program_space (child_inf->pspace, parent_pspace);
947
948 /* Let the shared library layer (solib-svr4) learn about
949 this new process, relocate the cloned exec, pull in
950 shared libraries, and install the solib event breakpoint.
951 If a "cloned-VM" event was propagated better throughout
952 the core, this wouldn't be required. */
268a4a75 953 solib_create_inferior_hook (0);
6c95b8df 954 }
ac264b3b 955
6c95b8df 956 /* Let the thread_db layer learn about this new process. */
ef29ce1a 957 check_for_thread_db ();
4de4c07c
DJ
958 }
959
7feb7d06 960 restore_child_signals_mask (&prev_mask);
4de4c07c
DJ
961 return 0;
962}
963
4de4c07c 964\f
77b06cd7 965static int
6d8fd2b7 966linux_child_insert_fork_catchpoint (int pid)
4de4c07c 967{
77b06cd7 968 return !linux_supports_tracefork (pid);
3993f6b1
DJ
969}
970
eb73ad13
PA
971static int
972linux_child_remove_fork_catchpoint (int pid)
973{
974 return 0;
975}
976
77b06cd7 977static int
6d8fd2b7 978linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 979{
77b06cd7 980 return !linux_supports_tracefork (pid);
3993f6b1
DJ
981}
982
eb73ad13
PA
983static int
984linux_child_remove_vfork_catchpoint (int pid)
985{
986 return 0;
987}
988
77b06cd7 989static int
6d8fd2b7 990linux_child_insert_exec_catchpoint (int pid)
3993f6b1 991{
77b06cd7 992 return !linux_supports_tracefork (pid);
3993f6b1
DJ
993}
994
eb73ad13
PA
995static int
996linux_child_remove_exec_catchpoint (int pid)
997{
998 return 0;
999}
1000
a96d9b2e
SDJ
1001static int
1002linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
1003 int table_size, int *table)
1004{
77b06cd7
TJB
1005 if (!linux_supports_tracesysgood (pid))
1006 return 1;
1007
a96d9b2e
SDJ
1008 /* On GNU/Linux, we ignore the arguments. It means that we only
1009 enable the syscall catchpoints, but do not disable them.
77b06cd7 1010
a96d9b2e
SDJ
1011 Also, we do not use the `table' information because we do not
1012 filter system calls here. We let GDB do the logic for us. */
1013 return 0;
1014}
1015
d6b0e80f
AC
1016/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
1017 are processes sharing the same VM space. A multi-threaded process
1018 is basically a group of such processes. However, such a grouping
1019 is almost entirely a user-space issue; the kernel doesn't enforce
1020 such a grouping at all (this might change in the future). In
1021 general, we'll rely on the threads library (i.e. the GNU/Linux
1022 Threads library) to provide such a grouping.
1023
1024 It is perfectly well possible to write a multi-threaded application
1025 without the assistance of a threads library, by using the clone
1026 system call directly. This module should be able to give some
1027 rudimentary support for debugging such applications if developers
1028 specify the CLONE_PTRACE flag in the clone system call, and are
1029 using the Linux kernel 2.4 or above.
1030
1031 Note that there are some peculiarities in GNU/Linux that affect
1032 this code:
1033
1034 - In general one should specify the __WCLONE flag to waitpid in
1035 order to make it report events for any of the cloned processes
1036 (and leave it out for the initial process). However, if a cloned
1037 process has exited the exit status is only reported if the
1038 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
1039 we cannot use it since GDB must work on older systems too.
1040
1041 - When a traced, cloned process exits and is waited for by the
1042 debugger, the kernel reassigns it to the original parent and
1043 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
1044 library doesn't notice this, which leads to the "zombie problem":
1045 When debugged a multi-threaded process that spawns a lot of
1046 threads will run out of processes, even if the threads exit,
1047 because the "zombies" stay around. */
1048
1049/* List of known LWPs. */
9f0bdab8 1050struct lwp_info *lwp_list;
d6b0e80f
AC
1051\f
1052
d6b0e80f
AC
1053/* Original signal mask. */
1054static sigset_t normal_mask;
1055
1056/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1057 _initialize_linux_nat. */
1058static sigset_t suspend_mask;
1059
7feb7d06
PA
1060/* Signals to block to make that sigsuspend work. */
1061static sigset_t blocked_mask;
1062
1063/* SIGCHLD action. */
1064struct sigaction sigchld_action;
b84876c2 1065
7feb7d06
PA
1066/* Block child signals (SIGCHLD and linux threads signals), and store
1067 the previous mask in PREV_MASK. */
84e46146 1068
7feb7d06
PA
1069static void
1070block_child_signals (sigset_t *prev_mask)
1071{
1072 /* Make sure SIGCHLD is blocked. */
1073 if (!sigismember (&blocked_mask, SIGCHLD))
1074 sigaddset (&blocked_mask, SIGCHLD);
1075
1076 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1077}
1078
1079/* Restore child signals mask, previously returned by
1080 block_child_signals. */
1081
1082static void
1083restore_child_signals_mask (sigset_t *prev_mask)
1084{
1085 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1086}
2455069d
UW
1087
1088/* Mask of signals to pass directly to the inferior. */
1089static sigset_t pass_mask;
1090
1091/* Update signals to pass to the inferior. */
1092static void
1093linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1094{
1095 int signo;
1096
1097 sigemptyset (&pass_mask);
1098
1099 for (signo = 1; signo < NSIG; signo++)
1100 {
2ea28649 1101 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
1102 if (target_signo < numsigs && pass_signals[target_signo])
1103 sigaddset (&pass_mask, signo);
1104 }
1105}
1106
d6b0e80f
AC
1107\f
1108
1109/* Prototypes for local functions. */
1110static int stop_wait_callback (struct lwp_info *lp, void *data);
28439f5e 1111static int linux_thread_alive (ptid_t ptid);
6d8fd2b7 1112static char *linux_child_pid_to_exec_file (int pid);
710151dd 1113
d6b0e80f
AC
1114\f
1115/* Convert wait status STATUS to a string. Used for printing debug
1116 messages only. */
1117
1118static char *
1119status_to_str (int status)
1120{
1121 static char buf[64];
1122
1123 if (WIFSTOPPED (status))
206aa767 1124 {
ca2163eb 1125 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
206aa767
DE
1126 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1127 strsignal (SIGTRAP));
1128 else
1129 snprintf (buf, sizeof (buf), "%s (stopped)",
1130 strsignal (WSTOPSIG (status)));
1131 }
d6b0e80f
AC
1132 else if (WIFSIGNALED (status))
1133 snprintf (buf, sizeof (buf), "%s (terminated)",
ba9b2ec3 1134 strsignal (WTERMSIG (status)));
d6b0e80f
AC
1135 else
1136 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1137
1138 return buf;
1139}
1140
7b50312a
PA
1141/* Destroy and free LP. */
1142
1143static void
1144lwp_free (struct lwp_info *lp)
1145{
1146 xfree (lp->arch_private);
1147 xfree (lp);
1148}
1149
d90e17a7
PA
1150/* Remove all LWPs belong to PID from the lwp list. */
1151
1152static void
1153purge_lwp_list (int pid)
1154{
1155 struct lwp_info *lp, *lpprev, *lpnext;
1156
1157 lpprev = NULL;
1158
1159 for (lp = lwp_list; lp; lp = lpnext)
1160 {
1161 lpnext = lp->next;
1162
1163 if (ptid_get_pid (lp->ptid) == pid)
1164 {
1165 if (lp == lwp_list)
1166 lwp_list = lp->next;
1167 else
1168 lpprev->next = lp->next;
1169
7b50312a 1170 lwp_free (lp);
d90e17a7
PA
1171 }
1172 else
1173 lpprev = lp;
1174 }
1175}
1176
26cb8b7c
PA
1177/* Add the LWP specified by PTID to the list. PTID is the first LWP
1178 in the process. Return a pointer to the structure describing the
1179 new LWP.
1180
1181 This differs from add_lwp in that we don't let the arch specific
1182 bits know about this new thread. Current clients of this callback
1183 take the opportunity to install watchpoints in the new thread, and
1184 we shouldn't do that for the first thread. If we're spawning a
1185 child ("run"), the thread executes the shell wrapper first, and we
1186 shouldn't touch it until it execs the program we want to debug.
1187 For "attach", it'd be okay to call the callback, but it's not
1188 necessary, because watchpoints can't yet have been inserted into
1189 the inferior. */
d6b0e80f
AC
1190
1191static struct lwp_info *
26cb8b7c 1192add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
1193{
1194 struct lwp_info *lp;
1195
1196 gdb_assert (is_lwp (ptid));
1197
1198 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1199
1200 memset (lp, 0, sizeof (struct lwp_info));
1201
25289eb2 1202 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1203 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1204
1205 lp->ptid = ptid;
dc146f7c 1206 lp->core = -1;
d6b0e80f
AC
1207
1208 lp->next = lwp_list;
1209 lwp_list = lp;
d6b0e80f 1210
26cb8b7c
PA
1211 return lp;
1212}
1213
1214/* Add the LWP specified by PID to the list. Return a pointer to the
1215 structure describing the new LWP. The LWP should already be
1216 stopped. */
1217
1218static struct lwp_info *
1219add_lwp (ptid_t ptid)
1220{
1221 struct lwp_info *lp;
1222
1223 lp = add_initial_lwp (ptid);
1224
6e012a6c
PA
1225 /* Let the arch specific bits know about this new thread. Current
1226 clients of this callback take the opportunity to install
26cb8b7c
PA
1227 watchpoints in the new thread. We don't do this for the first
1228 thread though. See add_initial_lwp. */
1229 if (linux_nat_new_thread != NULL)
7b50312a 1230 linux_nat_new_thread (lp);
9f0bdab8 1231
d6b0e80f
AC
1232 return lp;
1233}
1234
1235/* Remove the LWP specified by PID from the list. */
1236
1237static void
1238delete_lwp (ptid_t ptid)
1239{
1240 struct lwp_info *lp, *lpprev;
1241
1242 lpprev = NULL;
1243
1244 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1245 if (ptid_equal (lp->ptid, ptid))
1246 break;
1247
1248 if (!lp)
1249 return;
1250
d6b0e80f
AC
1251 if (lpprev)
1252 lpprev->next = lp->next;
1253 else
1254 lwp_list = lp->next;
1255
7b50312a 1256 lwp_free (lp);
d6b0e80f
AC
1257}
1258
1259/* Return a pointer to the structure describing the LWP corresponding
1260 to PID. If no corresponding LWP could be found, return NULL. */
1261
1262static struct lwp_info *
1263find_lwp_pid (ptid_t ptid)
1264{
1265 struct lwp_info *lp;
1266 int lwp;
1267
1268 if (is_lwp (ptid))
1269 lwp = GET_LWP (ptid);
1270 else
1271 lwp = GET_PID (ptid);
1272
1273 for (lp = lwp_list; lp; lp = lp->next)
1274 if (lwp == GET_LWP (lp->ptid))
1275 return lp;
1276
1277 return NULL;
1278}
1279
1280/* Call CALLBACK with its second argument set to DATA for every LWP in
1281 the list. If CALLBACK returns 1 for a particular LWP, return a
1282 pointer to the structure describing that LWP immediately.
1283 Otherwise return NULL. */
1284
1285struct lwp_info *
d90e17a7
PA
1286iterate_over_lwps (ptid_t filter,
1287 int (*callback) (struct lwp_info *, void *),
1288 void *data)
d6b0e80f
AC
1289{
1290 struct lwp_info *lp, *lpnext;
1291
1292 for (lp = lwp_list; lp; lp = lpnext)
1293 {
1294 lpnext = lp->next;
d90e17a7
PA
1295
1296 if (ptid_match (lp->ptid, filter))
1297 {
1298 if ((*callback) (lp, data))
1299 return lp;
1300 }
d6b0e80f
AC
1301 }
1302
1303 return NULL;
1304}
1305
2277426b
PA
1306/* Update our internal state when changing from one checkpoint to
1307 another indicated by NEW_PTID. We can only switch single-threaded
1308 applications, so we only create one new LWP, and the previous list
1309 is discarded. */
f973ed9c
DJ
1310
1311void
1312linux_nat_switch_fork (ptid_t new_ptid)
1313{
1314 struct lwp_info *lp;
1315
2277426b
PA
1316 purge_lwp_list (GET_PID (inferior_ptid));
1317
f973ed9c
DJ
1318 lp = add_lwp (new_ptid);
1319 lp->stopped = 1;
e26af52f 1320
2277426b
PA
1321 /* This changes the thread's ptid while preserving the gdb thread
1322 num. Also changes the inferior pid, while preserving the
1323 inferior num. */
1324 thread_change_ptid (inferior_ptid, new_ptid);
1325
1326 /* We've just told GDB core that the thread changed target id, but,
1327 in fact, it really is a different thread, with different register
1328 contents. */
1329 registers_changed ();
e26af52f
DJ
1330}
1331
e26af52f
DJ
1332/* Handle the exit of a single thread LP. */
1333
1334static void
1335exit_lwp (struct lwp_info *lp)
1336{
e09875d4 1337 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1338
1339 if (th)
e26af52f 1340 {
17faa917
DJ
1341 if (print_thread_events)
1342 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1343
4f8d22e3 1344 delete_thread (lp->ptid);
e26af52f
DJ
1345 }
1346
1347 delete_lwp (lp->ptid);
1348}
1349
a0ef4274
DJ
1350/* Wait for the LWP specified by LP, which we have just attached to.
1351 Returns a wait status for that LWP, to cache. */
1352
1353static int
1354linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1355 int *signalled)
1356{
1357 pid_t new_pid, pid = GET_LWP (ptid);
1358 int status;
1359
644cebc9 1360 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
1361 {
1362 if (debug_linux_nat)
1363 fprintf_unfiltered (gdb_stdlog,
1364 "LNPAW: Attaching to a stopped process\n");
1365
1366 /* The process is definitely stopped. It is in a job control
1367 stop, unless the kernel predates the TASK_STOPPED /
1368 TASK_TRACED distinction, in which case it might be in a
1369 ptrace stop. Make sure it is in a ptrace stop; from there we
1370 can kill it, signal it, et cetera.
1371
1372 First make sure there is a pending SIGSTOP. Since we are
1373 already attached, the process can not transition from stopped
1374 to running without a PTRACE_CONT; so we know this signal will
1375 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1376 probably already in the queue (unless this kernel is old
1377 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1378 is not an RT signal, it can only be queued once. */
1379 kill_lwp (pid, SIGSTOP);
1380
1381 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1382 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1383 ptrace (PTRACE_CONT, pid, 0, 0);
1384 }
1385
1386 /* Make sure the initial process is stopped. The user-level threads
1387 layer might want to poke around in the inferior, and that won't
1388 work if things haven't stabilized yet. */
1389 new_pid = my_waitpid (pid, &status, 0);
1390 if (new_pid == -1 && errno == ECHILD)
1391 {
1392 if (first)
1393 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1394
1395 /* Try again with __WCLONE to check cloned processes. */
1396 new_pid = my_waitpid (pid, &status, __WCLONE);
1397 *cloned = 1;
1398 }
1399
dacc9cb2
PP
1400 gdb_assert (pid == new_pid);
1401
1402 if (!WIFSTOPPED (status))
1403 {
1404 /* The pid we tried to attach has apparently just exited. */
1405 if (debug_linux_nat)
1406 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1407 pid, status_to_str (status));
1408 return status;
1409 }
a0ef4274
DJ
1410
1411 if (WSTOPSIG (status) != SIGSTOP)
1412 {
1413 *signalled = 1;
1414 if (debug_linux_nat)
1415 fprintf_unfiltered (gdb_stdlog,
1416 "LNPAW: Received %s after attaching\n",
1417 status_to_str (status));
1418 }
1419
1420 return status;
1421}
1422
84636d28
PA
1423/* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1424 the new LWP could not be attached, or 1 if we're already auto
1425 attached to this thread, but haven't processed the
1426 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1427 its existance, without considering it an error. */
d6b0e80f 1428
9ee57c33 1429int
93815fbf 1430lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1431{
9ee57c33 1432 struct lwp_info *lp;
7feb7d06 1433 sigset_t prev_mask;
84636d28 1434 int lwpid;
d6b0e80f
AC
1435
1436 gdb_assert (is_lwp (ptid));
1437
7feb7d06 1438 block_child_signals (&prev_mask);
d6b0e80f 1439
9ee57c33 1440 lp = find_lwp_pid (ptid);
84636d28 1441 lwpid = GET_LWP (ptid);
d6b0e80f
AC
1442
1443 /* We assume that we're already attached to any LWP that has an id
1444 equal to the overall process id, and to any LWP that is already
1445 in our list of LWPs. If we're not seeing exit events from threads
1446 and we've had PID wraparound since we last tried to stop all threads,
1447 this assumption might be wrong; fortunately, this is very unlikely
1448 to happen. */
84636d28 1449 if (lwpid != GET_PID (ptid) && lp == NULL)
d6b0e80f 1450 {
a0ef4274 1451 int status, cloned = 0, signalled = 0;
d6b0e80f 1452
84636d28 1453 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
9ee57c33 1454 {
84636d28
PA
1455 if (linux_supports_tracefork_flag)
1456 {
1457 /* If we haven't stopped all threads when we get here,
1458 we may have seen a thread listed in thread_db's list,
1459 but not processed the PTRACE_EVENT_CLONE yet. If
1460 that's the case, ignore this new thread, and let
1461 normal event handling discover it later. */
1462 if (in_pid_list_p (stopped_pids, lwpid))
1463 {
1464 /* We've already seen this thread stop, but we
1465 haven't seen the PTRACE_EVENT_CLONE extended
1466 event yet. */
1467 restore_child_signals_mask (&prev_mask);
1468 return 0;
1469 }
1470 else
1471 {
1472 int new_pid;
1473 int status;
1474
1475 /* See if we've got a stop for this new child
1476 pending. If so, we're already attached. */
1477 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1478 if (new_pid == -1 && errno == ECHILD)
1479 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1480 if (new_pid != -1)
1481 {
1482 if (WIFSTOPPED (status))
1483 add_to_pid_list (&stopped_pids, lwpid, status);
1484
1485 restore_child_signals_mask (&prev_mask);
1486 return 1;
1487 }
1488 }
1489 }
1490
9ee57c33
DJ
1491 /* If we fail to attach to the thread, issue a warning,
1492 but continue. One way this can happen is if thread
e9efe249 1493 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1494 bug may place threads in the thread list and then fail
1495 to create them. */
1496 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1497 safe_strerror (errno));
7feb7d06 1498 restore_child_signals_mask (&prev_mask);
9ee57c33
DJ
1499 return -1;
1500 }
1501
d6b0e80f
AC
1502 if (debug_linux_nat)
1503 fprintf_unfiltered (gdb_stdlog,
1504 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1505 target_pid_to_str (ptid));
1506
a0ef4274 1507 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
dacc9cb2 1508 if (!WIFSTOPPED (status))
673c2bbe
DE
1509 {
1510 restore_child_signals_mask (&prev_mask);
f687d035 1511 return 1;
673c2bbe 1512 }
dacc9cb2 1513
a0ef4274
DJ
1514 lp = add_lwp (ptid);
1515 lp->stopped = 1;
1516 lp->cloned = cloned;
1517 lp->signalled = signalled;
1518 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1519 {
a0ef4274
DJ
1520 lp->resumed = 1;
1521 lp->status = status;
d6b0e80f
AC
1522 }
1523
a0ef4274 1524 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1525
1526 if (debug_linux_nat)
1527 {
1528 fprintf_unfiltered (gdb_stdlog,
1529 "LLAL: waitpid %s received %s\n",
1530 target_pid_to_str (ptid),
1531 status_to_str (status));
1532 }
1533 }
1534 else
1535 {
1536 /* We assume that the LWP representing the original process is
1537 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1538 that the GNU/linux ptrace layer uses to keep track of
1539 threads. Note that this won't have already been done since
1540 the main thread will have, we assume, been stopped by an
1541 attach from a different layer. */
9ee57c33
DJ
1542 if (lp == NULL)
1543 lp = add_lwp (ptid);
d6b0e80f
AC
1544 lp->stopped = 1;
1545 }
9ee57c33 1546
25289eb2 1547 lp->last_resume_kind = resume_stop;
7feb7d06 1548 restore_child_signals_mask (&prev_mask);
9ee57c33 1549 return 0;
d6b0e80f
AC
1550}
1551
b84876c2 1552static void
136d6dae
VP
1553linux_nat_create_inferior (struct target_ops *ops,
1554 char *exec_file, char *allargs, char **env,
b84876c2
PA
1555 int from_tty)
1556{
10568435
JK
1557#ifdef HAVE_PERSONALITY
1558 int personality_orig = 0, personality_set = 0;
1559#endif /* HAVE_PERSONALITY */
b84876c2
PA
1560
1561 /* The fork_child mechanism is synchronous and calls target_wait, so
1562 we have to mask the async mode. */
1563
10568435
JK
1564#ifdef HAVE_PERSONALITY
1565 if (disable_randomization)
1566 {
1567 errno = 0;
1568 personality_orig = personality (0xffffffff);
1569 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1570 {
1571 personality_set = 1;
1572 personality (personality_orig | ADDR_NO_RANDOMIZE);
1573 }
1574 if (errno != 0 || (personality_set
1575 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1576 warning (_("Error disabling address space randomization: %s"),
1577 safe_strerror (errno));
1578 }
1579#endif /* HAVE_PERSONALITY */
1580
2455069d
UW
1581 /* Make sure we report all signals during startup. */
1582 linux_nat_pass_signals (0, NULL);
1583
136d6dae 1584 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1585
10568435
JK
1586#ifdef HAVE_PERSONALITY
1587 if (personality_set)
1588 {
1589 errno = 0;
1590 personality (personality_orig);
1591 if (errno != 0)
1592 warning (_("Error restoring address space randomization: %s"),
1593 safe_strerror (errno));
1594 }
1595#endif /* HAVE_PERSONALITY */
b84876c2
PA
1596}
1597
d6b0e80f 1598static void
136d6dae 1599linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1600{
1601 struct lwp_info *lp;
d6b0e80f 1602 int status;
af990527 1603 ptid_t ptid;
87b0bb13 1604 volatile struct gdb_exception ex;
d6b0e80f 1605
2455069d
UW
1606 /* Make sure we report all signals during attach. */
1607 linux_nat_pass_signals (0, NULL);
1608
87b0bb13
JK
1609 TRY_CATCH (ex, RETURN_MASK_ERROR)
1610 {
1611 linux_ops->to_attach (ops, args, from_tty);
1612 }
1613 if (ex.reason < 0)
1614 {
1615 pid_t pid = parse_pid_to_attach (args);
1616 struct buffer buffer;
1617 char *message, *buffer_s;
1618
1619 message = xstrdup (ex.message);
1620 make_cleanup (xfree, message);
1621
1622 buffer_init (&buffer);
1623 linux_ptrace_attach_warnings (pid, &buffer);
1624
1625 buffer_grow_str0 (&buffer, "");
1626 buffer_s = buffer_finish (&buffer);
1627 make_cleanup (xfree, buffer_s);
1628
1629 throw_error (ex.error, "%s%s", buffer_s, message);
1630 }
d6b0e80f 1631
af990527
PA
1632 /* The ptrace base target adds the main thread with (pid,0,0)
1633 format. Decorate it with lwp info. */
1634 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1635 thread_change_ptid (inferior_ptid, ptid);
1636
9f0bdab8 1637 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1638 lp = add_initial_lwp (ptid);
a0ef4274
DJ
1639
1640 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1641 &lp->signalled);
dacc9cb2
PP
1642 if (!WIFSTOPPED (status))
1643 {
1644 if (WIFEXITED (status))
1645 {
1646 int exit_code = WEXITSTATUS (status);
1647
1648 target_terminal_ours ();
1649 target_mourn_inferior ();
1650 if (exit_code == 0)
1651 error (_("Unable to attach: program exited normally."));
1652 else
1653 error (_("Unable to attach: program exited with code %d."),
1654 exit_code);
1655 }
1656 else if (WIFSIGNALED (status))
1657 {
2ea28649 1658 enum gdb_signal signo;
dacc9cb2
PP
1659
1660 target_terminal_ours ();
1661 target_mourn_inferior ();
1662
2ea28649 1663 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1664 error (_("Unable to attach: program terminated with signal "
1665 "%s, %s."),
2ea28649
PA
1666 gdb_signal_to_name (signo),
1667 gdb_signal_to_string (signo));
dacc9cb2
PP
1668 }
1669
1670 internal_error (__FILE__, __LINE__,
1671 _("unexpected status %d for PID %ld"),
1672 status, (long) GET_LWP (ptid));
1673 }
1674
a0ef4274 1675 lp->stopped = 1;
9f0bdab8 1676
a0ef4274 1677 /* Save the wait status to report later. */
d6b0e80f 1678 lp->resumed = 1;
a0ef4274
DJ
1679 if (debug_linux_nat)
1680 fprintf_unfiltered (gdb_stdlog,
1681 "LNA: waitpid %ld, saving status %s\n",
1682 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd 1683
7feb7d06
PA
1684 lp->status = status;
1685
1686 if (target_can_async_p ())
1687 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1688}
1689
a0ef4274
DJ
1690/* Get pending status of LP. */
1691static int
1692get_pending_status (struct lwp_info *lp, int *status)
1693{
a493e3e2 1694 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1695
1696 /* If we paused threads momentarily, we may have stored pending
1697 events in lp->status or lp->waitstatus (see stop_wait_callback),
1698 and GDB core hasn't seen any signal for those threads.
1699 Otherwise, the last signal reported to the core is found in the
1700 thread object's stop_signal.
1701
1702 There's a corner case that isn't handled here at present. Only
1703 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1704 stop_signal make sense as a real signal to pass to the inferior.
1705 Some catchpoint related events, like
1706 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1707 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1708 those traps are debug API (ptrace in our case) related and
1709 induced; the inferior wouldn't see them if it wasn't being
1710 traced. Hence, we should never pass them to the inferior, even
1711 when set to pass state. Since this corner case isn't handled by
1712 infrun.c when proceeding with a signal, for consistency, neither
1713 do we handle it here (or elsewhere in the file we check for
1714 signal pass state). Normally SIGTRAP isn't set to pass state, so
1715 this is really a corner case. */
1716
1717 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1718 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1719 else if (lp->status)
2ea28649 1720 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
ca2163eb
PA
1721 else if (non_stop && !is_executing (lp->ptid))
1722 {
1723 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1724
16c381f0 1725 signo = tp->suspend.stop_signal;
ca2163eb
PA
1726 }
1727 else if (!non_stop)
a0ef4274 1728 {
ca2163eb
PA
1729 struct target_waitstatus last;
1730 ptid_t last_ptid;
4c28f408 1731
ca2163eb 1732 get_last_target_status (&last_ptid, &last);
4c28f408 1733
ca2163eb
PA
1734 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1735 {
e09875d4 1736 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1737
16c381f0 1738 signo = tp->suspend.stop_signal;
4c28f408 1739 }
ca2163eb 1740 }
4c28f408 1741
ca2163eb 1742 *status = 0;
4c28f408 1743
a493e3e2 1744 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1745 {
1746 if (debug_linux_nat)
1747 fprintf_unfiltered (gdb_stdlog,
1748 "GPT: lwp %s has no pending signal\n",
1749 target_pid_to_str (lp->ptid));
1750 }
1751 else if (!signal_pass_state (signo))
1752 {
1753 if (debug_linux_nat)
3e43a32a
MS
1754 fprintf_unfiltered (gdb_stdlog,
1755 "GPT: lwp %s had signal %s, "
1756 "but it is in no pass state\n",
ca2163eb 1757 target_pid_to_str (lp->ptid),
2ea28649 1758 gdb_signal_to_string (signo));
a0ef4274 1759 }
a0ef4274 1760 else
4c28f408 1761 {
2ea28649 1762 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1763
1764 if (debug_linux_nat)
1765 fprintf_unfiltered (gdb_stdlog,
1766 "GPT: lwp %s has pending signal %s\n",
1767 target_pid_to_str (lp->ptid),
2ea28649 1768 gdb_signal_to_string (signo));
4c28f408 1769 }
a0ef4274
DJ
1770
1771 return 0;
1772}
1773
d6b0e80f
AC
1774static int
1775detach_callback (struct lwp_info *lp, void *data)
1776{
1777 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1778
1779 if (debug_linux_nat && lp->status)
1780 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1781 strsignal (WSTOPSIG (lp->status)),
1782 target_pid_to_str (lp->ptid));
1783
a0ef4274
DJ
1784 /* If there is a pending SIGSTOP, get rid of it. */
1785 if (lp->signalled)
d6b0e80f 1786 {
d6b0e80f
AC
1787 if (debug_linux_nat)
1788 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1789 "DC: Sending SIGCONT to %s\n",
1790 target_pid_to_str (lp->ptid));
d6b0e80f 1791
a0ef4274 1792 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1793 lp->signalled = 0;
d6b0e80f
AC
1794 }
1795
1796 /* We don't actually detach from the LWP that has an id equal to the
1797 overall process id just yet. */
1798 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1799 {
a0ef4274
DJ
1800 int status = 0;
1801
1802 /* Pass on any pending signal for this LWP. */
1803 get_pending_status (lp, &status);
1804
7b50312a
PA
1805 if (linux_nat_prepare_to_resume != NULL)
1806 linux_nat_prepare_to_resume (lp);
d6b0e80f
AC
1807 errno = 0;
1808 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1809 WSTOPSIG (status)) < 0)
8a3fe4f8 1810 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1811 safe_strerror (errno));
1812
1813 if (debug_linux_nat)
1814 fprintf_unfiltered (gdb_stdlog,
1815 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1816 target_pid_to_str (lp->ptid),
7feb7d06 1817 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1818
1819 delete_lwp (lp->ptid);
1820 }
1821
1822 return 0;
1823}
1824
/* Implementation of target_ops->to_detach for the Linux native
   target.  ARGS, if non-empty, is the user-supplied signal number to
   deliver on detach; FROM_TTY is the usual interactive flag.

   All LWPs of the inferior are first stopped (ptrace can only detach
   a stopped thread), then detached one by one via detach_callback,
   and finally the main LWP (lwpid == pid) is detached by the layer
   below, carrying any pending signal.  */

static void
linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = GET_PID (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  /* Detach every LWP except the one whose lwpid equals the overall
     process id; detach_callback deliberately leaves that one.  */
  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP, unless the user
     explicitly requested a signal on the command line.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  /* Give the arch-specific code a chance to flush state (e.g. debug
     registers) before the LWP record goes away.  */
  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}
1882
/* Resume LP with signal SIGNO (GDB_SIGNAL_0 for none), single-stepping
   if STEP is non-zero.  The LWP is left untouched if it is not
   stopped, if it is a vfork parent (which must stay halted until the
   child execs/exits), or if it already has a pending event to
   report.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));

      if (inf->vfork_child != NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (lp->status == 0
	       && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
	{
	  /* Nothing pending for this LWP; safe to let it run.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Resuming sibling %s, %s, %s\n",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"),
				step ? "step" : "resume");

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (GET_LWP (lp->ptid)),
				step, signo);
	  /* Record the new running state only after the low-level
	     resume has actually been issued.  */
	  lp->stopped = 0;
	  lp->step = step;
	  lp->stopped_by_watchpoint = 0;
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}
d6b0e80f 1936
e5ef252a
PA
1937/* Resume LWP, with the last stop signal, if it is in pass state. */
1938
25289eb2 1939static int
e5ef252a 1940linux_nat_resume_callback (struct lwp_info *lp, void *data)
25289eb2 1941{
e5ef252a
PA
1942 enum gdb_signal signo = GDB_SIGNAL_0;
1943
1944 if (lp->stopped)
1945 {
1946 struct thread_info *thread;
1947
1948 thread = find_thread_ptid (lp->ptid);
1949 if (thread != NULL)
1950 {
1951 if (signal_pass_state (thread->suspend.stop_signal))
1952 signo = thread->suspend.stop_signal;
1953 thread->suspend.stop_signal = GDB_SIGNAL_0;
1954 }
1955 }
1956
1957 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1958 return 0;
1959}
1960
1961static int
1962resume_clear_callback (struct lwp_info *lp, void *data)
1963{
1964 lp->resumed = 0;
25289eb2 1965 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1966 return 0;
1967}
1968
1969static int
1970resume_set_callback (struct lwp_info *lp, void *data)
1971{
1972 lp->resumed = 1;
25289eb2 1973 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1974 return 0;
1975}
1976
/* Implementation of target_ops->to_resume for the Linux native
   target.  PTID selects which LWPs to resume (minus_one_ptid or a
   pid-only ptid means "all LWPs of the current inferior"); STEP
   requests single-stepping of the event thread; SIGNO is the signal
   to deliver to it (GDB_SIGNAL_0 for none).  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  sigset_t prev_mask;
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* Keep SIGCHLD from interfering while we manipulate LWP state.  */
  block_child_signals (&prev_mask);

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  /* The pending stop is for a signal the user wants passed
	     straight through; deliver it on resume instead of
	     reporting it, and drop the pending status.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* There is still a pending event to report; don't resume
	 anything, just arrange for linux_nat_wait to pick it up.  */

      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      restore_child_signals_mask (&prev_mask);
      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     linux_nat_resume_callback.  */
  lp->stopped = 0;

  if (resume_many)
    iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, ptid, step, signo);
  lp->stopped_by_watchpoint = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"));

  restore_child_signals_mask (&prev_mask);
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
2090
/* Deliver signal SIGNO to the LWP with kernel thread id LWPID.
   Prefer the tkill syscall, which targets an individual thread (as
   needed with NPTL threads); if the kernel does not implement tkill,
   remember that and fall back to kill from then on.  Returns the
   syscall result (0 on success, -1 with errno set on failure).  */

static int
kill_lwp (int lwpid, int signo)
{
#ifdef HAVE_TKILL_SYSCALL
  static int tkill_failed;

  if (!tkill_failed)
    {
      int ret;

      errno = 0;
      ret = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return ret;
      /* The kernel lacks tkill; don't bother trying it again.  */
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}
2118
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.

   STOPPING is non-zero when we are in the middle of stopping all
   threads.  Returns non-zero if the event was consumed here and the
   caller should keep waiting; zero if the event should be reported to
   the core (LP->waitstatus is then filled in).  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event.  If we didn't do this (and returned 0), we'd
	 leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    GET_LWP (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				GET_LWP (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    GET_LWP (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	 PT_STEP still resumes executing the syscall and reports a
	 syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	 single-stepped this thread, then this event can't be a
	 syscall enter.  If we last single-stepped this thread, this
	 has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    GET_LWP (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  /* Note that gdbarch_get_syscall_number may access registers, hence
     fill a regcache.  */
  registers_changed ();
  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			lp->step, GDB_SIGNAL_0);
  return 1;
}
2249
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.

   LP is the LWP that reported the event; STATUS is its raw waitpid
   status, whose high bits encode the PTRACE_EVENT_* code.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = GET_LWP (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* Retrieve the new child's pid from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  if (linux_nat_new_fork != NULL)
	    linux_nat_new_fork (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_build (new_pid, new_pid, 0));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
	  linux_enable_event_reporting (pid_to_ptid (new_pid));

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* PTRACE_EVENT_CLONE: a new thread of the same inferior.
	     Record it and keep waiting; the clone event itself is not
	     reported to the core.  */
	  struct lwp_info *new_lp;

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event "
				"from LWP %d, new child is LWP %ld\n",
				pid, new_pid);

	  new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
	  new_lp->cloned = 1;
	  new_lp->stopped = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    {
	      struct thread_info *tp;

	      /* When we stop for an event in some other thread, and
		 pull the thread list just as this thread has cloned,
		 we'll have seen the new thread in the thread_db list
		 before handling the CLONE event (glibc's
		 pthread_create adds the new thread to the thread list
		 before clone'ing, and has the kernel fill in the
		 thread's tid on the clone call with
		 CLONE_PARENT_SETTID).  If that happened, and the core
		 had requested the new thread to stop, we'll have
		 killed it with SIGSTOP.  But since SIGSTOP is not an
		 RT signal, it can only be queued once.  We need to be
		 careful to not resume the LWP if we wanted it to
		 stop.  In that case, we'll leave the SIGSTOP pending.
		 It will later be reported as GDB_SIGNAL_0.  */
	      tp = find_thread_ptid (new_lp->ptid);
	      if (tp != NULL && tp->stop_requested)
		new_lp->last_resume_kind = resume_stop;
	      else
		status = 0;
	    }

	  if (non_stop)
	    {
	      /* Add the new thread to GDB's lists as soon as possible
		 so that:

		 1) the frontend doesn't have to wait for a stop to
		 display them, and,

		 2) we tag it with the correct running state.  */

	      /* If the thread_db layer is active, let it know about
		 this new thread, and add it to GDB's list.  */
	      if (!thread_db_attach_lwp (new_lp->ptid))
		{
		  /* We're not using thread_db.  Add it to GDB's
		     list.  */
		  target_post_attach (GET_LWP (new_lp->ptid));
		  add_thread (new_lp->ptid);
		}

	      if (!stopping)
		{
		  set_running (new_lp->ptid, 1);
		  set_executing (new_lp->ptid, 1);
		  /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
		     resume_stop.  */
		  new_lp->last_resume_kind = resume_continue;
		}
	    }

	  if (status != 0)
	    {
	      /* We created NEW_LP so it cannot yet contain STATUS.  */
	      gdb_assert (new_lp->status == 0);

	      /* Save the wait status to report later.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LHEW: waitpid of new LWP %ld, "
				    "saving status %s\n",
				    (long) GET_LWP (new_lp->ptid),
				    status_to_str (status));
	      new_lp->status = status;
	    }

	  /* Note the need to use the low target ops to resume, to
	     handle resuming with PT_SYSCALL if we have syscall
	     catchpoints.  */
	  if (!stopping)
	    {
	      new_lp->resumed = 1;

	      if (status == 0)
		{
		  gdb_assert (new_lp->last_resume_kind == resume_continue);
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LHEW: resuming new LWP %ld\n",
					GET_LWP (new_lp->ptid));
		  if (linux_nat_prepare_to_resume != NULL)
		    linux_nat_prepare_to_resume (new_lp);
		  linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
					0, GDB_SIGNAL_0);
		  new_lp->stopped = 0;
		}
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: resuming parent LWP %d\n", pid);
	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (lp);
	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
				0, GDB_SIGNAL_0);

	  return 1;
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got exec event from LWP %ld\n",
			    GET_LWP (lp->ptid));

      ourstatus->kind = TARGET_WAITKIND_EXECD;
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (pid));

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      if (current_inferior ()->waiting_for_vfork_done)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got expected PTRACE_EVENT_"
				"VFORK_DONE from LWP %ld: stopping\n",
				GET_LWP (lp->ptid));

	  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got PTRACE_EVENT_VFORK_DONE "
			    "from LWP %ld: resuming\n",
			    GET_LWP (lp->ptid));
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}
2503
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited (in which case the lwp record has been deleted via
   exit_lwp).  Syscall traps and extended ptrace events seen while
   waiting are handled here, possibly by waiting again
   recursively.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
	 was right and we should just call sigsuspend.  */

      pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 __WCLONE is not applicable for the leader so we can't use that.
	 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
	 process; it gets ESRCH both for the zombie and for running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but linux_proc_pid_is_zombie is safe this way.  */

      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
	  && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"WL: Thread group leader %s vanished.\n",
				target_pid_to_str (lp->ptid));
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */

      sigsuspend (&suspend_mask);
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}
2637
2638/* Send a SIGSTOP to LP. */
2639
2640static int
2641stop_callback (struct lwp_info *lp, void *data)
2642{
2643 if (!lp->stopped && !lp->signalled)
2644 {
2645 int ret;
2646
2647 if (debug_linux_nat)
2648 {
2649 fprintf_unfiltered (gdb_stdlog,
2650 "SC: kill %s **<SIGSTOP>**\n",
2651 target_pid_to_str (lp->ptid));
2652 }
2653 errno = 0;
2654 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2655 if (debug_linux_nat)
2656 {
2657 fprintf_unfiltered (gdb_stdlog,
2658 "SC: lwp kill %d %s\n",
2659 ret,
2660 errno ? safe_strerror (errno) : "ERRNO-OK");
2661 }
2662
2663 lp->signalled = 1;
2664 gdb_assert (lp->status == 0);
2665 }
2666
2667 return 0;
2668}
2669
/* Request a stop on LWP, by sending it a SIGSTOP via stop_callback
   (which skips LWPs that are already stopped or signalled).  Exported
   for use by arch-specific native code.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  stop_callback (lwp, NULL);
}
2677
57380f4e 2678/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2679
2680static int
57380f4e
DJ
2681linux_nat_has_pending_sigint (int pid)
2682{
2683 sigset_t pending, blocked, ignored;
57380f4e
DJ
2684
2685 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2686
2687 if (sigismember (&pending, SIGINT)
2688 && !sigismember (&ignored, SIGINT))
2689 return 1;
2690
2691 return 0;
2692}
2693
2694/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2695
2696static int
2697set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2698{
57380f4e
DJ
2699 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2700 flag to consume the next one. */
2701 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2702 && WSTOPSIG (lp->status) == SIGINT)
2703 lp->status = 0;
2704 else
2705 lp->ignore_sigint = 1;
2706
2707 return 0;
2708}
2709
2710/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2711 This function is called after we know the LWP has stopped; if the LWP
2712 stopped before the expected SIGINT was delivered, then it will never have
2713 arrived. Also, if the signal was delivered to a shared queue and consumed
2714 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2715
57380f4e
DJ
2716static void
2717maybe_clear_ignore_sigint (struct lwp_info *lp)
2718{
2719 if (!lp->ignore_sigint)
2720 return;
2721
2722 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2723 {
2724 if (debug_linux_nat)
2725 fprintf_unfiltered (gdb_stdlog,
2726 "MCIS: Clearing bogus flag for %s\n",
2727 target_pid_to_str (lp->ptid));
2728 lp->ignore_sigint = 0;
2729 }
2730}
2731
/* Fetch the possible triggered data watchpoint info and store it in
   LP.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static void
save_sigtrap (struct lwp_info *lp)
{
  struct cleanup *old_chain;

  if (linux_ops->to_stopped_by_watchpoint == NULL)
    {
      /* Low target can't tell; record "not a watchpoint stop".  */
      lp->stopped_by_watchpoint = 0;
      return;
    }

  /* The to_stopped_* hooks operate on inferior_ptid; temporarily
     switch to LP and restore via the cleanup chain.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = lp->ptid;

  lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();

  if (lp->stopped_by_watchpoint)
    {
      if (linux_ops->to_stopped_data_address != NULL)
	lp->stopped_data_address_p =
	  linux_ops->to_stopped_data_address (&current_target,
					      &lp->stopped_data_address);
      else
	lp->stopped_data_address_p = 0;
    }

  do_cleanups (old_chain);
}
2775
2776/* See save_sigtrap. */
2777
2778static int
2779linux_nat_stopped_by_watchpoint (void)
2780{
2781 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2782
2783 gdb_assert (lp != NULL);
2784
2785 return lp->stopped_by_watchpoint;
2786}
2787
2788static int
2789linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2790{
2791 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2792
2793 gdb_assert (lp != NULL);
2794
2795 *addr_p = lp->stopped_data_address;
2796
2797 return lp->stopped_data_address_p;
2798}
2799
/* Commonly any breakpoint / watchpoint stop generates only SIGTRAP;
   recognize such a wait status.  */

static int
sigtrap_is_event (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
}

/* The current SIGTRAP-like event recognizer; replaceable via
   linux_nat_set_status_is_event.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2811
00390b84
JK
2812/* Check for SIGTRAP-like events in LP. */
2813
2814static int
2815linux_nat_lp_status_is_event (struct lwp_info *lp)
2816{
2817 /* We check for lp->waitstatus in addition to lp->status, because we can
2818 have pending process exits recorded in lp->status
2819 and W_EXITCODE(0,0) == 0. We should probably have an additional
2820 lp->status_p flag. */
2821
2822 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2823 && linux_nat_status_is_event (lp->status));
2824}
2825
26ab7092
JK
2826/* Set alternative SIGTRAP-like events recognizer. If
2827 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2828 applied. */
2829
2830void
2831linux_nat_set_status_is_event (struct target_ops *t,
2832 int (*status_is_event) (int status))
2833{
2834 linux_nat_status_is_event = status_is_event;
2835}
2836
57380f4e
DJ
2837/* Wait until LP is stopped. */
2838
2839static int
2840stop_wait_callback (struct lwp_info *lp, void *data)
2841{
6c95b8df
PA
2842 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2843
2844 /* If this is a vfork parent, bail out, it is not going to report
2845 any SIGSTOP until the vfork is done with. */
2846 if (inf->vfork_child != NULL)
2847 return 0;
2848
d6b0e80f
AC
2849 if (!lp->stopped)
2850 {
2851 int status;
2852
2853 status = wait_lwp (lp);
2854 if (status == 0)
2855 return 0;
2856
57380f4e
DJ
2857 if (lp->ignore_sigint && WIFSTOPPED (status)
2858 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2859 {
57380f4e 2860 lp->ignore_sigint = 0;
d6b0e80f
AC
2861
2862 errno = 0;
2863 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2864 if (debug_linux_nat)
2865 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2866 "PTRACE_CONT %s, 0, 0 (%s) "
2867 "(discarding SIGINT)\n",
d6b0e80f
AC
2868 target_pid_to_str (lp->ptid),
2869 errno ? safe_strerror (errno) : "OK");
2870
57380f4e 2871 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2872 }
2873
57380f4e
DJ
2874 maybe_clear_ignore_sigint (lp);
2875
d6b0e80f
AC
2876 if (WSTOPSIG (status) != SIGSTOP)
2877 {
e5ef252a 2878 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2879
e5ef252a
PA
2880 save_sigtrap (lp);
2881
2882 if (debug_linux_nat)
2883 fprintf_unfiltered (gdb_stdlog,
2884 "SWC: Pending event %s in %s\n",
2885 status_to_str ((int) status),
2886 target_pid_to_str (lp->ptid));
2887
2888 /* Save the sigtrap event. */
2889 lp->status = status;
2890 gdb_assert (!lp->stopped);
2891 gdb_assert (lp->signalled);
2892 lp->stopped = 1;
d6b0e80f
AC
2893 }
2894 else
2895 {
2896 /* We caught the SIGSTOP that we intended to catch, so
2897 there's no SIGSTOP pending. */
e5ef252a
PA
2898
2899 if (debug_linux_nat)
2900 fprintf_unfiltered (gdb_stdlog,
2901 "SWC: Delayed SIGSTOP caught for %s.\n",
2902 target_pid_to_str (lp->ptid));
2903
d6b0e80f 2904 lp->stopped = 1;
e5ef252a
PA
2905
2906 /* Reset SIGNALLED only after the stop_wait_callback call
2907 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2908 lp->signalled = 0;
2909 }
2910 }
2911
2912 return 0;
2913}
2914
d6b0e80f
AC
2915/* Return non-zero if LP has a wait status pending. */
2916
2917static int
2918status_callback (struct lwp_info *lp, void *data)
2919{
2920 /* Only report a pending wait status if we pretend that this has
2921 indeed been resumed. */
ca2163eb
PA
2922 if (!lp->resumed)
2923 return 0;
2924
2925 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2926 {
2927 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
766062f6 2928 or a pending process exit. Note that `W_EXITCODE(0,0) ==
ca2163eb
PA
2929 0', so a clean process exit can not be stored pending in
2930 lp->status, it is indistinguishable from
2931 no-pending-status. */
2932 return 1;
2933 }
2934
2935 if (lp->status != 0)
2936 return 1;
2937
2938 return 0;
d6b0e80f
AC
2939}
2940
2941/* Return non-zero if LP isn't stopped. */
2942
2943static int
2944running_callback (struct lwp_info *lp, void *data)
2945{
25289eb2
PA
2946 return (!lp->stopped
2947 || ((lp->status != 0
2948 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2949 && lp->resumed));
d6b0e80f
AC
2950}
2951
2952/* Count the LWP's that have had events. */
2953
2954static int
2955count_events_callback (struct lwp_info *lp, void *data)
2956{
2957 int *count = data;
2958
2959 gdb_assert (count != NULL);
2960
e09490f1 2961 /* Count only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2962 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2963 (*count)++;
2964
2965 return 0;
2966}
2967
2968/* Select the LWP (if any) that is currently being single-stepped. */
2969
2970static int
2971select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2972{
25289eb2
PA
2973 if (lp->last_resume_kind == resume_step
2974 && lp->status != 0)
d6b0e80f
AC
2975 return 1;
2976 else
2977 return 0;
2978}
2979
2980/* Select the Nth LWP that has had a SIGTRAP event. */
2981
2982static int
2983select_event_lwp_callback (struct lwp_info *lp, void *data)
2984{
2985 int *selector = data;
2986
2987 gdb_assert (selector != NULL);
2988
1777feb0 2989 /* Select only resumed LWPs that have a SIGTRAP event pending. */
00390b84 2990 if (lp->resumed && linux_nat_lp_status_is_event (lp))
d6b0e80f
AC
2991 if ((*selector)-- == 0)
2992 return 1;
2993
2994 return 0;
2995}
2996
710151dd
PA
2997static int
2998cancel_breakpoint (struct lwp_info *lp)
2999{
3000 /* Arrange for a breakpoint to be hit again later. We don't keep
3001 the SIGTRAP status and don't forward the SIGTRAP signal to the
3002 LWP. We will handle the current event, eventually we will resume
3003 this LWP, and this breakpoint will trap again.
3004
3005 If we do not do this, then we run the risk that the user will
3006 delete or disable the breakpoint, but the LWP will have already
3007 tripped on it. */
3008
515630c5
UW
3009 struct regcache *regcache = get_thread_regcache (lp->ptid);
3010 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3011 CORE_ADDR pc;
3012
3013 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
6c95b8df 3014 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
710151dd
PA
3015 {
3016 if (debug_linux_nat)
3017 fprintf_unfiltered (gdb_stdlog,
3018 "CB: Push back breakpoint for %s\n",
3019 target_pid_to_str (lp->ptid));
3020
3021 /* Back up the PC if necessary. */
515630c5
UW
3022 if (gdbarch_decr_pc_after_break (gdbarch))
3023 regcache_write_pc (regcache, pc);
3024
710151dd
PA
3025 return 1;
3026 }
3027 return 0;
3028}
3029
d6b0e80f
AC
3030static int
3031cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3032{
3033 struct lwp_info *event_lp = data;
3034
3035 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3036 if (lp == event_lp)
3037 return 0;
3038
3039 /* If a LWP other than the LWP that we're reporting an event for has
3040 hit a GDB breakpoint (as opposed to some random trap signal),
3041 then just arrange for it to hit it again later. We don't keep
3042 the SIGTRAP status and don't forward the SIGTRAP signal to the
3043 LWP. We will handle the current event, eventually we will resume
3044 all LWPs, and this one will get its breakpoint trap again.
3045
3046 If we do not do this, then we run the risk that the user will
3047 delete or disable the breakpoint, but the LWP will have already
3048 tripped on it. */
3049
00390b84 3050 if (linux_nat_lp_status_is_event (lp)
710151dd
PA
3051 && cancel_breakpoint (lp))
3052 /* Throw away the SIGTRAP. */
3053 lp->status = 0;
d6b0e80f
AC
3054
3055 return 0;
3056}
3057
3058/* Select one LWP out of those that have events pending. */
3059
3060static void
d90e17a7 3061select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
3062{
3063 int num_events = 0;
3064 int random_selector;
3065 struct lwp_info *event_lp;
3066
ac264b3b 3067 /* Record the wait status for the original LWP. */
d6b0e80f
AC
3068 (*orig_lp)->status = *status;
3069
3070 /* Give preference to any LWP that is being single-stepped. */
d90e17a7
PA
3071 event_lp = iterate_over_lwps (filter,
3072 select_singlestep_lwp_callback, NULL);
d6b0e80f
AC
3073 if (event_lp != NULL)
3074 {
3075 if (debug_linux_nat)
3076 fprintf_unfiltered (gdb_stdlog,
3077 "SEL: Select single-step %s\n",
3078 target_pid_to_str (event_lp->ptid));
3079 }
3080 else
3081 {
3082 /* No single-stepping LWP. Select one at random, out of those
3083 which have had SIGTRAP events. */
3084
3085 /* First see how many SIGTRAP events we have. */
d90e17a7 3086 iterate_over_lwps (filter, count_events_callback, &num_events);
d6b0e80f
AC
3087
3088 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3089 random_selector = (int)
3090 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3091
3092 if (debug_linux_nat && num_events > 1)
3093 fprintf_unfiltered (gdb_stdlog,
3094 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3095 num_events, random_selector);
3096
d90e17a7
PA
3097 event_lp = iterate_over_lwps (filter,
3098 select_event_lwp_callback,
d6b0e80f
AC
3099 &random_selector);
3100 }
3101
3102 if (event_lp != NULL)
3103 {
3104 /* Switch the event LWP. */
3105 *orig_lp = event_lp;
3106 *status = event_lp->status;
3107 }
3108
3109 /* Flush the wait status for the event LWP. */
3110 (*orig_lp)->status = 0;
3111}
3112
3113/* Return non-zero if LP has been resumed. */
3114
3115static int
3116resumed_callback (struct lwp_info *lp, void *data)
3117{
3118 return lp->resumed;
3119}
3120
12d9289a
PA
3121/* Stop an active thread, verify it still exists, then resume it. If
3122 the thread ends up with a pending status, then it is not resumed,
3123 and *DATA (really a pointer to int), is set. */
d6b0e80f
AC
3124
3125static int
3126stop_and_resume_callback (struct lwp_info *lp, void *data)
3127{
12d9289a
PA
3128 int *new_pending_p = data;
3129
25289eb2 3130 if (!lp->stopped)
d6b0e80f 3131 {
25289eb2
PA
3132 ptid_t ptid = lp->ptid;
3133
d6b0e80f
AC
3134 stop_callback (lp, NULL);
3135 stop_wait_callback (lp, NULL);
25289eb2
PA
3136
3137 /* Resume if the lwp still exists, and the core wanted it
3138 running. */
12d9289a
PA
3139 lp = find_lwp_pid (ptid);
3140 if (lp != NULL)
25289eb2 3141 {
12d9289a
PA
3142 if (lp->last_resume_kind == resume_stop
3143 && lp->status == 0)
3144 {
3145 /* The core wanted the LWP to stop. Even if it stopped
3146 cleanly (with SIGSTOP), leave the event pending. */
3147 if (debug_linux_nat)
3148 fprintf_unfiltered (gdb_stdlog,
3149 "SARC: core wanted LWP %ld stopped "
3150 "(leaving SIGSTOP pending)\n",
3151 GET_LWP (lp->ptid));
3152 lp->status = W_STOPCODE (SIGSTOP);
3153 }
3154
3155 if (lp->status == 0)
3156 {
3157 if (debug_linux_nat)
3158 fprintf_unfiltered (gdb_stdlog,
3159 "SARC: re-resuming LWP %ld\n",
3160 GET_LWP (lp->ptid));
e5ef252a 3161 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
3162 }
3163 else
3164 {
3165 if (debug_linux_nat)
3166 fprintf_unfiltered (gdb_stdlog,
3167 "SARC: not re-resuming LWP %ld "
3168 "(has pending)\n",
3169 GET_LWP (lp->ptid));
3170 if (new_pending_p)
3171 *new_pending_p = 1;
3172 }
25289eb2 3173 }
d6b0e80f
AC
3174 }
3175 return 0;
3176}
3177
02f3fc28 3178/* Check if we should go on and pass this event to common code.
12d9289a
PA
3179 Return the affected lwp if we are, or NULL otherwise. If we stop
3180 all lwps temporarily, we may end up with new pending events in some
3181 other lwp. In that case set *NEW_PENDING_P to true. */
3182
02f3fc28 3183static struct lwp_info *
0e5bf2a8 3184linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
02f3fc28
PA
3185{
3186 struct lwp_info *lp;
3187
12d9289a
PA
3188 *new_pending_p = 0;
3189
02f3fc28
PA
3190 lp = find_lwp_pid (pid_to_ptid (lwpid));
3191
3192 /* Check for stop events reported by a process we didn't already
3193 know about - anything not already in our LWP list.
3194
3195 If we're expecting to receive stopped processes after
3196 fork, vfork, and clone events, then we'll just add the
3197 new one to our list and go back to waiting for the event
3198 to be reported - the stopped process might be returned
0e5bf2a8
PA
3199 from waitpid before or after the event is.
3200
3201 But note the case of a non-leader thread exec'ing after the
3202 leader having exited, and gone from our lists. The non-leader
3203 thread changes its tid to the tgid. */
3204
3205 if (WIFSTOPPED (status) && lp == NULL
3206 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3207 {
3208 /* A multi-thread exec after we had seen the leader exiting. */
3209 if (debug_linux_nat)
3210 fprintf_unfiltered (gdb_stdlog,
3211 "LLW: Re-adding thread group leader LWP %d.\n",
3212 lwpid);
3213
3214 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3215 lp->stopped = 1;
3216 lp->resumed = 1;
3217 add_thread (lp->ptid);
3218 }
3219
02f3fc28
PA
3220 if (WIFSTOPPED (status) && !lp)
3221 {
84636d28 3222 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3223 return NULL;
3224 }
3225
3226 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3227 our list, i.e. not part of the current process. This can happen
fd62cb89 3228 if we detach from a program we originally forked and then it
02f3fc28
PA
3229 exits. */
3230 if (!WIFSTOPPED (status) && !lp)
3231 return NULL;
3232
ca2163eb
PA
3233 /* Handle GNU/Linux's syscall SIGTRAPs. */
3234 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3235 {
3236 /* No longer need the sysgood bit. The ptrace event ends up
3237 recorded in lp->waitstatus if we care for it. We can carry
3238 on handling the event like a regular SIGTRAP from here
3239 on. */
3240 status = W_STOPCODE (SIGTRAP);
3241 if (linux_handle_syscall_trap (lp, 0))
3242 return NULL;
3243 }
02f3fc28 3244
ca2163eb
PA
3245 /* Handle GNU/Linux's extended waitstatus for trace events. */
3246 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
02f3fc28
PA
3247 {
3248 if (debug_linux_nat)
3249 fprintf_unfiltered (gdb_stdlog,
3250 "LLW: Handling extended status 0x%06x\n",
3251 status);
3252 if (linux_handle_extended_wait (lp, status, 0))
3253 return NULL;
3254 }
3255
26ab7092 3256 if (linux_nat_status_is_event (status))
da559b09 3257 save_sigtrap (lp);
ca2163eb 3258
02f3fc28 3259 /* Check if the thread has exited. */
d90e17a7
PA
3260 if ((WIFEXITED (status) || WIFSIGNALED (status))
3261 && num_lwps (GET_PID (lp->ptid)) > 1)
02f3fc28 3262 {
9db03742
JB
3263 /* If this is the main thread, we must stop all threads and verify
3264 if they are still alive. This is because in the nptl thread model
3265 on Linux 2.4, there is no signal issued for exiting LWPs
02f3fc28
PA
3266 other than the main thread. We only get the main thread exit
3267 signal once all child threads have already exited. If we
3268 stop all the threads and use the stop_wait_callback to check
3269 if they have exited we can determine whether this signal
3270 should be ignored or whether it means the end of the debugged
3271 application, regardless of which threading model is being
5d3b6af6 3272 used. */
02f3fc28
PA
3273 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3274 {
3275 lp->stopped = 1;
d90e17a7 3276 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
12d9289a 3277 stop_and_resume_callback, new_pending_p);
02f3fc28
PA
3278 }
3279
3280 if (debug_linux_nat)
3281 fprintf_unfiltered (gdb_stdlog,
3282 "LLW: %s exited.\n",
3283 target_pid_to_str (lp->ptid));
3284
d90e17a7 3285 if (num_lwps (GET_PID (lp->ptid)) > 1)
9db03742
JB
3286 {
3287 /* If there is at least one more LWP, then the exit signal
3288 was not the end of the debugged application and should be
3289 ignored. */
3290 exit_lwp (lp);
3291 return NULL;
3292 }
02f3fc28
PA
3293 }
3294
3295 /* Check if the current LWP has previously exited. In the nptl
3296 thread model, LWPs other than the main thread do not issue
3297 signals when they exit so we must check whenever the thread has
3298 stopped. A similar check is made in stop_wait_callback(). */
d90e17a7 3299 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3300 {
d90e17a7
PA
3301 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3302
02f3fc28
PA
3303 if (debug_linux_nat)
3304 fprintf_unfiltered (gdb_stdlog,
3305 "LLW: %s exited.\n",
3306 target_pid_to_str (lp->ptid));
3307
3308 exit_lwp (lp);
3309
3310 /* Make sure there is at least one thread running. */
d90e17a7 3311 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3312
3313 /* Discard the event. */
3314 return NULL;
3315 }
3316
3317 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3318 an attempt to stop an LWP. */
3319 if (lp->signalled
3320 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3321 {
3322 if (debug_linux_nat)
3323 fprintf_unfiltered (gdb_stdlog,
3324 "LLW: Delayed SIGSTOP caught for %s.\n",
3325 target_pid_to_str (lp->ptid));
3326
02f3fc28
PA
3327 lp->signalled = 0;
3328
25289eb2
PA
3329 if (lp->last_resume_kind != resume_stop)
3330 {
3331 /* This is a delayed SIGSTOP. */
02f3fc28 3332
25289eb2
PA
3333 registers_changed ();
3334
7b50312a
PA
3335 if (linux_nat_prepare_to_resume != NULL)
3336 linux_nat_prepare_to_resume (lp);
25289eb2 3337 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3338 lp->step, GDB_SIGNAL_0);
25289eb2
PA
3339 if (debug_linux_nat)
3340 fprintf_unfiltered (gdb_stdlog,
3341 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3342 lp->step ?
3343 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3344 target_pid_to_str (lp->ptid));
02f3fc28 3345
25289eb2
PA
3346 lp->stopped = 0;
3347 gdb_assert (lp->resumed);
02f3fc28 3348
25289eb2
PA
3349 /* Discard the event. */
3350 return NULL;
3351 }
02f3fc28
PA
3352 }
3353
57380f4e
DJ
3354 /* Make sure we don't report a SIGINT that we have already displayed
3355 for another thread. */
3356 if (lp->ignore_sigint
3357 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3358 {
3359 if (debug_linux_nat)
3360 fprintf_unfiltered (gdb_stdlog,
3361 "LLW: Delayed SIGINT caught for %s.\n",
3362 target_pid_to_str (lp->ptid));
3363
3364 /* This is a delayed SIGINT. */
3365 lp->ignore_sigint = 0;
3366
3367 registers_changed ();
7b50312a
PA
3368 if (linux_nat_prepare_to_resume != NULL)
3369 linux_nat_prepare_to_resume (lp);
28439f5e 3370 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3371 lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3372 if (debug_linux_nat)
3373 fprintf_unfiltered (gdb_stdlog,
3374 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3375 lp->step ?
3376 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3377 target_pid_to_str (lp->ptid));
3378
3379 lp->stopped = 0;
3380 gdb_assert (lp->resumed);
3381
3382 /* Discard the event. */
3383 return NULL;
3384 }
3385
02f3fc28
PA
3386 /* An interesting event. */
3387 gdb_assert (lp);
ca2163eb 3388 lp->status = status;
02f3fc28
PA
3389 return lp;
3390}
3391
0e5bf2a8
PA
3392/* Detect zombie thread group leaders, and "exit" them. We can't reap
3393 their exits until all other threads in the group have exited. */
3394
3395static void
3396check_zombie_leaders (void)
3397{
3398 struct inferior *inf;
3399
3400 ALL_INFERIORS (inf)
3401 {
3402 struct lwp_info *leader_lp;
3403
3404 if (inf->pid == 0)
3405 continue;
3406
3407 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3408 if (leader_lp != NULL
3409 /* Check if there are other threads in the group, as we may
3410 have raced with the inferior simply exiting. */
3411 && num_lwps (inf->pid) > 1
5f572dec 3412 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3413 {
3414 if (debug_linux_nat)
3415 fprintf_unfiltered (gdb_stdlog,
3416 "CZL: Thread group leader %d zombie "
3417 "(it exited, or another thread execd).\n",
3418 inf->pid);
3419
3420 /* A leader zombie can mean one of two things:
3421
3422 - It exited, and there's an exit status pending
3423 available, or only the leader exited (not the whole
3424 program). In the latter case, we can't waitpid the
3425 leader's exit status until all other threads are gone.
3426
3427 - There are 3 or more threads in the group, and a thread
3428 other than the leader exec'd. On an exec, the Linux
3429 kernel destroys all other threads (except the execing
3430 one) in the thread group, and resets the execing thread's
3431 tid to the tgid. No exit notification is sent for the
3432 execing thread -- from the ptracer's perspective, it
3433 appears as though the execing thread just vanishes.
3434 Until we reap all other threads except the leader and the
3435 execing thread, the leader will be zombie, and the
3436 execing thread will be in `D (disc sleep)'. As soon as
3437 all other threads are reaped, the execing thread changes
3438 it's tid to the tgid, and the previous (zombie) leader
3439 vanishes, giving place to the "new" leader. We could try
3440 distinguishing the exit and exec cases, by waiting once
3441 more, and seeing if something comes out, but it doesn't
3442 sound useful. The previous leader _does_ go away, and
3443 we'll re-add the new one once we see the exec event
3444 (which is just the same as what would happen if the
3445 previous leader did exit voluntarily before some other
3446 thread execs). */
3447
3448 if (debug_linux_nat)
3449 fprintf_unfiltered (gdb_stdlog,
3450 "CZL: Thread group leader %d vanished.\n",
3451 inf->pid);
3452 exit_lwp (leader_lp);
3453 }
3454 }
3455}
3456
d6b0e80f 3457static ptid_t
7feb7d06 3458linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3459 ptid_t ptid, struct target_waitstatus *ourstatus,
3460 int target_options)
d6b0e80f 3461{
7feb7d06 3462 static sigset_t prev_mask;
4b60df3d 3463 enum resume_kind last_resume_kind;
12d9289a 3464 struct lwp_info *lp;
12d9289a 3465 int status;
d6b0e80f 3466
01124a23 3467 if (debug_linux_nat)
b84876c2
PA
3468 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3469
f973ed9c
DJ
3470 /* The first time we get here after starting a new inferior, we may
3471 not have added it to the LWP list yet - this is the earliest
3472 moment at which we know its PID. */
d90e17a7 3473 if (ptid_is_pid (inferior_ptid))
f973ed9c 3474 {
27c9d204
PA
3475 /* Upgrade the main thread's ptid. */
3476 thread_change_ptid (inferior_ptid,
3477 BUILD_LWP (GET_PID (inferior_ptid),
3478 GET_PID (inferior_ptid)));
3479
26cb8b7c 3480 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3481 lp->resumed = 1;
3482 }
3483
7feb7d06
PA
3484 /* Make sure SIGCHLD is blocked. */
3485 block_child_signals (&prev_mask);
d6b0e80f
AC
3486
3487retry:
d90e17a7
PA
3488 lp = NULL;
3489 status = 0;
d6b0e80f
AC
3490
3491 /* First check if there is a LWP with a wait status pending. */
0e5bf2a8 3492 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d6b0e80f 3493 {
0e5bf2a8 3494 /* Any LWP in the PTID group that's been resumed will do. */
d90e17a7 3495 lp = iterate_over_lwps (ptid, status_callback, NULL);
d6b0e80f
AC
3496 if (lp)
3497 {
ca2163eb 3498 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3499 fprintf_unfiltered (gdb_stdlog,
3500 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3501 status_to_str (lp->status),
d6b0e80f
AC
3502 target_pid_to_str (lp->ptid));
3503 }
d6b0e80f
AC
3504 }
3505 else if (is_lwp (ptid))
3506 {
3507 if (debug_linux_nat)
3508 fprintf_unfiltered (gdb_stdlog,
3509 "LLW: Waiting for specific LWP %s.\n",
3510 target_pid_to_str (ptid));
3511
3512 /* We have a specific LWP to check. */
3513 lp = find_lwp_pid (ptid);
3514 gdb_assert (lp);
d6b0e80f 3515
ca2163eb 3516 if (debug_linux_nat && lp->status)
d6b0e80f
AC
3517 fprintf_unfiltered (gdb_stdlog,
3518 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3519 status_to_str (lp->status),
d6b0e80f
AC
3520 target_pid_to_str (lp->ptid));
3521
d90e17a7
PA
3522 /* We check for lp->waitstatus in addition to lp->status,
3523 because we can have pending process exits recorded in
3524 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3525 an additional lp->status_p flag. */
ca2163eb 3526 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
d90e17a7 3527 lp = NULL;
d6b0e80f
AC
3528 }
3529
b84876c2
PA
3530 if (!target_can_async_p ())
3531 {
3532 /* Causes SIGINT to be passed on to the attached process. */
3533 set_sigint_trap ();
b84876c2 3534 }
d6b0e80f 3535
0e5bf2a8 3536 /* But if we don't find a pending event, we'll have to wait. */
7feb7d06 3537
d90e17a7 3538 while (lp == NULL)
d6b0e80f
AC
3539 {
3540 pid_t lwpid;
3541
0e5bf2a8
PA
3542 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3543 quirks:
3544
3545 - If the thread group leader exits while other threads in the
3546 thread group still exist, waitpid(TGID, ...) hangs. That
3547 waitpid won't return an exit status until the other threads
3548 in the group are reapped.
3549
3550 - When a non-leader thread execs, that thread just vanishes
3551 without reporting an exit (so we'd hang if we waited for it
3552 explicitly in that case). The exec event is reported to
3553 the TGID pid. */
3554
3555 errno = 0;
3556 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3557 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3558 lwpid = my_waitpid (-1, &status, WNOHANG);
3559
3560 if (debug_linux_nat)
3561 fprintf_unfiltered (gdb_stdlog,
3562 "LNW: waitpid(-1, ...) returned %d, %s\n",
3563 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3564
d6b0e80f
AC
3565 if (lwpid > 0)
3566 {
12d9289a
PA
3567 /* If this is true, then we paused LWPs momentarily, and may
3568 now have pending events to handle. */
3569 int new_pending;
3570
d6b0e80f
AC
3571 if (debug_linux_nat)
3572 {
3573 fprintf_unfiltered (gdb_stdlog,
3574 "LLW: waitpid %ld received %s\n",
3575 (long) lwpid, status_to_str (status));
3576 }
3577
0e5bf2a8 3578 lp = linux_nat_filter_event (lwpid, status, &new_pending);
d90e17a7 3579
33355866
JK
3580 /* STATUS is now no longer valid, use LP->STATUS instead. */
3581 status = 0;
3582
0e5bf2a8 3583 if (lp && !ptid_match (lp->ptid, ptid))
d6b0e80f 3584 {
e3e9f5a2
PA
3585 gdb_assert (lp->resumed);
3586
d90e17a7 3587 if (debug_linux_nat)
3e43a32a
MS
3588 fprintf (stderr,
3589 "LWP %ld got an event %06x, leaving pending.\n",
33355866 3590 ptid_get_lwp (lp->ptid), lp->status);
d90e17a7 3591
ca2163eb 3592 if (WIFSTOPPED (lp->status))
d90e17a7 3593 {
ca2163eb 3594 if (WSTOPSIG (lp->status) != SIGSTOP)
d90e17a7 3595 {
e3e9f5a2
PA
3596 /* Cancel breakpoint hits. The breakpoint may
3597 be removed before we fetch events from this
3598 process to report to the core. It is best
3599 not to assume the moribund breakpoints
3600 heuristic always handles these cases --- it
3601 could be too many events go through to the
3602 core before this one is handled. All-stop
3603 always cancels breakpoint hits in all
3604 threads. */
3605 if (non_stop
00390b84 3606 && linux_nat_lp_status_is_event (lp)
e3e9f5a2
PA
3607 && cancel_breakpoint (lp))
3608 {
3609 /* Throw away the SIGTRAP. */
3610 lp->status = 0;
3611
3612 if (debug_linux_nat)
3613 fprintf (stderr,
3e43a32a
MS
3614 "LLW: LWP %ld hit a breakpoint while"
3615 " waiting for another process;"
3616 " cancelled it\n",
e3e9f5a2
PA
3617 ptid_get_lwp (lp->ptid));
3618 }
3619 lp->stopped = 1;
d90e17a7
PA
3620 }
3621 else
3622 {
3623 lp->stopped = 1;
3624 lp->signalled = 0;
3625 }
3626 }
33355866 3627 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
d90e17a7
PA
3628 {
3629 if (debug_linux_nat)
3e43a32a
MS
3630 fprintf (stderr,
3631 "Process %ld exited while stopping LWPs\n",
d90e17a7
PA
3632 ptid_get_lwp (lp->ptid));
3633
3634 /* This was the last lwp in the process. Since
3635 events are serialized to GDB core, and we can't
3636 report this one right now, but GDB core and the
3637 other target layers will want to be notified
3638 about the exit code/signal, leave the status
3639 pending for the next time we're able to report
3640 it. */
d90e17a7
PA
3641
3642 /* Prevent trying to stop this thread again. We'll
3643 never try to resume it because it has a pending
3644 status. */
3645 lp->stopped = 1;
3646
3647 /* Dead LWP's aren't expected to reported a pending
3648 sigstop. */
3649 lp->signalled = 0;
3650
3651 /* Store the pending event in the waitstatus as
3652 well, because W_EXITCODE(0,0) == 0. */
ca2163eb 3653 store_waitstatus (&lp->waitstatus, lp->status);
d90e17a7
PA
3654 }
3655
3656 /* Keep looking. */
3657 lp = NULL;
d6b0e80f
AC
3658 }
3659
0e5bf2a8 3660 if (new_pending)
d90e17a7 3661 {
0e5bf2a8
PA
3662 /* Some LWP now has a pending event. Go all the way
3663 back to check it. */
3664 goto retry;
3665 }
12d9289a 3666
0e5bf2a8
PA
3667 if (lp)
3668 {
3669 /* We got an event to report to the core. */
3670 break;
d90e17a7 3671 }
0e5bf2a8
PA
3672
3673 /* Retry until nothing comes out of waitpid. A single
3674 SIGCHLD can indicate more than one child stopped. */
3675 continue;
d6b0e80f
AC
3676 }
3677
0e5bf2a8
PA
3678 /* Check for zombie thread group leaders. Those can't be reaped
3679 until all other threads in the thread group are. */
3680 check_zombie_leaders ();
d6b0e80f 3681
0e5bf2a8
PA
3682 /* If there are no resumed children left, bail. We'd be stuck
3683 forever in the sigsuspend call below otherwise. */
3684 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3685 {
3686 if (debug_linux_nat)
3687 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3688
0e5bf2a8 3689 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3690
0e5bf2a8
PA
3691 if (!target_can_async_p ())
3692 clear_sigint_trap ();
b84876c2 3693
0e5bf2a8
PA
3694 restore_child_signals_mask (&prev_mask);
3695 return minus_one_ptid;
d6b0e80f 3696 }
28736962 3697
0e5bf2a8
PA
3698 /* No interesting event to report to the core. */
3699
3700 if (target_options & TARGET_WNOHANG)
3701 {
01124a23 3702 if (debug_linux_nat)
28736962
PA
3703 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3704
0e5bf2a8 3705 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3706 restore_child_signals_mask (&prev_mask);
3707 return minus_one_ptid;
3708 }
d6b0e80f
AC
3709
3710 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3711 gdb_assert (lp == NULL);
0e5bf2a8
PA
3712
3713 /* Block until we get an event reported with SIGCHLD. */
3714 sigsuspend (&suspend_mask);
d6b0e80f
AC
3715 }
3716
b84876c2 3717 if (!target_can_async_p ())
d26b5354 3718 clear_sigint_trap ();
d6b0e80f
AC
3719
3720 gdb_assert (lp);
3721
ca2163eb
PA
3722 status = lp->status;
3723 lp->status = 0;
3724
d6b0e80f
AC
3725 /* Don't report signals that GDB isn't interested in, such as
3726 signals that are neither printed nor stopped upon. Stopping all
3727 threads can be a bit time-consuming so if we want decent
3728 performance with heavily multi-threaded programs, especially when
3729 they're using a high frequency timer, we'd better avoid it if we
3730 can. */
3731
3732 if (WIFSTOPPED (status))
3733 {
2ea28649 3734 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
d6b0e80f 3735
2455069d
UW
3736 /* When using hardware single-step, we need to report every signal.
3737 Otherwise, signals in pass_mask may be short-circuited. */
d539ed7e 3738 if (!lp->step
2455069d 3739 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
d6b0e80f
AC
3740 {
3741 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3742 here? It is not clear we should. GDB may not expect
3743 other threads to run. On the other hand, not resuming
3744 newly attached threads may cause an unwanted delay in
3745 getting them running. */
3746 registers_changed ();
7b50312a
PA
3747 if (linux_nat_prepare_to_resume != NULL)
3748 linux_nat_prepare_to_resume (lp);
28439f5e 3749 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
10d6c8cd 3750 lp->step, signo);
d6b0e80f
AC
3751 if (debug_linux_nat)
3752 fprintf_unfiltered (gdb_stdlog,
3753 "LLW: %s %s, %s (preempt 'handle')\n",
3754 lp->step ?
3755 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3756 target_pid_to_str (lp->ptid),
a493e3e2 3757 (signo != GDB_SIGNAL_0
2ea28649 3758 ? strsignal (gdb_signal_to_host (signo))
423ec54c 3759 : "0"));
d6b0e80f 3760 lp->stopped = 0;
d6b0e80f
AC
3761 goto retry;
3762 }
3763
1ad15515 3764 if (!non_stop)
d6b0e80f 3765 {
1ad15515
PA
3766 /* Only do the below in all-stop, as we currently use SIGINT
3767 to implement target_stop (see linux_nat_stop) in
3768 non-stop. */
a493e3e2 3769 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
1ad15515
PA
3770 {
3771 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3772 forwarded to the entire process group, that is, all LWPs
3773 will receive it - unless they're using CLONE_THREAD to
3774 share signals. Since we only want to report it once, we
3775 mark it as ignored for all LWPs except this one. */
d90e17a7
PA
3776 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3777 set_ignore_sigint, NULL);
1ad15515
PA
3778 lp->ignore_sigint = 0;
3779 }
3780 else
3781 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3782 }
3783 }
3784
3785 /* This LWP is stopped now. */
3786 lp->stopped = 1;
3787
3788 if (debug_linux_nat)
3789 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3790 status_to_str (status), target_pid_to_str (lp->ptid));
3791
4c28f408
PA
3792 if (!non_stop)
3793 {
3794 /* Now stop all other LWP's ... */
d90e17a7 3795 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3796
3797 /* ... and wait until all of them have reported back that
3798 they're no longer running. */
d90e17a7 3799 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
4c28f408
PA
3800
3801 /* If we're not waiting for a specific LWP, choose an event LWP
3802 from among those that have had events. Giving equal priority
3803 to all LWPs that have had events helps prevent
3804 starvation. */
0e5bf2a8 3805 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
d90e17a7 3806 select_event_lwp (ptid, &lp, &status);
d6b0e80f 3807
e3e9f5a2
PA
3808 /* Now that we've selected our final event LWP, cancel any
3809 breakpoints in other LWPs that have hit a GDB breakpoint.
3810 See the comment in cancel_breakpoints_callback to find out
3811 why. */
3812 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3813
4b60df3d
PA
3814 /* We'll need this to determine whether to report a SIGSTOP as
3815 TARGET_WAITKIND_0. Need to take a copy because
3816 resume_clear_callback clears it. */
3817 last_resume_kind = lp->last_resume_kind;
3818
e3e9f5a2
PA
3819 /* In all-stop, from the core's perspective, all LWPs are now
3820 stopped until a new resume action is sent over. */
3821 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3822 }
3823 else
25289eb2 3824 {
4b60df3d
PA
3825 /* See above. */
3826 last_resume_kind = lp->last_resume_kind;
3827 resume_clear_callback (lp, NULL);
25289eb2 3828 }
d6b0e80f 3829
26ab7092 3830 if (linux_nat_status_is_event (status))
d6b0e80f 3831 {
d6b0e80f
AC
3832 if (debug_linux_nat)
3833 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3834 "LLW: trap ptid is %s.\n",
3835 target_pid_to_str (lp->ptid));
d6b0e80f 3836 }
d6b0e80f
AC
3837
3838 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3839 {
3840 *ourstatus = lp->waitstatus;
3841 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3842 }
3843 else
3844 store_waitstatus (ourstatus, status);
3845
01124a23 3846 if (debug_linux_nat)
b84876c2
PA
3847 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3848
7feb7d06 3849 restore_child_signals_mask (&prev_mask);
1e225492 3850
4b60df3d 3851 if (last_resume_kind == resume_stop
25289eb2
PA
3852 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3853 && WSTOPSIG (status) == SIGSTOP)
3854 {
3855 /* A thread that has been requested to stop by GDB with
3856 target_stop, and it stopped cleanly, so report as SIG0. The
3857 use of SIGSTOP is an implementation detail. */
a493e3e2 3858 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3859 }
3860
1e225492
JK
3861 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3862 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3863 lp->core = -1;
3864 else
2e794194 3865 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3866
f973ed9c 3867 return lp->ptid;
d6b0e80f
AC
3868}
3869
e3e9f5a2
PA
3870/* Resume LWPs that are currently stopped without any pending status
3871 to report, but are resumed from the core's perspective. */
3872
3873static int
3874resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3875{
3876 ptid_t *wait_ptid_p = data;
3877
3878 if (lp->stopped
3879 && lp->resumed
3880 && lp->status == 0
3881 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3882 {
336060f3
PA
3883 struct regcache *regcache = get_thread_regcache (lp->ptid);
3884 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3885 CORE_ADDR pc = regcache_read_pc (regcache);
3886
e3e9f5a2
PA
3887 gdb_assert (is_executing (lp->ptid));
3888
3889 /* Don't bother if there's a breakpoint at PC that we'd hit
3890 immediately, and we're not waiting for this LWP. */
3891 if (!ptid_match (lp->ptid, *wait_ptid_p))
3892 {
e3e9f5a2
PA
3893 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3894 return 0;
3895 }
3896
3897 if (debug_linux_nat)
3898 fprintf_unfiltered (gdb_stdlog,
336060f3
PA
3899 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3900 target_pid_to_str (lp->ptid),
3901 paddress (gdbarch, pc),
3902 lp->step);
e3e9f5a2 3903
336060f3 3904 registers_changed ();
7b50312a
PA
3905 if (linux_nat_prepare_to_resume != NULL)
3906 linux_nat_prepare_to_resume (lp);
e3e9f5a2 3907 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
a493e3e2 3908 lp->step, GDB_SIGNAL_0);
e3e9f5a2 3909 lp->stopped = 0;
e3e9f5a2
PA
3910 lp->stopped_by_watchpoint = 0;
3911 }
3912
3913 return 0;
3914}
3915
7feb7d06
PA
3916static ptid_t
3917linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3918 ptid_t ptid, struct target_waitstatus *ourstatus,
3919 int target_options)
7feb7d06
PA
3920{
3921 ptid_t event_ptid;
3922
3923 if (debug_linux_nat)
09826ec5
PA
3924 {
3925 char *options_string;
3926
3927 options_string = target_options_to_string (target_options);
3928 fprintf_unfiltered (gdb_stdlog,
3929 "linux_nat_wait: [%s], [%s]\n",
3930 target_pid_to_str (ptid),
3931 options_string);
3932 xfree (options_string);
3933 }
7feb7d06
PA
3934
3935 /* Flush the async file first. */
3936 if (target_can_async_p ())
3937 async_file_flush ();
3938
e3e9f5a2
PA
3939 /* Resume LWPs that are currently stopped without any pending status
3940 to report, but are resumed from the core's perspective. LWPs get
3941 in this state if we find them stopping at a time we're not
3942 interested in reporting the event (target_wait on a
3943 specific_process, for example, see linux_nat_wait_1), and
3944 meanwhile the event became uninteresting. Don't bother resuming
3945 LWPs we're not going to wait for if they'd stop immediately. */
3946 if (non_stop)
3947 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3948
47608cb1 3949 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3950
3951 /* If we requested any event, and something came out, assume there
3952 may be more. If we requested a specific lwp or process, also
3953 assume there may be more. */
3954 if (target_can_async_p ()
6953d224
PA
3955 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3956 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3957 || !ptid_equal (ptid, minus_one_ptid)))
3958 async_file_mark ();
3959
3960 /* Get ready for the next event. */
3961 if (target_can_async_p ())
3962 target_async (inferior_event_handler, 0);
3963
3964 return event_ptid;
3965}
3966
d6b0e80f
AC
3967static int
3968kill_callback (struct lwp_info *lp, void *data)
3969{
ed731959
JK
3970 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3971
3972 errno = 0;
3973 kill (GET_LWP (lp->ptid), SIGKILL);
3974 if (debug_linux_nat)
3975 fprintf_unfiltered (gdb_stdlog,
3976 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3977 target_pid_to_str (lp->ptid),
3978 errno ? safe_strerror (errno) : "OK");
3979
3980 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3981
d6b0e80f
AC
3982 errno = 0;
3983 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3984 if (debug_linux_nat)
3985 fprintf_unfiltered (gdb_stdlog,
3986 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3987 target_pid_to_str (lp->ptid),
3988 errno ? safe_strerror (errno) : "OK");
3989
3990 return 0;
3991}
3992
3993static int
3994kill_wait_callback (struct lwp_info *lp, void *data)
3995{
3996 pid_t pid;
3997
3998 /* We must make sure that there are no pending events (delayed
3999 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
4000 program doesn't interfere with any following debugging session. */
4001
4002 /* For cloned processes we must check both with __WCLONE and
4003 without, since the exit status of a cloned process isn't reported
4004 with __WCLONE. */
4005 if (lp->cloned)
4006 {
4007 do
4008 {
58aecb61 4009 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 4010 if (pid != (pid_t) -1)
d6b0e80f 4011 {
e85a822c
DJ
4012 if (debug_linux_nat)
4013 fprintf_unfiltered (gdb_stdlog,
4014 "KWC: wait %s received unknown.\n",
4015 target_pid_to_str (lp->ptid));
4016 /* The Linux kernel sometimes fails to kill a thread
4017 completely after PTRACE_KILL; that goes from the stop
4018 point in do_fork out to the one in
4019 get_signal_to_deliever and waits again. So kill it
4020 again. */
4021 kill_callback (lp, NULL);
d6b0e80f
AC
4022 }
4023 }
4024 while (pid == GET_LWP (lp->ptid));
4025
4026 gdb_assert (pid == -1 && errno == ECHILD);
4027 }
4028
4029 do
4030 {
58aecb61 4031 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 4032 if (pid != (pid_t) -1)
d6b0e80f 4033 {
e85a822c
DJ
4034 if (debug_linux_nat)
4035 fprintf_unfiltered (gdb_stdlog,
4036 "KWC: wait %s received unk.\n",
4037 target_pid_to_str (lp->ptid));
4038 /* See the call to kill_callback above. */
4039 kill_callback (lp, NULL);
d6b0e80f
AC
4040 }
4041 }
4042 while (pid == GET_LWP (lp->ptid));
4043
4044 gdb_assert (pid == -1 && errno == ECHILD);
4045 return 0;
4046}
4047
4048static void
7d85a9c0 4049linux_nat_kill (struct target_ops *ops)
d6b0e80f 4050{
f973ed9c
DJ
4051 struct target_waitstatus last;
4052 ptid_t last_ptid;
4053 int status;
d6b0e80f 4054
f973ed9c
DJ
4055 /* If we're stopped while forking and we haven't followed yet,
4056 kill the other task. We need to do this first because the
4057 parent will be sleeping if this is a vfork. */
d6b0e80f 4058
f973ed9c 4059 get_last_target_status (&last_ptid, &last);
d6b0e80f 4060
f973ed9c
DJ
4061 if (last.kind == TARGET_WAITKIND_FORKED
4062 || last.kind == TARGET_WAITKIND_VFORKED)
4063 {
3a3e9ee3 4064 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c 4065 wait (&status);
26cb8b7c
PA
4066
4067 /* Let the arch-specific native code know this process is
4068 gone. */
4069 linux_nat_forget_process (PIDGET (last.value.related_pid));
f973ed9c
DJ
4070 }
4071
4072 if (forks_exist_p ())
7feb7d06 4073 linux_fork_killall ();
f973ed9c
DJ
4074 else
4075 {
d90e17a7 4076 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 4077
4c28f408
PA
4078 /* Stop all threads before killing them, since ptrace requires
4079 that the thread is stopped to sucessfully PTRACE_KILL. */
d90e17a7 4080 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
4081 /* ... and wait until all of them have reported back that
4082 they're no longer running. */
d90e17a7 4083 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 4084
f973ed9c 4085 /* Kill all LWP's ... */
d90e17a7 4086 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
4087
4088 /* ... and wait until we've flushed all events. */
d90e17a7 4089 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
4090 }
4091
4092 target_mourn_inferior ();
d6b0e80f
AC
4093}
4094
4095static void
136d6dae 4096linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 4097{
26cb8b7c
PA
4098 int pid = ptid_get_pid (inferior_ptid);
4099
4100 purge_lwp_list (pid);
d6b0e80f 4101
f973ed9c 4102 if (! forks_exist_p ())
d90e17a7
PA
4103 /* Normal case, no other forks available. */
4104 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
4105 else
4106 /* Multi-fork case. The current inferior_ptid has exited, but
4107 there are other viable forks to debug. Delete the exiting
4108 one and context-switch to the first available. */
4109 linux_fork_mourn_inferior ();
26cb8b7c
PA
4110
4111 /* Let the arch-specific native code know this process is gone. */
4112 linux_nat_forget_process (pid);
d6b0e80f
AC
4113}
4114
5b009018
PA
4115/* Convert a native/host siginfo object, into/from the siginfo in the
4116 layout of the inferiors' architecture. */
4117
4118static void
a5362b9a 4119siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
4120{
4121 int done = 0;
4122
4123 if (linux_nat_siginfo_fixup != NULL)
4124 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4125
4126 /* If there was no callback, or the callback didn't do anything,
4127 then just do a straight memcpy. */
4128 if (!done)
4129 {
4130 if (direction == 1)
a5362b9a 4131 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 4132 else
a5362b9a 4133 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
4134 }
4135}
4136
4aa995e1
PA
4137static LONGEST
4138linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4139 const char *annex, gdb_byte *readbuf,
4140 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4141{
4aa995e1 4142 int pid;
a5362b9a
TS
4143 siginfo_t siginfo;
4144 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
4145
4146 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4147 gdb_assert (readbuf || writebuf);
4148
4149 pid = GET_LWP (inferior_ptid);
4150 if (pid == 0)
4151 pid = GET_PID (inferior_ptid);
4152
4153 if (offset > sizeof (siginfo))
4154 return -1;
4155
4156 errno = 0;
4157 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4158 if (errno != 0)
4159 return -1;
4160
5b009018
PA
4161 /* When GDB is built as a 64-bit application, ptrace writes into
4162 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4163 inferior with a 64-bit GDB should look the same as debugging it
4164 with a 32-bit GDB, we need to convert it. GDB core always sees
4165 the converted layout, so any read/write will have to be done
4166 post-conversion. */
4167 siginfo_fixup (&siginfo, inf_siginfo, 0);
4168
4aa995e1
PA
4169 if (offset + len > sizeof (siginfo))
4170 len = sizeof (siginfo) - offset;
4171
4172 if (readbuf != NULL)
5b009018 4173 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
4174 else
4175 {
5b009018
PA
4176 memcpy (inf_siginfo + offset, writebuf, len);
4177
4178 /* Convert back to ptrace layout before flushing it out. */
4179 siginfo_fixup (&siginfo, inf_siginfo, 1);
4180
4aa995e1
PA
4181 errno = 0;
4182 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4183 if (errno != 0)
4184 return -1;
4185 }
4186
4187 return len;
4188}
4189
10d6c8cd
DJ
4190static LONGEST
4191linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4192 const char *annex, gdb_byte *readbuf,
4193 const gdb_byte *writebuf,
4194 ULONGEST offset, LONGEST len)
d6b0e80f 4195{
4aa995e1 4196 struct cleanup *old_chain;
10d6c8cd 4197 LONGEST xfer;
d6b0e80f 4198
4aa995e1
PA
4199 if (object == TARGET_OBJECT_SIGNAL_INFO)
4200 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4201 offset, len);
4202
c35b1492
PA
4203 /* The target is connected but no live inferior is selected. Pass
4204 this request down to a lower stratum (e.g., the executable
4205 file). */
4206 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4207 return 0;
4208
4aa995e1
PA
4209 old_chain = save_inferior_ptid ();
4210
d6b0e80f
AC
4211 if (is_lwp (inferior_ptid))
4212 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4213
10d6c8cd
DJ
4214 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4215 offset, len);
d6b0e80f
AC
4216
4217 do_cleanups (old_chain);
4218 return xfer;
4219}
4220
4221static int
28439f5e 4222linux_thread_alive (ptid_t ptid)
d6b0e80f 4223{
8c6a60d1 4224 int err, tmp_errno;
4c28f408 4225
d6b0e80f
AC
4226 gdb_assert (is_lwp (ptid));
4227
4c28f408
PA
4228 /* Send signal 0 instead of anything ptrace, because ptracing a
4229 running thread errors out claiming that the thread doesn't
4230 exist. */
4231 err = kill_lwp (GET_LWP (ptid), 0);
8c6a60d1 4232 tmp_errno = errno;
d6b0e80f
AC
4233 if (debug_linux_nat)
4234 fprintf_unfiltered (gdb_stdlog,
4c28f408 4235 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 4236 target_pid_to_str (ptid),
8c6a60d1 4237 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 4238
4c28f408 4239 if (err != 0)
d6b0e80f
AC
4240 return 0;
4241
4242 return 1;
4243}
4244
28439f5e
PA
4245static int
4246linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4247{
4248 return linux_thread_alive (ptid);
4249}
4250
d6b0e80f 4251static char *
117de6a9 4252linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4253{
4254 static char buf[64];
4255
a0ef4274 4256 if (is_lwp (ptid)
d90e17a7
PA
4257 && (GET_PID (ptid) != GET_LWP (ptid)
4258 || num_lwps (GET_PID (ptid)) > 1))
d6b0e80f
AC
4259 {
4260 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4261 return buf;
4262 }
4263
4264 return normal_pid_to_str (ptid);
4265}
4266
4694da01
TT
4267static char *
4268linux_nat_thread_name (struct thread_info *thr)
4269{
4270 int pid = ptid_get_pid (thr->ptid);
4271 long lwp = ptid_get_lwp (thr->ptid);
4272#define FORMAT "/proc/%d/task/%ld/comm"
4273 char buf[sizeof (FORMAT) + 30];
4274 FILE *comm_file;
4275 char *result = NULL;
4276
4277 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4278 comm_file = fopen (buf, "r");
4279 if (comm_file)
4280 {
4281 /* Not exported by the kernel, so we define it here. */
4282#define COMM_LEN 16
4283 static char line[COMM_LEN + 1];
4284
4285 if (fgets (line, sizeof (line), comm_file))
4286 {
4287 char *nl = strchr (line, '\n');
4288
4289 if (nl)
4290 *nl = '\0';
4291 if (*line != '\0')
4292 result = line;
4293 }
4294
4295 fclose (comm_file);
4296 }
4297
4298#undef COMM_LEN
4299#undef FORMAT
4300
4301 return result;
4302}
4303
dba24537
AC
4304/* Accepts an integer PID; Returns a string representing a file that
4305 can be opened to get the symbols for the child process. */
4306
6d8fd2b7
UW
4307static char *
4308linux_child_pid_to_exec_file (int pid)
dba24537
AC
4309{
4310 char *name1, *name2;
4311
4312 name1 = xmalloc (MAXPATHLEN);
4313 name2 = xmalloc (MAXPATHLEN);
4314 make_cleanup (xfree, name1);
4315 make_cleanup (xfree, name2);
4316 memset (name2, 0, MAXPATHLEN);
4317
4318 sprintf (name1, "/proc/%d/exe", pid);
0270a750 4319 if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
dba24537
AC
4320 return name2;
4321 else
4322 return name1;
4323}
4324
dba24537
AC
4325/* Records the thread's register state for the corefile note
4326 section. */
4327
4328static char *
6432734d
UW
4329linux_nat_collect_thread_registers (const struct regcache *regcache,
4330 ptid_t ptid, bfd *obfd,
4331 char *note_data, int *note_size,
2ea28649 4332 enum gdb_signal stop_signal)
dba24537 4333{
6432734d 4334 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 4335 const struct regset *regset;
55e969c1 4336 int core_regset_p;
6432734d
UW
4337 gdb_gregset_t gregs;
4338 gdb_fpregset_t fpregs;
4f844a66
DM
4339
4340 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
dba24537 4341
6432734d
UW
4342 if (core_regset_p
4343 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4344 sizeof (gregs)))
4345 != NULL && regset->collect_regset != NULL)
4346 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4f844a66 4347 else
6432734d 4348 fill_gregset (regcache, &gregs, -1);
2f2241f1 4349
6432734d
UW
4350 note_data = (char *) elfcore_write_prstatus
4351 (obfd, note_data, note_size, ptid_get_lwp (ptid),
2ea28649 4352 gdb_signal_to_host (stop_signal), &gregs);
2f2241f1 4353
6432734d
UW
4354 if (core_regset_p
4355 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4356 sizeof (fpregs)))
3e43a32a 4357 != NULL && regset->collect_regset != NULL)
6432734d
UW
4358 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4359 else
4360 fill_fpregset (regcache, &fpregs, -1);
17ea7499 4361
6432734d
UW
4362 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4363 &fpregs, sizeof (fpregs));
4f844a66 4364
dba24537
AC
4365 return note_data;
4366}
4367
dba24537
AC
4368/* Fills the "to_make_corefile_note" target vector. Builds the note
4369 section for a corefile, and returns it in a malloc buffer. */
4370
4371static char *
4372linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4373{
6432734d
UW
4374 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4375 converted to gdbarch_core_regset_sections, this function can go away. */
f5656ead 4376 return linux_make_corefile_notes (target_gdbarch (), obfd, note_size,
6432734d 4377 linux_nat_collect_thread_registers);
dba24537
AC
4378}
4379
10d6c8cd
DJ
4380/* Implement the to_xfer_partial interface for memory reads using the /proc
4381 filesystem. Because we can use a single read() call for /proc, this
4382 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4383 but it doesn't support writes. */
4384
4385static LONGEST
4386linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4387 const char *annex, gdb_byte *readbuf,
4388 const gdb_byte *writebuf,
4389 ULONGEST offset, LONGEST len)
dba24537 4390{
10d6c8cd
DJ
4391 LONGEST ret;
4392 int fd;
dba24537
AC
4393 char filename[64];
4394
10d6c8cd 4395 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4396 return 0;
4397
4398 /* Don't bother for one word. */
4399 if (len < 3 * sizeof (long))
4400 return 0;
4401
4402 /* We could keep this file open and cache it - possibly one per
4403 thread. That requires some juggling, but is even faster. */
4404 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4405 fd = open (filename, O_RDONLY | O_LARGEFILE);
4406 if (fd == -1)
4407 return 0;
4408
4409 /* If pread64 is available, use it. It's faster if the kernel
4410 supports it (only one syscall), and it's 64-bit safe even on
4411 32-bit platforms (for instance, SPARC debugging a SPARC64
4412 application). */
4413#ifdef HAVE_PREAD64
10d6c8cd 4414 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4415#else
10d6c8cd 4416 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4417#endif
4418 ret = 0;
4419 else
4420 ret = len;
4421
4422 close (fd);
4423 return ret;
4424}
4425
efcbbd14
UW
4426
4427/* Enumerate spufs IDs for process PID. */
4428static LONGEST
4429spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4430{
f5656ead 4431 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
4432 LONGEST pos = 0;
4433 LONGEST written = 0;
4434 char path[128];
4435 DIR *dir;
4436 struct dirent *entry;
4437
4438 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4439 dir = opendir (path);
4440 if (!dir)
4441 return -1;
4442
4443 rewinddir (dir);
4444 while ((entry = readdir (dir)) != NULL)
4445 {
4446 struct stat st;
4447 struct statfs stfs;
4448 int fd;
4449
4450 fd = atoi (entry->d_name);
4451 if (!fd)
4452 continue;
4453
4454 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4455 if (stat (path, &st) != 0)
4456 continue;
4457 if (!S_ISDIR (st.st_mode))
4458 continue;
4459
4460 if (statfs (path, &stfs) != 0)
4461 continue;
4462 if (stfs.f_type != SPUFS_MAGIC)
4463 continue;
4464
4465 if (pos >= offset && pos + 4 <= offset + len)
4466 {
4467 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4468 written += 4;
4469 }
4470 pos += 4;
4471 }
4472
4473 closedir (dir);
4474 return written;
4475}
4476
4477/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4478 object type, using the /proc file system. */
4479static LONGEST
4480linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4481 const char *annex, gdb_byte *readbuf,
4482 const gdb_byte *writebuf,
4483 ULONGEST offset, LONGEST len)
4484{
4485 char buf[128];
4486 int fd = 0;
4487 int ret = -1;
4488 int pid = PIDGET (inferior_ptid);
4489
4490 if (!annex)
4491 {
4492 if (!readbuf)
4493 return -1;
4494 else
4495 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4496 }
4497
4498 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4499 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4500 if (fd <= 0)
4501 return -1;
4502
4503 if (offset != 0
4504 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4505 {
4506 close (fd);
4507 return 0;
4508 }
4509
4510 if (writebuf)
4511 ret = write (fd, writebuf, (size_t) len);
4512 else if (readbuf)
4513 ret = read (fd, readbuf, (size_t) len);
4514
4515 close (fd);
4516 return ret;
4517}
4518
4519
dba24537
AC
4520/* Parse LINE as a signal set and add its set bits to SIGS. */
4521
4522static void
4523add_line_to_sigset (const char *line, sigset_t *sigs)
4524{
4525 int len = strlen (line) - 1;
4526 const char *p;
4527 int signum;
4528
4529 if (line[len] != '\n')
8a3fe4f8 4530 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4531
4532 p = line;
4533 signum = len * 4;
4534 while (len-- > 0)
4535 {
4536 int digit;
4537
4538 if (*p >= '0' && *p <= '9')
4539 digit = *p - '0';
4540 else if (*p >= 'a' && *p <= 'f')
4541 digit = *p - 'a' + 10;
4542 else
8a3fe4f8 4543 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4544
4545 signum -= 4;
4546
4547 if (digit & 1)
4548 sigaddset (sigs, signum + 1);
4549 if (digit & 2)
4550 sigaddset (sigs, signum + 2);
4551 if (digit & 4)
4552 sigaddset (sigs, signum + 3);
4553 if (digit & 8)
4554 sigaddset (sigs, signum + 4);
4555
4556 p++;
4557 }
4558}
4559
4560/* Find process PID's pending signals from /proc/pid/status and set
4561 SIGS to match. */
4562
4563void
3e43a32a
MS
4564linux_proc_pending_signals (int pid, sigset_t *pending,
4565 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4566{
4567 FILE *procfile;
4568 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
7c8a8b04 4569 struct cleanup *cleanup;
dba24537
AC
4570
4571 sigemptyset (pending);
4572 sigemptyset (blocked);
4573 sigemptyset (ignored);
4574 sprintf (fname, "/proc/%d/status", pid);
4575 procfile = fopen (fname, "r");
4576 if (procfile == NULL)
8a3fe4f8 4577 error (_("Could not open %s"), fname);
7c8a8b04 4578 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
4579
4580 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4581 {
4582 /* Normal queued signals are on the SigPnd line in the status
4583 file. However, 2.6 kernels also have a "shared" pending
4584 queue for delivering signals to a thread group, so check for
4585 a ShdPnd line also.
4586
4587 Unfortunately some Red Hat kernels include the shared pending
4588 queue but not the ShdPnd status field. */
4589
4590 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4591 add_line_to_sigset (buffer + 8, pending);
4592 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4593 add_line_to_sigset (buffer + 8, pending);
4594 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4595 add_line_to_sigset (buffer + 8, blocked);
4596 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4597 add_line_to_sigset (buffer + 8, ignored);
4598 }
4599
7c8a8b04 4600 do_cleanups (cleanup);
dba24537
AC
4601}
4602
07e059b5
VP
4603static LONGEST
4604linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e
MS
4605 const char *annex, gdb_byte *readbuf,
4606 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
07e059b5 4607{
07e059b5
VP
4608 gdb_assert (object == TARGET_OBJECT_OSDATA);
4609
d26e3629 4610 return linux_common_xfer_osdata (annex, readbuf, offset, len);
07e059b5
VP
4611}
4612
10d6c8cd
DJ
4613static LONGEST
4614linux_xfer_partial (struct target_ops *ops, enum target_object object,
4615 const char *annex, gdb_byte *readbuf,
4616 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4617{
4618 LONGEST xfer;
4619
4620 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4621 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
10d6c8cd
DJ
4622 offset, len);
4623
07e059b5
VP
4624 if (object == TARGET_OBJECT_OSDATA)
4625 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4626 offset, len);
4627
efcbbd14
UW
4628 if (object == TARGET_OBJECT_SPU)
4629 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4630 offset, len);
4631
8f313923
JK
4632 /* GDB calculates all the addresses in possibly larget width of the address.
4633 Address width needs to be masked before its final use - either by
4634 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4635
4636 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4637
4638 if (object == TARGET_OBJECT_MEMORY)
4639 {
f5656ead 4640 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4641
4642 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4643 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4644 }
4645
10d6c8cd
DJ
4646 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4647 offset, len);
4648 if (xfer != 0)
4649 return xfer;
4650
4651 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4652 offset, len);
4653}
4654
5808517f
YQ
4655static void
4656cleanup_target_stop (void *arg)
4657{
4658 ptid_t *ptid = (ptid_t *) arg;
4659
4660 gdb_assert (arg != NULL);
4661
4662 /* Unpause all */
a493e3e2 4663 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4664}
4665
4666static VEC(static_tracepoint_marker_p) *
4667linux_child_static_tracepoint_markers_by_strid (const char *strid)
4668{
4669 char s[IPA_CMD_BUF_SIZE];
4670 struct cleanup *old_chain;
4671 int pid = ptid_get_pid (inferior_ptid);
4672 VEC(static_tracepoint_marker_p) *markers = NULL;
4673 struct static_tracepoint_marker *marker = NULL;
4674 char *p = s;
4675 ptid_t ptid = ptid_build (pid, 0, 0);
4676
4677 /* Pause all */
4678 target_stop (ptid);
4679
4680 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4681 s[sizeof ("qTfSTM")] = 0;
4682
42476b70 4683 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4684
4685 old_chain = make_cleanup (free_current_marker, &marker);
4686 make_cleanup (cleanup_target_stop, &ptid);
4687
4688 while (*p++ == 'm')
4689 {
4690 if (marker == NULL)
4691 marker = XCNEW (struct static_tracepoint_marker);
4692
4693 do
4694 {
4695 parse_static_tracepoint_marker_definition (p, &p, marker);
4696
4697 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4698 {
4699 VEC_safe_push (static_tracepoint_marker_p,
4700 markers, marker);
4701 marker = NULL;
4702 }
4703 else
4704 {
4705 release_static_tracepoint_marker (marker);
4706 memset (marker, 0, sizeof (*marker));
4707 }
4708 }
4709 while (*p++ == ','); /* comma-separated list */
4710
4711 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4712 s[sizeof ("qTsSTM")] = 0;
42476b70 4713 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4714 p = s;
4715 }
4716
4717 do_cleanups (old_chain);
4718
4719 return markers;
4720}
4721
/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  */

/* Install the GNU/Linux child target methods into T, overriding the
   implementations inherited from the layered (inf-ptrace) target.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Remember the underlying xfer_partial so linux_xfer_partial can
     fall back to it for objects it does not handle itself.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;

  t->to_static_tracepoint_markers_by_strid
    = linux_child_static_tracepoint_markers_by_strid;
}
4747
/* Build the generic GNU/Linux target: a plain inf-ptrace target with
   the Linux-specific methods installed on top.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *ops = inf_ptrace_target ();

  linux_target_install_ops (ops);
  return ops;
}
4758
4759struct target_ops *
7714d83a 4760linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4761{
4762 struct target_ops *t;
4763
4764 t = inf_ptrace_trad_target (register_u_offset);
4765 linux_target_install_ops (t);
10d6c8cd 4766
10d6c8cd
DJ
4767 return t;
4768}
4769
b84876c2
PA
4770/* target_is_async_p implementation. */
4771
4772static int
4773linux_nat_is_async_p (void)
4774{
4775 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4776 it explicitly with the "set target-async" command.
b84876c2 4777 Someday, linux will always be async. */
3dd5b83d 4778 return target_async_permitted;
b84876c2
PA
4779}
4780
4781/* target_can_async_p implementation. */
4782
4783static int
4784linux_nat_can_async_p (void)
4785{
4786 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4787 it explicitly with the "set target-async" command.
b84876c2 4788 Someday, linux will always be async. */
3dd5b83d 4789 return target_async_permitted;
b84876c2
PA
4790}
4791
/* target_supports_non_stop implementation: non-stop mode is always
   available on the GNU/Linux native target.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
4797
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

/* target_supports_multi_process implementation: report the value of
   the linux_multi_process knob above.  */

static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}
4808
/* target_supports_disable_randomization implementation.  Disabling
   address space randomization relies on personality(2), so it is only
   supported when built with HAVE_PERSONALITY.  */

static int
linux_nat_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4818
/* Nonzero while GDB (rather than the inferior) owns the terminal in
   async mode.  Tracked here because target_terminal_* calls are meant
   to be idempotent (see linux_nat_terminal_inferior/_ours).  */
static int async_terminal_is_ours = 1;
4820
/* target_terminal_inferior implementation.  Hand the terminal over to
   the inferior, and in async mode stop monitoring stdin and arrange
   for SIGINT to be forwarded to the inferior.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_inferior ();
      return;
    }

  terminal_inferior ();

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop reading stdin in the event loop while the inferior owns the
     terminal, and trap SIGINT so it interrupts the inferior.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
4843
/* target_terminal_ours implementation.  Reclaim the terminal for GDB,
   and in async mode resume monitoring stdin and restore normal SIGINT
   handling.  */

static void
linux_nat_terminal_ours (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_ours ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  terminal_ours ();

  /* Calls are idempotent: nothing to do if GDB already owns it.  */
  if (async_terminal_is_ours)
    return;

  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}
4868
/* Callback (and its opaque context) registered via linux_nat_async;
   invoked from handle_target_event when a target event arrives.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;
4872
/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.

   NOTE: this runs in signal context, so it only calls
   async-signal-safe primitives and preserves errno.  */

static void
sigchld_handler (int signo)
{
  int old_errno = errno;

  if (debug_linux_nat)
    ui_file_write_async_safe (gdb_stdlog,
			      "sigchld\n", sizeof ("sigchld\n") - 1);

  /* Only poke the event loop if the async pipe actually exists.  */
  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */

  errno = old_errno;
}
4894
4895/* Callback registered with the target events file descriptor. */
4896
4897static void
4898handle_target_event (int error, gdb_client_data client_data)
4899{
4900 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4901}
4902
4903/* Create/destroy the target events pipe. Returns previous state. */
4904
4905static int
4906linux_async_pipe (int enable)
4907{
4908 int previous = (linux_nat_event_pipe[0] != -1);
4909
4910 if (previous != enable)
4911 {
4912 sigset_t prev_mask;
4913
4914 block_child_signals (&prev_mask);
4915
4916 if (enable)
4917 {
4918 if (pipe (linux_nat_event_pipe) == -1)
4919 internal_error (__FILE__, __LINE__,
4920 "creating event pipe failed.");
4921
4922 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4923 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4924 }
4925 else
4926 {
4927 close (linux_nat_event_pipe[0]);
4928 close (linux_nat_event_pipe[1]);
4929 linux_nat_event_pipe[0] = -1;
4930 linux_nat_event_pipe[1] = -1;
4931 }
4932
4933 restore_child_signals_mask (&prev_mask);
4934 }
4935
4936 return previous;
b84876c2
PA
4937}
4938
4939/* target_async implementation. */
4940
4941static void
4942linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4943 void *context), void *context)
4944{
b84876c2
PA
4945 if (callback != NULL)
4946 {
4947 async_client_callback = callback;
4948 async_client_context = context;
7feb7d06
PA
4949 if (!linux_async_pipe (1))
4950 {
4951 add_file_handler (linux_nat_event_pipe[0],
4952 handle_target_event, NULL);
4953 /* There may be pending events to handle. Tell the event loop
4954 to poll them. */
4955 async_file_mark ();
4956 }
b84876c2
PA
4957 }
4958 else
4959 {
4960 async_client_callback = callback;
4961 async_client_context = context;
b84876c2 4962 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4963 linux_async_pipe (0);
b84876c2
PA
4964 }
4965 return;
4966}
4967
/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
   event came out.  Iteration callback for iterate_over_lwps; always
   returns 0 so every matching LWP is visited.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));


      /* If a stop was already requested for this LWP, don't send a
	 second one.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"linux-nat: already stopping LWP %ld at "
				"GDB's request\n",
				ptid_get_lwp (lwp->ptid));
	  return 0;
	}

      /* Ask the LWP to stop, and remember that the stop was GDB's
	 idea, not the inferior's.  */
      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}
5014
5015static void
5016linux_nat_stop (ptid_t ptid)
5017{
5018 if (non_stop)
d90e17a7 5019 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408
PA
5020 else
5021 linux_ops->to_stop (ptid);
5022}
5023
/* to_close implementation: turn off async reporting, then delegate
   to the layered single-threaded target's close method, if any.  */

static void
linux_nat_close (int quitting)
{
  /* Unregister from the event loop.  */
  if (linux_nat_is_async_p ())
    linux_nat_async (NULL, 0);

  if (linux_ops->to_close)
    linux_ops->to_close (quitting);
}
5034
c0694254
PA
5035/* When requests are passed down from the linux-nat layer to the
5036 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5037 used. The address space pointer is stored in the inferior object,
5038 but the common code that is passed such ptid can't tell whether
5039 lwpid is a "main" process id or not (it assumes so). We reverse
5040 look up the "main" process id from the lwp here. */
5041
70221824 5042static struct address_space *
c0694254
PA
5043linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5044{
5045 struct lwp_info *lwp;
5046 struct inferior *inf;
5047 int pid;
5048
5049 pid = GET_LWP (ptid);
5050 if (GET_LWP (ptid) == 0)
5051 {
5052 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5053 tgid. */
5054 lwp = find_lwp_pid (ptid);
5055 pid = GET_PID (lwp->ptid);
5056 }
5057 else
5058 {
5059 /* A (pid,lwpid,0) ptid. */
5060 pid = GET_PID (ptid);
5061 }
5062
5063 inf = find_inferior_pid (pid);
5064 gdb_assert (inf != NULL);
5065 return inf->aspace;
5066}
5067
dc146f7c
VP
5068/* Return the cached value of the processor core for thread PTID. */
5069
70221824 5070static int
dc146f7c
VP
5071linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5072{
5073 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 5074
dc146f7c
VP
5075 if (info)
5076 return info->core;
5077 return -1;
5078}
5079
/* Turn the single-threaded native target T into the multi-threaded
   GNU/Linux process-stratum target, and register it with GDB's target
   list.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Async support.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
5134
9f0bdab8
DJ
5135/* Register a method to call whenever a new thread is attached. */
5136void
7b50312a
PA
5137linux_nat_set_new_thread (struct target_ops *t,
5138 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
5139{
5140 /* Save the pointer. We only support a single registered instance
5141 of the GNU/Linux native target, so we do not need to map this to
5142 T. */
5143 linux_nat_new_thread = new_thread;
5144}
5145
26cb8b7c
PA
5146/* See declaration in linux-nat.h. */
5147
5148void
5149linux_nat_set_new_fork (struct target_ops *t,
5150 linux_nat_new_fork_ftype *new_fork)
5151{
5152 /* Save the pointer. */
5153 linux_nat_new_fork = new_fork;
5154}
5155
5156/* See declaration in linux-nat.h. */
5157
5158void
5159linux_nat_set_forget_process (struct target_ops *t,
5160 linux_nat_forget_process_ftype *fn)
5161{
5162 /* Save the pointer. */
5163 linux_nat_forget_process_hook = fn;
5164}
5165
5166/* See declaration in linux-nat.h. */
5167
5168void
5169linux_nat_forget_process (pid_t pid)
5170{
5171 if (linux_nat_forget_process_hook != NULL)
5172 linux_nat_forget_process_hook (pid);
5173}
5174
5b009018
PA
5175/* Register a method that converts a siginfo object between the layout
5176 that ptrace returns, and the layout in the architecture of the
5177 inferior. */
5178void
5179linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 5180 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
5181 gdb_byte *,
5182 int))
5183{
5184 /* Save the pointer. */
5185 linux_nat_siginfo_fixup = siginfo_fixup;
5186}
5187
7b50312a
PA
5188/* Register a method to call prior to resuming a thread. */
5189
5190void
5191linux_nat_set_prepare_to_resume (struct target_ops *t,
5192 void (*prepare_to_resume) (struct lwp_info *))
5193{
5194 /* Save the pointer. */
5195 linux_nat_prepare_to_resume = prepare_to_resume;
5196}
5197
/* See linux-nat.h.  Fetch the siginfo for the thread PTID into
   *SIGINFO via PTRACE_GETSIGINFO.  Returns 1 on success; on failure
   returns 0 with *SIGINFO zeroed.  */

int
linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
{
  int pid;

  /* Prefer the LWP id; for a (pid,0,0) ptid fall back to the pid.  */
  pid = GET_LWP (ptid);
  if (pid == 0)
    pid = GET_PID (ptid);

  /* ptrace reports failure via errno, so clear it first.  */
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
  if (errno != 0)
    {
      memset (siginfo, 0, sizeof (*siginfo));
      return 0;
    }
  return 1;
}
5218
2c0b251b
PA
5219/* Provide a prototype to silence -Wmissing-prototypes. */
5220extern initialize_file_ftype _initialize_linux_nat;
5221
/* Module initializer: register the "set/show debug lin-lwp" command,
   install the SIGCHLD handler, and set up the signal masks used by
   the wait machinery.  */

void
_initialize_linux_nat (void)
{
  add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
			     &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			     NULL,
			     show_debug_linux_nat,
			     &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}
5251\f
5252
5253/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5254 the GNU/Linux Threads library and therefore doesn't really belong
5255 here. */
5256
5257/* Read variable NAME in the target and return its value if found.
5258 Otherwise return zero. It is assumed that the type of the variable
5259 is `int'. */
5260
5261static int
5262get_signo (const char *name)
5263{
5264 struct minimal_symbol *ms;
5265 int signo;
5266
5267 ms = lookup_minimal_symbol (name, NULL, NULL);
5268 if (ms == NULL)
5269 return 0;
5270
8e70166d 5271 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5272 sizeof (signo)) != 0)
5273 return 0;
5274
5275 return signo;
5276}
5277
/* Return the set of signals used by the threads library in *SET.
   Also installs the SIGCHLD handler for the "cancel" signal and
   blocks it everywhere except during sigsuspend.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* LinuxThreads exports the signal numbers it uses; read them from
     the inferior when present.  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}
This page took 1.299983 seconds and 4 git commands to generate.