/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2012 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-ptrace.h"
#include "linux-procfs.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc.  */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include "gdb_dirent.h"
#include "xml-support.h"
#include "terminal.h"
#include <sys/vfs.h>
#include "solib.h"
#include "linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "exceptions.h"
#include "linux-ptrace.h"
#include "buffer.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

   Waiting for events in sync mode
   ===============================

   When waiting for an event in a specific thread, we just use waitpid,
   passing the specific pid, and not passing WNOHANG.

   When waiting for an event in all threads, waitpid is not quite good
   enough.  Prior to version 2.4, Linux can either wait for an event in
   the main thread, or in secondary threads.  (2.4 has the __WALL
   flag).  So, if we use blocking waitpid, we might miss an event.  The
   solution is to use non-blocking waitpid, together with sigsuspend.
   First, we use non-blocking waitpid to get an event in the main
   process, if any.  Second, we use non-blocking waitpid with the
   __WCLONE flag to check for events in cloned processes.  If nothing
   is found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD
   arrives, it means something happened to a child process -- and
   SIGCHLD will be delivered both for events in the main debugged
   process and in cloned processes.  As soon as we know there's an
   event, we get back to calling non-blocking waitpid with and without
   __WCLONE.

   Note that SIGCHLD should be blocked between waitpid and sigsuspend
   calls, so that we don't miss a signal.  If SIGCHLD arrives in
   between, while it's blocked, the signal becomes pending and
   sigsuspend immediately notices it and returns.

   Waiting for events in async mode
   ================================

   In async mode, GDB should always be ready to handle both user input
   and target events, so neither blocking waitpid nor sigsuspend are
   viable options.  Instead, we should asynchronously notify the GDB
   main event loop whenever there's an unprocessed event from the
   target.  We detect asynchronous target events by handling SIGCHLD
   signals.  To notify the event loop about target events, the
   self-pipe trick is used --- a pipe is registered as a waitable event
   source in the event loop, the event loop select/poll's on the read
   end of this pipe (as well as on other event sources, e.g., stdin),
   and the SIGCHLD handler writes a byte to this pipe.  This is more
   portable than relying on pselect/ppoll, since on kernels that lack
   those syscalls, libc emulates them with select/poll+sigprocmask, and
   that is racy (a.k.a. plain broken).  (See the illustrative sketch
   following this comment.)

   Obviously, if we fail to notify the event loop when there's a target
   event, it's bad.  OTOH, if we notify the event loop when there's no
   event from the target, linux_nat_wait will detect that there's no
   real event to report, and return an event of type
   TARGET_WAITKIND_IGNORE.  This is mostly harmless, but it will waste
   time and is better avoided.

   The main design point is that every time GDB is outside linux-nat.c,
   we have a SIGCHLD handler installed that is called when something
   happens to the target and notifies the GDB event loop.  Whenever GDB
   core decides to handle the event, and calls into linux-nat.c, we
   process things as in sync mode, except that we never block in
   sigsuspend.

   While processing an event, we may end up momentarily blocked in
   waitpid calls.  Those waitpid calls, while blocking, are guaranteed
   to return quickly.  E.g., in all-stop mode, before reporting to the
   core that an LWP hit a breakpoint, all LWPs are stopped by sending
   them SIGSTOP, and synchronously waiting for the SIGSTOP to be
   reported.  Note that this is different from blocking indefinitely
   waiting for the next event --- here, we're already handling an
   event.

   Use of signals
   ==============

   We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
   another signal is not entirely significant; we just need a signal to
   be delivered, so that we can intercept it.  SIGSTOP's advantage is
   that it cannot be blocked.  A disadvantage is that it is not a
   real-time signal, so it can only be queued once; we do not keep
   track of other sources of SIGSTOP.

   Two other signals that cannot be blocked are SIGCONT and SIGKILL.
   But we can't use them, because they have special behavior when the
   signal is generated - not when it is delivered.  SIGCONT resumes the
   entire thread group and SIGKILL kills the entire thread group.

   A delivered SIGSTOP would stop the entire thread group, not just the
   thread we tkill'd.  But we never let the SIGSTOP be delivered; we
   always intercept and cancel it (by PTRACE_CONT without passing
   SIGSTOP).

   We could use a real-time signal instead.  This would solve those
   problems; we could use PTRACE_GETSIGINFO to locate the specific stop
   signals sent by GDB.  But we would still have to have some support
   for SIGSTOP, since PTRACE_ATTACH generates it, and there are races
   with trying to find a signal that is not blocked.  */

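/* For illustration only: a minimal, self-contained sketch of the
   self-pipe trick described above.  The names (event_pipe,
   handle_sigchld, event_loop_sketch) are hypothetical; this is not the
   code GDB actually uses --- see linux_nat_event_pipe, async_file_mark
   and async_file_flush below for that.  */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/select.h>
#include <unistd.h>

static int event_pipe[2];

/* SIGCHLD handler: only async-signal-safe calls (write) are made, and
   errno is preserved for the interrupted code.  */

static void
handle_sigchld (int signo)
{
  int saved_errno = errno;

  (void) write (event_pipe[1], "+", 1);
  errno = saved_errno;
}

static void
event_loop_sketch (void)
{
  int maxfd;

  pipe (event_pipe);
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, handle_sigchld);

  maxfd = (event_pipe[0] > STDIN_FILENO ? event_pipe[0] : STDIN_FILENO);

  while (1)
    {
      fd_set rfds;

      FD_ZERO (&rfds);
      FD_SET (STDIN_FILENO, &rfds);
      FD_SET (event_pipe[0], &rfds);

      /* Wakes up either for user input or for a target event; no
         pselect/ppoll needed.  */
      if (select (maxfd + 1, &rfds, NULL, NULL, NULL) > 0
          && FD_ISSET (event_pipe[0], &rfds))
        {
          char buf[64];

          /* Drain the pipe, then collect the actual events with
             non-blocking waitpid.  */
          while (read (event_pipe[0], buf, sizeof buf) > 0)
            ;
        }
    }
}
#endif
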
#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Unlike other extended result codes, WSTOPSIG (status) on
   PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
   instead SIGTRAP with bit 7 set.  */
#define SYSCALL_SIGTRAP (SIGTRAP | 0x80)

/* The single-threaded native GNU/Linux target_ops.  We save a pointer
   for the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
                                       gdb_byte *,
                                       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
                                      enum target_object,
                                      const char *, gdb_byte *,
                                      const gdb_byte *,
                                      ULONGEST, LONGEST);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
                      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
                    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACEFORK cannot be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD cannot be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also
   have PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the currently used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event-loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we
     end up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static void linux_nat_async (void (*callback)
                             (enum inferior_event_type event_type,
                              void *context),
                             void *context);
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);
\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
        struct simple_pid_list *next = (*p)->next;

        *statusp = (*p)->status;
        xfree (*p);
        *p = next;
        return 1;
      }
  return 0;
}

\f
/* A helper function for linux_test_for_tracefork, called after fork ().  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}

/* Wrapper function for waitpid which handles EINTR.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}

/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the
   tracing options for ORIGINAL_PID, but we'll be setting them shortly
   anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.
   We create a child process, attach to it, use PTRACE_SETOPTIONS to
   enable fork tracing, and let it fork.  If the process exits, we
   assume that we can't use TRACEFORK; if we get the fork notification,
   and we can extract the new child's PID, then we assume that we
   can.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
           status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
        {
          warning (_("linux_test_for_tracefork: failed to kill child"));
          restore_child_signals_mask (&prev_mask);
          return;
        }

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
        warning (_("linux_test_for_tracefork: failed "
                   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
        warning (_("linux_test_for_tracefork: unexpected "
                   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
                PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
        {
          int second_status;

          linux_supports_tracefork_flag = 1;
          my_waitpid (second_pid, &second_status, 0);
          ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
          if (ret != 0)
            warning (_("linux_test_for_tracefork: "
                       "failed to kill second child"));
          my_waitpid (second_pid, &status, 0);
        }
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
               "(%d, status 0x%x)"), ret, status);

  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}

/* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.

   We try to enable syscall tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the
   tracing options for ORIGINAL_PID, but we'll be setting them shortly
   anyway.  */

static void
linux_test_for_tracesysgood (int original_pid)
{
  int ret;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  linux_supports_tracesysgood_flag = 0;

  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
  if (ret != 0)
    goto out;

  linux_supports_tracesysgood_flag = 1;
out:
  restore_child_signals_mask (&prev_mask);
}

/* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */

static int
linux_supports_tracesysgood (int pid)
{
  if (linux_supports_tracesysgood_flag == -1)
    linux_test_for_tracesysgood (pid);
  return linux_supports_tracesysgood_flag;
}

/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  */

static int
linux_supports_tracefork (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}

static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}

static void
linux_enable_tracesysgood (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (linux_supports_tracesysgood (pid) == 0)
    return;

  current_ptrace_options |= PTRACE_O_TRACESYSGOOD;

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

\f
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);

  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
    | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;

  if (linux_supports_tracevforkdone (pid))
    current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
     support read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
}

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
  linux_ptrace_init_warnings ();
}

static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
  linux_ptrace_init_warnings ();
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with a prototype compatible with make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  sigset_t prev_mask;
  int has_vforked;
  int parent_pid, child_pid;

  block_child_signals (&prev_mask);

  has_vforked = (inferior_thread ()->pending_follow.kind
                 == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (inferior_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (inferior_ptid);
  child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);

  if (!detach_fork)
    linux_enable_event_reporting (pid_to_ptid (child_pid));

  if (has_vforked
      && !non_stop /* Non-stop always resumes both branches.  */
      && (!target_is_async_p () || sync_execution)
      && !(follow_child || detach_fork || sched_multi))
    {
      /* The parent stays blocked inside the vfork syscall until the
         child execs or exits.  If we don't let the child run, then
         the parent stays blocked.  If we're telling the parent to run
         in the foreground, the user will not be able to ctrl-c to get
         back the terminal, effectively hanging the debug session.  */
      fprintf_filtered (gdb_stderr, _("\
Can not resume the parent process over vfork in the foreground while\n\
holding the child stopped.  Try \"set detach-on-fork\" or \
\"set schedule-multiple\".\n"));
      /* FIXME output string > 80 columns.  */
      restore_child_signals_mask (&prev_mask);
      return 1;
    }

  if (! follow_child)
    {
      struct lwp_info *child_lp = NULL;

      /* We're already attached to the parent, by default.  */

      /* Detach new forked process?  */
      if (detach_fork)
        {
          struct cleanup *old_chain;

          /* Before detaching from the child, remove all breakpoints
             from it.  If we forked, then this has already been taken
             care of by infrun.c.  If we vforked however, any
             breakpoint inserted in the parent is visible in the
             child, even those added while stopped in a vfork
             catchpoint.  This will remove the breakpoints from the
             parent also, but they'll be reinserted below.  */
          if (has_vforked)
            {
              /* Keep breakpoints list in sync.  */
              remove_breakpoints_pid (GET_PID (inferior_ptid));
            }

          if (info_verbose || debug_linux_nat)
            {
              target_terminal_ours ();
              fprintf_filtered (gdb_stdlog,
                                "Detaching after fork from "
                                "child process %d.\n",
                                child_pid);
            }

          old_chain = save_inferior_ptid ();
          inferior_ptid = ptid_build (child_pid, child_pid, 0);

          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->last_resume_kind = resume_stop;
          make_cleanup (delete_lwp_cleanup, child_lp);

          /* CHILD_LP has a new PID, therefore linux_nat_new_thread is
             not called for it.  See i386_inferior_data_get for the
             Linux kernel specifics.  Ensure linux_nat_prepare_to_resume
             will reset the hardware debug registers.  It is done by the
             linux_nat_new_thread call, which is being skipped in
             add_lwp above for the first lwp of a pid.  */
          gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
          if (linux_nat_new_thread != NULL)
            linux_nat_new_thread (child_lp);

          if (linux_nat_prepare_to_resume != NULL)
            linux_nat_prepare_to_resume (child_lp);
          ptrace (PTRACE_DETACH, child_pid, 0, 0);

          do_cleanups (old_chain);
        }
      else
        {
          struct inferior *parent_inf, *child_inf;
          struct cleanup *old_chain;

          /* Add process to GDB's tables.  */
          child_inf = add_inferior (child_pid);

          parent_inf = current_inferior ();
          child_inf->attach_flag = parent_inf->attach_flag;
          copy_terminal_info (child_inf, parent_inf);

          old_chain = save_inferior_ptid ();
          save_current_program_space ();

          inferior_ptid = ptid_build (child_pid, child_pid, 0);
          add_thread (inferior_ptid);
          child_lp = add_lwp (inferior_ptid);
          child_lp->stopped = 1;
          child_lp->last_resume_kind = resume_stop;
          child_inf->symfile_flags = SYMFILE_NO_READ;

          /* If this is a vfork child, then the address-space is
             shared with the parent.  */
          if (has_vforked)
            {
              child_inf->pspace = parent_inf->pspace;
              child_inf->aspace = parent_inf->aspace;

              /* The parent will be frozen until the child is done
                 with the shared region.  Keep track of the
                 parent.  */
              child_inf->vfork_parent = parent_inf;
              child_inf->pending_detach = 0;
              parent_inf->vfork_child = child_inf;
              parent_inf->pending_detach = 0;
            }
          else
            {
              child_inf->aspace = new_address_space ();
              child_inf->pspace = add_program_space (child_inf->aspace);
              child_inf->removable = 1;
              set_current_program_space (child_inf->pspace);
              clone_program_space (child_inf->pspace, parent_inf->pspace);

              /* Let the shared library layer (solib-svr4) learn about
                 this new process, relocate the cloned exec, pull in
                 shared libraries, and install the solib event
                 breakpoint.  If a "cloned-VM" event was propagated
                 better throughout the core, this wouldn't be
                 required.  */
              solib_create_inferior_hook (0);
            }

          /* Let the thread_db layer learn about this new process.  */
          check_for_thread_db ();

          do_cleanups (old_chain);
        }

      if (has_vforked)
        {
          struct lwp_info *parent_lp;
          struct inferior *parent_inf;

          parent_inf = current_inferior ();

          /* If we detached from the child, then we have to be careful
             to not insert breakpoints in the parent until the child
             is done with the shared memory region.  However, if we're
             staying attached to the child, then we can and should
             insert breakpoints, so that we can debug it.  A
             subsequent child exec or exit is enough to know when the
             child stops using the parent's address space.  */
          parent_inf->waiting_for_vfork_done = detach_fork;
          parent_inf->pspace->breakpoints_not_allowed = detach_fork;

          parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
          gdb_assert (linux_supports_tracefork_flag >= 0);

          if (linux_supports_tracevforkdone (0))
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: waiting for VFORK_DONE on %d\n",
                                    parent_pid);
              parent_lp->stopped = 1;

              /* We'll handle the VFORK_DONE event like any other
                 event, in target_wait.  */
            }
          else
            {
              /* We can't insert breakpoints until the child has
                 finished with the shared memory region.  We need to
                 wait until that happens.  Ideal would be to just
                 call:
                 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
                 - waitpid (parent_pid, &status, __WALL);
                 However, most architectures can't handle a syscall
                 being traced on the way out if it wasn't traced on
                 the way in.

                 We might also think to loop, continuing the child
                 until it exits or gets a SIGTRAP.  One problem is
                 that the child might call ptrace with PTRACE_TRACEME.

                 There's no simple and reliable way to figure out when
                 the vforked child will be done with its copy of the
                 shared memory.  We could step it out of the syscall,
                 two instructions, let it go, and then single-step the
                 parent once.  When we have hardware single-step, this
                 would work; with software single-step it could still
                 be made to work but we'd have to be able to insert
                 single-step breakpoints in the child, and we'd have
                 to insert -just- the single-step breakpoint in the
                 parent.  Very awkward.

                 In the end, the best we can do is to make sure it
                 runs for a little while.  Hopefully it will be out of
                 range of any breakpoints we reinsert.  Usually this
                 is only the single-step breakpoint at vfork's return
                 point.  */

              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "LCFF: no VFORK_DONE "
                                    "support, sleeping a bit\n");

              usleep (10000);

              /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
                 and leave it pending.  The next linux_nat_resume call
                 will notice a pending event, and bypasses actually
                 resuming the inferior.  */
              parent_lp->status = 0;
              parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
              parent_lp->stopped = 1;

              /* If we're in async mode, need to tell the event loop
                 there's something here to process.  */
              if (target_can_async_p ())
                async_file_mark ();
            }
        }
    }
  else
    {
      struct inferior *parent_inf, *child_inf;
      struct lwp_info *child_lp;
      struct program_space *parent_pspace;

      if (info_verbose || debug_linux_nat)
        {
          target_terminal_ours ();
          if (has_vforked)
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "vfork to child process %d.\n"),
                              parent_pid, child_pid);
          else
            fprintf_filtered (gdb_stdlog,
                              _("Attaching after process %d "
                                "fork to child process %d.\n"),
                              parent_pid, child_pid);
        }

      /* Add the new inferior first, so that the target_detach below
         doesn't unpush the target.  */

      child_inf = add_inferior (child_pid);

      parent_inf = current_inferior ();
      child_inf->attach_flag = parent_inf->attach_flag;
      copy_terminal_info (child_inf, parent_inf);

      parent_pspace = parent_inf->pspace;

      /* If we're vforking, we want to hold on to the parent until the
         child exits or execs.  At child exec or exit time we can
         remove the old breakpoints from the parent and detach or
         resume debugging it.  Otherwise, detach the parent now; we'll
         want to reuse its program/address spaces, but we can't set
         them to the child before removing breakpoints from the
         parent, otherwise, the breakpoints module could decide to
         remove breakpoints from the wrong process (since they'd be
         assigned to the same address space).  */

      if (has_vforked)
        {
          gdb_assert (child_inf->vfork_parent == NULL);
          gdb_assert (parent_inf->vfork_child == NULL);
          child_inf->vfork_parent = parent_inf;
          child_inf->pending_detach = 0;
          parent_inf->vfork_child = child_inf;
          parent_inf->pending_detach = detach_fork;
          parent_inf->waiting_for_vfork_done = 0;
        }
      else if (detach_fork)
        target_detach (NULL, 0);

      /* Note that the detach above leaves PARENT_INF dangling.  */

      /* Add the child thread to the appropriate lists, and switch to
         this new thread, before cloning the program space, and
         informing the solib layer about this new process.  */

      inferior_ptid = ptid_build (child_pid, child_pid, 0);
      add_thread (inferior_ptid);
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* If this is a vfork child, then the address-space is shared
         with the parent.  If we detached from the parent, then we can
         reuse the parent's program/address spaces.  */
      if (has_vforked || detach_fork)
        {
          child_inf->pspace = parent_pspace;
          child_inf->aspace = child_inf->pspace->aspace;
        }
      else
        {
          child_inf->aspace = new_address_space ();
          child_inf->pspace = add_program_space (child_inf->aspace);
          child_inf->removable = 1;
          child_inf->symfile_flags = SYMFILE_NO_READ;
          set_current_program_space (child_inf->pspace);
          clone_program_space (child_inf->pspace, parent_pspace);

          /* Let the shared library layer (solib-svr4) learn about
             this new process, relocate the cloned exec, pull in
             shared libraries, and install the solib event breakpoint.
             If a "cloned-VM" event was propagated better throughout
             the core, this wouldn't be required.  */
          solib_create_inferior_hook (0);
        }

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  restore_child_signals_mask (&prev_mask);
  return 0;
}

\f
static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
                                    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.  (See
     the illustrative sketch below.)

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     When debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

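/* For illustration only: a hypothetical sketch (not GDB code) of
   checking for events both in the initial process and in cloned LWPs,
   per the __WCLONE peculiarity described above.  The sigsuspend step
   matches the "sync mode" scheme documented at the top of this file;
   SIGCHLD is assumed to be blocked everywhere else, and SUSPEND_MASK
   is assumed to be the current mask minus SIGCHLD.  */
#if 0
static int
wait_for_any_event (int *statusp, sigset_t *suspend_mask)
{
  for (;;)
    {
      int pid;

      /* Check the initial process first...  */
      pid = my_waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
        return pid;

      /* ...then the cloned LWPs, which need __WCLONE to report most
         events.  */
      pid = my_waitpid (-1, statusp, __WCLONE | WNOHANG);
      if (pid > 0)
        return pid;

      /* Nothing yet; sleep until the next SIGCHLD.  Because SIGCHLD
         is blocked everywhere else, a signal that arrived before this
         point is still pending and wakes sigsuspend immediately.  */
      sigsuspend (suspend_mask);
    }
}
#endif
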
/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and Linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
        sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (int pid);

\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    {
      if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
        snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
                  strsignal (SIGTRAP));
      else
        snprintf (buf, sizeof (buf), "%s (stopped)",
                  strsignal (WSTOPSIG (status)));
    }
  else if (WIFSIGNALED (status))
    snprintf (buf, sizeof (buf), "%s (terminated)",
              strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
        {
          if (lp == lwp_list)
            lwp_list = lp->next;
          else
            lpprev->next = lp->next;

          lwp_free (lp);
        }
      else
        lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  Don't do this for the first
     thread though.  If we're spawning a child ("run"), the thread
     executes the shell wrapper first, and we shouldn't touch it until
     it execs the program we want to debug.  For "attach", it'd be
     okay to call the callback, but it's not necessary, because
     watchpoints can't yet have been inserted into the inferior.  */
  if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PTID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PTID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (is_lwp (ptid))
    lwp = GET_LWP (ptid);
  else
    lwp = GET_PID (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == GET_LWP (lp->ptid))
      return lp;

  return NULL;
}

/* Call CALLBACK with its second argument set to DATA for every LWP in
   the list.  If CALLBACK returns 1 for a particular LWP, return a
   pointer to the structure describing that LWP immediately.
   Otherwise return NULL.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
                   int (*callback) (struct lwp_info *, void *),
                   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
        {
          if ((*callback) (lp, data))
            return lp;
        }
    }

  return NULL;
}

/* Iterate like iterate_over_lwps does, except that when forking-off a
   child, call CALLBACK with CALLBACK_DATA specifically for that new
   child PID only.  */

void
linux_nat_iterate_watchpoint_lwps
  (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
{
  int inferior_pid = ptid_get_pid (inferior_ptid);
  struct inferior *inf = current_inferior ();

  if (inf->pid == inferior_pid)
    {
      /* Iterate all the threads of the current inferior.  Without
         specifying INFERIOR_PID it would iterate all threads of all
         inferiors, which is inappropriate for watchpoints.  */

      iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
    }
  else
    {
      /* Detaching a new child PID temporarily present in INFERIOR_PID.  */

      struct lwp_info *child_lp;
      struct cleanup *old_chain;
      pid_t child_pid = GET_PID (inferior_ptid);
      ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);

      gdb_assert (find_lwp_pid (child_ptid) == NULL);
      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
      old_chain = make_cleanup (delete_lwp_cleanup, child_lp);

      callback (child_lp, callback_data);

      do_cleanups (old_chain);
    }
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (GET_PID (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
        printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
                            int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
         stop, unless the kernel predates the TASK_STOPPED /
         TASK_TRACED distinction, in which case it might be in a
         ptrace stop.  Make sure it is in a ptrace stop; from there we
         can kill it, signal it, et cetera.

         First make sure there is a pending SIGSTOP.  Since we are
         already attached, the process cannot transition from stopped
         to running without a PTRACE_CONT; so we know this signal will
         go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
         probably already in the queue (unless this kernel is old
         enough to use TASK_STOPPED for ptrace stops); but since
         SIGSTOP is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the
         SIGSTOP (or a higher priority signal, just like normal
         PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
        warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
                            pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LNPAW: Received %s after attaching\n",
                            status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PTID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all
     threads, this assumption might be wrong; fortunately, this is very
     unlikely to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
        {
          if (linux_supports_tracefork_flag)
            {
              /* If we haven't stopped all threads when we get here,
                 we may have seen a thread listed in thread_db's list,
                 but not processed the PTRACE_EVENT_CLONE yet.  If
                 that's the case, ignore this new thread, and let
                 normal event handling discover it later.  */
              if (in_pid_list_p (stopped_pids, lwpid))
                {
                  /* We've already seen this thread stop, but we
                     haven't seen the PTRACE_EVENT_CLONE extended
                     event yet.  */
                  restore_child_signals_mask (&prev_mask);
                  return 0;
                }
              else
                {
                  int new_pid;
                  int status;

                  /* See if we've got a stop for this new child
                     pending.  If so, we're already attached.  */
                  new_pid = my_waitpid (lwpid, &status, WNOHANG);
                  if (new_pid == -1 && errno == ECHILD)
                    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
                  if (new_pid != -1)
                    {
                      if (WIFSTOPPED (status))
                        add_to_pid_list (&stopped_pids, lwpid, status);

                      restore_child_signals_mask (&prev_mask);
                      return 1;
                    }
                }
            }

          /* If we fail to attach to the thread, issue a warning,
             but continue.  One way this can happen is if thread
             creation is interrupted; as of Linux kernel 2.6.19, a
             bug may place threads in the thread list and then fail
             to create them.  */
          warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
                   safe_strerror (errno));
          restore_child_signals_mask (&prev_mask);
          return -1;
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
                            target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
        {
          restore_child_signals_mask (&prev_mask);
          return 1;
        }

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
        {
          lp->resumed = 1;
          lp->status = status;
        }

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
        {
          fprintf_unfiltered (gdb_stdlog,
                              "LLAL: waitpid %s received %s\n",
                              target_pid_to_str (ptid),
                              status_to_str (status));
        }
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/Linux ptrace layer uses to keep track of
         threads.  Note that this won't have been done already, since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
        lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  restore_child_signals_mask (&prev_mask);
  return 0;
}

static void
linux_nat_create_inferior (struct target_ops *ops,
                           char *exec_file, char *allargs, char **env,
                           int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
        {
          personality_set = 1;
          personality (personality_orig | ADDR_NO_RANDOMIZE);
        }
      if (errno != 0 || (personality_set
                         && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
        warning (_("Error disabling address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
        warning (_("Error restoring address space randomization: %s"),
                 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;
  volatile struct gdb_exception ex;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  TRY_CATCH (ex, RETURN_MASK_ERROR)
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  if (ex.reason < 0)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_warnings (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      throw_error (ex.error, "%s%s", buffer_s, message);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
                                       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
        {
          int exit_code = WEXITSTATUS (status);

          target_terminal_ours ();
          target_mourn_inferior ();
          if (exit_code == 0)
            error (_("Unable to attach: program exited normally."));
          else
            error (_("Unable to attach: program exited with code %d."),
                   exit_code);
        }
      else if (WIFSIGNALED (status))
        {
          enum gdb_signal signo;

          target_terminal_ours ();
          target_mourn_inferior ();

          signo = gdb_signal_from_host (WTERMSIG (status));
          error (_("Unable to attach: program terminated with signal "
                   "%s, %s."),
                 gdb_signal_to_name (signo),
                 gdb_signal_to_string (signo));
        }

      internal_error (__FILE__, __LINE__,
                      _("unexpected status %d for PID %ld"),
                      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LNA: waitpid %ld, saving status %s\n",
                        (long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

1706 /* Get pending status of LP. */
1707 static int
1708 get_pending_status (struct lwp_info *lp, int *status)
1709 {
1710 enum gdb_signal signo = GDB_SIGNAL_0;
1711
1712 /* If we paused threads momentarily, we may have stored pending
1713 events in lp->status or lp->waitstatus (see stop_wait_callback),
1714 and GDB core hasn't seen any signal for those threads.
1715 Otherwise, the last signal reported to the core is found in the
1716 thread object's stop_signal.
1717
1718 There's a corner case that isn't handled here at present. Only
1719 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1720 stop_signal make sense as a real signal to pass to the inferior.
1721 Some catchpoint related events, like
1722 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1723 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1724 those traps are debug API (ptrace in our case) related and
1725 induced; the inferior wouldn't see them if it wasn't being
1726 traced. Hence, we should never pass them to the inferior, even
1727 when set to pass state. Since this corner case isn't handled by
1728 infrun.c when proceeding with a signal, for consistency, neither
1729 do we handle it here (or elsewhere in the file we check for
1730 signal pass state). Normally SIGTRAP isn't set to pass state, so
1731 this is really a corner case. */
1732
1733 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1734 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1735 else if (lp->status)
1736 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1737 else if (non_stop && !is_executing (lp->ptid))
1738 {
1739 struct thread_info *tp = find_thread_ptid (lp->ptid);
1740
1741 signo = tp->suspend.stop_signal;
1742 }
1743 else if (!non_stop)
1744 {
1745 struct target_waitstatus last;
1746 ptid_t last_ptid;
1747
1748 get_last_target_status (&last_ptid, &last);
1749
1750 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1751 {
1752 struct thread_info *tp = find_thread_ptid (lp->ptid);
1753
1754 signo = tp->suspend.stop_signal;
1755 }
1756 }
1757
1758 *status = 0;
1759
1760 if (signo == GDB_SIGNAL_0)
1761 {
1762 if (debug_linux_nat)
1763 fprintf_unfiltered (gdb_stdlog,
1764 "GPT: lwp %s has no pending signal\n",
1765 target_pid_to_str (lp->ptid));
1766 }
1767 else if (!signal_pass_state (signo))
1768 {
1769 if (debug_linux_nat)
1770 fprintf_unfiltered (gdb_stdlog,
1771 "GPT: lwp %s had signal %s, "
1772 "but it is in no pass state\n",
1773 target_pid_to_str (lp->ptid),
1774 gdb_signal_to_string (signo));
1775 }
1776 else
1777 {
1778 *status = W_STOPCODE (gdb_signal_to_host (signo));
1779
1780 if (debug_linux_nat)
1781 fprintf_unfiltered (gdb_stdlog,
1782 "GPT: lwp %s has pending signal %s\n",
1783 target_pid_to_str (lp->ptid),
1784 gdb_signal_to_string (signo));
1785 }
1786
1787 return 0;
1788 }
1789
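/* Illustration only (not compiled): with the common glibc encoding,
   W_STOPCODE (sig) expands to ((sig) << 8) | 0x7f, i.e. a raw wait
   status "as if" the LWP had stopped with SIG, so that:

     int status = W_STOPCODE (SIGINT);
     gdb_assert (WIFSTOPPED (status));
     gdb_assert (WSTOPSIG (status) == SIGINT);

   This is the form get_pending_status hands back through *STATUS.  */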
1790 static int
1791 detach_callback (struct lwp_info *lp, void *data)
1792 {
1793 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1794
1795 if (debug_linux_nat && lp->status)
1796 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1797 strsignal (WSTOPSIG (lp->status)),
1798 target_pid_to_str (lp->ptid));
1799
1800 /* If there is a pending SIGSTOP, get rid of it. */
1801 if (lp->signalled)
1802 {
1803 if (debug_linux_nat)
1804 fprintf_unfiltered (gdb_stdlog,
1805 "DC: Sending SIGCONT to %s\n",
1806 target_pid_to_str (lp->ptid));
1807
1808 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1809 lp->signalled = 0;
1810 }
1811
1812 /* We don't actually detach from the LWP that has an id equal to the
1813 overall process id just yet. */
1814 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1815 {
1816 int status = 0;
1817
1818 /* Pass on any pending signal for this LWP. */
1819 get_pending_status (lp, &status);
1820
1821 if (linux_nat_prepare_to_resume != NULL)
1822 linux_nat_prepare_to_resume (lp);
1823 errno = 0;
1824 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1825 WSTOPSIG (status)) < 0)
1826 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1827 safe_strerror (errno));
1828
1829 if (debug_linux_nat)
1830 fprintf_unfiltered (gdb_stdlog,
1831 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1832 target_pid_to_str (lp->ptid),
1833 strsignal (WSTOPSIG (status)));
1834
1835 delete_lwp (lp->ptid);
1836 }
1837
1838 return 0;
1839 }
1840
1841 static void
1842 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1843 {
1844 int pid;
1845 int status;
1846 struct lwp_info *main_lwp;
1847
1848 pid = GET_PID (inferior_ptid);
1849
1850 /* Don't unregister from the event loop, as there may be other
1851 inferiors running. */
1852
1853 /* Stop all threads before detaching. ptrace requires that the
1854 thread is stopped to successfully detach. */
1855 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1856 /* ... and wait until all of them have reported back that
1857 they're no longer running. */
1858 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1859
1860 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1861
1862 /* Only the initial process should be left right now. */
1863 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1864
1865 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1866
1867 /* Pass on any pending signal for the last LWP. */
1868 if ((args == NULL || *args == '\0')
1869 && get_pending_status (main_lwp, &status) != -1
1870 && WIFSTOPPED (status))
1871 {
1872 /* Put the signal number in ARGS so that inf_ptrace_detach will
1873 pass it along with PTRACE_DETACH. */
1874 args = alloca (8);
1875 sprintf (args, "%d", (int) WSTOPSIG (status));
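/* (8 bytes is plenty here: WSTOPSIG yields at most a three-digit
   signal number plus the terminating NUL.) */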
1876 if (debug_linux_nat)
1877 fprintf_unfiltered (gdb_stdlog,
1878 "LND: Sending signal %s to %s\n",
1879 args,
1880 target_pid_to_str (main_lwp->ptid));
1881 }
1882
1883 if (linux_nat_prepare_to_resume != NULL)
1884 linux_nat_prepare_to_resume (main_lwp);
1885 delete_lwp (main_lwp->ptid);
1886
1887 if (forks_exist_p ())
1888 {
1889 /* Multi-fork case. The current inferior_ptid is being detached
1890 from, but there are other viable forks to debug. Detach from
1891 the current fork, and context-switch to the first
1892 available. */
1893 linux_fork_detach (args, from_tty);
1894 }
1895 else
1896 linux_ops->to_detach (ops, args, from_tty);
1897 }
1898
1899 /* Resume LP. */
1900
1901 static void
1902 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1903 {
1904 if (lp->stopped)
1905 {
1906 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1907
1908 if (inf->vfork_child != NULL)
1909 {
1910 if (debug_linux_nat)
1911 fprintf_unfiltered (gdb_stdlog,
1912 "RC: Not resuming %s (vfork parent)\n",
1913 target_pid_to_str (lp->ptid));
1914 }
1915 else if (lp->status == 0
1916 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1917 {
1918 if (debug_linux_nat)
1919 fprintf_unfiltered (gdb_stdlog,
1920 "RC: Resuming sibling %s, %s, %s\n",
1921 target_pid_to_str (lp->ptid),
1922 (signo != GDB_SIGNAL_0
1923 ? strsignal (gdb_signal_to_host (signo))
1924 : "0"),
1925 step ? "step" : "resume");
1926
1927 if (linux_nat_prepare_to_resume != NULL)
1928 linux_nat_prepare_to_resume (lp);
1929 linux_ops->to_resume (linux_ops,
1930 pid_to_ptid (GET_LWP (lp->ptid)),
1931 step, signo);
1932 lp->stopped = 0;
1933 lp->step = step;
1934 lp->stopped_by_watchpoint = 0;
1935 }
1936 else
1937 {
1938 if (debug_linux_nat)
1939 fprintf_unfiltered (gdb_stdlog,
1940 "RC: Not resuming sibling %s (has pending)\n",
1941 target_pid_to_str (lp->ptid));
1942 }
1943 }
1944 else
1945 {
1946 if (debug_linux_nat)
1947 fprintf_unfiltered (gdb_stdlog,
1948 "RC: Not resuming sibling %s (not stopped)\n",
1949 target_pid_to_str (lp->ptid));
1950 }
1951 }
1952
1953 /* Resume LWP, with the last stop signal, if it is in pass state. */
1954
1955 static int
1956 linux_nat_resume_callback (struct lwp_info *lp, void *data)
1957 {
1958 enum gdb_signal signo = GDB_SIGNAL_0;
1959
1960 if (lp->stopped)
1961 {
1962 struct thread_info *thread;
1963
1964 thread = find_thread_ptid (lp->ptid);
1965 if (thread != NULL)
1966 {
1967 if (signal_pass_state (thread->suspend.stop_signal))
1968 signo = thread->suspend.stop_signal;
1969 thread->suspend.stop_signal = GDB_SIGNAL_0;
1970 }
1971 }
1972
1973 resume_lwp (lp, 0, signo);
1974 return 0;
1975 }
1976
1977 static int
1978 resume_clear_callback (struct lwp_info *lp, void *data)
1979 {
1980 lp->resumed = 0;
1981 lp->last_resume_kind = resume_stop;
1982 return 0;
1983 }
1984
1985 static int
1986 resume_set_callback (struct lwp_info *lp, void *data)
1987 {
1988 lp->resumed = 1;
1989 lp->last_resume_kind = resume_continue;
1990 return 0;
1991 }
1992
1993 static void
1994 linux_nat_resume (struct target_ops *ops,
1995 ptid_t ptid, int step, enum gdb_signal signo)
1996 {
1997 sigset_t prev_mask;
1998 struct lwp_info *lp;
1999 int resume_many;
2000
2001 if (debug_linux_nat)
2002 fprintf_unfiltered (gdb_stdlog,
2003 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
2004 step ? "step" : "resume",
2005 target_pid_to_str (ptid),
2006 (signo != GDB_SIGNAL_0
2007 ? strsignal (gdb_signal_to_host (signo)) : "0"),
2008 target_pid_to_str (inferior_ptid));
2009
2010 block_child_signals (&prev_mask);
2011
2012 /* A specific PTID means `step only this process id'. */
2013 resume_many = (ptid_equal (minus_one_ptid, ptid)
2014 || ptid_is_pid (ptid));
2015
2016 /* Mark the lwps we're resuming as resumed. */
2017 iterate_over_lwps (ptid, resume_set_callback, NULL);
2018
2019 /* See if it's the current inferior that should be handled
2020 specially. */
2021 if (resume_many)
2022 lp = find_lwp_pid (inferior_ptid);
2023 else
2024 lp = find_lwp_pid (ptid);
2025 gdb_assert (lp != NULL);
2026
2027 /* Remember if we're stepping. */
2028 lp->step = step;
2029 lp->last_resume_kind = step ? resume_step : resume_continue;
2030
2031 /* If we have a pending wait status for this thread, there is no
2032 point in resuming the process. But first make sure that
2033 linux_nat_wait won't preemptively handle the event - we
2034 should never take this short-circuit if we are going to
2035 leave LP running, since we have skipped resuming all the
2036 other threads. This bit of code needs to be synchronized
2037 with linux_nat_wait. */
2038
2039 if (lp->status && WIFSTOPPED (lp->status))
2040 {
2041 if (!lp->step
2042 && WSTOPSIG (lp->status)
2043 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
2044 {
2045 if (debug_linux_nat)
2046 fprintf_unfiltered (gdb_stdlog,
2047 "LLR: Not short circuiting for ignored "
2048 "status 0x%x\n", lp->status);
2049
2050 /* FIXME: What should we do if we are supposed to continue
2051 this thread with a signal? */
2052 gdb_assert (signo == GDB_SIGNAL_0);
2053 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
2054 lp->status = 0;
2055 }
2056 }
2057
2058 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2059 {
2060 /* FIXME: What should we do if we are supposed to continue
2061 this thread with a signal? */
2062 gdb_assert (signo == GDB_SIGNAL_0);
2063
2064 if (debug_linux_nat)
2065 fprintf_unfiltered (gdb_stdlog,
2066 "LLR: Short circuiting for status 0x%x\n",
2067 lp->status);
2068
2069 restore_child_signals_mask (&prev_mask);
2070 if (target_can_async_p ())
2071 {
2072 target_async (inferior_event_handler, 0);
2073 /* Tell the event loop we have something to process. */
2074 async_file_mark ();
2075 }
2076 return;
2077 }
2078
2079 /* Mark LWP as not stopped to prevent it from being continued by
2080 linux_nat_resume_callback. */
2081 lp->stopped = 0;
2082
2083 if (resume_many)
2084 iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);
2085
2086 /* Convert to something the lower layer understands. */
2087 ptid = pid_to_ptid (GET_LWP (lp->ptid));
2088
2089 if (linux_nat_prepare_to_resume != NULL)
2090 linux_nat_prepare_to_resume (lp);
2091 linux_ops->to_resume (linux_ops, ptid, step, signo);
2092 lp->stopped_by_watchpoint = 0;
2093
2094 if (debug_linux_nat)
2095 fprintf_unfiltered (gdb_stdlog,
2096 "LLR: %s %s, %s (resume event thread)\n",
2097 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2098 target_pid_to_str (ptid),
2099 (signo != GDB_SIGNAL_0
2100 ? strsignal (gdb_signal_to_host (signo)) : "0"));
2101
2102 restore_child_signals_mask (&prev_mask);
2103 if (target_can_async_p ())
2104 target_async (inferior_event_handler, 0);
2105 }
2106
2107 /* Send a signal to an LWP. */
2108
2109 static int
2110 kill_lwp (int lwpid, int signo)
2111 {
2112 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2113 fails, then we are not using nptl threads and we should be using kill. */
2114
2115 #ifdef HAVE_TKILL_SYSCALL
2116 {
2117 static int tkill_failed;
2118
2119 if (!tkill_failed)
2120 {
2121 int ret;
2122
2123 errno = 0;
2124 ret = syscall (__NR_tkill, lwpid, signo);
2125 if (errno != ENOSYS)
2126 return ret;
2127 tkill_failed = 1;
2128 }
2129 }
2130 #endif
2131
2132 return kill (lwpid, signo);
2133 }
2134
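/* Sketch (illustration only) of why the tkill fallback above matters.
   With NPTL all threads share one process, so plain kill targets the
   whole thread group, while the tkill syscall targets one thread:

     syscall (__NR_tkill, lwpid, SIGSTOP);   <- stops only LWPID
     kill (pid, SIGSTOP);                    <- stops the whole group

   On the older LinuxThreads library each thread had its own PID, so
   kill alone was enough; that is the fallback taken once tkill has
   failed with ENOSYS.  */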
2135 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2136 event, check if the core is interested in it: if not, ignore the
2137 event, and keep waiting; otherwise, we need to toggle the LWP's
2138 syscall entry/exit status, since the ptrace event itself doesn't
2139 indicate it, and report the trap to higher layers. */
2140
2141 static int
2142 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2143 {
2144 struct target_waitstatus *ourstatus = &lp->waitstatus;
2145 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2146 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2147
2148 if (stopping)
2149 {
2150 /* If we're stopping threads, there's a SIGSTOP pending, which
2151 makes it so that the LWP reports an immediate syscall return,
2152 followed by the SIGSTOP. Skip seeing that "return" using
2153 PTRACE_CONT directly, and let stop_wait_callback collect the
2154 SIGSTOP. Later, when the thread is resumed, a new syscall
2155 entry event is delivered. If we didn't do this (and returned 0), we'd
2156 leave a syscall entry pending, and our caller, by using
2157 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2158 itself. Later, when the user re-resumes this LWP, we'd see
2159 another syscall entry event and we'd mistake it for a return.
2160
2161 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2162 (leaving immediately with LWP->signalled set, without issuing
2163 a PTRACE_CONT), it would still be problematic to leave this
2164 syscall enter pending, as later when the thread is resumed,
2165 it would then see the same syscall exit mentioned above,
2166 followed by the delayed SIGSTOP, while the syscall didn't
2167 actually get to execute. It seems it would be even more
2168 confusing to the user. */
2169
2170 if (debug_linux_nat)
2171 fprintf_unfiltered (gdb_stdlog,
2172 "LHST: ignoring syscall %d "
2173 "for LWP %ld (stopping threads), "
2174 "resuming with PTRACE_CONT for SIGSTOP\n",
2175 syscall_number,
2176 GET_LWP (lp->ptid));
2177
2178 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2179 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2180 return 1;
2181 }
2182
2183 if (catch_syscall_enabled ())
2184 {
2185 /* Always update the entry/return state, even if this particular
2186 syscall isn't interesting to the core now. In async mode,
2187 the user could install a new catchpoint for this syscall
2188 between syscall enter/return, and we'll need to know to
2189 report a syscall return if that happens. */
2190 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2191 ? TARGET_WAITKIND_SYSCALL_RETURN
2192 : TARGET_WAITKIND_SYSCALL_ENTRY);
2193
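/* (Illustration: with PTRACE_O_TRACESYSGOOD, the kernel reports both
   the entry and the exit side of a syscall with the very same
   SIGTRAP | 0x80 stop; nothing in the event itself says which side
   it is, so entries and returns must simply alternate per LWP, which
   is what the toggle above implements.)  */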
2194 if (catching_syscall_number (syscall_number))
2195 {
2196 /* Alright, an event to report. */
2197 ourstatus->kind = lp->syscall_state;
2198 ourstatus->value.syscall_number = syscall_number;
2199
2200 if (debug_linux_nat)
2201 fprintf_unfiltered (gdb_stdlog,
2202 "LHST: stopping for %s of syscall %d"
2203 " for LWP %ld\n",
2204 lp->syscall_state
2205 == TARGET_WAITKIND_SYSCALL_ENTRY
2206 ? "entry" : "return",
2207 syscall_number,
2208 GET_LWP (lp->ptid));
2209 return 0;
2210 }
2211
2212 if (debug_linux_nat)
2213 fprintf_unfiltered (gdb_stdlog,
2214 "LHST: ignoring %s of syscall %d "
2215 "for LWP %ld\n",
2216 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2217 ? "entry" : "return",
2218 syscall_number,
2219 GET_LWP (lp->ptid));
2220 }
2221 else
2222 {
2223 /* If we had been syscall tracing, and hence used PT_SYSCALL
2224 before on this LWP, it could happen that the user removes all
2225 syscall catchpoints before we get to process this event.
2226 There are two noteworthy issues here:
2227
2228 - When stopped at a syscall entry event, resuming with
2229 PT_STEP still resumes executing the syscall and reports a
2230 syscall return.
2231
2232 - Only PT_SYSCALL catches syscall enters. If we last
2233 single-stepped this thread, then this event can't be a
2234 syscall enter; having just single-stepped, it has to be
2235 a syscall exit.
2236
2237 The points above mean that the next resume, be it PT_STEP or
2238 PT_CONTINUE, cannot trigger a syscall trace event. */
2239 if (debug_linux_nat)
2240 fprintf_unfiltered (gdb_stdlog,
2241 "LHST: caught syscall event "
2242 "with no syscall catchpoints."
2243 " %d for LWP %ld, ignoring\n",
2244 syscall_number,
2245 GET_LWP (lp->ptid));
2246 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2247 }
2248
2249 /* The core isn't interested in this event. For efficiency, avoid
2250 stopping all threads only to have the core resume them all again.
2251 Since we're not stopping threads, if we're still syscall tracing
2252 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2253 subsequent syscall. Simply resume using the inf-ptrace layer,
2254 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2255
2256 /* Note that gdbarch_get_syscall_number may access registers, hence
2257 fill a regcache. */
2258 registers_changed ();
2259 if (linux_nat_prepare_to_resume != NULL)
2260 linux_nat_prepare_to_resume (lp);
2261 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2262 lp->step, GDB_SIGNAL_0);
2263 return 1;
2264 }
2265
2266 /* Handle a GNU/Linux extended wait response. If we see a clone
2267 event, we need to add the new LWP to our list (and not report the
2268 trap to higher layers). This function returns non-zero if the
2269 event should be ignored and we should wait again. If STOPPING is
2270 true, the new LWP remains stopped, otherwise it is continued. */
2271
2272 static int
2273 linux_handle_extended_wait (struct lwp_info *lp, int status,
2274 int stopping)
2275 {
2276 int pid = GET_LWP (lp->ptid);
2277 struct target_waitstatus *ourstatus = &lp->waitstatus;
2278 int event = status >> 16;
2279
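/* Illustration only: with PTRACE_O_TRACE* options set, the kernel
   folds the event code into the upper bits of the wait status.  A
   clone event, for example, arrives as

     status == ((SIGTRAP | (PTRACE_EVENT_CLONE << 8)) << 8) | 0x7f

   so WIFSTOPPED/WSTOPSIG still see a plain SIGTRAP stop, and
   "status >> 16" recovers PTRACE_EVENT_CLONE.  */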
2280 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2281 || event == PTRACE_EVENT_CLONE)
2282 {
2283 unsigned long new_pid;
2284 int ret;
2285
2286 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2287
2288 /* If we haven't already seen the new PID stop, wait for it now. */
2289 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2290 {
2291 /* The new child has a pending SIGSTOP. We can't affect it until it
2292 hits the SIGSTOP, but we're already attached. */
2293 ret = my_waitpid (new_pid, &status,
2294 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2295 if (ret == -1)
2296 perror_with_name (_("waiting for new child"));
2297 else if (ret != new_pid)
2298 internal_error (__FILE__, __LINE__,
2299 _("wait returned unexpected PID %d"), ret);
2300 else if (!WIFSTOPPED (status))
2301 internal_error (__FILE__, __LINE__,
2302 _("wait returned unexpected status 0x%x"), status);
2303 }
2304
2305 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2306
2307 if (event == PTRACE_EVENT_FORK
2308 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2309 {
2310 /* Handle checkpointing by linux-fork.c here as a special
2311 case. We don't want the follow-fork-mode or 'catch fork'
2312 to interfere with this. */
2313
2314 /* This won't actually modify the breakpoint list, but will
2315 physically remove the breakpoints from the child. */
2316 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2317
2318 /* Retain child fork in ptrace (stopped) state. */
2319 if (!find_fork_pid (new_pid))
2320 add_fork (new_pid);
2321
2322 /* Report as spurious, so that infrun doesn't want to follow
2323 this fork. We're actually doing an infcall in
2324 linux-fork.c. */
2325 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2326 linux_enable_event_reporting (pid_to_ptid (new_pid));
2327
2328 /* Report the stop to the core. */
2329 return 0;
2330 }
2331
2332 if (event == PTRACE_EVENT_FORK)
2333 ourstatus->kind = TARGET_WAITKIND_FORKED;
2334 else if (event == PTRACE_EVENT_VFORK)
2335 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2336 else
2337 {
2338 struct lwp_info *new_lp;
2339
2340 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2341
2342 if (debug_linux_nat)
2343 fprintf_unfiltered (gdb_stdlog,
2344 "LHEW: Got clone event "
2345 "from LWP %d, new child is LWP %ld\n",
2346 pid, new_pid);
2347
2348 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2349 new_lp->cloned = 1;
2350 new_lp->stopped = 1;
2351
2352 if (WSTOPSIG (status) != SIGSTOP)
2353 {
2354 /* This can happen if someone starts sending the new
2355 thread signals with a lower number than SIGSTOP
2356 (e.g. SIGUSR1) before it gets a chance to run.
2357 This is an unlikely case, and harder to handle for
2358 fork / vfork than for clone, so we do not try - but
2359 we handle it for clone events here. We'll send
2360 the other signal on to the thread below. */
2361
2362 new_lp->signalled = 1;
2363 }
2364 else
2365 {
2366 struct thread_info *tp;
2367
2368 /* When we stop for an event in some other thread, and
2369 pull the thread list just as this thread has cloned,
2370 we'll have seen the new thread in the thread_db list
2371 before handling the CLONE event (glibc's
2372 pthread_create adds the new thread to the thread list
2373 before clone'ing, and has the kernel fill in the
2374 thread's tid on the clone call with
2375 CLONE_PARENT_SETTID). If that happened, and the core
2376 had requested the new thread to stop, we'll have
2377 killed it with SIGSTOP. But since SIGSTOP is not an
2378 RT signal, it can only be queued once. We need to be
2379 careful to not resume the LWP if we wanted it to
2380 stop. In that case, we'll leave the SIGSTOP pending.
2381 It will later be reported as GDB_SIGNAL_0. */
2382 tp = find_thread_ptid (new_lp->ptid);
2383 if (tp != NULL && tp->stop_requested)
2384 new_lp->last_resume_kind = resume_stop;
2385 else
2386 status = 0;
2387 }
2388
2389 if (non_stop)
2390 {
2391 /* Add the new thread to GDB's lists as soon as possible
2392 so that:
2393
2394 1) the frontend doesn't have to wait for a stop to
2395 display them, and,
2396
2397 2) we tag it with the correct running state. */
2398
2399 /* If the thread_db layer is active, let it know about
2400 this new thread, and add it to GDB's list. */
2401 if (!thread_db_attach_lwp (new_lp->ptid))
2402 {
2403 /* We're not using thread_db. Add it to GDB's
2404 list. */
2405 target_post_attach (GET_LWP (new_lp->ptid));
2406 add_thread (new_lp->ptid);
2407 }
2408
2409 if (!stopping)
2410 {
2411 set_running (new_lp->ptid, 1);
2412 set_executing (new_lp->ptid, 1);
2413 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2414 resume_stop. */
2415 new_lp->last_resume_kind = resume_continue;
2416 }
2417 }
2418
2419 if (status != 0)
2420 {
2421 /* We created NEW_LP so it cannot yet contain STATUS. */
2422 gdb_assert (new_lp->status == 0);
2423
2424 /* Save the wait status to report later. */
2425 if (debug_linux_nat)
2426 fprintf_unfiltered (gdb_stdlog,
2427 "LHEW: waitpid of new LWP %ld, "
2428 "saving status %s\n",
2429 (long) GET_LWP (new_lp->ptid),
2430 status_to_str (status));
2431 new_lp->status = status;
2432 }
2433
2434 /* Note the need to use the low target ops to resume, to
2435 handle resuming with PT_SYSCALL if we have syscall
2436 catchpoints. */
2437 if (!stopping)
2438 {
2439 new_lp->resumed = 1;
2440
2441 if (status == 0)
2442 {
2443 gdb_assert (new_lp->last_resume_kind == resume_continue);
2444 if (debug_linux_nat)
2445 fprintf_unfiltered (gdb_stdlog,
2446 "LHEW: resuming new LWP %ld\n",
2447 GET_LWP (new_lp->ptid));
2448 if (linux_nat_prepare_to_resume != NULL)
2449 linux_nat_prepare_to_resume (new_lp);
2450 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2451 0, GDB_SIGNAL_0);
2452 new_lp->stopped = 0;
2453 }
2454 }
2455
2456 if (debug_linux_nat)
2457 fprintf_unfiltered (gdb_stdlog,
2458 "LHEW: resuming parent LWP %d\n", pid);
2459 if (linux_nat_prepare_to_resume != NULL)
2460 linux_nat_prepare_to_resume (lp);
2461 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2462 0, GDB_SIGNAL_0);
2463
2464 return 1;
2465 }
2466
2467 return 0;
2468 }
2469
2470 if (event == PTRACE_EVENT_EXEC)
2471 {
2472 if (debug_linux_nat)
2473 fprintf_unfiltered (gdb_stdlog,
2474 "LHEW: Got exec event from LWP %ld\n",
2475 GET_LWP (lp->ptid));
2476
2477 ourstatus->kind = TARGET_WAITKIND_EXECD;
2478 ourstatus->value.execd_pathname
2479 = xstrdup (linux_child_pid_to_exec_file (pid));
2480
2481 return 0;
2482 }
2483
2484 if (event == PTRACE_EVENT_VFORK_DONE)
2485 {
2486 if (current_inferior ()->waiting_for_vfork_done)
2487 {
2488 if (debug_linux_nat)
2489 fprintf_unfiltered (gdb_stdlog,
2490 "LHEW: Got expected PTRACE_EVENT_"
2491 "VFORK_DONE from LWP %ld: stopping\n",
2492 GET_LWP (lp->ptid));
2493
2494 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2495 return 0;
2496 }
2497
2498 if (debug_linux_nat)
2499 fprintf_unfiltered (gdb_stdlog,
2500 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2501 "from LWP %ld: resuming\n",
2502 GET_LWP (lp->ptid));
2503 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2504 return 1;
2505 }
2506
2507 internal_error (__FILE__, __LINE__,
2508 _("unknown ptrace event %d"), event);
2509 }
2510
2511 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2512 exited. */
2513
2514 static int
2515 wait_lwp (struct lwp_info *lp)
2516 {
2517 pid_t pid;
2518 int status = 0;
2519 int thread_dead = 0;
2520 sigset_t prev_mask;
2521
2522 gdb_assert (!lp->stopped);
2523 gdb_assert (lp->status == 0);
2524
2525 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2526 block_child_signals (&prev_mask);
2527
2528 for (;;)
2529 {
2530 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2531 was right and we should just call sigsuspend. */
2532
2533 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2534 if (pid == -1 && errno == ECHILD)
2535 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2536 if (pid == -1 && errno == ECHILD)
2537 {
2538 /* The thread has previously exited. We need to delete it
2539 now because, for some vendor 2.4 kernels with NPTL
2540 support backported, there won't be an exit event unless
2541 it is the main thread. 2.6 kernels will report an exit
2542 event for each thread that exits, as expected. */
2543 thread_dead = 1;
2544 if (debug_linux_nat)
2545 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2546 target_pid_to_str (lp->ptid));
2547 }
2548 if (pid != 0)
2549 break;
2550
2551 /* Bugs 10970, 12702.
2552 Thread group leader may have exited in which case we'll lock up in
2553 waitpid if there are other threads, even if they are all zombies too.
2554 Basically, we're not supposed to use waitpid this way.
2555 __WCLONE is not applicable for the leader so we can't use that.
2556 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2557 process; it gets ESRCH both for the zombie and for running processes.
2558
2559 As a workaround, check if we're waiting for the thread group leader and
2560 if it's a zombie, and avoid calling waitpid if it is.
2561
2562 This is racy - what if the tgl becomes a zombie right after we check?
2563 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2564 a blocking waitpid, but linux_proc_pid_is_zombie stays safe this way. */
2565
2566 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2567 && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
2568 {
2569 thread_dead = 1;
2570 if (debug_linux_nat)
2571 fprintf_unfiltered (gdb_stdlog,
2572 "WL: Thread group leader %s vanished.\n",
2573 target_pid_to_str (lp->ptid));
2574 break;
2575 }
2576
2577 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2578 handlers get invoked even though our caller had them intentionally
2579 blocked by block_child_signals. Only the loop in linux_nat_wait_1
2580 is sensitive to this, and there, if we get called, my_waitpid runs
2581 again before that loop reaches sigsuspend, so it is safe to let the
2582 handlers execute here. */
2583
2584 sigsuspend (&suspend_mask);
2585 }
2586
2587 restore_child_signals_mask (&prev_mask);
2588
2589 if (!thread_dead)
2590 {
2591 gdb_assert (pid == GET_LWP (lp->ptid));
2592
2593 if (debug_linux_nat)
2594 {
2595 fprintf_unfiltered (gdb_stdlog,
2596 "WL: waitpid %s received %s\n",
2597 target_pid_to_str (lp->ptid),
2598 status_to_str (status));
2599 }
2600
2601 /* Check if the thread has exited. */
2602 if (WIFEXITED (status) || WIFSIGNALED (status))
2603 {
2604 thread_dead = 1;
2605 if (debug_linux_nat)
2606 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2607 target_pid_to_str (lp->ptid));
2608 }
2609 }
2610
2611 if (thread_dead)
2612 {
2613 exit_lwp (lp);
2614 return 0;
2615 }
2616
2617 gdb_assert (WIFSTOPPED (status));
2618
2619 /* Handle GNU/Linux's syscall SIGTRAPs. */
2620 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2621 {
2622 /* No longer need the sysgood bit. The ptrace event ends up
2623 recorded in lp->waitstatus if we care for it. We can carry
2624 on handling the event like a regular SIGTRAP from here
2625 on. */
2626 status = W_STOPCODE (SIGTRAP);
2627 if (linux_handle_syscall_trap (lp, 1))
2628 return wait_lwp (lp);
2629 }
2630
2631 /* Handle GNU/Linux's extended waitstatus for trace events. */
2632 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2633 {
2634 if (debug_linux_nat)
2635 fprintf_unfiltered (gdb_stdlog,
2636 "WL: Handling extended status 0x%06x\n",
2637 status);
2638 if (linux_handle_extended_wait (lp, status, 1))
2639 return wait_lwp (lp);
2640 }
2641
2642 return status;
2643 }
2644
2645 /* Send a SIGSTOP to LP. */
2646
2647 static int
2648 stop_callback (struct lwp_info *lp, void *data)
2649 {
2650 if (!lp->stopped && !lp->signalled)
2651 {
2652 int ret;
2653
2654 if (debug_linux_nat)
2655 {
2656 fprintf_unfiltered (gdb_stdlog,
2657 "SC: kill %s **<SIGSTOP>**\n",
2658 target_pid_to_str (lp->ptid));
2659 }
2660 errno = 0;
2661 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2662 if (debug_linux_nat)
2663 {
2664 fprintf_unfiltered (gdb_stdlog,
2665 "SC: lwp kill %d %s\n",
2666 ret,
2667 errno ? safe_strerror (errno) : "ERRNO-OK");
2668 }
2669
2670 lp->signalled = 1;
2671 gdb_assert (lp->status == 0);
2672 }
2673
2674 return 0;
2675 }
2676
2677 /* Request a stop on LWP. */
2678
2679 void
2680 linux_stop_lwp (struct lwp_info *lwp)
2681 {
2682 stop_callback (lwp, NULL);
2683 }
2684
2685 /* Return non-zero if LWP PID has a pending SIGINT. */
2686
2687 static int
2688 linux_nat_has_pending_sigint (int pid)
2689 {
2690 sigset_t pending, blocked, ignored;
2691
2692 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2693
2694 if (sigismember (&pending, SIGINT)
2695 && !sigismember (&ignored, SIGINT))
2696 return 1;
2697
2698 return 0;
2699 }
2700
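/* For reference (sketch, format per proc(5)): the sets above come
   from /proc/PID/status, which reports them as hex masks with bit 0
   standing for signal 1, e.g.:

     SigPnd: 0000000000000002   <- SIGINT (2) pending
     SigBlk: 0000000000000000
     SigIgn: 0000000000384004

   A SIGINT that is pending but also ignored would never be
   delivered, hence the double check above.  */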
2701 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2702
2703 static int
2704 set_ignore_sigint (struct lwp_info *lp, void *data)
2705 {
2706 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2707 flag to consume the next one. */
2708 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2709 && WSTOPSIG (lp->status) == SIGINT)
2710 lp->status = 0;
2711 else
2712 lp->ignore_sigint = 1;
2713
2714 return 0;
2715 }
2716
2717 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2718 This function is called after we know the LWP has stopped; if the LWP
2719 stopped before the expected SIGINT was delivered, then it will never have
2720 arrived. Also, if the signal was delivered to a shared queue and consumed
2721 by a different thread, it will never be delivered to this LWP. */
2722
2723 static void
2724 maybe_clear_ignore_sigint (struct lwp_info *lp)
2725 {
2726 if (!lp->ignore_sigint)
2727 return;
2728
2729 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2730 {
2731 if (debug_linux_nat)
2732 fprintf_unfiltered (gdb_stdlog,
2733 "MCIS: Clearing bogus flag for %s\n",
2734 target_pid_to_str (lp->ptid));
2735 lp->ignore_sigint = 0;
2736 }
2737 }
2738
2739 /* Fetch info on a possibly triggered data watchpoint and store it
2740 in LP.
2741
2742 On some archs, like x86, that use debug registers to set
2743 watchpoints, it's possible that the way to know which watched
2744 address trapped is to check the register that is used to select
2745 which address to watch. Problem is, between setting the watchpoint
2746 and reading back which data address trapped, the user may change
2747 the set of watchpoints, and, as a consequence, GDB changes the
2748 debug registers in the inferior. To avoid reading back a stale
2749 stopped-data-address when that happens, we cache in LP the fact
2750 that a watchpoint trapped, and the corresponding data address, as
2751 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2752 registers meanwhile, we have the cached data we can rely on. */
2753
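/* Concretely (x86 sketch, for illustration): the "which watchpoint
   fired" information lives in the DR6 debug status register, and the
   watched address itself in one of DR0-DR3.  If GDB rewrites DR0-DR3
   for a changed watchpoint set before we query, a late read could
   name an address the user no longer watches; caching at SIGTRAP
   time avoids that.  */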
2754 static void
2755 save_sigtrap (struct lwp_info *lp)
2756 {
2757 struct cleanup *old_chain;
2758
2759 if (linux_ops->to_stopped_by_watchpoint == NULL)
2760 {
2761 lp->stopped_by_watchpoint = 0;
2762 return;
2763 }
2764
2765 old_chain = save_inferior_ptid ();
2766 inferior_ptid = lp->ptid;
2767
2768 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2769
2770 if (lp->stopped_by_watchpoint)
2771 {
2772 if (linux_ops->to_stopped_data_address != NULL)
2773 lp->stopped_data_address_p =
2774 linux_ops->to_stopped_data_address (&current_target,
2775 &lp->stopped_data_address);
2776 else
2777 lp->stopped_data_address_p = 0;
2778 }
2779
2780 do_cleanups (old_chain);
2781 }
2782
2783 /* See save_sigtrap. */
2784
2785 static int
2786 linux_nat_stopped_by_watchpoint (void)
2787 {
2788 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2789
2790 gdb_assert (lp != NULL);
2791
2792 return lp->stopped_by_watchpoint;
2793 }
2794
2795 static int
2796 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2797 {
2798 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2799
2800 gdb_assert (lp != NULL);
2801
2802 *addr_p = lp->stopped_data_address;
2803
2804 return lp->stopped_data_address_p;
2805 }
2806
2807 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2808
2809 static int
2810 sigtrap_is_event (int status)
2811 {
2812 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2813 }
2814
2815 /* SIGTRAP-like events recognizer. */
2816
2817 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2818
2819 /* Check for SIGTRAP-like events in LP. */
2820
2821 static int
2822 linux_nat_lp_status_is_event (struct lwp_info *lp)
2823 {
2824 /* We check for lp->waitstatus in addition to lp->status, because we can
2825 have pending process exits recorded in lp->status
2826 and W_EXITCODE(0,0) == 0. We should probably have an additional
2827 lp->status_p flag. */
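/* (Illustration: with the common encoding
   W_EXITCODE (ret, sig) == ((ret) << 8) | (sig), a clean exit
   W_EXITCODE (0, 0) really is the integer 0, indistinguishable
   from "no status stored".)  */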
2828
2829 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2830 && linux_nat_status_is_event (lp->status));
2831 }
2832
2833 /* Set an alternative recognizer for SIGTRAP-like events. If
2834 breakpoint_inserted_here_p reports a breakpoint at the stop address,
2835 then gdbarch_decr_pc_after_break will be applied. */
2836
2837 void
2838 linux_nat_set_status_is_event (struct target_ops *t,
2839 int (*status_is_event) (int status))
2840 {
2841 linux_nat_status_is_event = status_is_event;
2842 }
2843
2844 /* Wait until LP is stopped. */
2845
2846 static int
2847 stop_wait_callback (struct lwp_info *lp, void *data)
2848 {
2849 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2850
2851 /* If this is a vfork parent, bail out; it is not going to report
2852 any SIGSTOP until the vfork is done. */
2853 if (inf->vfork_child != NULL)
2854 return 0;
2855
2856 if (!lp->stopped)
2857 {
2858 int status;
2859
2860 status = wait_lwp (lp);
2861 if (status == 0)
2862 return 0;
2863
2864 if (lp->ignore_sigint && WIFSTOPPED (status)
2865 && WSTOPSIG (status) == SIGINT)
2866 {
2867 lp->ignore_sigint = 0;
2868
2869 errno = 0;
2870 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2871 if (debug_linux_nat)
2872 fprintf_unfiltered (gdb_stdlog,
2873 "PTRACE_CONT %s, 0, 0 (%s) "
2874 "(discarding SIGINT)\n",
2875 target_pid_to_str (lp->ptid),
2876 errno ? safe_strerror (errno) : "OK");
2877
2878 return stop_wait_callback (lp, NULL);
2879 }
2880
2881 maybe_clear_ignore_sigint (lp);
2882
2883 if (WSTOPSIG (status) != SIGSTOP)
2884 {
2885 /* The thread was stopped with a signal other than SIGSTOP. */
2886
2887 save_sigtrap (lp);
2888
2889 if (debug_linux_nat)
2890 fprintf_unfiltered (gdb_stdlog,
2891 "SWC: Pending event %s in %s\n",
2892 status_to_str ((int) status),
2893 target_pid_to_str (lp->ptid));
2894
2895 /* Save the sigtrap event. */
2896 lp->status = status;
2897 gdb_assert (!lp->stopped);
2898 gdb_assert (lp->signalled);
2899 lp->stopped = 1;
2900 }
2901 else
2902 {
2903 /* We caught the SIGSTOP that we intended to catch, so
2904 there's no SIGSTOP pending. */
2905
2906 if (debug_linux_nat)
2907 fprintf_unfiltered (gdb_stdlog,
2908 "SWC: Delayed SIGSTOP caught for %s.\n",
2909 target_pid_to_str (lp->ptid));
2910
2911 lp->stopped = 1;
2912
2913 /* Reset SIGNALLED only after the stop_wait_callback call
2914 above as it does gdb_assert on SIGNALLED. */
2915 lp->signalled = 0;
2916 }
2917 }
2918
2919 return 0;
2920 }
2921
2922 /* Return non-zero if LP has a wait status pending. */
2923
2924 static int
2925 status_callback (struct lwp_info *lp, void *data)
2926 {
2927 /* Only report a pending wait status if we pretend that this has
2928 indeed been resumed. */
2929 if (!lp->resumed)
2930 return 0;
2931
2932 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2933 {
2934 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2935 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2936 0', so a clean process exit cannot be stored pending in
2937 lp->status; it is indistinguishable from
2938 no-pending-status. */
2939 return 1;
2940 }
2941
2942 if (lp->status != 0)
2943 return 1;
2944
2945 return 0;
2946 }
2947
2948 /* Return non-zero if LP isn't stopped. */
2949
2950 static int
2951 running_callback (struct lwp_info *lp, void *data)
2952 {
2953 return (!lp->stopped
2954 || ((lp->status != 0
2955 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2956 && lp->resumed));
2957 }
2958
2959 /* Count the LWPs that have had events. */
2960
2961 static int
2962 count_events_callback (struct lwp_info *lp, void *data)
2963 {
2964 int *count = data;
2965
2966 gdb_assert (count != NULL);
2967
2968 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2969 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2970 (*count)++;
2971
2972 return 0;
2973 }
2974
2975 /* Select the LWP (if any) that is currently being single-stepped. */
2976
2977 static int
2978 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2979 {
2980 if (lp->last_resume_kind == resume_step
2981 && lp->status != 0)
2982 return 1;
2983 else
2984 return 0;
2985 }
2986
2987 /* Select the Nth LWP that has had a SIGTRAP event. */
2988
2989 static int
2990 select_event_lwp_callback (struct lwp_info *lp, void *data)
2991 {
2992 int *selector = data;
2993
2994 gdb_assert (selector != NULL);
2995
2996 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2997 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2998 if ((*selector)-- == 0)
2999 return 1;
3000
3001 return 0;
3002 }
3003
3004 static int
3005 cancel_breakpoint (struct lwp_info *lp)
3006 {
3007 /* Arrange for a breakpoint to be hit again later. We don't keep
3008 the SIGTRAP status and don't forward the SIGTRAP signal to the
3009 LWP. We will handle the current event, eventually we will resume
3010 this LWP, and this breakpoint will trap again.
3011
3012 If we do not do this, then we run the risk that the user will
3013 delete or disable the breakpoint, but the LWP will have already
3014 tripped on it. */
3015
3016 struct regcache *regcache = get_thread_regcache (lp->ptid);
3017 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3018 CORE_ADDR pc;
3019
3020 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3021 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3022 {
3023 if (debug_linux_nat)
3024 fprintf_unfiltered (gdb_stdlog,
3025 "CB: Push back breakpoint for %s\n",
3026 target_pid_to_str (lp->ptid));
3027
3028 /* Back up the PC if necessary. */
3029 if (gdbarch_decr_pc_after_break (gdbarch))
3030 regcache_write_pc (regcache, pc);
3031
3032 return 1;
3033 }
3034 return 0;
3035 }
3036
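/* Worked example (x86, illustration only): an int3 breakpoint is the
   single byte 0xCC, and the trap leaves the PC one byte past it, so
   gdbarch_decr_pc_after_break is 1 there.  The code above therefore
   compares "PC - 1" against the breakpoint table and, on a match,
   rewinds the PC so the LWP will hit the breakpoint again later (or
   execute the real instruction, if the user meanwhile removed it).  */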
3037 static int
3038 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3039 {
3040 struct lwp_info *event_lp = data;
3041
3042 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3043 if (lp == event_lp)
3044 return 0;
3045
3046 /* If a LWP other than the LWP that we're reporting an event for has
3047 hit a GDB breakpoint (as opposed to some random trap signal),
3048 then just arrange for it to hit it again later. We don't keep
3049 the SIGTRAP status and don't forward the SIGTRAP signal to the
3050 LWP. We will handle the current event, eventually we will resume
3051 all LWPs, and this one will get its breakpoint trap again.
3052
3053 If we do not do this, then we run the risk that the user will
3054 delete or disable the breakpoint, but the LWP will have already
3055 tripped on it. */
3056
3057 if (linux_nat_lp_status_is_event (lp)
3058 && cancel_breakpoint (lp))
3059 /* Throw away the SIGTRAP. */
3060 lp->status = 0;
3061
3062 return 0;
3063 }
3064
3065 /* Select one LWP out of those that have events pending. */
3066
3067 static void
3068 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3069 {
3070 int num_events = 0;
3071 int random_selector;
3072 struct lwp_info *event_lp;
3073
3074 /* Record the wait status for the original LWP. */
3075 (*orig_lp)->status = *status;
3076
3077 /* Give preference to any LWP that is being single-stepped. */
3078 event_lp = iterate_over_lwps (filter,
3079 select_singlestep_lwp_callback, NULL);
3080 if (event_lp != NULL)
3081 {
3082 if (debug_linux_nat)
3083 fprintf_unfiltered (gdb_stdlog,
3084 "SEL: Select single-step %s\n",
3085 target_pid_to_str (event_lp->ptid));
3086 }
3087 else
3088 {
3089 /* No single-stepping LWP. Select one at random, out of those
3090 which have had SIGTRAP events. */
3091
3092 /* First see how many SIGTRAP events we have. */
3093 iterate_over_lwps (filter, count_events_callback, &num_events);
3094
3095 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3096 random_selector = (int)
3097 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3098
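/* (E.g. with num_events == 3, the expression above maps rand ()
   uniformly onto {0, 1, 2}; select_event_lwp_callback then counts
   this index down to pick the corresponding LWP.)  */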
3099 if (debug_linux_nat && num_events > 1)
3100 fprintf_unfiltered (gdb_stdlog,
3101 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3102 num_events, random_selector);
3103
3104 event_lp = iterate_over_lwps (filter,
3105 select_event_lwp_callback,
3106 &random_selector);
3107 }
3108
3109 if (event_lp != NULL)
3110 {
3111 /* Switch the event LWP. */
3112 *orig_lp = event_lp;
3113 *status = event_lp->status;
3114 }
3115
3116 /* Flush the wait status for the event LWP. */
3117 (*orig_lp)->status = 0;
3118 }
3119
3120 /* Return non-zero if LP has been resumed. */
3121
3122 static int
3123 resumed_callback (struct lwp_info *lp, void *data)
3124 {
3125 return lp->resumed;
3126 }
3127
3128 /* Stop an active thread, verify it still exists, then resume it. If
3129 the thread ends up with a pending status, then it is not resumed,
3130 and *DATA (really a pointer to int) is set. */
3131
3132 static int
3133 stop_and_resume_callback (struct lwp_info *lp, void *data)
3134 {
3135 int *new_pending_p = data;
3136
3137 if (!lp->stopped)
3138 {
3139 ptid_t ptid = lp->ptid;
3140
3141 stop_callback (lp, NULL);
3142 stop_wait_callback (lp, NULL);
3143
3144 /* Resume if the lwp still exists, and the core wanted it
3145 running. */
3146 lp = find_lwp_pid (ptid);
3147 if (lp != NULL)
3148 {
3149 if (lp->last_resume_kind == resume_stop
3150 && lp->status == 0)
3151 {
3152 /* The core wanted the LWP to stop. Even if it stopped
3153 cleanly (with SIGSTOP), leave the event pending. */
3154 if (debug_linux_nat)
3155 fprintf_unfiltered (gdb_stdlog,
3156 "SARC: core wanted LWP %ld stopped "
3157 "(leaving SIGSTOP pending)\n",
3158 GET_LWP (lp->ptid));
3159 lp->status = W_STOPCODE (SIGSTOP);
3160 }
3161
3162 if (lp->status == 0)
3163 {
3164 if (debug_linux_nat)
3165 fprintf_unfiltered (gdb_stdlog,
3166 "SARC: re-resuming LWP %ld\n",
3167 GET_LWP (lp->ptid));
3168 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
3169 }
3170 else
3171 {
3172 if (debug_linux_nat)
3173 fprintf_unfiltered (gdb_stdlog,
3174 "SARC: not re-resuming LWP %ld "
3175 "(has pending)\n",
3176 GET_LWP (lp->ptid));
3177 if (new_pending_p)
3178 *new_pending_p = 1;
3179 }
3180 }
3181 }
3182 return 0;
3183 }
3184
3185 /* Check if we should go on and pass this event to common code.
3186 Return the affected lwp if we are, or NULL otherwise. If we stop
3187 all lwps temporarily, we may end up with new pending events in some
3188 other lwp. In that case set *NEW_PENDING_P to true. */
3189
3190 static struct lwp_info *
3191 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3192 {
3193 struct lwp_info *lp;
3194
3195 *new_pending_p = 0;
3196
3197 lp = find_lwp_pid (pid_to_ptid (lwpid));
3198
3199 /* Check for stop events reported by a process we didn't already
3200 know about - anything not already in our LWP list.
3201
3202 If we're expecting to receive stopped processes after
3203 fork, vfork, and clone events, then we'll just add the
3204 new one to our list and go back to waiting for the event
3205 to be reported - the stopped process might be returned
3206 from waitpid before or after the event is.
3207
3208 But note the case of a non-leader thread exec'ing after the
3209 leader has exited and gone from our lists. The non-leader
3210 thread changes its tid to the tgid. */
3211
3212 if (WIFSTOPPED (status) && lp == NULL
3213 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3214 {
3215 /* A multi-thread exec after we had seen the leader exiting. */
3216 if (debug_linux_nat)
3217 fprintf_unfiltered (gdb_stdlog,
3218 "LLW: Re-adding thread group leader LWP %d.\n",
3219 lwpid);
3220
3221 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3222 lp->stopped = 1;
3223 lp->resumed = 1;
3224 add_thread (lp->ptid);
3225 }
3226
3227 if (WIFSTOPPED (status) && !lp)
3228 {
3229 add_to_pid_list (&stopped_pids, lwpid, status);
3230 return NULL;
3231 }
3232
3233 /* Make sure we don't report an event for the exit of an LWP not in
3234 our list, i.e. not part of the current process. This can happen
3235 if we detach from a program we originally forked and then it
3236 exits. */
3237 if (!WIFSTOPPED (status) && !lp)
3238 return NULL;
3239
3240 /* Handle GNU/Linux's syscall SIGTRAPs. */
3241 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3242 {
3243 /* No longer need the sysgood bit. The ptrace event ends up
3244 recorded in lp->waitstatus if we care for it. We can carry
3245 on handling the event like a regular SIGTRAP from here
3246 on. */
3247 status = W_STOPCODE (SIGTRAP);
3248 if (linux_handle_syscall_trap (lp, 0))
3249 return NULL;
3250 }
3251
3252 /* Handle GNU/Linux's extended waitstatus for trace events. */
3253 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3254 {
3255 if (debug_linux_nat)
3256 fprintf_unfiltered (gdb_stdlog,
3257 "LLW: Handling extended status 0x%06x\n",
3258 status);
3259 if (linux_handle_extended_wait (lp, status, 0))
3260 return NULL;
3261 }
3262
3263 if (linux_nat_status_is_event (status))
3264 save_sigtrap (lp);
3265
3266 /* Check if the thread has exited. */
3267 if ((WIFEXITED (status) || WIFSIGNALED (status))
3268 && num_lwps (GET_PID (lp->ptid)) > 1)
3269 {
3270 /* If this is the main thread, we must stop all threads and verify
3271 if they are still alive. This is because in the nptl thread model
3272 on Linux 2.4, there is no signal issued for exiting LWPs
3273 other than the main thread. We only get the main thread exit
3274 signal once all child threads have already exited. If we
3275 stop all the threads and use the stop_wait_callback to check
3276 if they have exited we can determine whether this signal
3277 should be ignored or whether it means the end of the debugged
3278 application, regardless of which threading model is being
3279 used. */
3280 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3281 {
3282 lp->stopped = 1;
3283 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3284 stop_and_resume_callback, new_pending_p);
3285 }
3286
3287 if (debug_linux_nat)
3288 fprintf_unfiltered (gdb_stdlog,
3289 "LLW: %s exited.\n",
3290 target_pid_to_str (lp->ptid));
3291
3292 if (num_lwps (GET_PID (lp->ptid)) > 1)
3293 {
3294 /* If there is at least one more LWP, then the exit signal
3295 was not the end of the debugged application and should be
3296 ignored. */
3297 exit_lwp (lp);
3298 return NULL;
3299 }
3300 }
3301
3302 /* Check if the current LWP has previously exited. In the nptl
3303 thread model, LWPs other than the main thread do not issue
3304 signals when they exit so we must check whenever the thread has
3305 stopped. A similar check is made in stop_wait_callback(). */
3306 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3307 {
3308 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3309
3310 if (debug_linux_nat)
3311 fprintf_unfiltered (gdb_stdlog,
3312 "LLW: %s exited.\n",
3313 target_pid_to_str (lp->ptid));
3314
3315 exit_lwp (lp);
3316
3317 /* Make sure there is at least one thread running. */
3318 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3319
3320 /* Discard the event. */
3321 return NULL;
3322 }
3323
3324 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3325 an attempt to stop an LWP. */
3326 if (lp->signalled
3327 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3328 {
3329 if (debug_linux_nat)
3330 fprintf_unfiltered (gdb_stdlog,
3331 "LLW: Delayed SIGSTOP caught for %s.\n",
3332 target_pid_to_str (lp->ptid));
3333
3334 lp->signalled = 0;
3335
3336 if (lp->last_resume_kind != resume_stop)
3337 {
3338 /* This is a delayed SIGSTOP. */
3339
3340 registers_changed ();
3341
3342 if (linux_nat_prepare_to_resume != NULL)
3343 linux_nat_prepare_to_resume (lp);
3344 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3345 lp->step, GDB_SIGNAL_0);
3346 if (debug_linux_nat)
3347 fprintf_unfiltered (gdb_stdlog,
3348 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3349 lp->step ?
3350 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3351 target_pid_to_str (lp->ptid));
3352
3353 lp->stopped = 0;
3354 gdb_assert (lp->resumed);
3355
3356 /* Discard the event. */
3357 return NULL;
3358 }
3359 }
3360
3361 /* Make sure we don't report a SIGINT that we have already displayed
3362 for another thread. */
3363 if (lp->ignore_sigint
3364 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3365 {
3366 if (debug_linux_nat)
3367 fprintf_unfiltered (gdb_stdlog,
3368 "LLW: Delayed SIGINT caught for %s.\n",
3369 target_pid_to_str (lp->ptid));
3370
3371 /* This is a delayed SIGINT. */
3372 lp->ignore_sigint = 0;
3373
3374 registers_changed ();
3375 if (linux_nat_prepare_to_resume != NULL)
3376 linux_nat_prepare_to_resume (lp);
3377 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3378 lp->step, GDB_SIGNAL_0);
3379 if (debug_linux_nat)
3380 fprintf_unfiltered (gdb_stdlog,
3381 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3382 lp->step ?
3383 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3384 target_pid_to_str (lp->ptid));
3385
3386 lp->stopped = 0;
3387 gdb_assert (lp->resumed);
3388
3389 /* Discard the event. */
3390 return NULL;
3391 }
3392
3393 /* An interesting event. */
3394 gdb_assert (lp);
3395 lp->status = status;
3396 return lp;
3397 }
3398
3399 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3400 their exits until all other threads in the group have exited. */
3401
3402 static void
3403 check_zombie_leaders (void)
3404 {
3405 struct inferior *inf;
3406
3407 ALL_INFERIORS (inf)
3408 {
3409 struct lwp_info *leader_lp;
3410
3411 if (inf->pid == 0)
3412 continue;
3413
3414 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3415 if (leader_lp != NULL
3416 /* Check if there are other threads in the group, as we may
3417 have raced with the inferior simply exiting. */
3418 && num_lwps (inf->pid) > 1
3419 && linux_proc_pid_is_zombie (inf->pid))
3420 {
3421 if (debug_linux_nat)
3422 fprintf_unfiltered (gdb_stdlog,
3423 "CZL: Thread group leader %d zombie "
3424 "(it exited, or another thread execd).\n",
3425 inf->pid);
3426
3427 /* A leader zombie can mean one of two things:
3428
3429 - It exited, and there's an exit status pending, or
3430 only the leader exited (not the whole
3431 program). In the latter case, we can't waitpid the
3432 leader's exit status until all other threads are gone.
3433
3434 - There are 3 or more threads in the group, and a thread
3435 other than the leader exec'd. On an exec, the Linux
3436 kernel destroys all other threads (except the execing
3437 one) in the thread group, and resets the execing thread's
3438 tid to the tgid. No exit notification is sent for the
3439 execing thread -- from the ptracer's perspective, it
3440 appears as though the execing thread just vanishes.
3441 Until we reap all other threads except the leader and the
3442 execing thread, the leader will be zombie, and the
3443 execing thread will be in `D (disc sleep)'. As soon as
3444 all other threads are reaped, the execing thread changes
3445 its tid to the tgid, and the previous (zombie) leader
3446 vanishes, giving way to the "new" leader. We could try
3447 distinguishing the exit and exec cases, by waiting once
3448 more, and seeing if something comes out, but it doesn't
3449 sound useful. The previous leader _does_ go away, and
3450 we'll re-add the new one once we see the exec event
3451 (which is just the same as what would happen if the
3452 previous leader did exit voluntarily before some other
3453 thread execs). */
3454
3455 if (debug_linux_nat)
3456 fprintf_unfiltered (gdb_stdlog,
3457 "CZL: Thread group leader %d vanished.\n",
3458 inf->pid);
3459 exit_lwp (leader_lp);
3460 }
3461 }
3462 }
3463
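/* For reference (sketch of the proc(5) format):
   linux_proc_pid_is_zombie keys off /proc/PID/status, where a zombie
   leader shows up as

     State:  Z (zombie)

   while, in the exec case described above, the execing sibling sits
   in "D (disk sleep)" until the other threads are reaped.  */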
3464 static ptid_t
3465 linux_nat_wait_1 (struct target_ops *ops,
3466 ptid_t ptid, struct target_waitstatus *ourstatus,
3467 int target_options)
3468 {
3469 static sigset_t prev_mask;
3470 enum resume_kind last_resume_kind;
3471 struct lwp_info *lp;
3472 int status;
3473
3474 if (debug_linux_nat)
3475 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3476
3477 /* The first time we get here after starting a new inferior, we may
3478 not have added it to the LWP list yet - this is the earliest
3479 moment at which we know its PID. */
3480 if (ptid_is_pid (inferior_ptid))
3481 {
3482 /* Upgrade the main thread's ptid. */
3483 thread_change_ptid (inferior_ptid,
3484 BUILD_LWP (GET_PID (inferior_ptid),
3485 GET_PID (inferior_ptid)));
3486
3487 lp = add_lwp (inferior_ptid);
3488 lp->resumed = 1;
3489 }
3490
3491 /* Make sure SIGCHLD is blocked. */
3492 block_child_signals (&prev_mask);
3493
3494 retry:
3495 lp = NULL;
3496 status = 0;
3497
3498 /* First check if there is a LWP with a wait status pending. */
3499 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3500 {
3501 /* Any LWP in the PTID group that's been resumed will do. */
3502 lp = iterate_over_lwps (ptid, status_callback, NULL);
3503 if (lp)
3504 {
3505 if (debug_linux_nat && lp->status)
3506 fprintf_unfiltered (gdb_stdlog,
3507 "LLW: Using pending wait status %s for %s.\n",
3508 status_to_str (lp->status),
3509 target_pid_to_str (lp->ptid));
3510 }
3511 }
3512 else if (is_lwp (ptid))
3513 {
3514 if (debug_linux_nat)
3515 fprintf_unfiltered (gdb_stdlog,
3516 "LLW: Waiting for specific LWP %s.\n",
3517 target_pid_to_str (ptid));
3518
3519 /* We have a specific LWP to check. */
3520 lp = find_lwp_pid (ptid);
3521 gdb_assert (lp);
3522
3523 if (debug_linux_nat && lp->status)
3524 fprintf_unfiltered (gdb_stdlog,
3525 "LLW: Using pending wait status %s for %s.\n",
3526 status_to_str (lp->status),
3527 target_pid_to_str (lp->ptid));
3528
3529 /* We check for lp->waitstatus in addition to lp->status,
3530 because we can have pending process exits recorded in
3531 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3532 an additional lp->status_p flag. */
3533 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3534 lp = NULL;
3535 }
3536
3537 if (!target_can_async_p ())
3538 {
3539 /* Causes SIGINT to be passed on to the attached process. */
3540 set_sigint_trap ();
3541 }
3542
3543 /* But if we don't find a pending event, we'll have to wait. */
3544
3545 while (lp == NULL)
3546 {
3547 pid_t lwpid;
3548
3549 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3550 quirks:
3551
3552 - If the thread group leader exits while other threads in the
3553 thread group still exist, waitpid(TGID, ...) hangs. That
3554 waitpid won't return an exit status until the other threads
3555 in the group are reaped.
3556
3557 - When a non-leader thread execs, that thread just vanishes
3558 without reporting an exit (so we'd hang if we waited for it
3559 explicitly in that case). The exec event is reported to
3560 the TGID pid. */
3561
3562 errno = 0;
3563 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3564 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3565 lwpid = my_waitpid (-1, &status, WNOHANG);
3566
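/* (Illustration: __WCLONE selects children that do not report
   SIGCHLD on exit, i.e. clone()d threads, so the first waitpid sees
   thread events and the second sees the leader; together the two
   cover every LWP without ever blocking.)  */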
3567 if (debug_linux_nat)
3568 fprintf_unfiltered (gdb_stdlog,
3569 "LNW: waitpid(-1, ...) returned %d, %s\n",
3570 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3571
3572 if (lwpid > 0)
3573 {
3574 /* If this is true, then we paused LWPs momentarily, and may
3575 now have pending events to handle. */
3576 int new_pending;
3577
3578 if (debug_linux_nat)
3579 {
3580 fprintf_unfiltered (gdb_stdlog,
3581 "LLW: waitpid %ld received %s\n",
3582 (long) lwpid, status_to_str (status));
3583 }
3584
3585 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3586
3587 /* STATUS is now no longer valid, use LP->STATUS instead. */
3588 status = 0;
3589
3590 if (lp && !ptid_match (lp->ptid, ptid))
3591 {
3592 gdb_assert (lp->resumed);
3593
3594 if (debug_linux_nat)
3595 fprintf (stderr,
3596 "LWP %ld got an event %06x, leaving pending.\n",
3597 ptid_get_lwp (lp->ptid), lp->status);
3598
3599 if (WIFSTOPPED (lp->status))
3600 {
3601 if (WSTOPSIG (lp->status) != SIGSTOP)
3602 {
3603 /* Cancel breakpoint hits. The breakpoint may
3604 be removed before we fetch events from this
3605 process to report to the core. It is best
3606 not to assume the moribund breakpoints
3607 heuristic always handles these cases --- too
3608 many events could go through to the core
3609 before this one is handled. All-stop
3610 always cancels breakpoint hits in all
3611 threads. */
3612 if (non_stop
3613 && linux_nat_lp_status_is_event (lp)
3614 && cancel_breakpoint (lp))
3615 {
3616 /* Throw away the SIGTRAP. */
3617 lp->status = 0;
3618
3619 if (debug_linux_nat)
3620 fprintf_unfiltered (gdb_stdlog,
3621 "LLW: LWP %ld hit a breakpoint while"
3622 " waiting for another process;"
3623 " cancelled it\n",
3624 ptid_get_lwp (lp->ptid));
3625 }
3626 lp->stopped = 1;
3627 }
3628 else
3629 {
3630 lp->stopped = 1;
3631 lp->signalled = 0;
3632 }
3633 }
3634 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3635 {
3636 if (debug_linux_nat)
3637 fprintf_unfiltered (gdb_stdlog,
3638 "Process %ld exited while stopping LWPs\n",
3639 ptid_get_lwp (lp->ptid));
3640
3641 /* This was the last lwp in the process. Since
3642 events are serialized to GDB core, and we can't
3643 report this one right now, while GDB core and the
3644 other target layers will want to be notified
3645 about the exit code/signal, leave the status
3646 pending for the next time we're able to report
3647 it. */
3648
3649 /* Prevent trying to stop this thread again. We'll
3650 never try to resume it because it has a pending
3651 status. */
3652 lp->stopped = 1;
3653
3654 /* Dead LWPs aren't expected to report a pending
3655 SIGSTOP. */
3656 lp->signalled = 0;
3657
3658 /* Store the pending event in the waitstatus as
3659 well, because W_EXITCODE(0,0) == 0. */
3660 store_waitstatus (&lp->waitstatus, lp->status);
3661 }
3662
3663 /* Keep looking. */
3664 lp = NULL;
3665 }
3666
3667 if (new_pending)
3668 {
3669 /* Some LWP now has a pending event. Go all the way
3670 back to check it. */
3671 goto retry;
3672 }
3673
3674 if (lp)
3675 {
3676 /* We got an event to report to the core. */
3677 break;
3678 }
3679
3680 /* Retry until nothing comes out of waitpid. A single
3681 SIGCHLD can indicate more than one child stopped. */
3682 continue;
3683 }
3684
3685 /* Check for zombie thread group leaders. Those can't be reaped
3686 until all other threads in the thread group are. */
3687 check_zombie_leaders ();
3688
3689 /* If there are no resumed children left, bail. We'd be stuck
3690 forever in the sigsuspend call below otherwise. */
3691 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3692 {
3693 if (debug_linux_nat)
3694 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3695
3696 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3697
3698 if (!target_can_async_p ())
3699 clear_sigint_trap ();
3700
3701 restore_child_signals_mask (&prev_mask);
3702 return minus_one_ptid;
3703 }
3704
3705 /* No interesting event to report to the core. */
3706
3707 if (target_options & TARGET_WNOHANG)
3708 {
3709 if (debug_linux_nat)
3710 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3711
3712 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3713 restore_child_signals_mask (&prev_mask);
3714 return minus_one_ptid;
3715 }
3716
3717 /* We shouldn't end up here unless we want to try again. */
3718 gdb_assert (lp == NULL);
3719
3720 /* Block until we get an event reported with SIGCHLD. */
3721 sigsuspend (&suspend_mask);
3722 }
3723
3724 if (!target_can_async_p ())
3725 clear_sigint_trap ();
3726
3727 gdb_assert (lp);
3728
3729 status = lp->status;
3730 lp->status = 0;
3731
3732 /* Don't report signals that GDB isn't interested in, such as
3733 signals that are neither printed nor stopped upon. Stopping all
3734 threads can be a bit time-consuming so if we want decent
3735 performance with heavily multi-threaded programs, especially when
3736 they're using a high frequency timer, we'd better avoid it if we
3737 can. */
3738
3739 if (WIFSTOPPED (status))
3740 {
3741 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3742
3743 /* When using hardware single-step, we need to report every signal.
3744 Otherwise, signals in pass_mask may be short-circuited. */
3745 if (!lp->step
3746 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3747 {
3748 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3749 here? It is not clear we should. GDB may not expect
3750 other threads to run. On the other hand, not resuming
3751 newly attached threads may cause an unwanted delay in
3752 getting them running. */
3753 registers_changed ();
3754 if (linux_nat_prepare_to_resume != NULL)
3755 linux_nat_prepare_to_resume (lp);
3756 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3757 lp->step, signo);
3758 if (debug_linux_nat)
3759 fprintf_unfiltered (gdb_stdlog,
3760 "LLW: %s %s, %s (preempt 'handle')\n",
3761 lp->step ?
3762 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3763 target_pid_to_str (lp->ptid),
3764 (signo != GDB_SIGNAL_0
3765 ? strsignal (gdb_signal_to_host (signo))
3766 : "0"));
3767 lp->stopped = 0;
3768 goto retry;
3769 }
3770
3771 if (!non_stop)
3772 {
3773 /* Only do the below in all-stop, as we currently use SIGINT
3774 to implement target_stop (see linux_nat_stop) in
3775 non-stop. */
3776 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3777 {
3778 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3779 forwarded to the entire process group, that is, all LWPs
3780 will receive it - unless they're using CLONE_THREAD to
3781 share signals. Since we only want to report it once, we
3782 mark it as ignored for all LWPs except this one. */
3783 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3784 set_ignore_sigint, NULL);
3785 lp->ignore_sigint = 0;
3786 }
3787 else
3788 maybe_clear_ignore_sigint (lp);
3789 }
3790 }
3791
3792 /* This LWP is stopped now. */
3793 lp->stopped = 1;
3794
3795 if (debug_linux_nat)
3796 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3797 status_to_str (status), target_pid_to_str (lp->ptid));
3798
3799 if (!non_stop)
3800 {
3801 /* Now stop all other LWP's ... */
3802 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3803
3804 /* ... and wait until all of them have reported back that
3805 they're no longer running. */
3806 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3807
3808 /* If we're not waiting for a specific LWP, choose an event LWP
3809 from among those that have had events. Giving equal priority
3810 to all LWPs that have had events helps prevent
3811 starvation. */
3812 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3813 select_event_lwp (ptid, &lp, &status);
3814
3815 /* Now that we've selected our final event LWP, cancel any
3816 breakpoints in other LWPs that have hit a GDB breakpoint.
3817 See the comment in cancel_breakpoints_callback to find out
3818 why. */
3819 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3820
3821 /* We'll need this to determine whether to report a SIGSTOP as
3822 TARGET_WAITKIND_STOPPED with GDB_SIGNAL_0. Need to take a copy
3823 because resume_clear_callback clears it. */
3824 last_resume_kind = lp->last_resume_kind;
3825
3826 /* In all-stop, from the core's perspective, all LWPs are now
3827 stopped until a new resume action is sent over. */
3828 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3829 }
3830 else
3831 {
3832 /* See above. */
3833 last_resume_kind = lp->last_resume_kind;
3834 resume_clear_callback (lp, NULL);
3835 }
3836
3837 if (linux_nat_status_is_event (status))
3838 {
3839 if (debug_linux_nat)
3840 fprintf_unfiltered (gdb_stdlog,
3841 "LLW: trap ptid is %s.\n",
3842 target_pid_to_str (lp->ptid));
3843 }
3844
3845 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3846 {
3847 *ourstatus = lp->waitstatus;
3848 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3849 }
3850 else
3851 store_waitstatus (ourstatus, status);
3852
3853 if (debug_linux_nat)
3854 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3855
3856 restore_child_signals_mask (&prev_mask);
3857
3858 if (last_resume_kind == resume_stop
3859 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3860 && WSTOPSIG (status) == SIGSTOP)
3861 {
3862 /* A thread that has been requested to stop by GDB with
3863 target_stop stopped cleanly, so report it as SIG0. The
3864 use of SIGSTOP is an implementation detail. */
3865 ourstatus->value.sig = GDB_SIGNAL_0;
3866 }
3867
3868 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3869 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3870 lp->core = -1;
3871 else
3872 lp->core = linux_common_core_of_thread (lp->ptid);
3873
3874 return lp->ptid;
3875 }
3876
3877 /* Resume LWPs that are currently stopped without any pending status
3878 to report, but are resumed from the core's perspective. */
3879
3880 static int
3881 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3882 {
3883 ptid_t *wait_ptid_p = data;
3884
3885 if (lp->stopped
3886 && lp->resumed
3887 && lp->status == 0
3888 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3889 {
3890 struct regcache *regcache = get_thread_regcache (lp->ptid);
3891 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3892 CORE_ADDR pc = regcache_read_pc (regcache);
3893
3894 gdb_assert (is_executing (lp->ptid));
3895
3896 /* Don't bother if there's a breakpoint at PC that we'd hit
3897 immediately, and we're not waiting for this LWP. */
3898 if (!ptid_match (lp->ptid, *wait_ptid_p))
3899 {
3900 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3901 return 0;
3902 }
3903
3904 if (debug_linux_nat)
3905 fprintf_unfiltered (gdb_stdlog,
3906 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3907 target_pid_to_str (lp->ptid),
3908 paddress (gdbarch, pc),
3909 lp->step);
3910
3911 registers_changed ();
3912 if (linux_nat_prepare_to_resume != NULL)
3913 linux_nat_prepare_to_resume (lp);
3914 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3915 lp->step, GDB_SIGNAL_0);
3916 lp->stopped = 0;
3917 lp->stopped_by_watchpoint = 0;
3918 }
3919
3920 return 0;
3921 }
3922
3923 static ptid_t
3924 linux_nat_wait (struct target_ops *ops,
3925 ptid_t ptid, struct target_waitstatus *ourstatus,
3926 int target_options)
3927 {
3928 ptid_t event_ptid;
3929
3930 if (debug_linux_nat)
3931 {
3932 char *options_string;
3933
3934 options_string = target_options_to_string (target_options);
3935 fprintf_unfiltered (gdb_stdlog,
3936 "linux_nat_wait: [%s], [%s]\n",
3937 target_pid_to_str (ptid),
3938 options_string);
3939 xfree (options_string);
3940 }
3941
3942 /* Flush the async file first. */
3943 if (target_can_async_p ())
3944 async_file_flush ();
3945
3946 /* Resume LWPs that are currently stopped without any pending status
3947 to report, but are resumed from the core's perspective. LWPs get
3948 in this state if we find them stopping at a time we're not
3949 interested in reporting the event (target_wait on a
3950 specific process, for example; see linux_nat_wait_1), and
3951 meanwhile the event became uninteresting. Don't bother resuming
3952 LWPs we're not going to wait for if they'd stop immediately. */
3953 if (non_stop)
3954 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3955
3956 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3957
3958 /* If we requested any event, and something came out, assume there
3959 may be more. If we requested a specific lwp or process, also
3960 assume there may be more. */
3961 if (target_can_async_p ()
3962 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3963 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3964 || !ptid_equal (ptid, minus_one_ptid)))
3965 async_file_mark ();
3966
3967 /* Get ready for the next event. */
3968 if (target_can_async_p ())
3969 target_async (inferior_event_handler, 0);
3970
3971 return event_ptid;
3972 }
3973
3974 static int
3975 kill_callback (struct lwp_info *lp, void *data)
3976 {
3977 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3978
3979 errno = 0;
3980 kill (GET_LWP (lp->ptid), SIGKILL);
3981 if (debug_linux_nat)
3982 fprintf_unfiltered (gdb_stdlog,
3983 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3984 target_pid_to_str (lp->ptid),
3985 errno ? safe_strerror (errno) : "OK");
3986
3987 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3988
3989 errno = 0;
3990 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3991 if (debug_linux_nat)
3992 fprintf_unfiltered (gdb_stdlog,
3993 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3994 target_pid_to_str (lp->ptid),
3995 errno ? safe_strerror (errno) : "OK");
3996
3997 return 0;
3998 }
3999
4000 static int
4001 kill_wait_callback (struct lwp_info *lp, void *data)
4002 {
4003 pid_t pid;
4004
4005 /* We must make sure that there are no pending events (delayed
4006 SIGSTOPs, pending SIGTRAPs, etc.), so that the current program
4007 doesn't interfere with any following debugging session. */
4008
4009 /* For cloned processes we must check both with __WCLONE and
4010 without, since the exit status of a cloned process isn't reported
4011 with __WCLONE. */
4012 if (lp->cloned)
4013 {
4014 do
4015 {
4016 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4017 if (pid != (pid_t) -1)
4018 {
4019 if (debug_linux_nat)
4020 fprintf_unfiltered (gdb_stdlog,
4021 "KWC: wait %s received unknown.\n",
4022 target_pid_to_str (lp->ptid));
4023 /* The Linux kernel sometimes fails to kill a thread
4024 completely after PTRACE_KILL; the thread goes from the
4025 stop point in do_fork out to the one in
4026 get_signal_to_deliver and waits again. So kill it
4027 again. */
4028 kill_callback (lp, NULL);
4029 }
4030 }
4031 while (pid == GET_LWP (lp->ptid));
4032
4033 gdb_assert (pid == -1 && errno == ECHILD);
4034 }
4035
4036 do
4037 {
4038 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4039 if (pid != (pid_t) -1)
4040 {
4041 if (debug_linux_nat)
4042 fprintf_unfiltered (gdb_stdlog,
4043 "KWC: wait %s received unk.\n",
4044 target_pid_to_str (lp->ptid));
4045 /* See the call to kill_callback above. */
4046 kill_callback (lp, NULL);
4047 }
4048 }
4049 while (pid == GET_LWP (lp->ptid));
4050
4051 gdb_assert (pid == -1 && errno == ECHILD);
4052 return 0;
4053 }
4054
4055 static void
4056 linux_nat_kill (struct target_ops *ops)
4057 {
4058 struct target_waitstatus last;
4059 ptid_t last_ptid;
4060 int status;
4061
4062 /* If we're stopped while forking and we haven't followed yet,
4063 kill the other task. We need to do this first because the
4064 parent will be sleeping if this is a vfork. */
4065
4066 get_last_target_status (&last_ptid, &last);
4067
4068 if (last.kind == TARGET_WAITKIND_FORKED
4069 || last.kind == TARGET_WAITKIND_VFORKED)
4070 {
4071 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4072 wait (&status);
4073 }
4074
4075 if (forks_exist_p ())
4076 linux_fork_killall ();
4077 else
4078 {
4079 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4080
4081 /* Stop all threads before killing them, since ptrace requires
4082 that the thread is stopped to successfully PTRACE_KILL. */
4083 iterate_over_lwps (ptid, stop_callback, NULL);
4084 /* ... and wait until all of them have reported back that
4085 they're no longer running. */
4086 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4087
4088 /* Kill all LWP's ... */
4089 iterate_over_lwps (ptid, kill_callback, NULL);
4090
4091 /* ... and wait until we've flushed all events. */
4092 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4093 }
4094
4095 target_mourn_inferior ();
4096 }
4097
4098 static void
4099 linux_nat_mourn_inferior (struct target_ops *ops)
4100 {
4101 purge_lwp_list (ptid_get_pid (inferior_ptid));
4102
4103 if (! forks_exist_p ())
4104 /* Normal case, no other forks available. */
4105 linux_ops->to_mourn_inferior (ops);
4106 else
4107 /* Multi-fork case. The current inferior_ptid has exited, but
4108 there are other viable forks to debug. Delete the exiting
4109 one and context-switch to the first available. */
4110 linux_fork_mourn_inferior ();
4111 }
4112
4113 /* Convert a native/host siginfo object into/from the siginfo in the
4114 layout of the inferior's architecture. */
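/* DIRECTION is assumed to be 1 when converting from the inferior's
   layout into the native layout, and 0 for the opposite direction;
   this matches both the memcpy fallback below and the two calls in
   linux_xfer_siginfo.  */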
4115
4116 static void
4117 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
4118 {
4119 int done = 0;
4120
4121 if (linux_nat_siginfo_fixup != NULL)
4122 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4123
4124 /* If there was no callback, or the callback didn't do anything,
4125 then just do a straight memcpy. */
4126 if (!done)
4127 {
4128 if (direction == 1)
4129 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4130 else
4131 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4132 }
4133 }
4134
4135 static LONGEST
4136 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4137 const char *annex, gdb_byte *readbuf,
4138 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4139 {
4140 int pid;
4141 siginfo_t siginfo;
4142 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4143
4144 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4145 gdb_assert (readbuf || writebuf);
4146
4147 pid = GET_LWP (inferior_ptid);
4148 if (pid == 0)
4149 pid = GET_PID (inferior_ptid);
4150
4151 if (offset > sizeof (siginfo))
4152 return -1;
4153
4154 errno = 0;
4155 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4156 if (errno != 0)
4157 return -1;
4158
4159 /* When GDB is built as a 64-bit application, ptrace writes into
4160 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4161 inferior with a 64-bit GDB should look the same as debugging it
4162 with a 32-bit GDB, we need to convert it. GDB core always sees
4163 the converted layout, so any read/write will have to be done
4164 post-conversion. */
4165 siginfo_fixup (&siginfo, inf_siginfo, 0);
4166
4167 if (offset + len > sizeof (siginfo))
4168 len = sizeof (siginfo) - offset;
4169
4170 if (readbuf != NULL)
4171 memcpy (readbuf, inf_siginfo + offset, len);
4172 else
4173 {
4174 memcpy (inf_siginfo + offset, writebuf, len);
4175
4176 /* Convert back to ptrace layout before flushing it out. */
4177 siginfo_fixup (&siginfo, inf_siginfo, 1);
4178
4179 errno = 0;
4180 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4181 if (errno != 0)
4182 return -1;
4183 }
4184
4185 return len;
4186 }
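/* For illustration only: a self-contained sketch (not GDB code) of
   the PTRACE_GETSIGINFO fetch that linux_xfer_siginfo performs
   above.  PID is assumed to name an LWP that is already stopped
   under ptrace.  */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>

static int
example_fetch_siginfo (pid_t pid)
{
  siginfo_t si;

  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (void *) 0, &si);
  if (errno != 0)
    return -1;

  /* si_signo is the signal the LWP stopped for; si_code says why it
     was raised.  */
  printf ("signo=%d code=%d\n", si.si_signo, si.si_code);
  return 0;
}
#endif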
4187
4188 static LONGEST
4189 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4190 const char *annex, gdb_byte *readbuf,
4191 const gdb_byte *writebuf,
4192 ULONGEST offset, LONGEST len)
4193 {
4194 struct cleanup *old_chain;
4195 LONGEST xfer;
4196
4197 if (object == TARGET_OBJECT_SIGNAL_INFO)
4198 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4199 offset, len);
4200
4201 /* The target is connected but no live inferior is selected. Pass
4202 this request down to a lower stratum (e.g., the executable
4203 file). */
4204 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4205 return 0;
4206
4207 old_chain = save_inferior_ptid ();
4208
4209 if (is_lwp (inferior_ptid))
4210 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4211
4212 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4213 offset, len);
4214
4215 do_cleanups (old_chain);
4216 return xfer;
4217 }
4218
4219 static int
4220 linux_thread_alive (ptid_t ptid)
4221 {
4222 int err, tmp_errno;
4223
4224 gdb_assert (is_lwp (ptid));
4225
4226 /* Send signal 0 instead of using ptrace, because a ptrace request
4227 on a running thread errors out claiming that the thread doesn't
4228 exist; kill with signal 0 performs only the existence and
permission checks. */
4229 err = kill_lwp (GET_LWP (ptid), 0);
4230 tmp_errno = errno;
4231 if (debug_linux_nat)
4232 fprintf_unfiltered (gdb_stdlog,
4233 "LLTA: KILL(SIG0) %s (%s)\n",
4234 target_pid_to_str (ptid),
4235 err ? safe_strerror (tmp_errno) : "OK");
4236
4237 if (err != 0)
4238 return 0;
4239
4240 return 1;
4241 }
4242
4243 static int
4244 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4245 {
4246 return linux_thread_alive (ptid);
4247 }
4248
4249 static char *
4250 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4251 {
4252 static char buf[64];
4253
4254 if (is_lwp (ptid)
4255 && (GET_PID (ptid) != GET_LWP (ptid)
4256 || num_lwps (GET_PID (ptid)) > 1))
4257 {
4258 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4259 return buf;
4260 }
4261
4262 return normal_pid_to_str (ptid);
4263 }
4264
4265 static char *
4266 linux_nat_thread_name (struct thread_info *thr)
4267 {
4268 int pid = ptid_get_pid (thr->ptid);
4269 long lwp = ptid_get_lwp (thr->ptid);
4270 #define FORMAT "/proc/%d/task/%ld/comm"
4271 char buf[sizeof (FORMAT) + 30];
4272 FILE *comm_file;
4273 char *result = NULL;
4274
4275 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4276 comm_file = fopen (buf, "r");
4277 if (comm_file)
4278 {
4279 /* Not exported by the kernel, so we define it here. */
4280 #define COMM_LEN 16
4281 static char line[COMM_LEN + 1];
4282
4283 if (fgets (line, sizeof (line), comm_file))
4284 {
4285 char *nl = strchr (line, '\n');
4286
4287 if (nl)
4288 *nl = '\0';
4289 if (*line != '\0')
4290 result = line;
4291 }
4292
4293 fclose (comm_file);
4294 }
4295
4296 #undef COMM_LEN
4297 #undef FORMAT
4298
4299 return result;
4300 }
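/* For example, reading /proc/self/task/<tid>/comm yields the thread
   name followed by a newline, which is why the newline is stripped
   above before the name is returned.  */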
4301
4302 /* Accepts an integer PID; returns a string representing a file that
4303 can be opened to get the symbols for the child process. */
4304
4305 static char *
4306 linux_child_pid_to_exec_file (int pid)
4307 {
4308 char *name1, *name2;
4309
4310 name1 = xmalloc (MAXPATHLEN);
4311 name2 = xmalloc (MAXPATHLEN);
4312 make_cleanup (xfree, name1);
4313 make_cleanup (xfree, name2);
4314 memset (name2, 0, MAXPATHLEN);
4315
4316 sprintf (name1, "/proc/%d/exe", pid);
/* readlink does not NUL-terminate; NAME2 was zeroed above, so keep
one byte in reserve for the terminator. */
4317 if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
4318 return name2;
4319 else
4320 return name1;
4321 }
4322
4323 /* Records the thread's register state for the corefile note
4324 section. */
4325
4326 static char *
4327 linux_nat_collect_thread_registers (const struct regcache *regcache,
4328 ptid_t ptid, bfd *obfd,
4329 char *note_data, int *note_size,
4330 enum gdb_signal stop_signal)
4331 {
4332 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4333 const struct regset *regset;
4334 int core_regset_p;
4335 gdb_gregset_t gregs;
4336 gdb_fpregset_t fpregs;
4337
4338 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4339
4340 if (core_regset_p
4341 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4342 sizeof (gregs)))
4343 != NULL && regset->collect_regset != NULL)
4344 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4345 else
4346 fill_gregset (regcache, &gregs, -1);
4347
4348 note_data = (char *) elfcore_write_prstatus
4349 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4350 gdb_signal_to_host (stop_signal), &gregs);
4351
4352 if (core_regset_p
4353 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4354 sizeof (fpregs)))
4355 != NULL && regset->collect_regset != NULL)
4356 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4357 else
4358 fill_fpregset (regcache, &fpregs, -1);
4359
4360 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4361 &fpregs, sizeof (fpregs));
4362
4363 return note_data;
4364 }
4365
4366 /* Fills the "to_make_corefile_note" target vector. Builds the note
4367 section for a corefile, and returns it in a malloc buffer. */
4368
4369 static char *
4370 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4371 {
4372 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4373 converted to gdbarch_core_regset_sections, this function can go away. */
4374 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4375 linux_nat_collect_thread_registers);
4376 }
4377
4378 /* Implement the to_xfer_partial interface for memory reads using the /proc
4379 filesystem. Because we can use a single read() call for /proc, this
4380 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4381 but it doesn't support writes. */
4382
4383 static LONGEST
4384 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4385 const char *annex, gdb_byte *readbuf,
4386 const gdb_byte *writebuf,
4387 ULONGEST offset, LONGEST len)
4388 {
4389 LONGEST ret;
4390 int fd;
4391 char filename[64];
4392
4393 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4394 return 0;
4395
4396 /* Don't bother for small transfers of just a few words. */
4397 if (len < 3 * sizeof (long))
4398 return 0;
4399
4400 /* We could keep this file open and cache it - possibly one per
4401 thread. That requires some juggling, but is even faster. */
4402 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4403 fd = open (filename, O_RDONLY | O_LARGEFILE);
4404 if (fd == -1)
4405 return 0;
4406
4407 /* If pread64 is available, use it. It's faster if the kernel
4408 supports it (only one syscall), and it's 64-bit safe even on
4409 32-bit platforms (for instance, SPARC debugging a SPARC64
4410 application). */
4411 #ifdef HAVE_PREAD64
4412 if (pread64 (fd, readbuf, len, offset) != len)
4413 #else
4414 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4415 #endif
4416 ret = 0;
4417 else
4418 ret = len;
4419
4420 close (fd);
4421 return ret;
4422 }
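/* For illustration only: a self-contained sketch (not GDB code) of
   the single-read /proc/PID/mem access used above.  The target
   thread is assumed to be ptrace-stopped, which is what makes the
   read legal for a tracer.  */
#if 0
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

static ssize_t
example_read_inferior_mem (pid_t pid, unsigned long addr,
			   void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  /* One pread replaces one PTRACE_PEEKTEXT round-trip per word.  */
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif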
4423
4424
4425 /* Enumerate spufs IDs for process PID. */
4426 static LONGEST
4427 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4428 {
4429 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4430 LONGEST pos = 0;
4431 LONGEST written = 0;
4432 char path[128];
4433 DIR *dir;
4434 struct dirent *entry;
4435
4436 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4437 dir = opendir (path);
4438 if (!dir)
4439 return -1;
4440
4441 rewinddir (dir);
4442 while ((entry = readdir (dir)) != NULL)
4443 {
4444 struct stat st;
4445 struct statfs stfs;
4446 int fd;
4447
4448 fd = atoi (entry->d_name);
4449 if (!fd)
4450 continue;
4451
4452 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4453 if (stat (path, &st) != 0)
4454 continue;
4455 if (!S_ISDIR (st.st_mode))
4456 continue;
4457
4458 if (statfs (path, &stfs) != 0)
4459 continue;
4460 if (stfs.f_type != SPUFS_MAGIC)
4461 continue;
4462
4463 if (pos >= offset && pos + 4 <= offset + len)
4464 {
4465 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4466 written += 4;
4467 }
4468 pos += 4;
4469 }
4470
4471 closedir (dir);
4472 return written;
4473 }
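/* The buffer filled in above is thus a packed array of 4-byte
   integers in the target's byte order, one context ID (an open file
   descriptor number) per SPU context found under /proc/PID/fd.  */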
4474
4475 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4476 object type, using the /proc file system. */
4477 static LONGEST
4478 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4479 const char *annex, gdb_byte *readbuf,
4480 const gdb_byte *writebuf,
4481 ULONGEST offset, LONGEST len)
4482 {
4483 char buf[128];
4484 int fd = 0;
4485 int ret = -1;
4486 int pid = PIDGET (inferior_ptid);
4487
4488 if (!annex)
4489 {
4490 if (!readbuf)
4491 return -1;
4492 else
4493 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4494 }
4495
4496 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4497 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4498 if (fd < 0)
4499 return -1;
4500
4501 if (offset != 0
4502 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4503 {
4504 close (fd);
4505 return 0;
4506 }
4507
4508 if (writebuf)
4509 ret = write (fd, writebuf, (size_t) len);
4510 else if (readbuf)
4511 ret = read (fd, readbuf, (size_t) len);
4512
4513 close (fd);
4514 return ret;
4515 }
4516
4517
4518 /* Parse LINE as a signal set and add its set bits to SIGS. */
4519
4520 static void
4521 add_line_to_sigset (const char *line, sigset_t *sigs)
4522 {
4523 int len = strlen (line) - 1;
4524 const char *p;
4525 int signum;
4526
4527 if (line[len] != '\n')
4528 error (_("Could not parse signal set: %s"), line);
4529
4530 p = line;
4531 signum = len * 4;
4532 while (len-- > 0)
4533 {
4534 int digit;
4535
4536 if (*p >= '0' && *p <= '9')
4537 digit = *p - '0';
4538 else if (*p >= 'a' && *p <= 'f')
4539 digit = *p - 'a' + 10;
4540 else
4541 error (_("Could not parse signal set: %s"), line);
4542
4543 signum -= 4;
4544
4545 if (digit & 1)
4546 sigaddset (sigs, signum + 1);
4547 if (digit & 2)
4548 sigaddset (sigs, signum + 2);
4549 if (digit & 4)
4550 sigaddset (sigs, signum + 3);
4551 if (digit & 8)
4552 sigaddset (sigs, signum + 4);
4553
4554 p++;
4555 }
4556 }
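/* Worked example: for the status-file line "02\n", LEN is 2 and
   SIGNUM starts at 8; the leading '0' contributes nothing and drops
   SIGNUM to 4, then the digit '2' (bit 1) drops SIGNUM to 0 and sets
   signal 0 + 2, i.e. SIGINT.  */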
4557
4558 /* Find process PID's pending signals from /proc/pid/status and set
4559 SIGS to match. */
4560
4561 void
4562 linux_proc_pending_signals (int pid, sigset_t *pending,
4563 sigset_t *blocked, sigset_t *ignored)
4564 {
4565 FILE *procfile;
4566 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4567 struct cleanup *cleanup;
4568
4569 sigemptyset (pending);
4570 sigemptyset (blocked);
4571 sigemptyset (ignored);
4572 sprintf (fname, "/proc/%d/status", pid);
4573 procfile = fopen (fname, "r");
4574 if (procfile == NULL)
4575 error (_("Could not open %s"), fname);
4576 cleanup = make_cleanup_fclose (procfile);
4577
4578 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4579 {
4580 /* Normal queued signals are on the SigPnd line in the status
4581 file. However, 2.6 kernels also have a "shared" pending
4582 queue for delivering signals to a thread group, so check for
4583 a ShdPnd line also.
4584
4585 Unfortunately some Red Hat kernels include the shared pending
4586 queue but not the ShdPnd status field. */
4587
4588 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4589 add_line_to_sigset (buffer + 8, pending);
4590 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4591 add_line_to_sigset (buffer + 8, pending);
4592 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4593 add_line_to_sigset (buffer + 8, blocked);
4594 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4595 add_line_to_sigset (buffer + 8, ignored);
4596 }
4597
4598 do_cleanups (cleanup);
4599 }
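/* For illustration only: a hypothetical caller checking whether PID
   has a SIGINT queued would do:

     sigset_t pending, blocked, ignored;

     linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
     if (sigismember (&pending, SIGINT))
       handle_pending_sigint ();

   where handle_pending_sigint is a made-up name for whatever the
   caller wants to do about it.  */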
4600
4601 static LONGEST
4602 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4603 const char *annex, gdb_byte *readbuf,
4604 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4605 {
4606 gdb_assert (object == TARGET_OBJECT_OSDATA);
4607
4608 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4609 }
4610
4611 static LONGEST
4612 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4613 const char *annex, gdb_byte *readbuf,
4614 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4615 {
4616 LONGEST xfer;
4617
4618 if (object == TARGET_OBJECT_AUXV)
4619 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4620 offset, len);
4621
4622 if (object == TARGET_OBJECT_OSDATA)
4623 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4624 offset, len);
4625
4626 if (object == TARGET_OBJECT_SPU)
4627 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4628 offset, len);
4629
4630 /* GDB calculates all addresses in the possibly larger width of the
4631 host. The address needs to be masked down to the target's address
4632 width before its final use - either by linux_proc_xfer_partial or
4633 inf_ptrace_xfer_partial. For example, with ADDR_BIT == 32, an
offset of 0xffffffff00001000 is masked down to 0x00001000.

4634 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4635
4636 if (object == TARGET_OBJECT_MEMORY)
4637 {
4638 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4639
4640 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4641 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4642 }
4643
4644 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4645 offset, len);
4646 if (xfer != 0)
4647 return xfer;
4648
4649 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4650 offset, len);
4651 }
4652
4653 static void
4654 cleanup_target_stop (void *arg)
4655 {
4656 ptid_t *ptid = (ptid_t *) arg;
4657
4658 gdb_assert (arg != NULL);
4659
4660 /* Unpause all */
4661 target_resume (*ptid, 0, GDB_SIGNAL_0);
4662 }
4663
4664 static VEC(static_tracepoint_marker_p) *
4665 linux_child_static_tracepoint_markers_by_strid (const char *strid)
4666 {
4667 char s[IPA_CMD_BUF_SIZE];
4668 struct cleanup *old_chain;
4669 int pid = ptid_get_pid (inferior_ptid);
4670 VEC(static_tracepoint_marker_p) *markers = NULL;
4671 struct static_tracepoint_marker *marker = NULL;
4672 char *p = s;
4673 ptid_t ptid = ptid_build (pid, 0, 0);
4674
4675 /* Pause all */
4676 target_stop (ptid);
4677
4678 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4679 s[sizeof ("qTfSTM")] = 0;
4680
4681 agent_run_command (pid, s, strlen (s) + 1);
4682
4683 old_chain = make_cleanup (free_current_marker, &marker);
4684 make_cleanup (cleanup_target_stop, &ptid);
4685
4686 while (*p++ == 'm')
4687 {
4688 if (marker == NULL)
4689 marker = XCNEW (struct static_tracepoint_marker);
4690
4691 do
4692 {
4693 parse_static_tracepoint_marker_definition (p, &p, marker);
4694
4695 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4696 {
4697 VEC_safe_push (static_tracepoint_marker_p,
4698 markers, marker);
4699 marker = NULL;
4700 }
4701 else
4702 {
4703 release_static_tracepoint_marker (marker);
4704 memset (marker, 0, sizeof (*marker));
4705 }
4706 }
4707 while (*p++ == ','); /* comma-separated list */
4708
4709 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4710 s[sizeof ("qTsSTM")] = 0;
4711 agent_run_command (pid, s, strlen (s) + 1);
4712 p = s;
4713 }
4714
4715 do_cleanups (old_chain);
4716
4717 return markers;
4718 }
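/* Illustrative reply shapes, as implied by the parsing loop above: a
   "qTfSTM"/"qTsSTM" reply beginning with 'm' carries one or more
   comma-separated static tracepoint marker definitions, while a
   reply beginning with anything else (the remote protocol uses 'l')
   ends the enumeration.  */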
4719
4720 /* Create a prototype generic GNU/Linux target. The client can override
4721 it with local methods. */
4722
4723 static void
4724 linux_target_install_ops (struct target_ops *t)
4725 {
4726 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4727 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4728 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4729 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4730 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4731 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4732 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4733 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4734 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4735 t->to_post_attach = linux_child_post_attach;
4736 t->to_follow_fork = linux_child_follow_fork;
4737 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4738
4739 super_xfer_partial = t->to_xfer_partial;
4740 t->to_xfer_partial = linux_xfer_partial;
4741
4742 t->to_static_tracepoint_markers_by_strid
4743 = linux_child_static_tracepoint_markers_by_strid;
4744 }
4745
4746 struct target_ops *
4747 linux_target (void)
4748 {
4749 struct target_ops *t;
4750
4751 t = inf_ptrace_target ();
4752 linux_target_install_ops (t);
4753
4754 return t;
4755 }
4756
4757 struct target_ops *
4758 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4759 {
4760 struct target_ops *t;
4761
4762 t = inf_ptrace_trad_target (register_u_offset);
4763 linux_target_install_ops (t);
4764
4765 return t;
4766 }
4767
4768 /* target_is_async_p implementation. */
4769
4770 static int
4771 linux_nat_is_async_p (void)
4772 {
4773 /* NOTE: palves 2008-03-21: We're only async when the user requests
4774 it explicitly with the "set target-async" command.
4775 Someday, linux will always be async. */
4776 return target_async_permitted;
4777 }
4778
4779 /* target_can_async_p implementation. */
4780
4781 static int
4782 linux_nat_can_async_p (void)
4783 {
4784 /* NOTE: palves 2008-03-21: We're only async when the user requests
4785 it explicitly with the "set target-async" command.
4786 Someday, linux will always be async. */
4787 return target_async_permitted;
4788 }
4789
4790 static int
4791 linux_nat_supports_non_stop (void)
4792 {
4793 return 1;
4794 }
4795
4796 /* True if we want to support multi-process. To be removed when GDB
4797 supports multi-exec. */
4798
4799 int linux_multi_process = 1;
4800
4801 static int
4802 linux_nat_supports_multi_process (void)
4803 {
4804 return linux_multi_process;
4805 }
4806
4807 static int
4808 linux_nat_supports_disable_randomization (void)
4809 {
4810 #ifdef HAVE_PERSONALITY
4811 return 1;
4812 #else
4813 return 0;
4814 #endif
4815 }
4816
4817 static int async_terminal_is_ours = 1;
4818
4819 /* target_terminal_inferior implementation. */
4820
4821 static void
4822 linux_nat_terminal_inferior (void)
4823 {
4824 if (!target_is_async_p ())
4825 {
4826 /* Async mode is disabled. */
4827 terminal_inferior ();
4828 return;
4829 }
4830
4831 terminal_inferior ();
4832
4833 /* Calls to target_terminal_*() are meant to be idempotent. */
4834 if (!async_terminal_is_ours)
4835 return;
4836
4837 delete_file_handler (input_fd);
4838 async_terminal_is_ours = 0;
4839 set_sigint_trap ();
4840 }
4841
4842 /* target_terminal_ours implementation. */
4843
4844 static void
4845 linux_nat_terminal_ours (void)
4846 {
4847 if (!target_is_async_p ())
4848 {
4849 /* Async mode is disabled. */
4850 terminal_ours ();
4851 return;
4852 }
4853
4854 /* GDB should never give the terminal to the inferior if the
4855 inferior is running in the background (run&, continue&, etc.),
4856 but claiming it back sure should. */
4857 terminal_ours ();
4858
4859 if (async_terminal_is_ours)
4860 return;
4861
4862 clear_sigint_trap ();
4863 add_file_handler (input_fd, stdin_event_handler, 0);
4864 async_terminal_is_ours = 1;
4865 }
4866
4867 static void (*async_client_callback) (enum inferior_event_type event_type,
4868 void *context);
4869 static void *async_client_context;
4870
4871 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4872 it notices when any child changes state and notifies the
4873 event-loop; in sync mode, it allows us to use sigsuspend in
4874 linux_nat_wait_1 above to wait for the arrival of a SIGCHLD. */
4875
4876 static void
4877 sigchld_handler (int signo)
4878 {
4879 int old_errno = errno;
4880
4881 if (debug_linux_nat)
4882 ui_file_write_async_safe (gdb_stdlog,
4883 "sigchld\n", sizeof ("sigchld\n") - 1);
4884
4885 if (signo == SIGCHLD
4886 && linux_nat_event_pipe[0] != -1)
4887 async_file_mark (); /* Let the event loop know that there are
4888 events to handle. */
4889
4890 errno = old_errno;
4891 }
4892
4893 /* Callback registered with the target events file descriptor. */
4894
4895 static void
4896 handle_target_event (int error, gdb_client_data client_data)
4897 {
4898 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4899 }
4900
4901 /* Create/destroy the target events pipe. Returns previous state. */
4902
4903 static int
4904 linux_async_pipe (int enable)
4905 {
4906 int previous = (linux_nat_event_pipe[0] != -1);
4907
4908 if (previous != enable)
4909 {
4910 sigset_t prev_mask;
4911
4912 block_child_signals (&prev_mask);
4913
4914 if (enable)
4915 {
4916 if (pipe (linux_nat_event_pipe) == -1)
4917 internal_error (__FILE__, __LINE__,
4918 "creating event pipe failed.");
4919
4920 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4921 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4922 }
4923 else
4924 {
4925 close (linux_nat_event_pipe[0]);
4926 close (linux_nat_event_pipe[1]);
4927 linux_nat_event_pipe[0] = -1;
4928 linux_nat_event_pipe[1] = -1;
4929 }
4930
4931 restore_child_signals_mask (&prev_mask);
4932 }
4933
4934 return previous;
4935 }
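/* For illustration only: the self-pipe pattern used above, reduced
   to a self-contained sketch (not GDB code).  write(2) is
   async-signal-safe while most of the library is not, so the handler
   just writes a byte; the event loop watches the read end.  */
#if 0
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <errno.h>

static int example_event_pipe[2] = { -1, -1 };

static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;

  /* Wake up whatever is selecting/polling on the read end.  */
  (void) write (example_event_pipe[1], "+", 1);
  errno = saved_errno;
}

static int
example_setup (void)
{
  if (pipe (example_event_pipe) == -1)
    return -1;
  fcntl (example_event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_event_pipe[1], F_SETFL, O_NONBLOCK);
  signal (SIGCHLD, example_sigchld_handler);
  return 0;
}
#endif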
4936
4937 /* target_async implementation. */
4938
4939 static void
4940 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4941 void *context), void *context)
4942 {
4943 if (callback != NULL)
4944 {
4945 async_client_callback = callback;
4946 async_client_context = context;
4947 if (!linux_async_pipe (1))
4948 {
4949 add_file_handler (linux_nat_event_pipe[0],
4950 handle_target_event, NULL);
4951 /* There may be pending events to handle. Tell the event loop
4952 to poll them. */
4953 async_file_mark ();
4954 }
4955 }
4956 else
4957 {
4958 async_client_callback = callback;
4959 async_client_context = context;
4960 delete_file_handler (linux_nat_event_pipe[0]);
4961 linux_async_pipe (0);
4962 }
4963 return;
4964 }
4965
4966 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4967 event came out. */
4968
4969 static int
4970 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4971 {
4972 if (!lwp->stopped)
4973 {
4974 ptid_t ptid = lwp->ptid;
4975
4976 if (debug_linux_nat)
4977 fprintf_unfiltered (gdb_stdlog,
4978 "LNSL: running -> suspending %s\n",
4979 target_pid_to_str (lwp->ptid));
4980
4981
4982 if (lwp->last_resume_kind == resume_stop)
4983 {
4984 if (debug_linux_nat)
4985 fprintf_unfiltered (gdb_stdlog,
4986 "linux-nat: already stopping LWP %ld at "
4987 "GDB's request\n",
4988 ptid_get_lwp (lwp->ptid));
4989 return 0;
4990 }
4991
4992 stop_callback (lwp, NULL);
4993 lwp->last_resume_kind = resume_stop;
4994 }
4995 else
4996 {
4997 /* Already known to be stopped; do nothing. */
4998
4999 if (debug_linux_nat)
5000 {
5001 if (find_thread_ptid (lwp->ptid)->stop_requested)
5002 fprintf_unfiltered (gdb_stdlog,
5003 "LNSL: already stopped/stop_requested %s\n",
5004 target_pid_to_str (lwp->ptid));
5005 else
5006 fprintf_unfiltered (gdb_stdlog,
5007 "LNSL: already stopped/no "
5008 "stop_requested yet %s\n",
5009 target_pid_to_str (lwp->ptid));
5010 }
5011 }
5012 return 0;
5013 }
5014
5015 static void
5016 linux_nat_stop (ptid_t ptid)
5017 {
5018 if (non_stop)
5019 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5020 else
5021 linux_ops->to_stop (ptid);
5022 }
5023
5024 static void
5025 linux_nat_close (int quitting)
5026 {
5027 /* Unregister from the event loop. */
5028 if (linux_nat_is_async_p ())
5029 linux_nat_async (NULL, 0);
5030
5031 if (linux_ops->to_close)
5032 linux_ops->to_close (quitting);
5033 }
5034
5035 /* When requests are passed down from the linux-nat layer to the
5036 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5037 used. The address space pointer is stored in the inferior object,
5038 but the common code that is passed such ptid can't tell whether
5039 lwpid is a "main" process id or not (it assumes so). We reverse
5040 look up the "main" process id from the lwp here. */
5041
5042 static struct address_space *
5043 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5044 {
5045 struct lwp_info *lwp;
5046 struct inferior *inf;
5047 int pid;
5048
5049 pid = GET_LWP (ptid);
5050 if (GET_LWP (ptid) == 0)
5051 {
5052 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5053 tgid. */
5054 lwp = find_lwp_pid (ptid);
5055 pid = GET_PID (lwp->ptid);
5056 }
5057 else
5058 {
5059 /* A (pid,lwpid,0) ptid. */
5060 pid = GET_PID (ptid);
5061 }
5062
5063 inf = find_inferior_pid (pid);
5064 gdb_assert (inf != NULL);
5065 return inf->aspace;
5066 }
5067
5068 /* Return the cached value of the processor core for thread PTID. */
5069
5070 static int
5071 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5072 {
5073 struct lwp_info *info = find_lwp_pid (ptid);
5074
5075 if (info)
5076 return info->core;
5077 return -1;
5078 }
5079
5080 void
5081 linux_nat_add_target (struct target_ops *t)
5082 {
5083 /* Save the provided single-threaded target. We save this in a separate
5084 variable because another target we've inherited from (e.g. inf-ptrace)
5085 may have saved a pointer to T; we want to use it for the final
5086 process stratum target. */
5087 linux_ops_saved = *t;
5088 linux_ops = &linux_ops_saved;
5089
5090 /* Override some methods for multithreading. */
5091 t->to_create_inferior = linux_nat_create_inferior;
5092 t->to_attach = linux_nat_attach;
5093 t->to_detach = linux_nat_detach;
5094 t->to_resume = linux_nat_resume;
5095 t->to_wait = linux_nat_wait;
5096 t->to_pass_signals = linux_nat_pass_signals;
5097 t->to_xfer_partial = linux_nat_xfer_partial;
5098 t->to_kill = linux_nat_kill;
5099 t->to_mourn_inferior = linux_nat_mourn_inferior;
5100 t->to_thread_alive = linux_nat_thread_alive;
5101 t->to_pid_to_str = linux_nat_pid_to_str;
5102 t->to_thread_name = linux_nat_thread_name;
5103 t->to_has_thread_control = tc_schedlock;
5104 t->to_thread_address_space = linux_nat_thread_address_space;
5105 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5106 t->to_stopped_data_address = linux_nat_stopped_data_address;
5107
5108 t->to_can_async_p = linux_nat_can_async_p;
5109 t->to_is_async_p = linux_nat_is_async_p;
5110 t->to_supports_non_stop = linux_nat_supports_non_stop;
5111 t->to_async = linux_nat_async;
5112 t->to_terminal_inferior = linux_nat_terminal_inferior;
5113 t->to_terminal_ours = linux_nat_terminal_ours;
5114 t->to_close = linux_nat_close;
5115
5116 /* Methods for non-stop support. */
5117 t->to_stop = linux_nat_stop;
5118
5119 t->to_supports_multi_process = linux_nat_supports_multi_process;
5120
5121 t->to_supports_disable_randomization
5122 = linux_nat_supports_disable_randomization;
5123
5124 t->to_core_of_thread = linux_nat_core_of_thread;
5125
5126 /* We don't change the stratum; this target will sit at
5127 process_stratum and thread_db will sit at thread_stratum. This
5128 is a little strange, since this is a multi-threaded-capable
5129 target, but we want to be on the stack below thread_db, and we
5130 also want to be used for single-threaded processes. */
5131
5132 add_target (t);
5133 }
5134
5135 /* Register a method to call whenever a new thread is attached. */
5136 void
5137 linux_nat_set_new_thread (struct target_ops *t,
5138 void (*new_thread) (struct lwp_info *))
5139 {
5140 /* Save the pointer. We only support a single registered instance
5141 of the GNU/Linux native target, so we do not need to map this to
5142 T. */
5143 linux_nat_new_thread = new_thread;
5144 }
5145
5146 /* Register a method that converts a siginfo object between the layout
5147 that ptrace returns, and the layout in the architecture of the
5148 inferior. */
5149 void
5150 linux_nat_set_siginfo_fixup (struct target_ops *t,
5151 int (*siginfo_fixup) (siginfo_t *,
5152 gdb_byte *,
5153 int))
5154 {
5155 /* Save the pointer. */
5156 linux_nat_siginfo_fixup = siginfo_fixup;
5157 }
5158
5159 /* Register a method to call prior to resuming a thread. */
5160
5161 void
5162 linux_nat_set_prepare_to_resume (struct target_ops *t,
5163 void (*prepare_to_resume) (struct lwp_info *))
5164 {
5165 /* Save the pointer. */
5166 linux_nat_prepare_to_resume = prepare_to_resume;
5167 }
5168
5169 /* See linux-nat.h. */
5170
5171 int
5172 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
5173 {
5174 int pid;
5175
5176 pid = GET_LWP (ptid);
5177 if (pid == 0)
5178 pid = GET_PID (ptid);
5179
5180 errno = 0;
5181 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
5182 if (errno != 0)
5183 {
5184 memset (siginfo, 0, sizeof (*siginfo));
5185 return 0;
5186 }
5187 return 1;
5188 }
5189
5190 /* Provide a prototype to silence -Wmissing-prototypes. */
5191 extern initialize_file_ftype _initialize_linux_nat;
5192
5193 void
5194 _initialize_linux_nat (void)
5195 {
5196 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
5197 &debug_linux_nat, _("\
5198 Set debugging of GNU/Linux lwp module."), _("\
5199 Show debugging of GNU/Linux lwp module."), _("\
5200 Enables printf debugging output."),
5201 NULL,
5202 show_debug_linux_nat,
5203 &setdebuglist, &showdebuglist);
5204
5205 /* Save this mask as the default. */
5206 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5207
5208 /* Install a SIGCHLD handler. */
5209 sigchld_action.sa_handler = sigchld_handler;
5210 sigemptyset (&sigchld_action.sa_mask);
5211 sigchld_action.sa_flags = SA_RESTART;
5212
5213 /* Make it the default. */
5214 sigaction (SIGCHLD, &sigchld_action, NULL);
5215
5216 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5217 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5218 sigdelset (&suspend_mask, SIGCHLD);
5219
5220 sigemptyset (&blocked_mask);
5221 }
5222 \f
5223
5224 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5225 the GNU/Linux Threads library and therefore doesn't really belong
5226 here. */
5227
5228 /* Read variable NAME in the target and return its value if found.
5229 Otherwise return zero. It is assumed that the type of the variable
5230 is `int'. */
5231
5232 static int
5233 get_signo (const char *name)
5234 {
5235 struct minimal_symbol *ms;
5236 int signo;
5237
5238 ms = lookup_minimal_symbol (name, NULL, NULL);
5239 if (ms == NULL)
5240 return 0;
5241
5242 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5243 sizeof (signo)) != 0)
5244 return 0;
5245
5246 return signo;
5247 }
5248
5249 /* Return the set of signals used by the threads library in *SET. */
5250
5251 void
5252 lin_thread_get_thread_signals (sigset_t *set)
5253 {
5254 struct sigaction action;
5255 int restart, cancel;
5256
5257 sigemptyset (&blocked_mask);
5258 sigemptyset (set);
5259
5260 restart = get_signo ("__pthread_sig_restart");
5261 cancel = get_signo ("__pthread_sig_cancel");
5262
5263 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5264 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5265 not provide any way for the debugger to query the signal numbers -
5266 fortunately they don't change! */
5267
5268 if (restart == 0)
5269 restart = __SIGRTMIN;
5270
5271 if (cancel == 0)
5272 cancel = __SIGRTMIN + 1;
5273
5274 sigaddset (set, restart);
5275 sigaddset (set, cancel);
5276
5277 /* The GNU/Linux Threads library makes terminating threads send a
5278 special "cancel" signal instead of SIGCHLD. Make sure we catch
5279 those (to prevent them from terminating GDB itself, which is
5280 likely to be their default action) and treat them the same way as
5281 SIGCHLD. */
5282
5283 action.sa_handler = sigchld_handler;
5284 sigemptyset (&action.sa_mask);
5285 action.sa_flags = SA_RESTART;
5286 sigaction (cancel, &action, NULL);
5287
5288 /* We block the "cancel" signal throughout this code ... */
5289 sigaddset (&blocked_mask, cancel);
5290 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5291
5292 /* ... except during a sigsuspend. */
5293 sigdelset (&suspend_mask, cancel);
5294 }