1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2012 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "target.h"
23 #include "gdb_string.h"
24 #include "gdb_wait.h"
25 #include "gdb_assert.h"
26 #ifdef HAVE_TKILL_SYSCALL
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #endif
30 #include <sys/ptrace.h>
31 #include "linux-nat.h"
32 #include "linux-ptrace.h"
33 #include "linux-procfs.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51 #include "inf-loop.h"
52 #include "event-loop.h"
53 #include "event-top.h"
54 #include <pwd.h>
55 #include <sys/types.h>
56 #include "gdb_dirent.h"
57 #include "xml-support.h"
58 #include "terminal.h"
59 #include <sys/vfs.h>
60 #include "solib.h"
61 #include "linux-osdata.h"
62 #include "linux-tdep.h"
63 #include "symfile.h"
64 #include "agent.h"
65 #include "tracepoint.h"
66 #include "exceptions.h"
67 #include "linux-ptrace.h"
68 #include "buffer.h"
69
70 #ifndef SPUFS_MAGIC
71 #define SPUFS_MAGIC 0x23c9b64e
72 #endif
73
74 #ifdef HAVE_PERSONALITY
75 # include <sys/personality.h>
76 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
77 # define ADDR_NO_RANDOMIZE 0x0040000
78 # endif
79 #endif /* HAVE_PERSONALITY */
80
81 /* This comment documents the high-level logic of this file.
82
83 Waiting for events in sync mode
84 ===============================
85
86 When waiting for an event in a specific thread, we just use waitpid, passing
87 the specific pid, and not passing WNOHANG.
88
89 When waiting for an event in all threads, waitpid alone is not quite good
90 enough. Prior to version 2.4, Linux could wait for events either in the
91 main thread or in secondary threads, but not in both (2.4 added the __WALL
92 flag). So, if we use blocking waitpid, we might miss an event. The
93 solution is to use non-blocking waitpid, together with sigsuspend. First,
94 we use non-blocking waitpid to get an event in the main process, if any.
95 Second, we use non-blocking waitpid with the __WCLONE flag to check for
96 events in cloned processes. If nothing is found, we use sigsuspend to
97 wait for SIGCHLD. When SIGCHLD arrives, it means something happened to a
98 child process -- and SIGCHLD will be delivered both for events in the main
99 debugged process and in cloned processes. As soon as we know there's an
100 event, we get back to calling non-blocking waitpid with and without __WCLONE.
101
102 Note that SIGCHLD should be blocked between the waitpid and sigsuspend
103 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
104 while it is blocked, the signal becomes pending and sigsuspend
105 immediately notices it and returns.
106
107 Waiting for events in async mode
108 ================================
109
110 In async mode, GDB should always be ready to handle both user input
111 and target events, so neither blocking waitpid nor sigsuspend are
112 viable options. Instead, we should asynchronously notify the GDB main
113 event loop whenever there's an unprocessed event from the target. We
114 detect asynchronous target events by handling SIGCHLD signals. To
115 notify the event loop about target events, the self-pipe trick is used
116 --- a pipe is registered as a waitable event source in the event loop,
117 the event loop selects/polls on the read end of this pipe (as well as
118 on other event sources, e.g., stdin), and the SIGCHLD handler writes a
119 byte to this pipe. This is more portable than relying on
120 pselect/ppoll, since on kernels that lack those syscalls, libc
121 emulates them with select/poll+sigprocmask, and that is racy
122 (a.k.a. plain broken).
123
124 Obviously, failing to notify the event loop when there is a target
125 event is bad. OTOH, if we notify the event loop when there's no
126 event from the target, linux_nat_wait will detect that there's no real
127 event to report, and return an event of type TARGET_WAITKIND_IGNORE.
128 This is mostly harmless, but it wastes time and is better avoided.
129
130 The main design point is that every time GDB is outside linux-nat.c,
131 we have a SIGCHLD handler installed that is called when something
132 happens to the target and notifies the GDB event loop. Whenever GDB
133 core decides to handle the event, and calls into linux-nat.c, we
134 process things as in sync mode, except that we never block in
135 sigsuspend.
136
137 While processing an event, we may end up momentarily blocked in
138 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
139 return quickly. E.g., in all-stop mode, before reporting to the core
140 that an LWP hit a breakpoint, all LWPs are stopped by sending them
141 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
142 Note that this is different from blocking indefinitely waiting for the
143 next event --- here, we're already handling an event.
144
145 Use of signals
146 ==============
147
148 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
149 signal is not entirely significant; we just need a signal to be delivered,
150 so that we can intercept it. SIGSTOP's advantage is that it cannot be
151 blocked. A disadvantage is that it is not a real-time signal, so it can only
152 be queued once; we do not keep track of other sources of SIGSTOP.
153
154 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
155 use them, because they have special behavior when the signal is generated -
156 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
157 kills the entire thread group.
158
159 A delivered SIGSTOP would stop the entire thread group, not just the thread we
160 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
161 cancel it (by PTRACE_CONT without passing SIGSTOP).
162
163 We could use a real-time signal instead. This would solve those problems; we
164 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
165 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
166 generates it, and there are races with trying to find a signal that is not
167 blocked. */
168
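/* For illustration only, and not part of the original file: a minimal
   sketch of the sync-mode wait loop described in the comment above.
   It assumes SIGCHLD is blocked on entry and that SUSPEND_MASK is a
   signal mask with SIGCHLD unblocked, as set up later in this file.
   The helper name and the bare waitpid calls are simplifications of
   the real logic in linux_nat_wait.  */

static int
example_sync_wait (int *statusp, const sigset_t *suspend_mask)
{
  int pid;

  for (;;)
    {
      /* Check for an event in the main process, without blocking.  */
      pid = waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
        return pid;

      /* Check for an event in cloned processes, without blocking.  */
      pid = waitpid (-1, statusp, __WCLONE | WNOHANG);
      if (pid > 0)
        return pid;

      /* Nothing yet; atomically unblock SIGCHLD and wait.  A SIGCHLD
         that arrived while blocked is already pending, so sigsuspend
         notices it and returns immediately.  */
      sigsuspend (suspend_mask);
    }
}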
169 #ifndef O_LARGEFILE
170 #define O_LARGEFILE 0
171 #endif
172
173 /* Unlike other extended result codes, WSTOPSIG (status) on
174 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
175 instead SIGTRAP with bit 7 set. */
176 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
177
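/* For illustration only (not part of the original file): a hedged
   sketch of how a wait status is classified once
   PTRACE_O_TRACESYSGOOD is in effect -- syscall stops report SIGTRAP
   with bit 7 set, while ordinary SIGTRAP stops (e.g., breakpoints)
   do not.  */

static int
example_is_syscall_stop (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP;
}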
178 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
179 the use of the multi-threaded target. */
180 static struct target_ops *linux_ops;
181 static struct target_ops linux_ops_saved;
182
183 /* The method to call, if any, when a new thread is attached. */
184 static void (*linux_nat_new_thread) (struct lwp_info *);
185
186 /* Hook to call prior to resuming a thread. */
187 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
188
189 /* The method to call, if any, when the siginfo object needs to be
190 converted between the layout returned by ptrace, and the layout in
191 the architecture of the inferior. */
192 static int (*linux_nat_siginfo_fixup) (siginfo_t *,
193 gdb_byte *,
194 int);
195
196 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
197 Called by our to_xfer_partial. */
198 static LONGEST (*super_xfer_partial) (struct target_ops *,
199 enum target_object,
200 const char *, gdb_byte *,
201 const gdb_byte *,
202 ULONGEST, LONGEST);
203
204 static int debug_linux_nat;
205 static void
206 show_debug_linux_nat (struct ui_file *file, int from_tty,
207 struct cmd_list_element *c, const char *value)
208 {
209 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
210 value);
211 }
212
213 struct simple_pid_list
214 {
215 int pid;
216 int status;
217 struct simple_pid_list *next;
218 };
219 struct simple_pid_list *stopped_pids;
220
221 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
222 can not be used, 1 if it can. */
223
224 static int linux_supports_tracefork_flag = -1;
225
226 /* This variable is a tri-state flag: -1 for unknown, 0 if
227 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
228
229 static int linux_supports_tracesysgood_flag = -1;
230
231 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
232 PTRACE_O_TRACEVFORKDONE. */
233
234 static int linux_supports_tracevforkdone_flag = -1;
235
236 /* Stores the currently used ptrace() options. */
237 static int current_ptrace_options = 0;
238
239 /* Async mode support. */
240
241 /* The read/write ends of the pipe registered as waitable file in the
242 event loop. */
243 static int linux_nat_event_pipe[2] = { -1, -1 };
244
245 /* Flush the event pipe. */
246
247 static void
248 async_file_flush (void)
249 {
250 int ret;
251 char buf;
252
253 do
254 {
255 ret = read (linux_nat_event_pipe[0], &buf, 1);
256 }
257 while (ret >= 0 || (ret == -1 && errno == EINTR));
258 }
259
260 /* Put something (anything, doesn't matter what, or how much) in the event
261 pipe, so that the select/poll in the event-loop realizes we have
262 something to process. */
263
264 static void
265 async_file_mark (void)
266 {
267 int ret;
268
269 /* It doesn't really matter what the pipe contains, as long as we end
270 up with something in it. Might as well flush the previous
271 left-overs. */
272 async_file_flush ();
273
274 do
275 {
276 ret = write (linux_nat_event_pipe[1], "+", 1);
277 }
278 while (ret == -1 && errno == EINTR);
279
280 /* Ignore EAGAIN. If the pipe is full, the event loop will already
281 be awakened anyway. */
282 }
283
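/* For illustration only: a hedged sketch of the SIGCHLD-handler side
   of the self-pipe trick described at the top of this file.  The real
   handler is installed elsewhere; this hypothetical version only
   shows the essential shape: use only async-signal-safe calls,
   preserve errno, and mark the event pipe so the event loop's
   select/poll wakes up.  */

static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;

  if (signo == SIGCHLD && linux_nat_event_pipe[0] != -1)
    async_file_mark ();	/* Wake up the event loop.  */

  errno = old_errno;
}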
284 static void linux_nat_async (void (*callback)
285 (enum inferior_event_type event_type,
286 void *context),
287 void *context);
288 static int kill_lwp (int lwpid, int signo);
289
290 static int stop_callback (struct lwp_info *lp, void *data);
291
292 static void block_child_signals (sigset_t *prev_mask);
293 static void restore_child_signals_mask (sigset_t *prev_mask);
294
295 struct lwp_info;
296 static struct lwp_info *add_lwp (ptid_t ptid);
297 static void purge_lwp_list (int pid);
298 static void delete_lwp (ptid_t ptid);
299 static struct lwp_info *find_lwp_pid (ptid_t ptid);
300
301 \f
302 /* Trivial list manipulation functions to keep track of a list of
303 new stopped processes. */
304 static void
305 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
306 {
307 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
308
309 new_pid->pid = pid;
310 new_pid->status = status;
311 new_pid->next = *listp;
312 *listp = new_pid;
313 }
314
315 static int
316 in_pid_list_p (struct simple_pid_list *list, int pid)
317 {
318 struct simple_pid_list *p;
319
320 for (p = list; p != NULL; p = p->next)
321 if (p->pid == pid)
322 return 1;
323 return 0;
324 }
325
326 static int
327 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
328 {
329 struct simple_pid_list **p;
330
331 for (p = listp; *p != NULL; p = &(*p)->next)
332 if ((*p)->pid == pid)
333 {
334 struct simple_pid_list *next = (*p)->next;
335
336 *statusp = (*p)->status;
337 xfree (*p);
338 *p = next;
339 return 1;
340 }
341 return 0;
342 }
343
344 \f
345 /* A helper function for linux_test_for_tracefork, called after fork (). */
346
347 static void
348 linux_tracefork_child (void)
349 {
350 ptrace (PTRACE_TRACEME, 0, 0, 0);
351 kill (getpid (), SIGSTOP);
352 fork ();
353 _exit (0);
354 }
355
356 /* Wrapper function for waitpid which handles EINTR. */
357
358 static int
359 my_waitpid (int pid, int *statusp, int flags)
360 {
361 int ret;
362
363 do
364 {
365 ret = waitpid (pid, statusp, flags);
366 }
367 while (ret == -1 && errno == EINTR);
368
369 return ret;
370 }
371
372 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
373
374 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
375 we know that the feature is not available. This may change the tracing
376 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
377
378 However, if it succeeds, we don't know for sure that the feature is
379 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
380 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
381 fork tracing, and let it fork. If the process exits, we assume that we
382 can't use TRACEFORK; if we get the fork notification, and we can extract
383 the new child's PID, then we assume that we can. */
384
385 static void
386 linux_test_for_tracefork (int original_pid)
387 {
388 int child_pid, ret, status;
389 long second_pid;
390 sigset_t prev_mask;
391
392 /* We don't want those ptrace calls to be interrupted. */
393 block_child_signals (&prev_mask);
394
395 linux_supports_tracefork_flag = 0;
396 linux_supports_tracevforkdone_flag = 0;
397
398 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
399 if (ret != 0)
400 {
401 restore_child_signals_mask (&prev_mask);
402 return;
403 }
404
405 child_pid = fork ();
406 if (child_pid == -1)
407 perror_with_name (("fork"));
408
409 if (child_pid == 0)
410 linux_tracefork_child ();
411
412 ret = my_waitpid (child_pid, &status, 0);
413 if (ret == -1)
414 perror_with_name (("waitpid"));
415 else if (ret != child_pid)
416 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
417 if (! WIFSTOPPED (status))
418 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
419 status);
420
421 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
422 if (ret != 0)
423 {
424 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
425 if (ret != 0)
426 {
427 warning (_("linux_test_for_tracefork: failed to kill child"));
428 restore_child_signals_mask (&prev_mask);
429 return;
430 }
431
432 ret = my_waitpid (child_pid, &status, 0);
433 if (ret != child_pid)
434 warning (_("linux_test_for_tracefork: failed "
435 "to wait for killed child"));
436 else if (!WIFSIGNALED (status))
437 warning (_("linux_test_for_tracefork: unexpected "
438 "wait status 0x%x from killed child"), status);
439
440 restore_child_signals_mask (&prev_mask);
441 return;
442 }
443
444 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
445 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
446 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
447 linux_supports_tracevforkdone_flag = (ret == 0);
448
449 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
450 if (ret != 0)
451 warning (_("linux_test_for_tracefork: failed to resume child"));
452
453 ret = my_waitpid (child_pid, &status, 0);
454
455 if (ret == child_pid && WIFSTOPPED (status)
456 && status >> 16 == PTRACE_EVENT_FORK)
457 {
458 second_pid = 0;
459 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
460 if (ret == 0 && second_pid != 0)
461 {
462 int second_status;
463
464 linux_supports_tracefork_flag = 1;
465 my_waitpid (second_pid, &second_status, 0);
466 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
467 if (ret != 0)
468 warning (_("linux_test_for_tracefork: "
469 "failed to kill second child"));
470 my_waitpid (second_pid, &status, 0);
471 }
472 }
473 else
474 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
475 "(%d, status 0x%x)"), ret, status);
476
477 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
478 if (ret != 0)
479 warning (_("linux_test_for_tracefork: failed to kill child"));
480 my_waitpid (child_pid, &status, 0);
481
482 restore_child_signals_mask (&prev_mask);
483 }
484
485 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
486
487 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
488 we know that the feature is not available. This may change the tracing
489 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
490
491 static void
492 linux_test_for_tracesysgood (int original_pid)
493 {
494 int ret;
495 sigset_t prev_mask;
496
497 /* We don't want those ptrace calls to be interrupted. */
498 block_child_signals (&prev_mask);
499
500 linux_supports_tracesysgood_flag = 0;
501
502 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
503 if (ret != 0)
504 goto out;
505
506 linux_supports_tracesysgood_flag = 1;
507 out:
508 restore_child_signals_mask (&prev_mask);
509 }
510
511 /* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
512 This function also sets linux_supports_tracesysgood_flag. */
513
514 static int
515 linux_supports_tracesysgood (int pid)
516 {
517 if (linux_supports_tracesysgood_flag == -1)
518 linux_test_for_tracesysgood (pid);
519 return linux_supports_tracesysgood_flag;
520 }
521
522 /* Return non-zero iff we have tracefork functionality available.
523 This function also sets linux_supports_tracefork_flag. */
524
525 static int
526 linux_supports_tracefork (int pid)
527 {
528 if (linux_supports_tracefork_flag == -1)
529 linux_test_for_tracefork (pid);
530 return linux_supports_tracefork_flag;
531 }
532
533 static int
534 linux_supports_tracevforkdone (int pid)
535 {
536 if (linux_supports_tracefork_flag == -1)
537 linux_test_for_tracefork (pid);
538 return linux_supports_tracevforkdone_flag;
539 }
540
541 static void
542 linux_enable_tracesysgood (ptid_t ptid)
543 {
544 int pid = ptid_get_lwp (ptid);
545
546 if (pid == 0)
547 pid = ptid_get_pid (ptid);
548
549 if (linux_supports_tracesysgood (pid) == 0)
550 return;
551
552 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
553
554 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
555 }
556
557 \f
558 void
559 linux_enable_event_reporting (ptid_t ptid)
560 {
561 int pid = ptid_get_lwp (ptid);
562
563 if (pid == 0)
564 pid = ptid_get_pid (ptid);
565
566 if (! linux_supports_tracefork (pid))
567 return;
568
569 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
570 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
571
572 if (linux_supports_tracevforkdone (pid))
573 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
574
575 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
576 read-only process state. */
577
578 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
579 }
580
581 static void
582 linux_child_post_attach (int pid)
583 {
584 linux_enable_event_reporting (pid_to_ptid (pid));
585 linux_enable_tracesysgood (pid_to_ptid (pid));
586 }
587
588 static void
589 linux_child_post_startup_inferior (ptid_t ptid)
590 {
591 linux_enable_event_reporting (ptid);
592 linux_enable_tracesysgood (ptid);
593 }
594
595 /* Return the number of known LWPs in the tgid given by PID. */
596
597 static int
598 num_lwps (int pid)
599 {
600 int count = 0;
601 struct lwp_info *lp;
602
603 for (lp = lwp_list; lp; lp = lp->next)
604 if (ptid_get_pid (lp->ptid) == pid)
605 count++;
606
607 return count;
608 }
609
610 /* Call delete_lwp with a prototype compatible with make_cleanup. */
611
612 static void
613 delete_lwp_cleanup (void *lp_voidp)
614 {
615 struct lwp_info *lp = lp_voidp;
616
617 delete_lwp (lp->ptid);
618 }
619
620 static int
621 linux_child_follow_fork (struct target_ops *ops, int follow_child)
622 {
623 sigset_t prev_mask;
624 int has_vforked;
625 int parent_pid, child_pid;
626
627 block_child_signals (&prev_mask);
628
629 has_vforked = (inferior_thread ()->pending_follow.kind
630 == TARGET_WAITKIND_VFORKED);
631 parent_pid = ptid_get_lwp (inferior_ptid);
632 if (parent_pid == 0)
633 parent_pid = ptid_get_pid (inferior_ptid);
634 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
635
636 if (!detach_fork)
637 linux_enable_event_reporting (pid_to_ptid (child_pid));
638
639 if (has_vforked
640 && !non_stop /* Non-stop always resumes both branches. */
641 && (!target_is_async_p () || sync_execution)
642 && !(follow_child || detach_fork || sched_multi))
643 {
644 /* The parent stays blocked inside the vfork syscall until the
645 child execs or exits. If we don't let the child run, then
646 the parent stays blocked. If we're telling the parent to run
647 in the foreground, the user will not be able to ctrl-c to get
648 back the terminal, effectively hanging the debug session. */
649 fprintf_filtered (gdb_stderr, _("\
650 Can not resume the parent process over vfork in the foreground while\n\
651 holding the child stopped. Try \"set detach-on-fork\" or \
652 \"set schedule-multiple\".\n"));
653 /* FIXME output string > 80 columns. */
654 return 1;
655 }
656
657 if (! follow_child)
658 {
659 struct lwp_info *child_lp = NULL;
660
661 /* We're already attached to the parent, by default. */
662
663 /* Detach new forked process? */
664 if (detach_fork)
665 {
666 struct cleanup *old_chain;
667
668 /* Before detaching from the child, remove all breakpoints
669 from it. If we forked, then this has already been taken
670 care of by infrun.c. If we vforked however, any
671 breakpoint inserted in the parent is visible in the
672 child, even those added while stopped in a vfork
673 catchpoint. This will remove the breakpoints from the
674 parent also, but they'll be reinserted below. */
675 if (has_vforked)
676 {
677 /* keep breakpoints list in sync. */
678 remove_breakpoints_pid (GET_PID (inferior_ptid));
679 }
680
681 if (info_verbose || debug_linux_nat)
682 {
683 target_terminal_ours ();
684 fprintf_filtered (gdb_stdlog,
685 "Detaching after fork from "
686 "child process %d.\n",
687 child_pid);
688 }
689
690 old_chain = save_inferior_ptid ();
691 inferior_ptid = ptid_build (child_pid, child_pid, 0);
692
693 child_lp = add_lwp (inferior_ptid);
694 child_lp->stopped = 1;
695 child_lp->last_resume_kind = resume_stop;
696 make_cleanup (delete_lwp_cleanup, child_lp);
697
698 /* CHILD_LP has a new PID, therefore linux_nat_new_thread is not called
699 for it. See i386_inferior_data_get for the Linux kernel specifics.
700 Ensure linux_nat_prepare_to_resume will reset the hardware debug
701 registers. That is normally done by the linux_nat_new_thread call,
702 which is skipped in add_lwp above for the first lwp of a pid. */
703 gdb_assert (num_lwps (GET_PID (child_lp->ptid)) == 1);
704 if (linux_nat_new_thread != NULL)
705 linux_nat_new_thread (child_lp);
706
707 if (linux_nat_prepare_to_resume != NULL)
708 linux_nat_prepare_to_resume (child_lp);
709 ptrace (PTRACE_DETACH, child_pid, 0, 0);
710
711 do_cleanups (old_chain);
712 }
713 else
714 {
715 struct inferior *parent_inf, *child_inf;
716 struct cleanup *old_chain;
717
718 /* Add process to GDB's tables. */
719 child_inf = add_inferior (child_pid);
720
721 parent_inf = current_inferior ();
722 child_inf->attach_flag = parent_inf->attach_flag;
723 copy_terminal_info (child_inf, parent_inf);
724
725 old_chain = save_inferior_ptid ();
726 save_current_program_space ();
727
728 inferior_ptid = ptid_build (child_pid, child_pid, 0);
729 add_thread (inferior_ptid);
730 child_lp = add_lwp (inferior_ptid);
731 child_lp->stopped = 1;
732 child_lp->last_resume_kind = resume_stop;
733 child_inf->symfile_flags = SYMFILE_NO_READ;
734
735 /* If this is a vfork child, then the address-space is
736 shared with the parent. */
737 if (has_vforked)
738 {
739 child_inf->pspace = parent_inf->pspace;
740 child_inf->aspace = parent_inf->aspace;
741
742 /* The parent will be frozen until the child is done
743 with the shared region. Keep track of the
744 parent. */
745 child_inf->vfork_parent = parent_inf;
746 child_inf->pending_detach = 0;
747 parent_inf->vfork_child = child_inf;
748 parent_inf->pending_detach = 0;
749 }
750 else
751 {
752 child_inf->aspace = new_address_space ();
753 child_inf->pspace = add_program_space (child_inf->aspace);
754 child_inf->removable = 1;
755 set_current_program_space (child_inf->pspace);
756 clone_program_space (child_inf->pspace, parent_inf->pspace);
757
758 /* Let the shared library layer (solib-svr4) learn about
759 this new process, relocate the cloned exec, pull in
760 shared libraries, and install the solib event
761 breakpoint. If a "cloned-VM" event was propagated
762 better throughout the core, this wouldn't be
763 required. */
764 solib_create_inferior_hook (0);
765 }
766
767 /* Let the thread_db layer learn about this new process. */
768 check_for_thread_db ();
769
770 do_cleanups (old_chain);
771 }
772
773 if (has_vforked)
774 {
775 struct lwp_info *parent_lp;
776 struct inferior *parent_inf;
777
778 parent_inf = current_inferior ();
779
780 /* If we detached from the child, then we have to be careful
781 to not insert breakpoints in the parent until the child
782 is done with the shared memory region. However, if we're
783 staying attached to the child, then we can and should
784 insert breakpoints, so that we can debug it. A
785 subsequent child exec or exit is enough to know when
786 the child stops using the parent's address space. */
787 parent_inf->waiting_for_vfork_done = detach_fork;
788 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
789
790 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
791 gdb_assert (linux_supports_tracefork_flag >= 0);
792
793 if (linux_supports_tracevforkdone (0))
794 {
795 if (debug_linux_nat)
796 fprintf_unfiltered (gdb_stdlog,
797 "LCFF: waiting for VFORK_DONE on %d\n",
798 parent_pid);
799 parent_lp->stopped = 1;
800
801 /* We'll handle the VFORK_DONE event like any other
802 event, in target_wait. */
803 }
804 else
805 {
806 /* We can't insert breakpoints until the child has
807 finished with the shared memory region. We need to
808 wait until that happens. Ideal would be to just
809 call:
810 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
811 - waitpid (parent_pid, &status, __WALL);
812 However, most architectures can't handle a syscall
813 being traced on the way out if it wasn't traced on
814 the way in.
815
816 We might also think to loop, continuing the child
817 until it exits or gets a SIGTRAP. One problem is
818 that the child might call ptrace with PTRACE_TRACEME.
819
820 There's no simple and reliable way to figure out when
821 the vforked child will be done with its copy of the
822 shared memory. We could step it out of the syscall,
823 two instructions, let it go, and then single-step the
824 parent once. When we have hardware single-step, this
825 would work; with software single-step it could still
826 be made to work but we'd have to be able to insert
827 single-step breakpoints in the child, and we'd have
828 to insert -just- the single-step breakpoint in the
829 parent. Very awkward.
830
831 In the end, the best we can do is to make sure it
832 runs for a little while. Hopefully it will be out of
833 range of any breakpoints we reinsert. Usually this
834 is only the single-step breakpoint at vfork's return
835 point. */
836
837 if (debug_linux_nat)
838 fprintf_unfiltered (gdb_stdlog,
839 "LCFF: no VFORK_DONE "
840 "support, sleeping a bit\n");
841
842 usleep (10000);
843
844 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
845 and leave it pending. The next linux_nat_resume call
846 will notice a pending event and bypass actually
847 resuming the inferior. */
848 parent_lp->status = 0;
849 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
850 parent_lp->stopped = 1;
851
852 /* If we're in async mode, we need to tell the event loop
853 there's something here to process. */
854 if (target_can_async_p ())
855 async_file_mark ();
856 }
857 }
858 }
859 else
860 {
861 struct inferior *parent_inf, *child_inf;
862 struct lwp_info *child_lp;
863 struct program_space *parent_pspace;
864
865 if (info_verbose || debug_linux_nat)
866 {
867 target_terminal_ours ();
868 if (has_vforked)
869 fprintf_filtered (gdb_stdlog,
870 _("Attaching after process %d "
871 "vfork to child process %d.\n"),
872 parent_pid, child_pid);
873 else
874 fprintf_filtered (gdb_stdlog,
875 _("Attaching after process %d "
876 "fork to child process %d.\n"),
877 parent_pid, child_pid);
878 }
879
880 /* Add the new inferior first, so that the target_detach below
881 doesn't unpush the target. */
882
883 child_inf = add_inferior (child_pid);
884
885 parent_inf = current_inferior ();
886 child_inf->attach_flag = parent_inf->attach_flag;
887 copy_terminal_info (child_inf, parent_inf);
888
889 parent_pspace = parent_inf->pspace;
890
891 /* If we're vforking, we want to hold on to the parent until the
892 child exits or execs. At child exec or exit time we can
893 remove the old breakpoints from the parent and detach or
894 resume debugging it. Otherwise, detach the parent now; we'll
895 want to reuse its program/address spaces, but we can't set
896 them to the child before removing breakpoints from the
897 parent, otherwise, the breakpoints module could decide to
898 remove breakpoints from the wrong process (since they'd be
899 assigned to the same address space). */
900
901 if (has_vforked)
902 {
903 gdb_assert (child_inf->vfork_parent == NULL);
904 gdb_assert (parent_inf->vfork_child == NULL);
905 child_inf->vfork_parent = parent_inf;
906 child_inf->pending_detach = 0;
907 parent_inf->vfork_child = child_inf;
908 parent_inf->pending_detach = detach_fork;
909 parent_inf->waiting_for_vfork_done = 0;
910 }
911 else if (detach_fork)
912 target_detach (NULL, 0);
913
914 /* Note that the detach above makes PARENT_INF dangling. */
915
916 /* Add the child thread to the appropriate lists, and switch to
917 this new thread, before cloning the program space, and
918 informing the solib layer about this new process. */
919
920 inferior_ptid = ptid_build (child_pid, child_pid, 0);
921 add_thread (inferior_ptid);
922 child_lp = add_lwp (inferior_ptid);
923 child_lp->stopped = 1;
924 child_lp->last_resume_kind = resume_stop;
925
926 /* If this is a vfork child, then the address-space is shared
927 with the parent. If we detached from the parent, then we can
928 reuse the parent's program/address spaces. */
929 if (has_vforked || detach_fork)
930 {
931 child_inf->pspace = parent_pspace;
932 child_inf->aspace = child_inf->pspace->aspace;
933 }
934 else
935 {
936 child_inf->aspace = new_address_space ();
937 child_inf->pspace = add_program_space (child_inf->aspace);
938 child_inf->removable = 1;
939 child_inf->symfile_flags = SYMFILE_NO_READ;
940 set_current_program_space (child_inf->pspace);
941 clone_program_space (child_inf->pspace, parent_pspace);
942
943 /* Let the shared library layer (solib-svr4) learn about
944 this new process, relocate the cloned exec, pull in
945 shared libraries, and install the solib event breakpoint.
946 If a "cloned-VM" event was propagated better throughout
947 the core, this wouldn't be required. */
948 solib_create_inferior_hook (0);
949 }
950
951 /* Let the thread_db layer learn about this new process. */
952 check_for_thread_db ();
953 }
954
955 restore_child_signals_mask (&prev_mask);
956 return 0;
957 }
958
959 \f
960 static int
961 linux_child_insert_fork_catchpoint (int pid)
962 {
963 return !linux_supports_tracefork (pid);
964 }
965
966 static int
967 linux_child_remove_fork_catchpoint (int pid)
968 {
969 return 0;
970 }
971
972 static int
973 linux_child_insert_vfork_catchpoint (int pid)
974 {
975 return !linux_supports_tracefork (pid);
976 }
977
978 static int
979 linux_child_remove_vfork_catchpoint (int pid)
980 {
981 return 0;
982 }
983
984 static int
985 linux_child_insert_exec_catchpoint (int pid)
986 {
987 return !linux_supports_tracefork (pid);
988 }
989
990 static int
991 linux_child_remove_exec_catchpoint (int pid)
992 {
993 return 0;
994 }
995
996 static int
997 linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
998 int table_size, int *table)
999 {
1000 if (!linux_supports_tracesysgood (pid))
1001 return 1;
1002
1003 /* On GNU/Linux, we ignore the arguments. It means that we only
1004 enable the syscall catchpoints, but do not disable them.
1005
1006 Also, we do not use the `table' information because we do not
1007 filter system calls here. We let GDB do the logic for us. */
1008 return 0;
1009 }
1010
1011 /* On GNU/Linux there are no real LWPs. The closest thing to LWPs
1012 are processes sharing the same VM space. A multi-threaded process
1013 is basically a group of such processes. However, such a grouping
1014 is almost entirely a user-space issue; the kernel doesn't enforce
1015 such a grouping at all (this might change in the future). In
1016 general, we'll rely on the threads library (i.e. the GNU/Linux
1017 Threads library) to provide such a grouping.
1018
1019 It is perfectly possible to write a multi-threaded application
1020 without the assistance of a threads library, by using the clone
1021 system call directly. This module should be able to give some
1022 rudimentary support for debugging such applications if developers
1023 specify the CLONE_PTRACE flag in the clone system call, and are
1024 using the Linux kernel 2.4 or above.
1025
1026 Note that there are some peculiarities in GNU/Linux that affect
1027 this code:
1028
1029 - In general one should specify the __WCLONE flag to waitpid in
1030 order to make it report events for any of the cloned processes
1031 (and leave it out for the initial process). However, if a cloned
1032 process has exited, the exit status is only reported if the
1033 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
1034 we cannot use it since GDB must work on older systems too.
1035
1036 - When a traced, cloned process exits and is waited for by the
1037 debugger, the kernel reassigns it to the original parent and
1038 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
1039 library doesn't notice this, which leads to the "zombie problem":
1040 When debugged, a multi-threaded process that spawns a lot of
1041 threads will run out of processes, even if the threads exit,
1042 because the "zombies" stay around. */
1043
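/* For illustration only (not part of the original file): a hedged
   sketch of the __WCLONE quirk described above.  When waiting for a
   specific LWP whose origin we don't know, one waitpid variant can
   fail with ECHILD while the other succeeds, so we simply try both.
   Compare the similar fallback in linux_nat_post_attach_wait below.  */

static pid_t
example_wait_lwp (pid_t lwpid, int *statusp)
{
  pid_t ret = my_waitpid (lwpid, statusp, 0);

  if (ret == -1 && errno == ECHILD)
    /* Not reported as an ordinary child; retry treating it as a
       cloned process.  */
    ret = my_waitpid (lwpid, statusp, __WCLONE);

  return ret;
}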
1044 /* List of known LWPs. */
1045 struct lwp_info *lwp_list;
1046 \f
1047
1048 /* Original signal mask. */
1049 static sigset_t normal_mask;
1050
1051 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1052 _initialize_linux_nat. */
1053 static sigset_t suspend_mask;
1054
1055 /* Signals to block to make sigsuspend work. */
1056 static sigset_t blocked_mask;
1057
1058 /* SIGCHLD action. */
1059 struct sigaction sigchld_action;
1060
1061 /* Block child signals (SIGCHLD and linux threads signals), and store
1062 the previous mask in PREV_MASK. */
1063
1064 static void
1065 block_child_signals (sigset_t *prev_mask)
1066 {
1067 /* Make sure SIGCHLD is blocked. */
1068 if (!sigismember (&blocked_mask, SIGCHLD))
1069 sigaddset (&blocked_mask, SIGCHLD);
1070
1071 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1072 }
1073
1074 /* Restore child signals mask, previously returned by
1075 block_child_signals. */
1076
1077 static void
1078 restore_child_signals_mask (sigset_t *prev_mask)
1079 {
1080 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1081 }
1082
1083 /* Mask of signals to pass directly to the inferior. */
1084 static sigset_t pass_mask;
1085
1086 /* Update signals to pass to the inferior. */
1087 static void
1088 linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1089 {
1090 int signo;
1091
1092 sigemptyset (&pass_mask);
1093
1094 for (signo = 1; signo < NSIG; signo++)
1095 {
1096 int target_signo = gdb_signal_from_host (signo);
1097 if (target_signo < numsigs && pass_signals[target_signo])
1098 sigaddset (&pass_mask, signo);
1099 }
1100 }
1101
1102 \f
1103
1104 /* Prototypes for local functions. */
1105 static int stop_wait_callback (struct lwp_info *lp, void *data);
1106 static int linux_thread_alive (ptid_t ptid);
1107 static char *linux_child_pid_to_exec_file (int pid);
1108
1109 \f
1110 /* Convert wait status STATUS to a string. Used for printing debug
1111 messages only. */
1112
1113 static char *
1114 status_to_str (int status)
1115 {
1116 static char buf[64];
1117
1118 if (WIFSTOPPED (status))
1119 {
1120 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1121 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1122 strsignal (SIGTRAP));
1123 else
1124 snprintf (buf, sizeof (buf), "%s (stopped)",
1125 strsignal (WSTOPSIG (status)));
1126 }
1127 else if (WIFSIGNALED (status))
1128 snprintf (buf, sizeof (buf), "%s (terminated)",
1129 strsignal (WTERMSIG (status)));
1130 else
1131 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1132
1133 return buf;
1134 }
1135
1136 /* Destroy and free LP. */
1137
1138 static void
1139 lwp_free (struct lwp_info *lp)
1140 {
1141 xfree (lp->arch_private);
1142 xfree (lp);
1143 }
1144
1145 /* Remove all LWPs belonging to PID from the lwp list. */
1146
1147 static void
1148 purge_lwp_list (int pid)
1149 {
1150 struct lwp_info *lp, *lpprev, *lpnext;
1151
1152 lpprev = NULL;
1153
1154 for (lp = lwp_list; lp; lp = lpnext)
1155 {
1156 lpnext = lp->next;
1157
1158 if (ptid_get_pid (lp->ptid) == pid)
1159 {
1160 if (lp == lwp_list)
1161 lwp_list = lp->next;
1162 else
1163 lpprev->next = lp->next;
1164
1165 lwp_free (lp);
1166 }
1167 else
1168 lpprev = lp;
1169 }
1170 }
1171
1172 /* Add the LWP specified by PTID to the list. Return a pointer to the
1173 structure describing the new LWP. The LWP should already be stopped
1174 (with an exception for the very first LWP). */
1175
1176 static struct lwp_info *
1177 add_lwp (ptid_t ptid)
1178 {
1179 struct lwp_info *lp;
1180
1181 gdb_assert (is_lwp (ptid));
1182
1183 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1184
1185 memset (lp, 0, sizeof (struct lwp_info));
1186
1187 lp->last_resume_kind = resume_continue;
1188 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1189
1190 lp->ptid = ptid;
1191 lp->core = -1;
1192
1193 lp->next = lwp_list;
1194 lwp_list = lp;
1195
1196 /* Let the arch specific bits know about this new thread. Current
1197 clients of this callback take the opportunity to install
1198 watchpoints in the new thread. Don't do this for the first
1199 thread though. If we're spawning a child ("run"), the thread
1200 executes the shell wrapper first, and we shouldn't touch it until
1201 it execs the program we want to debug. For "attach", it'd be
1202 okay to call the callback, but it's not necessary, because
1203 watchpoints can't yet have been inserted into the inferior. */
1204 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
1205 linux_nat_new_thread (lp);
1206
1207 return lp;
1208 }
1209
1210 /* Remove the LWP specified by PTID from the list. */
1211
1212 static void
1213 delete_lwp (ptid_t ptid)
1214 {
1215 struct lwp_info *lp, *lpprev;
1216
1217 lpprev = NULL;
1218
1219 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1220 if (ptid_equal (lp->ptid, ptid))
1221 break;
1222
1223 if (!lp)
1224 return;
1225
1226 if (lpprev)
1227 lpprev->next = lp->next;
1228 else
1229 lwp_list = lp->next;
1230
1231 lwp_free (lp);
1232 }
1233
1234 /* Return a pointer to the structure describing the LWP corresponding
1235 to PTID. If no corresponding LWP could be found, return NULL. */
1236
1237 static struct lwp_info *
1238 find_lwp_pid (ptid_t ptid)
1239 {
1240 struct lwp_info *lp;
1241 int lwp;
1242
1243 if (is_lwp (ptid))
1244 lwp = GET_LWP (ptid);
1245 else
1246 lwp = GET_PID (ptid);
1247
1248 for (lp = lwp_list; lp; lp = lp->next)
1249 if (lwp == GET_LWP (lp->ptid))
1250 return lp;
1251
1252 return NULL;
1253 }
1254
1255 /* Call CALLBACK with its second argument set to DATA for every LWP in
1256 the list. If CALLBACK returns 1 for a particular LWP, return a
1257 pointer to the structure describing that LWP immediately.
1258 Otherwise return NULL. */
1259
1260 struct lwp_info *
1261 iterate_over_lwps (ptid_t filter,
1262 int (*callback) (struct lwp_info *, void *),
1263 void *data)
1264 {
1265 struct lwp_info *lp, *lpnext;
1266
1267 for (lp = lwp_list; lp; lp = lpnext)
1268 {
1269 lpnext = lp->next;
1270
1271 if (ptid_match (lp->ptid, filter))
1272 {
1273 if ((*callback) (lp, data))
1274 return lp;
1275 }
1276 }
1277
1278 return NULL;
1279 }
1280
1281 /* Iterate like iterate_over_lwps does, except that when forking off a
1282 child, call CALLBACK with CALLBACK_DATA only for that new child PID. */
1283
1284 void
1285 linux_nat_iterate_watchpoint_lwps
1286 (linux_nat_iterate_watchpoint_lwps_ftype callback, void *callback_data)
1287 {
1288 int inferior_pid = ptid_get_pid (inferior_ptid);
1289 struct inferior *inf = current_inferior ();
1290
1291 if (inf->pid == inferior_pid)
1292 {
1293 /* Iterate all the threads of the current inferior. Without specifying
1294 INFERIOR_PID it would iterate all threads of all inferiors, which is
1295 inappropriate for watchpoints. */
1296
1297 iterate_over_lwps (pid_to_ptid (inferior_pid), callback, callback_data);
1298 }
1299 else
1300 {
1301 /* Detaching a new child PID temporarily present in INFERIOR_PID. */
1302
1303 struct lwp_info *child_lp;
1304 struct cleanup *old_chain;
1305 pid_t child_pid = GET_PID (inferior_ptid);
1306 ptid_t child_ptid = ptid_build (child_pid, child_pid, 0);
1307
1308 gdb_assert (!is_lwp (inferior_ptid));
1309 gdb_assert (find_lwp_pid (child_ptid) == NULL);
1310 child_lp = add_lwp (child_ptid);
1311 child_lp->stopped = 1;
1312 child_lp->last_resume_kind = resume_stop;
1313 old_chain = make_cleanup (delete_lwp_cleanup, child_lp);
1314
1315 callback (child_lp, callback_data);
1316
1317 do_cleanups (old_chain);
1318 }
1319 }
1320
1321 /* Update our internal state when changing from one checkpoint to
1322 another indicated by NEW_PTID. We can only switch single-threaded
1323 applications, so we only create one new LWP, and the previous list
1324 is discarded. */
1325
1326 void
1327 linux_nat_switch_fork (ptid_t new_ptid)
1328 {
1329 struct lwp_info *lp;
1330
1331 purge_lwp_list (GET_PID (inferior_ptid));
1332
1333 lp = add_lwp (new_ptid);
1334 lp->stopped = 1;
1335
1336 /* This changes the thread's ptid while preserving the gdb thread
1337 num. Also changes the inferior pid, while preserving the
1338 inferior num. */
1339 thread_change_ptid (inferior_ptid, new_ptid);
1340
1341 /* We've just told GDB core that the thread changed target id, but,
1342 in fact, it really is a different thread, with different register
1343 contents. */
1344 registers_changed ();
1345 }
1346
1347 /* Handle the exit of a single thread LP. */
1348
1349 static void
1350 exit_lwp (struct lwp_info *lp)
1351 {
1352 struct thread_info *th = find_thread_ptid (lp->ptid);
1353
1354 if (th)
1355 {
1356 if (print_thread_events)
1357 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1358
1359 delete_thread (lp->ptid);
1360 }
1361
1362 delete_lwp (lp->ptid);
1363 }
1364
1365 /* Wait for the LWP specified by LP, which we have just attached to.
1366 Returns a wait status for that LWP, to cache. */
1367
1368 static int
1369 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1370 int *signalled)
1371 {
1372 pid_t new_pid, pid = GET_LWP (ptid);
1373 int status;
1374
1375 if (linux_proc_pid_is_stopped (pid))
1376 {
1377 if (debug_linux_nat)
1378 fprintf_unfiltered (gdb_stdlog,
1379 "LNPAW: Attaching to a stopped process\n");
1380
1381 /* The process is definitely stopped. It is in a job control
1382 stop, unless the kernel predates the TASK_STOPPED /
1383 TASK_TRACED distinction, in which case it might be in a
1384 ptrace stop. Make sure it is in a ptrace stop; from there we
1385 can kill it, signal it, et cetera.
1386
1387 First make sure there is a pending SIGSTOP. Since we are
1388 already attached, the process can not transition from stopped
1389 to running without a PTRACE_CONT; so we know this signal will
1390 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1391 probably already in the queue (unless this kernel is old
1392 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1393 is not an RT signal, it can only be queued once. */
1394 kill_lwp (pid, SIGSTOP);
1395
1396 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1397 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1398 ptrace (PTRACE_CONT, pid, 0, 0);
1399 }
1400
1401 /* Make sure the initial process is stopped. The user-level threads
1402 layer might want to poke around in the inferior, and that won't
1403 work if things haven't stabilized yet. */
1404 new_pid = my_waitpid (pid, &status, 0);
1405 if (new_pid == -1 && errno == ECHILD)
1406 {
1407 if (first)
1408 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1409
1410 /* Try again with __WCLONE to check cloned processes. */
1411 new_pid = my_waitpid (pid, &status, __WCLONE);
1412 *cloned = 1;
1413 }
1414
1415 gdb_assert (pid == new_pid);
1416
1417 if (!WIFSTOPPED (status))
1418 {
1419 /* The pid we tried to attach has apparently just exited. */
1420 if (debug_linux_nat)
1421 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1422 pid, status_to_str (status));
1423 return status;
1424 }
1425
1426 if (WSTOPSIG (status) != SIGSTOP)
1427 {
1428 *signalled = 1;
1429 if (debug_linux_nat)
1430 fprintf_unfiltered (gdb_stdlog,
1431 "LNPAW: Received %s after attaching\n",
1432 status_to_str (status));
1433 }
1434
1435 return status;
1436 }
1437
1438 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1439 the new LWP could not be attached, or 1 if we're already auto
1440 attached to this thread, but haven't processed the
1441 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1442 its existence, without considering it an error. */
1443
1444 int
1445 lin_lwp_attach_lwp (ptid_t ptid)
1446 {
1447 struct lwp_info *lp;
1448 sigset_t prev_mask;
1449 int lwpid;
1450
1451 gdb_assert (is_lwp (ptid));
1452
1453 block_child_signals (&prev_mask);
1454
1455 lp = find_lwp_pid (ptid);
1456 lwpid = GET_LWP (ptid);
1457
1458 /* We assume that we're already attached to any LWP that has an id
1459 equal to the overall process id, and to any LWP that is already
1460 in our list of LWPs. If we're not seeing exit events from threads
1461 and we've had PID wraparound since we last tried to stop all threads,
1462 this assumption might be wrong; fortunately, this is very unlikely
1463 to happen. */
1464 if (lwpid != GET_PID (ptid) && lp == NULL)
1465 {
1466 int status, cloned = 0, signalled = 0;
1467
1468 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1469 {
1470 if (linux_supports_tracefork_flag)
1471 {
1472 /* If we haven't stopped all threads when we get here,
1473 we may have seen a thread listed in thread_db's list,
1474 but not processed the PTRACE_EVENT_CLONE yet. If
1475 that's the case, ignore this new thread, and let
1476 normal event handling discover it later. */
1477 if (in_pid_list_p (stopped_pids, lwpid))
1478 {
1479 /* We've already seen this thread stop, but we
1480 haven't seen the PTRACE_EVENT_CLONE extended
1481 event yet. */
1482 restore_child_signals_mask (&prev_mask);
1483 return 0;
1484 }
1485 else
1486 {
1487 int new_pid;
1488 int status;
1489
1490 /* See if we've got a stop for this new child
1491 pending. If so, we're already attached. */
1492 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1493 if (new_pid == -1 && errno == ECHILD)
1494 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1495 if (new_pid != -1)
1496 {
1497 if (WIFSTOPPED (status))
1498 add_to_pid_list (&stopped_pids, lwpid, status);
1499
1500 restore_child_signals_mask (&prev_mask);
1501 return 1;
1502 }
1503 }
1504 }
1505
1506 /* If we fail to attach to the thread, issue a warning,
1507 but continue. One way this can happen is if thread
1508 creation is interrupted; as of Linux kernel 2.6.19, a
1509 bug may place threads in the thread list and then fail
1510 to create them. */
1511 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1512 safe_strerror (errno));
1513 restore_child_signals_mask (&prev_mask);
1514 return -1;
1515 }
1516
1517 if (debug_linux_nat)
1518 fprintf_unfiltered (gdb_stdlog,
1519 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1520 target_pid_to_str (ptid));
1521
1522 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1523 if (!WIFSTOPPED (status))
1524 {
1525 restore_child_signals_mask (&prev_mask);
1526 return 1;
1527 }
1528
1529 lp = add_lwp (ptid);
1530 lp->stopped = 1;
1531 lp->cloned = cloned;
1532 lp->signalled = signalled;
1533 if (WSTOPSIG (status) != SIGSTOP)
1534 {
1535 lp->resumed = 1;
1536 lp->status = status;
1537 }
1538
1539 target_post_attach (GET_LWP (lp->ptid));
1540
1541 if (debug_linux_nat)
1542 {
1543 fprintf_unfiltered (gdb_stdlog,
1544 "LLAL: waitpid %s received %s\n",
1545 target_pid_to_str (ptid),
1546 status_to_str (status));
1547 }
1548 }
1549 else
1550 {
1551 /* We assume that the LWP representing the original process is
1552 already stopped. Mark it as stopped in the data structure
1553 that the GNU/Linux ptrace layer uses to keep track of
1554 threads. Note that this won't have been done already, since
1555 the main thread will have, we assume, been stopped by an
1556 attach from a different layer. */
1557 if (lp == NULL)
1558 lp = add_lwp (ptid);
1559 lp->stopped = 1;
1560 }
1561
1562 lp->last_resume_kind = resume_stop;
1563 restore_child_signals_mask (&prev_mask);
1564 return 0;
1565 }
1566
1567 static void
1568 linux_nat_create_inferior (struct target_ops *ops,
1569 char *exec_file, char *allargs, char **env,
1570 int from_tty)
1571 {
1572 #ifdef HAVE_PERSONALITY
1573 int personality_orig = 0, personality_set = 0;
1574 #endif /* HAVE_PERSONALITY */
1575
1576 /* The fork_child mechanism is synchronous and calls target_wait, so
1577 we have to mask the async mode. */
1578
1579 #ifdef HAVE_PERSONALITY
1580 if (disable_randomization)
1581 {
1582 errno = 0;
1583 personality_orig = personality (0xffffffff);
1584 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1585 {
1586 personality_set = 1;
1587 personality (personality_orig | ADDR_NO_RANDOMIZE);
1588 }
1589 if (errno != 0 || (personality_set
1590 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1591 warning (_("Error disabling address space randomization: %s"),
1592 safe_strerror (errno));
1593 }
1594 #endif /* HAVE_PERSONALITY */
1595
1596 /* Make sure we report all signals during startup. */
1597 linux_nat_pass_signals (0, NULL);
1598
1599 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1600
1601 #ifdef HAVE_PERSONALITY
1602 if (personality_set)
1603 {
1604 errno = 0;
1605 personality (personality_orig);
1606 if (errno != 0)
1607 warning (_("Error restoring address space randomization: %s"),
1608 safe_strerror (errno));
1609 }
1610 #endif /* HAVE_PERSONALITY */
1611 }
1612
1613 static void
1614 linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
1615 {
1616 struct lwp_info *lp;
1617 int status;
1618 ptid_t ptid;
1619 volatile struct gdb_exception ex;
1620
1621 /* Make sure we report all signals during attach. */
1622 linux_nat_pass_signals (0, NULL);
1623
1624 TRY_CATCH (ex, RETURN_MASK_ERROR)
1625 {
1626 linux_ops->to_attach (ops, args, from_tty);
1627 }
1628 if (ex.reason < 0)
1629 {
1630 pid_t pid = parse_pid_to_attach (args);
1631 struct buffer buffer;
1632 char *message, *buffer_s;
1633
1634 message = xstrdup (ex.message);
1635 make_cleanup (xfree, message);
1636
1637 buffer_init (&buffer);
1638 linux_ptrace_attach_warnings (pid, &buffer);
1639
1640 buffer_grow_str0 (&buffer, "");
1641 buffer_s = buffer_finish (&buffer);
1642 make_cleanup (xfree, buffer_s);
1643
1644 throw_error (ex.error, "%s%s", buffer_s, message);
1645 }
1646
1647 /* The ptrace base target adds the main thread with (pid,0,0)
1648 format. Decorate it with lwp info. */
1649 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1650 thread_change_ptid (inferior_ptid, ptid);
1651
1652 /* Add the initial process as the first LWP to the list. */
1653 lp = add_lwp (ptid);
1654
1655 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1656 &lp->signalled);
1657 if (!WIFSTOPPED (status))
1658 {
1659 if (WIFEXITED (status))
1660 {
1661 int exit_code = WEXITSTATUS (status);
1662
1663 target_terminal_ours ();
1664 target_mourn_inferior ();
1665 if (exit_code == 0)
1666 error (_("Unable to attach: program exited normally."));
1667 else
1668 error (_("Unable to attach: program exited with code %d."),
1669 exit_code);
1670 }
1671 else if (WIFSIGNALED (status))
1672 {
1673 enum gdb_signal signo;
1674
1675 target_terminal_ours ();
1676 target_mourn_inferior ();
1677
1678 signo = gdb_signal_from_host (WTERMSIG (status));
1679 error (_("Unable to attach: program terminated with signal "
1680 "%s, %s."),
1681 gdb_signal_to_name (signo),
1682 gdb_signal_to_string (signo));
1683 }
1684
1685 internal_error (__FILE__, __LINE__,
1686 _("unexpected status %d for PID %ld"),
1687 status, (long) GET_LWP (ptid));
1688 }
1689
1690 lp->stopped = 1;
1691
1692 /* Save the wait status to report later. */
1693 lp->resumed = 1;
1694 if (debug_linux_nat)
1695 fprintf_unfiltered (gdb_stdlog,
1696 "LNA: waitpid %ld, saving status %s\n",
1697 (long) GET_PID (lp->ptid), status_to_str (status));
1698
1699 lp->status = status;
1700
1701 if (target_can_async_p ())
1702 target_async (inferior_event_handler, 0);
1703 }
1704
1705 /* Get pending status of LP. */
1706 static int
1707 get_pending_status (struct lwp_info *lp, int *status)
1708 {
1709 enum gdb_signal signo = GDB_SIGNAL_0;
1710
1711 /* If we paused threads momentarily, we may have stored pending
1712 events in lp->status or lp->waitstatus (see stop_wait_callback),
1713 and GDB core hasn't seen any signal for those threads.
1714 Otherwise, the last signal reported to the core is found in the
1715 thread object's stop_signal.
1716
1717 There's a corner case that isn't handled here at present. Only
1718 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1719 stop_signal make sense as a real signal to pass to the inferior.
1720 Some catchpoint related events, like
1721 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1722 to GDB_SIGNAL_TRAP when the catchpoint triggers. But,
1723 those traps are debug API (ptrace in our case) related and
1724 induced; the inferior wouldn't see them if it wasn't being
1725 traced. Hence, we should never pass them to the inferior, even
1726 when set to pass state. Since this corner case isn't handled by
1727 infrun.c when proceeding with a signal, for consistency, neither
1728 do we handle it here (or elsewhere in the file we check for
1729 signal pass state). Normally SIGTRAP isn't set to pass state, so
1730 this is really a corner case. */
1731
1732 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1733 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1734 else if (lp->status)
1735 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1736 else if (non_stop && !is_executing (lp->ptid))
1737 {
1738 struct thread_info *tp = find_thread_ptid (lp->ptid);
1739
1740 signo = tp->suspend.stop_signal;
1741 }
1742 else if (!non_stop)
1743 {
1744 struct target_waitstatus last;
1745 ptid_t last_ptid;
1746
1747 get_last_target_status (&last_ptid, &last);
1748
1749 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1750 {
1751 struct thread_info *tp = find_thread_ptid (lp->ptid);
1752
1753 signo = tp->suspend.stop_signal;
1754 }
1755 }
1756
1757 *status = 0;
1758
1759 if (signo == GDB_SIGNAL_0)
1760 {
1761 if (debug_linux_nat)
1762 fprintf_unfiltered (gdb_stdlog,
1763 "GPT: lwp %s has no pending signal\n",
1764 target_pid_to_str (lp->ptid));
1765 }
1766 else if (!signal_pass_state (signo))
1767 {
1768 if (debug_linux_nat)
1769 fprintf_unfiltered (gdb_stdlog,
1770 "GPT: lwp %s had signal %s, "
1771 "but it is in no pass state\n",
1772 target_pid_to_str (lp->ptid),
1773 gdb_signal_to_string (signo));
1774 }
1775 else
1776 {
1777 *status = W_STOPCODE (gdb_signal_to_host (signo));
1778
1779 if (debug_linux_nat)
1780 fprintf_unfiltered (gdb_stdlog,
1781 "GPT: lwp %s has pending signal %s\n",
1782 target_pid_to_str (lp->ptid),
1783 gdb_signal_to_string (signo));
1784 }
1785
1786 return 0;
1787 }
1788
1789 static int
1790 detach_callback (struct lwp_info *lp, void *data)
1791 {
1792 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1793
1794 if (debug_linux_nat && lp->status)
1795 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1796 strsignal (WSTOPSIG (lp->status)),
1797 target_pid_to_str (lp->ptid));
1798
1799 /* If there is a pending SIGSTOP, get rid of it. */
1800 if (lp->signalled)
1801 {
1802 if (debug_linux_nat)
1803 fprintf_unfiltered (gdb_stdlog,
1804 "DC: Sending SIGCONT to %s\n",
1805 target_pid_to_str (lp->ptid));
1806
1807 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1808 lp->signalled = 0;
1809 }
1810
1811 /* We don't actually detach from the LWP that has an id equal to the
1812 overall process id just yet. */
1813 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1814 {
1815 int status = 0;
1816
1817 /* Pass on any pending signal for this LWP. */
1818 get_pending_status (lp, &status);
1819
1820 if (linux_nat_prepare_to_resume != NULL)
1821 linux_nat_prepare_to_resume (lp);
1822 errno = 0;
1823 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1824 WSTOPSIG (status)) < 0)
1825 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1826 safe_strerror (errno));
1827
1828 if (debug_linux_nat)
1829 fprintf_unfiltered (gdb_stdlog,
1830 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1831 target_pid_to_str (lp->ptid),
1832 strsignal (WSTOPSIG (status)));
1833
1834 delete_lwp (lp->ptid);
1835 }
1836
1837 return 0;
1838 }
1839
1840 static void
1841 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1842 {
1843 int pid;
1844 int status;
1845 struct lwp_info *main_lwp;
1846
1847 pid = GET_PID (inferior_ptid);
1848
1849 if (target_can_async_p ())
1850 linux_nat_async (NULL, 0);
1851
1852 /* Stop all threads before detaching. ptrace requires that the
1853 thread is stopped to successfully detach. */
1854 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1855 /* ... and wait until all of them have reported back that
1856 they're no longer running. */
1857 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1858
1859 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1860
1861 /* Only the initial process should be left right now. */
1862 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1863
1864 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1865
1866 /* Pass on any pending signal for the last LWP. */
1867 if ((args == NULL || *args == '\0')
1868 && get_pending_status (main_lwp, &status) != -1
1869 && WIFSTOPPED (status))
1870 {
1871 /* Put the signal number in ARGS so that inf_ptrace_detach will
1872 pass it along with PTRACE_DETACH. */
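      /* Eight bytes is plenty here: WSTOPSIG yields at most a
         three-digit value, so "%d" plus the terminating NUL always
         fits.  */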
1873 args = alloca (8);
1874 sprintf (args, "%d", (int) WSTOPSIG (status));
1875 if (debug_linux_nat)
1876 fprintf_unfiltered (gdb_stdlog,
1877 "LND: Sending signal %s to %s\n",
1878 args,
1879 target_pid_to_str (main_lwp->ptid));
1880 }
1881
1882 if (linux_nat_prepare_to_resume != NULL)
1883 linux_nat_prepare_to_resume (main_lwp);
1884 delete_lwp (main_lwp->ptid);
1885
1886 if (forks_exist_p ())
1887 {
1888 /* Multi-fork case. The current inferior_ptid is being detached
1889 from, but there are other viable forks to debug. Detach from
1890 the current fork, and context-switch to the first
1891 available. */
1892 linux_fork_detach (args, from_tty);
1893
1894 if (non_stop && target_can_async_p ())
1895 target_async (inferior_event_handler, 0);
1896 }
1897 else
1898 linux_ops->to_detach (ops, args, from_tty);
1899 }
1900
1901 /* Resume LP. */
1902
1903 static void
1904 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1905 {
1906 if (lp->stopped)
1907 {
1908 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1909
1910 if (inf->vfork_child != NULL)
1911 {
1912 if (debug_linux_nat)
1913 fprintf_unfiltered (gdb_stdlog,
1914 "RC: Not resuming %s (vfork parent)\n",
1915 target_pid_to_str (lp->ptid));
1916 }
1917 else if (lp->status == 0
1918 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1919 {
1920 if (debug_linux_nat)
1921 fprintf_unfiltered (gdb_stdlog,
1922 "RC: Resuming sibling %s, %s, %s\n",
1923 target_pid_to_str (lp->ptid),
1924 (signo != GDB_SIGNAL_0
1925 ? strsignal (gdb_signal_to_host (signo))
1926 : "0"),
1927 step ? "step" : "resume");
1928
1929 if (linux_nat_prepare_to_resume != NULL)
1930 linux_nat_prepare_to_resume (lp);
1931 linux_ops->to_resume (linux_ops,
1932 pid_to_ptid (GET_LWP (lp->ptid)),
1933 step, signo);
1934 lp->stopped = 0;
1935 lp->step = step;
1936 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1937 lp->stopped_by_watchpoint = 0;
1938 }
1939 else
1940 {
1941 if (debug_linux_nat)
1942 fprintf_unfiltered (gdb_stdlog,
1943 "RC: Not resuming sibling %s (has pending)\n",
1944 target_pid_to_str (lp->ptid));
1945 }
1946 }
1947 else
1948 {
1949 if (debug_linux_nat)
1950 fprintf_unfiltered (gdb_stdlog,
1951 "RC: Not resuming sibling %s (not stopped)\n",
1952 target_pid_to_str (lp->ptid));
1953 }
1954 }
1955
1956 /* Resume LWP, with the last stop signal, if it is in pass state. */
1957
1958 static int
1959 linux_nat_resume_callback (struct lwp_info *lp, void *data)
1960 {
1961 enum gdb_signal signo = GDB_SIGNAL_0;
1962
1963 if (lp->stopped)
1964 {
1965 struct thread_info *thread;
1966
1967 thread = find_thread_ptid (lp->ptid);
1968 if (thread != NULL)
1969 {
1970 if (signal_pass_state (thread->suspend.stop_signal))
1971 signo = thread->suspend.stop_signal;
1972 thread->suspend.stop_signal = GDB_SIGNAL_0;
1973 }
1974 }
1975
1976 resume_lwp (lp, 0, signo);
1977 return 0;
1978 }
1979
1980 static int
1981 resume_clear_callback (struct lwp_info *lp, void *data)
1982 {
1983 lp->resumed = 0;
1984 lp->last_resume_kind = resume_stop;
1985 return 0;
1986 }
1987
1988 static int
1989 resume_set_callback (struct lwp_info *lp, void *data)
1990 {
1991 lp->resumed = 1;
1992 lp->last_resume_kind = resume_continue;
1993 return 0;
1994 }
1995
1996 static void
1997 linux_nat_resume (struct target_ops *ops,
1998 ptid_t ptid, int step, enum gdb_signal signo)
1999 {
2000 sigset_t prev_mask;
2001 struct lwp_info *lp;
2002 int resume_many;
2003
2004 if (debug_linux_nat)
2005 fprintf_unfiltered (gdb_stdlog,
2006 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
2007 step ? "step" : "resume",
2008 target_pid_to_str (ptid),
2009 (signo != GDB_SIGNAL_0
2010 ? strsignal (gdb_signal_to_host (signo)) : "0"),
2011 target_pid_to_str (inferior_ptid));
2012
2013 block_child_signals (&prev_mask);
2014
2015 /* A specific PTID means `step only this process id'. */
2016 resume_many = (ptid_equal (minus_one_ptid, ptid)
2017 || ptid_is_pid (ptid));
2018
2019 /* Mark the lwps we're resuming as resumed. */
2020 iterate_over_lwps (ptid, resume_set_callback, NULL);
2021
2022 /* See if it's the current inferior that should be handled
2023 specially. */
2024 if (resume_many)
2025 lp = find_lwp_pid (inferior_ptid);
2026 else
2027 lp = find_lwp_pid (ptid);
2028 gdb_assert (lp != NULL);
2029
2030 /* Remember if we're stepping. */
2031 lp->step = step;
2032 lp->last_resume_kind = step ? resume_step : resume_continue;
2033
2034 /* If we have a pending wait status for this thread, there is no
2035 point in resuming the process. But first make sure that
2036 linux_nat_wait won't preemptively handle the event - we
2037 should never take this short-circuit if we are going to
2038 leave LP running, since we have skipped resuming all the
2039 other threads. This bit of code needs to be synchronized
2040 with linux_nat_wait. */
2041
2042 if (lp->status && WIFSTOPPED (lp->status))
2043 {
2044 if (!lp->step
2045 && WSTOPSIG (lp->status)
2046 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
2047 {
2048 if (debug_linux_nat)
2049 fprintf_unfiltered (gdb_stdlog,
2050 "LLR: Not short circuiting for ignored "
2051 "status 0x%x\n", lp->status);
2052
2053 /* FIXME: What should we do if we are supposed to continue
2054 this thread with a signal? */
2055 gdb_assert (signo == GDB_SIGNAL_0);
2056 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
2057 lp->status = 0;
2058 }
2059 }
2060
2061 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2062 {
2063 /* FIXME: What should we do if we are supposed to continue
2064 this thread with a signal? */
2065 gdb_assert (signo == GDB_SIGNAL_0);
2066
2067 if (debug_linux_nat)
2068 fprintf_unfiltered (gdb_stdlog,
2069 "LLR: Short circuiting for status 0x%x\n",
2070 lp->status);
2071
2072 restore_child_signals_mask (&prev_mask);
2073 if (target_can_async_p ())
2074 {
2075 target_async (inferior_event_handler, 0);
2076 /* Tell the event loop we have something to process. */
2077 async_file_mark ();
2078 }
2079 return;
2080 }
2081
2082 /* Mark LWP as not stopped to prevent it from being continued by
2083 linux_nat_resume_callback. */
2084 lp->stopped = 0;
2085
2086 if (resume_many)
2087 iterate_over_lwps (ptid, linux_nat_resume_callback, NULL);
2088
2089 /* Convert to something the lower layer understands. */
2090 ptid = pid_to_ptid (GET_LWP (lp->ptid));
2091
2092 if (linux_nat_prepare_to_resume != NULL)
2093 linux_nat_prepare_to_resume (lp);
2094 linux_ops->to_resume (linux_ops, ptid, step, signo);
2095 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2096 lp->stopped_by_watchpoint = 0;
2097
2098 if (debug_linux_nat)
2099 fprintf_unfiltered (gdb_stdlog,
2100 "LLR: %s %s, %s (resume event thread)\n",
2101 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2102 target_pid_to_str (ptid),
2103 (signo != GDB_SIGNAL_0
2104 ? strsignal (gdb_signal_to_host (signo)) : "0"));
2105
2106 restore_child_signals_mask (&prev_mask);
2107 if (target_can_async_p ())
2108 target_async (inferior_event_handler, 0);
2109 }
2110
2111 /* Send a signal to an LWP. */
2112
2113 static int
2114 kill_lwp (int lwpid, int signo)
2115 {
2116 /* Use tkill, if possible, so the signal is directed at this specific
2117 thread. If tkill is unavailable (the syscall fails with ENOSYS), fall back to kill. */
2118
2119 #ifdef HAVE_TKILL_SYSCALL
2120 {
2121 static int tkill_failed;
2122
2123 if (!tkill_failed)
2124 {
2125 int ret;
2126
2127 errno = 0;
2128 ret = syscall (__NR_tkill, lwpid, signo);
2129 if (errno != ENOSYS)
2130 return ret;
2131 tkill_failed = 1;
2132 }
2133 }
2134 #endif
2135
2136 return kill (lwpid, signo);
2137 }
2138
2139 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2140 event, check if the core is interested in it: if not, ignore the
2141 event, and keep waiting; otherwise, we need to toggle the LWP's
2142 syscall entry/exit status, since the ptrace event itself doesn't
2143 indicate it, and report the trap to higher layers. */
2144
2145 static int
2146 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2147 {
2148 struct target_waitstatus *ourstatus = &lp->waitstatus;
2149 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2150 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2151
2152 if (stopping)
2153 {
2154 /* If we're stopping threads, there's a SIGSTOP pending, which
2155 makes it so that the LWP reports an immediate syscall return,
2156 followed by the SIGSTOP. Skip seeing that "return" using
2157 PTRACE_CONT directly, and let stop_wait_callback collect the
2158 SIGSTOP. Later when the thread is resumed, a new syscall
2159 entry event. If we didn't do this (and returned 0), we'd
2160 leave a syscall entry pending, and our caller, by using
2161 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2162 itself. Later, when the user re-resumes this LWP, we'd see
2163 another syscall entry event and we'd mistake it for a return.
2164
2165 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2166 (leaving immediately with LWP->signalled set, without issuing
2167 a PTRACE_CONT), it would still be problematic to leave this
2168 syscall enter pending, as later when the thread is resumed,
2169 it would then see the same syscall exit mentioned above,
2170 followed by the delayed SIGSTOP, while the syscall didn't
2171 actually get to execute. It seems it would be even more
2172 confusing to the user. */
2173
2174 if (debug_linux_nat)
2175 fprintf_unfiltered (gdb_stdlog,
2176 "LHST: ignoring syscall %d "
2177 "for LWP %ld (stopping threads), "
2178 "resuming with PTRACE_CONT for SIGSTOP\n",
2179 syscall_number,
2180 GET_LWP (lp->ptid));
2181
2182 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2183 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2184 return 1;
2185 }
2186
2187 if (catch_syscall_enabled ())
2188 {
2189 /* Always update the entry/return state, even if this particular
2190 syscall isn't interesting to the core now. In async mode,
2191 the user could install a new catchpoint for this syscall
2192 between syscall enter/return, and we'll need to know to
2193 report a syscall return if that happens. */
2194 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2195 ? TARGET_WAITKIND_SYSCALL_RETURN
2196 : TARGET_WAITKIND_SYSCALL_ENTRY);
2197
2198 if (catching_syscall_number (syscall_number))
2199 {
2200 /* Alright, an event to report. */
2201 ourstatus->kind = lp->syscall_state;
2202 ourstatus->value.syscall_number = syscall_number;
2203
2204 if (debug_linux_nat)
2205 fprintf_unfiltered (gdb_stdlog,
2206 "LHST: stopping for %s of syscall %d"
2207 " for LWP %ld\n",
2208 lp->syscall_state
2209 == TARGET_WAITKIND_SYSCALL_ENTRY
2210 ? "entry" : "return",
2211 syscall_number,
2212 GET_LWP (lp->ptid));
2213 return 0;
2214 }
2215
2216 if (debug_linux_nat)
2217 fprintf_unfiltered (gdb_stdlog,
2218 "LHST: ignoring %s of syscall %d "
2219 "for LWP %ld\n",
2220 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2221 ? "entry" : "return",
2222 syscall_number,
2223 GET_LWP (lp->ptid));
2224 }
2225 else
2226 {
2227 /* If we had been syscall tracing, and hence used PT_SYSCALL
2228 before on this LWP, it could happen that the user removes all
2229 syscall catchpoints before we get to process this event.
2230 There are two noteworthy issues here:
2231
2232 - When stopped at a syscall entry event, resuming with
2233 PT_STEP still resumes executing the syscall and reports a
2234 syscall return.
2235
2236 - Only PT_SYSCALL catches syscall enters. So if we last
2237 single-stepped this thread, this event can't be a syscall
2238 enter; it has to be a syscall exit.
2240
2241 The points above mean that the next resume, be it PT_STEP or
2242 PT_CONTINUE, cannot trigger a syscall trace event. */
2243 if (debug_linux_nat)
2244 fprintf_unfiltered (gdb_stdlog,
2245 "LHST: caught syscall event "
2246 "with no syscall catchpoints."
2247 " %d for LWP %ld, ignoring\n",
2248 syscall_number,
2249 GET_LWP (lp->ptid));
2250 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2251 }
2252
2253 /* The core isn't interested in this event. For efficiency, avoid
2254 stopping all threads only to have the core resume them all again.
2255 Since we're not stopping threads, if we're still syscall tracing
2256 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2257 subsequent syscall. Simply resume using the inf-ptrace layer,
2258 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2259
2260 /* Note that gdbarch_get_syscall_number may access registers, hence
2261 fill a regcache. */
2262 registers_changed ();
2263 if (linux_nat_prepare_to_resume != NULL)
2264 linux_nat_prepare_to_resume (lp);
2265 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2266 lp->step, GDB_SIGNAL_0);
2267 return 1;
2268 }
2269
2270 /* Handle a GNU/Linux extended wait response. If we see a clone
2271 event, we need to add the new LWP to our list (and not report the
2272 trap to higher layers). This function returns non-zero if the
2273 event should be ignored and we should wait again. If STOPPING is
2274 true, the new LWP remains stopped, otherwise it is continued. */
2275
2276 static int
2277 linux_handle_extended_wait (struct lwp_info *lp, int status,
2278 int stopping)
2279 {
2280 int pid = GET_LWP (lp->ptid);
2281 struct target_waitstatus *ourstatus = &lp->waitstatus;
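   /* ptrace extended events arrive as a SIGTRAP stop with the event
      code stored in bits 16..23 of the wait status; e.g. a fork is
      reported as W_STOPCODE (SIGTRAP) | (PTRACE_EVENT_FORK << 16),
      which is why we shift by 16 here.  */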
2282 int event = status >> 16;
2283
2284 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2285 || event == PTRACE_EVENT_CLONE)
2286 {
2287 unsigned long new_pid;
2288 int ret;
2289
2290 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2291
2292 /* If we haven't already seen the new PID stop, wait for it now. */
2293 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2294 {
2295 /* The new child has a pending SIGSTOP. We can't affect it until it
2296 hits the SIGSTOP, but we're already attached. */
2297 ret = my_waitpid (new_pid, &status,
2298 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2299 if (ret == -1)
2300 perror_with_name (_("waiting for new child"));
2301 else if (ret != new_pid)
2302 internal_error (__FILE__, __LINE__,
2303 _("wait returned unexpected PID %d"), ret);
2304 else if (!WIFSTOPPED (status))
2305 internal_error (__FILE__, __LINE__,
2306 _("wait returned unexpected status 0x%x"), status);
2307 }
2308
2309 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2310
2311 if (event == PTRACE_EVENT_FORK
2312 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2313 {
2314 /* Handle checkpointing by linux-fork.c here as a special
2315 case. We don't want the follow-fork-mode or 'catch fork'
2316 to interfere with this. */
2317
2318 /* This won't actually modify the breakpoint list, but will
2319 physically remove the breakpoints from the child. */
2320 detach_breakpoints (new_pid);
2321
2322 /* Retain child fork in ptrace (stopped) state. */
2323 if (!find_fork_pid (new_pid))
2324 add_fork (new_pid);
2325
2326 /* Report as spurious, so that infrun doesn't want to follow
2327 this fork. We're actually doing an infcall in
2328 linux-fork.c. */
2329 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2330 linux_enable_event_reporting (pid_to_ptid (new_pid));
2331
2332 /* Report the stop to the core. */
2333 return 0;
2334 }
2335
2336 if (event == PTRACE_EVENT_FORK)
2337 ourstatus->kind = TARGET_WAITKIND_FORKED;
2338 else if (event == PTRACE_EVENT_VFORK)
2339 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2340 else
2341 {
2342 struct lwp_info *new_lp;
2343
2344 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2345
2346 if (debug_linux_nat)
2347 fprintf_unfiltered (gdb_stdlog,
2348 "LHEW: Got clone event "
2349 "from LWP %d, new child is LWP %ld\n",
2350 pid, new_pid);
2351
2352 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2353 new_lp->cloned = 1;
2354 new_lp->stopped = 1;
2355
2356 if (WSTOPSIG (status) != SIGSTOP)
2357 {
2358 /* This can happen if someone starts sending the new thread
2359 signals with a lower number than SIGSTOP (e.g. SIGUSR1)
2360 before it gets a chance to run.
2361 This is an unlikely case, and harder to handle for
2362 fork / vfork than for clone, so we do not try - but
2363 we handle it for clone events here. We'll send
2364 the other signal on to the thread below. */
2365
2366 new_lp->signalled = 1;
2367 }
2368 else
2369 {
2370 struct thread_info *tp;
2371
2372 /* When we stop for an event in some other thread, and
2373 pull the thread list just as this thread has cloned,
2374 we'll have seen the new thread in the thread_db list
2375 before handling the CLONE event (glibc's
2376 pthread_create adds the new thread to the thread list
2377 before clone'ing, and has the kernel fill in the
2378 thread's tid on the clone call with
2379 CLONE_PARENT_SETTID). If that happened, and the core
2380 had requested the new thread to stop, we'll have
2381 killed it with SIGSTOP. But since SIGSTOP is not an
2382 RT signal, it can only be queued once. We need to be
2383 careful to not resume the LWP if we wanted it to
2384 stop. In that case, we'll leave the SIGSTOP pending.
2385 It will later be reported as GDB_SIGNAL_0. */
2386 tp = find_thread_ptid (new_lp->ptid);
2387 if (tp != NULL && tp->stop_requested)
2388 new_lp->last_resume_kind = resume_stop;
2389 else
2390 status = 0;
2391 }
2392
2393 if (non_stop)
2394 {
2395 /* Add the new thread to GDB's lists as soon as possible
2396 so that:
2397
2398 1) the frontend doesn't have to wait for a stop to
2399 display them, and,
2400
2401 2) we tag it with the correct running state. */
2402
2403 /* If the thread_db layer is active, let it know about
2404 this new thread, and add it to GDB's list. */
2405 if (!thread_db_attach_lwp (new_lp->ptid))
2406 {
2407 /* We're not using thread_db. Add it to GDB's
2408 list. */
2409 target_post_attach (GET_LWP (new_lp->ptid));
2410 add_thread (new_lp->ptid);
2411 }
2412
2413 if (!stopping)
2414 {
2415 set_running (new_lp->ptid, 1);
2416 set_executing (new_lp->ptid, 1);
2417 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2418 resume_stop. */
2419 new_lp->last_resume_kind = resume_continue;
2420 }
2421 }
2422
2423 if (status != 0)
2424 {
2425 /* We created NEW_LP so it cannot yet contain STATUS. */
2426 gdb_assert (new_lp->status == 0);
2427
2428 /* Save the wait status to report later. */
2429 if (debug_linux_nat)
2430 fprintf_unfiltered (gdb_stdlog,
2431 "LHEW: waitpid of new LWP %ld, "
2432 "saving status %s\n",
2433 (long) GET_LWP (new_lp->ptid),
2434 status_to_str (status));
2435 new_lp->status = status;
2436 }
2437
2438 /* Note the need to use the low target ops to resume, to
2439 handle resuming with PT_SYSCALL if we have syscall
2440 catchpoints. */
2441 if (!stopping)
2442 {
2443 new_lp->resumed = 1;
2444
2445 if (status == 0)
2446 {
2447 gdb_assert (new_lp->last_resume_kind == resume_continue);
2448 if (debug_linux_nat)
2449 fprintf_unfiltered (gdb_stdlog,
2450 "LHEW: resuming new LWP %ld\n",
2451 GET_LWP (new_lp->ptid));
2452 if (linux_nat_prepare_to_resume != NULL)
2453 linux_nat_prepare_to_resume (new_lp);
2454 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2455 0, GDB_SIGNAL_0);
2456 new_lp->stopped = 0;
2457 }
2458 }
2459
2460 if (debug_linux_nat)
2461 fprintf_unfiltered (gdb_stdlog,
2462 "LHEW: resuming parent LWP %d\n", pid);
2463 if (linux_nat_prepare_to_resume != NULL)
2464 linux_nat_prepare_to_resume (lp);
2465 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2466 0, GDB_SIGNAL_0);
2467
2468 return 1;
2469 }
2470
2471 return 0;
2472 }
2473
2474 if (event == PTRACE_EVENT_EXEC)
2475 {
2476 if (debug_linux_nat)
2477 fprintf_unfiltered (gdb_stdlog,
2478 "LHEW: Got exec event from LWP %ld\n",
2479 GET_LWP (lp->ptid));
2480
2481 ourstatus->kind = TARGET_WAITKIND_EXECD;
2482 ourstatus->value.execd_pathname
2483 = xstrdup (linux_child_pid_to_exec_file (pid));
2484
2485 return 0;
2486 }
2487
2488 if (event == PTRACE_EVENT_VFORK_DONE)
2489 {
2490 if (current_inferior ()->waiting_for_vfork_done)
2491 {
2492 if (debug_linux_nat)
2493 fprintf_unfiltered (gdb_stdlog,
2494 "LHEW: Got expected PTRACE_EVENT_"
2495 "VFORK_DONE from LWP %ld: stopping\n",
2496 GET_LWP (lp->ptid));
2497
2498 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2499 return 0;
2500 }
2501
2502 if (debug_linux_nat)
2503 fprintf_unfiltered (gdb_stdlog,
2504 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2505 "from LWP %ld: resuming\n",
2506 GET_LWP (lp->ptid));
2507 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2508 return 1;
2509 }
2510
2511 internal_error (__FILE__, __LINE__,
2512 _("unknown ptrace event %d"), event);
2513 }
2514
2515 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2516 exited. */
2517
2518 static int
2519 wait_lwp (struct lwp_info *lp)
2520 {
2521 pid_t pid;
2522 int status = 0;
2523 int thread_dead = 0;
2524 sigset_t prev_mask;
2525
2526 gdb_assert (!lp->stopped);
2527 gdb_assert (lp->status == 0);
2528
2529 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2530 block_child_signals (&prev_mask);
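   /* With SIGCHLD blocked, a SIGCHLD that arrives after one of the
      WNOHANG polls below stays pending and is delivered only inside
      sigsuspend, which unblocks it atomically -- so no wakeup can be
      lost between the poll and the suspend.  */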
2531
2532 for (;;)
2533 {
2534 /* If my_waitpid returns 0, the __WCLONE vs. non-__WCLONE kind was
2535 right but no event is pending yet, so we should just call sigsuspend. */
2536
2537 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2538 if (pid == -1 && errno == ECHILD)
2539 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2540 if (pid == -1 && errno == ECHILD)
2541 {
2542 /* The thread has previously exited. We need to delete it
2543 now because, for some vendor 2.4 kernels with NPTL
2544 support backported, there won't be an exit event unless
2545 it is the main thread. 2.6 kernels will report an exit
2546 event for each thread that exits, as expected. */
2547 thread_dead = 1;
2548 if (debug_linux_nat)
2549 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2550 target_pid_to_str (lp->ptid));
2551 }
2552 if (pid != 0)
2553 break;
2554
2555 /* Bugs 10970, 12702.
2556 Thread group leader may have exited in which case we'll lock up in
2557 waitpid if there are other threads, even if they are all zombies too.
2558 Basically, we're not supposed to use waitpid this way.
2559 __WCLONE is not applicable for the leader so we can't use that.
2560 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2561 process; it gets ESRCH both for the zombie and for running processes.
2562
2563 As a workaround, check if we're waiting for the thread group leader and
2564 if it's a zombie, and avoid calling waitpid if it is.
2565
2566 This is racy: what if the leader becomes a zombie right after we check?
2567 Therefore always use WNOHANG with sigsuspend -- it is equivalent to a
2568 blocking waitpid, but the linux_proc_pid_is_zombie check stays safe. */
2569
2570 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2571 && linux_proc_pid_is_zombie (GET_LWP (lp->ptid)))
2572 {
2573 thread_dead = 1;
2574 if (debug_linux_nat)
2575 fprintf_unfiltered (gdb_stdlog,
2576 "WL: Thread group leader %s vanished.\n",
2577 target_pid_to_str (lp->ptid));
2578 break;
2579 }
2580
2581 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2582 handlers get invoked even though our caller had them intentionally
2583 blocked by block_child_signals. Only the loop in linux_nat_wait_1
2584 is sensitive to this, and when we are called from there, my_waitpid
2585 gets called again before that loop reaches its own sigsuspend, so we
2586 can safely let the handlers get executed here. */
2587
2588 sigsuspend (&suspend_mask);
2589 }
2590
2591 restore_child_signals_mask (&prev_mask);
2592
2593 if (!thread_dead)
2594 {
2595 gdb_assert (pid == GET_LWP (lp->ptid));
2596
2597 if (debug_linux_nat)
2598 {
2599 fprintf_unfiltered (gdb_stdlog,
2600 "WL: waitpid %s received %s\n",
2601 target_pid_to_str (lp->ptid),
2602 status_to_str (status));
2603 }
2604
2605 /* Check if the thread has exited. */
2606 if (WIFEXITED (status) || WIFSIGNALED (status))
2607 {
2608 thread_dead = 1;
2609 if (debug_linux_nat)
2610 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2611 target_pid_to_str (lp->ptid));
2612 }
2613 }
2614
2615 if (thread_dead)
2616 {
2617 exit_lwp (lp);
2618 return 0;
2619 }
2620
2621 gdb_assert (WIFSTOPPED (status));
2622
2623 /* Handle GNU/Linux's syscall SIGTRAPs. */
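   /* With PTRACE_O_TRACESYSGOOD in effect, syscall stops report
      WSTOPSIG as SIGTRAP with the high bit set; SYSCALL_SIGTRAP
      is that (SIGTRAP | 0x80) value.  */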
2624 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2625 {
2626 /* No longer need the sysgood bit. The ptrace event ends up
2627 recorded in lp->waitstatus if we care for it. We can carry
2628 on handling the event like a regular SIGTRAP from here
2629 on. */
2630 status = W_STOPCODE (SIGTRAP);
2631 if (linux_handle_syscall_trap (lp, 1))
2632 return wait_lwp (lp);
2633 }
2634
2635 /* Handle GNU/Linux's extended waitstatus for trace events. */
2636 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2637 {
2638 if (debug_linux_nat)
2639 fprintf_unfiltered (gdb_stdlog,
2640 "WL: Handling extended status 0x%06x\n",
2641 status);
2642 if (linux_handle_extended_wait (lp, status, 1))
2643 return wait_lwp (lp);
2644 }
2645
2646 return status;
2647 }
2648
2649 /* Save the most recent siginfo for LP. This is currently only called
2650 for SIGTRAP; some ports use the si_addr field for
2651 target_stopped_data_address. In the future, it may also be used to
2652 restore the siginfo of requeued signals. */
2653
2654 static void
2655 save_siginfo (struct lwp_info *lp)
2656 {
2657 errno = 0;
2658 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2659 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2660
2661 if (errno != 0)
2662 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2663 }
2664
2665 /* Send a SIGSTOP to LP. */
2666
2667 static int
2668 stop_callback (struct lwp_info *lp, void *data)
2669 {
2670 if (!lp->stopped && !lp->signalled)
2671 {
2672 int ret;
2673
2674 if (debug_linux_nat)
2675 {
2676 fprintf_unfiltered (gdb_stdlog,
2677 "SC: kill %s **<SIGSTOP>**\n",
2678 target_pid_to_str (lp->ptid));
2679 }
2680 errno = 0;
2681 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2682 if (debug_linux_nat)
2683 {
2684 fprintf_unfiltered (gdb_stdlog,
2685 "SC: lwp kill %d %s\n",
2686 ret,
2687 errno ? safe_strerror (errno) : "ERRNO-OK");
2688 }
2689
2690 lp->signalled = 1;
2691 gdb_assert (lp->status == 0);
2692 }
2693
2694 return 0;
2695 }
2696
2697 /* Request a stop on LWP. */
2698
2699 void
2700 linux_stop_lwp (struct lwp_info *lwp)
2701 {
2702 stop_callback (lwp, NULL);
2703 }
2704
2705 /* Return non-zero if LWP PID has a pending SIGINT. */
2706
2707 static int
2708 linux_nat_has_pending_sigint (int pid)
2709 {
2710 sigset_t pending, blocked, ignored;
2711
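   /* These sets are derived from the SigPnd/ShdPnd, SigBlk and SigIgn
      lines of /proc/PID/status.  */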
2712 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2713
2714 if (sigismember (&pending, SIGINT)
2715 && !sigismember (&ignored, SIGINT))
2716 return 1;
2717
2718 return 0;
2719 }
2720
2721 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2722
2723 static int
2724 set_ignore_sigint (struct lwp_info *lp, void *data)
2725 {
2726 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2727 flag to consume the next one. */
2728 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2729 && WSTOPSIG (lp->status) == SIGINT)
2730 lp->status = 0;
2731 else
2732 lp->ignore_sigint = 1;
2733
2734 return 0;
2735 }
2736
2737 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2738 This function is called after we know the LWP has stopped; if the LWP
2739 stopped before the expected SIGINT was delivered, then it will never have
2740 arrived. Also, if the signal was delivered to a shared queue and consumed
2741 by a different thread, it will never be delivered to this LWP. */
2742
2743 static void
2744 maybe_clear_ignore_sigint (struct lwp_info *lp)
2745 {
2746 if (!lp->ignore_sigint)
2747 return;
2748
2749 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2750 {
2751 if (debug_linux_nat)
2752 fprintf_unfiltered (gdb_stdlog,
2753 "MCIS: Clearing bogus flag for %s\n",
2754 target_pid_to_str (lp->ptid));
2755 lp->ignore_sigint = 0;
2756 }
2757 }
2758
2759 /* Fetch the possible triggered data watchpoint info and store it in
2760 LP.
2761
2762 On some archs, like x86, which use debug registers to set
2763 watchpoints, the way to know which watched address trapped is to
2764 read back the register that selects which address to watch.
2765 Problem is, between setting the watchpoint
2766 and reading back which data address trapped, the user may change
2767 the set of watchpoints, and, as a consequence, GDB changes the
2768 debug registers in the inferior. To avoid reading back a stale
2769 stopped-data-address when that happens, we cache in LP the fact
2770 that a watchpoint trapped, and the corresponding data address, as
2771 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2772 registers meanwhile, we have the cached data we can rely on. */
2773
2774 static void
2775 save_sigtrap (struct lwp_info *lp)
2776 {
2777 struct cleanup *old_chain;
2778
2779 if (linux_ops->to_stopped_by_watchpoint == NULL)
2780 {
2781 lp->stopped_by_watchpoint = 0;
2782 return;
2783 }
2784
2785 old_chain = save_inferior_ptid ();
2786 inferior_ptid = lp->ptid;
2787
2788 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2789
2790 if (lp->stopped_by_watchpoint)
2791 {
2792 if (linux_ops->to_stopped_data_address != NULL)
2793 lp->stopped_data_address_p =
2794 linux_ops->to_stopped_data_address (&current_target,
2795 &lp->stopped_data_address);
2796 else
2797 lp->stopped_data_address_p = 0;
2798 }
2799
2800 do_cleanups (old_chain);
2801 }
2802
2803 /* See save_sigtrap. */
2804
2805 static int
2806 linux_nat_stopped_by_watchpoint (void)
2807 {
2808 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2809
2810 gdb_assert (lp != NULL);
2811
2812 return lp->stopped_by_watchpoint;
2813 }
2814
2815 static int
2816 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2817 {
2818 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2819
2820 gdb_assert (lp != NULL);
2821
2822 *addr_p = lp->stopped_data_address;
2823
2824 return lp->stopped_data_address_p;
2825 }
2826
2827 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2828
2829 static int
2830 sigtrap_is_event (int status)
2831 {
2832 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2833 }
2834
2835 /* SIGTRAP-like events recognizer. */
2836
2837 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2838
2839 /* Check for SIGTRAP-like events in LP. */
2840
2841 static int
2842 linux_nat_lp_status_is_event (struct lwp_info *lp)
2843 {
2844 /* We check for lp->waitstatus in addition to lp->status, because we can
2845 have pending process exits recorded in lp->status
2846 and W_EXITCODE(0,0) == 0. We should probably have an additional
2847 lp->status_p flag. */
2848
2849 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2850 && linux_nat_status_is_event (lp->status));
2851 }
2852
2853 /* Set an alternative recognizer for SIGTRAP-like events. If
2854 breakpoint_inserted_here_p reports a breakpoint at the stop PC, then
2855 gdbarch_decr_pc_after_break will be applied. */
2856
2857 void
2858 linux_nat_set_status_is_event (struct target_ops *t,
2859 int (*status_is_event) (int status))
2860 {
2861 linux_nat_status_is_event = status_is_event;
2862 }
2863
2864 /* Wait until LP is stopped. */
2865
2866 static int
2867 stop_wait_callback (struct lwp_info *lp, void *data)
2868 {
2869 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2870
2871 /* If this is a vfork parent, bail out; it is not going to report
2872 any SIGSTOP until the vfork is done. */
2873 if (inf->vfork_child != NULL)
2874 return 0;
2875
2876 if (!lp->stopped)
2877 {
2878 int status;
2879
2880 status = wait_lwp (lp);
2881 if (status == 0)
2882 return 0;
2883
2884 if (lp->ignore_sigint && WIFSTOPPED (status)
2885 && WSTOPSIG (status) == SIGINT)
2886 {
2887 lp->ignore_sigint = 0;
2888
2889 errno = 0;
2890 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2891 if (debug_linux_nat)
2892 fprintf_unfiltered (gdb_stdlog,
2893 "PTRACE_CONT %s, 0, 0 (%s) "
2894 "(discarding SIGINT)\n",
2895 target_pid_to_str (lp->ptid),
2896 errno ? safe_strerror (errno) : "OK");
2897
2898 return stop_wait_callback (lp, NULL);
2899 }
2900
2901 maybe_clear_ignore_sigint (lp);
2902
2903 if (WSTOPSIG (status) != SIGSTOP)
2904 {
2905 /* The thread was stopped with a signal other than SIGSTOP. */
2906
2907 /* Save the trap's siginfo in case we need it later. */
2908 save_siginfo (lp);
2909
2910 save_sigtrap (lp);
2911
2912 if (debug_linux_nat)
2913 fprintf_unfiltered (gdb_stdlog,
2914 "SWC: Pending event %s in %s\n",
2915 status_to_str ((int) status),
2916 target_pid_to_str (lp->ptid));
2917
2918 /* Save the sigtrap event. */
2919 lp->status = status;
2920 gdb_assert (!lp->stopped);
2921 gdb_assert (lp->signalled);
2922 lp->stopped = 1;
2923 }
2924 else
2925 {
2926 /* We caught the SIGSTOP that we intended to catch, so
2927 there's no SIGSTOP pending. */
2928
2929 if (debug_linux_nat)
2930 fprintf_unfiltered (gdb_stdlog,
2931 "SWC: Delayed SIGSTOP caught for %s.\n",
2932 target_pid_to_str (lp->ptid));
2933
2934 lp->stopped = 1;
2935
2936 /* Reset SIGNALLED only after the stop_wait_callback call
2937 above as it does gdb_assert on SIGNALLED. */
2938 lp->signalled = 0;
2939 }
2940 }
2941
2942 return 0;
2943 }
2944
2945 /* Return non-zero if LP has a wait status pending. */
2946
2947 static int
2948 status_callback (struct lwp_info *lp, void *data)
2949 {
2950 /* Only report a pending wait status if we pretend that this has
2951 indeed been resumed. */
2952 if (!lp->resumed)
2953 return 0;
2954
2955 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2956 {
2957 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2958 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2959 0', so a clean process exit can not be stored pending in
2960 lp->status, it is indistinguishable from
2961 no-pending-status. */
2962 return 1;
2963 }
2964
2965 if (lp->status != 0)
2966 return 1;
2967
2968 return 0;
2969 }
2970
2971 /* Return non-zero if LP isn't stopped. */
2972
2973 static int
2974 running_callback (struct lwp_info *lp, void *data)
2975 {
2976 return (!lp->stopped
2977 || ((lp->status != 0
2978 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2979 && lp->resumed));
2980 }
2981
2982 /* Count the LWPs that have had events. */
2983
2984 static int
2985 count_events_callback (struct lwp_info *lp, void *data)
2986 {
2987 int *count = data;
2988
2989 gdb_assert (count != NULL);
2990
2991 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2992 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2993 (*count)++;
2994
2995 return 0;
2996 }
2997
2998 /* Select the LWP (if any) that is currently being single-stepped. */
2999
3000 static int
3001 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3002 {
3003 if (lp->last_resume_kind == resume_step
3004 && lp->status != 0)
3005 return 1;
3006 else
3007 return 0;
3008 }
3009
3010 /* Select the Nth LWP that has had a SIGTRAP event. */
3011
3012 static int
3013 select_event_lwp_callback (struct lwp_info *lp, void *data)
3014 {
3015 int *selector = data;
3016
3017 gdb_assert (selector != NULL);
3018
3019 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3020 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3021 if ((*selector)-- == 0)
3022 return 1;
3023
3024 return 0;
3025 }
3026
3027 static int
3028 cancel_breakpoint (struct lwp_info *lp)
3029 {
3030 /* Arrange for a breakpoint to be hit again later. We don't keep
3031 the SIGTRAP status and don't forward the SIGTRAP signal to the
3032 LWP. We will handle the current event, eventually we will resume
3033 this LWP, and this breakpoint will trap again.
3034
3035 If we do not do this, then we run the risk that the user will
3036 delete or disable the breakpoint, but the LWP will have already
3037 tripped on it. */
3038
3039 struct regcache *regcache = get_thread_regcache (lp->ptid);
3040 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3041 CORE_ADDR pc;
3042
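   /* On archs where the trap leaves the PC past the breakpoint
      instruction (e.g. x86, where gdbarch_decr_pc_after_break is 1),
      this computes the address of the breakpoint itself.  */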
3043 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3044 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3045 {
3046 if (debug_linux_nat)
3047 fprintf_unfiltered (gdb_stdlog,
3048 "CB: Push back breakpoint for %s\n",
3049 target_pid_to_str (lp->ptid));
3050
3051 /* Back up the PC if necessary. */
3052 if (gdbarch_decr_pc_after_break (gdbarch))
3053 regcache_write_pc (regcache, pc);
3054
3055 return 1;
3056 }
3057 return 0;
3058 }
3059
3060 static int
3061 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3062 {
3063 struct lwp_info *event_lp = data;
3064
3065 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3066 if (lp == event_lp)
3067 return 0;
3068
3069 /* If a LWP other than the LWP that we're reporting an event for has
3070 hit a GDB breakpoint (as opposed to some random trap signal),
3071 then just arrange for it to hit it again later. We don't keep
3072 the SIGTRAP status and don't forward the SIGTRAP signal to the
3073 LWP. We will handle the current event, eventually we will resume
3074 all LWPs, and this one will get its breakpoint trap again.
3075
3076 If we do not do this, then we run the risk that the user will
3077 delete or disable the breakpoint, but the LWP will have already
3078 tripped on it. */
3079
3080 if (linux_nat_lp_status_is_event (lp)
3081 && cancel_breakpoint (lp))
3082 /* Throw away the SIGTRAP. */
3083 lp->status = 0;
3084
3085 return 0;
3086 }
3087
3088 /* Select one LWP out of those that have events pending. */
3089
3090 static void
3091 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3092 {
3093 int num_events = 0;
3094 int random_selector;
3095 struct lwp_info *event_lp;
3096
3097 /* Record the wait status for the original LWP. */
3098 (*orig_lp)->status = *status;
3099
3100 /* Give preference to any LWP that is being single-stepped. */
3101 event_lp = iterate_over_lwps (filter,
3102 select_singlestep_lwp_callback, NULL);
3103 if (event_lp != NULL)
3104 {
3105 if (debug_linux_nat)
3106 fprintf_unfiltered (gdb_stdlog,
3107 "SEL: Select single-step %s\n",
3108 target_pid_to_str (event_lp->ptid));
3109 }
3110 else
3111 {
3112 /* No single-stepping LWP. Select one at random, out of those
3113 which have had SIGTRAP events. */
3114
3115 /* First see how many SIGTRAP events we have. */
3116 iterate_over_lwps (filter, count_events_callback, &num_events);
3117
3118 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
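      /* Scale rand () (uniform over [0, RAND_MAX]) down to an integer
         approximately uniform over [0, num_events), avoiding the
         modulo bias of a plain rand () % num_events.  */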
3119 random_selector = (int)
3120 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3121
3122 if (debug_linux_nat && num_events > 1)
3123 fprintf_unfiltered (gdb_stdlog,
3124 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3125 num_events, random_selector);
3126
3127 event_lp = iterate_over_lwps (filter,
3128 select_event_lwp_callback,
3129 &random_selector);
3130 }
3131
3132 if (event_lp != NULL)
3133 {
3134 /* Switch the event LWP. */
3135 *orig_lp = event_lp;
3136 *status = event_lp->status;
3137 }
3138
3139 /* Flush the wait status for the event LWP. */
3140 (*orig_lp)->status = 0;
3141 }
3142
3143 /* Return non-zero if LP has been resumed. */
3144
3145 static int
3146 resumed_callback (struct lwp_info *lp, void *data)
3147 {
3148 return lp->resumed;
3149 }
3150
3151 /* Stop an active thread, verify it still exists, then resume it. If
3152 the thread ends up with a pending status, then it is not resumed,
3153 and *DATA (really a pointer to int) is set. */
3154
3155 static int
3156 stop_and_resume_callback (struct lwp_info *lp, void *data)
3157 {
3158 int *new_pending_p = data;
3159
3160 if (!lp->stopped)
3161 {
3162 ptid_t ptid = lp->ptid;
3163
3164 stop_callback (lp, NULL);
3165 stop_wait_callback (lp, NULL);
3166
3167 /* Resume if the lwp still exists, and the core wanted it
3168 running. */
3169 lp = find_lwp_pid (ptid);
3170 if (lp != NULL)
3171 {
3172 if (lp->last_resume_kind == resume_stop
3173 && lp->status == 0)
3174 {
3175 /* The core wanted the LWP to stop. Even if it stopped
3176 cleanly (with SIGSTOP), leave the event pending. */
3177 if (debug_linux_nat)
3178 fprintf_unfiltered (gdb_stdlog,
3179 "SARC: core wanted LWP %ld stopped "
3180 "(leaving SIGSTOP pending)\n",
3181 GET_LWP (lp->ptid));
3182 lp->status = W_STOPCODE (SIGSTOP);
3183 }
3184
3185 if (lp->status == 0)
3186 {
3187 if (debug_linux_nat)
3188 fprintf_unfiltered (gdb_stdlog,
3189 "SARC: re-resuming LWP %ld\n",
3190 GET_LWP (lp->ptid));
3191 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
3192 }
3193 else
3194 {
3195 if (debug_linux_nat)
3196 fprintf_unfiltered (gdb_stdlog,
3197 "SARC: not re-resuming LWP %ld "
3198 "(has pending)\n",
3199 GET_LWP (lp->ptid));
3200 if (new_pending_p)
3201 *new_pending_p = 1;
3202 }
3203 }
3204 }
3205 return 0;
3206 }
3207
3208 /* Check if we should go on and pass this event to common code.
3209 Return the affected lwp if we should, or NULL otherwise. If we stop
3210 all lwps temporarily, we may end up with new pending events in some
3211 other lwp. In that case set *NEW_PENDING_P to true. */
3212
3213 static struct lwp_info *
3214 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3215 {
3216 struct lwp_info *lp;
3217
3218 *new_pending_p = 0;
3219
3220 lp = find_lwp_pid (pid_to_ptid (lwpid));
3221
3222 /* Check for stop events reported by a process we didn't already
3223 know about - anything not already in our LWP list.
3224
3225 If we're expecting to receive stopped processes after
3226 fork, vfork, and clone events, then we'll just add the
3227 new one to our list and go back to waiting for the event
3228 to be reported - the stopped process might be returned
3229 from waitpid before or after the event is.
3230
3231 But note the case of a non-leader thread exec'ing after the
3232 leader has exited and been removed from our lists. The non-leader
3233 thread changes its tid to the tgid.
3234
3235 if (WIFSTOPPED (status) && lp == NULL
3236 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3237 {
3238 /* A multi-thread exec after we had seen the leader exiting. */
3239 if (debug_linux_nat)
3240 fprintf_unfiltered (gdb_stdlog,
3241 "LLW: Re-adding thread group leader LWP %d.\n",
3242 lwpid);
3243
3244 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3245 lp->stopped = 1;
3246 lp->resumed = 1;
3247 add_thread (lp->ptid);
3248 }
3249
3250 if (WIFSTOPPED (status) && !lp)
3251 {
3252 add_to_pid_list (&stopped_pids, lwpid, status);
3253 return NULL;
3254 }
3255
3256 /* Make sure we don't report an event for the exit of an LWP not in
3257 our list, i.e. not part of the current process. This can happen
3258 if we detach from a program we originally forked and then it
3259 exits. */
3260 if (!WIFSTOPPED (status) && !lp)
3261 return NULL;
3262
3263 /* Handle GNU/Linux's syscall SIGTRAPs. */
3264 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3265 {
3266 /* No longer need the sysgood bit. The ptrace event ends up
3267 recorded in lp->waitstatus if we care for it. We can carry
3268 on handling the event like a regular SIGTRAP from here
3269 on. */
3270 status = W_STOPCODE (SIGTRAP);
3271 if (linux_handle_syscall_trap (lp, 0))
3272 return NULL;
3273 }
3274
3275 /* Handle GNU/Linux's extended waitstatus for trace events. */
3276 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3277 {
3278 if (debug_linux_nat)
3279 fprintf_unfiltered (gdb_stdlog,
3280 "LLW: Handling extended status 0x%06x\n",
3281 status);
3282 if (linux_handle_extended_wait (lp, status, 0))
3283 return NULL;
3284 }
3285
3286 if (linux_nat_status_is_event (status))
3287 {
3288 /* Save the trap's siginfo in case we need it later. */
3289 save_siginfo (lp);
3290
3291 save_sigtrap (lp);
3292 }
3293
3294 /* Check if the thread has exited. */
3295 if ((WIFEXITED (status) || WIFSIGNALED (status))
3296 && num_lwps (GET_PID (lp->ptid)) > 1)
3297 {
3298 /* If this is the main thread, we must stop all threads and verify
3299 if they are still alive. This is because in the nptl thread model
3300 on Linux 2.4, there is no signal issued for exiting LWPs
3301 other than the main thread. We only get the main thread exit
3302 signal once all child threads have already exited. If we
3303 stop all the threads and use the stop_wait_callback to check
3304 if they have exited we can determine whether this signal
3305 should be ignored or whether it means the end of the debugged
3306 application, regardless of which threading model is being
3307 used. */
3308 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3309 {
3310 lp->stopped = 1;
3311 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3312 stop_and_resume_callback, new_pending_p);
3313 }
3314
3315 if (debug_linux_nat)
3316 fprintf_unfiltered (gdb_stdlog,
3317 "LLW: %s exited.\n",
3318 target_pid_to_str (lp->ptid));
3319
3320 if (num_lwps (GET_PID (lp->ptid)) > 1)
3321 {
3322 /* If there is at least one more LWP, then the exit signal
3323 was not the end of the debugged application and should be
3324 ignored. */
3325 exit_lwp (lp);
3326 return NULL;
3327 }
3328 }
3329
3330 /* Check if the current LWP has previously exited. In the nptl
3331 thread model, LWPs other than the main thread do not issue
3332 signals when they exit so we must check whenever the thread has
3333 stopped. A similar check is made in stop_wait_callback(). */
3334 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3335 {
3336 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3337
3338 if (debug_linux_nat)
3339 fprintf_unfiltered (gdb_stdlog,
3340 "LLW: %s exited.\n",
3341 target_pid_to_str (lp->ptid));
3342
3343 exit_lwp (lp);
3344
3345 /* Make sure there is at least one thread running. */
3346 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3347
3348 /* Discard the event. */
3349 return NULL;
3350 }
3351
3352 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3353 an attempt to stop an LWP. */
3354 if (lp->signalled
3355 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3356 {
3357 if (debug_linux_nat)
3358 fprintf_unfiltered (gdb_stdlog,
3359 "LLW: Delayed SIGSTOP caught for %s.\n",
3360 target_pid_to_str (lp->ptid));
3361
3362 lp->signalled = 0;
3363
3364 if (lp->last_resume_kind != resume_stop)
3365 {
3366 /* This is a delayed SIGSTOP. */
3367
3368 registers_changed ();
3369
3370 if (linux_nat_prepare_to_resume != NULL)
3371 linux_nat_prepare_to_resume (lp);
3372 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3373 lp->step, GDB_SIGNAL_0);
3374 if (debug_linux_nat)
3375 fprintf_unfiltered (gdb_stdlog,
3376 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3377 lp->step ?
3378 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3379 target_pid_to_str (lp->ptid));
3380
3381 lp->stopped = 0;
3382 gdb_assert (lp->resumed);
3383
3384 /* Discard the event. */
3385 return NULL;
3386 }
3387 }
3388
3389 /* Make sure we don't report a SIGINT that we have already displayed
3390 for another thread. */
3391 if (lp->ignore_sigint
3392 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3393 {
3394 if (debug_linux_nat)
3395 fprintf_unfiltered (gdb_stdlog,
3396 "LLW: Delayed SIGINT caught for %s.\n",
3397 target_pid_to_str (lp->ptid));
3398
3399 /* This is a delayed SIGINT. */
3400 lp->ignore_sigint = 0;
3401
3402 registers_changed ();
3403 if (linux_nat_prepare_to_resume != NULL)
3404 linux_nat_prepare_to_resume (lp);
3405 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3406 lp->step, GDB_SIGNAL_0);
3407 if (debug_linux_nat)
3408 fprintf_unfiltered (gdb_stdlog,
3409 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3410 lp->step ?
3411 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3412 target_pid_to_str (lp->ptid));
3413
3414 lp->stopped = 0;
3415 gdb_assert (lp->resumed);
3416
3417 /* Discard the event. */
3418 return NULL;
3419 }
3420
3421 /* An interesting event. */
3422 gdb_assert (lp);
3423 lp->status = status;
3424 return lp;
3425 }
3426
3427 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3428 their exits until all other threads in the group have exited. */
3429
3430 static void
3431 check_zombie_leaders (void)
3432 {
3433 struct inferior *inf;
3434
3435 ALL_INFERIORS (inf)
3436 {
3437 struct lwp_info *leader_lp;
3438
3439 if (inf->pid == 0)
3440 continue;
3441
3442 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
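      /* linux_proc_pid_is_zombie checks the State: field of
         /proc/PID/status -- a zombie leader shows up there as `Z'
         even though waitpid can't reap it yet.  */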
3443 if (leader_lp != NULL
3444 /* Check if there are other threads in the group, as we may
3445 have raced with the inferior simply exiting. */
3446 && num_lwps (inf->pid) > 1
3447 && linux_proc_pid_is_zombie (inf->pid))
3448 {
3449 if (debug_linux_nat)
3450 fprintf_unfiltered (gdb_stdlog,
3451 "CZL: Thread group leader %d zombie "
3452 "(it exited, or another thread execd).\n",
3453 inf->pid);
3454
3455 /* A leader zombie can mean one of two things:
3456
3457 - It exited, and there's an exit status pending
3458 available, or only the leader exited (not the whole
3459 program). In the latter case, we can't waitpid the
3460 leader's exit status until all other threads are gone.
3461
3462 - There are 3 or more threads in the group, and a thread
3463 other than the leader exec'd. On an exec, the Linux
3464 kernel destroys all other threads (except the execing
3465 one) in the thread group, and resets the execing thread's
3466 tid to the tgid. No exit notification is sent for the
3467 execing thread -- from the ptracer's perspective, it
3468 appears as though the execing thread just vanishes.
3469 Until we reap all other threads except the leader and the
3470 execing thread, the leader will be zombie, and the
3471 execing thread will be in `D (disc sleep)'. As soon as
3472 all other threads are reaped, the execing thread changes
3473 its tid to the tgid, and the previous (zombie) leader
3474 vanishes, giving place to the "new" leader. We could try
3475 distinguishing the exit and exec cases by waiting once
3476 more and seeing if something comes out, but it doesn't
3477 sound useful. The previous leader _does_ go away, and
3478 we'll re-add the new one once we see the exec event
3479 (which is just the same as what would happen if the
3480 previous leader did exit voluntarily before some other
3481 thread execs). */
3482
3483 if (debug_linux_nat)
3484 fprintf_unfiltered (gdb_stdlog,
3485 "CZL: Thread group leader %d vanished.\n",
3486 inf->pid);
3487 exit_lwp (leader_lp);
3488 }
3489 }
3490 }
3491
3492 static ptid_t
3493 linux_nat_wait_1 (struct target_ops *ops,
3494 ptid_t ptid, struct target_waitstatus *ourstatus,
3495 int target_options)
3496 {
3497 static sigset_t prev_mask;
3498 enum resume_kind last_resume_kind;
3499 struct lwp_info *lp;
3500 int status;
3501
3502 if (debug_linux_nat)
3503 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3504
3505 /* The first time we get here after starting a new inferior, we may
3506 not have added it to the LWP list yet - this is the earliest
3507 moment at which we know its PID. */
3508 if (ptid_is_pid (inferior_ptid))
3509 {
3510 /* Upgrade the main thread's ptid. */
3511 thread_change_ptid (inferior_ptid,
3512 BUILD_LWP (GET_PID (inferior_ptid),
3513 GET_PID (inferior_ptid)));
3514
3515 lp = add_lwp (inferior_ptid);
3516 lp->resumed = 1;
3517 }
3518
3519 /* Make sure SIGCHLD is blocked. */
3520 block_child_signals (&prev_mask);
3521
3522 retry:
3523 lp = NULL;
3524 status = 0;
3525
3526 /* First check if there is a LWP with a wait status pending. */
3527 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3528 {
3529 /* Any LWP in the PTID group that's been resumed will do. */
3530 lp = iterate_over_lwps (ptid, status_callback, NULL);
3531 if (lp)
3532 {
3533 if (debug_linux_nat && lp->status)
3534 fprintf_unfiltered (gdb_stdlog,
3535 "LLW: Using pending wait status %s for %s.\n",
3536 status_to_str (lp->status),
3537 target_pid_to_str (lp->ptid));
3538 }
3539 }
3540 else if (is_lwp (ptid))
3541 {
3542 if (debug_linux_nat)
3543 fprintf_unfiltered (gdb_stdlog,
3544 "LLW: Waiting for specific LWP %s.\n",
3545 target_pid_to_str (ptid));
3546
3547 /* We have a specific LWP to check. */
3548 lp = find_lwp_pid (ptid);
3549 gdb_assert (lp);
3550
3551 if (debug_linux_nat && lp->status)
3552 fprintf_unfiltered (gdb_stdlog,
3553 "LLW: Using pending wait status %s for %s.\n",
3554 status_to_str (lp->status),
3555 target_pid_to_str (lp->ptid));
3556
3557 /* We check for lp->waitstatus in addition to lp->status,
3558 because we can have pending process exits recorded in
3559 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3560 an additional lp->status_p flag. */
3561 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3562 lp = NULL;
3563 }
3564
3565 if (!target_can_async_p ())
3566 {
3567 /* Causes SIGINT to be passed on to the attached process. */
3568 set_sigint_trap ();
3569 }
3570
3571 /* But if we don't find a pending event, we'll have to wait. */
3572
3573 while (lp == NULL)
3574 {
3575 pid_t lwpid;
3576
3577 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3578 quirks:
3579
3580 - If the thread group leader exits while other threads in the
3581 thread group still exist, waitpid(TGID, ...) hangs. That
3582 waitpid won't return an exit status until the other threads
3583 in the group are reaped.
3584
3585 - When a non-leader thread execs, that thread just vanishes
3586 without reporting an exit (so we'd hang if we waited for it
3587 explicitly in that case). The exec event is reported to
3588 the TGID pid. */
3589
3590 errno = 0;
3591 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3592 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3593 lwpid = my_waitpid (-1, &status, WNOHANG);
3594
3595 if (debug_linux_nat)
3596 fprintf_unfiltered (gdb_stdlog,
3597 "LNW: waitpid(-1, ...) returned %d, %s\n",
3598 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3599
3600 if (lwpid > 0)
3601 {
3602 /* If this is true, then we paused LWPs momentarily, and may
3603 now have pending events to handle. */
3604 int new_pending;
3605
3606 if (debug_linux_nat)
3607 {
3608 fprintf_unfiltered (gdb_stdlog,
3609 "LLW: waitpid %ld received %s\n",
3610 (long) lwpid, status_to_str (status));
3611 }
3612
3613 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3614
3615 /* STATUS is now no longer valid, use LP->STATUS instead. */
3616 status = 0;
3617
3618 if (lp && !ptid_match (lp->ptid, ptid))
3619 {
3620 gdb_assert (lp->resumed);
3621
3622 if (debug_linux_nat)
3623 fprintf (stderr,
3624 "LWP %ld got an event %06x, leaving pending.\n",
3625 ptid_get_lwp (lp->ptid), lp->status);
3626
3627 if (WIFSTOPPED (lp->status))
3628 {
3629 if (WSTOPSIG (lp->status) != SIGSTOP)
3630 {
3631 /* Cancel breakpoint hits. The breakpoint may
3632 be removed before we fetch events from this
3633 process to report to the core. It is best
3634 not to assume the moribund breakpoints
3635	                     heuristic always handles these cases --- too many
3636	                     events could go through to the
3637 core before this one is handled. All-stop
3638 always cancels breakpoint hits in all
3639 threads. */
3640 if (non_stop
3641 && linux_nat_lp_status_is_event (lp)
3642 && cancel_breakpoint (lp))
3643 {
3644 /* Throw away the SIGTRAP. */
3645 lp->status = 0;
3646
3647 if (debug_linux_nat)
3648 fprintf (stderr,
3649 "LLW: LWP %ld hit a breakpoint while"
3650 " waiting for another process;"
3651 " cancelled it\n",
3652 ptid_get_lwp (lp->ptid));
3653 }
3654 lp->stopped = 1;
3655 }
3656 else
3657 {
3658 lp->stopped = 1;
3659 lp->signalled = 0;
3660 }
3661 }
3662 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3663 {
3664 if (debug_linux_nat)
3665 fprintf (stderr,
3666 "Process %ld exited while stopping LWPs\n",
3667 ptid_get_lwp (lp->ptid));
3668
3669 /* This was the last lwp in the process. Since
3670 events are serialized to GDB core, and we can't
3671 report this one right now, but GDB core and the
3672 other target layers will want to be notified
3673 about the exit code/signal, leave the status
3674 pending for the next time we're able to report
3675 it. */
3676
3677 /* Prevent trying to stop this thread again. We'll
3678 never try to resume it because it has a pending
3679 status. */
3680 lp->stopped = 1;
3681
3682	              /* Dead LWPs aren't expected to report a pending
3683	                 SIGSTOP.  */
3684 lp->signalled = 0;
3685
3686 /* Store the pending event in the waitstatus as
3687 well, because W_EXITCODE(0,0) == 0. */
3688 store_waitstatus (&lp->waitstatus, lp->status);
3689 }
3690
3691 /* Keep looking. */
3692 lp = NULL;
3693 }
3694
3695 if (new_pending)
3696 {
3697 /* Some LWP now has a pending event. Go all the way
3698 back to check it. */
3699 goto retry;
3700 }
3701
3702 if (lp)
3703 {
3704 /* We got an event to report to the core. */
3705 break;
3706 }
3707
3708 /* Retry until nothing comes out of waitpid. A single
3709 SIGCHLD can indicate more than one child stopped. */
3710 continue;
3711 }
3712
3713 /* Check for zombie thread group leaders. Those can't be reaped
3714 until all other threads in the thread group are. */
3715 check_zombie_leaders ();
3716
3717 /* If there are no resumed children left, bail. We'd be stuck
3718 forever in the sigsuspend call below otherwise. */
3719 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3720 {
3721 if (debug_linux_nat)
3722 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3723
3724 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3725
3726 if (!target_can_async_p ())
3727 clear_sigint_trap ();
3728
3729 restore_child_signals_mask (&prev_mask);
3730 return minus_one_ptid;
3731 }
3732
3733 /* No interesting event to report to the core. */
3734
3735 if (target_options & TARGET_WNOHANG)
3736 {
3737 if (debug_linux_nat)
3738 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3739
3740 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3741 restore_child_signals_mask (&prev_mask);
3742 return minus_one_ptid;
3743 }
3744
3745 /* We shouldn't end up here unless we want to try again. */
3746 gdb_assert (lp == NULL);
3747
3748 /* Block until we get an event reported with SIGCHLD. */
3749 sigsuspend (&suspend_mask);
3750 }
3751
3752 if (!target_can_async_p ())
3753 clear_sigint_trap ();
3754
3755 gdb_assert (lp);
3756
3757 status = lp->status;
3758 lp->status = 0;
3759
3760 /* Don't report signals that GDB isn't interested in, such as
3761 signals that are neither printed nor stopped upon. Stopping all
3762	     threads can be a bit time-consuming, so if we want decent
3763 performance with heavily multi-threaded programs, especially when
3764 they're using a high frequency timer, we'd better avoid it if we
3765 can. */
3766
3767 if (WIFSTOPPED (status))
3768 {
3769 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3770
3771 /* When using hardware single-step, we need to report every signal.
3772 Otherwise, signals in pass_mask may be short-circuited. */
3773 if (!lp->step
3774 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3775 {
3776	      /* FIXME: kettenis/2001-06-06: Should we resume all threads
3777 here? It is not clear we should. GDB may not expect
3778 other threads to run. On the other hand, not resuming
3779 newly attached threads may cause an unwanted delay in
3780 getting them running. */
3781 registers_changed ();
3782 if (linux_nat_prepare_to_resume != NULL)
3783 linux_nat_prepare_to_resume (lp);
3784 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3785 lp->step, signo);
3786 if (debug_linux_nat)
3787 fprintf_unfiltered (gdb_stdlog,
3788 "LLW: %s %s, %s (preempt 'handle')\n",
3789 lp->step ?
3790 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3791 target_pid_to_str (lp->ptid),
3792 (signo != GDB_SIGNAL_0
3793 ? strsignal (gdb_signal_to_host (signo))
3794 : "0"));
3795 lp->stopped = 0;
3796 goto retry;
3797 }
3798
3799 if (!non_stop)
3800 {
3801 /* Only do the below in all-stop, as we currently use SIGINT
3802 to implement target_stop (see linux_nat_stop) in
3803 non-stop. */
3804 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3805 {
3806 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3807 forwarded to the entire process group, that is, all LWPs
3808 will receive it - unless they're using CLONE_THREAD to
3809 share signals. Since we only want to report it once, we
3810 mark it as ignored for all LWPs except this one. */
3811 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3812 set_ignore_sigint, NULL);
3813 lp->ignore_sigint = 0;
3814 }
3815 else
3816 maybe_clear_ignore_sigint (lp);
3817 }
3818 }
3819
3820 /* This LWP is stopped now. */
3821 lp->stopped = 1;
3822
3823 if (debug_linux_nat)
3824 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3825 status_to_str (status), target_pid_to_str (lp->ptid));
3826
3827 if (!non_stop)
3828 {
3829 /* Now stop all other LWP's ... */
3830 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3831
3832 /* ... and wait until all of them have reported back that
3833 they're no longer running. */
3834 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3835
3836 /* If we're not waiting for a specific LWP, choose an event LWP
3837 from among those that have had events. Giving equal priority
3838 to all LWPs that have had events helps prevent
3839 starvation. */
3840 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3841 select_event_lwp (ptid, &lp, &status);
3842
3843 /* Now that we've selected our final event LWP, cancel any
3844 breakpoints in other LWPs that have hit a GDB breakpoint.
3845 See the comment in cancel_breakpoints_callback to find out
3846 why. */
3847 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3848
3849 /* We'll need this to determine whether to report a SIGSTOP as
3850	         GDB_SIGNAL_0.  Need to take a copy because
3851 resume_clear_callback clears it. */
3852 last_resume_kind = lp->last_resume_kind;
3853
3854 /* In all-stop, from the core's perspective, all LWPs are now
3855 stopped until a new resume action is sent over. */
3856 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3857 }
3858 else
3859 {
3860 /* See above. */
3861 last_resume_kind = lp->last_resume_kind;
3862 resume_clear_callback (lp, NULL);
3863 }
3864
3865 if (linux_nat_status_is_event (status))
3866 {
3867 if (debug_linux_nat)
3868 fprintf_unfiltered (gdb_stdlog,
3869 "LLW: trap ptid is %s.\n",
3870 target_pid_to_str (lp->ptid));
3871 }
3872
3873 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3874 {
3875 *ourstatus = lp->waitstatus;
3876 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3877 }
3878 else
3879 store_waitstatus (ourstatus, status);
3880
3881 if (debug_linux_nat)
3882 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3883
3884 restore_child_signals_mask (&prev_mask);
3885
3886 if (last_resume_kind == resume_stop
3887 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3888 && WSTOPSIG (status) == SIGSTOP)
3889 {
3890	      /* This thread was requested to stop by GDB with target_stop,
3891	         and it stopped cleanly, so report it as stopped with signal 0.
3892	         The use of SIGSTOP is an implementation detail.  */
3893 ourstatus->value.sig = GDB_SIGNAL_0;
3894 }
3895
3896 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3897 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3898 lp->core = -1;
3899 else
3900 lp->core = linux_common_core_of_thread (lp->ptid);
3901
3902 return lp->ptid;
3903 }
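
/* Illustrative, hedged sketch (not part of this file; all names here
   are hypothetical) of the wait strategy used by linux_nat_wait_1
   above: keep SIGCHLD blocked, poll with non-blocking waitpid over
   both clone and non-clone children, and sleep in sigsuspend only
   when nothing is pending, so no SIGCHLD can be lost between the
   poll and the sleep.  */
#if 0
static pid_t
sketch_wait_for_event (int *statusp)
{
  sigset_t chld_mask, suspend_mask, prev_mask;

  /* Keep SIGCHLD blocked except inside sigsuspend; otherwise it
     could arrive between the waitpid poll and the sleep and the
     wakeup would be missed.  */
  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask);

  suspend_mask = prev_mask;
  sigdelset (&suspend_mask, SIGCHLD);

  for (;;)
    {
      /* Poll cloned children first, then the rest, without blocking.  */
      pid_t pid = waitpid (-1, statusp, __WCLONE | WNOHANG);
      if (pid <= 0)
        pid = waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
        {
          sigprocmask (SIG_SETMASK, &prev_mask, NULL);
          return pid;
        }

      /* Atomically unblock SIGCHLD and sleep until a signal arrives.  */
      sigsuspend (&suspend_mask);
    }
}
#endif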
3904
3905 /* Resume LWPs that are currently stopped without any pending status
3906 to report, but are resumed from the core's perspective. */
3907
3908 static int
3909 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3910 {
3911 ptid_t *wait_ptid_p = data;
3912
3913 if (lp->stopped
3914 && lp->resumed
3915 && lp->status == 0
3916 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3917 {
3918 struct regcache *regcache = get_thread_regcache (lp->ptid);
3919 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3920 CORE_ADDR pc = regcache_read_pc (regcache);
3921
3922 gdb_assert (is_executing (lp->ptid));
3923
3924 /* Don't bother if there's a breakpoint at PC that we'd hit
3925 immediately, and we're not waiting for this LWP. */
3926 if (!ptid_match (lp->ptid, *wait_ptid_p))
3927 {
3928 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3929 return 0;
3930 }
3931
3932 if (debug_linux_nat)
3933 fprintf_unfiltered (gdb_stdlog,
3934 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3935 target_pid_to_str (lp->ptid),
3936 paddress (gdbarch, pc),
3937 lp->step);
3938
3939 registers_changed ();
3940 if (linux_nat_prepare_to_resume != NULL)
3941 linux_nat_prepare_to_resume (lp);
3942 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3943 lp->step, GDB_SIGNAL_0);
3944 lp->stopped = 0;
3945 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
3946 lp->stopped_by_watchpoint = 0;
3947 }
3948
3949 return 0;
3950 }
3951
3952 static ptid_t
3953 linux_nat_wait (struct target_ops *ops,
3954 ptid_t ptid, struct target_waitstatus *ourstatus,
3955 int target_options)
3956 {
3957 ptid_t event_ptid;
3958
3959 if (debug_linux_nat)
3960 fprintf_unfiltered (gdb_stdlog,
3961 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
3962
3963 /* Flush the async file first. */
3964 if (target_can_async_p ())
3965 async_file_flush ();
3966
3967 /* Resume LWPs that are currently stopped without any pending status
3968 to report, but are resumed from the core's perspective. LWPs get
3969 in this state if we find them stopping at a time we're not
3970 interested in reporting the event (target_wait on a
3971 specific_process, for example, see linux_nat_wait_1), and
3972 meanwhile the event became uninteresting. Don't bother resuming
3973 LWPs we're not going to wait for if they'd stop immediately. */
3974 if (non_stop)
3975 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3976
3977 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3978
3979 /* If we requested any event, and something came out, assume there
3980 may be more. If we requested a specific lwp or process, also
3981 assume there may be more. */
3982 if (target_can_async_p ()
3983 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3984 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3985 || !ptid_equal (ptid, minus_one_ptid)))
3986 async_file_mark ();
3987
3988 /* Get ready for the next event. */
3989 if (target_can_async_p ())
3990 target_async (inferior_event_handler, 0);
3991
3992 return event_ptid;
3993 }
3994
3995 static int
3996 kill_callback (struct lwp_info *lp, void *data)
3997 {
3998 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3999
4000 errno = 0;
4001 kill (GET_LWP (lp->ptid), SIGKILL);
4002 if (debug_linux_nat)
4003 fprintf_unfiltered (gdb_stdlog,
4004 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4005 target_pid_to_str (lp->ptid),
4006 errno ? safe_strerror (errno) : "OK");
4007
4008 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4009
4010 errno = 0;
4011 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4012 if (debug_linux_nat)
4013 fprintf_unfiltered (gdb_stdlog,
4014 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4015 target_pid_to_str (lp->ptid),
4016 errno ? safe_strerror (errno) : "OK");
4017
4018 return 0;
4019 }
4020
4021 static int
4022 kill_wait_callback (struct lwp_info *lp, void *data)
4023 {
4024 pid_t pid;
4025
4026 /* We must make sure that there are no pending events (delayed
4027	     SIGSTOPs, pending SIGTRAPs, etc.) so that the current
4028	     program doesn't interfere with any following debugging session.  */
4029
4030 /* For cloned processes we must check both with __WCLONE and
4031 without, since the exit status of a cloned process isn't reported
4032 with __WCLONE. */
4033 if (lp->cloned)
4034 {
4035 do
4036 {
4037 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4038 if (pid != (pid_t) -1)
4039 {
4040 if (debug_linux_nat)
4041 fprintf_unfiltered (gdb_stdlog,
4042 "KWC: wait %s received unknown.\n",
4043 target_pid_to_str (lp->ptid));
4044 /* The Linux kernel sometimes fails to kill a thread
4045 completely after PTRACE_KILL; that goes from the stop
4046 point in do_fork out to the one in
4047	                 get_signal_to_deliver and waits again.  So kill it
4048 again. */
4049 kill_callback (lp, NULL);
4050 }
4051 }
4052 while (pid == GET_LWP (lp->ptid));
4053
4054 gdb_assert (pid == -1 && errno == ECHILD);
4055 }
4056
4057 do
4058 {
4059 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4060 if (pid != (pid_t) -1)
4061 {
4062 if (debug_linux_nat)
4063 fprintf_unfiltered (gdb_stdlog,
4064 "KWC: wait %s received unk.\n",
4065 target_pid_to_str (lp->ptid));
4066 /* See the call to kill_callback above. */
4067 kill_callback (lp, NULL);
4068 }
4069 }
4070 while (pid == GET_LWP (lp->ptid));
4071
4072 gdb_assert (pid == -1 && errno == ECHILD);
4073 return 0;
4074 }
4075
4076 static void
4077 linux_nat_kill (struct target_ops *ops)
4078 {
4079 struct target_waitstatus last;
4080 ptid_t last_ptid;
4081 int status;
4082
4083 /* If we're stopped while forking and we haven't followed yet,
4084 kill the other task. We need to do this first because the
4085 parent will be sleeping if this is a vfork. */
4086
4087 get_last_target_status (&last_ptid, &last);
4088
4089 if (last.kind == TARGET_WAITKIND_FORKED
4090 || last.kind == TARGET_WAITKIND_VFORKED)
4091 {
4092 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4093 wait (&status);
4094 }
4095
4096 if (forks_exist_p ())
4097 linux_fork_killall ();
4098 else
4099 {
4100 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4101
4102 /* Stop all threads before killing them, since ptrace requires
4103	         that the thread be stopped for PTRACE_KILL to succeed.  */
4104 iterate_over_lwps (ptid, stop_callback, NULL);
4105 /* ... and wait until all of them have reported back that
4106 they're no longer running. */
4107 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4108
4109 /* Kill all LWP's ... */
4110 iterate_over_lwps (ptid, kill_callback, NULL);
4111
4112 /* ... and wait until we've flushed all events. */
4113 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4114 }
4115
4116 target_mourn_inferior ();
4117 }
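
/* A hedged, standalone sketch (hypothetical helper, not part of this
   file) of the kill sequence performed above for one LWP: SIGKILL
   first, PTRACE_KILL in case the kernel ignores SIGKILL for ptraced
   tasks, then reap until the LWP stops producing wait events.  */
#if 0
static void
sketch_kill_and_reap (pid_t lwp)
{
  int status;

  kill (lwp, SIGKILL);
  ptrace (PTRACE_KILL, lwp, 0, 0);

  /* Flush all remaining events; a cloned LWP may additionally need a
     __WCLONE pass, as kill_wait_callback above shows.  */
  while (waitpid (lwp, &status, 0) == lwp)
    ;
}
#endif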
4118
4119 static void
4120 linux_nat_mourn_inferior (struct target_ops *ops)
4121 {
4122 purge_lwp_list (ptid_get_pid (inferior_ptid));
4123
4124 if (! forks_exist_p ())
4125 /* Normal case, no other forks available. */
4126 linux_ops->to_mourn_inferior (ops);
4127 else
4128 /* Multi-fork case. The current inferior_ptid has exited, but
4129 there are other viable forks to debug. Delete the exiting
4130 one and context-switch to the first available. */
4131 linux_fork_mourn_inferior ();
4132 }
4133
4134	/* Convert a native/host siginfo object into/from the siginfo in the
4135	   layout of the inferior's architecture.  */
4136
4137 static void
4138 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
4139 {
4140 int done = 0;
4141
4142 if (linux_nat_siginfo_fixup != NULL)
4143 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4144
4145 /* If there was no callback, or the callback didn't do anything,
4146 then just do a straight memcpy. */
4147 if (!done)
4148 {
4149 if (direction == 1)
4150 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
4151 else
4152 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
4153 }
4154 }
4155
4156 static LONGEST
4157 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4158 const char *annex, gdb_byte *readbuf,
4159 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4160 {
4161 int pid;
4162 siginfo_t siginfo;
4163 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4164
4165 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4166 gdb_assert (readbuf || writebuf);
4167
4168 pid = GET_LWP (inferior_ptid);
4169 if (pid == 0)
4170 pid = GET_PID (inferior_ptid);
4171
4172 if (offset > sizeof (siginfo))
4173 return -1;
4174
4175 errno = 0;
4176 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4177 if (errno != 0)
4178 return -1;
4179
4180 /* When GDB is built as a 64-bit application, ptrace writes into
4181 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4182 inferior with a 64-bit GDB should look the same as debugging it
4183 with a 32-bit GDB, we need to convert it. GDB core always sees
4184 the converted layout, so any read/write will have to be done
4185 post-conversion. */
4186 siginfo_fixup (&siginfo, inf_siginfo, 0);
4187
4188 if (offset + len > sizeof (siginfo))
4189 len = sizeof (siginfo) - offset;
4190
4191 if (readbuf != NULL)
4192 memcpy (readbuf, inf_siginfo + offset, len);
4193 else
4194 {
4195 memcpy (inf_siginfo + offset, writebuf, len);
4196
4197 /* Convert back to ptrace layout before flushing it out. */
4198 siginfo_fixup (&siginfo, inf_siginfo, 1);
4199
4200 errno = 0;
4201 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4202 if (errno != 0)
4203 return -1;
4204 }
4205
4206 return len;
4207 }
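
/* Hedged usage sketch of the ptrace calls wrapped above: inspecting
   the siginfo of a stopped thread directly.  TID is a hypothetical
   already-stopped LWP; note si_addr is only meaningful for faulting
   signals such as SIGSEGV.  */
#if 0
static void
sketch_show_siginfo (pid_t tid)
{
  siginfo_t si;

  errno = 0;
  if (ptrace (PTRACE_GETSIGINFO, tid, (PTRACE_TYPE_ARG3) 0, &si) == 0)
    fprintf (stderr, "signo=%d code=%d addr=%p\n",
             si.si_signo, si.si_code, si.si_addr);
}
#endif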
4208
4209 static LONGEST
4210 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4211 const char *annex, gdb_byte *readbuf,
4212 const gdb_byte *writebuf,
4213 ULONGEST offset, LONGEST len)
4214 {
4215 struct cleanup *old_chain;
4216 LONGEST xfer;
4217
4218 if (object == TARGET_OBJECT_SIGNAL_INFO)
4219 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4220 offset, len);
4221
4222 /* The target is connected but no live inferior is selected. Pass
4223 this request down to a lower stratum (e.g., the executable
4224 file). */
4225 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4226 return 0;
4227
4228 old_chain = save_inferior_ptid ();
4229
4230 if (is_lwp (inferior_ptid))
4231 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4232
4233 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4234 offset, len);
4235
4236 do_cleanups (old_chain);
4237 return xfer;
4238 }
4239
4240 static int
4241 linux_thread_alive (ptid_t ptid)
4242 {
4243 int err, tmp_errno;
4244
4245 gdb_assert (is_lwp (ptid));
4246
4247 /* Send signal 0 instead of anything ptrace, because ptracing a
4248 running thread errors out claiming that the thread doesn't
4249 exist. */
4250 err = kill_lwp (GET_LWP (ptid), 0);
4251 tmp_errno = errno;
4252 if (debug_linux_nat)
4253 fprintf_unfiltered (gdb_stdlog,
4254 "LLTA: KILL(SIG0) %s (%s)\n",
4255 target_pid_to_str (ptid),
4256 err ? safe_strerror (tmp_errno) : "OK");
4257
4258 if (err != 0)
4259 return 0;
4260
4261 return 1;
4262 }
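
/* The probe above is the standard "signal 0" liveness test (kill_lwp
   prefers the tkill syscall so the check targets the specific
   thread).  A minimal hedged sketch: signal 0 performs only the
   existence and permission checks, so ESRCH means the thread is
   gone, while e.g. EPERM still means it exists.  */
#if 0
static int
sketch_lwp_exists (pid_t lwp)
{
  return kill (lwp, 0) == 0 || errno != ESRCH;
}
#endif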
4263
4264 static int
4265 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4266 {
4267 return linux_thread_alive (ptid);
4268 }
4269
4270 static char *
4271 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4272 {
4273 static char buf[64];
4274
4275 if (is_lwp (ptid)
4276 && (GET_PID (ptid) != GET_LWP (ptid)
4277 || num_lwps (GET_PID (ptid)) > 1))
4278 {
4279 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4280 return buf;
4281 }
4282
4283 return normal_pid_to_str (ptid);
4284 }
4285
4286 static char *
4287 linux_nat_thread_name (struct thread_info *thr)
4288 {
4289 int pid = ptid_get_pid (thr->ptid);
4290 long lwp = ptid_get_lwp (thr->ptid);
4291 #define FORMAT "/proc/%d/task/%ld/comm"
4292 char buf[sizeof (FORMAT) + 30];
4293 FILE *comm_file;
4294 char *result = NULL;
4295
4296 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4297 comm_file = fopen (buf, "r");
4298 if (comm_file)
4299 {
4300 /* Not exported by the kernel, so we define it here. */
4301 #define COMM_LEN 16
4302 static char line[COMM_LEN + 1];
4303
4304 if (fgets (line, sizeof (line), comm_file))
4305 {
4306 char *nl = strchr (line, '\n');
4307
4308 if (nl)
4309 *nl = '\0';
4310 if (*line != '\0')
4311 result = line;
4312 }
4313
4314 fclose (comm_file);
4315 }
4316
4317 #undef COMM_LEN
4318 #undef FORMAT
4319
4320 return result;
4321 }
4322
4323	/* Accepts an integer PID; returns a string representing a file that
4324 can be opened to get the symbols for the child process. */
4325
4326 static char *
4327 linux_child_pid_to_exec_file (int pid)
4328 {
4329 char *name1, *name2;
4330
4331 name1 = xmalloc (MAXPATHLEN);
4332 name2 = xmalloc (MAXPATHLEN);
4333 make_cleanup (xfree, name1);
4334 make_cleanup (xfree, name2);
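  /* readlink does not NUL-terminate its result, so NAME2 must be
     zeroed before use.  */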
4335 memset (name2, 0, MAXPATHLEN);
4336
4337 sprintf (name1, "/proc/%d/exe", pid);
4338 if (readlink (name1, name2, MAXPATHLEN) > 0)
4339 return name2;
4340 else
4341 return name1;
4342 }
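
/* Hedged sketch of the /proc/PID/exe idiom above with explicit
   NUL-termination (readlink never appends one itself); the helper
   name is hypothetical.  */
#if 0
static int
sketch_readlink_exe (int pid, char *buf, size_t bufsize)
{
  char proc[32];
  ssize_t n;

  snprintf (proc, sizeof (proc), "/proc/%d/exe", pid);
  n = readlink (proc, buf, bufsize - 1);
  if (n < 0)
    return -1;
  buf[n] = '\0';
  return 0;
}
#endif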
4343
4344 /* Records the thread's register state for the corefile note
4345 section. */
4346
4347 static char *
4348 linux_nat_collect_thread_registers (const struct regcache *regcache,
4349 ptid_t ptid, bfd *obfd,
4350 char *note_data, int *note_size,
4351 enum gdb_signal stop_signal)
4352 {
4353 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4354 const struct regset *regset;
4355 int core_regset_p;
4356 gdb_gregset_t gregs;
4357 gdb_fpregset_t fpregs;
4358
4359 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4360
4361 if (core_regset_p
4362 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4363 sizeof (gregs)))
4364 != NULL && regset->collect_regset != NULL)
4365 regset->collect_regset (regset, regcache, -1, &gregs, sizeof (gregs));
4366 else
4367 fill_gregset (regcache, &gregs, -1);
4368
4369 note_data = (char *) elfcore_write_prstatus
4370 (obfd, note_data, note_size, ptid_get_lwp (ptid),
4371 gdb_signal_to_host (stop_signal), &gregs);
4372
4373 if (core_regset_p
4374 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4375 sizeof (fpregs)))
4376 != NULL && regset->collect_regset != NULL)
4377 regset->collect_regset (regset, regcache, -1, &fpregs, sizeof (fpregs));
4378 else
4379 fill_fpregset (regcache, &fpregs, -1);
4380
4381 note_data = (char *) elfcore_write_prfpreg (obfd, note_data, note_size,
4382 &fpregs, sizeof (fpregs));
4383
4384 return note_data;
4385 }
4386
4387 /* Fills the "to_make_corefile_note" target vector. Builds the note
4388 section for a corefile, and returns it in a malloc buffer. */
4389
4390 static char *
4391 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4392 {
4393 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4394 converted to gdbarch_core_regset_sections, this function can go away. */
4395 return linux_make_corefile_notes (target_gdbarch, obfd, note_size,
4396 linux_nat_collect_thread_registers);
4397 }
4398
4399 /* Implement the to_xfer_partial interface for memory reads using the /proc
4400 filesystem. Because we can use a single read() call for /proc, this
4401 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4402 but it doesn't support writes. */
4403
4404 static LONGEST
4405 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4406 const char *annex, gdb_byte *readbuf,
4407 const gdb_byte *writebuf,
4408 ULONGEST offset, LONGEST len)
4409 {
4410 LONGEST ret;
4411 int fd;
4412 char filename[64];
4413
4414 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4415 return 0;
4416
4417	  /* Don't bother with /proc for small transfers.  */
4418 if (len < 3 * sizeof (long))
4419 return 0;
4420
4421 /* We could keep this file open and cache it - possibly one per
4422 thread. That requires some juggling, but is even faster. */
4423 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4424 fd = open (filename, O_RDONLY | O_LARGEFILE);
4425 if (fd == -1)
4426 return 0;
4427
4428 /* If pread64 is available, use it. It's faster if the kernel
4429 supports it (only one syscall), and it's 64-bit safe even on
4430 32-bit platforms (for instance, SPARC debugging a SPARC64
4431 application). */
4432 #ifdef HAVE_PREAD64
4433 if (pread64 (fd, readbuf, len, offset) != len)
4434 #else
4435 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4436 #endif
4437 ret = 0;
4438 else
4439 ret = len;
4440
4441 close (fd);
4442 return ret;
4443 }
4444
4445
4446 /* Enumerate spufs IDs for process PID. */
4447 static LONGEST
4448 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4449 {
4450 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4451 LONGEST pos = 0;
4452 LONGEST written = 0;
4453 char path[128];
4454 DIR *dir;
4455 struct dirent *entry;
4456
4457 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4458 dir = opendir (path);
4459 if (!dir)
4460 return -1;
4461
4462 rewinddir (dir);
4463 while ((entry = readdir (dir)) != NULL)
4464 {
4465 struct stat st;
4466 struct statfs stfs;
4467 int fd;
4468
4469 fd = atoi (entry->d_name);
4470 if (!fd)
4471 continue;
4472
4473 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4474 if (stat (path, &st) != 0)
4475 continue;
4476 if (!S_ISDIR (st.st_mode))
4477 continue;
4478
4479 if (statfs (path, &stfs) != 0)
4480 continue;
4481 if (stfs.f_type != SPUFS_MAGIC)
4482 continue;
4483
4484 if (pos >= offset && pos + 4 <= offset + len)
4485 {
4486 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4487 written += 4;
4488 }
4489 pos += 4;
4490 }
4491
4492 closedir (dir);
4493 return written;
4494 }
4495
4496 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4497 object type, using the /proc file system. */
4498 static LONGEST
4499 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4500 const char *annex, gdb_byte *readbuf,
4501 const gdb_byte *writebuf,
4502 ULONGEST offset, LONGEST len)
4503 {
4504 char buf[128];
4505 int fd = 0;
4506 int ret = -1;
4507 int pid = PIDGET (inferior_ptid);
4508
4509 if (!annex)
4510 {
4511 if (!readbuf)
4512 return -1;
4513 else
4514 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4515 }
4516
4517 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4518 fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
4519 if (fd <= 0)
4520 return -1;
4521
4522 if (offset != 0
4523 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4524 {
4525 close (fd);
4526 return 0;
4527 }
4528
4529 if (writebuf)
4530 ret = write (fd, writebuf, (size_t) len);
4531 else if (readbuf)
4532 ret = read (fd, readbuf, (size_t) len);
4533
4534 close (fd);
4535 return ret;
4536 }
4537
4538
4539 /* Parse LINE as a signal set and add its set bits to SIGS. */
4540
4541 static void
4542 add_line_to_sigset (const char *line, sigset_t *sigs)
4543 {
4544 int len = strlen (line) - 1;
4545 const char *p;
4546 int signum;
4547
4548 if (line[len] != '\n')
4549 error (_("Could not parse signal set: %s"), line);
4550
4551 p = line;
4552 signum = len * 4;
4553 while (len-- > 0)
4554 {
4555 int digit;
4556
4557 if (*p >= '0' && *p <= '9')
4558 digit = *p - '0';
4559 else if (*p >= 'a' && *p <= 'f')
4560 digit = *p - 'a' + 10;
4561 else
4562 error (_("Could not parse signal set: %s"), line);
4563
4564 signum -= 4;
4565
4566 if (digit & 1)
4567 sigaddset (sigs, signum + 1);
4568 if (digit & 2)
4569 sigaddset (sigs, signum + 2);
4570 if (digit & 4)
4571 sigaddset (sigs, signum + 3);
4572 if (digit & 8)
4573 sigaddset (sigs, signum + 4);
4574
4575 p++;
4576 }
4577 }
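
/* Worked example for the decoder above: the status lines hold a hex
   mask with signal 1 in the least significant bit and the most
   significant digit printed first.  For "0000000000000102\n", the
   final digit '2' has bit 1 set, yielding signal 2 (SIGINT), and the
   third digit from the end, '1', has bit 0 set, yielding signal 9
   (SIGKILL).  */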
4578
4579 /* Find process PID's pending signals from /proc/pid/status and set
4580 SIGS to match. */
4581
4582 void
4583 linux_proc_pending_signals (int pid, sigset_t *pending,
4584 sigset_t *blocked, sigset_t *ignored)
4585 {
4586 FILE *procfile;
4587 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4588 struct cleanup *cleanup;
4589
4590 sigemptyset (pending);
4591 sigemptyset (blocked);
4592 sigemptyset (ignored);
4593 sprintf (fname, "/proc/%d/status", pid);
4594 procfile = fopen (fname, "r");
4595 if (procfile == NULL)
4596 error (_("Could not open %s"), fname);
4597 cleanup = make_cleanup_fclose (procfile);
4598
4599 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4600 {
4601 /* Normal queued signals are on the SigPnd line in the status
4602 file. However, 2.6 kernels also have a "shared" pending
4603 queue for delivering signals to a thread group, so check for
4604 a ShdPnd line also.
4605
4606 Unfortunately some Red Hat kernels include the shared pending
4607 queue but not the ShdPnd status field. */
4608
4609 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4610 add_line_to_sigset (buffer + 8, pending);
4611 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4612 add_line_to_sigset (buffer + 8, pending);
4613 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4614 add_line_to_sigset (buffer + 8, blocked);
4615 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4616 add_line_to_sigset (buffer + 8, ignored);
4617 }
4618
4619 do_cleanups (cleanup);
4620 }
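
/* Hedged usage sketch for the routine above (the helper and PID are
   hypothetical): dump whatever signals a live process has pending.  */
#if 0
static void
sketch_dump_pending (int pid)
{
  sigset_t pending, blocked, ignored;
  int signo;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  for (signo = 1; signo < NSIG; signo++)
    if (sigismember (&pending, signo))
      fprintf (stderr, "pending: %d (%s)\n", signo, strsignal (signo));
}
#endif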
4621
4622 static LONGEST
4623 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4624 const char *annex, gdb_byte *readbuf,
4625 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4626 {
4627 gdb_assert (object == TARGET_OBJECT_OSDATA);
4628
4629 return linux_common_xfer_osdata (annex, readbuf, offset, len);
4630 }
4631
4632 static LONGEST
4633 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4634 const char *annex, gdb_byte *readbuf,
4635 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4636 {
4637 LONGEST xfer;
4638
4639 if (object == TARGET_OBJECT_AUXV)
4640 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4641 offset, len);
4642
4643 if (object == TARGET_OBJECT_OSDATA)
4644 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4645 offset, len);
4646
4647 if (object == TARGET_OBJECT_SPU)
4648 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4649 offset, len);
4650
4651	  /* GDB may calculate addresses in a width larger than the target's
4652	     address width; they need to be masked before final use - either by
4653	     linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4654
4655 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4656
4657 if (object == TARGET_OBJECT_MEMORY)
4658 {
4659 int addr_bit = gdbarch_addr_bit (target_gdbarch);
4660
4661 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4662 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4663 }
4664
4665 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4666 offset, len);
4667 if (xfer != 0)
4668 return xfer;
4669
4670 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4671 offset, len);
4672 }
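
/* Worked example of the masking above: with ADDR_BIT == 32 the mask
   is ((ULONGEST) 1 << 32) - 1 == 0xffffffff, so a sign-extended
   address such as 0xffffffff80001000 is reduced to 0x80001000 before
   reaching the lower layers.  */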
4673
4674 static void
4675 cleanup_target_stop (void *arg)
4676 {
4677 ptid_t *ptid = (ptid_t *) arg;
4678
4679 gdb_assert (arg != NULL);
4680
4681 /* Unpause all */
4682 target_resume (*ptid, 0, GDB_SIGNAL_0);
4683 }
4684
4685 static VEC(static_tracepoint_marker_p) *
4686 linux_child_static_tracepoint_markers_by_strid (const char *strid)
4687 {
4688 char s[IPA_CMD_BUF_SIZE];
4689 struct cleanup *old_chain;
4690 int pid = ptid_get_pid (inferior_ptid);
4691 VEC(static_tracepoint_marker_p) *markers = NULL;
4692 struct static_tracepoint_marker *marker = NULL;
4693 char *p = s;
4694 ptid_t ptid = ptid_build (pid, 0, 0);
4695
4696 /* Pause all */
4697 target_stop (ptid);
4698
4699 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4700 s[sizeof ("qTfSTM")] = 0;
4701
4702 agent_run_command (pid, s, strlen (s) + 1);
4703
4704 old_chain = make_cleanup (free_current_marker, &marker);
4705 make_cleanup (cleanup_target_stop, &ptid);
4706
4707 while (*p++ == 'm')
4708 {
4709 if (marker == NULL)
4710 marker = XCNEW (struct static_tracepoint_marker);
4711
4712 do
4713 {
4714 parse_static_tracepoint_marker_definition (p, &p, marker);
4715
4716 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4717 {
4718 VEC_safe_push (static_tracepoint_marker_p,
4719 markers, marker);
4720 marker = NULL;
4721 }
4722 else
4723 {
4724 release_static_tracepoint_marker (marker);
4725 memset (marker, 0, sizeof (*marker));
4726 }
4727 }
4728 while (*p++ == ','); /* comma-separated list */
4729
4730 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4731 s[sizeof ("qTsSTM")] = 0;
4732 agent_run_command (pid, s, strlen (s) + 1);
4733 p = s;
4734 }
4735
4736 do_cleanups (old_chain);
4737
4738 return markers;
4739 }
4740
4741 /* Create a prototype generic GNU/Linux target. The client can override
4742 it with local methods. */
4743
4744 static void
4745 linux_target_install_ops (struct target_ops *t)
4746 {
4747 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4748 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4749 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4750 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4751 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4752 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4753 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4754 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4755 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4756 t->to_post_attach = linux_child_post_attach;
4757 t->to_follow_fork = linux_child_follow_fork;
4758 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4759
4760 super_xfer_partial = t->to_xfer_partial;
4761 t->to_xfer_partial = linux_xfer_partial;
4762
4763 t->to_static_tracepoint_markers_by_strid
4764 = linux_child_static_tracepoint_markers_by_strid;
4765 }
4766
4767 struct target_ops *
4768 linux_target (void)
4769 {
4770 struct target_ops *t;
4771
4772 t = inf_ptrace_target ();
4773 linux_target_install_ops (t);
4774
4775 return t;
4776 }
4777
4778 struct target_ops *
4779 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4780 {
4781 struct target_ops *t;
4782
4783 t = inf_ptrace_trad_target (register_u_offset);
4784 linux_target_install_ops (t);
4785
4786 return t;
4787 }
4788
4789 /* target_is_async_p implementation. */
4790
4791 static int
4792 linux_nat_is_async_p (void)
4793 {
4794 /* NOTE: palves 2008-03-21: We're only async when the user requests
4795 it explicitly with the "set target-async" command.
4796 Someday, linux will always be async. */
4797 return target_async_permitted;
4798 }
4799
4800 /* target_can_async_p implementation. */
4801
4802 static int
4803 linux_nat_can_async_p (void)
4804 {
4805 /* NOTE: palves 2008-03-21: We're only async when the user requests
4806 it explicitly with the "set target-async" command.
4807 Someday, linux will always be async. */
4808 return target_async_permitted;
4809 }
4810
4811 static int
4812 linux_nat_supports_non_stop (void)
4813 {
4814 return 1;
4815 }
4816
4817 /* True if we want to support multi-process. To be removed when GDB
4818 supports multi-exec. */
4819
4820 int linux_multi_process = 1;
4821
4822 static int
4823 linux_nat_supports_multi_process (void)
4824 {
4825 return linux_multi_process;
4826 }
4827
4828 static int
4829 linux_nat_supports_disable_randomization (void)
4830 {
4831 #ifdef HAVE_PERSONALITY
4832 return 1;
4833 #else
4834 return 0;
4835 #endif
4836 }
4837
4838 static int async_terminal_is_ours = 1;
4839
4840 /* target_terminal_inferior implementation. */
4841
4842 static void
4843 linux_nat_terminal_inferior (void)
4844 {
4845 if (!target_is_async_p ())
4846 {
4847 /* Async mode is disabled. */
4848 terminal_inferior ();
4849 return;
4850 }
4851
4852 terminal_inferior ();
4853
4854 /* Calls to target_terminal_*() are meant to be idempotent. */
4855 if (!async_terminal_is_ours)
4856 return;
4857
4858 delete_file_handler (input_fd);
4859 async_terminal_is_ours = 0;
4860 set_sigint_trap ();
4861 }
4862
4863 /* target_terminal_ours implementation. */
4864
4865 static void
4866 linux_nat_terminal_ours (void)
4867 {
4868 if (!target_is_async_p ())
4869 {
4870 /* Async mode is disabled. */
4871 terminal_ours ();
4872 return;
4873 }
4874
4875 /* GDB should never give the terminal to the inferior if the
4876 inferior is running in the background (run&, continue&, etc.),
4877 but claiming it sure should. */
4878 terminal_ours ();
4879
4880 if (async_terminal_is_ours)
4881 return;
4882
4883 clear_sigint_trap ();
4884 add_file_handler (input_fd, stdin_event_handler, 0);
4885 async_terminal_is_ours = 1;
4886 }
4887
4888 static void (*async_client_callback) (enum inferior_event_type event_type,
4889 void *context);
4890 static void *async_client_context;
4891
4892	/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4893	   it lets us notice when any child changes state and notify the
4894	   event loop; in sync mode, it allows linux_nat_wait_1 above to use
4895	   sigsuspend to wait for the arrival of a SIGCHLD.  */
4896
4897 static void
4898 sigchld_handler (int signo)
4899 {
4900 int old_errno = errno;
4901
4902 if (debug_linux_nat)
4903 ui_file_write_async_safe (gdb_stdlog,
4904 "sigchld\n", sizeof ("sigchld\n") - 1);
4905
4906 if (signo == SIGCHLD
4907 && linux_nat_event_pipe[0] != -1)
4908 async_file_mark (); /* Let the event loop know that there are
4909 events to handle. */
4910
4911 errno = old_errno;
4912 }
4913
4914 /* Callback registered with the target events file descriptor. */
4915
4916 static void
4917 handle_target_event (int error, gdb_client_data client_data)
4918 {
4919 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4920 }
4921
4922 /* Create/destroy the target events pipe. Returns previous state. */
4923
4924 static int
4925 linux_async_pipe (int enable)
4926 {
4927 int previous = (linux_nat_event_pipe[0] != -1);
4928
4929 if (previous != enable)
4930 {
4931 sigset_t prev_mask;
4932
4933 block_child_signals (&prev_mask);
4934
4935 if (enable)
4936 {
4937 if (pipe (linux_nat_event_pipe) == -1)
4938 internal_error (__FILE__, __LINE__,
4939 "creating event pipe failed.");
4940
4941 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4942 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4943 }
4944 else
4945 {
4946 close (linux_nat_event_pipe[0]);
4947 close (linux_nat_event_pipe[1]);
4948 linux_nat_event_pipe[0] = -1;
4949 linux_nat_event_pipe[1] = -1;
4950 }
4951
4952 restore_child_signals_mask (&prev_mask);
4953 }
4954
4955 return previous;
4956 }
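
/* The pipe managed above is the classic "self-pipe trick": the only
   async-signal-safe work the SIGCHLD handler must do is write a byte
   that wakes up a select/poll-based event loop.  A minimal hedged
   sketch (EV_PIPE is a hypothetical pipe created as above, with
   O_NONBLOCK set on both ends):  */
#if 0
static int ev_pipe[2];

static void
sketch_sigchld_handler (int signo)
{
  int saved_errno = errno;

  /* write is async-signal-safe; a full pipe (EAGAIN) just means a
     wakeup is already pending, so the result can be ignored.  */
  (void) write (ev_pipe[1], "+", 1);
  errno = saved_errno;
}
#endif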
4957
4958 /* target_async implementation. */
4959
4960 static void
4961 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4962 void *context), void *context)
4963 {
4964 if (callback != NULL)
4965 {
4966 async_client_callback = callback;
4967 async_client_context = context;
4968 if (!linux_async_pipe (1))
4969 {
4970 add_file_handler (linux_nat_event_pipe[0],
4971 handle_target_event, NULL);
4972 /* There may be pending events to handle. Tell the event loop
4973 to poll them. */
4974 async_file_mark ();
4975 }
4976 }
4977 else
4978 {
4979 async_client_callback = callback;
4980 async_client_context = context;
4981 delete_file_handler (linux_nat_event_pipe[0]);
4982 linux_async_pipe (0);
4983 }
4984 return;
4985 }
4986
4987 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4988 event came out. */
4989
4990 static int
4991 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4992 {
4993 if (!lwp->stopped)
4994 {
4995 ptid_t ptid = lwp->ptid;
4996
4997 if (debug_linux_nat)
4998 fprintf_unfiltered (gdb_stdlog,
4999 "LNSL: running -> suspending %s\n",
5000 target_pid_to_str (lwp->ptid));
5001
5002
5003 if (lwp->last_resume_kind == resume_stop)
5004 {
5005 if (debug_linux_nat)
5006 fprintf_unfiltered (gdb_stdlog,
5007 "linux-nat: already stopping LWP %ld at "
5008 "GDB's request\n",
5009 ptid_get_lwp (lwp->ptid));
5010 return 0;
5011 }
5012
5013 stop_callback (lwp, NULL);
5014 lwp->last_resume_kind = resume_stop;
5015 }
5016 else
5017 {
5018 /* Already known to be stopped; do nothing. */
5019
5020 if (debug_linux_nat)
5021 {
5022 if (find_thread_ptid (lwp->ptid)->stop_requested)
5023 fprintf_unfiltered (gdb_stdlog,
5024 "LNSL: already stopped/stop_requested %s\n",
5025 target_pid_to_str (lwp->ptid));
5026 else
5027 fprintf_unfiltered (gdb_stdlog,
5028 "LNSL: already stopped/no "
5029 "stop_requested yet %s\n",
5030 target_pid_to_str (lwp->ptid));
5031 }
5032 }
5033 return 0;
5034 }
5035
5036 static void
5037 linux_nat_stop (ptid_t ptid)
5038 {
5039 if (non_stop)
5040 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5041 else
5042 linux_ops->to_stop (ptid);
5043 }
5044
5045 static void
5046 linux_nat_close (int quitting)
5047 {
5048 /* Unregister from the event loop. */
5049 if (linux_nat_is_async_p ())
5050 linux_nat_async (NULL, 0);
5051
5052 if (linux_ops->to_close)
5053 linux_ops->to_close (quitting);
5054 }
5055
5056 /* When requests are passed down from the linux-nat layer to the
5057 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5058 used. The address space pointer is stored in the inferior object,
5059	   but the common code that is passed such a ptid can't tell whether
5060 lwpid is a "main" process id or not (it assumes so). We reverse
5061 look up the "main" process id from the lwp here. */
5062
5063 static struct address_space *
5064 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5065 {
5066 struct lwp_info *lwp;
5067 struct inferior *inf;
5068 int pid;
5069
5070 pid = GET_LWP (ptid);
5071 if (GET_LWP (ptid) == 0)
5072 {
5073 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5074 tgid. */
5075 lwp = find_lwp_pid (ptid);
5076 pid = GET_PID (lwp->ptid);
5077 }
5078 else
5079 {
5080 /* A (pid,lwpid,0) ptid. */
5081 pid = GET_PID (ptid);
5082 }
5083
5084 inf = find_inferior_pid (pid);
5085 gdb_assert (inf != NULL);
5086 return inf->aspace;
5087 }
5088
5089 /* Return the cached value of the processor core for thread PTID. */
5090
5091 static int
5092 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5093 {
5094 struct lwp_info *info = find_lwp_pid (ptid);
5095
5096 if (info)
5097 return info->core;
5098 return -1;
5099 }
5100
5101 void
5102 linux_nat_add_target (struct target_ops *t)
5103 {
5104 /* Save the provided single-threaded target. We save this in a separate
5105 variable because another target we've inherited from (e.g. inf-ptrace)
5106 may have saved a pointer to T; we want to use it for the final
5107 process stratum target. */
5108 linux_ops_saved = *t;
5109 linux_ops = &linux_ops_saved;
5110
5111 /* Override some methods for multithreading. */
5112 t->to_create_inferior = linux_nat_create_inferior;
5113 t->to_attach = linux_nat_attach;
5114 t->to_detach = linux_nat_detach;
5115 t->to_resume = linux_nat_resume;
5116 t->to_wait = linux_nat_wait;
5117 t->to_pass_signals = linux_nat_pass_signals;
5118 t->to_xfer_partial = linux_nat_xfer_partial;
5119 t->to_kill = linux_nat_kill;
5120 t->to_mourn_inferior = linux_nat_mourn_inferior;
5121 t->to_thread_alive = linux_nat_thread_alive;
5122 t->to_pid_to_str = linux_nat_pid_to_str;
5123 t->to_thread_name = linux_nat_thread_name;
5124 t->to_has_thread_control = tc_schedlock;
5125 t->to_thread_address_space = linux_nat_thread_address_space;
5126 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5127 t->to_stopped_data_address = linux_nat_stopped_data_address;
5128
5129 t->to_can_async_p = linux_nat_can_async_p;
5130 t->to_is_async_p = linux_nat_is_async_p;
5131 t->to_supports_non_stop = linux_nat_supports_non_stop;
5132 t->to_async = linux_nat_async;
5133 t->to_terminal_inferior = linux_nat_terminal_inferior;
5134 t->to_terminal_ours = linux_nat_terminal_ours;
5135 t->to_close = linux_nat_close;
5136
5137 /* Methods for non-stop support. */
5138 t->to_stop = linux_nat_stop;
5139
5140 t->to_supports_multi_process = linux_nat_supports_multi_process;
5141
5142 t->to_supports_disable_randomization
5143 = linux_nat_supports_disable_randomization;
5144
5145 t->to_core_of_thread = linux_nat_core_of_thread;
5146
5147 /* We don't change the stratum; this target will sit at
5148 process_stratum and thread_db will set at thread_stratum. This
5149 is a little strange, since this is a multi-threaded-capable
5150 target, but we want to be on the stack below thread_db, and we
5151 also want to be used for single-threaded processes. */
5152
5153 add_target (t);
5154 }
5155
5156 /* Register a method to call whenever a new thread is attached. */
5157 void
5158 linux_nat_set_new_thread (struct target_ops *t,
5159 void (*new_thread) (struct lwp_info *))
5160 {
5161 /* Save the pointer. We only support a single registered instance
5162 of the GNU/Linux native target, so we do not need to map this to
5163 T. */
5164 linux_nat_new_thread = new_thread;
5165 }
5166
5167 /* Register a method that converts a siginfo object between the layout
5168 that ptrace returns, and the layout in the architecture of the
5169 inferior. */
5170 void
5171 linux_nat_set_siginfo_fixup (struct target_ops *t,
5172 int (*siginfo_fixup) (siginfo_t *,
5173 gdb_byte *,
5174 int))
5175 {
5176 /* Save the pointer. */
5177 linux_nat_siginfo_fixup = siginfo_fixup;
5178 }
5179
5180 /* Register a method to call prior to resuming a thread. */
5181
5182 void
5183 linux_nat_set_prepare_to_resume (struct target_ops *t,
5184 void (*prepare_to_resume) (struct lwp_info *))
5185 {
5186 /* Save the pointer. */
5187 linux_nat_prepare_to_resume = prepare_to_resume;
5188 }
5189
5190 /* See linux-nat.h. */
5191
5192 int
5193 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
5194 {
5195 struct lwp_info *lp = find_lwp_pid (ptid);
5196
5197 gdb_assert (lp != NULL);
5198
5199 *siginfo = lp->siginfo;
5200
5201 return 1;
5202 }
5203
5204 /* Provide a prototype to silence -Wmissing-prototypes. */
5205 extern initialize_file_ftype _initialize_linux_nat;
5206
5207 void
5208 _initialize_linux_nat (void)
5209 {
5210 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
5211 &debug_linux_nat, _("\
5212 Set debugging of GNU/Linux lwp module."), _("\
5213 Show debugging of GNU/Linux lwp module."), _("\
5214 Enables printf debugging output."),
5215 NULL,
5216 show_debug_linux_nat,
5217 &setdebuglist, &showdebuglist);
5218
5219 /* Save this mask as the default. */
5220 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
5221
5222 /* Install a SIGCHLD handler. */
5223 sigchld_action.sa_handler = sigchld_handler;
5224 sigemptyset (&sigchld_action.sa_mask);
5225 sigchld_action.sa_flags = SA_RESTART;
5226
5227 /* Make it the default. */
5228 sigaction (SIGCHLD, &sigchld_action, NULL);
5229
5230 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5231 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5232 sigdelset (&suspend_mask, SIGCHLD);
5233
5234 sigemptyset (&blocked_mask);
5235 }
5236 \f
5237
5238 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5239 the GNU/Linux Threads library and therefore doesn't really belong
5240 here. */
5241
5242 /* Read variable NAME in the target and return its value if found.
5243 Otherwise return zero. It is assumed that the type of the variable
5244 is `int'. */
5245
5246 static int
5247 get_signo (const char *name)
5248 {
5249 struct minimal_symbol *ms;
5250 int signo;
5251
5252 ms = lookup_minimal_symbol (name, NULL, NULL);
5253 if (ms == NULL)
5254 return 0;
5255
5256 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
5257 sizeof (signo)) != 0)
5258 return 0;
5259
5260 return signo;
5261 }
5262
5263 /* Return the set of signals used by the threads library in *SET. */
5264
5265 void
5266 lin_thread_get_thread_signals (sigset_t *set)
5267 {
5268 struct sigaction action;
5269 int restart, cancel;
5270
5271 sigemptyset (&blocked_mask);
5272 sigemptyset (set);
5273
5274 restart = get_signo ("__pthread_sig_restart");
5275 cancel = get_signo ("__pthread_sig_cancel");
5276
5277 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5278 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5279 not provide any way for the debugger to query the signal numbers -
5280 fortunately they don't change! */
5281
5282 if (restart == 0)
5283 restart = __SIGRTMIN;
5284
5285 if (cancel == 0)
5286 cancel = __SIGRTMIN + 1;
5287
5288 sigaddset (set, restart);
5289 sigaddset (set, cancel);
5290
5291 /* The GNU/Linux Threads library makes terminating threads send a
5292 special "cancel" signal instead of SIGCHLD. Make sure we catch
5293 those (to prevent them from terminating GDB itself, which is
5294 likely to be their default action) and treat them the same way as
5295 SIGCHLD. */
5296
5297 action.sa_handler = sigchld_handler;
5298 sigemptyset (&action.sa_mask);
5299 action.sa_flags = SA_RESTART;
5300 sigaction (cancel, &action, NULL);
5301
5302 /* We block the "cancel" signal throughout this code ... */
5303 sigaddset (&blocked_mask, cancel);
5304 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5305
5306 /* ... except during a sigsuspend. */
5307 sigdelset (&suspend_mask, cancel);
5308 }