GNU/Linux: Stop using libthread_db/td_ta_thr_iter
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2015 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "infrun.h"
23 #include "target.h"
24 #include "nat/linux-nat.h"
25 #include "nat/linux-waitpid.h"
26 #include "gdb_wait.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "nat/linux-ptrace.h"
34 #include "nat/linux-procfs.h"
35 #include "nat/linux-personality.h"
36 #include "linux-fork.h"
37 #include "gdbthread.h"
38 #include "gdbcmd.h"
39 #include "regcache.h"
40 #include "regset.h"
41 #include "inf-child.h"
42 #include "inf-ptrace.h"
43 #include "auxv.h"
44 #include <sys/procfs.h> /* for elf_gregset etc. */
45 #include "elf-bfd.h" /* for elfcore_write_* */
46 #include "gregset.h" /* for gregset */
47 #include "gdbcore.h" /* for get_exec_file */
48 #include <ctype.h> /* for isdigit */
49 #include <sys/stat.h> /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51 #include "inf-loop.h"
52 #include "event-loop.h"
53 #include "event-top.h"
54 #include <pwd.h>
55 #include <sys/types.h>
56 #include <dirent.h>
57 #include "xml-support.h"
58 #include <sys/vfs.h>
59 #include "solib.h"
60 #include "nat/linux-osdata.h"
61 #include "linux-tdep.h"
62 #include "symfile.h"
63 #include "agent.h"
64 #include "tracepoint.h"
65 #include "buffer.h"
66 #include "target-descriptions.h"
67 #include "filestuff.h"
68 #include "objfiles.h"
69
70 #ifndef SPUFS_MAGIC
71 #define SPUFS_MAGIC 0x23c9b64e
72 #endif
73
74 /* This comment documents high-level logic of this file.
75
76 Waiting for events in sync mode
77 ===============================
78
79 When waiting for an event in a specific thread, we just use waitpid, passing
80 the specific pid, and not passing WNOHANG.
81
82 When waiting for an event in all threads, waitpid is not quite good. Prior to
83 version 2.4, Linux can either wait for event in main thread, or in secondary
84 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
85 miss an event. The solution is to use non-blocking waitpid, together with
86 sigsuspend. First, we use non-blocking waitpid to get an event in the main
87 process, if any. Second, we use non-blocking waitpid with the __WCLONED
88 flag to check for events in cloned processes. If nothing is found, we use
89 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
90 happened to a child process -- and SIGCHLD will be delivered both for events
91 in main debugged process and in cloned processes. As soon as we know there's
92 an event, we get back to calling nonblocking waitpid with and without
93 __WCLONED.
94
95 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
96 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
97 blocked, the signal becomes pending and sigsuspend immediately
98 notices it and returns.
99
100 Waiting for events in async mode
101 ================================
102
103 In async mode, GDB should always be ready to handle both user input
104 and target events, so neither blocking waitpid nor sigsuspend are
105 viable options. Instead, we should asynchronously notify the GDB main
106 event loop whenever there's an unprocessed event from the target. We
107 detect asynchronous target events by handling SIGCHLD signals. To
108 notify the event loop about target events, the self-pipe trick is used
109 --- a pipe is registered as waitable event source in the event loop,
110 the event loop select/poll's on the read end of this pipe (as well on
111 other event sources, e.g., stdin), and the SIGCHLD handler writes a
112 byte to this pipe. This is more portable than relying on
113 pselect/ppoll, since on kernels that lack those syscalls, libc
114 emulates them with select/poll+sigprocmask, and that is racy
115 (a.k.a. plain broken).
116
117 Obviously, if we fail to notify the event loop if there's a target
118 event, it's bad. OTOH, if we notify the event loop when there's no
119 event from the target, linux_nat_wait will detect that there's no real
120 event to report, and return event of type TARGET_WAITKIND_IGNORE.
121 This is mostly harmless, but it will waste time and is better avoided.
122
123 The main design point is that every time GDB is outside linux-nat.c,
124 we have a SIGCHLD handler installed that is called when something
125 happens to the target and notifies the GDB event loop. Whenever GDB
126 core decides to handle the event, and calls into linux-nat.c, we
127 process things as in sync mode, except that the we never block in
128 sigsuspend.
129
130 While processing an event, we may end up momentarily blocked in
131 waitpid calls. Those waitpid calls, while blocking, are guarantied to
132 return quickly. E.g., in all-stop mode, before reporting to the core
133 that an LWP hit a breakpoint, all LWPs are stopped by sending them
134 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
135 Note that this is different from blocking indefinitely waiting for the
136 next event --- here, we're already handling an event.
137
138 Use of signals
139 ==============
140
141 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
142 signal is not entirely significant; we just need for a signal to be delivered,
143 so that we can intercept it. SIGSTOP's advantage is that it can not be
144 blocked. A disadvantage is that it is not a real-time signal, so it can only
145 be queued once; we do not keep track of other sources of SIGSTOP.
146
147 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
148 use them, because they have special behavior when the signal is generated -
149 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
150 kills the entire thread group.
151
152 A delivered SIGSTOP would stop the entire thread group, not just the thread we
153 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
154 cancel it (by PTRACE_CONT without passing SIGSTOP).
155
156 We could use a real-time signal instead. This would solve those problems; we
157 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
158 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
159 generates it, and there are races with trying to find a signal that is not
160 blocked. */
161
162 #ifndef O_LARGEFILE
163 #define O_LARGEFILE 0
164 #endif
165
166 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
167 the use of the multi-threaded target. */
168 static struct target_ops *linux_ops;
169 static struct target_ops linux_ops_saved;
170
171 /* The method to call, if any, when a new thread is attached. */
172 static void (*linux_nat_new_thread) (struct lwp_info *);
173
174 /* The method to call, if any, when a new fork is attached. */
175 static linux_nat_new_fork_ftype *linux_nat_new_fork;
176
177 /* The method to call, if any, when a process is no longer
178 attached. */
179 static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
180
181 /* Hook to call prior to resuming a thread. */
182 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
183
184 /* The method to call, if any, when the siginfo object needs to be
185 converted between the layout returned by ptrace, and the layout in
186 the architecture of the inferior. */
187 static int (*linux_nat_siginfo_fixup) (siginfo_t *,
188 gdb_byte *,
189 int);
190
191 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
192 Called by our to_xfer_partial. */
193 static target_xfer_partial_ftype *super_xfer_partial;
194
195 /* The saved to_close method, inherited from inf-ptrace.c.
196 Called by our to_close. */
197 static void (*super_close) (struct target_ops *);
198
199 static unsigned int debug_linux_nat;
200 static void
201 show_debug_linux_nat (struct ui_file *file, int from_tty,
202 struct cmd_list_element *c, const char *value)
203 {
204 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
205 value);
206 }
207
208 struct simple_pid_list
209 {
210 int pid;
211 int status;
212 struct simple_pid_list *next;
213 };
214 struct simple_pid_list *stopped_pids;
215
216 /* Async mode support. */
217
218 /* The read/write ends of the pipe registered as waitable file in the
219 event loop. */
220 static int linux_nat_event_pipe[2] = { -1, -1 };
221
222 /* True if we're currently in async mode. */
223 #define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
224
225 /* Flush the event pipe. */
226
227 static void
228 async_file_flush (void)
229 {
230 int ret;
231 char buf;
232
233 do
234 {
235 ret = read (linux_nat_event_pipe[0], &buf, 1);
236 }
237 while (ret >= 0 || (ret == -1 && errno == EINTR));
238 }
239
240 /* Put something (anything, doesn't matter what, or how much) in event
241 pipe, so that the select/poll in the event-loop realizes we have
242 something to process. */
243
244 static void
245 async_file_mark (void)
246 {
247 int ret;
248
249 /* It doesn't really matter what the pipe contains, as long we end
250 up with something in it. Might as well flush the previous
251 left-overs. */
252 async_file_flush ();
253
254 do
255 {
256 ret = write (linux_nat_event_pipe[1], "+", 1);
257 }
258 while (ret == -1 && errno == EINTR);
259
260 /* Ignore EAGAIN. If the pipe is full, the event loop will already
261 be awakened anyway. */
262 }
263
264 static int kill_lwp (int lwpid, int signo);
265
266 static int stop_callback (struct lwp_info *lp, void *data);
267 static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
268
269 static void block_child_signals (sigset_t *prev_mask);
270 static void restore_child_signals_mask (sigset_t *prev_mask);
271
272 struct lwp_info;
273 static struct lwp_info *add_lwp (ptid_t ptid);
274 static void purge_lwp_list (int pid);
275 static void delete_lwp (ptid_t ptid);
276 static struct lwp_info *find_lwp_pid (ptid_t ptid);
277
278 static int lwp_status_pending_p (struct lwp_info *lp);
279
280 static int check_stopped_by_breakpoint (struct lwp_info *lp);
281 static int sigtrap_is_event (int status);
282 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
283
284 \f
285 /* Trivial list manipulation functions to keep track of a list of
286 new stopped processes. */
287 static void
288 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
289 {
290 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
291
292 new_pid->pid = pid;
293 new_pid->status = status;
294 new_pid->next = *listp;
295 *listp = new_pid;
296 }
297
298 static int
299 in_pid_list_p (struct simple_pid_list *list, int pid)
300 {
301 struct simple_pid_list *p;
302
303 for (p = list; p != NULL; p = p->next)
304 if (p->pid == pid)
305 return 1;
306 return 0;
307 }
308
309 static int
310 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
311 {
312 struct simple_pid_list **p;
313
314 for (p = listp; *p != NULL; p = &(*p)->next)
315 if ((*p)->pid == pid)
316 {
317 struct simple_pid_list *next = (*p)->next;
318
319 *statusp = (*p)->status;
320 xfree (*p);
321 *p = next;
322 return 1;
323 }
324 return 0;
325 }
326
327 /* Initialize ptrace warnings and check for supported ptrace
328 features given PID.
329
330 ATTACHED should be nonzero iff we attached to the inferior. */
331
332 static void
333 linux_init_ptrace (pid_t pid, int attached)
334 {
335 linux_enable_event_reporting (pid, attached);
336 linux_ptrace_init_warnings ();
337 }
338
339 static void
340 linux_child_post_attach (struct target_ops *self, int pid)
341 {
342 linux_init_ptrace (pid, 1);
343 }
344
345 static void
346 linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
347 {
348 linux_init_ptrace (ptid_get_pid (ptid), 0);
349 }
350
351 /* Return the number of known LWPs in the tgid given by PID. */
352
353 static int
354 num_lwps (int pid)
355 {
356 int count = 0;
357 struct lwp_info *lp;
358
359 for (lp = lwp_list; lp; lp = lp->next)
360 if (ptid_get_pid (lp->ptid) == pid)
361 count++;
362
363 return count;
364 }
365
366 /* Call delete_lwp with prototype compatible for make_cleanup. */
367
368 static void
369 delete_lwp_cleanup (void *lp_voidp)
370 {
371 struct lwp_info *lp = lp_voidp;
372
373 delete_lwp (lp->ptid);
374 }
375
376 /* Target hook for follow_fork. On entry inferior_ptid must be the
377 ptid of the followed inferior. At return, inferior_ptid will be
378 unchanged. */
379
380 static int
381 linux_child_follow_fork (struct target_ops *ops, int follow_child,
382 int detach_fork)
383 {
384 if (!follow_child)
385 {
386 struct lwp_info *child_lp = NULL;
387 int status = W_STOPCODE (0);
388 struct cleanup *old_chain;
389 int has_vforked;
390 int parent_pid, child_pid;
391
392 has_vforked = (inferior_thread ()->pending_follow.kind
393 == TARGET_WAITKIND_VFORKED);
394 parent_pid = ptid_get_lwp (inferior_ptid);
395 if (parent_pid == 0)
396 parent_pid = ptid_get_pid (inferior_ptid);
397 child_pid
398 = ptid_get_pid (inferior_thread ()->pending_follow.value.related_pid);
399
400
401 /* We're already attached to the parent, by default. */
402 old_chain = save_inferior_ptid ();
403 inferior_ptid = ptid_build (child_pid, child_pid, 0);
404 child_lp = add_lwp (inferior_ptid);
405 child_lp->stopped = 1;
406 child_lp->last_resume_kind = resume_stop;
407
408 /* Detach new forked process? */
409 if (detach_fork)
410 {
411 make_cleanup (delete_lwp_cleanup, child_lp);
412
413 if (linux_nat_prepare_to_resume != NULL)
414 linux_nat_prepare_to_resume (child_lp);
415
416 /* When debugging an inferior in an architecture that supports
417 hardware single stepping on a kernel without commit
418 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
419 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
420 set if the parent process had them set.
421 To work around this, single step the child process
422 once before detaching to clear the flags. */
423
424 if (!gdbarch_software_single_step_p (target_thread_architecture
425 (child_lp->ptid)))
426 {
427 linux_disable_event_reporting (child_pid);
428 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
429 perror_with_name (_("Couldn't do single step"));
430 if (my_waitpid (child_pid, &status, 0) < 0)
431 perror_with_name (_("Couldn't wait vfork process"));
432 }
433
434 if (WIFSTOPPED (status))
435 {
436 int signo;
437
438 signo = WSTOPSIG (status);
439 if (signo != 0
440 && !signal_pass_state (gdb_signal_from_host (signo)))
441 signo = 0;
442 ptrace (PTRACE_DETACH, child_pid, 0, signo);
443 }
444
445 /* Resets value of inferior_ptid to parent ptid. */
446 do_cleanups (old_chain);
447 }
448 else
449 {
450 /* Let the thread_db layer learn about this new process. */
451 check_for_thread_db ();
452 }
453
454 do_cleanups (old_chain);
455
456 if (has_vforked)
457 {
458 struct lwp_info *parent_lp;
459
460 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
461 gdb_assert (linux_supports_tracefork () >= 0);
462
463 if (linux_supports_tracevforkdone ())
464 {
465 if (debug_linux_nat)
466 fprintf_unfiltered (gdb_stdlog,
467 "LCFF: waiting for VFORK_DONE on %d\n",
468 parent_pid);
469 parent_lp->stopped = 1;
470
471 /* We'll handle the VFORK_DONE event like any other
472 event, in target_wait. */
473 }
474 else
475 {
476 /* We can't insert breakpoints until the child has
477 finished with the shared memory region. We need to
478 wait until that happens. Ideal would be to just
479 call:
480 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
481 - waitpid (parent_pid, &status, __WALL);
482 However, most architectures can't handle a syscall
483 being traced on the way out if it wasn't traced on
484 the way in.
485
486 We might also think to loop, continuing the child
487 until it exits or gets a SIGTRAP. One problem is
488 that the child might call ptrace with PTRACE_TRACEME.
489
490 There's no simple and reliable way to figure out when
491 the vforked child will be done with its copy of the
492 shared memory. We could step it out of the syscall,
493 two instructions, let it go, and then single-step the
494 parent once. When we have hardware single-step, this
495 would work; with software single-step it could still
496 be made to work but we'd have to be able to insert
497 single-step breakpoints in the child, and we'd have
498 to insert -just- the single-step breakpoint in the
499 parent. Very awkward.
500
501 In the end, the best we can do is to make sure it
502 runs for a little while. Hopefully it will be out of
503 range of any breakpoints we reinsert. Usually this
504 is only the single-step breakpoint at vfork's return
505 point. */
506
507 if (debug_linux_nat)
508 fprintf_unfiltered (gdb_stdlog,
509 "LCFF: no VFORK_DONE "
510 "support, sleeping a bit\n");
511
512 usleep (10000);
513
514 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
515 and leave it pending. The next linux_nat_resume call
516 will notice a pending event, and bypasses actually
517 resuming the inferior. */
518 parent_lp->status = 0;
519 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
520 parent_lp->stopped = 1;
521
522 /* If we're in async mode, need to tell the event loop
523 there's something here to process. */
524 if (target_is_async_p ())
525 async_file_mark ();
526 }
527 }
528 }
529 else
530 {
531 struct lwp_info *child_lp;
532
533 child_lp = add_lwp (inferior_ptid);
534 child_lp->stopped = 1;
535 child_lp->last_resume_kind = resume_stop;
536
537 /* Let the thread_db layer learn about this new process. */
538 check_for_thread_db ();
539 }
540
541 return 0;
542 }
543
544 \f
545 static int
546 linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
547 {
548 return !linux_supports_tracefork ();
549 }
550
551 static int
552 linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
553 {
554 return 0;
555 }
556
557 static int
558 linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
559 {
560 return !linux_supports_tracefork ();
561 }
562
563 static int
564 linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
565 {
566 return 0;
567 }
568
569 static int
570 linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
571 {
572 return !linux_supports_tracefork ();
573 }
574
575 static int
576 linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
577 {
578 return 0;
579 }
580
581 static int
582 linux_child_set_syscall_catchpoint (struct target_ops *self,
583 int pid, int needed, int any_count,
584 int table_size, int *table)
585 {
586 if (!linux_supports_tracesysgood ())
587 return 1;
588
589 /* On GNU/Linux, we ignore the arguments. It means that we only
590 enable the syscall catchpoints, but do not disable them.
591
592 Also, we do not use the `table' information because we do not
593 filter system calls here. We let GDB do the logic for us. */
594 return 0;
595 }
596
597 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
598 are processes sharing the same VM space. A multi-threaded process
599 is basically a group of such processes. However, such a grouping
600 is almost entirely a user-space issue; the kernel doesn't enforce
601 such a grouping at all (this might change in the future). In
602 general, we'll rely on the threads library (i.e. the GNU/Linux
603 Threads library) to provide such a grouping.
604
605 It is perfectly well possible to write a multi-threaded application
606 without the assistance of a threads library, by using the clone
607 system call directly. This module should be able to give some
608 rudimentary support for debugging such applications if developers
609 specify the CLONE_PTRACE flag in the clone system call, and are
610 using the Linux kernel 2.4 or above.
611
612 Note that there are some peculiarities in GNU/Linux that affect
613 this code:
614
615 - In general one should specify the __WCLONE flag to waitpid in
616 order to make it report events for any of the cloned processes
617 (and leave it out for the initial process). However, if a cloned
618 process has exited the exit status is only reported if the
619 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
620 we cannot use it since GDB must work on older systems too.
621
622 - When a traced, cloned process exits and is waited for by the
623 debugger, the kernel reassigns it to the original parent and
624 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
625 library doesn't notice this, which leads to the "zombie problem":
626 When debugged a multi-threaded process that spawns a lot of
627 threads will run out of processes, even if the threads exit,
628 because the "zombies" stay around. */
629
630 /* List of known LWPs. */
631 struct lwp_info *lwp_list;
632 \f
633
634 /* Original signal mask. */
635 static sigset_t normal_mask;
636
637 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
638 _initialize_linux_nat. */
639 static sigset_t suspend_mask;
640
641 /* Signals to block to make that sigsuspend work. */
642 static sigset_t blocked_mask;
643
644 /* SIGCHLD action. */
645 struct sigaction sigchld_action;
646
647 /* Block child signals (SIGCHLD and linux threads signals), and store
648 the previous mask in PREV_MASK. */
649
650 static void
651 block_child_signals (sigset_t *prev_mask)
652 {
653 /* Make sure SIGCHLD is blocked. */
654 if (!sigismember (&blocked_mask, SIGCHLD))
655 sigaddset (&blocked_mask, SIGCHLD);
656
657 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
658 }
659
660 /* Restore child signals mask, previously returned by
661 block_child_signals. */
662
663 static void
664 restore_child_signals_mask (sigset_t *prev_mask)
665 {
666 sigprocmask (SIG_SETMASK, prev_mask, NULL);
667 }
668
669 /* Mask of signals to pass directly to the inferior. */
670 static sigset_t pass_mask;
671
672 /* Update signals to pass to the inferior. */
673 static void
674 linux_nat_pass_signals (struct target_ops *self,
675 int numsigs, unsigned char *pass_signals)
676 {
677 int signo;
678
679 sigemptyset (&pass_mask);
680
681 for (signo = 1; signo < NSIG; signo++)
682 {
683 int target_signo = gdb_signal_from_host (signo);
684 if (target_signo < numsigs && pass_signals[target_signo])
685 sigaddset (&pass_mask, signo);
686 }
687 }
688
689 \f
690
691 /* Prototypes for local functions. */
692 static int stop_wait_callback (struct lwp_info *lp, void *data);
693 static int linux_thread_alive (ptid_t ptid);
694 static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
695 static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
696
697 \f
698
699 /* Destroy and free LP. */
700
701 static void
702 lwp_free (struct lwp_info *lp)
703 {
704 xfree (lp->arch_private);
705 xfree (lp);
706 }
707
708 /* Remove all LWPs belong to PID from the lwp list. */
709
710 static void
711 purge_lwp_list (int pid)
712 {
713 struct lwp_info *lp, *lpprev, *lpnext;
714
715 lpprev = NULL;
716
717 for (lp = lwp_list; lp; lp = lpnext)
718 {
719 lpnext = lp->next;
720
721 if (ptid_get_pid (lp->ptid) == pid)
722 {
723 if (lp == lwp_list)
724 lwp_list = lp->next;
725 else
726 lpprev->next = lp->next;
727
728 lwp_free (lp);
729 }
730 else
731 lpprev = lp;
732 }
733 }
734
735 /* Add the LWP specified by PTID to the list. PTID is the first LWP
736 in the process. Return a pointer to the structure describing the
737 new LWP.
738
739 This differs from add_lwp in that we don't let the arch specific
740 bits know about this new thread. Current clients of this callback
741 take the opportunity to install watchpoints in the new thread, and
742 we shouldn't do that for the first thread. If we're spawning a
743 child ("run"), the thread executes the shell wrapper first, and we
744 shouldn't touch it until it execs the program we want to debug.
745 For "attach", it'd be okay to call the callback, but it's not
746 necessary, because watchpoints can't yet have been inserted into
747 the inferior. */
748
749 static struct lwp_info *
750 add_initial_lwp (ptid_t ptid)
751 {
752 struct lwp_info *lp;
753
754 gdb_assert (ptid_lwp_p (ptid));
755
756 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
757
758 memset (lp, 0, sizeof (struct lwp_info));
759
760 lp->last_resume_kind = resume_continue;
761 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
762
763 lp->ptid = ptid;
764 lp->core = -1;
765
766 lp->next = lwp_list;
767 lwp_list = lp;
768
769 return lp;
770 }
771
772 /* Add the LWP specified by PID to the list. Return a pointer to the
773 structure describing the new LWP. The LWP should already be
774 stopped. */
775
776 static struct lwp_info *
777 add_lwp (ptid_t ptid)
778 {
779 struct lwp_info *lp;
780
781 lp = add_initial_lwp (ptid);
782
783 /* Let the arch specific bits know about this new thread. Current
784 clients of this callback take the opportunity to install
785 watchpoints in the new thread. We don't do this for the first
786 thread though. See add_initial_lwp. */
787 if (linux_nat_new_thread != NULL)
788 linux_nat_new_thread (lp);
789
790 return lp;
791 }
792
793 /* Remove the LWP specified by PID from the list. */
794
795 static void
796 delete_lwp (ptid_t ptid)
797 {
798 struct lwp_info *lp, *lpprev;
799
800 lpprev = NULL;
801
802 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
803 if (ptid_equal (lp->ptid, ptid))
804 break;
805
806 if (!lp)
807 return;
808
809 if (lpprev)
810 lpprev->next = lp->next;
811 else
812 lwp_list = lp->next;
813
814 lwp_free (lp);
815 }
816
817 /* Return a pointer to the structure describing the LWP corresponding
818 to PID. If no corresponding LWP could be found, return NULL. */
819
820 static struct lwp_info *
821 find_lwp_pid (ptid_t ptid)
822 {
823 struct lwp_info *lp;
824 int lwp;
825
826 if (ptid_lwp_p (ptid))
827 lwp = ptid_get_lwp (ptid);
828 else
829 lwp = ptid_get_pid (ptid);
830
831 for (lp = lwp_list; lp; lp = lp->next)
832 if (lwp == ptid_get_lwp (lp->ptid))
833 return lp;
834
835 return NULL;
836 }
837
838 /* Call CALLBACK with its second argument set to DATA for every LWP in
839 the list. If CALLBACK returns 1 for a particular LWP, return a
840 pointer to the structure describing that LWP immediately.
841 Otherwise return NULL. */
842
843 struct lwp_info *
844 iterate_over_lwps (ptid_t filter,
845 int (*callback) (struct lwp_info *, void *),
846 void *data)
847 {
848 struct lwp_info *lp, *lpnext;
849
850 for (lp = lwp_list; lp; lp = lpnext)
851 {
852 lpnext = lp->next;
853
854 if (ptid_match (lp->ptid, filter))
855 {
856 if ((*callback) (lp, data))
857 return lp;
858 }
859 }
860
861 return NULL;
862 }
863
864 /* Update our internal state when changing from one checkpoint to
865 another indicated by NEW_PTID. We can only switch single-threaded
866 applications, so we only create one new LWP, and the previous list
867 is discarded. */
868
869 void
870 linux_nat_switch_fork (ptid_t new_ptid)
871 {
872 struct lwp_info *lp;
873
874 purge_lwp_list (ptid_get_pid (inferior_ptid));
875
876 lp = add_lwp (new_ptid);
877 lp->stopped = 1;
878
879 /* This changes the thread's ptid while preserving the gdb thread
880 num. Also changes the inferior pid, while preserving the
881 inferior num. */
882 thread_change_ptid (inferior_ptid, new_ptid);
883
884 /* We've just told GDB core that the thread changed target id, but,
885 in fact, it really is a different thread, with different register
886 contents. */
887 registers_changed ();
888 }
889
890 /* Handle the exit of a single thread LP. */
891
892 static void
893 exit_lwp (struct lwp_info *lp)
894 {
895 struct thread_info *th = find_thread_ptid (lp->ptid);
896
897 if (th)
898 {
899 if (print_thread_events)
900 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
901
902 delete_thread (lp->ptid);
903 }
904
905 delete_lwp (lp->ptid);
906 }
907
908 /* Wait for the LWP specified by LP, which we have just attached to.
909 Returns a wait status for that LWP, to cache. */
910
911 static int
912 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
913 int *signalled)
914 {
915 pid_t new_pid, pid = ptid_get_lwp (ptid);
916 int status;
917
918 if (linux_proc_pid_is_stopped (pid))
919 {
920 if (debug_linux_nat)
921 fprintf_unfiltered (gdb_stdlog,
922 "LNPAW: Attaching to a stopped process\n");
923
924 /* The process is definitely stopped. It is in a job control
925 stop, unless the kernel predates the TASK_STOPPED /
926 TASK_TRACED distinction, in which case it might be in a
927 ptrace stop. Make sure it is in a ptrace stop; from there we
928 can kill it, signal it, et cetera.
929
930 First make sure there is a pending SIGSTOP. Since we are
931 already attached, the process can not transition from stopped
932 to running without a PTRACE_CONT; so we know this signal will
933 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
934 probably already in the queue (unless this kernel is old
935 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
936 is not an RT signal, it can only be queued once. */
937 kill_lwp (pid, SIGSTOP);
938
939 /* Finally, resume the stopped process. This will deliver the SIGSTOP
940 (or a higher priority signal, just like normal PTRACE_ATTACH). */
941 ptrace (PTRACE_CONT, pid, 0, 0);
942 }
943
944 /* Make sure the initial process is stopped. The user-level threads
945 layer might want to poke around in the inferior, and that won't
946 work if things haven't stabilized yet. */
947 new_pid = my_waitpid (pid, &status, 0);
948 if (new_pid == -1 && errno == ECHILD)
949 {
950 if (first)
951 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
952
953 /* Try again with __WCLONE to check cloned processes. */
954 new_pid = my_waitpid (pid, &status, __WCLONE);
955 *cloned = 1;
956 }
957
958 gdb_assert (pid == new_pid);
959
960 if (!WIFSTOPPED (status))
961 {
962 /* The pid we tried to attach has apparently just exited. */
963 if (debug_linux_nat)
964 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
965 pid, status_to_str (status));
966 return status;
967 }
968
969 if (WSTOPSIG (status) != SIGSTOP)
970 {
971 *signalled = 1;
972 if (debug_linux_nat)
973 fprintf_unfiltered (gdb_stdlog,
974 "LNPAW: Received %s after attaching\n",
975 status_to_str (status));
976 }
977
978 return status;
979 }
980
981 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
982 the new LWP could not be attached, or 1 if we're already auto
983 attached to this thread, but haven't processed the
984 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
985 its existance, without considering it an error. */
986
987 int
988 lin_lwp_attach_lwp (ptid_t ptid)
989 {
990 struct lwp_info *lp;
991 int lwpid;
992
993 gdb_assert (ptid_lwp_p (ptid));
994
995 lp = find_lwp_pid (ptid);
996 lwpid = ptid_get_lwp (ptid);
997
998 /* We assume that we're already attached to any LWP that is already
999 in our list of LWPs. If we're not seeing exit events from threads
1000 and we've had PID wraparound since we last tried to stop all threads,
1001 this assumption might be wrong; fortunately, this is very unlikely
1002 to happen. */
1003 if (lp == NULL)
1004 {
1005 int status, cloned = 0, signalled = 0;
1006
1007 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1008 {
1009 if (linux_supports_tracefork ())
1010 {
1011 /* If we haven't stopped all threads when we get here,
1012 we may have seen a thread listed in thread_db's list,
1013 but not processed the PTRACE_EVENT_CLONE yet. If
1014 that's the case, ignore this new thread, and let
1015 normal event handling discover it later. */
1016 if (in_pid_list_p (stopped_pids, lwpid))
1017 {
1018 /* We've already seen this thread stop, but we
1019 haven't seen the PTRACE_EVENT_CLONE extended
1020 event yet. */
1021 if (debug_linux_nat)
1022 fprintf_unfiltered (gdb_stdlog,
1023 "LLAL: attach failed, but already seen "
1024 "this thread %s stop\n",
1025 target_pid_to_str (ptid));
1026 return 1;
1027 }
1028 else
1029 {
1030 int new_pid;
1031 int status;
1032
1033 if (debug_linux_nat)
1034 fprintf_unfiltered (gdb_stdlog,
1035 "LLAL: attach failed, and haven't seen "
1036 "this thread %s stop yet\n",
1037 target_pid_to_str (ptid));
1038
1039 /* We may or may not be attached to the LWP already.
1040 Try waitpid on it. If that errors, we're not
1041 attached to the LWP yet. Otherwise, we're
1042 already attached. */
1043 gdb_assert (lwpid > 0);
1044 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1045 if (new_pid == -1 && errno == ECHILD)
1046 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1047 if (new_pid != -1)
1048 {
1049 if (new_pid == 0)
1050 {
1051 /* The child hasn't stopped for its initial
1052 SIGSTOP stop yet. */
1053 if (debug_linux_nat)
1054 fprintf_unfiltered (gdb_stdlog,
1055 "LLAL: child hasn't "
1056 "stopped yet\n");
1057 }
1058 else if (WIFSTOPPED (status))
1059 {
1060 if (debug_linux_nat)
1061 fprintf_unfiltered (gdb_stdlog,
1062 "LLAL: adding to stopped_pids\n");
1063 add_to_pid_list (&stopped_pids, lwpid, status);
1064 }
1065 return 1;
1066 }
1067 }
1068 }
1069
1070 /* If we fail to attach to the thread, issue a warning,
1071 but continue. One way this can happen is if thread
1072 creation is interrupted; as of Linux kernel 2.6.19, a
1073 bug may place threads in the thread list and then fail
1074 to create them. */
1075 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1076 safe_strerror (errno));
1077 return -1;
1078 }
1079
1080 if (debug_linux_nat)
1081 fprintf_unfiltered (gdb_stdlog,
1082 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1083 target_pid_to_str (ptid));
1084
1085 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1086 if (!WIFSTOPPED (status))
1087 return 1;
1088
1089 lp = add_lwp (ptid);
1090 lp->stopped = 1;
1091 lp->last_resume_kind = resume_stop;
1092 lp->cloned = cloned;
1093 lp->signalled = signalled;
1094 if (WSTOPSIG (status) != SIGSTOP)
1095 {
1096 lp->resumed = 1;
1097 lp->status = status;
1098 }
1099
1100 target_post_attach (ptid_get_lwp (lp->ptid));
1101
1102 if (debug_linux_nat)
1103 {
1104 fprintf_unfiltered (gdb_stdlog,
1105 "LLAL: waitpid %s received %s\n",
1106 target_pid_to_str (ptid),
1107 status_to_str (status));
1108 }
1109 }
1110
1111 return 0;
1112 }
1113
1114 static void
1115 linux_nat_create_inferior (struct target_ops *ops,
1116 char *exec_file, char *allargs, char **env,
1117 int from_tty)
1118 {
1119 struct cleanup *restore_personality
1120 = maybe_disable_address_space_randomization (disable_randomization);
1121
1122 /* The fork_child mechanism is synchronous and calls target_wait, so
1123 we have to mask the async mode. */
1124
1125 /* Make sure we report all signals during startup. */
1126 linux_nat_pass_signals (ops, 0, NULL);
1127
1128 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1129
1130 do_cleanups (restore_personality);
1131 }
1132
1133 /* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1134 already attached. Returns true if a new LWP is found, false
1135 otherwise. */
1136
1137 static int
1138 attach_proc_task_lwp_callback (ptid_t ptid)
1139 {
1140 struct lwp_info *lp;
1141
1142 /* Ignore LWPs we're already attached to. */
1143 lp = find_lwp_pid (ptid);
1144 if (lp == NULL)
1145 {
1146 int lwpid = ptid_get_lwp (ptid);
1147
1148 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1149 {
1150 int err = errno;
1151
1152 /* Be quiet if we simply raced with the thread exiting.
1153 EPERM is returned if the thread's task still exists, and
1154 is marked as exited or zombie, as well as other
1155 conditions, so in that case, confirm the status in
1156 /proc/PID/status. */
1157 if (err == ESRCH
1158 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1159 {
1160 if (debug_linux_nat)
1161 {
1162 fprintf_unfiltered (gdb_stdlog,
1163 "Cannot attach to lwp %d: "
1164 "thread is gone (%d: %s)\n",
1165 lwpid, err, safe_strerror (err));
1166 }
1167 }
1168 else
1169 {
1170 warning (_("Cannot attach to lwp %d: %s"),
1171 lwpid,
1172 linux_ptrace_attach_fail_reason_string (ptid,
1173 err));
1174 }
1175 }
1176 else
1177 {
1178 if (debug_linux_nat)
1179 fprintf_unfiltered (gdb_stdlog,
1180 "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1181 target_pid_to_str (ptid));
1182
1183 lp = add_lwp (ptid);
1184 lp->cloned = 1;
1185
1186 /* The next time we wait for this LWP we'll see a SIGSTOP as
1187 PTRACE_ATTACH brings it to a halt. */
1188 lp->signalled = 1;
1189
1190 /* We need to wait for a stop before being able to make the
1191 next ptrace call on this LWP. */
1192 lp->must_set_ptrace_flags = 1;
1193 }
1194
1195 return 1;
1196 }
1197 return 0;
1198 }
1199
1200 static void
1201 linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
1202 {
1203 struct lwp_info *lp;
1204 int status;
1205 ptid_t ptid;
1206 volatile struct gdb_exception ex;
1207
1208 /* Make sure we report all signals during attach. */
1209 linux_nat_pass_signals (ops, 0, NULL);
1210
1211 TRY_CATCH (ex, RETURN_MASK_ERROR)
1212 {
1213 linux_ops->to_attach (ops, args, from_tty);
1214 }
1215 if (ex.reason < 0)
1216 {
1217 pid_t pid = parse_pid_to_attach (args);
1218 struct buffer buffer;
1219 char *message, *buffer_s;
1220
1221 message = xstrdup (ex.message);
1222 make_cleanup (xfree, message);
1223
1224 buffer_init (&buffer);
1225 linux_ptrace_attach_fail_reason (pid, &buffer);
1226
1227 buffer_grow_str0 (&buffer, "");
1228 buffer_s = buffer_finish (&buffer);
1229 make_cleanup (xfree, buffer_s);
1230
1231 if (*buffer_s != '\0')
1232 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1233 else
1234 throw_error (ex.error, "%s", message);
1235 }
1236
1237 /* The ptrace base target adds the main thread with (pid,0,0)
1238 format. Decorate it with lwp info. */
1239 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1240 ptid_get_pid (inferior_ptid),
1241 0);
1242 thread_change_ptid (inferior_ptid, ptid);
1243
1244 /* Add the initial process as the first LWP to the list. */
1245 lp = add_initial_lwp (ptid);
1246
1247 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1248 &lp->signalled);
1249 if (!WIFSTOPPED (status))
1250 {
1251 if (WIFEXITED (status))
1252 {
1253 int exit_code = WEXITSTATUS (status);
1254
1255 target_terminal_ours ();
1256 target_mourn_inferior ();
1257 if (exit_code == 0)
1258 error (_("Unable to attach: program exited normally."));
1259 else
1260 error (_("Unable to attach: program exited with code %d."),
1261 exit_code);
1262 }
1263 else if (WIFSIGNALED (status))
1264 {
1265 enum gdb_signal signo;
1266
1267 target_terminal_ours ();
1268 target_mourn_inferior ();
1269
1270 signo = gdb_signal_from_host (WTERMSIG (status));
1271 error (_("Unable to attach: program terminated with signal "
1272 "%s, %s."),
1273 gdb_signal_to_name (signo),
1274 gdb_signal_to_string (signo));
1275 }
1276
1277 internal_error (__FILE__, __LINE__,
1278 _("unexpected status %d for PID %ld"),
1279 status, (long) ptid_get_lwp (ptid));
1280 }
1281
1282 lp->stopped = 1;
1283
1284 /* Save the wait status to report later. */
1285 lp->resumed = 1;
1286 if (debug_linux_nat)
1287 fprintf_unfiltered (gdb_stdlog,
1288 "LNA: waitpid %ld, saving status %s\n",
1289 (long) ptid_get_pid (lp->ptid), status_to_str (status));
1290
1291 lp->status = status;
1292
1293 /* We must attach to every LWP. If /proc is mounted, use that to
1294 find them now. The inferior may be using raw clone instead of
1295 using pthreads. But even if it is using pthreads, thread_db
1296 walks structures in the inferior's address space to find the list
1297 of threads/LWPs, and those structures may well be corrupted.
1298 Note that once thread_db is loaded, we'll still use it to list
1299 threads and associate pthread info with each LWP. */
1300 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1301 attach_proc_task_lwp_callback);
1302
1303 if (target_can_async_p ())
1304 target_async (inferior_event_handler, 0);
1305 }
1306
1307 /* Get pending status of LP. */
1308 static int
1309 get_pending_status (struct lwp_info *lp, int *status)
1310 {
1311 enum gdb_signal signo = GDB_SIGNAL_0;
1312
1313 /* If we paused threads momentarily, we may have stored pending
1314 events in lp->status or lp->waitstatus (see stop_wait_callback),
1315 and GDB core hasn't seen any signal for those threads.
1316 Otherwise, the last signal reported to the core is found in the
1317 thread object's stop_signal.
1318
1319 There's a corner case that isn't handled here at present. Only
1320 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1321 stop_signal make sense as a real signal to pass to the inferior.
1322 Some catchpoint related events, like
1323 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1324 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1325 those traps are debug API (ptrace in our case) related and
1326 induced; the inferior wouldn't see them if it wasn't being
1327 traced. Hence, we should never pass them to the inferior, even
1328 when set to pass state. Since this corner case isn't handled by
1329 infrun.c when proceeding with a signal, for consistency, neither
1330 do we handle it here (or elsewhere in the file we check for
1331 signal pass state). Normally SIGTRAP isn't set to pass state, so
1332 this is really a corner case. */
1333
1334 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1335 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1336 else if (lp->status)
1337 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1338 else if (non_stop && !is_executing (lp->ptid))
1339 {
1340 struct thread_info *tp = find_thread_ptid (lp->ptid);
1341
1342 signo = tp->suspend.stop_signal;
1343 }
1344 else if (!non_stop)
1345 {
1346 struct target_waitstatus last;
1347 ptid_t last_ptid;
1348
1349 get_last_target_status (&last_ptid, &last);
1350
1351 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
1352 {
1353 struct thread_info *tp = find_thread_ptid (lp->ptid);
1354
1355 signo = tp->suspend.stop_signal;
1356 }
1357 }
1358
1359 *status = 0;
1360
1361 if (signo == GDB_SIGNAL_0)
1362 {
1363 if (debug_linux_nat)
1364 fprintf_unfiltered (gdb_stdlog,
1365 "GPT: lwp %s has no pending signal\n",
1366 target_pid_to_str (lp->ptid));
1367 }
1368 else if (!signal_pass_state (signo))
1369 {
1370 if (debug_linux_nat)
1371 fprintf_unfiltered (gdb_stdlog,
1372 "GPT: lwp %s had signal %s, "
1373 "but it is in no pass state\n",
1374 target_pid_to_str (lp->ptid),
1375 gdb_signal_to_string (signo));
1376 }
1377 else
1378 {
1379 *status = W_STOPCODE (gdb_signal_to_host (signo));
1380
1381 if (debug_linux_nat)
1382 fprintf_unfiltered (gdb_stdlog,
1383 "GPT: lwp %s has pending signal %s\n",
1384 target_pid_to_str (lp->ptid),
1385 gdb_signal_to_string (signo));
1386 }
1387
1388 return 0;
1389 }
1390
1391 static int
1392 detach_callback (struct lwp_info *lp, void *data)
1393 {
1394 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1395
1396 if (debug_linux_nat && lp->status)
1397 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1398 strsignal (WSTOPSIG (lp->status)),
1399 target_pid_to_str (lp->ptid));
1400
1401 /* If there is a pending SIGSTOP, get rid of it. */
1402 if (lp->signalled)
1403 {
1404 if (debug_linux_nat)
1405 fprintf_unfiltered (gdb_stdlog,
1406 "DC: Sending SIGCONT to %s\n",
1407 target_pid_to_str (lp->ptid));
1408
1409 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
1410 lp->signalled = 0;
1411 }
1412
1413 /* We don't actually detach from the LWP that has an id equal to the
1414 overall process id just yet. */
1415 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
1416 {
1417 int status = 0;
1418
1419 /* Pass on any pending signal for this LWP. */
1420 get_pending_status (lp, &status);
1421
1422 if (linux_nat_prepare_to_resume != NULL)
1423 linux_nat_prepare_to_resume (lp);
1424 errno = 0;
1425 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
1426 WSTOPSIG (status)) < 0)
1427 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1428 safe_strerror (errno));
1429
1430 if (debug_linux_nat)
1431 fprintf_unfiltered (gdb_stdlog,
1432 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1433 target_pid_to_str (lp->ptid),
1434 strsignal (WSTOPSIG (status)));
1435
1436 delete_lwp (lp->ptid);
1437 }
1438
1439 return 0;
1440 }
1441
1442 static void
1443 linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
1444 {
1445 int pid;
1446 int status;
1447 struct lwp_info *main_lwp;
1448
1449 pid = ptid_get_pid (inferior_ptid);
1450
1451 /* Don't unregister from the event loop, as there may be other
1452 inferiors running. */
1453
1454 /* Stop all threads before detaching. ptrace requires that the
1455 thread is stopped to sucessfully detach. */
1456 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1457 /* ... and wait until all of them have reported back that
1458 they're no longer running. */
1459 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1460
1461 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1462
1463 /* Only the initial process should be left right now. */
1464 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
1465
1466 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1467
1468 /* Pass on any pending signal for the last LWP. */
1469 if ((args == NULL || *args == '\0')
1470 && get_pending_status (main_lwp, &status) != -1
1471 && WIFSTOPPED (status))
1472 {
1473 char *tem;
1474
1475 /* Put the signal number in ARGS so that inf_ptrace_detach will
1476 pass it along with PTRACE_DETACH. */
1477 tem = alloca (8);
1478 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
1479 args = tem;
1480 if (debug_linux_nat)
1481 fprintf_unfiltered (gdb_stdlog,
1482 "LND: Sending signal %s to %s\n",
1483 args,
1484 target_pid_to_str (main_lwp->ptid));
1485 }
1486
1487 if (linux_nat_prepare_to_resume != NULL)
1488 linux_nat_prepare_to_resume (main_lwp);
1489 delete_lwp (main_lwp->ptid);
1490
1491 if (forks_exist_p ())
1492 {
1493 /* Multi-fork case. The current inferior_ptid is being detached
1494 from, but there are other viable forks to debug. Detach from
1495 the current fork, and context-switch to the first
1496 available. */
1497 linux_fork_detach (args, from_tty);
1498 }
1499 else
1500 linux_ops->to_detach (ops, args, from_tty);
1501 }
1502
1503 /* Resume execution of the inferior process. If STEP is nonzero,
1504 single-step it. If SIGNAL is nonzero, give it that signal. */
1505
1506 static void
1507 linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1508 {
1509 ptid_t ptid;
1510
1511 lp->step = step;
1512
1513 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1514 We only presently need that if the LWP is stepped though (to
1515 handle the case of stepping a breakpoint instruction). */
1516 if (step)
1517 {
1518 struct regcache *regcache = get_thread_regcache (lp->ptid);
1519
1520 lp->stop_pc = regcache_read_pc (regcache);
1521 }
1522 else
1523 lp->stop_pc = 0;
1524
1525 if (linux_nat_prepare_to_resume != NULL)
1526 linux_nat_prepare_to_resume (lp);
1527 /* Convert to something the lower layer understands. */
1528 ptid = pid_to_ptid (ptid_get_lwp (lp->ptid));
1529 linux_ops->to_resume (linux_ops, ptid, step, signo);
1530 lp->stop_reason = LWP_STOPPED_BY_NO_REASON;
1531 lp->stopped = 0;
1532 registers_changed_ptid (lp->ptid);
1533 }
1534
1535 /* Resume LP. */
1536
1537 static void
1538 resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1539 {
1540 if (lp->stopped)
1541 {
1542 struct inferior *inf = find_inferior_ptid (lp->ptid);
1543
1544 if (inf->vfork_child != NULL)
1545 {
1546 if (debug_linux_nat)
1547 fprintf_unfiltered (gdb_stdlog,
1548 "RC: Not resuming %s (vfork parent)\n",
1549 target_pid_to_str (lp->ptid));
1550 }
1551 else if (!lwp_status_pending_p (lp))
1552 {
1553 if (debug_linux_nat)
1554 fprintf_unfiltered (gdb_stdlog,
1555 "RC: Resuming sibling %s, %s, %s\n",
1556 target_pid_to_str (lp->ptid),
1557 (signo != GDB_SIGNAL_0
1558 ? strsignal (gdb_signal_to_host (signo))
1559 : "0"),
1560 step ? "step" : "resume");
1561
1562 linux_resume_one_lwp (lp, step, signo);
1563 }
1564 else
1565 {
1566 if (debug_linux_nat)
1567 fprintf_unfiltered (gdb_stdlog,
1568 "RC: Not resuming sibling %s (has pending)\n",
1569 target_pid_to_str (lp->ptid));
1570 }
1571 }
1572 else
1573 {
1574 if (debug_linux_nat)
1575 fprintf_unfiltered (gdb_stdlog,
1576 "RC: Not resuming sibling %s (not stopped)\n",
1577 target_pid_to_str (lp->ptid));
1578 }
1579 }
1580
1581 /* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1582 Resume LWP with the last stop signal, if it is in pass state. */
1583
1584 static int
1585 linux_nat_resume_callback (struct lwp_info *lp, void *except)
1586 {
1587 enum gdb_signal signo = GDB_SIGNAL_0;
1588
1589 if (lp == except)
1590 return 0;
1591
1592 if (lp->stopped)
1593 {
1594 struct thread_info *thread;
1595
1596 thread = find_thread_ptid (lp->ptid);
1597 if (thread != NULL)
1598 {
1599 signo = thread->suspend.stop_signal;
1600 thread->suspend.stop_signal = GDB_SIGNAL_0;
1601 }
1602 }
1603
1604 resume_lwp (lp, 0, signo);
1605 return 0;
1606 }
1607
1608 static int
1609 resume_clear_callback (struct lwp_info *lp, void *data)
1610 {
1611 lp->resumed = 0;
1612 lp->last_resume_kind = resume_stop;
1613 return 0;
1614 }
1615
1616 static int
1617 resume_set_callback (struct lwp_info *lp, void *data)
1618 {
1619 lp->resumed = 1;
1620 lp->last_resume_kind = resume_continue;
1621 return 0;
1622 }
1623
1624 static void
1625 linux_nat_resume (struct target_ops *ops,
1626 ptid_t ptid, int step, enum gdb_signal signo)
1627 {
1628 struct lwp_info *lp;
1629 int resume_many;
1630
1631 if (debug_linux_nat)
1632 fprintf_unfiltered (gdb_stdlog,
1633 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1634 step ? "step" : "resume",
1635 target_pid_to_str (ptid),
1636 (signo != GDB_SIGNAL_0
1637 ? strsignal (gdb_signal_to_host (signo)) : "0"),
1638 target_pid_to_str (inferior_ptid));
1639
1640 /* A specific PTID means `step only this process id'. */
1641 resume_many = (ptid_equal (minus_one_ptid, ptid)
1642 || ptid_is_pid (ptid));
1643
1644 /* Mark the lwps we're resuming as resumed. */
1645 iterate_over_lwps (ptid, resume_set_callback, NULL);
1646
1647 /* See if it's the current inferior that should be handled
1648 specially. */
1649 if (resume_many)
1650 lp = find_lwp_pid (inferior_ptid);
1651 else
1652 lp = find_lwp_pid (ptid);
1653 gdb_assert (lp != NULL);
1654
1655 /* Remember if we're stepping. */
1656 lp->last_resume_kind = step ? resume_step : resume_continue;
1657
1658 /* If we have a pending wait status for this thread, there is no
1659 point in resuming the process. But first make sure that
1660 linux_nat_wait won't preemptively handle the event - we
1661 should never take this short-circuit if we are going to
1662 leave LP running, since we have skipped resuming all the
1663 other threads. This bit of code needs to be synchronized
1664 with linux_nat_wait. */
1665
1666 if (lp->status && WIFSTOPPED (lp->status))
1667 {
1668 if (!lp->step
1669 && WSTOPSIG (lp->status)
1670 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1671 {
1672 if (debug_linux_nat)
1673 fprintf_unfiltered (gdb_stdlog,
1674 "LLR: Not short circuiting for ignored "
1675 "status 0x%x\n", lp->status);
1676
1677 /* FIXME: What should we do if we are supposed to continue
1678 this thread with a signal? */
1679 gdb_assert (signo == GDB_SIGNAL_0);
1680 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
1681 lp->status = 0;
1682 }
1683 }
1684
1685 if (lwp_status_pending_p (lp))
1686 {
1687 /* FIXME: What should we do if we are supposed to continue
1688 this thread with a signal? */
1689 gdb_assert (signo == GDB_SIGNAL_0);
1690
1691 if (debug_linux_nat)
1692 fprintf_unfiltered (gdb_stdlog,
1693 "LLR: Short circuiting for status 0x%x\n",
1694 lp->status);
1695
1696 if (target_can_async_p ())
1697 {
1698 target_async (inferior_event_handler, 0);
1699 /* Tell the event loop we have something to process. */
1700 async_file_mark ();
1701 }
1702 return;
1703 }
1704
1705 if (resume_many)
1706 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
1707
1708 linux_resume_one_lwp (lp, step, signo);
1709
1710 if (debug_linux_nat)
1711 fprintf_unfiltered (gdb_stdlog,
1712 "LLR: %s %s, %s (resume event thread)\n",
1713 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1714 target_pid_to_str (ptid),
1715 (signo != GDB_SIGNAL_0
1716 ? strsignal (gdb_signal_to_host (signo)) : "0"));
1717
1718 if (target_can_async_p ())
1719 target_async (inferior_event_handler, 0);
1720 }
1721
1722 /* Send a signal to an LWP. */
1723
1724 static int
1725 kill_lwp (int lwpid, int signo)
1726 {
1727 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1728 fails, then we are not using nptl threads and we should be using kill. */
1729
1730 #ifdef HAVE_TKILL_SYSCALL
1731 {
1732 static int tkill_failed;
1733
1734 if (!tkill_failed)
1735 {
1736 int ret;
1737
1738 errno = 0;
1739 ret = syscall (__NR_tkill, lwpid, signo);
1740 if (errno != ENOSYS)
1741 return ret;
1742 tkill_failed = 1;
1743 }
1744 }
1745 #endif
1746
1747 return kill (lwpid, signo);
1748 }
1749
1750 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1751 event, check if the core is interested in it: if not, ignore the
1752 event, and keep waiting; otherwise, we need to toggle the LWP's
1753 syscall entry/exit status, since the ptrace event itself doesn't
1754 indicate it, and report the trap to higher layers. */
1755
1756 static int
1757 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1758 {
1759 struct target_waitstatus *ourstatus = &lp->waitstatus;
1760 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1761 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1762
1763 if (stopping)
1764 {
1765 /* If we're stopping threads, there's a SIGSTOP pending, which
1766 makes it so that the LWP reports an immediate syscall return,
1767 followed by the SIGSTOP. Skip seeing that "return" using
1768 PTRACE_CONT directly, and let stop_wait_callback collect the
1769 SIGSTOP. Later when the thread is resumed, a new syscall
1770 entry event. If we didn't do this (and returned 0), we'd
1771 leave a syscall entry pending, and our caller, by using
1772 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1773 itself. Later, when the user re-resumes this LWP, we'd see
1774 another syscall entry event and we'd mistake it for a return.
1775
1776 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1777 (leaving immediately with LWP->signalled set, without issuing
1778 a PTRACE_CONT), it would still be problematic to leave this
1779 syscall enter pending, as later when the thread is resumed,
1780 it would then see the same syscall exit mentioned above,
1781 followed by the delayed SIGSTOP, while the syscall didn't
1782 actually get to execute. It seems it would be even more
1783 confusing to the user. */
1784
1785 if (debug_linux_nat)
1786 fprintf_unfiltered (gdb_stdlog,
1787 "LHST: ignoring syscall %d "
1788 "for LWP %ld (stopping threads), "
1789 "resuming with PTRACE_CONT for SIGSTOP\n",
1790 syscall_number,
1791 ptid_get_lwp (lp->ptid));
1792
1793 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1794 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
1795 lp->stopped = 0;
1796 return 1;
1797 }
1798
1799 if (catch_syscall_enabled ())
1800 {
1801 /* Always update the entry/return state, even if this particular
1802 syscall isn't interesting to the core now. In async mode,
1803 the user could install a new catchpoint for this syscall
1804 between syscall enter/return, and we'll need to know to
1805 report a syscall return if that happens. */
1806 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1807 ? TARGET_WAITKIND_SYSCALL_RETURN
1808 : TARGET_WAITKIND_SYSCALL_ENTRY);
1809
1810 if (catching_syscall_number (syscall_number))
1811 {
1812 /* Alright, an event to report. */
1813 ourstatus->kind = lp->syscall_state;
1814 ourstatus->value.syscall_number = syscall_number;
1815
1816 if (debug_linux_nat)
1817 fprintf_unfiltered (gdb_stdlog,
1818 "LHST: stopping for %s of syscall %d"
1819 " for LWP %ld\n",
1820 lp->syscall_state
1821 == TARGET_WAITKIND_SYSCALL_ENTRY
1822 ? "entry" : "return",
1823 syscall_number,
1824 ptid_get_lwp (lp->ptid));
1825 return 0;
1826 }
1827
1828 if (debug_linux_nat)
1829 fprintf_unfiltered (gdb_stdlog,
1830 "LHST: ignoring %s of syscall %d "
1831 "for LWP %ld\n",
1832 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1833 ? "entry" : "return",
1834 syscall_number,
1835 ptid_get_lwp (lp->ptid));
1836 }
1837 else
1838 {
1839 /* If we had been syscall tracing, and hence used PT_SYSCALL
1840 before on this LWP, it could happen that the user removes all
1841 syscall catchpoints before we get to process this event.
1842 There are two noteworthy issues here:
1843
1844 - When stopped at a syscall entry event, resuming with
1845 PT_STEP still resumes executing the syscall and reports a
1846 syscall return.
1847
1848 - Only PT_SYSCALL catches syscall enters. If we last
1849 single-stepped this thread, then this event can't be a
1850 syscall enter. If we last single-stepped this thread, this
1851 has to be a syscall exit.
1852
1853 The points above mean that the next resume, be it PT_STEP or
1854 PT_CONTINUE, can not trigger a syscall trace event. */
1855 if (debug_linux_nat)
1856 fprintf_unfiltered (gdb_stdlog,
1857 "LHST: caught syscall event "
1858 "with no syscall catchpoints."
1859 " %d for LWP %ld, ignoring\n",
1860 syscall_number,
1861 ptid_get_lwp (lp->ptid));
1862 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1863 }
1864
1865 /* The core isn't interested in this event. For efficiency, avoid
1866 stopping all threads only to have the core resume them all again.
1867 Since we're not stopping threads, if we're still syscall tracing
1868 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1869 subsequent syscall. Simply resume using the inf-ptrace layer,
1870 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1871
1872 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
1873 return 1;
1874 }
1875
1876 /* Handle a GNU/Linux extended wait response. If we see a clone
1877 event, we need to add the new LWP to our list (and not report the
1878 trap to higher layers). This function returns non-zero if the
1879 event should be ignored and we should wait again. If STOPPING is
1880 true, the new LWP remains stopped, otherwise it is continued. */
1881
1882 static int
1883 linux_handle_extended_wait (struct lwp_info *lp, int status,
1884 int stopping)
1885 {
1886 int pid = ptid_get_lwp (lp->ptid);
1887 struct target_waitstatus *ourstatus = &lp->waitstatus;
1888 int event = linux_ptrace_get_extended_event (status);
1889
1890 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1891 || event == PTRACE_EVENT_CLONE)
1892 {
1893 unsigned long new_pid;
1894 int ret;
1895
1896 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1897
1898 /* If we haven't already seen the new PID stop, wait for it now. */
1899 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1900 {
1901 /* The new child has a pending SIGSTOP. We can't affect it until it
1902 hits the SIGSTOP, but we're already attached. */
1903 ret = my_waitpid (new_pid, &status,
1904 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1905 if (ret == -1)
1906 perror_with_name (_("waiting for new child"));
1907 else if (ret != new_pid)
1908 internal_error (__FILE__, __LINE__,
1909 _("wait returned unexpected PID %d"), ret);
1910 else if (!WIFSTOPPED (status))
1911 internal_error (__FILE__, __LINE__,
1912 _("wait returned unexpected status 0x%x"), status);
1913 }
1914
1915 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1916
1917 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1918 {
1919 /* The arch-specific native code may need to know about new
1920 forks even if those end up never mapped to an
1921 inferior. */
1922 if (linux_nat_new_fork != NULL)
1923 linux_nat_new_fork (lp, new_pid);
1924 }
1925
1926 if (event == PTRACE_EVENT_FORK
1927 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
1928 {
1929 /* Handle checkpointing by linux-fork.c here as a special
1930 case. We don't want the follow-fork-mode or 'catch fork'
1931 to interfere with this. */
1932
1933 /* This won't actually modify the breakpoint list, but will
1934 physically remove the breakpoints from the child. */
1935 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
1936
1937 /* Retain child fork in ptrace (stopped) state. */
1938 if (!find_fork_pid (new_pid))
1939 add_fork (new_pid);
1940
1941 /* Report as spurious, so that infrun doesn't want to follow
1942 this fork. We're actually doing an infcall in
1943 linux-fork.c. */
1944 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
1945
1946 /* Report the stop to the core. */
1947 return 0;
1948 }
1949
1950 if (event == PTRACE_EVENT_FORK)
1951 ourstatus->kind = TARGET_WAITKIND_FORKED;
1952 else if (event == PTRACE_EVENT_VFORK)
1953 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1954 else
1955 {
1956 struct lwp_info *new_lp;
1957
1958 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1959
1960 if (debug_linux_nat)
1961 fprintf_unfiltered (gdb_stdlog,
1962 "LHEW: Got clone event "
1963 "from LWP %d, new child is LWP %ld\n",
1964 pid, new_pid);
1965
1966 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
1967 new_lp->cloned = 1;
1968 new_lp->stopped = 1;
1969
1970 if (WSTOPSIG (status) != SIGSTOP)
1971 {
1972 /* This can happen if someone starts sending signals to
1973 the new thread before it gets a chance to run, which
1974 have a lower number than SIGSTOP (e.g. SIGUSR1).
1975 This is an unlikely case, and harder to handle for
1976 fork / vfork than for clone, so we do not try - but
1977 we handle it for clone events here. We'll send
1978 the other signal on to the thread below. */
1979
1980 new_lp->signalled = 1;
1981 }
1982 else
1983 {
1984 struct thread_info *tp;
1985
1986 /* When we stop for an event in some other thread, and
1987 pull the thread list just as this thread has cloned,
1988 we'll have seen the new thread in the thread_db list
1989 before handling the CLONE event (glibc's
1990 pthread_create adds the new thread to the thread list
1991 before clone'ing, and has the kernel fill in the
1992 thread's tid on the clone call with
1993 CLONE_PARENT_SETTID). If that happened, and the core
1994 had requested the new thread to stop, we'll have
1995 killed it with SIGSTOP. But since SIGSTOP is not an
1996 RT signal, it can only be queued once. We need to be
1997 careful to not resume the LWP if we wanted it to
1998 stop. In that case, we'll leave the SIGSTOP pending.
1999 It will later be reported as GDB_SIGNAL_0. */
2000 tp = find_thread_ptid (new_lp->ptid);
2001 if (tp != NULL && tp->stop_requested)
2002 new_lp->last_resume_kind = resume_stop;
2003 else
2004 status = 0;
2005 }
2006
2007 /* If the thread_db layer is active, let it record the user
2008 level thread id and status, and add the thread to GDB's
2009 list. */
2010 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
2011 {
2012 /* The process is not using thread_db. Add the LWP to
2013 GDB's list. */
2014 target_post_attach (ptid_get_lwp (new_lp->ptid));
2015 add_thread (new_lp->ptid);
2016 }
2017
2018 if (!stopping)
2019 {
2020 set_running (new_lp->ptid, 1);
2021 set_executing (new_lp->ptid, 1);
2022 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2023 resume_stop. */
2024 new_lp->last_resume_kind = resume_continue;
2025 }
2026
2027 if (status != 0)
2028 {
2029 /* We created NEW_LP so it cannot yet contain STATUS. */
2030 gdb_assert (new_lp->status == 0);
2031
2032 /* Save the wait status to report later. */
2033 if (debug_linux_nat)
2034 fprintf_unfiltered (gdb_stdlog,
2035 "LHEW: waitpid of new LWP %ld, "
2036 "saving status %s\n",
2037 (long) ptid_get_lwp (new_lp->ptid),
2038 status_to_str (status));
2039 new_lp->status = status;
2040 }
2041
2042 new_lp->resumed = !stopping;
2043 return 1;
2044 }
2045
2046 return 0;
2047 }
2048
2049 if (event == PTRACE_EVENT_EXEC)
2050 {
2051 if (debug_linux_nat)
2052 fprintf_unfiltered (gdb_stdlog,
2053 "LHEW: Got exec event from LWP %ld\n",
2054 ptid_get_lwp (lp->ptid));
2055
2056 ourstatus->kind = TARGET_WAITKIND_EXECD;
2057 ourstatus->value.execd_pathname
2058 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
2059
2060 /* The thread that execed must have been resumed, but, when a
2061 thread execs, it changes its tid to the tgid, and the old
2062 tgid thread might have not been resumed. */
2063 lp->resumed = 1;
2064 return 0;
2065 }
2066
2067 if (event == PTRACE_EVENT_VFORK_DONE)
2068 {
2069 if (current_inferior ()->waiting_for_vfork_done)
2070 {
2071 if (debug_linux_nat)
2072 fprintf_unfiltered (gdb_stdlog,
2073 "LHEW: Got expected PTRACE_EVENT_"
2074 "VFORK_DONE from LWP %ld: stopping\n",
2075 ptid_get_lwp (lp->ptid));
2076
2077 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2078 return 0;
2079 }
2080
2081 if (debug_linux_nat)
2082 fprintf_unfiltered (gdb_stdlog,
2083 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2084 "from LWP %ld: ignoring\n",
2085 ptid_get_lwp (lp->ptid));
2086 return 1;
2087 }
2088
2089 internal_error (__FILE__, __LINE__,
2090 _("unknown ptrace event %d"), event);
2091 }
2092
2093 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2094 exited. */
2095
2096 static int
2097 wait_lwp (struct lwp_info *lp)
2098 {
2099 pid_t pid;
2100 int status = 0;
2101 int thread_dead = 0;
2102 sigset_t prev_mask;
2103
2104 gdb_assert (!lp->stopped);
2105 gdb_assert (lp->status == 0);
2106
2107 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2108 block_child_signals (&prev_mask);
2109
2110 for (;;)
2111 {
2112 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2113 was right and we should just call sigsuspend. */
2114
2115 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
2116 if (pid == -1 && errno == ECHILD)
2117 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
2118 if (pid == -1 && errno == ECHILD)
2119 {
2120 /* The thread has previously exited. We need to delete it
2121 now because, for some vendor 2.4 kernels with NPTL
2122 support backported, there won't be an exit event unless
2123 it is the main thread. 2.6 kernels will report an exit
2124 event for each thread that exits, as expected. */
2125 thread_dead = 1;
2126 if (debug_linux_nat)
2127 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2128 target_pid_to_str (lp->ptid));
2129 }
2130 if (pid != 0)
2131 break;
2132
2133 /* Bugs 10970, 12702.
2134 Thread group leader may have exited in which case we'll lock up in
2135 waitpid if there are other threads, even if they are all zombies too.
2136 Basically, we're not supposed to use waitpid this way.
2137 __WCLONE is not applicable for the leader so we can't use that.
2138 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2139 process; it gets ESRCH both for the zombie and for running processes.
2140
2141 As a workaround, check if we're waiting for the thread group leader and
2142 if it's a zombie, and avoid calling waitpid if it is.
2143
2144 This is racy, what if the tgl becomes a zombie right after we check?
2145 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2146 waiting waitpid but linux_proc_pid_is_zombie is safe this way. */
2147
2148 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2149 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
2150 {
2151 thread_dead = 1;
2152 if (debug_linux_nat)
2153 fprintf_unfiltered (gdb_stdlog,
2154 "WL: Thread group leader %s vanished.\n",
2155 target_pid_to_str (lp->ptid));
2156 break;
2157 }
2158
2159 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2160 get invoked despite our caller had them intentionally blocked by
2161 block_child_signals. This is sensitive only to the loop of
2162 linux_nat_wait_1 and there if we get called my_waitpid gets called
2163 again before it gets to sigsuspend so we can safely let the handlers
2164 get executed here. */
2165
2166 if (debug_linux_nat)
2167 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
2168 sigsuspend (&suspend_mask);
2169 }
2170
2171 restore_child_signals_mask (&prev_mask);
2172
2173 if (!thread_dead)
2174 {
2175 gdb_assert (pid == ptid_get_lwp (lp->ptid));
2176
2177 if (debug_linux_nat)
2178 {
2179 fprintf_unfiltered (gdb_stdlog,
2180 "WL: waitpid %s received %s\n",
2181 target_pid_to_str (lp->ptid),
2182 status_to_str (status));
2183 }
2184
2185 /* Check if the thread has exited. */
2186 if (WIFEXITED (status) || WIFSIGNALED (status))
2187 {
2188 thread_dead = 1;
2189 if (debug_linux_nat)
2190 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2191 target_pid_to_str (lp->ptid));
2192 }
2193 }
2194
2195 if (thread_dead)
2196 {
2197 exit_lwp (lp);
2198 return 0;
2199 }
2200
2201 gdb_assert (WIFSTOPPED (status));
2202 lp->stopped = 1;
2203
2204 if (lp->must_set_ptrace_flags)
2205 {
2206 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2207
2208 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2209 lp->must_set_ptrace_flags = 0;
2210 }
2211
2212 /* Handle GNU/Linux's syscall SIGTRAPs. */
2213 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2214 {
2215 /* No longer need the sysgood bit. The ptrace event ends up
2216 recorded in lp->waitstatus if we care for it. We can carry
2217 on handling the event like a regular SIGTRAP from here
2218 on. */
2219 status = W_STOPCODE (SIGTRAP);
2220 if (linux_handle_syscall_trap (lp, 1))
2221 return wait_lwp (lp);
2222 }
2223
2224 /* Handle GNU/Linux's extended waitstatus for trace events. */
2225 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2226 && linux_is_extended_waitstatus (status))
2227 {
2228 if (debug_linux_nat)
2229 fprintf_unfiltered (gdb_stdlog,
2230 "WL: Handling extended status 0x%06x\n",
2231 status);
2232 linux_handle_extended_wait (lp, status, 1);
2233 return 0;
2234 }
2235
2236 return status;
2237 }
2238
2239 /* Send a SIGSTOP to LP. */
2240
2241 static int
2242 stop_callback (struct lwp_info *lp, void *data)
2243 {
2244 if (!lp->stopped && !lp->signalled)
2245 {
2246 int ret;
2247
2248 if (debug_linux_nat)
2249 {
2250 fprintf_unfiltered (gdb_stdlog,
2251 "SC: kill %s **<SIGSTOP>**\n",
2252 target_pid_to_str (lp->ptid));
2253 }
2254 errno = 0;
2255 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
2256 if (debug_linux_nat)
2257 {
2258 fprintf_unfiltered (gdb_stdlog,
2259 "SC: lwp kill %d %s\n",
2260 ret,
2261 errno ? safe_strerror (errno) : "ERRNO-OK");
2262 }
2263
2264 lp->signalled = 1;
2265 gdb_assert (lp->status == 0);
2266 }
2267
2268 return 0;
2269 }
2270
2271 /* Request a stop on LWP. */
2272
2273 void
2274 linux_stop_lwp (struct lwp_info *lwp)
2275 {
2276 stop_callback (lwp, NULL);
2277 }
2278
2279 /* See linux-nat.h */
2280
2281 void
2282 linux_stop_and_wait_all_lwps (void)
2283 {
2284 /* Stop all LWP's ... */
2285 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2286
2287 /* ... and wait until all of them have reported back that
2288 they're no longer running. */
2289 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2290 }
2291
2292 /* See linux-nat.h */
2293
2294 void
2295 linux_unstop_all_lwps (void)
2296 {
2297 iterate_over_lwps (minus_one_ptid,
2298 resume_stopped_resumed_lwps, &minus_one_ptid);
2299 }
2300
2301 /* Return non-zero if LWP PID has a pending SIGINT. */
2302
2303 static int
2304 linux_nat_has_pending_sigint (int pid)
2305 {
2306 sigset_t pending, blocked, ignored;
2307
2308 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2309
2310 if (sigismember (&pending, SIGINT)
2311 && !sigismember (&ignored, SIGINT))
2312 return 1;
2313
2314 return 0;
2315 }
2316
2317 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2318
2319 static int
2320 set_ignore_sigint (struct lwp_info *lp, void *data)
2321 {
2322 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2323 flag to consume the next one. */
2324 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2325 && WSTOPSIG (lp->status) == SIGINT)
2326 lp->status = 0;
2327 else
2328 lp->ignore_sigint = 1;
2329
2330 return 0;
2331 }
2332
2333 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2334 This function is called after we know the LWP has stopped; if the LWP
2335 stopped before the expected SIGINT was delivered, then it will never have
2336 arrived. Also, if the signal was delivered to a shared queue and consumed
2337 by a different thread, it will never be delivered to this LWP. */
2338
2339 static void
2340 maybe_clear_ignore_sigint (struct lwp_info *lp)
2341 {
2342 if (!lp->ignore_sigint)
2343 return;
2344
2345 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
2346 {
2347 if (debug_linux_nat)
2348 fprintf_unfiltered (gdb_stdlog,
2349 "MCIS: Clearing bogus flag for %s\n",
2350 target_pid_to_str (lp->ptid));
2351 lp->ignore_sigint = 0;
2352 }
2353 }
2354
2355 /* Fetch the possible triggered data watchpoint info and store it in
2356 LP.
2357
2358 On some archs, like x86, that use debug registers to set
2359 watchpoints, it's possible that the way to know which watched
2360 address trapped, is to check the register that is used to select
2361 which address to watch. Problem is, between setting the watchpoint
2362 and reading back which data address trapped, the user may change
2363 the set of watchpoints, and, as a consequence, GDB changes the
2364 debug registers in the inferior. To avoid reading back a stale
2365 stopped-data-address when that happens, we cache in LP the fact
2366 that a watchpoint trapped, and the corresponding data address, as
2367 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2368 registers meanwhile, we have the cached data we can rely on. */
2369
2370 static int
2371 check_stopped_by_watchpoint (struct lwp_info *lp)
2372 {
2373 struct cleanup *old_chain;
2374
2375 if (linux_ops->to_stopped_by_watchpoint == NULL)
2376 return 0;
2377
2378 old_chain = save_inferior_ptid ();
2379 inferior_ptid = lp->ptid;
2380
2381 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
2382 {
2383 lp->stop_reason = LWP_STOPPED_BY_WATCHPOINT;
2384
2385 if (linux_ops->to_stopped_data_address != NULL)
2386 lp->stopped_data_address_p =
2387 linux_ops->to_stopped_data_address (&current_target,
2388 &lp->stopped_data_address);
2389 else
2390 lp->stopped_data_address_p = 0;
2391 }
2392
2393 do_cleanups (old_chain);
2394
2395 return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
2396 }
2397
2398 /* Called when the LWP stopped for a trap that could be explained by a
2399 watchpoint or a breakpoint. */
2400
2401 static void
2402 save_sigtrap (struct lwp_info *lp)
2403 {
2404 gdb_assert (lp->stop_reason == LWP_STOPPED_BY_NO_REASON);
2405 gdb_assert (lp->status != 0);
2406
2407 if (check_stopped_by_watchpoint (lp))
2408 return;
2409
2410 if (linux_nat_status_is_event (lp->status))
2411 check_stopped_by_breakpoint (lp);
2412 }
2413
2414 /* Returns true if the LWP had stopped for a watchpoint. */
2415
2416 static int
2417 linux_nat_stopped_by_watchpoint (struct target_ops *ops)
2418 {
2419 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2420
2421 gdb_assert (lp != NULL);
2422
2423 return lp->stop_reason == LWP_STOPPED_BY_WATCHPOINT;
2424 }
2425
2426 static int
2427 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2428 {
2429 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2430
2431 gdb_assert (lp != NULL);
2432
2433 *addr_p = lp->stopped_data_address;
2434
2435 return lp->stopped_data_address_p;
2436 }
2437
2438 /* Commonly any breakpoint / watchpoint generate only SIGTRAP. */
2439
2440 static int
2441 sigtrap_is_event (int status)
2442 {
2443 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2444 }
2445
2446 /* Set alternative SIGTRAP-like events recognizer. If
2447 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2448 applied. */
2449
2450 void
2451 linux_nat_set_status_is_event (struct target_ops *t,
2452 int (*status_is_event) (int status))
2453 {
2454 linux_nat_status_is_event = status_is_event;
2455 }
2456
2457 /* Wait until LP is stopped. */
2458
2459 static int
2460 stop_wait_callback (struct lwp_info *lp, void *data)
2461 {
2462 struct inferior *inf = find_inferior_ptid (lp->ptid);
2463
2464 /* If this is a vfork parent, bail out, it is not going to report
2465 any SIGSTOP until the vfork is done with. */
2466 if (inf->vfork_child != NULL)
2467 return 0;
2468
2469 if (!lp->stopped)
2470 {
2471 int status;
2472
2473 status = wait_lwp (lp);
2474 if (status == 0)
2475 return 0;
2476
2477 if (lp->ignore_sigint && WIFSTOPPED (status)
2478 && WSTOPSIG (status) == SIGINT)
2479 {
2480 lp->ignore_sigint = 0;
2481
2482 errno = 0;
2483 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
2484 lp->stopped = 0;
2485 if (debug_linux_nat)
2486 fprintf_unfiltered (gdb_stdlog,
2487 "PTRACE_CONT %s, 0, 0 (%s) "
2488 "(discarding SIGINT)\n",
2489 target_pid_to_str (lp->ptid),
2490 errno ? safe_strerror (errno) : "OK");
2491
2492 return stop_wait_callback (lp, NULL);
2493 }
2494
2495 maybe_clear_ignore_sigint (lp);
2496
2497 if (WSTOPSIG (status) != SIGSTOP)
2498 {
2499 /* The thread was stopped with a signal other than SIGSTOP. */
2500
2501 if (debug_linux_nat)
2502 fprintf_unfiltered (gdb_stdlog,
2503 "SWC: Pending event %s in %s\n",
2504 status_to_str ((int) status),
2505 target_pid_to_str (lp->ptid));
2506
2507 /* Save the sigtrap event. */
2508 lp->status = status;
2509 gdb_assert (lp->signalled);
2510 save_sigtrap (lp);
2511 }
2512 else
2513 {
2514 /* We caught the SIGSTOP that we intended to catch, so
2515 there's no SIGSTOP pending. */
2516
2517 if (debug_linux_nat)
2518 fprintf_unfiltered (gdb_stdlog,
2519 "SWC: Delayed SIGSTOP caught for %s.\n",
2520 target_pid_to_str (lp->ptid));
2521
2522 /* Reset SIGNALLED only after the stop_wait_callback call
2523 above as it does gdb_assert on SIGNALLED. */
2524 lp->signalled = 0;
2525 }
2526 }
2527
2528 return 0;
2529 }
2530
2531 /* Return non-zero if LP has a wait status pending. Discard the
2532 pending event and resume the LWP if the event that originally
2533 caused the stop became uninteresting. */
2534
2535 static int
2536 status_callback (struct lwp_info *lp, void *data)
2537 {
2538 /* Only report a pending wait status if we pretend that this has
2539 indeed been resumed. */
2540 if (!lp->resumed)
2541 return 0;
2542
2543 if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT
2544 || lp->stop_reason == LWP_STOPPED_BY_HW_BREAKPOINT)
2545 {
2546 struct regcache *regcache = get_thread_regcache (lp->ptid);
2547 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2548 CORE_ADDR pc;
2549 int discard = 0;
2550
2551 gdb_assert (lp->status != 0);
2552
2553 pc = regcache_read_pc (regcache);
2554
2555 if (pc != lp->stop_pc)
2556 {
2557 if (debug_linux_nat)
2558 fprintf_unfiltered (gdb_stdlog,
2559 "SC: PC of %s changed. was=%s, now=%s\n",
2560 target_pid_to_str (lp->ptid),
2561 paddress (target_gdbarch (), lp->stop_pc),
2562 paddress (target_gdbarch (), pc));
2563 discard = 1;
2564 }
2565 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2566 {
2567 if (debug_linux_nat)
2568 fprintf_unfiltered (gdb_stdlog,
2569 "SC: previous breakpoint of %s, at %s gone\n",
2570 target_pid_to_str (lp->ptid),
2571 paddress (target_gdbarch (), lp->stop_pc));
2572
2573 discard = 1;
2574 }
2575
2576 if (discard)
2577 {
2578 if (debug_linux_nat)
2579 fprintf_unfiltered (gdb_stdlog,
2580 "SC: pending event of %s cancelled.\n",
2581 target_pid_to_str (lp->ptid));
2582
2583 lp->status = 0;
2584 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2585 return 0;
2586 }
2587 return 1;
2588 }
2589
2590 return lwp_status_pending_p (lp);
2591 }
2592
2593 /* Return non-zero if LP isn't stopped. */
2594
2595 static int
2596 running_callback (struct lwp_info *lp, void *data)
2597 {
2598 return (!lp->stopped
2599 || (lwp_status_pending_p (lp) && lp->resumed));
2600 }
2601
2602 /* Count the LWP's that have had events. */
2603
2604 static int
2605 count_events_callback (struct lwp_info *lp, void *data)
2606 {
2607 int *count = data;
2608
2609 gdb_assert (count != NULL);
2610
2611 /* Select only resumed LWPs that have an event pending. */
2612 if (lp->resumed && lwp_status_pending_p (lp))
2613 (*count)++;
2614
2615 return 0;
2616 }
2617
2618 /* Select the LWP (if any) that is currently being single-stepped. */
2619
2620 static int
2621 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2622 {
2623 if (lp->last_resume_kind == resume_step
2624 && lp->status != 0)
2625 return 1;
2626 else
2627 return 0;
2628 }
2629
2630 /* Returns true if LP has a status pending. */
2631
2632 static int
2633 lwp_status_pending_p (struct lwp_info *lp)
2634 {
2635 /* We check for lp->waitstatus in addition to lp->status, because we
2636 can have pending process exits recorded in lp->status and
2637 W_EXITCODE(0,0) happens to be 0. */
2638 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2639 }
2640
2641 /* Select the Nth LWP that has had a SIGTRAP event. */
2642
2643 static int
2644 select_event_lwp_callback (struct lwp_info *lp, void *data)
2645 {
2646 int *selector = data;
2647
2648 gdb_assert (selector != NULL);
2649
2650 /* Select only resumed LWPs that have an event pending. */
2651 if (lp->resumed && lwp_status_pending_p (lp))
2652 if ((*selector)-- == 0)
2653 return 1;
2654
2655 return 0;
2656 }
2657
2658 /* Called when the LWP got a signal/trap that could be explained by a
2659 software or hardware breakpoint. */
2660
2661 static int
2662 check_stopped_by_breakpoint (struct lwp_info *lp)
2663 {
2664 /* Arrange for a breakpoint to be hit again later. We don't keep
2665 the SIGTRAP status and don't forward the SIGTRAP signal to the
2666 LWP. We will handle the current event, eventually we will resume
2667 this LWP, and this breakpoint will trap again.
2668
2669 If we do not do this, then we run the risk that the user will
2670 delete or disable the breakpoint, but the LWP will have already
2671 tripped on it. */
2672
2673 struct regcache *regcache = get_thread_regcache (lp->ptid);
2674 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2675 CORE_ADDR pc;
2676 CORE_ADDR sw_bp_pc;
2677
2678 pc = regcache_read_pc (regcache);
2679 sw_bp_pc = pc - target_decr_pc_after_break (gdbarch);
2680
2681 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2682 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2683 sw_bp_pc))
2684 {
2685 /* The LWP was either continued, or stepped a software
2686 breakpoint instruction. */
2687 if (debug_linux_nat)
2688 fprintf_unfiltered (gdb_stdlog,
2689 "CB: Push back software breakpoint for %s\n",
2690 target_pid_to_str (lp->ptid));
2691
2692 /* Back up the PC if necessary. */
2693 if (pc != sw_bp_pc)
2694 regcache_write_pc (regcache, sw_bp_pc);
2695
2696 lp->stop_pc = sw_bp_pc;
2697 lp->stop_reason = LWP_STOPPED_BY_SW_BREAKPOINT;
2698 return 1;
2699 }
2700
2701 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2702 {
2703 if (debug_linux_nat)
2704 fprintf_unfiltered (gdb_stdlog,
2705 "CB: Push back hardware breakpoint for %s\n",
2706 target_pid_to_str (lp->ptid));
2707
2708 lp->stop_pc = pc;
2709 lp->stop_reason = LWP_STOPPED_BY_HW_BREAKPOINT;
2710 return 1;
2711 }
2712
2713 return 0;
2714 }
2715
2716 /* Select one LWP out of those that have events pending. */
2717
2718 static void
2719 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
2720 {
2721 int num_events = 0;
2722 int random_selector;
2723 struct lwp_info *event_lp = NULL;
2724
2725 /* Record the wait status for the original LWP. */
2726 (*orig_lp)->status = *status;
2727
2728 /* In all-stop, give preference to the LWP that is being
2729 single-stepped. There will be at most one, and it will be the
2730 LWP that the core is most interested in. If we didn't do this,
2731 then we'd have to handle pending step SIGTRAPs somehow in case
2732 the core later continues the previously-stepped thread, as
2733 otherwise we'd report the pending SIGTRAP then, and the core, not
2734 having stepped the thread, wouldn't understand what the trap was
2735 for, and therefore would report it to the user as a random
2736 signal. */
2737 if (!non_stop)
2738 {
2739 event_lp = iterate_over_lwps (filter,
2740 select_singlestep_lwp_callback, NULL);
2741 if (event_lp != NULL)
2742 {
2743 if (debug_linux_nat)
2744 fprintf_unfiltered (gdb_stdlog,
2745 "SEL: Select single-step %s\n",
2746 target_pid_to_str (event_lp->ptid));
2747 }
2748 }
2749
2750 if (event_lp == NULL)
2751 {
2752 /* Pick one at random, out of those which have had events. */
2753
2754 /* First see how many events we have. */
2755 iterate_over_lwps (filter, count_events_callback, &num_events);
2756
2757 /* Now randomly pick a LWP out of those that have had
2758 events. */
2759 random_selector = (int)
2760 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2761
2762 if (debug_linux_nat && num_events > 1)
2763 fprintf_unfiltered (gdb_stdlog,
2764 "SEL: Found %d events, selecting #%d\n",
2765 num_events, random_selector);
2766
2767 event_lp = iterate_over_lwps (filter,
2768 select_event_lwp_callback,
2769 &random_selector);
2770 }
2771
2772 if (event_lp != NULL)
2773 {
2774 /* Switch the event LWP. */
2775 *orig_lp = event_lp;
2776 *status = event_lp->status;
2777 }
2778
2779 /* Flush the wait status for the event LWP. */
2780 (*orig_lp)->status = 0;
2781 }
2782
2783 /* Return non-zero if LP has been resumed. */
2784
2785 static int
2786 resumed_callback (struct lwp_info *lp, void *data)
2787 {
2788 return lp->resumed;
2789 }
2790
2791 /* Stop an active thread, verify it still exists, then resume it. If
2792 the thread ends up with a pending status, then it is not resumed,
2793 and *DATA (really a pointer to int), is set. */
2794
2795 static int
2796 stop_and_resume_callback (struct lwp_info *lp, void *data)
2797 {
2798 if (!lp->stopped)
2799 {
2800 ptid_t ptid = lp->ptid;
2801
2802 stop_callback (lp, NULL);
2803 stop_wait_callback (lp, NULL);
2804
2805 /* Resume if the lwp still exists, and the core wanted it
2806 running. */
2807 lp = find_lwp_pid (ptid);
2808 if (lp != NULL)
2809 {
2810 if (lp->last_resume_kind == resume_stop
2811 && !lwp_status_pending_p (lp))
2812 {
2813 /* The core wanted the LWP to stop. Even if it stopped
2814 cleanly (with SIGSTOP), leave the event pending. */
2815 if (debug_linux_nat)
2816 fprintf_unfiltered (gdb_stdlog,
2817 "SARC: core wanted LWP %ld stopped "
2818 "(leaving SIGSTOP pending)\n",
2819 ptid_get_lwp (lp->ptid));
2820 lp->status = W_STOPCODE (SIGSTOP);
2821 }
2822
2823 if (!lwp_status_pending_p (lp))
2824 {
2825 if (debug_linux_nat)
2826 fprintf_unfiltered (gdb_stdlog,
2827 "SARC: re-resuming LWP %ld\n",
2828 ptid_get_lwp (lp->ptid));
2829 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
2830 }
2831 else
2832 {
2833 if (debug_linux_nat)
2834 fprintf_unfiltered (gdb_stdlog,
2835 "SARC: not re-resuming LWP %ld "
2836 "(has pending)\n",
2837 ptid_get_lwp (lp->ptid));
2838 }
2839 }
2840 }
2841 return 0;
2842 }
2843
2844 /* Check if we should go on and pass this event to common code.
2845 Return the affected lwp if we are, or NULL otherwise. */
2846
2847 static struct lwp_info *
2848 linux_nat_filter_event (int lwpid, int status)
2849 {
2850 struct lwp_info *lp;
2851 int event = linux_ptrace_get_extended_event (status);
2852
2853 lp = find_lwp_pid (pid_to_ptid (lwpid));
2854
2855 /* Check for stop events reported by a process we didn't already
2856 know about - anything not already in our LWP list.
2857
2858 If we're expecting to receive stopped processes after
2859 fork, vfork, and clone events, then we'll just add the
2860 new one to our list and go back to waiting for the event
2861 to be reported - the stopped process might be returned
2862 from waitpid before or after the event is.
2863
2864 But note the case of a non-leader thread exec'ing after the
2865 leader having exited, and gone from our lists. The non-leader
2866 thread changes its tid to the tgid. */
2867
2868 if (WIFSTOPPED (status) && lp == NULL
2869 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
2870 {
2871 /* A multi-thread exec after we had seen the leader exiting. */
2872 if (debug_linux_nat)
2873 fprintf_unfiltered (gdb_stdlog,
2874 "LLW: Re-adding thread group leader LWP %d.\n",
2875 lwpid);
2876
2877 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
2878 lp->stopped = 1;
2879 lp->resumed = 1;
2880 add_thread (lp->ptid);
2881 }
2882
2883 if (WIFSTOPPED (status) && !lp)
2884 {
2885 if (debug_linux_nat)
2886 fprintf_unfiltered (gdb_stdlog,
2887 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
2888 (long) lwpid, status_to_str (status));
2889 add_to_pid_list (&stopped_pids, lwpid, status);
2890 return NULL;
2891 }
2892
2893 /* Make sure we don't report an event for the exit of an LWP not in
2894 our list, i.e. not part of the current process. This can happen
2895 if we detach from a program we originally forked and then it
2896 exits. */
2897 if (!WIFSTOPPED (status) && !lp)
2898 return NULL;
2899
2900 /* This LWP is stopped now. (And if dead, this prevents it from
2901 ever being continued.) */
2902 lp->stopped = 1;
2903
2904 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2905 {
2906 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2907
2908 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2909 lp->must_set_ptrace_flags = 0;
2910 }
2911
2912 /* Handle GNU/Linux's syscall SIGTRAPs. */
2913 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2914 {
2915 /* No longer need the sysgood bit. The ptrace event ends up
2916 recorded in lp->waitstatus if we care for it. We can carry
2917 on handling the event like a regular SIGTRAP from here
2918 on. */
2919 status = W_STOPCODE (SIGTRAP);
2920 if (linux_handle_syscall_trap (lp, 0))
2921 return NULL;
2922 }
2923
2924 /* Handle GNU/Linux's extended waitstatus for trace events. */
2925 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2926 && linux_is_extended_waitstatus (status))
2927 {
2928 if (debug_linux_nat)
2929 fprintf_unfiltered (gdb_stdlog,
2930 "LLW: Handling extended status 0x%06x\n",
2931 status);
2932 if (linux_handle_extended_wait (lp, status, 0))
2933 return NULL;
2934 }
2935
2936 /* Check if the thread has exited. */
2937 if (WIFEXITED (status) || WIFSIGNALED (status))
2938 {
2939 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
2940 {
2941 /* If this is the main thread, we must stop all threads and
2942 verify if they are still alive. This is because in the
2943 nptl thread model on Linux 2.4, there is no signal issued
2944 for exiting LWPs other than the main thread. We only get
2945 the main thread exit signal once all child threads have
2946 already exited. If we stop all the threads and use the
2947 stop_wait_callback to check if they have exited we can
2948 determine whether this signal should be ignored or
2949 whether it means the end of the debugged application,
2950 regardless of which threading model is being used. */
2951 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2952 {
2953 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
2954 stop_and_resume_callback, NULL);
2955 }
2956
2957 if (debug_linux_nat)
2958 fprintf_unfiltered (gdb_stdlog,
2959 "LLW: %s exited.\n",
2960 target_pid_to_str (lp->ptid));
2961
2962 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
2963 {
2964 /* If there is at least one more LWP, then the exit signal
2965 was not the end of the debugged application and should be
2966 ignored. */
2967 exit_lwp (lp);
2968 return NULL;
2969 }
2970 }
2971
2972 gdb_assert (lp->resumed);
2973
2974 if (debug_linux_nat)
2975 fprintf_unfiltered (gdb_stdlog,
2976 "Process %ld exited\n",
2977 ptid_get_lwp (lp->ptid));
2978
2979 /* This was the last lwp in the process. Since events are
2980 serialized to GDB core, we may not be able report this one
2981 right now, but GDB core and the other target layers will want
2982 to be notified about the exit code/signal, leave the status
2983 pending for the next time we're able to report it. */
2984
2985 /* Dead LWP's aren't expected to reported a pending sigstop. */
2986 lp->signalled = 0;
2987
2988 /* Store the pending event in the waitstatus, because
2989 W_EXITCODE(0,0) == 0. */
2990 store_waitstatus (&lp->waitstatus, status);
2991 return lp;
2992 }
2993
2994 /* Check if the current LWP has previously exited. In the nptl
2995 thread model, LWPs other than the main thread do not issue
2996 signals when they exit so we must check whenever the thread has
2997 stopped. A similar check is made in stop_wait_callback(). */
2998 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
2999 {
3000 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
3001
3002 if (debug_linux_nat)
3003 fprintf_unfiltered (gdb_stdlog,
3004 "LLW: %s exited.\n",
3005 target_pid_to_str (lp->ptid));
3006
3007 exit_lwp (lp);
3008
3009 /* Make sure there is at least one thread running. */
3010 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3011
3012 /* Discard the event. */
3013 return NULL;
3014 }
3015
3016 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3017 an attempt to stop an LWP. */
3018 if (lp->signalled
3019 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3020 {
3021 if (debug_linux_nat)
3022 fprintf_unfiltered (gdb_stdlog,
3023 "LLW: Delayed SIGSTOP caught for %s.\n",
3024 target_pid_to_str (lp->ptid));
3025
3026 lp->signalled = 0;
3027
3028 if (lp->last_resume_kind != resume_stop)
3029 {
3030 /* This is a delayed SIGSTOP. */
3031
3032 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3033 if (debug_linux_nat)
3034 fprintf_unfiltered (gdb_stdlog,
3035 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3036 lp->step ?
3037 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3038 target_pid_to_str (lp->ptid));
3039
3040 gdb_assert (lp->resumed);
3041
3042 /* Discard the event. */
3043 return NULL;
3044 }
3045 }
3046
3047 /* Make sure we don't report a SIGINT that we have already displayed
3048 for another thread. */
3049 if (lp->ignore_sigint
3050 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3051 {
3052 if (debug_linux_nat)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "LLW: Delayed SIGINT caught for %s.\n",
3055 target_pid_to_str (lp->ptid));
3056
3057 /* This is a delayed SIGINT. */
3058 lp->ignore_sigint = 0;
3059
3060 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3061 if (debug_linux_nat)
3062 fprintf_unfiltered (gdb_stdlog,
3063 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3064 lp->step ?
3065 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3066 target_pid_to_str (lp->ptid));
3067 gdb_assert (lp->resumed);
3068
3069 /* Discard the event. */
3070 return NULL;
3071 }
3072
3073 /* Don't report signals that GDB isn't interested in, such as
3074 signals that are neither printed nor stopped upon. Stopping all
3075 threads can be a bit time-consuming so if we want decent
3076 performance with heavily multi-threaded programs, especially when
3077 they're using a high frequency timer, we'd better avoid it if we
3078 can. */
3079 if (WIFSTOPPED (status))
3080 {
3081 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3082
3083 if (!non_stop)
3084 {
3085 /* Only do the below in all-stop, as we currently use SIGSTOP
3086 to implement target_stop (see linux_nat_stop) in
3087 non-stop. */
3088 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3089 {
3090 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3091 forwarded to the entire process group, that is, all LWPs
3092 will receive it - unless they're using CLONE_THREAD to
3093 share signals. Since we only want to report it once, we
3094 mark it as ignored for all LWPs except this one. */
3095 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3096 set_ignore_sigint, NULL);
3097 lp->ignore_sigint = 0;
3098 }
3099 else
3100 maybe_clear_ignore_sigint (lp);
3101 }
3102
3103 /* When using hardware single-step, we need to report every signal.
3104 Otherwise, signals in pass_mask may be short-circuited
3105 except signals that might be caused by a breakpoint. */
3106 if (!lp->step
3107 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3108 && !linux_wstatus_maybe_breakpoint (status))
3109 {
3110 linux_resume_one_lwp (lp, lp->step, signo);
3111 if (debug_linux_nat)
3112 fprintf_unfiltered (gdb_stdlog,
3113 "LLW: %s %s, %s (preempt 'handle')\n",
3114 lp->step ?
3115 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3116 target_pid_to_str (lp->ptid),
3117 (signo != GDB_SIGNAL_0
3118 ? strsignal (gdb_signal_to_host (signo))
3119 : "0"));
3120 return NULL;
3121 }
3122 }
3123
3124 /* An interesting event. */
3125 gdb_assert (lp);
3126 lp->status = status;
3127 save_sigtrap (lp);
3128 return lp;
3129 }
3130
3131 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3132 their exits until all other threads in the group have exited. */
3133
3134 static void
3135 check_zombie_leaders (void)
3136 {
3137 struct inferior *inf;
3138
3139 ALL_INFERIORS (inf)
3140 {
3141 struct lwp_info *leader_lp;
3142
3143 if (inf->pid == 0)
3144 continue;
3145
3146 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3147 if (leader_lp != NULL
3148 /* Check if there are other threads in the group, as we may
3149 have raced with the inferior simply exiting. */
3150 && num_lwps (inf->pid) > 1
3151 && linux_proc_pid_is_zombie (inf->pid))
3152 {
3153 if (debug_linux_nat)
3154 fprintf_unfiltered (gdb_stdlog,
3155 "CZL: Thread group leader %d zombie "
3156 "(it exited, or another thread execd).\n",
3157 inf->pid);
3158
3159 /* A leader zombie can mean one of two things:
3160
3161 - It exited, and there's an exit status pending
3162 available, or only the leader exited (not the whole
3163 program). In the latter case, we can't waitpid the
3164 leader's exit status until all other threads are gone.
3165
3166 - There are 3 or more threads in the group, and a thread
3167 other than the leader exec'd. On an exec, the Linux
3168 kernel destroys all other threads (except the execing
3169 one) in the thread group, and resets the execing thread's
3170 tid to the tgid. No exit notification is sent for the
3171 execing thread -- from the ptracer's perspective, it
3172 appears as though the execing thread just vanishes.
3173 Until we reap all other threads except the leader and the
3174 execing thread, the leader will be zombie, and the
3175 execing thread will be in `D (disc sleep)'. As soon as
3176 all other threads are reaped, the execing thread changes
3177 it's tid to the tgid, and the previous (zombie) leader
3178 vanishes, giving place to the "new" leader. We could try
3179 distinguishing the exit and exec cases, by waiting once
3180 more, and seeing if something comes out, but it doesn't
3181 sound useful. The previous leader _does_ go away, and
3182 we'll re-add the new one once we see the exec event
3183 (which is just the same as what would happen if the
3184 previous leader did exit voluntarily before some other
3185 thread execs). */
3186
3187 if (debug_linux_nat)
3188 fprintf_unfiltered (gdb_stdlog,
3189 "CZL: Thread group leader %d vanished.\n",
3190 inf->pid);
3191 exit_lwp (leader_lp);
3192 }
3193 }
3194 }
3195
3196 static ptid_t
3197 linux_nat_wait_1 (struct target_ops *ops,
3198 ptid_t ptid, struct target_waitstatus *ourstatus,
3199 int target_options)
3200 {
3201 sigset_t prev_mask;
3202 enum resume_kind last_resume_kind;
3203 struct lwp_info *lp;
3204 int status;
3205
3206 if (debug_linux_nat)
3207 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3208
3209 /* The first time we get here after starting a new inferior, we may
3210 not have added it to the LWP list yet - this is the earliest
3211 moment at which we know its PID. */
3212 if (ptid_is_pid (inferior_ptid))
3213 {
3214 /* Upgrade the main thread's ptid. */
3215 thread_change_ptid (inferior_ptid,
3216 ptid_build (ptid_get_pid (inferior_ptid),
3217 ptid_get_pid (inferior_ptid), 0));
3218
3219 lp = add_initial_lwp (inferior_ptid);
3220 lp->resumed = 1;
3221 }
3222
3223 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3224 block_child_signals (&prev_mask);
3225
3226 /* First check if there is a LWP with a wait status pending. */
3227 lp = iterate_over_lwps (ptid, status_callback, NULL);
3228 if (lp != NULL)
3229 {
3230 if (debug_linux_nat)
3231 fprintf_unfiltered (gdb_stdlog,
3232 "LLW: Using pending wait status %s for %s.\n",
3233 status_to_str (lp->status),
3234 target_pid_to_str (lp->ptid));
3235 }
3236
3237 if (!target_is_async_p ())
3238 {
3239 /* Causes SIGINT to be passed on to the attached process. */
3240 set_sigint_trap ();
3241 }
3242
3243 /* But if we don't find a pending event, we'll have to wait. Always
3244 pull all events out of the kernel. We'll randomly select an
3245 event LWP out of all that have events, to prevent starvation. */
3246
3247 while (lp == NULL)
3248 {
3249 pid_t lwpid;
3250
3251 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3252 quirks:
3253
3254 - If the thread group leader exits while other threads in the
3255 thread group still exist, waitpid(TGID, ...) hangs. That
3256 waitpid won't return an exit status until the other threads
3257 in the group are reapped.
3258
3259 - When a non-leader thread execs, that thread just vanishes
3260 without reporting an exit (so we'd hang if we waited for it
3261 explicitly in that case). The exec event is reported to
3262 the TGID pid. */
3263
3264 errno = 0;
3265 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3266 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3267 lwpid = my_waitpid (-1, &status, WNOHANG);
3268
3269 if (debug_linux_nat)
3270 fprintf_unfiltered (gdb_stdlog,
3271 "LNW: waitpid(-1, ...) returned %d, %s\n",
3272 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3273
3274 if (lwpid > 0)
3275 {
3276 if (debug_linux_nat)
3277 {
3278 fprintf_unfiltered (gdb_stdlog,
3279 "LLW: waitpid %ld received %s\n",
3280 (long) lwpid, status_to_str (status));
3281 }
3282
3283 linux_nat_filter_event (lwpid, status);
3284 /* Retry until nothing comes out of waitpid. A single
3285 SIGCHLD can indicate more than one child stopped. */
3286 continue;
3287 }
3288
3289 /* Now that we've pulled all events out of the kernel, resume
3290 LWPs that don't have an interesting event to report. */
3291 iterate_over_lwps (minus_one_ptid,
3292 resume_stopped_resumed_lwps, &minus_one_ptid);
3293
3294 /* ... and find an LWP with a status to report to the core, if
3295 any. */
3296 lp = iterate_over_lwps (ptid, status_callback, NULL);
3297 if (lp != NULL)
3298 break;
3299
3300 /* Check for zombie thread group leaders. Those can't be reaped
3301 until all other threads in the thread group are. */
3302 check_zombie_leaders ();
3303
3304 /* If there are no resumed children left, bail. We'd be stuck
3305 forever in the sigsuspend call below otherwise. */
3306 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3307 {
3308 if (debug_linux_nat)
3309 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3310
3311 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3312
3313 if (!target_is_async_p ())
3314 clear_sigint_trap ();
3315
3316 restore_child_signals_mask (&prev_mask);
3317 return minus_one_ptid;
3318 }
3319
3320 /* No interesting event to report to the core. */
3321
3322 if (target_options & TARGET_WNOHANG)
3323 {
3324 if (debug_linux_nat)
3325 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3326
3327 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3328 restore_child_signals_mask (&prev_mask);
3329 return minus_one_ptid;
3330 }
3331
3332 /* We shouldn't end up here unless we want to try again. */
3333 gdb_assert (lp == NULL);
3334
3335 /* Block until we get an event reported with SIGCHLD. */
3336 if (debug_linux_nat)
3337 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
3338 sigsuspend (&suspend_mask);
3339 }
3340
3341 if (!target_is_async_p ())
3342 clear_sigint_trap ();
3343
3344 gdb_assert (lp);
3345
3346 status = lp->status;
3347 lp->status = 0;
3348
3349 if (!non_stop)
3350 {
3351 /* Now stop all other LWP's ... */
3352 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3353
3354 /* ... and wait until all of them have reported back that
3355 they're no longer running. */
3356 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3357 }
3358
3359 /* If we're not waiting for a specific LWP, choose an event LWP from
3360 among those that have had events. Giving equal priority to all
3361 LWPs that have had events helps prevent starvation. */
3362 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3363 select_event_lwp (ptid, &lp, &status);
3364
3365 gdb_assert (lp != NULL);
3366
3367 /* Now that we've selected our final event LWP, un-adjust its PC if
3368 it was a software breakpoint. */
3369 if (lp->stop_reason == LWP_STOPPED_BY_SW_BREAKPOINT)
3370 {
3371 struct regcache *regcache = get_thread_regcache (lp->ptid);
3372 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3373 int decr_pc = target_decr_pc_after_break (gdbarch);
3374
3375 if (decr_pc != 0)
3376 {
3377 CORE_ADDR pc;
3378
3379 pc = regcache_read_pc (regcache);
3380 regcache_write_pc (regcache, pc + decr_pc);
3381 }
3382 }
3383
3384 /* We'll need this to determine whether to report a SIGSTOP as
3385 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3386 clears it. */
3387 last_resume_kind = lp->last_resume_kind;
3388
3389 if (!non_stop)
3390 {
3391 /* In all-stop, from the core's perspective, all LWPs are now
3392 stopped until a new resume action is sent over. */
3393 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3394 }
3395 else
3396 {
3397 resume_clear_callback (lp, NULL);
3398 }
3399
3400 if (linux_nat_status_is_event (status))
3401 {
3402 if (debug_linux_nat)
3403 fprintf_unfiltered (gdb_stdlog,
3404 "LLW: trap ptid is %s.\n",
3405 target_pid_to_str (lp->ptid));
3406 }
3407
3408 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3409 {
3410 *ourstatus = lp->waitstatus;
3411 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3412 }
3413 else
3414 store_waitstatus (ourstatus, status);
3415
3416 if (debug_linux_nat)
3417 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3418
3419 restore_child_signals_mask (&prev_mask);
3420
3421 if (last_resume_kind == resume_stop
3422 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3423 && WSTOPSIG (status) == SIGSTOP)
3424 {
3425 /* A thread that has been requested to stop by GDB with
3426 target_stop, and it stopped cleanly, so report as SIG0. The
3427 use of SIGSTOP is an implementation detail. */
3428 ourstatus->value.sig = GDB_SIGNAL_0;
3429 }
3430
3431 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3432 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3433 lp->core = -1;
3434 else
3435 lp->core = linux_common_core_of_thread (lp->ptid);
3436
3437 return lp->ptid;
3438 }
3439
3440 /* Resume LWPs that are currently stopped without any pending status
3441 to report, but are resumed from the core's perspective. */
3442
3443 static int
3444 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3445 {
3446 ptid_t *wait_ptid_p = data;
3447
3448 if (lp->stopped
3449 && lp->resumed
3450 && !lwp_status_pending_p (lp))
3451 {
3452 struct regcache *regcache = get_thread_regcache (lp->ptid);
3453 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3454 CORE_ADDR pc = regcache_read_pc (regcache);
3455
3456 /* Don't bother if there's a breakpoint at PC that we'd hit
3457 immediately, and we're not waiting for this LWP. */
3458 if (!ptid_match (lp->ptid, *wait_ptid_p))
3459 {
3460 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3461 return 0;
3462 }
3463
3464 if (debug_linux_nat)
3465 fprintf_unfiltered (gdb_stdlog,
3466 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3467 target_pid_to_str (lp->ptid),
3468 paddress (gdbarch, pc),
3469 lp->step);
3470
3471 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
3472 }
3473
3474 return 0;
3475 }
3476
3477 static ptid_t
3478 linux_nat_wait (struct target_ops *ops,
3479 ptid_t ptid, struct target_waitstatus *ourstatus,
3480 int target_options)
3481 {
3482 ptid_t event_ptid;
3483
3484 if (debug_linux_nat)
3485 {
3486 char *options_string;
3487
3488 options_string = target_options_to_string (target_options);
3489 fprintf_unfiltered (gdb_stdlog,
3490 "linux_nat_wait: [%s], [%s]\n",
3491 target_pid_to_str (ptid),
3492 options_string);
3493 xfree (options_string);
3494 }
3495
3496 /* Flush the async file first. */
3497 if (target_is_async_p ())
3498 async_file_flush ();
3499
3500 /* Resume LWPs that are currently stopped without any pending status
3501 to report, but are resumed from the core's perspective. LWPs get
3502 in this state if we find them stopping at a time we're not
3503 interested in reporting the event (target_wait on a
3504 specific_process, for example, see linux_nat_wait_1), and
3505 meanwhile the event became uninteresting. Don't bother resuming
3506 LWPs we're not going to wait for if they'd stop immediately. */
3507 if (non_stop)
3508 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3509
3510 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3511
3512 /* If we requested any event, and something came out, assume there
3513 may be more. If we requested a specific lwp or process, also
3514 assume there may be more. */
3515 if (target_is_async_p ()
3516 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3517 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3518 || !ptid_equal (ptid, minus_one_ptid)))
3519 async_file_mark ();
3520
3521 return event_ptid;
3522 }
3523
3524 static int
3525 kill_callback (struct lwp_info *lp, void *data)
3526 {
3527 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3528
3529 errno = 0;
3530 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
3531 if (debug_linux_nat)
3532 {
3533 int save_errno = errno;
3534
3535 fprintf_unfiltered (gdb_stdlog,
3536 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3537 target_pid_to_str (lp->ptid),
3538 save_errno ? safe_strerror (save_errno) : "OK");
3539 }
3540
3541 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3542
3543 errno = 0;
3544 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
3545 if (debug_linux_nat)
3546 {
3547 int save_errno = errno;
3548
3549 fprintf_unfiltered (gdb_stdlog,
3550 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3551 target_pid_to_str (lp->ptid),
3552 save_errno ? safe_strerror (save_errno) : "OK");
3553 }
3554
3555 return 0;
3556 }
3557
3558 static int
3559 kill_wait_callback (struct lwp_info *lp, void *data)
3560 {
3561 pid_t pid;
3562
3563 /* We must make sure that there are no pending events (delayed
3564 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3565 program doesn't interfere with any following debugging session. */
3566
3567 /* For cloned processes we must check both with __WCLONE and
3568 without, since the exit status of a cloned process isn't reported
3569 with __WCLONE. */
3570 if (lp->cloned)
3571 {
3572 do
3573 {
3574 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
3575 if (pid != (pid_t) -1)
3576 {
3577 if (debug_linux_nat)
3578 fprintf_unfiltered (gdb_stdlog,
3579 "KWC: wait %s received unknown.\n",
3580 target_pid_to_str (lp->ptid));
3581 /* The Linux kernel sometimes fails to kill a thread
3582 completely after PTRACE_KILL; that goes from the stop
3583 point in do_fork out to the one in
3584 get_signal_to_deliever and waits again. So kill it
3585 again. */
3586 kill_callback (lp, NULL);
3587 }
3588 }
3589 while (pid == ptid_get_lwp (lp->ptid));
3590
3591 gdb_assert (pid == -1 && errno == ECHILD);
3592 }
3593
3594 do
3595 {
3596 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
3597 if (pid != (pid_t) -1)
3598 {
3599 if (debug_linux_nat)
3600 fprintf_unfiltered (gdb_stdlog,
3601 "KWC: wait %s received unk.\n",
3602 target_pid_to_str (lp->ptid));
3603 /* See the call to kill_callback above. */
3604 kill_callback (lp, NULL);
3605 }
3606 }
3607 while (pid == ptid_get_lwp (lp->ptid));
3608
3609 gdb_assert (pid == -1 && errno == ECHILD);
3610 return 0;
3611 }
3612
3613 static void
3614 linux_nat_kill (struct target_ops *ops)
3615 {
3616 struct target_waitstatus last;
3617 ptid_t last_ptid;
3618 int status;
3619
3620 /* If we're stopped while forking and we haven't followed yet,
3621 kill the other task. We need to do this first because the
3622 parent will be sleeping if this is a vfork. */
3623
3624 get_last_target_status (&last_ptid, &last);
3625
3626 if (last.kind == TARGET_WAITKIND_FORKED
3627 || last.kind == TARGET_WAITKIND_VFORKED)
3628 {
3629 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
3630 wait (&status);
3631
3632 /* Let the arch-specific native code know this process is
3633 gone. */
3634 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
3635 }
3636
3637 if (forks_exist_p ())
3638 linux_fork_killall ();
3639 else
3640 {
3641 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
3642
3643 /* Stop all threads before killing them, since ptrace requires
3644 that the thread is stopped to sucessfully PTRACE_KILL. */
3645 iterate_over_lwps (ptid, stop_callback, NULL);
3646 /* ... and wait until all of them have reported back that
3647 they're no longer running. */
3648 iterate_over_lwps (ptid, stop_wait_callback, NULL);
3649
3650 /* Kill all LWP's ... */
3651 iterate_over_lwps (ptid, kill_callback, NULL);
3652
3653 /* ... and wait until we've flushed all events. */
3654 iterate_over_lwps (ptid, kill_wait_callback, NULL);
3655 }
3656
3657 target_mourn_inferior ();
3658 }
3659
3660 static void
3661 linux_nat_mourn_inferior (struct target_ops *ops)
3662 {
3663 int pid = ptid_get_pid (inferior_ptid);
3664
3665 purge_lwp_list (pid);
3666
3667 if (! forks_exist_p ())
3668 /* Normal case, no other forks available. */
3669 linux_ops->to_mourn_inferior (ops);
3670 else
3671 /* Multi-fork case. The current inferior_ptid has exited, but
3672 there are other viable forks to debug. Delete the exiting
3673 one and context-switch to the first available. */
3674 linux_fork_mourn_inferior ();
3675
3676 /* Let the arch-specific native code know this process is gone. */
3677 linux_nat_forget_process (pid);
3678 }
3679
3680 /* Convert a native/host siginfo object, into/from the siginfo in the
3681 layout of the inferiors' architecture. */
3682
3683 static void
3684 siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
3685 {
3686 int done = 0;
3687
3688 if (linux_nat_siginfo_fixup != NULL)
3689 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3690
3691 /* If there was no callback, or the callback didn't do anything,
3692 then just do a straight memcpy. */
3693 if (!done)
3694 {
3695 if (direction == 1)
3696 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
3697 else
3698 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
3699 }
3700 }
3701
3702 static enum target_xfer_status
3703 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3704 const char *annex, gdb_byte *readbuf,
3705 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3706 ULONGEST *xfered_len)
3707 {
3708 int pid;
3709 siginfo_t siginfo;
3710 gdb_byte inf_siginfo[sizeof (siginfo_t)];
3711
3712 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3713 gdb_assert (readbuf || writebuf);
3714
3715 pid = ptid_get_lwp (inferior_ptid);
3716 if (pid == 0)
3717 pid = ptid_get_pid (inferior_ptid);
3718
3719 if (offset > sizeof (siginfo))
3720 return TARGET_XFER_E_IO;
3721
3722 errno = 0;
3723 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3724 if (errno != 0)
3725 return TARGET_XFER_E_IO;
3726
3727 /* When GDB is built as a 64-bit application, ptrace writes into
3728 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3729 inferior with a 64-bit GDB should look the same as debugging it
3730 with a 32-bit GDB, we need to convert it. GDB core always sees
3731 the converted layout, so any read/write will have to be done
3732 post-conversion. */
3733 siginfo_fixup (&siginfo, inf_siginfo, 0);
3734
3735 if (offset + len > sizeof (siginfo))
3736 len = sizeof (siginfo) - offset;
3737
3738 if (readbuf != NULL)
3739 memcpy (readbuf, inf_siginfo + offset, len);
3740 else
3741 {
3742 memcpy (inf_siginfo + offset, writebuf, len);
3743
3744 /* Convert back to ptrace layout before flushing it out. */
3745 siginfo_fixup (&siginfo, inf_siginfo, 1);
3746
3747 errno = 0;
3748 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3749 if (errno != 0)
3750 return TARGET_XFER_E_IO;
3751 }
3752
3753 *xfered_len = len;
3754 return TARGET_XFER_OK;
3755 }
3756
3757 static enum target_xfer_status
3758 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3759 const char *annex, gdb_byte *readbuf,
3760 const gdb_byte *writebuf,
3761 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
3762 {
3763 struct cleanup *old_chain;
3764 enum target_xfer_status xfer;
3765
3766 if (object == TARGET_OBJECT_SIGNAL_INFO)
3767 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3768 offset, len, xfered_len);
3769
3770 /* The target is connected but no live inferior is selected. Pass
3771 this request down to a lower stratum (e.g., the executable
3772 file). */
3773 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
3774 return TARGET_XFER_EOF;
3775
3776 old_chain = save_inferior_ptid ();
3777
3778 if (ptid_lwp_p (inferior_ptid))
3779 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
3780
3781 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3782 offset, len, xfered_len);
3783
3784 do_cleanups (old_chain);
3785 return xfer;
3786 }
3787
3788 static int
3789 linux_thread_alive (ptid_t ptid)
3790 {
3791 int err, tmp_errno;
3792
3793 gdb_assert (ptid_lwp_p (ptid));
3794
3795 /* Send signal 0 instead of anything ptrace, because ptracing a
3796 running thread errors out claiming that the thread doesn't
3797 exist. */
3798 err = kill_lwp (ptid_get_lwp (ptid), 0);
3799 tmp_errno = errno;
3800 if (debug_linux_nat)
3801 fprintf_unfiltered (gdb_stdlog,
3802 "LLTA: KILL(SIG0) %s (%s)\n",
3803 target_pid_to_str (ptid),
3804 err ? safe_strerror (tmp_errno) : "OK");
3805
3806 if (err != 0)
3807 return 0;
3808
3809 return 1;
3810 }
3811
3812 static int
3813 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3814 {
3815 return linux_thread_alive (ptid);
3816 }
3817
3818 static char *
3819 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3820 {
3821 static char buf[64];
3822
3823 if (ptid_lwp_p (ptid)
3824 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3825 || num_lwps (ptid_get_pid (ptid)) > 1))
3826 {
3827 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
3828 return buf;
3829 }
3830
3831 return normal_pid_to_str (ptid);
3832 }
3833
3834 static char *
3835 linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
3836 {
3837 int pid = ptid_get_pid (thr->ptid);
3838 long lwp = ptid_get_lwp (thr->ptid);
3839 #define FORMAT "/proc/%d/task/%ld/comm"
3840 char buf[sizeof (FORMAT) + 30];
3841 FILE *comm_file;
3842 char *result = NULL;
3843
3844 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
3845 comm_file = gdb_fopen_cloexec (buf, "r");
3846 if (comm_file)
3847 {
3848 /* Not exported by the kernel, so we define it here. */
3849 #define COMM_LEN 16
3850 static char line[COMM_LEN + 1];
3851
3852 if (fgets (line, sizeof (line), comm_file))
3853 {
3854 char *nl = strchr (line, '\n');
3855
3856 if (nl)
3857 *nl = '\0';
3858 if (*line != '\0')
3859 result = line;
3860 }
3861
3862 fclose (comm_file);
3863 }
3864
3865 #undef COMM_LEN
3866 #undef FORMAT
3867
3868 return result;
3869 }
3870
3871 /* Accepts an integer PID; Returns a string representing a file that
3872 can be opened to get the symbols for the child process. */
3873
3874 static char *
3875 linux_child_pid_to_exec_file (struct target_ops *self, int pid)
3876 {
3877 static char buf[PATH_MAX];
3878 char name[PATH_MAX];
3879
3880 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
3881 memset (buf, 0, PATH_MAX);
3882 if (readlink (name, buf, PATH_MAX - 1) <= 0)
3883 strcpy (buf, name);
3884
3885 return buf;
3886 }
3887
3888 /* Implement the to_xfer_partial interface for memory reads using the /proc
3889 filesystem. Because we can use a single read() call for /proc, this
3890 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3891 but it doesn't support writes. */
3892
3893 static enum target_xfer_status
3894 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3895 const char *annex, gdb_byte *readbuf,
3896 const gdb_byte *writebuf,
3897 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
3898 {
3899 LONGEST ret;
3900 int fd;
3901 char filename[64];
3902
3903 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3904 return 0;
3905
3906 /* Don't bother for one word. */
3907 if (len < 3 * sizeof (long))
3908 return TARGET_XFER_EOF;
3909
3910 /* We could keep this file open and cache it - possibly one per
3911 thread. That requires some juggling, but is even faster. */
3912 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3913 ptid_get_pid (inferior_ptid));
3914 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
3915 if (fd == -1)
3916 return TARGET_XFER_EOF;
3917
3918 /* If pread64 is available, use it. It's faster if the kernel
3919 supports it (only one syscall), and it's 64-bit safe even on
3920 32-bit platforms (for instance, SPARC debugging a SPARC64
3921 application). */
3922 #ifdef HAVE_PREAD64
3923 if (pread64 (fd, readbuf, len, offset) != len)
3924 #else
3925 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3926 #endif
3927 ret = 0;
3928 else
3929 ret = len;
3930
3931 close (fd);
3932
3933 if (ret == 0)
3934 return TARGET_XFER_EOF;
3935 else
3936 {
3937 *xfered_len = ret;
3938 return TARGET_XFER_OK;
3939 }
3940 }
3941
3942
3943 /* Enumerate spufs IDs for process PID. */
3944 static LONGEST
3945 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
3946 {
3947 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
3948 LONGEST pos = 0;
3949 LONGEST written = 0;
3950 char path[128];
3951 DIR *dir;
3952 struct dirent *entry;
3953
3954 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
3955 dir = opendir (path);
3956 if (!dir)
3957 return -1;
3958
3959 rewinddir (dir);
3960 while ((entry = readdir (dir)) != NULL)
3961 {
3962 struct stat st;
3963 struct statfs stfs;
3964 int fd;
3965
3966 fd = atoi (entry->d_name);
3967 if (!fd)
3968 continue;
3969
3970 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
3971 if (stat (path, &st) != 0)
3972 continue;
3973 if (!S_ISDIR (st.st_mode))
3974 continue;
3975
3976 if (statfs (path, &stfs) != 0)
3977 continue;
3978 if (stfs.f_type != SPUFS_MAGIC)
3979 continue;
3980
3981 if (pos >= offset && pos + 4 <= offset + len)
3982 {
3983 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
3984 written += 4;
3985 }
3986 pos += 4;
3987 }
3988
3989 closedir (dir);
3990 return written;
3991 }
3992
3993 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
3994 object type, using the /proc file system. */
3995
3996 static enum target_xfer_status
3997 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
3998 const char *annex, gdb_byte *readbuf,
3999 const gdb_byte *writebuf,
4000 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
4001 {
4002 char buf[128];
4003 int fd = 0;
4004 int ret = -1;
4005 int pid = ptid_get_pid (inferior_ptid);
4006
4007 if (!annex)
4008 {
4009 if (!readbuf)
4010 return TARGET_XFER_E_IO;
4011 else
4012 {
4013 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4014
4015 if (l < 0)
4016 return TARGET_XFER_E_IO;
4017 else if (l == 0)
4018 return TARGET_XFER_EOF;
4019 else
4020 {
4021 *xfered_len = (ULONGEST) l;
4022 return TARGET_XFER_OK;
4023 }
4024 }
4025 }
4026
4027 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4028 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
4029 if (fd <= 0)
4030 return TARGET_XFER_E_IO;
4031
4032 if (offset != 0
4033 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4034 {
4035 close (fd);
4036 return TARGET_XFER_EOF;
4037 }
4038
4039 if (writebuf)
4040 ret = write (fd, writebuf, (size_t) len);
4041 else if (readbuf)
4042 ret = read (fd, readbuf, (size_t) len);
4043
4044 close (fd);
4045
4046 if (ret < 0)
4047 return TARGET_XFER_E_IO;
4048 else if (ret == 0)
4049 return TARGET_XFER_EOF;
4050 else
4051 {
4052 *xfered_len = (ULONGEST) ret;
4053 return TARGET_XFER_OK;
4054 }
4055 }
4056
4057
4058 /* Parse LINE as a signal set and add its set bits to SIGS. */
4059
4060 static void
4061 add_line_to_sigset (const char *line, sigset_t *sigs)
4062 {
4063 int len = strlen (line) - 1;
4064 const char *p;
4065 int signum;
4066
4067 if (line[len] != '\n')
4068 error (_("Could not parse signal set: %s"), line);
4069
4070 p = line;
4071 signum = len * 4;
4072 while (len-- > 0)
4073 {
4074 int digit;
4075
4076 if (*p >= '0' && *p <= '9')
4077 digit = *p - '0';
4078 else if (*p >= 'a' && *p <= 'f')
4079 digit = *p - 'a' + 10;
4080 else
4081 error (_("Could not parse signal set: %s"), line);
4082
4083 signum -= 4;
4084
4085 if (digit & 1)
4086 sigaddset (sigs, signum + 1);
4087 if (digit & 2)
4088 sigaddset (sigs, signum + 2);
4089 if (digit & 4)
4090 sigaddset (sigs, signum + 3);
4091 if (digit & 8)
4092 sigaddset (sigs, signum + 4);
4093
4094 p++;
4095 }
4096 }
4097
4098 /* Find process PID's pending signals from /proc/pid/status and set
4099 SIGS to match. */
4100
4101 void
4102 linux_proc_pending_signals (int pid, sigset_t *pending,
4103 sigset_t *blocked, sigset_t *ignored)
4104 {
4105 FILE *procfile;
4106 char buffer[PATH_MAX], fname[PATH_MAX];
4107 struct cleanup *cleanup;
4108
4109 sigemptyset (pending);
4110 sigemptyset (blocked);
4111 sigemptyset (ignored);
4112 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
4113 procfile = gdb_fopen_cloexec (fname, "r");
4114 if (procfile == NULL)
4115 error (_("Could not open %s"), fname);
4116 cleanup = make_cleanup_fclose (procfile);
4117
4118 while (fgets (buffer, PATH_MAX, procfile) != NULL)
4119 {
4120 /* Normal queued signals are on the SigPnd line in the status
4121 file. However, 2.6 kernels also have a "shared" pending
4122 queue for delivering signals to a thread group, so check for
4123 a ShdPnd line also.
4124
4125 Unfortunately some Red Hat kernels include the shared pending
4126 queue but not the ShdPnd status field. */
4127
4128 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4129 add_line_to_sigset (buffer + 8, pending);
4130 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4131 add_line_to_sigset (buffer + 8, pending);
4132 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4133 add_line_to_sigset (buffer + 8, blocked);
4134 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4135 add_line_to_sigset (buffer + 8, ignored);
4136 }
4137
4138 do_cleanups (cleanup);
4139 }
4140
4141 static enum target_xfer_status
4142 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4143 const char *annex, gdb_byte *readbuf,
4144 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4145 ULONGEST *xfered_len)
4146 {
4147 gdb_assert (object == TARGET_OBJECT_OSDATA);
4148
4149 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4150 if (*xfered_len == 0)
4151 return TARGET_XFER_EOF;
4152 else
4153 return TARGET_XFER_OK;
4154 }
4155
4156 static enum target_xfer_status
4157 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4158 const char *annex, gdb_byte *readbuf,
4159 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4160 ULONGEST *xfered_len)
4161 {
4162 enum target_xfer_status xfer;
4163
4164 if (object == TARGET_OBJECT_AUXV)
4165 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
4166 offset, len, xfered_len);
4167
4168 if (object == TARGET_OBJECT_OSDATA)
4169 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4170 offset, len, xfered_len);
4171
4172 if (object == TARGET_OBJECT_SPU)
4173 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
4174 offset, len, xfered_len);
4175
4176 /* GDB calculates all the addresses in possibly larget width of the address.
4177 Address width needs to be masked before its final use - either by
4178 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4179
4180 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4181
4182 if (object == TARGET_OBJECT_MEMORY)
4183 {
4184 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
4185
4186 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4187 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4188 }
4189
4190 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4191 offset, len, xfered_len);
4192 if (xfer != TARGET_XFER_EOF)
4193 return xfer;
4194
4195 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4196 offset, len, xfered_len);
4197 }
4198
4199 static void
4200 cleanup_target_stop (void *arg)
4201 {
4202 ptid_t *ptid = (ptid_t *) arg;
4203
4204 gdb_assert (arg != NULL);
4205
4206 /* Unpause all */
4207 target_resume (*ptid, 0, GDB_SIGNAL_0);
4208 }
4209
4210 static VEC(static_tracepoint_marker_p) *
4211 linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4212 const char *strid)
4213 {
4214 char s[IPA_CMD_BUF_SIZE];
4215 struct cleanup *old_chain;
4216 int pid = ptid_get_pid (inferior_ptid);
4217 VEC(static_tracepoint_marker_p) *markers = NULL;
4218 struct static_tracepoint_marker *marker = NULL;
4219 char *p = s;
4220 ptid_t ptid = ptid_build (pid, 0, 0);
4221
4222 /* Pause all */
4223 target_stop (ptid);
4224
4225 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4226 s[sizeof ("qTfSTM")] = 0;
4227
4228 agent_run_command (pid, s, strlen (s) + 1);
4229
4230 old_chain = make_cleanup (free_current_marker, &marker);
4231 make_cleanup (cleanup_target_stop, &ptid);
4232
4233 while (*p++ == 'm')
4234 {
4235 if (marker == NULL)
4236 marker = XCNEW (struct static_tracepoint_marker);
4237
4238 do
4239 {
4240 parse_static_tracepoint_marker_definition (p, &p, marker);
4241
4242 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4243 {
4244 VEC_safe_push (static_tracepoint_marker_p,
4245 markers, marker);
4246 marker = NULL;
4247 }
4248 else
4249 {
4250 release_static_tracepoint_marker (marker);
4251 memset (marker, 0, sizeof (*marker));
4252 }
4253 }
4254 while (*p++ == ','); /* comma-separated list */
4255
4256 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4257 s[sizeof ("qTsSTM")] = 0;
4258 agent_run_command (pid, s, strlen (s) + 1);
4259 p = s;
4260 }
4261
4262 do_cleanups (old_chain);
4263
4264 return markers;
4265 }
4266
4267 /* Create a prototype generic GNU/Linux target. The client can override
4268 it with local methods. */
4269
4270 static void
4271 linux_target_install_ops (struct target_ops *t)
4272 {
4273 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4274 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
4275 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4276 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
4277 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4278 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
4279 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
4280 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4281 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4282 t->to_post_attach = linux_child_post_attach;
4283 t->to_follow_fork = linux_child_follow_fork;
4284
4285 super_xfer_partial = t->to_xfer_partial;
4286 t->to_xfer_partial = linux_xfer_partial;
4287
4288 t->to_static_tracepoint_markers_by_strid
4289 = linux_child_static_tracepoint_markers_by_strid;
4290 }
4291
4292 struct target_ops *
4293 linux_target (void)
4294 {
4295 struct target_ops *t;
4296
4297 t = inf_ptrace_target ();
4298 linux_target_install_ops (t);
4299
4300 return t;
4301 }
4302
4303 struct target_ops *
4304 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4305 {
4306 struct target_ops *t;
4307
4308 t = inf_ptrace_trad_target (register_u_offset);
4309 linux_target_install_ops (t);
4310
4311 return t;
4312 }
4313
4314 /* target_is_async_p implementation. */
4315
4316 static int
4317 linux_nat_is_async_p (struct target_ops *ops)
4318 {
4319 return linux_is_async_p ();
4320 }
4321
4322 /* target_can_async_p implementation. */
4323
4324 static int
4325 linux_nat_can_async_p (struct target_ops *ops)
4326 {
4327 /* NOTE: palves 2008-03-21: We're only async when the user requests
4328 it explicitly with the "set target-async" command.
4329 Someday, linux will always be async. */
4330 return target_async_permitted;
4331 }
4332
4333 static int
4334 linux_nat_supports_non_stop (struct target_ops *self)
4335 {
4336 return 1;
4337 }
4338
4339 /* True if we want to support multi-process. To be removed when GDB
4340 supports multi-exec. */
4341
4342 int linux_multi_process = 1;
4343
4344 static int
4345 linux_nat_supports_multi_process (struct target_ops *self)
4346 {
4347 return linux_multi_process;
4348 }
4349
4350 static int
4351 linux_nat_supports_disable_randomization (struct target_ops *self)
4352 {
4353 #ifdef HAVE_PERSONALITY
4354 return 1;
4355 #else
4356 return 0;
4357 #endif
4358 }
4359
4360 static int async_terminal_is_ours = 1;
4361
4362 /* target_terminal_inferior implementation.
4363
4364 This is a wrapper around child_terminal_inferior to add async support. */
4365
4366 static void
4367 linux_nat_terminal_inferior (struct target_ops *self)
4368 {
4369 /* Like target_terminal_inferior, use target_can_async_p, not
4370 target_is_async_p, since at this point the target is not async
4371 yet. If it can async, then we know it will become async prior to
4372 resume. */
4373 if (!target_can_async_p ())
4374 {
4375 /* Async mode is disabled. */
4376 child_terminal_inferior (self);
4377 return;
4378 }
4379
4380 child_terminal_inferior (self);
4381
4382 /* Calls to target_terminal_*() are meant to be idempotent. */
4383 if (!async_terminal_is_ours)
4384 return;
4385
4386 delete_file_handler (input_fd);
4387 async_terminal_is_ours = 0;
4388 set_sigint_trap ();
4389 }
4390
4391 /* target_terminal_ours implementation.
4392
4393 This is a wrapper around child_terminal_ours to add async support (and
4394 implement the target_terminal_ours vs target_terminal_ours_for_output
4395 distinction). child_terminal_ours is currently no different than
4396 child_terminal_ours_for_output.
4397 We leave target_terminal_ours_for_output alone, leaving it to
4398 child_terminal_ours_for_output. */
4399
4400 static void
4401 linux_nat_terminal_ours (struct target_ops *self)
4402 {
4403 /* GDB should never give the terminal to the inferior if the
4404 inferior is running in the background (run&, continue&, etc.),
4405 but claiming it sure should. */
4406 child_terminal_ours (self);
4407
4408 if (async_terminal_is_ours)
4409 return;
4410
4411 clear_sigint_trap ();
4412 add_file_handler (input_fd, stdin_event_handler, 0);
4413 async_terminal_is_ours = 1;
4414 }
4415
4416 static void (*async_client_callback) (enum inferior_event_type event_type,
4417 void *context);
4418 static void *async_client_context;
4419
4420 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4421 so we notice when any child changes state, and notify the
4422 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4423 above to wait for the arrival of a SIGCHLD. */
4424
4425 static void
4426 sigchld_handler (int signo)
4427 {
4428 int old_errno = errno;
4429
4430 if (debug_linux_nat)
4431 ui_file_write_async_safe (gdb_stdlog,
4432 "sigchld\n", sizeof ("sigchld\n") - 1);
4433
4434 if (signo == SIGCHLD
4435 && linux_nat_event_pipe[0] != -1)
4436 async_file_mark (); /* Let the event loop know that there are
4437 events to handle. */
4438
4439 errno = old_errno;
4440 }
4441
4442 /* Callback registered with the target events file descriptor. */
4443
4444 static void
4445 handle_target_event (int error, gdb_client_data client_data)
4446 {
4447 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4448 }
4449
4450 /* Create/destroy the target events pipe. Returns previous state. */
4451
4452 static int
4453 linux_async_pipe (int enable)
4454 {
4455 int previous = linux_is_async_p ();
4456
4457 if (previous != enable)
4458 {
4459 sigset_t prev_mask;
4460
4461 /* Block child signals while we create/destroy the pipe, as
4462 their handler writes to it. */
4463 block_child_signals (&prev_mask);
4464
4465 if (enable)
4466 {
4467 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
4468 internal_error (__FILE__, __LINE__,
4469 "creating event pipe failed.");
4470
4471 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4472 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4473 }
4474 else
4475 {
4476 close (linux_nat_event_pipe[0]);
4477 close (linux_nat_event_pipe[1]);
4478 linux_nat_event_pipe[0] = -1;
4479 linux_nat_event_pipe[1] = -1;
4480 }
4481
4482 restore_child_signals_mask (&prev_mask);
4483 }
4484
4485 return previous;
4486 }
4487
4488 /* target_async implementation. */
4489
4490 static void
4491 linux_nat_async (struct target_ops *ops,
4492 void (*callback) (enum inferior_event_type event_type,
4493 void *context),
4494 void *context)
4495 {
4496 if (callback != NULL)
4497 {
4498 async_client_callback = callback;
4499 async_client_context = context;
4500 if (!linux_async_pipe (1))
4501 {
4502 add_file_handler (linux_nat_event_pipe[0],
4503 handle_target_event, NULL);
4504 /* There may be pending events to handle. Tell the event loop
4505 to poll them. */
4506 async_file_mark ();
4507 }
4508 }
4509 else
4510 {
4511 async_client_callback = callback;
4512 async_client_context = context;
4513 delete_file_handler (linux_nat_event_pipe[0]);
4514 linux_async_pipe (0);
4515 }
4516 return;
4517 }
4518
4519 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4520 event came out. */
4521
4522 static int
4523 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4524 {
4525 if (!lwp->stopped)
4526 {
4527 if (debug_linux_nat)
4528 fprintf_unfiltered (gdb_stdlog,
4529 "LNSL: running -> suspending %s\n",
4530 target_pid_to_str (lwp->ptid));
4531
4532
4533 if (lwp->last_resume_kind == resume_stop)
4534 {
4535 if (debug_linux_nat)
4536 fprintf_unfiltered (gdb_stdlog,
4537 "linux-nat: already stopping LWP %ld at "
4538 "GDB's request\n",
4539 ptid_get_lwp (lwp->ptid));
4540 return 0;
4541 }
4542
4543 stop_callback (lwp, NULL);
4544 lwp->last_resume_kind = resume_stop;
4545 }
4546 else
4547 {
4548 /* Already known to be stopped; do nothing. */
4549
4550 if (debug_linux_nat)
4551 {
4552 if (find_thread_ptid (lwp->ptid)->stop_requested)
4553 fprintf_unfiltered (gdb_stdlog,
4554 "LNSL: already stopped/stop_requested %s\n",
4555 target_pid_to_str (lwp->ptid));
4556 else
4557 fprintf_unfiltered (gdb_stdlog,
4558 "LNSL: already stopped/no "
4559 "stop_requested yet %s\n",
4560 target_pid_to_str (lwp->ptid));
4561 }
4562 }
4563 return 0;
4564 }
4565
4566 static void
4567 linux_nat_stop (struct target_ops *self, ptid_t ptid)
4568 {
4569 if (non_stop)
4570 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4571 else
4572 linux_ops->to_stop (linux_ops, ptid);
4573 }
4574
4575 static void
4576 linux_nat_close (struct target_ops *self)
4577 {
4578 /* Unregister from the event loop. */
4579 if (linux_nat_is_async_p (self))
4580 linux_nat_async (self, NULL, NULL);
4581
4582 if (linux_ops->to_close)
4583 linux_ops->to_close (linux_ops);
4584
4585 super_close (self);
4586 }
4587
4588 /* When requests are passed down from the linux-nat layer to the
4589 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4590 used. The address space pointer is stored in the inferior object,
4591 but the common code that is passed such ptid can't tell whether
4592 lwpid is a "main" process id or not (it assumes so). We reverse
4593 look up the "main" process id from the lwp here. */
4594
4595 static struct address_space *
4596 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4597 {
4598 struct lwp_info *lwp;
4599 struct inferior *inf;
4600 int pid;
4601
4602 if (ptid_get_lwp (ptid) == 0)
4603 {
4604 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4605 tgid. */
4606 lwp = find_lwp_pid (ptid);
4607 pid = ptid_get_pid (lwp->ptid);
4608 }
4609 else
4610 {
4611 /* A (pid,lwpid,0) ptid. */
4612 pid = ptid_get_pid (ptid);
4613 }
4614
4615 inf = find_inferior_pid (pid);
4616 gdb_assert (inf != NULL);
4617 return inf->aspace;
4618 }
4619
4620 /* Return the cached value of the processor core for thread PTID. */
4621
4622 static int
4623 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4624 {
4625 struct lwp_info *info = find_lwp_pid (ptid);
4626
4627 if (info)
4628 return info->core;
4629 return -1;
4630 }
4631
4632 void
4633 linux_nat_add_target (struct target_ops *t)
4634 {
4635 /* Save the provided single-threaded target. We save this in a separate
4636 variable because another target we've inherited from (e.g. inf-ptrace)
4637 may have saved a pointer to T; we want to use it for the final
4638 process stratum target. */
4639 linux_ops_saved = *t;
4640 linux_ops = &linux_ops_saved;
4641
4642 /* Override some methods for multithreading. */
4643 t->to_create_inferior = linux_nat_create_inferior;
4644 t->to_attach = linux_nat_attach;
4645 t->to_detach = linux_nat_detach;
4646 t->to_resume = linux_nat_resume;
4647 t->to_wait = linux_nat_wait;
4648 t->to_pass_signals = linux_nat_pass_signals;
4649 t->to_xfer_partial = linux_nat_xfer_partial;
4650 t->to_kill = linux_nat_kill;
4651 t->to_mourn_inferior = linux_nat_mourn_inferior;
4652 t->to_thread_alive = linux_nat_thread_alive;
4653 t->to_pid_to_str = linux_nat_pid_to_str;
4654 t->to_thread_name = linux_nat_thread_name;
4655 t->to_has_thread_control = tc_schedlock;
4656 t->to_thread_address_space = linux_nat_thread_address_space;
4657 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4658 t->to_stopped_data_address = linux_nat_stopped_data_address;
4659
4660 t->to_can_async_p = linux_nat_can_async_p;
4661 t->to_is_async_p = linux_nat_is_async_p;
4662 t->to_supports_non_stop = linux_nat_supports_non_stop;
4663 t->to_async = linux_nat_async;
4664 t->to_terminal_inferior = linux_nat_terminal_inferior;
4665 t->to_terminal_ours = linux_nat_terminal_ours;
4666
4667 super_close = t->to_close;
4668 t->to_close = linux_nat_close;
4669
4670 /* Methods for non-stop support. */
4671 t->to_stop = linux_nat_stop;
4672
4673 t->to_supports_multi_process = linux_nat_supports_multi_process;
4674
4675 t->to_supports_disable_randomization
4676 = linux_nat_supports_disable_randomization;
4677
4678 t->to_core_of_thread = linux_nat_core_of_thread;
4679
4680 /* We don't change the stratum; this target will sit at
4681 process_stratum and thread_db will set at thread_stratum. This
4682 is a little strange, since this is a multi-threaded-capable
4683 target, but we want to be on the stack below thread_db, and we
4684 also want to be used for single-threaded processes. */
4685
4686 add_target (t);
4687 }
4688
4689 /* Register a method to call whenever a new thread is attached. */
4690 void
4691 linux_nat_set_new_thread (struct target_ops *t,
4692 void (*new_thread) (struct lwp_info *))
4693 {
4694 /* Save the pointer. We only support a single registered instance
4695 of the GNU/Linux native target, so we do not need to map this to
4696 T. */
4697 linux_nat_new_thread = new_thread;
4698 }
4699
4700 /* See declaration in linux-nat.h. */
4701
4702 void
4703 linux_nat_set_new_fork (struct target_ops *t,
4704 linux_nat_new_fork_ftype *new_fork)
4705 {
4706 /* Save the pointer. */
4707 linux_nat_new_fork = new_fork;
4708 }
4709
4710 /* See declaration in linux-nat.h. */
4711
4712 void
4713 linux_nat_set_forget_process (struct target_ops *t,
4714 linux_nat_forget_process_ftype *fn)
4715 {
4716 /* Save the pointer. */
4717 linux_nat_forget_process_hook = fn;
4718 }
4719
4720 /* See declaration in linux-nat.h. */
4721
4722 void
4723 linux_nat_forget_process (pid_t pid)
4724 {
4725 if (linux_nat_forget_process_hook != NULL)
4726 linux_nat_forget_process_hook (pid);
4727 }
4728
4729 /* Register a method that converts a siginfo object between the layout
4730 that ptrace returns, and the layout in the architecture of the
4731 inferior. */
4732 void
4733 linux_nat_set_siginfo_fixup (struct target_ops *t,
4734 int (*siginfo_fixup) (siginfo_t *,
4735 gdb_byte *,
4736 int))
4737 {
4738 /* Save the pointer. */
4739 linux_nat_siginfo_fixup = siginfo_fixup;
4740 }
4741
4742 /* Register a method to call prior to resuming a thread. */
4743
4744 void
4745 linux_nat_set_prepare_to_resume (struct target_ops *t,
4746 void (*prepare_to_resume) (struct lwp_info *))
4747 {
4748 /* Save the pointer. */
4749 linux_nat_prepare_to_resume = prepare_to_resume;
4750 }
4751
4752 /* See linux-nat.h. */
4753
4754 int
4755 linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
4756 {
4757 int pid;
4758
4759 pid = ptid_get_lwp (ptid);
4760 if (pid == 0)
4761 pid = ptid_get_pid (ptid);
4762
4763 errno = 0;
4764 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4765 if (errno != 0)
4766 {
4767 memset (siginfo, 0, sizeof (*siginfo));
4768 return 0;
4769 }
4770 return 1;
4771 }
4772
4773 /* Provide a prototype to silence -Wmissing-prototypes. */
4774 extern initialize_file_ftype _initialize_linux_nat;
4775
4776 void
4777 _initialize_linux_nat (void)
4778 {
4779 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4780 &debug_linux_nat, _("\
4781 Set debugging of GNU/Linux lwp module."), _("\
4782 Show debugging of GNU/Linux lwp module."), _("\
4783 Enables printf debugging output."),
4784 NULL,
4785 show_debug_linux_nat,
4786 &setdebuglist, &showdebuglist);
4787
4788 /* Save this mask as the default. */
4789 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4790
4791 /* Install a SIGCHLD handler. */
4792 sigchld_action.sa_handler = sigchld_handler;
4793 sigemptyset (&sigchld_action.sa_mask);
4794 sigchld_action.sa_flags = SA_RESTART;
4795
4796 /* Make it the default. */
4797 sigaction (SIGCHLD, &sigchld_action, NULL);
4798
4799 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4800 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4801 sigdelset (&suspend_mask, SIGCHLD);
4802
4803 sigemptyset (&blocked_mask);
4804
4805 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
4806 support read-only process state. */
4807 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
4808 | PTRACE_O_TRACEVFORKDONE
4809 | PTRACE_O_TRACEVFORK
4810 | PTRACE_O_TRACEFORK
4811 | PTRACE_O_TRACEEXEC);
4812 }
4813 \f
4814
4815 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4816 the GNU/Linux Threads library and therefore doesn't really belong
4817 here. */
4818
4819 /* Read variable NAME in the target and return its value if found.
4820 Otherwise return zero. It is assumed that the type of the variable
4821 is `int'. */
4822
4823 static int
4824 get_signo (const char *name)
4825 {
4826 struct bound_minimal_symbol ms;
4827 int signo;
4828
4829 ms = lookup_minimal_symbol (name, NULL, NULL);
4830 if (ms.minsym == NULL)
4831 return 0;
4832
4833 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4834 sizeof (signo)) != 0)
4835 return 0;
4836
4837 return signo;
4838 }
4839
4840 /* Return the set of signals used by the threads library in *SET. */
4841
4842 void
4843 lin_thread_get_thread_signals (sigset_t *set)
4844 {
4845 struct sigaction action;
4846 int restart, cancel;
4847
4848 sigemptyset (&blocked_mask);
4849 sigemptyset (set);
4850
4851 restart = get_signo ("__pthread_sig_restart");
4852 cancel = get_signo ("__pthread_sig_cancel");
4853
4854 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4855 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4856 not provide any way for the debugger to query the signal numbers -
4857 fortunately they don't change! */
4858
4859 if (restart == 0)
4860 restart = __SIGRTMIN;
4861
4862 if (cancel == 0)
4863 cancel = __SIGRTMIN + 1;
4864
4865 sigaddset (set, restart);
4866 sigaddset (set, cancel);
4867
4868 /* The GNU/Linux Threads library makes terminating threads send a
4869 special "cancel" signal instead of SIGCHLD. Make sure we catch
4870 those (to prevent them from terminating GDB itself, which is
4871 likely to be their default action) and treat them the same way as
4872 SIGCHLD. */
4873
4874 action.sa_handler = sigchld_handler;
4875 sigemptyset (&action.sa_mask);
4876 action.sa_flags = SA_RESTART;
4877 sigaction (cancel, &action, NULL);
4878
4879 /* We block the "cancel" signal throughout this code ... */
4880 sigaddset (&blocked_mask, cancel);
4881 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4882
4883 /* ... except during a sigsuspend. */
4884 sigdelset (&suspend_mask, cancel);
4885 }
This page took 0.15758 seconds and 5 git commands to generate.