1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001-2013 Free Software Foundation, Inc.
5 This file is part of GDB.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
23 #include "gdb_string.h"
25 #include "gdb_assert.h"
26 #ifdef HAVE_TKILL_SYSCALL
28 #include <sys/syscall.h>
30 #include <sys/ptrace.h>
31 #include "linux-nat.h"
32 #include "linux-ptrace.h"
33 #include "linux-procfs.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
39 #include "inf-child.h"
40 #include "inf-ptrace.h"
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include "gdbthread.h" /* for struct thread_info etc. */
48 #include "gdb_stat.h" /* for struct stat */
49 #include <fcntl.h> /* for O_RDONLY */
51 #include "event-loop.h"
52 #include "event-top.h"
54 #include <sys/types.h>
55 #include "gdb_dirent.h"
56 #include "xml-support.h"
60 #include "linux-osdata.h"
61 #include "linux-tdep.h"
64 #include "tracepoint.h"
65 #include "exceptions.h"
66 #include "linux-ptrace.h"
68 #include "target-descriptions.h"
69 #include "filestuff.h"
72 #define SPUFS_MAGIC 0x23c9b64e
75 #ifdef HAVE_PERSONALITY
76 # include <sys/personality.h>
77 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
78 # define ADDR_NO_RANDOMIZE 0x0040000
80 #endif /* HAVE_PERSONALITY */
82 /* This comment documents high-level logic of this file.
84 Waiting for events in sync mode
85 ===============================
87 When waiting for an event in a specific thread, we just use waitpid, passing
88 the specific pid, and not passing WNOHANG.
90 When waiting for an event in all threads, waitpid is not quite good. Prior to
91 version 2.4, Linux can either wait for event in main thread, or in secondary
92 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
93 miss an event. The solution is to use non-blocking waitpid, together with
94 sigsuspend. First, we use non-blocking waitpid to get an event in the main
95 process, if any. Second, we use non-blocking waitpid with the __WCLONED
96 flag to check for events in cloned processes. If nothing is found, we use
97 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
98 happened to a child process -- and SIGCHLD will be delivered both for events
99 in main debugged process and in cloned processes. As soon as we know there's
100 an event, we get back to calling nonblocking waitpid with and without the __WCLONED flag.
103 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
104 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
105 blocked, the signal becomes pending and sigsuspend immediately
106 notices it and returns.
108 Waiting for events in async mode
109 ================================
111 In async mode, GDB should always be ready to handle both user input
112 and target events, so neither blocking waitpid nor sigsuspend are
113 viable options. Instead, we should asynchronously notify the GDB main
114 event loop whenever there's an unprocessed event from the target. We
115 detect asynchronous target events by handling SIGCHLD signals. To
116 notify the event loop about target events, the self-pipe trick is used
117 --- a pipe is registered as waitable event source in the event loop,
118 the event loop select/poll's on the read end of this pipe (as well on
119 other event sources, e.g., stdin), and the SIGCHLD handler writes a
120 byte to this pipe. This is more portable than relying on
121 pselect/ppoll, since on kernels that lack those syscalls, libc
122 emulates them with select/poll+sigprocmask, and that is racy
123 (a.k.a. plain broken).
125 Obviously, if we fail to notify the event loop if there's a target
126 event, it's bad. OTOH, if we notify the event loop when there's no
127 event from the target, linux_nat_wait will detect that there's no real
128 event to report, and return event of type TARGET_WAITKIND_IGNORE.
129 This is mostly harmless, but it will waste time and is better avoided.
131 The main design point is that every time GDB is outside linux-nat.c,
132 we have a SIGCHLD handler installed that is called when something
133 happens to the target and notifies the GDB event loop. Whenever GDB
134 core decides to handle the event, and calls into linux-nat.c, we
135 process things as in sync mode, except that we never block in sigsuspend.
138 While processing an event, we may end up momentarily blocked in
139 waitpid calls. Those waitpid calls, while blocking, are guarantied to
140 return quickly. E.g., in all-stop mode, before reporting to the core
141 that an LWP hit a breakpoint, all LWPs are stopped by sending them
142 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
143 Note that this is different from blocking indefinitely waiting for the
144 next event --- here, we're already handling an event.
149 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
150 signal is not entirely significant; we just need for a signal to be delivered,
151 so that we can intercept it. SIGSTOP's advantage is that it can not be
152 blocked. A disadvantage is that it is not a real-time signal, so it can only
153 be queued once; we do not keep track of other sources of SIGSTOP.
155 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
156 use them, because they have special behavior when the signal is generated -
157 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
158 kills the entire thread group.
160 A delivered SIGSTOP would stop the entire thread group, not just the thread we
161 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
162 cancel it (by PTRACE_CONT without passing SIGSTOP).
164 We could use a real-time signal instead. This would solve those problems; we
165 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
166 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
167 generates it, and there are races with trying to find a signal that is not blocked.  */
171 #define O_LARGEFILE 0
174 /* Unlike other extended result codes, WSTOPSIG (status) on
175 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
176 instead SIGTRAP with bit 7 set. */
177 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
179 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
180 the use of the multi-threaded target. */
181 static struct target_ops
*linux_ops
;
182 static struct target_ops linux_ops_saved
;
184 /* The method to call, if any, when a new thread is attached. */
185 static void (*linux_nat_new_thread
) (struct lwp_info
*);
187 /* The method to call, if any, when a new fork is attached. */
188 static linux_nat_new_fork_ftype
*linux_nat_new_fork
;
190 /* The method to call, if any, when a process is no longer
192 static linux_nat_forget_process_ftype
*linux_nat_forget_process_hook
;
194 /* Hook to call prior to resuming a thread. */
195 static void (*linux_nat_prepare_to_resume
) (struct lwp_info
*);
197 /* The method to call, if any, when the siginfo object needs to be
198 converted between the layout returned by ptrace, and the layout in
199 the architecture of the inferior. */
200 static int (*linux_nat_siginfo_fixup
) (siginfo_t
*,
204 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
205 Called by our to_xfer_partial. */
206 static LONGEST (*super_xfer_partial
) (struct target_ops
*,
208 const char *, gdb_byte
*,
212 static unsigned int debug_linux_nat
;
/* Implement "show debug lin-lwp": report the current setting of
   DEBUG_LINUX_NAT to FILE.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
/* A singly-linked list of (pid, wait status) pairs, used to remember
   stopped processes reported before GDB knew about the corresponding
   LWP.  */

struct simple_pid_list
{
  int pid;			/* Process/LWP id.  */
  int status;			/* waitpid-style wait status.  */
  struct simple_pid_list *next;	/* Next entry, or NULL.  */
};
struct simple_pid_list *stopped_pids;
/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
   can not be used, 1 if it can.  */

static int linux_supports_tracefork_flag = -1;

/* This variable is a tri-state flag: -1 for unknown, 0 if
   PTRACE_O_TRACESYSGOOD can not be used, 1 if it can.  */

static int linux_supports_tracesysgood_flag = -1;

/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
   PTRACE_O_TRACEVFORKDONE.  */

static int linux_supports_tracevforkdone_flag = -1;

/* Stores the current used ptrace() options.  */
static int current_ptrace_options = 0;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  Unset (-1) until async mode is enabled.  */
static int linux_nat_event_pipe[2] = { -1, -1 };
253 /* Flush the event pipe. */
256 async_file_flush (void)
263 ret
= read (linux_nat_event_pipe
[0], &buf
, 1);
265 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
268 /* Put something (anything, doesn't matter what, or how much) in event
269 pipe, so that the select/poll in the event-loop realizes we have
270 something to process. */
273 async_file_mark (void)
277 /* It doesn't really matter what the pipe contains, as long we end
278 up with something in it. Might as well flush the previous
284 ret
= write (linux_nat_event_pipe
[1], "+", 1);
286 while (ret
== -1 && errno
== EINTR
);
288 /* Ignore EAGAIN. If the pipe is full, the event loop will already
289 be awakened anyway. */
292 static void linux_nat_async (void (*callback
)
293 (enum inferior_event_type event_type
,
296 static int kill_lwp (int lwpid
, int signo
);
298 static int stop_callback (struct lwp_info
*lp
, void *data
);
300 static void block_child_signals (sigset_t
*prev_mask
);
301 static void restore_child_signals_mask (sigset_t
*prev_mask
);
304 static struct lwp_info
*add_lwp (ptid_t ptid
);
305 static void purge_lwp_list (int pid
);
306 static void delete_lwp (ptid_t ptid
);
307 static struct lwp_info
*find_lwp_pid (ptid_t ptid
);
310 /* Trivial list manipulation functions to keep track of a list of
311 new stopped processes. */
313 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
315 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
318 new_pid
->status
= status
;
319 new_pid
->next
= *listp
;
324 in_pid_list_p (struct simple_pid_list
*list
, int pid
)
326 struct simple_pid_list
*p
;
328 for (p
= list
; p
!= NULL
; p
= p
->next
)
335 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *statusp
)
337 struct simple_pid_list
**p
;
339 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
340 if ((*p
)->pid
== pid
)
342 struct simple_pid_list
*next
= (*p
)->next
;
344 *statusp
= (*p
)->status
;
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: requests tracing, stops itself so the parent can
   set ptrace options, then forks once and exits.  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
/* Wrapper function for waitpid which handles EINTR: retries the wait
   until it completes or fails with a real error.  Returns the pid that
   changed state, or -1 with errno set (never EINTR).  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, statusp, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}
380 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
382 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
383 we know that the feature is not available. This may change the tracing
384 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
386 However, if it succeeds, we don't know for sure that the feature is
387 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
388 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
389 fork tracing, and let it fork. If the process exits, we assume that we
390 can't use TRACEFORK; if we get the fork notification, and we can extract
391 the new child's PID, then we assume that we can. */
394 linux_test_for_tracefork (int original_pid
)
396 int child_pid
, ret
, status
;
399 linux_supports_tracefork_flag
= 0;
400 linux_supports_tracevforkdone_flag
= 0;
402 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACEFORK
);
408 perror_with_name (("fork"));
411 linux_tracefork_child ();
413 ret
= my_waitpid (child_pid
, &status
, 0);
415 perror_with_name (("waitpid"));
416 else if (ret
!= child_pid
)
417 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret
);
418 if (! WIFSTOPPED (status
))
419 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
422 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0, PTRACE_O_TRACEFORK
);
425 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
428 warning (_("linux_test_for_tracefork: failed to kill child"));
432 ret
= my_waitpid (child_pid
, &status
, 0);
433 if (ret
!= child_pid
)
434 warning (_("linux_test_for_tracefork: failed "
435 "to wait for killed child"));
436 else if (!WIFSIGNALED (status
))
437 warning (_("linux_test_for_tracefork: unexpected "
438 "wait status 0x%x from killed child"), status
);
443 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
444 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
445 PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORKDONE
);
446 linux_supports_tracevforkdone_flag
= (ret
== 0);
448 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
450 warning (_("linux_test_for_tracefork: failed to resume child"));
452 ret
= my_waitpid (child_pid
, &status
, 0);
454 if (ret
== child_pid
&& WIFSTOPPED (status
)
455 && status
>> 16 == PTRACE_EVENT_FORK
)
458 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
459 if (ret
== 0 && second_pid
!= 0)
463 linux_supports_tracefork_flag
= 1;
464 my_waitpid (second_pid
, &second_status
, 0);
465 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
467 warning (_("linux_test_for_tracefork: "
468 "failed to kill second child"));
469 my_waitpid (second_pid
, &status
, 0);
473 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
474 "(%d, status 0x%x)"), ret
, status
);
478 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
480 warning ("linux_test_for_tracefork: failed to kill child");
481 my_waitpid (child_pid
, &status
, 0);
483 while (WIFSTOPPED (status
));
486 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
488 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
489 we know that the feature is not available. This may change the tracing
490 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
493 linux_test_for_tracesysgood (int original_pid
)
497 linux_supports_tracesysgood_flag
= 0;
499 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACESYSGOOD
);
503 linux_supports_tracesysgood_flag
= 1;
506 /* Determine wether we support PTRACE_O_TRACESYSGOOD option available.
507 This function also sets linux_supports_tracesysgood_flag. */
510 linux_supports_tracesysgood (int pid
)
512 if (linux_supports_tracesysgood_flag
== -1)
513 linux_test_for_tracesysgood (pid
);
514 return linux_supports_tracesysgood_flag
;
517 /* Return non-zero iff we have tracefork functionality available.
518 This function also sets linux_supports_tracefork_flag. */
521 linux_supports_tracefork (int pid
)
523 if (linux_supports_tracefork_flag
== -1)
524 linux_test_for_tracefork (pid
);
525 return linux_supports_tracefork_flag
;
529 linux_supports_tracevforkdone (int pid
)
531 if (linux_supports_tracefork_flag
== -1)
532 linux_test_for_tracefork (pid
);
533 return linux_supports_tracevforkdone_flag
;
537 linux_enable_tracesysgood (ptid_t ptid
)
539 int pid
= ptid_get_lwp (ptid
);
542 pid
= ptid_get_pid (ptid
);
544 if (linux_supports_tracesysgood (pid
) == 0)
547 current_ptrace_options
|= PTRACE_O_TRACESYSGOOD
;
549 ptrace (PTRACE_SETOPTIONS
, pid
, 0, current_ptrace_options
);
554 linux_enable_event_reporting (ptid_t ptid
)
556 int pid
= ptid_get_lwp (ptid
);
559 pid
= ptid_get_pid (ptid
);
561 if (! linux_supports_tracefork (pid
))
564 current_ptrace_options
|= PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORK
565 | PTRACE_O_TRACEEXEC
| PTRACE_O_TRACECLONE
;
567 if (linux_supports_tracevforkdone (pid
))
568 current_ptrace_options
|= PTRACE_O_TRACEVFORKDONE
;
570 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
571 read-only process state. */
573 ptrace (PTRACE_SETOPTIONS
, pid
, 0, current_ptrace_options
);
/* Target hook: after attaching to PID, enable ptrace event reporting
   and emit any kernel-version warnings.  */

static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  linux_enable_tracesysgood (pid_to_ptid (pid));
  linux_ptrace_init_warnings ();
}
585 linux_child_post_startup_inferior (ptid_t ptid
)
587 linux_enable_event_reporting (ptid
);
588 linux_enable_tracesysgood (ptid
);
589 linux_ptrace_init_warnings ();
592 /* Return the number of known LWPs in the tgid given by PID. */
600 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
601 if (ptid_get_pid (lp
->ptid
) == pid
)
607 /* Call delete_lwp with prototype compatible for make_cleanup. */
610 delete_lwp_cleanup (void *lp_voidp
)
612 struct lwp_info
*lp
= lp_voidp
;
614 delete_lwp (lp
->ptid
);
618 linux_child_follow_fork (struct target_ops
*ops
, int follow_child
)
621 int parent_pid
, child_pid
;
623 has_vforked
= (inferior_thread ()->pending_follow
.kind
624 == TARGET_WAITKIND_VFORKED
);
625 parent_pid
= ptid_get_lwp (inferior_ptid
);
627 parent_pid
= ptid_get_pid (inferior_ptid
);
628 child_pid
= PIDGET (inferior_thread ()->pending_follow
.value
.related_pid
);
631 && !non_stop
/* Non-stop always resumes both branches. */
632 && (!target_is_async_p () || sync_execution
)
633 && !(follow_child
|| detach_fork
|| sched_multi
))
635 /* The parent stays blocked inside the vfork syscall until the
636 child execs or exits. If we don't let the child run, then
637 the parent stays blocked. If we're telling the parent to run
638 in the foreground, the user will not be able to ctrl-c to get
639 back the terminal, effectively hanging the debug session. */
640 fprintf_filtered (gdb_stderr
, _("\
641 Can not resume the parent process over vfork in the foreground while\n\
642 holding the child stopped. Try \"set detach-on-fork\" or \
643 \"set schedule-multiple\".\n"));
644 /* FIXME output string > 80 columns. */
650 struct lwp_info
*child_lp
= NULL
;
652 /* We're already attached to the parent, by default. */
654 /* Detach new forked process? */
657 struct cleanup
*old_chain
;
659 /* Before detaching from the child, remove all breakpoints
660 from it. If we forked, then this has already been taken
661 care of by infrun.c. If we vforked however, any
662 breakpoint inserted in the parent is visible in the
663 child, even those added while stopped in a vfork
664 catchpoint. This will remove the breakpoints from the
665 parent also, but they'll be reinserted below. */
668 /* keep breakpoints list in sync. */
669 remove_breakpoints_pid (GET_PID (inferior_ptid
));
672 if (info_verbose
|| debug_linux_nat
)
674 target_terminal_ours ();
675 fprintf_filtered (gdb_stdlog
,
676 "Detaching after fork from "
677 "child process %d.\n",
681 old_chain
= save_inferior_ptid ();
682 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
684 child_lp
= add_lwp (inferior_ptid
);
685 child_lp
->stopped
= 1;
686 child_lp
->last_resume_kind
= resume_stop
;
687 make_cleanup (delete_lwp_cleanup
, child_lp
);
689 if (linux_nat_prepare_to_resume
!= NULL
)
690 linux_nat_prepare_to_resume (child_lp
);
691 ptrace (PTRACE_DETACH
, child_pid
, 0, 0);
693 do_cleanups (old_chain
);
697 struct inferior
*parent_inf
, *child_inf
;
698 struct cleanup
*old_chain
;
700 /* Add process to GDB's tables. */
701 child_inf
= add_inferior (child_pid
);
703 parent_inf
= current_inferior ();
704 child_inf
->attach_flag
= parent_inf
->attach_flag
;
705 copy_terminal_info (child_inf
, parent_inf
);
706 child_inf
->gdbarch
= parent_inf
->gdbarch
;
707 copy_inferior_target_desc_info (child_inf
, parent_inf
);
709 old_chain
= save_inferior_ptid ();
710 save_current_program_space ();
712 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
713 add_thread (inferior_ptid
);
714 child_lp
= add_lwp (inferior_ptid
);
715 child_lp
->stopped
= 1;
716 child_lp
->last_resume_kind
= resume_stop
;
717 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
719 /* If this is a vfork child, then the address-space is
720 shared with the parent. */
723 child_inf
->pspace
= parent_inf
->pspace
;
724 child_inf
->aspace
= parent_inf
->aspace
;
726 /* The parent will be frozen until the child is done
727 with the shared region. Keep track of the
729 child_inf
->vfork_parent
= parent_inf
;
730 child_inf
->pending_detach
= 0;
731 parent_inf
->vfork_child
= child_inf
;
732 parent_inf
->pending_detach
= 0;
736 child_inf
->aspace
= new_address_space ();
737 child_inf
->pspace
= add_program_space (child_inf
->aspace
);
738 child_inf
->removable
= 1;
739 set_current_program_space (child_inf
->pspace
);
740 clone_program_space (child_inf
->pspace
, parent_inf
->pspace
);
742 /* Let the shared library layer (solib-svr4) learn about
743 this new process, relocate the cloned exec, pull in
744 shared libraries, and install the solib event
745 breakpoint. If a "cloned-VM" event was propagated
746 better throughout the core, this wouldn't be
748 solib_create_inferior_hook (0);
751 /* Let the thread_db layer learn about this new process. */
752 check_for_thread_db ();
754 do_cleanups (old_chain
);
759 struct lwp_info
*parent_lp
;
760 struct inferior
*parent_inf
;
762 parent_inf
= current_inferior ();
764 /* If we detached from the child, then we have to be careful
765 to not insert breakpoints in the parent until the child
766 is done with the shared memory region. However, if we're
767 staying attached to the child, then we can and should
768 insert breakpoints, so that we can debug it. A
769 subsequent child exec or exit is enough to know when does
770 the child stops using the parent's address space. */
771 parent_inf
->waiting_for_vfork_done
= detach_fork
;
772 parent_inf
->pspace
->breakpoints_not_allowed
= detach_fork
;
774 parent_lp
= find_lwp_pid (pid_to_ptid (parent_pid
));
775 gdb_assert (linux_supports_tracefork_flag
>= 0);
777 if (linux_supports_tracevforkdone (0))
780 fprintf_unfiltered (gdb_stdlog
,
781 "LCFF: waiting for VFORK_DONE on %d\n",
783 parent_lp
->stopped
= 1;
785 /* We'll handle the VFORK_DONE event like any other
786 event, in target_wait. */
790 /* We can't insert breakpoints until the child has
791 finished with the shared memory region. We need to
792 wait until that happens. Ideal would be to just
794 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
795 - waitpid (parent_pid, &status, __WALL);
796 However, most architectures can't handle a syscall
797 being traced on the way out if it wasn't traced on
800 We might also think to loop, continuing the child
801 until it exits or gets a SIGTRAP. One problem is
802 that the child might call ptrace with PTRACE_TRACEME.
804 There's no simple and reliable way to figure out when
805 the vforked child will be done with its copy of the
806 shared memory. We could step it out of the syscall,
807 two instructions, let it go, and then single-step the
808 parent once. When we have hardware single-step, this
809 would work; with software single-step it could still
810 be made to work but we'd have to be able to insert
811 single-step breakpoints in the child, and we'd have
812 to insert -just- the single-step breakpoint in the
813 parent. Very awkward.
815 In the end, the best we can do is to make sure it
816 runs for a little while. Hopefully it will be out of
817 range of any breakpoints we reinsert. Usually this
818 is only the single-step breakpoint at vfork's return
822 fprintf_unfiltered (gdb_stdlog
,
823 "LCFF: no VFORK_DONE "
824 "support, sleeping a bit\n");
828 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
829 and leave it pending. The next linux_nat_resume call
830 will notice a pending event, and bypasses actually
831 resuming the inferior. */
832 parent_lp
->status
= 0;
833 parent_lp
->waitstatus
.kind
= TARGET_WAITKIND_VFORK_DONE
;
834 parent_lp
->stopped
= 1;
836 /* If we're in async mode, need to tell the event loop
837 there's something here to process. */
838 if (target_can_async_p ())
845 struct inferior
*parent_inf
, *child_inf
;
846 struct lwp_info
*child_lp
;
847 struct program_space
*parent_pspace
;
849 if (info_verbose
|| debug_linux_nat
)
851 target_terminal_ours ();
853 fprintf_filtered (gdb_stdlog
,
854 _("Attaching after process %d "
855 "vfork to child process %d.\n"),
856 parent_pid
, child_pid
);
858 fprintf_filtered (gdb_stdlog
,
859 _("Attaching after process %d "
860 "fork to child process %d.\n"),
861 parent_pid
, child_pid
);
864 /* Add the new inferior first, so that the target_detach below
865 doesn't unpush the target. */
867 child_inf
= add_inferior (child_pid
);
869 parent_inf
= current_inferior ();
870 child_inf
->attach_flag
= parent_inf
->attach_flag
;
871 copy_terminal_info (child_inf
, parent_inf
);
872 child_inf
->gdbarch
= parent_inf
->gdbarch
;
873 copy_inferior_target_desc_info (child_inf
, parent_inf
);
875 parent_pspace
= parent_inf
->pspace
;
877 /* If we're vforking, we want to hold on to the parent until the
878 child exits or execs. At child exec or exit time we can
879 remove the old breakpoints from the parent and detach or
880 resume debugging it. Otherwise, detach the parent now; we'll
881 want to reuse it's program/address spaces, but we can't set
882 them to the child before removing breakpoints from the
883 parent, otherwise, the breakpoints module could decide to
884 remove breakpoints from the wrong process (since they'd be
885 assigned to the same address space). */
889 gdb_assert (child_inf
->vfork_parent
== NULL
);
890 gdb_assert (parent_inf
->vfork_child
== NULL
);
891 child_inf
->vfork_parent
= parent_inf
;
892 child_inf
->pending_detach
= 0;
893 parent_inf
->vfork_child
= child_inf
;
894 parent_inf
->pending_detach
= detach_fork
;
895 parent_inf
->waiting_for_vfork_done
= 0;
897 else if (detach_fork
)
898 target_detach (NULL
, 0);
900 /* Note that the detach above makes PARENT_INF dangling. */
902 /* Add the child thread to the appropriate lists, and switch to
903 this new thread, before cloning the program space, and
904 informing the solib layer about this new process. */
906 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
907 add_thread (inferior_ptid
);
908 child_lp
= add_lwp (inferior_ptid
);
909 child_lp
->stopped
= 1;
910 child_lp
->last_resume_kind
= resume_stop
;
912 /* If this is a vfork child, then the address-space is shared
913 with the parent. If we detached from the parent, then we can
914 reuse the parent's program/address spaces. */
915 if (has_vforked
|| detach_fork
)
917 child_inf
->pspace
= parent_pspace
;
918 child_inf
->aspace
= child_inf
->pspace
->aspace
;
922 child_inf
->aspace
= new_address_space ();
923 child_inf
->pspace
= add_program_space (child_inf
->aspace
);
924 child_inf
->removable
= 1;
925 child_inf
->symfile_flags
= SYMFILE_NO_READ
;
926 set_current_program_space (child_inf
->pspace
);
927 clone_program_space (child_inf
->pspace
, parent_pspace
);
929 /* Let the shared library layer (solib-svr4) learn about
930 this new process, relocate the cloned exec, pull in
931 shared libraries, and install the solib event breakpoint.
932 If a "cloned-VM" event was propagated better throughout
933 the core, this wouldn't be required. */
934 solib_create_inferior_hook (0);
937 /* Let the thread_db layer learn about this new process. */
938 check_for_thread_db ();
/* Target hook: insert a fork catchpoint.  Return 0 on success (i.e. if
   the kernel supports fork tracing), non-zero on failure.  */

static int
linux_child_insert_fork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}
/* Target hook: remove a fork catchpoint.  Nothing to undo; always
   succeeds.  */

static int
linux_child_remove_fork_catchpoint (int pid)
{
  return 0;
}
/* Target hook: insert a vfork catchpoint.  Return 0 on success (i.e. if
   the kernel supports fork tracing), non-zero on failure.  */

static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}
/* Target hook: remove a vfork catchpoint.  Nothing to undo; always
   succeeds.  */

static int
linux_child_remove_vfork_catchpoint (int pid)
{
  return 0;
}
/* Target hook: insert an exec catchpoint.  Return 0 on success (i.e. if
   the kernel supports exec tracing), non-zero on failure.  */

static int
linux_child_insert_exec_catchpoint (int pid)
{
  return !linux_supports_tracefork (pid);
}
/* Target hook: remove an exec catchpoint.  Nothing to undo; always
   succeeds.  */

static int
linux_child_remove_exec_catchpoint (int pid)
{
  return 0;
}
/* Target hook: enable syscall catchpoints for PID.  Return 0 on
   success, 1 if PTRACE_O_TRACESYSGOOD is unavailable.  */

static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
996 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
997 are processes sharing the same VM space. A multi-threaded process
998 is basically a group of such processes. However, such a grouping
999 is almost entirely a user-space issue; the kernel doesn't enforce
1000 such a grouping at all (this might change in the future). In
1001 general, we'll rely on the threads library (i.e. the GNU/Linux
1002 Threads library) to provide such a grouping.
1004 It is perfectly well possible to write a multi-threaded application
1005 without the assistance of a threads library, by using the clone
1006 system call directly. This module should be able to give some
1007 rudimentary support for debugging such applications if developers
1008 specify the CLONE_PTRACE flag in the clone system call, and are
1009 using the Linux kernel 2.4 or above.
1011 Note that there are some peculiarities in GNU/Linux that affect
1014 - In general one should specify the __WCLONE flag to waitpid in
1015 order to make it report events for any of the cloned processes
1016 (and leave it out for the initial process). However, if a cloned
1017 process has exited the exit status is only reported if the
1018 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
1019 we cannot use it since GDB must work on older systems too.
1021 - When a traced, cloned process exits and is waited for by the
1022 debugger, the kernel reassigns it to the original parent and
1023 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
1024 library doesn't notice this, which leads to the "zombie problem":
1025 When debugged a multi-threaded process that spawns a lot of
1026 threads will run out of processes, even if the threads exit,
1027 because the "zombies" stay around. */
1029 /* List of known LWPs. */
1030 struct lwp_info
*lwp_list
;
1033 /* Original signal mask. */
1034 static sigset_t normal_mask
;
1036 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
1037 _initialize_linux_nat. */
1038 static sigset_t suspend_mask
;
1040 /* Signals to block to make that sigsuspend work. */
1041 static sigset_t blocked_mask
;
1043 /* SIGCHLD action. */
1044 struct sigaction sigchld_action
;
1046 /* Block child signals (SIGCHLD and linux threads signals), and store
1047 the previous mask in PREV_MASK. */
1050 block_child_signals (sigset_t
*prev_mask
)
1052 /* Make sure SIGCHLD is blocked. */
1053 if (!sigismember (&blocked_mask
, SIGCHLD
))
1054 sigaddset (&blocked_mask
, SIGCHLD
);
1056 sigprocmask (SIG_BLOCK
, &blocked_mask
, prev_mask
);
1059 /* Restore child signals mask, previously returned by
1060 block_child_signals. */
1063 restore_child_signals_mask (sigset_t
*prev_mask
)
1065 sigprocmask (SIG_SETMASK
, prev_mask
, NULL
);
1068 /* Mask of signals to pass directly to the inferior. */
1069 static sigset_t pass_mask
;
1071 /* Update signals to pass to the inferior. */
1073 linux_nat_pass_signals (int numsigs
, unsigned char *pass_signals
)
1077 sigemptyset (&pass_mask
);
1079 for (signo
= 1; signo
< NSIG
; signo
++)
1081 int target_signo
= gdb_signal_from_host (signo
);
1082 if (target_signo
< numsigs
&& pass_signals
[target_signo
])
1083 sigaddset (&pass_mask
, signo
);
1089 /* Prototypes for local functions. */
1090 static int stop_wait_callback (struct lwp_info
*lp
, void *data
);
1091 static int linux_thread_alive (ptid_t ptid
);
1092 static char *linux_child_pid_to_exec_file (int pid
);
1095 /* Convert wait status STATUS to a string. Used for printing debug
1099 status_to_str (int status
)
1101 static char buf
[64];
1103 if (WIFSTOPPED (status
))
1105 if (WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
1106 snprintf (buf
, sizeof (buf
), "%s (stopped at syscall)",
1107 strsignal (SIGTRAP
));
1109 snprintf (buf
, sizeof (buf
), "%s (stopped)",
1110 strsignal (WSTOPSIG (status
)));
1112 else if (WIFSIGNALED (status
))
1113 snprintf (buf
, sizeof (buf
), "%s (terminated)",
1114 strsignal (WTERMSIG (status
)));
1116 snprintf (buf
, sizeof (buf
), "%d (exited)", WEXITSTATUS (status
));
1121 /* Destroy and free LP. */
1124 lwp_free (struct lwp_info
*lp
)
1126 xfree (lp
->arch_private
);
1130 /* Remove all LWPs belong to PID from the lwp list. */
1133 purge_lwp_list (int pid
)
1135 struct lwp_info
*lp
, *lpprev
, *lpnext
;
1139 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1143 if (ptid_get_pid (lp
->ptid
) == pid
)
1146 lwp_list
= lp
->next
;
1148 lpprev
->next
= lp
->next
;
1157 /* Add the LWP specified by PTID to the list. PTID is the first LWP
1158 in the process. Return a pointer to the structure describing the
1161 This differs from add_lwp in that we don't let the arch specific
1162 bits know about this new thread. Current clients of this callback
1163 take the opportunity to install watchpoints in the new thread, and
1164 we shouldn't do that for the first thread. If we're spawning a
1165 child ("run"), the thread executes the shell wrapper first, and we
1166 shouldn't touch it until it execs the program we want to debug.
1167 For "attach", it'd be okay to call the callback, but it's not
1168 necessary, because watchpoints can't yet have been inserted into
1171 static struct lwp_info
*
1172 add_initial_lwp (ptid_t ptid
)
1174 struct lwp_info
*lp
;
1176 gdb_assert (is_lwp (ptid
));
1178 lp
= (struct lwp_info
*) xmalloc (sizeof (struct lwp_info
));
1180 memset (lp
, 0, sizeof (struct lwp_info
));
1182 lp
->last_resume_kind
= resume_continue
;
1183 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
1188 lp
->next
= lwp_list
;
1194 /* Add the LWP specified by PID to the list. Return a pointer to the
1195 structure describing the new LWP. The LWP should already be
1198 static struct lwp_info
*
1199 add_lwp (ptid_t ptid
)
1201 struct lwp_info
*lp
;
1203 lp
= add_initial_lwp (ptid
);
1205 /* Let the arch specific bits know about this new thread. Current
1206 clients of this callback take the opportunity to install
1207 watchpoints in the new thread. We don't do this for the first
1208 thread though. See add_initial_lwp. */
1209 if (linux_nat_new_thread
!= NULL
)
1210 linux_nat_new_thread (lp
);
1215 /* Remove the LWP specified by PID from the list. */
1218 delete_lwp (ptid_t ptid
)
1220 struct lwp_info
*lp
, *lpprev
;
1224 for (lp
= lwp_list
; lp
; lpprev
= lp
, lp
= lp
->next
)
1225 if (ptid_equal (lp
->ptid
, ptid
))
1232 lpprev
->next
= lp
->next
;
1234 lwp_list
= lp
->next
;
1239 /* Return a pointer to the structure describing the LWP corresponding
1240 to PID. If no corresponding LWP could be found, return NULL. */
1242 static struct lwp_info
*
1243 find_lwp_pid (ptid_t ptid
)
1245 struct lwp_info
*lp
;
1249 lwp
= GET_LWP (ptid
);
1251 lwp
= GET_PID (ptid
);
1253 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
1254 if (lwp
== GET_LWP (lp
->ptid
))
1260 /* Call CALLBACK with its second argument set to DATA for every LWP in
1261 the list. If CALLBACK returns 1 for a particular LWP, return a
1262 pointer to the structure describing that LWP immediately.
1263 Otherwise return NULL. */
1266 iterate_over_lwps (ptid_t filter
,
1267 int (*callback
) (struct lwp_info
*, void *),
1270 struct lwp_info
*lp
, *lpnext
;
1272 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1276 if (ptid_match (lp
->ptid
, filter
))
1278 if ((*callback
) (lp
, data
))
1286 /* Update our internal state when changing from one checkpoint to
1287 another indicated by NEW_PTID. We can only switch single-threaded
1288 applications, so we only create one new LWP, and the previous list
1292 linux_nat_switch_fork (ptid_t new_ptid
)
1294 struct lwp_info
*lp
;
1296 purge_lwp_list (GET_PID (inferior_ptid
));
1298 lp
= add_lwp (new_ptid
);
1301 /* This changes the thread's ptid while preserving the gdb thread
1302 num. Also changes the inferior pid, while preserving the
1304 thread_change_ptid (inferior_ptid
, new_ptid
);
1306 /* We've just told GDB core that the thread changed target id, but,
1307 in fact, it really is a different thread, with different register
1309 registers_changed ();
1312 /* Handle the exit of a single thread LP. */
1315 exit_lwp (struct lwp_info
*lp
)
1317 struct thread_info
*th
= find_thread_ptid (lp
->ptid
);
1321 if (print_thread_events
)
1322 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp
->ptid
));
1324 delete_thread (lp
->ptid
);
1327 delete_lwp (lp
->ptid
);
1330 /* Wait for the LWP specified by LP, which we have just attached to.
1331 Returns a wait status for that LWP, to cache. */
1334 linux_nat_post_attach_wait (ptid_t ptid
, int first
, int *cloned
,
1337 pid_t new_pid
, pid
= GET_LWP (ptid
);
1340 if (linux_proc_pid_is_stopped (pid
))
1342 if (debug_linux_nat
)
1343 fprintf_unfiltered (gdb_stdlog
,
1344 "LNPAW: Attaching to a stopped process\n");
1346 /* The process is definitely stopped. It is in a job control
1347 stop, unless the kernel predates the TASK_STOPPED /
1348 TASK_TRACED distinction, in which case it might be in a
1349 ptrace stop. Make sure it is in a ptrace stop; from there we
1350 can kill it, signal it, et cetera.
1352 First make sure there is a pending SIGSTOP. Since we are
1353 already attached, the process can not transition from stopped
1354 to running without a PTRACE_CONT; so we know this signal will
1355 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1356 probably already in the queue (unless this kernel is old
1357 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1358 is not an RT signal, it can only be queued once. */
1359 kill_lwp (pid
, SIGSTOP
);
1361 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1362 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1363 ptrace (PTRACE_CONT
, pid
, 0, 0);
1366 /* Make sure the initial process is stopped. The user-level threads
1367 layer might want to poke around in the inferior, and that won't
1368 work if things haven't stabilized yet. */
1369 new_pid
= my_waitpid (pid
, &status
, 0);
1370 if (new_pid
== -1 && errno
== ECHILD
)
1373 warning (_("%s is a cloned process"), target_pid_to_str (ptid
));
1375 /* Try again with __WCLONE to check cloned processes. */
1376 new_pid
= my_waitpid (pid
, &status
, __WCLONE
);
1380 gdb_assert (pid
== new_pid
);
1382 if (!WIFSTOPPED (status
))
1384 /* The pid we tried to attach has apparently just exited. */
1385 if (debug_linux_nat
)
1386 fprintf_unfiltered (gdb_stdlog
, "LNPAW: Failed to stop %d: %s",
1387 pid
, status_to_str (status
));
1391 if (WSTOPSIG (status
) != SIGSTOP
)
1394 if (debug_linux_nat
)
1395 fprintf_unfiltered (gdb_stdlog
,
1396 "LNPAW: Received %s after attaching\n",
1397 status_to_str (status
));
1403 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1404 the new LWP could not be attached, or 1 if we're already auto
1405 attached to this thread, but haven't processed the
1406 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1407 its existance, without considering it an error. */
1410 lin_lwp_attach_lwp (ptid_t ptid
)
1412 struct lwp_info
*lp
;
1415 gdb_assert (is_lwp (ptid
));
1417 lp
= find_lwp_pid (ptid
);
1418 lwpid
= GET_LWP (ptid
);
1420 /* We assume that we're already attached to any LWP that has an id
1421 equal to the overall process id, and to any LWP that is already
1422 in our list of LWPs. If we're not seeing exit events from threads
1423 and we've had PID wraparound since we last tried to stop all threads,
1424 this assumption might be wrong; fortunately, this is very unlikely
1426 if (lwpid
!= GET_PID (ptid
) && lp
== NULL
)
1428 int status
, cloned
= 0, signalled
= 0;
1430 if (ptrace (PTRACE_ATTACH
, lwpid
, 0, 0) < 0)
1432 if (linux_supports_tracefork_flag
)
1434 /* If we haven't stopped all threads when we get here,
1435 we may have seen a thread listed in thread_db's list,
1436 but not processed the PTRACE_EVENT_CLONE yet. If
1437 that's the case, ignore this new thread, and let
1438 normal event handling discover it later. */
1439 if (in_pid_list_p (stopped_pids
, lwpid
))
1441 /* We've already seen this thread stop, but we
1442 haven't seen the PTRACE_EVENT_CLONE extended
1451 /* See if we've got a stop for this new child
1452 pending. If so, we're already attached. */
1453 new_pid
= my_waitpid (lwpid
, &status
, WNOHANG
);
1454 if (new_pid
== -1 && errno
== ECHILD
)
1455 new_pid
= my_waitpid (lwpid
, &status
, __WCLONE
| WNOHANG
);
1458 if (WIFSTOPPED (status
))
1459 add_to_pid_list (&stopped_pids
, lwpid
, status
);
1465 /* If we fail to attach to the thread, issue a warning,
1466 but continue. One way this can happen is if thread
1467 creation is interrupted; as of Linux kernel 2.6.19, a
1468 bug may place threads in the thread list and then fail
1470 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid
),
1471 safe_strerror (errno
));
1475 if (debug_linux_nat
)
1476 fprintf_unfiltered (gdb_stdlog
,
1477 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1478 target_pid_to_str (ptid
));
1480 status
= linux_nat_post_attach_wait (ptid
, 0, &cloned
, &signalled
);
1481 if (!WIFSTOPPED (status
))
1484 lp
= add_lwp (ptid
);
1486 lp
->cloned
= cloned
;
1487 lp
->signalled
= signalled
;
1488 if (WSTOPSIG (status
) != SIGSTOP
)
1491 lp
->status
= status
;
1494 target_post_attach (GET_LWP (lp
->ptid
));
1496 if (debug_linux_nat
)
1498 fprintf_unfiltered (gdb_stdlog
,
1499 "LLAL: waitpid %s received %s\n",
1500 target_pid_to_str (ptid
),
1501 status_to_str (status
));
1506 /* We assume that the LWP representing the original process is
1507 already stopped. Mark it as stopped in the data structure
1508 that the GNU/linux ptrace layer uses to keep track of
1509 threads. Note that this won't have already been done since
1510 the main thread will have, we assume, been stopped by an
1511 attach from a different layer. */
1513 lp
= add_lwp (ptid
);
1517 lp
->last_resume_kind
= resume_stop
;
1522 linux_nat_create_inferior (struct target_ops
*ops
,
1523 char *exec_file
, char *allargs
, char **env
,
1526 #ifdef HAVE_PERSONALITY
1527 int personality_orig
= 0, personality_set
= 0;
1528 #endif /* HAVE_PERSONALITY */
1530 /* The fork_child mechanism is synchronous and calls target_wait, so
1531 we have to mask the async mode. */
1533 #ifdef HAVE_PERSONALITY
1534 if (disable_randomization
)
1537 personality_orig
= personality (0xffffffff);
1538 if (errno
== 0 && !(personality_orig
& ADDR_NO_RANDOMIZE
))
1540 personality_set
= 1;
1541 personality (personality_orig
| ADDR_NO_RANDOMIZE
);
1543 if (errno
!= 0 || (personality_set
1544 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE
)))
1545 warning (_("Error disabling address space randomization: %s"),
1546 safe_strerror (errno
));
1548 #endif /* HAVE_PERSONALITY */
1550 /* Make sure we report all signals during startup. */
1551 linux_nat_pass_signals (0, NULL
);
1553 linux_ops
->to_create_inferior (ops
, exec_file
, allargs
, env
, from_tty
);
1555 #ifdef HAVE_PERSONALITY
1556 if (personality_set
)
1559 personality (personality_orig
);
1561 warning (_("Error restoring address space randomization: %s"),
1562 safe_strerror (errno
));
1564 #endif /* HAVE_PERSONALITY */
1568 linux_nat_attach (struct target_ops
*ops
, char *args
, int from_tty
)
1570 struct lwp_info
*lp
;
1573 volatile struct gdb_exception ex
;
1575 /* Make sure we report all signals during attach. */
1576 linux_nat_pass_signals (0, NULL
);
1578 TRY_CATCH (ex
, RETURN_MASK_ERROR
)
1580 linux_ops
->to_attach (ops
, args
, from_tty
);
1584 pid_t pid
= parse_pid_to_attach (args
);
1585 struct buffer buffer
;
1586 char *message
, *buffer_s
;
1588 message
= xstrdup (ex
.message
);
1589 make_cleanup (xfree
, message
);
1591 buffer_init (&buffer
);
1592 linux_ptrace_attach_warnings (pid
, &buffer
);
1594 buffer_grow_str0 (&buffer
, "");
1595 buffer_s
= buffer_finish (&buffer
);
1596 make_cleanup (xfree
, buffer_s
);
1598 throw_error (ex
.error
, "%s%s", buffer_s
, message
);
1601 /* The ptrace base target adds the main thread with (pid,0,0)
1602 format. Decorate it with lwp info. */
1603 ptid
= BUILD_LWP (GET_PID (inferior_ptid
), GET_PID (inferior_ptid
));
1604 thread_change_ptid (inferior_ptid
, ptid
);
1606 /* Add the initial process as the first LWP to the list. */
1607 lp
= add_initial_lwp (ptid
);
1609 status
= linux_nat_post_attach_wait (lp
->ptid
, 1, &lp
->cloned
,
1611 if (!WIFSTOPPED (status
))
1613 if (WIFEXITED (status
))
1615 int exit_code
= WEXITSTATUS (status
);
1617 target_terminal_ours ();
1618 target_mourn_inferior ();
1620 error (_("Unable to attach: program exited normally."));
1622 error (_("Unable to attach: program exited with code %d."),
1625 else if (WIFSIGNALED (status
))
1627 enum gdb_signal signo
;
1629 target_terminal_ours ();
1630 target_mourn_inferior ();
1632 signo
= gdb_signal_from_host (WTERMSIG (status
));
1633 error (_("Unable to attach: program terminated with signal "
1635 gdb_signal_to_name (signo
),
1636 gdb_signal_to_string (signo
));
1639 internal_error (__FILE__
, __LINE__
,
1640 _("unexpected status %d for PID %ld"),
1641 status
, (long) GET_LWP (ptid
));
1646 /* Save the wait status to report later. */
1648 if (debug_linux_nat
)
1649 fprintf_unfiltered (gdb_stdlog
,
1650 "LNA: waitpid %ld, saving status %s\n",
1651 (long) GET_PID (lp
->ptid
), status_to_str (status
));
1653 lp
->status
= status
;
1655 if (target_can_async_p ())
1656 target_async (inferior_event_handler
, 0);
1659 /* Get pending status of LP. */
1661 get_pending_status (struct lwp_info
*lp
, int *status
)
1663 enum gdb_signal signo
= GDB_SIGNAL_0
;
1665 /* If we paused threads momentarily, we may have stored pending
1666 events in lp->status or lp->waitstatus (see stop_wait_callback),
1667 and GDB core hasn't seen any signal for those threads.
1668 Otherwise, the last signal reported to the core is found in the
1669 thread object's stop_signal.
1671 There's a corner case that isn't handled here at present. Only
1672 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1673 stop_signal make sense as a real signal to pass to the inferior.
1674 Some catchpoint related events, like
1675 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1676 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1677 those traps are debug API (ptrace in our case) related and
1678 induced; the inferior wouldn't see them if it wasn't being
1679 traced. Hence, we should never pass them to the inferior, even
1680 when set to pass state. Since this corner case isn't handled by
1681 infrun.c when proceeding with a signal, for consistency, neither
1682 do we handle it here (or elsewhere in the file we check for
1683 signal pass state). Normally SIGTRAP isn't set to pass state, so
1684 this is really a corner case. */
1686 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
1687 signo
= GDB_SIGNAL_0
; /* a pending ptrace event, not a real signal. */
1688 else if (lp
->status
)
1689 signo
= gdb_signal_from_host (WSTOPSIG (lp
->status
));
1690 else if (non_stop
&& !is_executing (lp
->ptid
))
1692 struct thread_info
*tp
= find_thread_ptid (lp
->ptid
);
1694 signo
= tp
->suspend
.stop_signal
;
1698 struct target_waitstatus last
;
1701 get_last_target_status (&last_ptid
, &last
);
1703 if (GET_LWP (lp
->ptid
) == GET_LWP (last_ptid
))
1705 struct thread_info
*tp
= find_thread_ptid (lp
->ptid
);
1707 signo
= tp
->suspend
.stop_signal
;
1713 if (signo
== GDB_SIGNAL_0
)
1715 if (debug_linux_nat
)
1716 fprintf_unfiltered (gdb_stdlog
,
1717 "GPT: lwp %s has no pending signal\n",
1718 target_pid_to_str (lp
->ptid
));
1720 else if (!signal_pass_state (signo
))
1722 if (debug_linux_nat
)
1723 fprintf_unfiltered (gdb_stdlog
,
1724 "GPT: lwp %s had signal %s, "
1725 "but it is in no pass state\n",
1726 target_pid_to_str (lp
->ptid
),
1727 gdb_signal_to_string (signo
));
1731 *status
= W_STOPCODE (gdb_signal_to_host (signo
));
1733 if (debug_linux_nat
)
1734 fprintf_unfiltered (gdb_stdlog
,
1735 "GPT: lwp %s has pending signal %s\n",
1736 target_pid_to_str (lp
->ptid
),
1737 gdb_signal_to_string (signo
));
1744 detach_callback (struct lwp_info
*lp
, void *data
)
1746 gdb_assert (lp
->status
== 0 || WIFSTOPPED (lp
->status
));
1748 if (debug_linux_nat
&& lp
->status
)
1749 fprintf_unfiltered (gdb_stdlog
, "DC: Pending %s for %s on detach.\n",
1750 strsignal (WSTOPSIG (lp
->status
)),
1751 target_pid_to_str (lp
->ptid
));
1753 /* If there is a pending SIGSTOP, get rid of it. */
1756 if (debug_linux_nat
)
1757 fprintf_unfiltered (gdb_stdlog
,
1758 "DC: Sending SIGCONT to %s\n",
1759 target_pid_to_str (lp
->ptid
));
1761 kill_lwp (GET_LWP (lp
->ptid
), SIGCONT
);
1765 /* We don't actually detach from the LWP that has an id equal to the
1766 overall process id just yet. */
1767 if (GET_LWP (lp
->ptid
) != GET_PID (lp
->ptid
))
1771 /* Pass on any pending signal for this LWP. */
1772 get_pending_status (lp
, &status
);
1774 if (linux_nat_prepare_to_resume
!= NULL
)
1775 linux_nat_prepare_to_resume (lp
);
1777 if (ptrace (PTRACE_DETACH
, GET_LWP (lp
->ptid
), 0,
1778 WSTOPSIG (status
)) < 0)
1779 error (_("Can't detach %s: %s"), target_pid_to_str (lp
->ptid
),
1780 safe_strerror (errno
));
1782 if (debug_linux_nat
)
1783 fprintf_unfiltered (gdb_stdlog
,
1784 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1785 target_pid_to_str (lp
->ptid
),
1786 strsignal (WSTOPSIG (status
)));
1788 delete_lwp (lp
->ptid
);
1795 linux_nat_detach (struct target_ops
*ops
, char *args
, int from_tty
)
1799 struct lwp_info
*main_lwp
;
1801 pid
= GET_PID (inferior_ptid
);
1803 /* Don't unregister from the event loop, as there may be other
1804 inferiors running. */
1806 /* Stop all threads before detaching. ptrace requires that the
1807 thread is stopped to sucessfully detach. */
1808 iterate_over_lwps (pid_to_ptid (pid
), stop_callback
, NULL
);
1809 /* ... and wait until all of them have reported back that
1810 they're no longer running. */
1811 iterate_over_lwps (pid_to_ptid (pid
), stop_wait_callback
, NULL
);
1813 iterate_over_lwps (pid_to_ptid (pid
), detach_callback
, NULL
);
1815 /* Only the initial process should be left right now. */
1816 gdb_assert (num_lwps (GET_PID (inferior_ptid
)) == 1);
1818 main_lwp
= find_lwp_pid (pid_to_ptid (pid
));
1820 /* Pass on any pending signal for the last LWP. */
1821 if ((args
== NULL
|| *args
== '\0')
1822 && get_pending_status (main_lwp
, &status
) != -1
1823 && WIFSTOPPED (status
))
1825 /* Put the signal number in ARGS so that inf_ptrace_detach will
1826 pass it along with PTRACE_DETACH. */
1828 sprintf (args
, "%d", (int) WSTOPSIG (status
));
1829 if (debug_linux_nat
)
1830 fprintf_unfiltered (gdb_stdlog
,
1831 "LND: Sending signal %s to %s\n",
1833 target_pid_to_str (main_lwp
->ptid
));
1836 if (linux_nat_prepare_to_resume
!= NULL
)
1837 linux_nat_prepare_to_resume (main_lwp
);
1838 delete_lwp (main_lwp
->ptid
);
1840 if (forks_exist_p ())
1842 /* Multi-fork case. The current inferior_ptid is being detached
1843 from, but there are other viable forks to debug. Detach from
1844 the current fork, and context-switch to the first
1846 linux_fork_detach (args
, from_tty
);
1849 linux_ops
->to_detach (ops
, args
, from_tty
);
1855 resume_lwp (struct lwp_info
*lp
, int step
, enum gdb_signal signo
)
1859 struct inferior
*inf
= find_inferior_pid (GET_PID (lp
->ptid
));
1861 if (inf
->vfork_child
!= NULL
)
1863 if (debug_linux_nat
)
1864 fprintf_unfiltered (gdb_stdlog
,
1865 "RC: Not resuming %s (vfork parent)\n",
1866 target_pid_to_str (lp
->ptid
));
1868 else if (lp
->status
== 0
1869 && lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
)
1871 if (debug_linux_nat
)
1872 fprintf_unfiltered (gdb_stdlog
,
1873 "RC: Resuming sibling %s, %s, %s\n",
1874 target_pid_to_str (lp
->ptid
),
1875 (signo
!= GDB_SIGNAL_0
1876 ? strsignal (gdb_signal_to_host (signo
))
1878 step
? "step" : "resume");
1880 if (linux_nat_prepare_to_resume
!= NULL
)
1881 linux_nat_prepare_to_resume (lp
);
1882 linux_ops
->to_resume (linux_ops
,
1883 pid_to_ptid (GET_LWP (lp
->ptid
)),
1887 lp
->stopped_by_watchpoint
= 0;
1891 if (debug_linux_nat
)
1892 fprintf_unfiltered (gdb_stdlog
,
1893 "RC: Not resuming sibling %s (has pending)\n",
1894 target_pid_to_str (lp
->ptid
));
1899 if (debug_linux_nat
)
1900 fprintf_unfiltered (gdb_stdlog
,
1901 "RC: Not resuming sibling %s (not stopped)\n",
1902 target_pid_to_str (lp
->ptid
));
1906 /* Resume LWP, with the last stop signal, if it is in pass state. */
1909 linux_nat_resume_callback (struct lwp_info
*lp
, void *data
)
1911 enum gdb_signal signo
= GDB_SIGNAL_0
;
1915 struct thread_info
*thread
;
1917 thread
= find_thread_ptid (lp
->ptid
);
1920 if (signal_pass_state (thread
->suspend
.stop_signal
))
1921 signo
= thread
->suspend
.stop_signal
;
1922 thread
->suspend
.stop_signal
= GDB_SIGNAL_0
;
1926 resume_lwp (lp
, 0, signo
);
1931 resume_clear_callback (struct lwp_info
*lp
, void *data
)
1934 lp
->last_resume_kind
= resume_stop
;
1939 resume_set_callback (struct lwp_info
*lp
, void *data
)
1942 lp
->last_resume_kind
= resume_continue
;
1947 linux_nat_resume (struct target_ops
*ops
,
1948 ptid_t ptid
, int step
, enum gdb_signal signo
)
1950 struct lwp_info
*lp
;
1953 if (debug_linux_nat
)
1954 fprintf_unfiltered (gdb_stdlog
,
1955 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1956 step
? "step" : "resume",
1957 target_pid_to_str (ptid
),
1958 (signo
!= GDB_SIGNAL_0
1959 ? strsignal (gdb_signal_to_host (signo
)) : "0"),
1960 target_pid_to_str (inferior_ptid
));
1962 /* A specific PTID means `step only this process id'. */
1963 resume_many
= (ptid_equal (minus_one_ptid
, ptid
)
1964 || ptid_is_pid (ptid
));
1966 /* Mark the lwps we're resuming as resumed. */
1967 iterate_over_lwps (ptid
, resume_set_callback
, NULL
);
1969 /* See if it's the current inferior that should be handled
1972 lp
= find_lwp_pid (inferior_ptid
);
1974 lp
= find_lwp_pid (ptid
);
1975 gdb_assert (lp
!= NULL
);
1977 /* Remember if we're stepping. */
1979 lp
->last_resume_kind
= step
? resume_step
: resume_continue
;
1981 /* If we have a pending wait status for this thread, there is no
1982 point in resuming the process. But first make sure that
1983 linux_nat_wait won't preemptively handle the event - we
1984 should never take this short-circuit if we are going to
1985 leave LP running, since we have skipped resuming all the
1986 other threads. This bit of code needs to be synchronized
1987 with linux_nat_wait. */
1989 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1992 && WSTOPSIG (lp
->status
)
1993 && sigismember (&pass_mask
, WSTOPSIG (lp
->status
)))
1995 if (debug_linux_nat
)
1996 fprintf_unfiltered (gdb_stdlog
,
1997 "LLR: Not short circuiting for ignored "
1998 "status 0x%x\n", lp
->status
);
2000 /* FIXME: What should we do if we are supposed to continue
2001 this thread with a signal? */
2002 gdb_assert (signo
== GDB_SIGNAL_0
);
2003 signo
= gdb_signal_from_host (WSTOPSIG (lp
->status
));
2008 if (lp
->status
|| lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
2010 /* FIXME: What should we do if we are supposed to continue
2011 this thread with a signal? */
2012 gdb_assert (signo
== GDB_SIGNAL_0
);
2014 if (debug_linux_nat
)
2015 fprintf_unfiltered (gdb_stdlog
,
2016 "LLR: Short circuiting for status 0x%x\n",
2019 if (target_can_async_p ())
2021 target_async (inferior_event_handler
, 0);
2022 /* Tell the event loop we have something to process. */
2028 /* Mark LWP as not stopped to prevent it from being continued by
2029 linux_nat_resume_callback. */
2033 iterate_over_lwps (ptid
, linux_nat_resume_callback
, NULL
);
2035 /* Convert to something the lower layer understands. */
2036 ptid
= pid_to_ptid (GET_LWP (lp
->ptid
));
2038 if (linux_nat_prepare_to_resume
!= NULL
)
2039 linux_nat_prepare_to_resume (lp
);
2040 linux_ops
->to_resume (linux_ops
, ptid
, step
, signo
);
2041 lp
->stopped_by_watchpoint
= 0;
2043 if (debug_linux_nat
)
2044 fprintf_unfiltered (gdb_stdlog
,
2045 "LLR: %s %s, %s (resume event thread)\n",
2046 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2047 target_pid_to_str (ptid
),
2048 (signo
!= GDB_SIGNAL_0
2049 ? strsignal (gdb_signal_to_host (signo
)) : "0"));
2051 if (target_can_async_p ())
2052 target_async (inferior_event_handler
, 0);
/* Send a signal to an LWP.  Returns the raw syscall/libc result.  */

static int
kill_lwp (int lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  {
    /* Remember a tkill ENOSYS so we only probe the syscall once.  */
    static int tkill_failed;

    if (!tkill_failed)
      {
	int ret;

	errno = 0;
	ret = syscall (__NR_tkill, lwpid, signo);
	if (errno != ENOSYS)
	  return ret;
	tkill_failed = 1;
      }
  }
#endif

  return kill (lwpid, signo);
}
2083 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2084 event, check if the core is interested in it: if not, ignore the
2085 event, and keep waiting; otherwise, we need to toggle the LWP's
2086 syscall entry/exit status, since the ptrace event itself doesn't
2087 indicate it, and report the trap to higher layers. */
2090 linux_handle_syscall_trap (struct lwp_info
*lp
, int stopping
)
2092 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
2093 struct gdbarch
*gdbarch
= target_thread_architecture (lp
->ptid
);
2094 int syscall_number
= (int) gdbarch_get_syscall_number (gdbarch
, lp
->ptid
);
2098 /* If we're stopping threads, there's a SIGSTOP pending, which
2099 makes it so that the LWP reports an immediate syscall return,
2100 followed by the SIGSTOP. Skip seeing that "return" using
2101 PTRACE_CONT directly, and let stop_wait_callback collect the
2102 SIGSTOP. Later when the thread is resumed, a new syscall
2103 entry event. If we didn't do this (and returned 0), we'd
2104 leave a syscall entry pending, and our caller, by using
2105 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2106 itself. Later, when the user re-resumes this LWP, we'd see
2107 another syscall entry event and we'd mistake it for a return.
2109 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2110 (leaving immediately with LWP->signalled set, without issuing
2111 a PTRACE_CONT), it would still be problematic to leave this
2112 syscall enter pending, as later when the thread is resumed,
2113 it would then see the same syscall exit mentioned above,
2114 followed by the delayed SIGSTOP, while the syscall didn't
2115 actually get to execute. It seems it would be even more
2116 confusing to the user. */
2118 if (debug_linux_nat
)
2119 fprintf_unfiltered (gdb_stdlog
,
2120 "LHST: ignoring syscall %d "
2121 "for LWP %ld (stopping threads), "
2122 "resuming with PTRACE_CONT for SIGSTOP\n",
2124 GET_LWP (lp
->ptid
));
2126 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2127 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2131 if (catch_syscall_enabled ())
2133 /* Always update the entry/return state, even if this particular
2134 syscall isn't interesting to the core now. In async mode,
2135 the user could install a new catchpoint for this syscall
2136 between syscall enter/return, and we'll need to know to
2137 report a syscall return if that happens. */
2138 lp
->syscall_state
= (lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2139 ? TARGET_WAITKIND_SYSCALL_RETURN
2140 : TARGET_WAITKIND_SYSCALL_ENTRY
);
2142 if (catching_syscall_number (syscall_number
))
2144 /* Alright, an event to report. */
2145 ourstatus
->kind
= lp
->syscall_state
;
2146 ourstatus
->value
.syscall_number
= syscall_number
;
2148 if (debug_linux_nat
)
2149 fprintf_unfiltered (gdb_stdlog
,
2150 "LHST: stopping for %s of syscall %d"
2153 == TARGET_WAITKIND_SYSCALL_ENTRY
2154 ? "entry" : "return",
2156 GET_LWP (lp
->ptid
));
2160 if (debug_linux_nat
)
2161 fprintf_unfiltered (gdb_stdlog
,
2162 "LHST: ignoring %s of syscall %d "
2164 lp
->syscall_state
== TARGET_WAITKIND_SYSCALL_ENTRY
2165 ? "entry" : "return",
2167 GET_LWP (lp
->ptid
));
2171 /* If we had been syscall tracing, and hence used PT_SYSCALL
2172 before on this LWP, it could happen that the user removes all
2173 syscall catchpoints before we get to process this event.
2174 There are two noteworthy issues here:
2176 - When stopped at a syscall entry event, resuming with
2177 PT_STEP still resumes executing the syscall and reports a
2180 - Only PT_SYSCALL catches syscall enters. If we last
2181 single-stepped this thread, then this event can't be a
2182 syscall enter. If we last single-stepped this thread, this
2183 has to be a syscall exit.
2185 The points above mean that the next resume, be it PT_STEP or
2186 PT_CONTINUE, can not trigger a syscall trace event. */
2187 if (debug_linux_nat
)
2188 fprintf_unfiltered (gdb_stdlog
,
2189 "LHST: caught syscall event "
2190 "with no syscall catchpoints."
2191 " %d for LWP %ld, ignoring\n",
2193 GET_LWP (lp
->ptid
));
2194 lp
->syscall_state
= TARGET_WAITKIND_IGNORE
;
2197 /* The core isn't interested in this event. For efficiency, avoid
2198 stopping all threads only to have the core resume them all again.
2199 Since we're not stopping threads, if we're still syscall tracing
2200 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2201 subsequent syscall. Simply resume using the inf-ptrace layer,
2202 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2204 /* Note that gdbarch_get_syscall_number may access registers, hence
2206 registers_changed ();
2207 if (linux_nat_prepare_to_resume
!= NULL
)
2208 linux_nat_prepare_to_resume (lp
);
2209 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2210 lp
->step
, GDB_SIGNAL_0
);
2214 /* Handle a GNU/Linux extended wait response. If we see a clone
2215 event, we need to add the new LWP to our list (and not report the
2216 trap to higher layers). This function returns non-zero if the
2217 event should be ignored and we should wait again. If STOPPING is
2218 true, the new LWP remains stopped, otherwise it is continued. */
2221 linux_handle_extended_wait (struct lwp_info
*lp
, int status
,
2224 int pid
= GET_LWP (lp
->ptid
);
2225 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
2226 int event
= status
>> 16;
2228 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
2229 || event
== PTRACE_EVENT_CLONE
)
2231 unsigned long new_pid
;
2234 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
2236 /* If we haven't already seen the new PID stop, wait for it now. */
2237 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
2239 /* The new child has a pending SIGSTOP. We can't affect it until it
2240 hits the SIGSTOP, but we're already attached. */
2241 ret
= my_waitpid (new_pid
, &status
,
2242 (event
== PTRACE_EVENT_CLONE
) ? __WCLONE
: 0);
2244 perror_with_name (_("waiting for new child"));
2245 else if (ret
!= new_pid
)
2246 internal_error (__FILE__
, __LINE__
,
2247 _("wait returned unexpected PID %d"), ret
);
2248 else if (!WIFSTOPPED (status
))
2249 internal_error (__FILE__
, __LINE__
,
2250 _("wait returned unexpected status 0x%x"), status
);
2253 ourstatus
->value
.related_pid
= ptid_build (new_pid
, new_pid
, 0);
2255 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
)
2257 /* The arch-specific native code may need to know about new
2258 forks even if those end up never mapped to an
2260 if (linux_nat_new_fork
!= NULL
)
2261 linux_nat_new_fork (lp
, new_pid
);
2264 if (event
== PTRACE_EVENT_FORK
2265 && linux_fork_checkpointing_p (GET_PID (lp
->ptid
)))
2267 /* Handle checkpointing by linux-fork.c here as a special
2268 case. We don't want the follow-fork-mode or 'catch fork'
2269 to interfere with this. */
2271 /* This won't actually modify the breakpoint list, but will
2272 physically remove the breakpoints from the child. */
2273 detach_breakpoints (ptid_build (new_pid
, new_pid
, 0));
2275 /* Retain child fork in ptrace (stopped) state. */
2276 if (!find_fork_pid (new_pid
))
2279 /* Report as spurious, so that infrun doesn't want to follow
2280 this fork. We're actually doing an infcall in
2282 ourstatus
->kind
= TARGET_WAITKIND_SPURIOUS
;
2284 /* Report the stop to the core. */
2288 if (event
== PTRACE_EVENT_FORK
)
2289 ourstatus
->kind
= TARGET_WAITKIND_FORKED
;
2290 else if (event
== PTRACE_EVENT_VFORK
)
2291 ourstatus
->kind
= TARGET_WAITKIND_VFORKED
;
2294 struct lwp_info
*new_lp
;
2296 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2298 if (debug_linux_nat
)
2299 fprintf_unfiltered (gdb_stdlog
,
2300 "LHEW: Got clone event "
2301 "from LWP %d, new child is LWP %ld\n",
2304 new_lp
= add_lwp (BUILD_LWP (new_pid
, GET_PID (lp
->ptid
)));
2306 new_lp
->stopped
= 1;
2308 if (WSTOPSIG (status
) != SIGSTOP
)
2310 /* This can happen if someone starts sending signals to
2311 the new thread before it gets a chance to run, which
2312 have a lower number than SIGSTOP (e.g. SIGUSR1).
2313 This is an unlikely case, and harder to handle for
2314 fork / vfork than for clone, so we do not try - but
2315 we handle it for clone events here. We'll send
2316 the other signal on to the thread below. */
2318 new_lp
->signalled
= 1;
2322 struct thread_info
*tp
;
2324 /* When we stop for an event in some other thread, and
2325 pull the thread list just as this thread has cloned,
2326 we'll have seen the new thread in the thread_db list
2327 before handling the CLONE event (glibc's
2328 pthread_create adds the new thread to the thread list
2329 before clone'ing, and has the kernel fill in the
2330 thread's tid on the clone call with
2331 CLONE_PARENT_SETTID). If that happened, and the core
2332 had requested the new thread to stop, we'll have
2333 killed it with SIGSTOP. But since SIGSTOP is not an
2334 RT signal, it can only be queued once. We need to be
2335 careful to not resume the LWP if we wanted it to
2336 stop. In that case, we'll leave the SIGSTOP pending.
2337 It will later be reported as GDB_SIGNAL_0. */
2338 tp
= find_thread_ptid (new_lp
->ptid
);
2339 if (tp
!= NULL
&& tp
->stop_requested
)
2340 new_lp
->last_resume_kind
= resume_stop
;
2347 /* Add the new thread to GDB's lists as soon as possible
2350 1) the frontend doesn't have to wait for a stop to
2353 2) we tag it with the correct running state. */
2355 /* If the thread_db layer is active, let it know about
2356 this new thread, and add it to GDB's list. */
2357 if (!thread_db_attach_lwp (new_lp
->ptid
))
2359 /* We're not using thread_db. Add it to GDB's
2361 target_post_attach (GET_LWP (new_lp
->ptid
));
2362 add_thread (new_lp
->ptid
);
2367 set_running (new_lp
->ptid
, 1);
2368 set_executing (new_lp
->ptid
, 1);
2369 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2371 new_lp
->last_resume_kind
= resume_continue
;
2377 /* We created NEW_LP so it cannot yet contain STATUS. */
2378 gdb_assert (new_lp
->status
== 0);
2380 /* Save the wait status to report later. */
2381 if (debug_linux_nat
)
2382 fprintf_unfiltered (gdb_stdlog
,
2383 "LHEW: waitpid of new LWP %ld, "
2384 "saving status %s\n",
2385 (long) GET_LWP (new_lp
->ptid
),
2386 status_to_str (status
));
2387 new_lp
->status
= status
;
2390 /* Note the need to use the low target ops to resume, to
2391 handle resuming with PT_SYSCALL if we have syscall
2395 new_lp
->resumed
= 1;
2399 gdb_assert (new_lp
->last_resume_kind
== resume_continue
);
2400 if (debug_linux_nat
)
2401 fprintf_unfiltered (gdb_stdlog
,
2402 "LHEW: resuming new LWP %ld\n",
2403 GET_LWP (new_lp
->ptid
));
2404 if (linux_nat_prepare_to_resume
!= NULL
)
2405 linux_nat_prepare_to_resume (new_lp
);
2406 linux_ops
->to_resume (linux_ops
, pid_to_ptid (new_pid
),
2408 new_lp
->stopped
= 0;
2412 if (debug_linux_nat
)
2413 fprintf_unfiltered (gdb_stdlog
,
2414 "LHEW: resuming parent LWP %d\n", pid
);
2415 if (linux_nat_prepare_to_resume
!= NULL
)
2416 linux_nat_prepare_to_resume (lp
);
2417 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2426 if (event
== PTRACE_EVENT_EXEC
)
2428 if (debug_linux_nat
)
2429 fprintf_unfiltered (gdb_stdlog
,
2430 "LHEW: Got exec event from LWP %ld\n",
2431 GET_LWP (lp
->ptid
));
2433 ourstatus
->kind
= TARGET_WAITKIND_EXECD
;
2434 ourstatus
->value
.execd_pathname
2435 = xstrdup (linux_child_pid_to_exec_file (pid
));
2440 if (event
== PTRACE_EVENT_VFORK_DONE
)
2442 if (current_inferior ()->waiting_for_vfork_done
)
2444 if (debug_linux_nat
)
2445 fprintf_unfiltered (gdb_stdlog
,
2446 "LHEW: Got expected PTRACE_EVENT_"
2447 "VFORK_DONE from LWP %ld: stopping\n",
2448 GET_LWP (lp
->ptid
));
2450 ourstatus
->kind
= TARGET_WAITKIND_VFORK_DONE
;
2454 if (debug_linux_nat
)
2455 fprintf_unfiltered (gdb_stdlog
,
2456 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2457 "from LWP %ld: resuming\n",
2458 GET_LWP (lp
->ptid
));
2459 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2463 internal_error (__FILE__
, __LINE__
,
2464 _("unknown ptrace event %d"), event
);
2467 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2471 wait_lwp (struct lwp_info
*lp
)
2475 int thread_dead
= 0;
2478 gdb_assert (!lp
->stopped
);
2479 gdb_assert (lp
->status
== 0);
2481 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2482 block_child_signals (&prev_mask
);
2486 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2487 was right and we should just call sigsuspend. */
2489 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, WNOHANG
);
2490 if (pid
== -1 && errno
== ECHILD
)
2491 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, __WCLONE
| WNOHANG
);
2492 if (pid
== -1 && errno
== ECHILD
)
2494 /* The thread has previously exited. We need to delete it
2495 now because, for some vendor 2.4 kernels with NPTL
2496 support backported, there won't be an exit event unless
2497 it is the main thread. 2.6 kernels will report an exit
2498 event for each thread that exits, as expected. */
2500 if (debug_linux_nat
)
2501 fprintf_unfiltered (gdb_stdlog
, "WL: %s vanished.\n",
2502 target_pid_to_str (lp
->ptid
));
2507 /* Bugs 10970, 12702.
2508 Thread group leader may have exited in which case we'll lock up in
2509 waitpid if there are other threads, even if they are all zombies too.
2510 Basically, we're not supposed to use waitpid this way.
2511 __WCLONE is not applicable for the leader so we can't use that.
2512 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2513 process; it gets ESRCH both for the zombie and for running processes.
2515 As a workaround, check if we're waiting for the thread group leader and
2516 if it's a zombie, and avoid calling waitpid if it is.
2518 This is racy, what if the tgl becomes a zombie right after we check?
2519 Therefore always use WNOHANG with sigsuspend - it is equivalent to
2520 waiting waitpid but linux_proc_pid_is_zombie is safe this way. */
2522 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
)
2523 && linux_proc_pid_is_zombie (GET_LWP (lp
->ptid
)))
2526 if (debug_linux_nat
)
2527 fprintf_unfiltered (gdb_stdlog
,
2528 "WL: Thread group leader %s vanished.\n",
2529 target_pid_to_str (lp
->ptid
));
2533 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2534 get invoked despite our caller had them intentionally blocked by
2535 block_child_signals. This is sensitive only to the loop of
2536 linux_nat_wait_1 and there if we get called my_waitpid gets called
2537 again before it gets to sigsuspend so we can safely let the handlers
2538 get executed here. */
2540 sigsuspend (&suspend_mask
);
2543 restore_child_signals_mask (&prev_mask
);
2547 gdb_assert (pid
== GET_LWP (lp
->ptid
));
2549 if (debug_linux_nat
)
2551 fprintf_unfiltered (gdb_stdlog
,
2552 "WL: waitpid %s received %s\n",
2553 target_pid_to_str (lp
->ptid
),
2554 status_to_str (status
));
2557 /* Check if the thread has exited. */
2558 if (WIFEXITED (status
) || WIFSIGNALED (status
))
2561 if (debug_linux_nat
)
2562 fprintf_unfiltered (gdb_stdlog
, "WL: %s exited.\n",
2563 target_pid_to_str (lp
->ptid
));
2573 gdb_assert (WIFSTOPPED (status
));
2575 /* Handle GNU/Linux's syscall SIGTRAPs. */
2576 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
2578 /* No longer need the sysgood bit. The ptrace event ends up
2579 recorded in lp->waitstatus if we care for it. We can carry
2580 on handling the event like a regular SIGTRAP from here
2582 status
= W_STOPCODE (SIGTRAP
);
2583 if (linux_handle_syscall_trap (lp
, 1))
2584 return wait_lwp (lp
);
2587 /* Handle GNU/Linux's extended waitstatus for trace events. */
2588 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
2590 if (debug_linux_nat
)
2591 fprintf_unfiltered (gdb_stdlog
,
2592 "WL: Handling extended status 0x%06x\n",
2594 if (linux_handle_extended_wait (lp
, status
, 1))
2595 return wait_lwp (lp
);
2601 /* Send a SIGSTOP to LP. */
2604 stop_callback (struct lwp_info
*lp
, void *data
)
2606 if (!lp
->stopped
&& !lp
->signalled
)
2610 if (debug_linux_nat
)
2612 fprintf_unfiltered (gdb_stdlog
,
2613 "SC: kill %s **<SIGSTOP>**\n",
2614 target_pid_to_str (lp
->ptid
));
2617 ret
= kill_lwp (GET_LWP (lp
->ptid
), SIGSTOP
);
2618 if (debug_linux_nat
)
2620 fprintf_unfiltered (gdb_stdlog
,
2621 "SC: lwp kill %d %s\n",
2623 errno
? safe_strerror (errno
) : "ERRNO-OK");
2627 gdb_assert (lp
->status
== 0);
2633 /* Request a stop on LWP. */
2636 linux_stop_lwp (struct lwp_info
*lwp
)
2638 stop_callback (lwp
, NULL
);
2641 /* Return non-zero if LWP PID has a pending SIGINT. */
2644 linux_nat_has_pending_sigint (int pid
)
2646 sigset_t pending
, blocked
, ignored
;
2648 linux_proc_pending_signals (pid
, &pending
, &blocked
, &ignored
);
2650 if (sigismember (&pending
, SIGINT
)
2651 && !sigismember (&ignored
, SIGINT
))
2657 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2660 set_ignore_sigint (struct lwp_info
*lp
, void *data
)
2662 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2663 flag to consume the next one. */
2664 if (lp
->stopped
&& lp
->status
!= 0 && WIFSTOPPED (lp
->status
)
2665 && WSTOPSIG (lp
->status
) == SIGINT
)
2668 lp
->ignore_sigint
= 1;
2673 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2674 This function is called after we know the LWP has stopped; if the LWP
2675 stopped before the expected SIGINT was delivered, then it will never have
2676 arrived. Also, if the signal was delivered to a shared queue and consumed
2677 by a different thread, it will never be delivered to this LWP. */
2680 maybe_clear_ignore_sigint (struct lwp_info
*lp
)
2682 if (!lp
->ignore_sigint
)
2685 if (!linux_nat_has_pending_sigint (GET_LWP (lp
->ptid
)))
2687 if (debug_linux_nat
)
2688 fprintf_unfiltered (gdb_stdlog
,
2689 "MCIS: Clearing bogus flag for %s\n",
2690 target_pid_to_str (lp
->ptid
));
2691 lp
->ignore_sigint
= 0;
2695 /* Fetch the possible triggered data watchpoint info and store it in
2698 On some archs, like x86, that use debug registers to set
2699 watchpoints, it's possible that the way to know which watched
2700 address trapped, is to check the register that is used to select
2701 which address to watch. Problem is, between setting the watchpoint
2702 and reading back which data address trapped, the user may change
2703 the set of watchpoints, and, as a consequence, GDB changes the
2704 debug registers in the inferior. To avoid reading back a stale
2705 stopped-data-address when that happens, we cache in LP the fact
2706 that a watchpoint trapped, and the corresponding data address, as
2707 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2708 registers meanwhile, we have the cached data we can rely on. */
2711 save_sigtrap (struct lwp_info
*lp
)
2713 struct cleanup
*old_chain
;
2715 if (linux_ops
->to_stopped_by_watchpoint
== NULL
)
2717 lp
->stopped_by_watchpoint
= 0;
2721 old_chain
= save_inferior_ptid ();
2722 inferior_ptid
= lp
->ptid
;
2724 lp
->stopped_by_watchpoint
= linux_ops
->to_stopped_by_watchpoint ();
2726 if (lp
->stopped_by_watchpoint
)
2728 if (linux_ops
->to_stopped_data_address
!= NULL
)
2729 lp
->stopped_data_address_p
=
2730 linux_ops
->to_stopped_data_address (¤t_target
,
2731 &lp
->stopped_data_address
);
2733 lp
->stopped_data_address_p
= 0;
2736 do_cleanups (old_chain
);
2739 /* See save_sigtrap. */
2742 linux_nat_stopped_by_watchpoint (void)
2744 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2746 gdb_assert (lp
!= NULL
);
2748 return lp
->stopped_by_watchpoint
;
2752 linux_nat_stopped_data_address (struct target_ops
*ops
, CORE_ADDR
*addr_p
)
2754 struct lwp_info
*lp
= find_lwp_pid (inferior_ptid
);
2756 gdb_assert (lp
!= NULL
);
2758 *addr_p
= lp
->stopped_data_address
;
2760 return lp
->stopped_data_address_p
;
/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  */

static int
sigtrap_is_event (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
}

/* SIGTRAP-like events recognizer.  Defaults to the plain SIGTRAP
   check; can be overridden via linux_nat_set_status_is_event.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2775 /* Check for SIGTRAP-like events in LP. */
2778 linux_nat_lp_status_is_event (struct lwp_info
*lp
)
2780 /* We check for lp->waitstatus in addition to lp->status, because we can
2781 have pending process exits recorded in lp->status
2782 and W_EXITCODE(0,0) == 0. We should probably have an additional
2783 lp->status_p flag. */
2785 return (lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
2786 && linux_nat_status_is_event (lp
->status
));
2789 /* Set alternative SIGTRAP-like events recognizer. If
2790 breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
2794 linux_nat_set_status_is_event (struct target_ops
*t
,
2795 int (*status_is_event
) (int status
))
2797 linux_nat_status_is_event
= status_is_event
;
2800 /* Wait until LP is stopped. */
2803 stop_wait_callback (struct lwp_info
*lp
, void *data
)
2805 struct inferior
*inf
= find_inferior_pid (GET_PID (lp
->ptid
));
2807 /* If this is a vfork parent, bail out, it is not going to report
2808 any SIGSTOP until the vfork is done with. */
2809 if (inf
->vfork_child
!= NULL
)
2816 status
= wait_lwp (lp
);
2820 if (lp
->ignore_sigint
&& WIFSTOPPED (status
)
2821 && WSTOPSIG (status
) == SIGINT
)
2823 lp
->ignore_sigint
= 0;
2826 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2827 if (debug_linux_nat
)
2828 fprintf_unfiltered (gdb_stdlog
,
2829 "PTRACE_CONT %s, 0, 0 (%s) "
2830 "(discarding SIGINT)\n",
2831 target_pid_to_str (lp
->ptid
),
2832 errno
? safe_strerror (errno
) : "OK");
2834 return stop_wait_callback (lp
, NULL
);
2837 maybe_clear_ignore_sigint (lp
);
2839 if (WSTOPSIG (status
) != SIGSTOP
)
2841 /* The thread was stopped with a signal other than SIGSTOP. */
2845 if (debug_linux_nat
)
2846 fprintf_unfiltered (gdb_stdlog
,
2847 "SWC: Pending event %s in %s\n",
2848 status_to_str ((int) status
),
2849 target_pid_to_str (lp
->ptid
));
2851 /* Save the sigtrap event. */
2852 lp
->status
= status
;
2853 gdb_assert (!lp
->stopped
);
2854 gdb_assert (lp
->signalled
);
2859 /* We caught the SIGSTOP that we intended to catch, so
2860 there's no SIGSTOP pending. */
2862 if (debug_linux_nat
)
2863 fprintf_unfiltered (gdb_stdlog
,
2864 "SWC: Delayed SIGSTOP caught for %s.\n",
2865 target_pid_to_str (lp
->ptid
));
2869 /* Reset SIGNALLED only after the stop_wait_callback call
2870 above as it does gdb_assert on SIGNALLED. */
2878 /* Return non-zero if LP has a wait status pending. */
2881 status_callback (struct lwp_info
*lp
, void *data
)
2883 /* Only report a pending wait status if we pretend that this has
2884 indeed been resumed. */
2888 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
2890 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2891 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2892 0', so a clean process exit can not be stored pending in
2893 lp->status, it is indistinguishable from
2894 no-pending-status. */
2898 if (lp
->status
!= 0)
2904 /* Return non-zero if LP isn't stopped. */
2907 running_callback (struct lwp_info
*lp
, void *data
)
2909 return (!lp
->stopped
2910 || ((lp
->status
!= 0
2911 || lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
2915 /* Count the LWP's that have had events. */
2918 count_events_callback (struct lwp_info
*lp
, void *data
)
2922 gdb_assert (count
!= NULL
);
2924 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2925 if (lp
->resumed
&& linux_nat_lp_status_is_event (lp
))
2931 /* Select the LWP (if any) that is currently being single-stepped. */
2934 select_singlestep_lwp_callback (struct lwp_info
*lp
, void *data
)
2936 if (lp
->last_resume_kind
== resume_step
2943 /* Select the Nth LWP that has had a SIGTRAP event. */
2946 select_event_lwp_callback (struct lwp_info
*lp
, void *data
)
2948 int *selector
= data
;
2950 gdb_assert (selector
!= NULL
);
2952 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2953 if (lp
->resumed
&& linux_nat_lp_status_is_event (lp
))
2954 if ((*selector
)-- == 0)
2961 cancel_breakpoint (struct lwp_info
*lp
)
2963 /* Arrange for a breakpoint to be hit again later. We don't keep
2964 the SIGTRAP status and don't forward the SIGTRAP signal to the
2965 LWP. We will handle the current event, eventually we will resume
2966 this LWP, and this breakpoint will trap again.
2968 If we do not do this, then we run the risk that the user will
2969 delete or disable the breakpoint, but the LWP will have already
2972 struct regcache
*regcache
= get_thread_regcache (lp
->ptid
);
2973 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2976 pc
= regcache_read_pc (regcache
) - gdbarch_decr_pc_after_break (gdbarch
);
2977 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache
), pc
))
2979 if (debug_linux_nat
)
2980 fprintf_unfiltered (gdb_stdlog
,
2981 "CB: Push back breakpoint for %s\n",
2982 target_pid_to_str (lp
->ptid
));
2984 /* Back up the PC if necessary. */
2985 if (gdbarch_decr_pc_after_break (gdbarch
))
2986 regcache_write_pc (regcache
, pc
);
2994 cancel_breakpoints_callback (struct lwp_info
*lp
, void *data
)
2996 struct lwp_info
*event_lp
= data
;
2998 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3002 /* If a LWP other than the LWP that we're reporting an event for has
3003 hit a GDB breakpoint (as opposed to some random trap signal),
3004 then just arrange for it to hit it again later. We don't keep
3005 the SIGTRAP status and don't forward the SIGTRAP signal to the
3006 LWP. We will handle the current event, eventually we will resume
3007 all LWPs, and this one will get its breakpoint trap again.
3009 If we do not do this, then we run the risk that the user will
3010 delete or disable the breakpoint, but the LWP will have already
3013 if (linux_nat_lp_status_is_event (lp
)
3014 && cancel_breakpoint (lp
))
3015 /* Throw away the SIGTRAP. */
3021 /* Select one LWP out of those that have events pending. */
3024 select_event_lwp (ptid_t filter
, struct lwp_info
**orig_lp
, int *status
)
3027 int random_selector
;
3028 struct lwp_info
*event_lp
;
3030 /* Record the wait status for the original LWP. */
3031 (*orig_lp
)->status
= *status
;
3033 /* Give preference to any LWP that is being single-stepped. */
3034 event_lp
= iterate_over_lwps (filter
,
3035 select_singlestep_lwp_callback
, NULL
);
3036 if (event_lp
!= NULL
)
3038 if (debug_linux_nat
)
3039 fprintf_unfiltered (gdb_stdlog
,
3040 "SEL: Select single-step %s\n",
3041 target_pid_to_str (event_lp
->ptid
));
3045 /* No single-stepping LWP. Select one at random, out of those
3046 which have had SIGTRAP events. */
3048 /* First see how many SIGTRAP events we have. */
3049 iterate_over_lwps (filter
, count_events_callback
, &num_events
);
3051 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3052 random_selector
= (int)
3053 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
3055 if (debug_linux_nat
&& num_events
> 1)
3056 fprintf_unfiltered (gdb_stdlog
,
3057 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3058 num_events
, random_selector
);
3060 event_lp
= iterate_over_lwps (filter
,
3061 select_event_lwp_callback
,
3065 if (event_lp
!= NULL
)
3067 /* Switch the event LWP. */
3068 *orig_lp
= event_lp
;
3069 *status
= event_lp
->status
;
3072 /* Flush the wait status for the event LWP. */
3073 (*orig_lp
)->status
= 0;
3076 /* Return non-zero if LP has been resumed. */
3079 resumed_callback (struct lwp_info
*lp
, void *data
)
3084 /* Stop an active thread, verify it still exists, then resume it. If
3085 the thread ends up with a pending status, then it is not resumed,
3086 and *DATA (really a pointer to int), is set. */
3089 stop_and_resume_callback (struct lwp_info
*lp
, void *data
)
3091 int *new_pending_p
= data
;
3095 ptid_t ptid
= lp
->ptid
;
3097 stop_callback (lp
, NULL
);
3098 stop_wait_callback (lp
, NULL
);
3100 /* Resume if the lwp still exists, and the core wanted it
3102 lp
= find_lwp_pid (ptid
);
3105 if (lp
->last_resume_kind
== resume_stop
3108 /* The core wanted the LWP to stop. Even if it stopped
3109 cleanly (with SIGSTOP), leave the event pending. */
3110 if (debug_linux_nat
)
3111 fprintf_unfiltered (gdb_stdlog
,
3112 "SARC: core wanted LWP %ld stopped "
3113 "(leaving SIGSTOP pending)\n",
3114 GET_LWP (lp
->ptid
));
3115 lp
->status
= W_STOPCODE (SIGSTOP
);
3118 if (lp
->status
== 0)
3120 if (debug_linux_nat
)
3121 fprintf_unfiltered (gdb_stdlog
,
3122 "SARC: re-resuming LWP %ld\n",
3123 GET_LWP (lp
->ptid
));
3124 resume_lwp (lp
, lp
->step
, GDB_SIGNAL_0
);
3128 if (debug_linux_nat
)
3129 fprintf_unfiltered (gdb_stdlog
,
3130 "SARC: not re-resuming LWP %ld "
3132 GET_LWP (lp
->ptid
));
3141 /* Check if we should go on and pass this event to common code.
3142 Return the affected lwp if we are, or NULL otherwise. If we stop
3143 all lwps temporarily, we may end up with new pending events in some
3144 other lwp. In that case set *NEW_PENDING_P to true. */
3146 static struct lwp_info
*
3147 linux_nat_filter_event (int lwpid
, int status
, int *new_pending_p
)
3149 struct lwp_info
*lp
;
3153 lp
= find_lwp_pid (pid_to_ptid (lwpid
));
3155 /* Check for stop events reported by a process we didn't already
3156 know about - anything not already in our LWP list.
3158 If we're expecting to receive stopped processes after
3159 fork, vfork, and clone events, then we'll just add the
3160 new one to our list and go back to waiting for the event
3161 to be reported - the stopped process might be returned
3162 from waitpid before or after the event is.
3164 But note the case of a non-leader thread exec'ing after the
3165 leader having exited, and gone from our lists. The non-leader
3166 thread changes its tid to the tgid. */
3168 if (WIFSTOPPED (status
) && lp
== NULL
3169 && (WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 == PTRACE_EVENT_EXEC
))
3171 /* A multi-thread exec after we had seen the leader exiting. */
3172 if (debug_linux_nat
)
3173 fprintf_unfiltered (gdb_stdlog
,
3174 "LLW: Re-adding thread group leader LWP %d.\n",
3177 lp
= add_lwp (BUILD_LWP (lwpid
, lwpid
));
3180 add_thread (lp
->ptid
);
3183 if (WIFSTOPPED (status
) && !lp
)
3185 add_to_pid_list (&stopped_pids
, lwpid
, status
);
3189 /* Make sure we don't report an event for the exit of an LWP not in
3190 our list, i.e. not part of the current process. This can happen
3191 if we detach from a program we originally forked and then it
3193 if (!WIFSTOPPED (status
) && !lp
)
3196 /* Handle GNU/Linux's syscall SIGTRAPs. */
3197 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SYSCALL_SIGTRAP
)
3199 /* No longer need the sysgood bit. The ptrace event ends up
3200 recorded in lp->waitstatus if we care for it. We can carry
3201 on handling the event like a regular SIGTRAP from here
3203 status
= W_STOPCODE (SIGTRAP
);
3204 if (linux_handle_syscall_trap (lp
, 0))
3208 /* Handle GNU/Linux's extended waitstatus for trace events. */
3209 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
3211 if (debug_linux_nat
)
3212 fprintf_unfiltered (gdb_stdlog
,
3213 "LLW: Handling extended status 0x%06x\n",
3215 if (linux_handle_extended_wait (lp
, status
, 0))
3219 if (linux_nat_status_is_event (status
))
3222 /* Check if the thread has exited. */
3223 if ((WIFEXITED (status
) || WIFSIGNALED (status
))
3224 && num_lwps (GET_PID (lp
->ptid
)) > 1)
3226 /* If this is the main thread, we must stop all threads and verify
3227 if they are still alive. This is because in the nptl thread model
3228 on Linux 2.4, there is no signal issued for exiting LWPs
3229 other than the main thread. We only get the main thread exit
3230 signal once all child threads have already exited. If we
3231 stop all the threads and use the stop_wait_callback to check
3232 if they have exited we can determine whether this signal
3233 should be ignored or whether it means the end of the debugged
3234 application, regardless of which threading model is being
3236 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
))
3239 iterate_over_lwps (pid_to_ptid (GET_PID (lp
->ptid
)),
3240 stop_and_resume_callback
, new_pending_p
);
3243 if (debug_linux_nat
)
3244 fprintf_unfiltered (gdb_stdlog
,
3245 "LLW: %s exited.\n",
3246 target_pid_to_str (lp
->ptid
));
3248 if (num_lwps (GET_PID (lp
->ptid
)) > 1)
3250 /* If there is at least one more LWP, then the exit signal
3251 was not the end of the debugged application and should be
3258 /* Check if the current LWP has previously exited. In the nptl
3259 thread model, LWPs other than the main thread do not issue
3260 signals when they exit so we must check whenever the thread has
3261 stopped. A similar check is made in stop_wait_callback(). */
3262 if (num_lwps (GET_PID (lp
->ptid
)) > 1 && !linux_thread_alive (lp
->ptid
))
3264 ptid_t ptid
= pid_to_ptid (GET_PID (lp
->ptid
));
3266 if (debug_linux_nat
)
3267 fprintf_unfiltered (gdb_stdlog
,
3268 "LLW: %s exited.\n",
3269 target_pid_to_str (lp
->ptid
));
3273 /* Make sure there is at least one thread running. */
3274 gdb_assert (iterate_over_lwps (ptid
, running_callback
, NULL
));
3276 /* Discard the event. */
3280 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3281 an attempt to stop an LWP. */
3283 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
3285 if (debug_linux_nat
)
3286 fprintf_unfiltered (gdb_stdlog
,
3287 "LLW: Delayed SIGSTOP caught for %s.\n",
3288 target_pid_to_str (lp
->ptid
));
3292 if (lp
->last_resume_kind
!= resume_stop
)
3294 /* This is a delayed SIGSTOP. */
3296 registers_changed ();
3298 if (linux_nat_prepare_to_resume
!= NULL
)
3299 linux_nat_prepare_to_resume (lp
);
3300 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3301 lp
->step
, GDB_SIGNAL_0
);
3302 if (debug_linux_nat
)
3303 fprintf_unfiltered (gdb_stdlog
,
3304 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3306 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3307 target_pid_to_str (lp
->ptid
));
3310 gdb_assert (lp
->resumed
);
3312 /* Discard the event. */
3317 /* Make sure we don't report a SIGINT that we have already displayed
3318 for another thread. */
3319 if (lp
->ignore_sigint
3320 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGINT
)
3322 if (debug_linux_nat
)
3323 fprintf_unfiltered (gdb_stdlog
,
3324 "LLW: Delayed SIGINT caught for %s.\n",
3325 target_pid_to_str (lp
->ptid
));
3327 /* This is a delayed SIGINT. */
3328 lp
->ignore_sigint
= 0;
3330 registers_changed ();
3331 if (linux_nat_prepare_to_resume
!= NULL
)
3332 linux_nat_prepare_to_resume (lp
);
3333 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3334 lp
->step
, GDB_SIGNAL_0
);
3335 if (debug_linux_nat
)
3336 fprintf_unfiltered (gdb_stdlog
,
3337 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3339 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3340 target_pid_to_str (lp
->ptid
));
3343 gdb_assert (lp
->resumed
);
3345 /* Discard the event. */
3349 /* An interesting event. */
3351 lp
->status
= status
;
3355 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3356 their exits until all other threads in the group have exited. */
3359 check_zombie_leaders (void)
3361 struct inferior
*inf
;
3365 struct lwp_info
*leader_lp
;
3370 leader_lp
= find_lwp_pid (pid_to_ptid (inf
->pid
));
3371 if (leader_lp
!= NULL
3372 /* Check if there are other threads in the group, as we may
3373 have raced with the inferior simply exiting. */
3374 && num_lwps (inf
->pid
) > 1
3375 && linux_proc_pid_is_zombie (inf
->pid
))
3377 if (debug_linux_nat
)
3378 fprintf_unfiltered (gdb_stdlog
,
3379 "CZL: Thread group leader %d zombie "
3380 "(it exited, or another thread execd).\n",
3383 /* A leader zombie can mean one of two things:
3385 - It exited, and there's an exit status pending
3386 available, or only the leader exited (not the whole
3387 program). In the latter case, we can't waitpid the
3388 leader's exit status until all other threads are gone.
3390 - There are 3 or more threads in the group, and a thread
3391 other than the leader exec'd. On an exec, the Linux
3392 kernel destroys all other threads (except the execing
3393 one) in the thread group, and resets the execing thread's
3394 tid to the tgid. No exit notification is sent for the
3395 execing thread -- from the ptracer's perspective, it
3396 appears as though the execing thread just vanishes.
3397 Until we reap all other threads except the leader and the
3398 execing thread, the leader will be zombie, and the
3399 execing thread will be in `D (disc sleep)'. As soon as
3400 all other threads are reaped, the execing thread changes
3401 it's tid to the tgid, and the previous (zombie) leader
3402 vanishes, giving place to the "new" leader. We could try
3403 distinguishing the exit and exec cases, by waiting once
3404 more, and seeing if something comes out, but it doesn't
3405 sound useful. The previous leader _does_ go away, and
3406 we'll re-add the new one once we see the exec event
3407 (which is just the same as what would happen if the
3408 previous leader did exit voluntarily before some other
3411 if (debug_linux_nat
)
3412 fprintf_unfiltered (gdb_stdlog
,
3413 "CZL: Thread group leader %d vanished.\n",
3415 exit_lwp (leader_lp
);
3421 linux_nat_wait_1 (struct target_ops
*ops
,
3422 ptid_t ptid
, struct target_waitstatus
*ourstatus
,
3425 static sigset_t prev_mask
;
3426 enum resume_kind last_resume_kind
;
3427 struct lwp_info
*lp
;
3430 if (debug_linux_nat
)
3431 fprintf_unfiltered (gdb_stdlog
, "LLW: enter\n");
3433 /* The first time we get here after starting a new inferior, we may
3434 not have added it to the LWP list yet - this is the earliest
3435 moment at which we know its PID. */
3436 if (ptid_is_pid (inferior_ptid
))
3438 /* Upgrade the main thread's ptid. */
3439 thread_change_ptid (inferior_ptid
,
3440 BUILD_LWP (GET_PID (inferior_ptid
),
3441 GET_PID (inferior_ptid
)));
3443 lp
= add_initial_lwp (inferior_ptid
);
3447 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
3448 block_child_signals (&prev_mask
);
3454 /* First check if there is a LWP with a wait status pending. */
3455 if (ptid_equal (ptid
, minus_one_ptid
) || ptid_is_pid (ptid
))
3457 /* Any LWP in the PTID group that's been resumed will do. */
3458 lp
= iterate_over_lwps (ptid
, status_callback
, NULL
);
3461 if (debug_linux_nat
&& lp
->status
)
3462 fprintf_unfiltered (gdb_stdlog
,
3463 "LLW: Using pending wait status %s for %s.\n",
3464 status_to_str (lp
->status
),
3465 target_pid_to_str (lp
->ptid
));
3468 else if (is_lwp (ptid
))
3470 if (debug_linux_nat
)
3471 fprintf_unfiltered (gdb_stdlog
,
3472 "LLW: Waiting for specific LWP %s.\n",
3473 target_pid_to_str (ptid
));
3475 /* We have a specific LWP to check. */
3476 lp
= find_lwp_pid (ptid
);
3479 if (debug_linux_nat
&& lp
->status
)
3480 fprintf_unfiltered (gdb_stdlog
,
3481 "LLW: Using pending wait status %s for %s.\n",
3482 status_to_str (lp
->status
),
3483 target_pid_to_str (lp
->ptid
));
3485 /* We check for lp->waitstatus in addition to lp->status,
3486 because we can have pending process exits recorded in
3487 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3488 an additional lp->status_p flag. */
3489 if (lp
->status
== 0 && lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
)
3493 if (!target_can_async_p ())
3495 /* Causes SIGINT to be passed on to the attached process. */
3499 /* But if we don't find a pending event, we'll have to wait. */
3505 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3508 - If the thread group leader exits while other threads in the
3509 thread group still exist, waitpid(TGID, ...) hangs. That
3510 waitpid won't return an exit status until the other threads
3511 in the group are reaped.
3513 - When a non-leader thread execs, that thread just vanishes
3514 without reporting an exit (so we'd hang if we waited for it
3515 explicitly in that case). The exec event is reported to
3519 lwpid
= my_waitpid (-1, &status
, __WCLONE
| WNOHANG
);
3520 if (lwpid
== 0 || (lwpid
== -1 && errno
== ECHILD
))
3521 lwpid
= my_waitpid (-1, &status
, WNOHANG
);
3523 if (debug_linux_nat
)
3524 fprintf_unfiltered (gdb_stdlog
,
3525 "LNW: waitpid(-1, ...) returned %d, %s\n",
3526 lwpid
, errno
? safe_strerror (errno
) : "ERRNO-OK");
3530 /* If this is true, then we paused LWPs momentarily, and may
3531 now have pending events to handle. */
3534 if (debug_linux_nat
)
3536 fprintf_unfiltered (gdb_stdlog
,
3537 "LLW: waitpid %ld received %s\n",
3538 (long) lwpid
, status_to_str (status
));
3541 lp
= linux_nat_filter_event (lwpid
, status
, &new_pending
);
3543 /* STATUS is now no longer valid, use LP->STATUS instead. */
3546 if (lp
&& !ptid_match (lp
->ptid
, ptid
))
3548 gdb_assert (lp
->resumed
);
3550 if (debug_linux_nat
)
3552 "LWP %ld got an event %06x, leaving pending.\n",
3553 ptid_get_lwp (lp
->ptid
), lp
->status
);
3555 if (WIFSTOPPED (lp
->status
))
3557 if (WSTOPSIG (lp
->status
) != SIGSTOP
)
3559 /* Cancel breakpoint hits. The breakpoint may
3560 be removed before we fetch events from this
3561 process to report to the core. It is best
3562 not to assume the moribund breakpoints
3563 heuristic always handles these cases --- it
3564 could be too many events go through to the
3565 core before this one is handled. All-stop
3566 always cancels breakpoint hits in all
3569 && linux_nat_lp_status_is_event (lp
)
3570 && cancel_breakpoint (lp
))
3572 /* Throw away the SIGTRAP. */
3575 if (debug_linux_nat
)
3577 "LLW: LWP %ld hit a breakpoint while"
3578 " waiting for another process;"
3580 ptid_get_lwp (lp
->ptid
));
3590 else if (WIFEXITED (lp
->status
) || WIFSIGNALED (lp
->status
))
3592 if (debug_linux_nat
)
3594 "Process %ld exited while stopping LWPs\n",
3595 ptid_get_lwp (lp
->ptid
));
3597 /* This was the last lwp in the process. Since
3598 events are serialized to GDB core, and we can't
3599 report this one right now, but GDB core and the
3600 other target layers will want to be notified
3601 about the exit code/signal, leave the status
3602 pending for the next time we're able to report
3605 /* Prevent trying to stop this thread again. We'll
3606 never try to resume it because it has a pending
3610 /* Dead LWP's aren't expected to report a pending
3614 /* Store the pending event in the waitstatus as
3615 well, because W_EXITCODE(0,0) == 0. */
3616 store_waitstatus (&lp
->waitstatus
, lp
->status
);
3625 /* Some LWP now has a pending event. Go all the way
3626 back to check it. */
3632 /* We got an event to report to the core. */
3636 /* Retry until nothing comes out of waitpid. A single
3637 SIGCHLD can indicate more than one child stopped. */
3641 /* Check for zombie thread group leaders. Those can't be reaped
3642 until all other threads in the thread group are. */
3643 check_zombie_leaders ();
3645 /* If there are no resumed children left, bail. We'd be stuck
3646 forever in the sigsuspend call below otherwise. */
3647 if (iterate_over_lwps (ptid
, resumed_callback
, NULL
) == NULL
)
3649 if (debug_linux_nat
)
3650 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (no resumed LWP)\n");
3652 ourstatus
->kind
= TARGET_WAITKIND_NO_RESUMED
;
3654 if (!target_can_async_p ())
3655 clear_sigint_trap ();
3657 restore_child_signals_mask (&prev_mask
);
3658 return minus_one_ptid
;
3661 /* No interesting event to report to the core. */
3663 if (target_options
& TARGET_WNOHANG
)
3665 if (debug_linux_nat
)
3666 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (ignore)\n");
3668 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
3669 restore_child_signals_mask (&prev_mask
);
3670 return minus_one_ptid
;
3673 /* We shouldn't end up here unless we want to try again. */
3674 gdb_assert (lp
== NULL
);
3676 /* Block until we get an event reported with SIGCHLD. */
3677 sigsuspend (&suspend_mask
);
3680 if (!target_can_async_p ())
3681 clear_sigint_trap ();
3685 status
= lp
->status
;
3688 /* Don't report signals that GDB isn't interested in, such as
3689 signals that are neither printed nor stopped upon. Stopping all
3690 threads can be a bit time-consuming so if we want decent
3691 performance with heavily multi-threaded programs, especially when
3692 they're using a high frequency timer, we'd better avoid it if we
3695 if (WIFSTOPPED (status
))
3697 enum gdb_signal signo
= gdb_signal_from_host (WSTOPSIG (status
));
3699 /* When using hardware single-step, we need to report every signal.
3700 Otherwise, signals in pass_mask may be short-circuited. */
3702 && WSTOPSIG (status
) && sigismember (&pass_mask
, WSTOPSIG (status
)))
3704 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3705 here? It is not clear we should. GDB may not expect
3706 other threads to run. On the other hand, not resuming
3707 newly attached threads may cause an unwanted delay in
3708 getting them running. */
3709 registers_changed ();
3710 if (linux_nat_prepare_to_resume
!= NULL
)
3711 linux_nat_prepare_to_resume (lp
);
3712 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3714 if (debug_linux_nat
)
3715 fprintf_unfiltered (gdb_stdlog
,
3716 "LLW: %s %s, %s (preempt 'handle')\n",
3718 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3719 target_pid_to_str (lp
->ptid
),
3720 (signo
!= GDB_SIGNAL_0
3721 ? strsignal (gdb_signal_to_host (signo
))
3729 /* Only do the below in all-stop, as we currently use SIGINT
3730 to implement target_stop (see linux_nat_stop) in
3732 if (signo
== GDB_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
3734 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3735 forwarded to the entire process group, that is, all LWPs
3736 will receive it - unless they're using CLONE_THREAD to
3737 share signals. Since we only want to report it once, we
3738 mark it as ignored for all LWPs except this one. */
3739 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid
)),
3740 set_ignore_sigint
, NULL
);
3741 lp
->ignore_sigint
= 0;
3744 maybe_clear_ignore_sigint (lp
);
3748 /* This LWP is stopped now. */
3751 if (debug_linux_nat
)
3752 fprintf_unfiltered (gdb_stdlog
, "LLW: Candidate event %s in %s.\n",
3753 status_to_str (status
), target_pid_to_str (lp
->ptid
));
3757 /* Now stop all other LWP's ... */
3758 iterate_over_lwps (minus_one_ptid
, stop_callback
, NULL
);
3760 /* ... and wait until all of them have reported back that
3761 they're no longer running. */
3762 iterate_over_lwps (minus_one_ptid
, stop_wait_callback
, NULL
);
3764 /* If we're not waiting for a specific LWP, choose an event LWP
3765 from among those that have had events. Giving equal priority
3766 to all LWPs that have had events helps prevent
3768 if (ptid_equal (ptid
, minus_one_ptid
) || ptid_is_pid (ptid
))
3769 select_event_lwp (ptid
, &lp
, &status
);
3771 /* Now that we've selected our final event LWP, cancel any
3772 breakpoints in other LWPs that have hit a GDB breakpoint.
3773 See the comment in cancel_breakpoints_callback to find out
3775 iterate_over_lwps (minus_one_ptid
, cancel_breakpoints_callback
, lp
);
3777 /* We'll need this to determine whether to report a SIGSTOP as
3778 TARGET_WAITKIND_0. Need to take a copy because
3779 resume_clear_callback clears it. */
3780 last_resume_kind
= lp
->last_resume_kind
;
3782 /* In all-stop, from the core's perspective, all LWPs are now
3783 stopped until a new resume action is sent over. */
3784 iterate_over_lwps (minus_one_ptid
, resume_clear_callback
, NULL
);
3789 last_resume_kind
= lp
->last_resume_kind
;
3790 resume_clear_callback (lp
, NULL
);
3793 if (linux_nat_status_is_event (status
))
3795 if (debug_linux_nat
)
3796 fprintf_unfiltered (gdb_stdlog
,
3797 "LLW: trap ptid is %s.\n",
3798 target_pid_to_str (lp
->ptid
));
3801 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3803 *ourstatus
= lp
->waitstatus
;
3804 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3807 store_waitstatus (ourstatus
, status
);
3809 if (debug_linux_nat
)
3810 fprintf_unfiltered (gdb_stdlog
, "LLW: exit\n");
3812 restore_child_signals_mask (&prev_mask
);
3814 if (last_resume_kind
== resume_stop
3815 && ourstatus
->kind
== TARGET_WAITKIND_STOPPED
3816 && WSTOPSIG (status
) == SIGSTOP
)
3818 /* A thread that has been requested to stop by GDB with
3819 target_stop, and it stopped cleanly, so report as SIG0. The
3820 use of SIGSTOP is an implementation detail. */
3821 ourstatus
->value
.sig
= GDB_SIGNAL_0
;
3824 if (ourstatus
->kind
== TARGET_WAITKIND_EXITED
3825 || ourstatus
->kind
== TARGET_WAITKIND_SIGNALLED
)
3828 lp
->core
= linux_common_core_of_thread (lp
->ptid
);
3833 /* Resume LWPs that are currently stopped without any pending status
3834 to report, but are resumed from the core's perspective. */
3837 resume_stopped_resumed_lwps (struct lwp_info
*lp
, void *data
)
3839 ptid_t
*wait_ptid_p
= data
;
3844 && lp
->waitstatus
.kind
== TARGET_WAITKIND_IGNORE
)
3846 struct regcache
*regcache
= get_thread_regcache (lp
->ptid
);
3847 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3848 CORE_ADDR pc
= regcache_read_pc (regcache
);
3850 gdb_assert (is_executing (lp
->ptid
));
3852 /* Don't bother if there's a breakpoint at PC that we'd hit
3853 immediately, and we're not waiting for this LWP. */
3854 if (!ptid_match (lp
->ptid
, *wait_ptid_p
))
3856 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache
), pc
))
3860 if (debug_linux_nat
)
3861 fprintf_unfiltered (gdb_stdlog
,
3862 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3863 target_pid_to_str (lp
->ptid
),
3864 paddress (gdbarch
, pc
),
3867 registers_changed ();
3868 if (linux_nat_prepare_to_resume
!= NULL
)
3869 linux_nat_prepare_to_resume (lp
);
3870 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
3871 lp
->step
, GDB_SIGNAL_0
);
3873 lp
->stopped_by_watchpoint
= 0;
3880 linux_nat_wait (struct target_ops
*ops
,
3881 ptid_t ptid
, struct target_waitstatus
*ourstatus
,
3886 if (debug_linux_nat
)
3888 char *options_string
;
3890 options_string
= target_options_to_string (target_options
);
3891 fprintf_unfiltered (gdb_stdlog
,
3892 "linux_nat_wait: [%s], [%s]\n",
3893 target_pid_to_str (ptid
),
3895 xfree (options_string
);
3898 /* Flush the async file first. */
3899 if (target_can_async_p ())
3900 async_file_flush ();
3902 /* Resume LWPs that are currently stopped without any pending status
3903 to report, but are resumed from the core's perspective. LWPs get
3904 in this state if we find them stopping at a time we're not
3905 interested in reporting the event (target_wait on a
3906 specific_process, for example, see linux_nat_wait_1), and
3907 meanwhile the event became uninteresting. Don't bother resuming
3908 LWPs we're not going to wait for if they'd stop immediately. */
3910 iterate_over_lwps (minus_one_ptid
, resume_stopped_resumed_lwps
, &ptid
);
3912 event_ptid
= linux_nat_wait_1 (ops
, ptid
, ourstatus
, target_options
);
3914 /* If we requested any event, and something came out, assume there
3915 may be more. If we requested a specific lwp or process, also
3916 assume there may be more. */
3917 if (target_can_async_p ()
3918 && ((ourstatus
->kind
!= TARGET_WAITKIND_IGNORE
3919 && ourstatus
->kind
!= TARGET_WAITKIND_NO_RESUMED
)
3920 || !ptid_equal (ptid
, minus_one_ptid
)))
3923 /* Get ready for the next event. */
3924 if (target_can_async_p ())
3925 target_async (inferior_event_handler
, 0);
3931 kill_callback (struct lwp_info
*lp
, void *data
)
3933 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3936 kill (GET_LWP (lp
->ptid
), SIGKILL
);
3937 if (debug_linux_nat
)
3938 fprintf_unfiltered (gdb_stdlog
,
3939 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3940 target_pid_to_str (lp
->ptid
),
3941 errno
? safe_strerror (errno
) : "OK");
3943 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3946 ptrace (PTRACE_KILL
, GET_LWP (lp
->ptid
), 0, 0);
3947 if (debug_linux_nat
)
3948 fprintf_unfiltered (gdb_stdlog
,
3949 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3950 target_pid_to_str (lp
->ptid
),
3951 errno
? safe_strerror (errno
) : "OK");
3957 kill_wait_callback (struct lwp_info
*lp
, void *data
)
3961 /* We must make sure that there are no pending events (delayed
3962 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3963 program doesn't interfere with any following debugging session. */
3965 /* For cloned processes we must check both with __WCLONE and
3966 without, since the exit status of a cloned process isn't reported
3972 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, __WCLONE
);
3973 if (pid
!= (pid_t
) -1)
3975 if (debug_linux_nat
)
3976 fprintf_unfiltered (gdb_stdlog
,
3977 "KWC: wait %s received unknown.\n",
3978 target_pid_to_str (lp
->ptid
));
3979 /* The Linux kernel sometimes fails to kill a thread
3980 completely after PTRACE_KILL; that goes from the stop
3981 point in do_fork out to the one in
3982 get_signal_to_deliver and waits again. So kill it
3984 kill_callback (lp
, NULL
);
3987 while (pid
== GET_LWP (lp
->ptid
));
3989 gdb_assert (pid
== -1 && errno
== ECHILD
);
3994 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, 0);
3995 if (pid
!= (pid_t
) -1)
3997 if (debug_linux_nat
)
3998 fprintf_unfiltered (gdb_stdlog
,
3999 "KWC: wait %s received unk.\n",
4000 target_pid_to_str (lp
->ptid
));
4001 /* See the call to kill_callback above. */
4002 kill_callback (lp
, NULL
);
4005 while (pid
== GET_LWP (lp
->ptid
));
4007 gdb_assert (pid
== -1 && errno
== ECHILD
);
4012 linux_nat_kill (struct target_ops
*ops
)
4014 struct target_waitstatus last
;
4018 /* If we're stopped while forking and we haven't followed yet,
4019 kill the other task. We need to do this first because the
4020 parent will be sleeping if this is a vfork. */
4022 get_last_target_status (&last_ptid
, &last
);
4024 if (last
.kind
== TARGET_WAITKIND_FORKED
4025 || last
.kind
== TARGET_WAITKIND_VFORKED
)
4027 ptrace (PT_KILL
, PIDGET (last
.value
.related_pid
), 0, 0);
4030 /* Let the arch-specific native code know this process is
4032 linux_nat_forget_process (PIDGET (last
.value
.related_pid
));
4035 if (forks_exist_p ())
4036 linux_fork_killall ();
4039 ptid_t ptid
= pid_to_ptid (ptid_get_pid (inferior_ptid
));
4041 /* Stop all threads before killing them, since ptrace requires
4042 that the thread is stopped to successfully PTRACE_KILL. */
4043 iterate_over_lwps (ptid
, stop_callback
, NULL
);
4044 /* ... and wait until all of them have reported back that
4045 they're no longer running. */
4046 iterate_over_lwps (ptid
, stop_wait_callback
, NULL
);
4048 /* Kill all LWP's ... */
4049 iterate_over_lwps (ptid
, kill_callback
, NULL
);
4051 /* ... and wait until we've flushed all events. */
4052 iterate_over_lwps (ptid
, kill_wait_callback
, NULL
);
4055 target_mourn_inferior ();
4059 linux_nat_mourn_inferior (struct target_ops
*ops
)
4061 int pid
= ptid_get_pid (inferior_ptid
);
4063 purge_lwp_list (pid
);
4065 if (! forks_exist_p ())
4066 /* Normal case, no other forks available. */
4067 linux_ops
->to_mourn_inferior (ops
);
4069 /* Multi-fork case. The current inferior_ptid has exited, but
4070 there are other viable forks to debug. Delete the exiting
4071 one and context-switch to the first available. */
4072 linux_fork_mourn_inferior ();
4074 /* Let the arch-specific native code know this process is gone. */
4075 linux_nat_forget_process (pid
);
4078 /* Convert a native/host siginfo object, into/from the siginfo in the
4079 layout of the inferiors' architecture. */
4082 siginfo_fixup (siginfo_t
*siginfo
, gdb_byte
*inf_siginfo
, int direction
)
4086 if (linux_nat_siginfo_fixup
!= NULL
)
4087 done
= linux_nat_siginfo_fixup (siginfo
, inf_siginfo
, direction
);
4089 /* If there was no callback, or the callback didn't do anything,
4090 then just do a straight memcpy. */
4094 memcpy (siginfo
, inf_siginfo
, sizeof (siginfo_t
));
4096 memcpy (inf_siginfo
, siginfo
, sizeof (siginfo_t
));
4101 linux_xfer_siginfo (struct target_ops
*ops
, enum target_object object
,
4102 const char *annex
, gdb_byte
*readbuf
,
4103 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4107 gdb_byte inf_siginfo
[sizeof (siginfo_t
)];
4109 gdb_assert (object
== TARGET_OBJECT_SIGNAL_INFO
);
4110 gdb_assert (readbuf
|| writebuf
);
4112 pid
= GET_LWP (inferior_ptid
);
4114 pid
= GET_PID (inferior_ptid
);
4116 if (offset
> sizeof (siginfo
))
4120 ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
4124 /* When GDB is built as a 64-bit application, ptrace writes into
4125 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4126 inferior with a 64-bit GDB should look the same as debugging it
4127 with a 32-bit GDB, we need to convert it. GDB core always sees
4128 the converted layout, so any read/write will have to be done
4130 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
4132 if (offset
+ len
> sizeof (siginfo
))
4133 len
= sizeof (siginfo
) - offset
;
4135 if (readbuf
!= NULL
)
4136 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
4139 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
4141 /* Convert back to ptrace layout before flushing it out. */
4142 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
4145 ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
4154 linux_nat_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4155 const char *annex
, gdb_byte
*readbuf
,
4156 const gdb_byte
*writebuf
,
4157 ULONGEST offset
, LONGEST len
)
4159 struct cleanup
*old_chain
;
4162 if (object
== TARGET_OBJECT_SIGNAL_INFO
)
4163 return linux_xfer_siginfo (ops
, object
, annex
, readbuf
, writebuf
,
4166 /* The target is connected but no live inferior is selected. Pass
4167 this request down to a lower stratum (e.g., the executable
4169 if (object
== TARGET_OBJECT_MEMORY
&& ptid_equal (inferior_ptid
, null_ptid
))
4172 old_chain
= save_inferior_ptid ();
4174 if (is_lwp (inferior_ptid
))
4175 inferior_ptid
= pid_to_ptid (GET_LWP (inferior_ptid
));
4177 xfer
= linux_ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4180 do_cleanups (old_chain
);
4185 linux_thread_alive (ptid_t ptid
)
4189 gdb_assert (is_lwp (ptid
));
4191 /* Send signal 0 instead of anything ptrace, because ptracing a
4192 running thread errors out claiming that the thread doesn't
4194 err
= kill_lwp (GET_LWP (ptid
), 0);
4196 if (debug_linux_nat
)
4197 fprintf_unfiltered (gdb_stdlog
,
4198 "LLTA: KILL(SIG0) %s (%s)\n",
4199 target_pid_to_str (ptid
),
4200 err
? safe_strerror (tmp_errno
) : "OK");
4209 linux_nat_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
4211 return linux_thread_alive (ptid
);
4215 linux_nat_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
4217 static char buf
[64];
4220 && (GET_PID (ptid
) != GET_LWP (ptid
)
4221 || num_lwps (GET_PID (ptid
)) > 1))
4223 snprintf (buf
, sizeof (buf
), "LWP %ld", GET_LWP (ptid
));
4227 return normal_pid_to_str (ptid
);
4231 linux_nat_thread_name (struct thread_info
*thr
)
4233 int pid
= ptid_get_pid (thr
->ptid
);
4234 long lwp
= ptid_get_lwp (thr
->ptid
);
4235 #define FORMAT "/proc/%d/task/%ld/comm"
4236 char buf
[sizeof (FORMAT
) + 30];
4238 char *result
= NULL
;
4240 snprintf (buf
, sizeof (buf
), FORMAT
, pid
, lwp
);
4241 comm_file
= gdb_fopen_cloexec (buf
, "r");
4244 /* Not exported by the kernel, so we define it here. */
4246 static char line
[COMM_LEN
+ 1];
4248 if (fgets (line
, sizeof (line
), comm_file
))
4250 char *nl
= strchr (line
, '\n');
4267 /* Accepts an integer PID; Returns a string representing a file that
4268 can be opened to get the symbols for the child process. */
4271 linux_child_pid_to_exec_file (int pid
)
4273 char *name1
, *name2
;
4275 name1
= xmalloc (PATH_MAX
);
4276 name2
= xmalloc (PATH_MAX
);
4277 make_cleanup (xfree
, name1
);
4278 make_cleanup (xfree
, name2
);
4279 memset (name2
, 0, PATH_MAX
);
4281 sprintf (name1
, "/proc/%d/exe", pid
);
4282 if (readlink (name1
, name2
, PATH_MAX
- 1) > 0)
4288 /* Records the thread's register state for the corefile note
4292 linux_nat_collect_thread_registers (const struct regcache
*regcache
,
4293 ptid_t ptid
, bfd
*obfd
,
4294 char *note_data
, int *note_size
,
4295 enum gdb_signal stop_signal
)
4297 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
4298 const struct regset
*regset
;
4300 gdb_gregset_t gregs
;
4301 gdb_fpregset_t fpregs
;
4303 core_regset_p
= gdbarch_regset_from_core_section_p (gdbarch
);
4306 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg",
4308 != NULL
&& regset
->collect_regset
!= NULL
)
4309 regset
->collect_regset (regset
, regcache
, -1, &gregs
, sizeof (gregs
));
4311 fill_gregset (regcache
, &gregs
, -1);
4313 note_data
= (char *) elfcore_write_prstatus
4314 (obfd
, note_data
, note_size
, ptid_get_lwp (ptid
),
4315 gdb_signal_to_host (stop_signal
), &gregs
);
4318 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg2",
4320 != NULL
&& regset
->collect_regset
!= NULL
)
4321 regset
->collect_regset (regset
, regcache
, -1, &fpregs
, sizeof (fpregs
));
4323 fill_fpregset (regcache
, &fpregs
, -1);
4325 note_data
= (char *) elfcore_write_prfpreg (obfd
, note_data
, note_size
,
4326 &fpregs
, sizeof (fpregs
));
4331 /* Fills the "to_make_corefile_note" target vector. Builds the note
4332 section for a corefile, and returns it in a malloc buffer. */
4335 linux_nat_make_corefile_notes (bfd
*obfd
, int *note_size
)
4337 /* FIXME: uweigand/2011-10-06: Once all GNU/Linux architectures have been
4338 converted to gdbarch_core_regset_sections, this function can go away. */
4339 return linux_make_corefile_notes (target_gdbarch (), obfd
, note_size
,
4340 linux_nat_collect_thread_registers
);
4343 /* Implement the to_xfer_partial interface for memory reads using the /proc
4344 filesystem. Because we can use a single read() call for /proc, this
4345 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4346 but it doesn't support writes. */
4349 linux_proc_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4350 const char *annex
, gdb_byte
*readbuf
,
4351 const gdb_byte
*writebuf
,
4352 ULONGEST offset
, LONGEST len
)
4358 if (object
!= TARGET_OBJECT_MEMORY
|| !readbuf
)
4361 /* Don't bother for one word. */
4362 if (len
< 3 * sizeof (long))
4365 /* We could keep this file open and cache it - possibly one per
4366 thread. That requires some juggling, but is even faster. */
4367 sprintf (filename
, "/proc/%d/mem", PIDGET (inferior_ptid
));
4368 fd
= gdb_open_cloexec (filename
, O_RDONLY
| O_LARGEFILE
, 0);
4372 /* If pread64 is available, use it. It's faster if the kernel
4373 supports it (only one syscall), and it's 64-bit safe even on
4374 32-bit platforms (for instance, SPARC debugging a SPARC64
4377 if (pread64 (fd
, readbuf
, len
, offset
) != len
)
4379 if (lseek (fd
, offset
, SEEK_SET
) == -1 || read (fd
, readbuf
, len
) != len
)
4390 /* Enumerate spufs IDs for process PID. */
4392 spu_enumerate_spu_ids (int pid
, gdb_byte
*buf
, ULONGEST offset
, LONGEST len
)
4394 enum bfd_endian byte_order
= gdbarch_byte_order (target_gdbarch ());
4396 LONGEST written
= 0;
4399 struct dirent
*entry
;
4401 xsnprintf (path
, sizeof path
, "/proc/%d/fd", pid
);
4402 dir
= opendir (path
);
4407 while ((entry
= readdir (dir
)) != NULL
)
4413 fd
= atoi (entry
->d_name
);
4417 xsnprintf (path
, sizeof path
, "/proc/%d/fd/%d", pid
, fd
);
4418 if (stat (path
, &st
) != 0)
4420 if (!S_ISDIR (st
.st_mode
))
4423 if (statfs (path
, &stfs
) != 0)
4425 if (stfs
.f_type
!= SPUFS_MAGIC
)
4428 if (pos
>= offset
&& pos
+ 4 <= offset
+ len
)
4430 store_unsigned_integer (buf
+ pos
- offset
, 4, byte_order
, fd
);
4440 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4441 object type, using the /proc file system. */
4443 linux_proc_xfer_spu (struct target_ops
*ops
, enum target_object object
,
4444 const char *annex
, gdb_byte
*readbuf
,
4445 const gdb_byte
*writebuf
,
4446 ULONGEST offset
, LONGEST len
)
4451 int pid
= PIDGET (inferior_ptid
);
4458 return spu_enumerate_spu_ids (pid
, readbuf
, offset
, len
);
4461 xsnprintf (buf
, sizeof buf
, "/proc/%d/fd/%s", pid
, annex
);
4462 fd
= gdb_open_cloexec (buf
, writebuf
? O_WRONLY
: O_RDONLY
, 0);
4467 && lseek (fd
, (off_t
) offset
, SEEK_SET
) != (off_t
) offset
)
4474 ret
= write (fd
, writebuf
, (size_t) len
);
4476 ret
= read (fd
, readbuf
, (size_t) len
);
4483 /* Parse LINE as a signal set and add its set bits to SIGS. */
4486 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
4488 int len
= strlen (line
) - 1;
4492 if (line
[len
] != '\n')
4493 error (_("Could not parse signal set: %s"), line
);
4501 if (*p
>= '0' && *p
<= '9')
4503 else if (*p
>= 'a' && *p
<= 'f')
4504 digit
= *p
- 'a' + 10;
4506 error (_("Could not parse signal set: %s"), line
);
4511 sigaddset (sigs
, signum
+ 1);
4513 sigaddset (sigs
, signum
+ 2);
4515 sigaddset (sigs
, signum
+ 3);
4517 sigaddset (sigs
, signum
+ 4);
4523 /* Find process PID's pending signals from /proc/pid/status and set
4527 linux_proc_pending_signals (int pid
, sigset_t
*pending
,
4528 sigset_t
*blocked
, sigset_t
*ignored
)
4531 char buffer
[PATH_MAX
], fname
[PATH_MAX
];
4532 struct cleanup
*cleanup
;
4534 sigemptyset (pending
);
4535 sigemptyset (blocked
);
4536 sigemptyset (ignored
);
4537 sprintf (fname
, "/proc/%d/status", pid
);
4538 procfile
= gdb_fopen_cloexec (fname
, "r");
4539 if (procfile
== NULL
)
4540 error (_("Could not open %s"), fname
);
4541 cleanup
= make_cleanup_fclose (procfile
);
4543 while (fgets (buffer
, PATH_MAX
, procfile
) != NULL
)
4545 /* Normal queued signals are on the SigPnd line in the status
4546 file. However, 2.6 kernels also have a "shared" pending
4547 queue for delivering signals to a thread group, so check for
4550 Unfortunately some Red Hat kernels include the shared pending
4551 queue but not the ShdPnd status field. */
4553 if (strncmp (buffer
, "SigPnd:\t", 8) == 0)
4554 add_line_to_sigset (buffer
+ 8, pending
);
4555 else if (strncmp (buffer
, "ShdPnd:\t", 8) == 0)
4556 add_line_to_sigset (buffer
+ 8, pending
);
4557 else if (strncmp (buffer
, "SigBlk:\t", 8) == 0)
4558 add_line_to_sigset (buffer
+ 8, blocked
);
4559 else if (strncmp (buffer
, "SigIgn:\t", 8) == 0)
4560 add_line_to_sigset (buffer
+ 8, ignored
);
4563 do_cleanups (cleanup
);
4567 linux_nat_xfer_osdata (struct target_ops
*ops
, enum target_object object
,
4568 const char *annex
, gdb_byte
*readbuf
,
4569 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4571 gdb_assert (object
== TARGET_OBJECT_OSDATA
);
4573 return linux_common_xfer_osdata (annex
, readbuf
, offset
, len
);
4577 linux_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4578 const char *annex
, gdb_byte
*readbuf
,
4579 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4583 if (object
== TARGET_OBJECT_AUXV
)
4584 return memory_xfer_auxv (ops
, object
, annex
, readbuf
, writebuf
,
4587 if (object
== TARGET_OBJECT_OSDATA
)
4588 return linux_nat_xfer_osdata (ops
, object
, annex
, readbuf
, writebuf
,
4591 if (object
== TARGET_OBJECT_SPU
)
4592 return linux_proc_xfer_spu (ops
, object
, annex
, readbuf
, writebuf
,
4595 /* GDB calculates all the addresses in possibly larger width of the address.
4596 Address width needs to be masked before its final use - either by
4597 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4599 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4601 if (object
== TARGET_OBJECT_MEMORY
)
4603 int addr_bit
= gdbarch_addr_bit (target_gdbarch ());
4605 if (addr_bit
< (sizeof (ULONGEST
) * HOST_CHAR_BIT
))
4606 offset
&= ((ULONGEST
) 1 << addr_bit
) - 1;
4609 xfer
= linux_proc_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4614 return super_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4619 cleanup_target_stop (void *arg
)
4621 ptid_t
*ptid
= (ptid_t
*) arg
;
4623 gdb_assert (arg
!= NULL
);
4626 target_resume (*ptid
, 0, GDB_SIGNAL_0
);
4629 static VEC(static_tracepoint_marker_p
) *
4630 linux_child_static_tracepoint_markers_by_strid (const char *strid
)
4632 char s
[IPA_CMD_BUF_SIZE
];
4633 struct cleanup
*old_chain
;
4634 int pid
= ptid_get_pid (inferior_ptid
);
4635 VEC(static_tracepoint_marker_p
) *markers
= NULL
;
4636 struct static_tracepoint_marker
*marker
= NULL
;
4638 ptid_t ptid
= ptid_build (pid
, 0, 0);
4643 memcpy (s
, "qTfSTM", sizeof ("qTfSTM"));
4644 s
[sizeof ("qTfSTM")] = 0;
4646 agent_run_command (pid
, s
, strlen (s
) + 1);
4648 old_chain
= make_cleanup (free_current_marker
, &marker
);
4649 make_cleanup (cleanup_target_stop
, &ptid
);
4654 marker
= XCNEW (struct static_tracepoint_marker
);
4658 parse_static_tracepoint_marker_definition (p
, &p
, marker
);
4660 if (strid
== NULL
|| strcmp (strid
, marker
->str_id
) == 0)
4662 VEC_safe_push (static_tracepoint_marker_p
,
4668 release_static_tracepoint_marker (marker
);
4669 memset (marker
, 0, sizeof (*marker
));
4672 while (*p
++ == ','); /* comma-separated list */
4674 memcpy (s
, "qTsSTM", sizeof ("qTsSTM"));
4675 s
[sizeof ("qTsSTM")] = 0;
4676 agent_run_command (pid
, s
, strlen (s
) + 1);
4680 do_cleanups (old_chain
);
4685 /* Create a prototype generic GNU/Linux target. The client can override
4686 it with local methods. */
4689 linux_target_install_ops (struct target_ops
*t
)
4691 t
->to_insert_fork_catchpoint
= linux_child_insert_fork_catchpoint
;
4692 t
->to_remove_fork_catchpoint
= linux_child_remove_fork_catchpoint
;
4693 t
->to_insert_vfork_catchpoint
= linux_child_insert_vfork_catchpoint
;
4694 t
->to_remove_vfork_catchpoint
= linux_child_remove_vfork_catchpoint
;
4695 t
->to_insert_exec_catchpoint
= linux_child_insert_exec_catchpoint
;
4696 t
->to_remove_exec_catchpoint
= linux_child_remove_exec_catchpoint
;
4697 t
->to_set_syscall_catchpoint
= linux_child_set_syscall_catchpoint
;
4698 t
->to_pid_to_exec_file
= linux_child_pid_to_exec_file
;
4699 t
->to_post_startup_inferior
= linux_child_post_startup_inferior
;
4700 t
->to_post_attach
= linux_child_post_attach
;
4701 t
->to_follow_fork
= linux_child_follow_fork
;
4702 t
->to_make_corefile_notes
= linux_nat_make_corefile_notes
;
4704 super_xfer_partial
= t
->to_xfer_partial
;
4705 t
->to_xfer_partial
= linux_xfer_partial
;
4707 t
->to_static_tracepoint_markers_by_strid
4708 = linux_child_static_tracepoint_markers_by_strid
;
4714 struct target_ops
*t
;
4716 t
= inf_ptrace_target ();
4717 linux_target_install_ops (t
);
4723 linux_trad_target (CORE_ADDR (*register_u_offset
)(struct gdbarch
*, int, int))
4725 struct target_ops
*t
;
4727 t
= inf_ptrace_trad_target (register_u_offset
);
4728 linux_target_install_ops (t
);
4733 /* target_is_async_p implementation. */
4736 linux_nat_is_async_p (void)
4738 /* NOTE: palves 2008-03-21: We're only async when the user requests
4739 it explicitly with the "set target-async" command.
4740 Someday, linux will always be async. */
4741 return target_async_permitted
;
4744 /* target_can_async_p implementation. */
4747 linux_nat_can_async_p (void)
4749 /* NOTE: palves 2008-03-21: We're only async when the user requests
4750 it explicitly with the "set target-async" command.
4751 Someday, linux will always be async. */
4752 return target_async_permitted
;
4756 linux_nat_supports_non_stop (void)
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  */

int linux_multi_process = 1;

/* target_supports_multi_process implementation.  */

static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}
/* target_supports_disable_randomization implementation.  Address
   space randomization can only be disabled when the personality(2)
   interface is available at build time.  */

static int
linux_nat_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
4782 static int async_terminal_is_ours
= 1;
4784 /* target_terminal_inferior implementation. */
4787 linux_nat_terminal_inferior (void)
4789 if (!target_is_async_p ())
4791 /* Async mode is disabled. */
4792 terminal_inferior ();
4796 terminal_inferior ();
4798 /* Calls to target_terminal_*() are meant to be idempotent. */
4799 if (!async_terminal_is_ours
)
4802 delete_file_handler (input_fd
);
4803 async_terminal_is_ours
= 0;
4807 /* target_terminal_ours implementation. */
4810 linux_nat_terminal_ours (void)
4812 if (!target_is_async_p ())
4814 /* Async mode is disabled. */
4819 /* GDB should never give the terminal to the inferior if the
4820 inferior is running in the background (run&, continue&, etc.),
4821 but claiming it sure should. */
4824 if (async_terminal_is_ours
)
4827 clear_sigint_trap ();
4828 add_file_handler (input_fd
, stdin_event_handler
, 0);
4829 async_terminal_is_ours
= 1;
/* Callback/context registered by the core's target_async request;
   invoked from handle_target_event when the event pipe fires.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;
4836 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4837 so we notice when any child changes state, and notify the
4838 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4839 above to wait for the arrival of a SIGCHLD. */
4842 sigchld_handler (int signo
)
4844 int old_errno
= errno
;
4846 if (debug_linux_nat
)
4847 ui_file_write_async_safe (gdb_stdlog
,
4848 "sigchld\n", sizeof ("sigchld\n") - 1);
4850 if (signo
== SIGCHLD
4851 && linux_nat_event_pipe
[0] != -1)
4852 async_file_mark (); /* Let the event loop know that there are
4853 events to handle. */
4858 /* Callback registered with the target events file descriptor. */
4861 handle_target_event (int error
, gdb_client_data client_data
)
4863 (*async_client_callback
) (INF_REG_EVENT
, async_client_context
);
4866 /* Create/destroy the target events pipe. Returns previous state. */
4869 linux_async_pipe (int enable
)
4871 int previous
= (linux_nat_event_pipe
[0] != -1);
4873 if (previous
!= enable
)
4877 /* Block child signals while we create/destroy the pipe, as
4878 their handler writes to it. */
4879 block_child_signals (&prev_mask
);
4883 if (gdb_pipe_cloexec (linux_nat_event_pipe
) == -1)
4884 internal_error (__FILE__
, __LINE__
,
4885 "creating event pipe failed.");
4887 fcntl (linux_nat_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4888 fcntl (linux_nat_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4892 close (linux_nat_event_pipe
[0]);
4893 close (linux_nat_event_pipe
[1]);
4894 linux_nat_event_pipe
[0] = -1;
4895 linux_nat_event_pipe
[1] = -1;
4898 restore_child_signals_mask (&prev_mask
);
4904 /* target_async implementation. */
4907 linux_nat_async (void (*callback
) (enum inferior_event_type event_type
,
4908 void *context
), void *context
)
4910 if (callback
!= NULL
)
4912 async_client_callback
= callback
;
4913 async_client_context
= context
;
4914 if (!linux_async_pipe (1))
4916 add_file_handler (linux_nat_event_pipe
[0],
4917 handle_target_event
, NULL
);
4918 /* There may be pending events to handle. Tell the event loop
4925 async_client_callback
= callback
;
4926 async_client_context
= context
;
4927 delete_file_handler (linux_nat_event_pipe
[0]);
4928 linux_async_pipe (0);
4933 /* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
4937 linux_nat_stop_lwp (struct lwp_info
*lwp
, void *data
)
4941 if (debug_linux_nat
)
4942 fprintf_unfiltered (gdb_stdlog
,
4943 "LNSL: running -> suspending %s\n",
4944 target_pid_to_str (lwp
->ptid
));
4947 if (lwp
->last_resume_kind
== resume_stop
)
4949 if (debug_linux_nat
)
4950 fprintf_unfiltered (gdb_stdlog
,
4951 "linux-nat: already stopping LWP %ld at "
4953 ptid_get_lwp (lwp
->ptid
));
4957 stop_callback (lwp
, NULL
);
4958 lwp
->last_resume_kind
= resume_stop
;
4962 /* Already known to be stopped; do nothing. */
4964 if (debug_linux_nat
)
4966 if (find_thread_ptid (lwp
->ptid
)->stop_requested
)
4967 fprintf_unfiltered (gdb_stdlog
,
4968 "LNSL: already stopped/stop_requested %s\n",
4969 target_pid_to_str (lwp
->ptid
));
4971 fprintf_unfiltered (gdb_stdlog
,
4972 "LNSL: already stopped/no "
4973 "stop_requested yet %s\n",
4974 target_pid_to_str (lwp
->ptid
));
4981 linux_nat_stop (ptid_t ptid
)
4984 iterate_over_lwps (ptid
, linux_nat_stop_lwp
, NULL
);
4986 linux_ops
->to_stop (ptid
);
4990 linux_nat_close (void)
4992 /* Unregister from the event loop. */
4993 if (linux_nat_is_async_p ())
4994 linux_nat_async (NULL
, 0);
4996 if (linux_ops
->to_close
)
4997 linux_ops
->to_close ();
5000 /* When requests are passed down from the linux-nat layer to the
5001 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5002 used. The address space pointer is stored in the inferior object,
5003 but the common code that is passed such ptid can't tell whether
5004 lwpid is a "main" process id or not (it assumes so). We reverse
5005 look up the "main" process id from the lwp here. */
5007 static struct address_space
*
5008 linux_nat_thread_address_space (struct target_ops
*t
, ptid_t ptid
)
5010 struct lwp_info
*lwp
;
5011 struct inferior
*inf
;
5014 pid
= GET_LWP (ptid
);
5015 if (GET_LWP (ptid
) == 0)
5017 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5019 lwp
= find_lwp_pid (ptid
);
5020 pid
= GET_PID (lwp
->ptid
);
5024 /* A (pid,lwpid,0) ptid. */
5025 pid
= GET_PID (ptid
);
5028 inf
= find_inferior_pid (pid
);
5029 gdb_assert (inf
!= NULL
);
5033 /* Return the cached value of the processor core for thread PTID. */
5036 linux_nat_core_of_thread (struct target_ops
*ops
, ptid_t ptid
)
5038 struct lwp_info
*info
= find_lwp_pid (ptid
);
5046 linux_nat_add_target (struct target_ops
*t
)
5048 /* Save the provided single-threaded target. We save this in a separate
5049 variable because another target we've inherited from (e.g. inf-ptrace)
5050 may have saved a pointer to T; we want to use it for the final
5051 process stratum target. */
5052 linux_ops_saved
= *t
;
5053 linux_ops
= &linux_ops_saved
;
5055 /* Override some methods for multithreading. */
5056 t
->to_create_inferior
= linux_nat_create_inferior
;
5057 t
->to_attach
= linux_nat_attach
;
5058 t
->to_detach
= linux_nat_detach
;
5059 t
->to_resume
= linux_nat_resume
;
5060 t
->to_wait
= linux_nat_wait
;
5061 t
->to_pass_signals
= linux_nat_pass_signals
;
5062 t
->to_xfer_partial
= linux_nat_xfer_partial
;
5063 t
->to_kill
= linux_nat_kill
;
5064 t
->to_mourn_inferior
= linux_nat_mourn_inferior
;
5065 t
->to_thread_alive
= linux_nat_thread_alive
;
5066 t
->to_pid_to_str
= linux_nat_pid_to_str
;
5067 t
->to_thread_name
= linux_nat_thread_name
;
5068 t
->to_has_thread_control
= tc_schedlock
;
5069 t
->to_thread_address_space
= linux_nat_thread_address_space
;
5070 t
->to_stopped_by_watchpoint
= linux_nat_stopped_by_watchpoint
;
5071 t
->to_stopped_data_address
= linux_nat_stopped_data_address
;
5073 t
->to_can_async_p
= linux_nat_can_async_p
;
5074 t
->to_is_async_p
= linux_nat_is_async_p
;
5075 t
->to_supports_non_stop
= linux_nat_supports_non_stop
;
5076 t
->to_async
= linux_nat_async
;
5077 t
->to_terminal_inferior
= linux_nat_terminal_inferior
;
5078 t
->to_terminal_ours
= linux_nat_terminal_ours
;
5079 t
->to_close
= linux_nat_close
;
5081 /* Methods for non-stop support. */
5082 t
->to_stop
= linux_nat_stop
;
5084 t
->to_supports_multi_process
= linux_nat_supports_multi_process
;
5086 t
->to_supports_disable_randomization
5087 = linux_nat_supports_disable_randomization
;
5089 t
->to_core_of_thread
= linux_nat_core_of_thread
;
5091 /* We don't change the stratum; this target will sit at
5092 process_stratum and thread_db will set at thread_stratum. This
5093 is a little strange, since this is a multi-threaded-capable
5094 target, but we want to be on the stack below thread_db, and we
5095 also want to be used for single-threaded processes. */
5100 /* Register a method to call whenever a new thread is attached. */
5102 linux_nat_set_new_thread (struct target_ops
*t
,
5103 void (*new_thread
) (struct lwp_info
*))
5105 /* Save the pointer. We only support a single registered instance
5106 of the GNU/Linux native target, so we do not need to map this to
5108 linux_nat_new_thread
= new_thread
;
5111 /* See declaration in linux-nat.h. */
5114 linux_nat_set_new_fork (struct target_ops
*t
,
5115 linux_nat_new_fork_ftype
*new_fork
)
5117 /* Save the pointer. */
5118 linux_nat_new_fork
= new_fork
;
5121 /* See declaration in linux-nat.h. */
5124 linux_nat_set_forget_process (struct target_ops
*t
,
5125 linux_nat_forget_process_ftype
*fn
)
5127 /* Save the pointer. */
5128 linux_nat_forget_process_hook
= fn
;
5131 /* See declaration in linux-nat.h. */
5134 linux_nat_forget_process (pid_t pid
)
5136 if (linux_nat_forget_process_hook
!= NULL
)
5137 linux_nat_forget_process_hook (pid
);
5140 /* Register a method that converts a siginfo object between the layout
5141 that ptrace returns, and the layout in the architecture of the
5144 linux_nat_set_siginfo_fixup (struct target_ops
*t
,
5145 int (*siginfo_fixup
) (siginfo_t
*,
5149 /* Save the pointer. */
5150 linux_nat_siginfo_fixup
= siginfo_fixup
;
5153 /* Register a method to call prior to resuming a thread. */
5156 linux_nat_set_prepare_to_resume (struct target_ops
*t
,
5157 void (*prepare_to_resume
) (struct lwp_info
*))
5159 /* Save the pointer. */
5160 linux_nat_prepare_to_resume
= prepare_to_resume
;
5163 /* See linux-nat.h. */
5166 linux_nat_get_siginfo (ptid_t ptid
, siginfo_t
*siginfo
)
5170 pid
= GET_LWP (ptid
);
5172 pid
= GET_PID (ptid
);
5175 ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, siginfo
);
5178 memset (siginfo
, 0, sizeof (*siginfo
));
5184 /* Provide a prototype to silence -Wmissing-prototypes. */
5185 extern initialize_file_ftype _initialize_linux_nat
;
5188 _initialize_linux_nat (void)
5190 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance
,
5191 &debug_linux_nat
, _("\
5192 Set debugging of GNU/Linux lwp module."), _("\
5193 Show debugging of GNU/Linux lwp module."), _("\
5194 Enables printf debugging output."),
5196 show_debug_linux_nat
,
5197 &setdebuglist
, &showdebuglist
);
5199 /* Save this mask as the default. */
5200 sigprocmask (SIG_SETMASK
, NULL
, &normal_mask
);
5202 /* Install a SIGCHLD handler. */
5203 sigchld_action
.sa_handler
= sigchld_handler
;
5204 sigemptyset (&sigchld_action
.sa_mask
);
5205 sigchld_action
.sa_flags
= SA_RESTART
;
5207 /* Make it the default. */
5208 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
5210 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5211 sigprocmask (SIG_SETMASK
, NULL
, &suspend_mask
);
5212 sigdelset (&suspend_mask
, SIGCHLD
);
5214 sigemptyset (&blocked_mask
);
5218 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5219 the GNU/Linux Threads library and therefore doesn't really belong
5222 /* Read variable NAME in the target and return its value if found.
5223 Otherwise return zero. It is assumed that the type of the variable
5227 get_signo (const char *name
)
5229 struct minimal_symbol
*ms
;
5232 ms
= lookup_minimal_symbol (name
, NULL
, NULL
);
5236 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms
), (gdb_byte
*) &signo
,
5237 sizeof (signo
)) != 0)
5243 /* Return the set of signals used by the threads library in *SET. */
5246 lin_thread_get_thread_signals (sigset_t
*set
)
5248 struct sigaction action
;
5249 int restart
, cancel
;
5251 sigemptyset (&blocked_mask
);
5254 restart
= get_signo ("__pthread_sig_restart");
5255 cancel
= get_signo ("__pthread_sig_cancel");
5257 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5258 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5259 not provide any way for the debugger to query the signal numbers -
5260 fortunately they don't change! */
5263 restart
= __SIGRTMIN
;
5266 cancel
= __SIGRTMIN
+ 1;
5268 sigaddset (set
, restart
);
5269 sigaddset (set
, cancel
);
5271 /* The GNU/Linux Threads library makes terminating threads send a
5272 special "cancel" signal instead of SIGCHLD. Make sure we catch
5273 those (to prevent them from terminating GDB itself, which is
5274 likely to be their default action) and treat them the same way as
5277 action
.sa_handler
= sigchld_handler
;
5278 sigemptyset (&action
.sa_mask
);
5279 action
.sa_flags
= SA_RESTART
;
5280 sigaction (cancel
, &action
, NULL
);
5282 /* We block the "cancel" signal throughout this code ... */
5283 sigaddset (&blocked_mask
, cancel
);
5284 sigprocmask (SIG_BLOCK
, &blocked_mask
, NULL
);
5286 /* ... except during a sigsuspend. */
5287 sigdelset (&suspend_mask
, cancel
);