1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
29 #include <sys/syscall.h>
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
38 #include "inf-ptrace.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
50 #include "event-loop.h"
51 #include "event-top.h"
53 #include <sys/types.h>
54 #include "gdb_dirent.h"
55 #include "xml-support.h"
#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
/* Fallback for old kernel headers that lack the constant.  */
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif /* !HAVE_DECL_ADDR_NO_RANDOMIZE */
#endif /* HAVE_PERSONALITY */
64 /* This comment documents high-level logic of this file.
66 Waiting for events in sync mode
67 ===============================
69 When waiting for an event in a specific thread, we just use waitpid, passing
70 the specific pid, and not passing WNOHANG.
72 When waiting for an event in all threads, waitpid is not quite good. Prior to
73 version 2.4, Linux can either wait for event in main thread, or in secondary
74 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
75 miss an event. The solution is to use non-blocking waitpid, together with
76 sigsuspend. First, we use non-blocking waitpid to get an event in the main
77 process, if any. Second, we use non-blocking waitpid with the __WCLONE
78 flag to check for events in cloned processes. If nothing is found, we use
79 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
80 happened to a child process -- and SIGCHLD will be delivered both for events
81 in main debugged process and in cloned processes. As soon as we know there's
82 an event, we get back to calling non-blocking waitpid with and without __WCLONE.
84 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
85 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
86 blocked, the signal becomes pending and sigsuspend immediately
87 notices it and returns.
89 Waiting for events in async mode
90 ================================
92 In async mode, GDB should always be ready to handle both user input
93 and target events, so neither blocking waitpid nor sigsuspend are
94 viable options. Instead, we should asynchronously notify the GDB main
95 event loop whenever there's an unprocessed event from the target. We
96 detect asynchronous target events by handling SIGCHLD signals. To
97 notify the event loop about target events, the self-pipe trick is used
98 --- a pipe is registered as waitable event source in the event loop,
99 the event loop select/poll's on the read end of this pipe (as well on
100 other event sources, e.g., stdin), and the SIGCHLD handler writes a
101 byte to this pipe. This is more portable than relying on
102 pselect/ppoll, since on kernels that lack those syscalls, libc
103 emulates them with select/poll+sigprocmask, and that is racy
104 (a.k.a. plain broken).
106 Obviously, if we fail to notify the event loop if there's a target
107 event, it's bad. OTOH, if we notify the event loop when there's no
108 event from the target, linux_nat_wait will detect that there's no real
109 event to report, and return event of type TARGET_WAITKIND_IGNORE.
110 This is mostly harmless, but it will waste time and is better avoided.
112 The main design point is that every time GDB is outside linux-nat.c,
113 we have a SIGCHLD handler installed that is called when something
114 happens to the target and notifies the GDB event loop. Whenever GDB
115 core decides to handle the event, and calls into linux-nat.c, we
116 process things as in sync mode, except that the we never block in
119 While processing an event, we may end up momentarily blocked in
120 waitpid calls. Those waitpid calls, while blocking, are guaranteed to
121 return quickly. E.g., in all-stop mode, before reporting to the core
122 that an LWP hit a breakpoint, all LWPs are stopped by sending them
123 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
124 Note that this is different from blocking indefinitely waiting for the
125 next event --- here, we're already handling an event.
130 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
131 signal is not entirely significant; we just need a signal to be delivered,
132 so that we can intercept it. SIGSTOP's advantage is that it can not be
133 blocked. A disadvantage is that it is not a real-time signal, so it can only
134 be queued once; we do not keep track of other sources of SIGSTOP.
136 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
137 use them, because they have special behavior when the signal is generated -
138 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
139 kills the entire thread group.
141 A delivered SIGSTOP would stop the entire thread group, not just the thread we
142 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
143 cancel it (by PTRACE_CONT without passing SIGSTOP).
145 We could use a real-time signal instead. This would solve those problems; we
146 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
147 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
148 generates it, and there are races with trying to find a signal that is not
152 #define O_LARGEFILE 0
155 /* If the system headers did not provide the constants, hard-code the normal
157 #ifndef PTRACE_EVENT_FORK
159 #define PTRACE_SETOPTIONS 0x4200
160 #define PTRACE_GETEVENTMSG 0x4201
162 /* options set using PTRACE_SETOPTIONS */
163 #define PTRACE_O_TRACESYSGOOD 0x00000001
164 #define PTRACE_O_TRACEFORK 0x00000002
165 #define PTRACE_O_TRACEVFORK 0x00000004
166 #define PTRACE_O_TRACECLONE 0x00000008
167 #define PTRACE_O_TRACEEXEC 0x00000010
168 #define PTRACE_O_TRACEVFORKDONE 0x00000020
169 #define PTRACE_O_TRACEEXIT 0x00000040
171 /* Wait extended result codes for the above trace options. */
172 #define PTRACE_EVENT_FORK 1
173 #define PTRACE_EVENT_VFORK 2
174 #define PTRACE_EVENT_CLONE 3
175 #define PTRACE_EVENT_EXEC 4
176 #define PTRACE_EVENT_VFORK_DONE 5
177 #define PTRACE_EVENT_EXIT 6
179 #endif /* PTRACE_EVENT_FORK */
/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif
/* Hard-code the request numbers when the headers predate them.  */
#ifndef PTRACE_GETSIGINFO
# define PTRACE_GETSIGINFO    0x4202
# define PTRACE_SETSIGINFO    0x4203
#endif
193 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
194 the use of the multi-threaded target. */
195 static struct target_ops
*linux_ops
;
196 static struct target_ops linux_ops_saved
;
198 /* The method to call, if any, when a new thread is attached. */
199 static void (*linux_nat_new_thread
) (ptid_t
);
201 /* The method to call, if any, when the siginfo object needs to be
202 converted between the layout returned by ptrace, and the layout in
203 the architecture of the inferior. */
204 static int (*linux_nat_siginfo_fixup
) (struct siginfo
*,
208 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
209 Called by our to_xfer_partial. */
210 static LONGEST (*super_xfer_partial
) (struct target_ops
*,
212 const char *, gdb_byte
*,
216 static int debug_linux_nat
;
/* "show debug lin-lwp" command callback.  Print the current state of
   the lwp debug flag.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
225 static int debug_linux_nat_async
= 0;
/* "show debug lin-lwp-async" command callback.  */
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}
234 static int disable_randomization
= 1;
/* "show disable-randomization" command callback.  On targets without
   personality(2) support, report that the feature is unavailable.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file, _("\
Disabling randomization of debuggee's virtual address space is %s.\n"),
		    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}
252 set_disable_randomization (char *args
, int from_tty
, struct cmd_list_element
*c
)
254 #ifndef HAVE_PERSONALITY
256 Disabling randomization of debuggee's virtual address space is unsupported on\n\
258 #endif /* !HAVE_PERSONALITY */
261 static int linux_parent_pid
;
263 struct simple_pid_list
267 struct simple_pid_list
*next
;
269 struct simple_pid_list
*stopped_pids
;
271 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
272 can not be used, 1 if it can. */
274 static int linux_supports_tracefork_flag
= -1;
276 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
277 PTRACE_O_TRACEVFORKDONE. */
279 static int linux_supports_tracevforkdone_flag
= -1;
281 /* Async mode support */
283 /* Zero if the async mode, although enabled, is masked, which means
284 linux_nat_wait should behave as if async mode was off. */
285 static int linux_nat_async_mask_value
= 1;
287 /* The read/write ends of the pipe registered as waitable file in the
289 static int linux_nat_event_pipe
[2] = { -1, -1 };
291 /* Flush the event pipe. */
294 async_file_flush (void)
301 ret
= read (linux_nat_event_pipe
[0], &buf
, 1);
303 while (ret
>= 0 || (ret
== -1 && errno
== EINTR
));
306 /* Put something (anything, doesn't matter what, or how much) in event
307 pipe, so that the select/poll in the event-loop realizes we have
308 something to process. */
311 async_file_mark (void)
315 /* It doesn't really matter what the pipe contains, as long we end
316 up with something in it. Might as well flush the previous
322 ret
= write (linux_nat_event_pipe
[1], "+", 1);
324 while (ret
== -1 && errno
== EINTR
);
326 /* Ignore EAGAIN. If the pipe is full, the event loop will already
327 be awakened anyway. */
330 static void linux_nat_async (void (*callback
)
331 (enum inferior_event_type event_type
, void *context
),
333 static int linux_nat_async_mask (int mask
);
334 static int kill_lwp (int lwpid
, int signo
);
336 static int stop_callback (struct lwp_info
*lp
, void *data
);
338 static void block_child_signals (sigset_t
*prev_mask
);
339 static void restore_child_signals_mask (sigset_t
*prev_mask
);
341 /* Trivial list manipulation functions to keep track of a list of
342 new stopped processes. */
344 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
346 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
348 new_pid
->status
= status
;
349 new_pid
->next
= *listp
;
354 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *status
)
356 struct simple_pid_list
**p
;
358 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
359 if ((*p
)->pid
== pid
)
361 struct simple_pid_list
*next
= (*p
)->next
;
362 *status
= (*p
)->status
;
371 linux_record_stopped_pid (int pid
, int status
)
373 add_to_pid_list (&stopped_pids
, pid
, status
);
377 /* A helper function for linux_test_for_tracefork, called after fork (). */
/* Body of the throw-away child used by linux_test_for_tracefork: ask
   to be traced, stop so the parent can set ptrace options, then fork
   (to trigger a PTRACE_EVENT_FORK if supported) and exit.  */
static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
390 /* Wrapper function for waitpid which handles EINTR. */
/* Wrapper for waitpid that retries transparently when the call is
   interrupted by a signal (EINTR).  Returns waitpid's result.  */
static int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  do
    {
      ret = waitpid (pid, status, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}
406 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
408 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
409 we know that the feature is not available. This may change the tracing
410 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
412 However, if it succeeds, we don't know for sure that the feature is
413 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
414 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
415 fork tracing, and let it fork. If the process exits, we assume that we
416 can't use TRACEFORK; if we get the fork notification, and we can extract
417 the new child's PID, then we assume that we can. */
420 linux_test_for_tracefork (int original_pid
)
422 int child_pid
, ret
, status
;
426 /* We don't want those ptrace calls to be interrupted. */
427 block_child_signals (&prev_mask
);
429 linux_supports_tracefork_flag
= 0;
430 linux_supports_tracevforkdone_flag
= 0;
432 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACEFORK
);
435 restore_child_signals_mask (&prev_mask
);
441 perror_with_name (("fork"));
444 linux_tracefork_child ();
446 ret
= my_waitpid (child_pid
, &status
, 0);
448 perror_with_name (("waitpid"));
449 else if (ret
!= child_pid
)
450 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret
);
451 if (! WIFSTOPPED (status
))
452 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status
);
454 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0, PTRACE_O_TRACEFORK
);
457 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
460 warning (_("linux_test_for_tracefork: failed to kill child"));
461 restore_child_signals_mask (&prev_mask
);
465 ret
= my_waitpid (child_pid
, &status
, 0);
466 if (ret
!= child_pid
)
467 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
468 else if (!WIFSIGNALED (status
))
469 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
470 "killed child"), status
);
472 restore_child_signals_mask (&prev_mask
);
476 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
477 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
478 PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORKDONE
);
479 linux_supports_tracevforkdone_flag
= (ret
== 0);
481 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
483 warning (_("linux_test_for_tracefork: failed to resume child"));
485 ret
= my_waitpid (child_pid
, &status
, 0);
487 if (ret
== child_pid
&& WIFSTOPPED (status
)
488 && status
>> 16 == PTRACE_EVENT_FORK
)
491 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
492 if (ret
== 0 && second_pid
!= 0)
496 linux_supports_tracefork_flag
= 1;
497 my_waitpid (second_pid
, &second_status
, 0);
498 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
500 warning (_("linux_test_for_tracefork: failed to kill second child"));
501 my_waitpid (second_pid
, &status
, 0);
505 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
506 "(%d, status 0x%x)"), ret
, status
);
508 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
510 warning (_("linux_test_for_tracefork: failed to kill child"));
511 my_waitpid (child_pid
, &status
, 0);
513 restore_child_signals_mask (&prev_mask
);
516 /* Return non-zero iff we have tracefork functionality available.
517 This function also sets linux_supports_tracefork_flag. */
520 linux_supports_tracefork (int pid
)
522 if (linux_supports_tracefork_flag
== -1)
523 linux_test_for_tracefork (pid
);
524 return linux_supports_tracefork_flag
;
528 linux_supports_tracevforkdone (int pid
)
530 if (linux_supports_tracefork_flag
== -1)
531 linux_test_for_tracefork (pid
);
532 return linux_supports_tracevforkdone_flag
;
537 linux_enable_event_reporting (ptid_t ptid
)
539 int pid
= ptid_get_lwp (ptid
);
543 pid
= ptid_get_pid (ptid
);
545 if (! linux_supports_tracefork (pid
))
548 options
= PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEEXEC
549 | PTRACE_O_TRACECLONE
;
550 if (linux_supports_tracevforkdone (pid
))
551 options
|= PTRACE_O_TRACEVFORKDONE
;
553 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
554 read-only process state. */
556 ptrace (PTRACE_SETOPTIONS
, pid
, 0, options
);
/* target_ops post-attach hook: turn on ptrace event reporting for the
   attached process and look for a thread library to hook into.  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}
567 linux_child_post_startup_inferior (ptid_t ptid
)
569 linux_enable_event_reporting (ptid
);
570 check_for_thread_db ();
574 linux_child_follow_fork (struct target_ops
*ops
, int follow_child
)
578 struct target_waitstatus last_status
;
580 int parent_pid
, child_pid
;
582 block_child_signals (&prev_mask
);
584 get_last_target_status (&last_ptid
, &last_status
);
585 has_vforked
= (last_status
.kind
== TARGET_WAITKIND_VFORKED
);
586 parent_pid
= ptid_get_lwp (last_ptid
);
588 parent_pid
= ptid_get_pid (last_ptid
);
589 child_pid
= PIDGET (last_status
.value
.related_pid
);
593 /* We're already attached to the parent, by default. */
595 /* Before detaching from the child, remove all breakpoints from
596 it. If we forked, then this has already been taken care of
597 by infrun.c. If we vforked however, any breakpoint inserted
598 in the parent is visible in the child, even those added while
599 stopped in a vfork catchpoint. This won't actually modify
600 the breakpoint list, but will physically remove the
601 breakpoints from the child. This will remove the breakpoints
602 from the parent also, but they'll be reinserted below. */
604 detach_breakpoints (child_pid
);
606 /* Detach new forked process? */
609 if (info_verbose
|| debug_linux_nat
)
611 target_terminal_ours ();
612 fprintf_filtered (gdb_stdlog
,
613 "Detaching after fork from child process %d.\n",
617 ptrace (PTRACE_DETACH
, child_pid
, 0, 0);
621 struct fork_info
*fp
;
622 struct inferior
*parent_inf
, *child_inf
;
624 /* Add process to GDB's tables. */
625 child_inf
= add_inferior (child_pid
);
627 parent_inf
= find_inferior_pid (GET_PID (last_ptid
));
628 child_inf
->attach_flag
= parent_inf
->attach_flag
;
630 /* Retain child fork in ptrace (stopped) state. */
631 fp
= find_fork_pid (child_pid
);
633 fp
= add_fork (child_pid
);
634 fork_save_infrun_state (fp
, 0);
639 gdb_assert (linux_supports_tracefork_flag
>= 0);
640 if (linux_supports_tracevforkdone (0))
644 ptrace (PTRACE_CONT
, parent_pid
, 0, 0);
645 my_waitpid (parent_pid
, &status
, __WALL
);
646 if ((status
>> 16) != PTRACE_EVENT_VFORK_DONE
)
647 warning (_("Unexpected waitpid result %06x when waiting for "
648 "vfork-done"), status
);
652 /* We can't insert breakpoints until the child has
653 finished with the shared memory region. We need to
654 wait until that happens. Ideal would be to just
656 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
657 - waitpid (parent_pid, &status, __WALL);
658 However, most architectures can't handle a syscall
659 being traced on the way out if it wasn't traced on
662 We might also think to loop, continuing the child
663 until it exits or gets a SIGTRAP. One problem is
664 that the child might call ptrace with PTRACE_TRACEME.
666 There's no simple and reliable way to figure out when
667 the vforked child will be done with its copy of the
668 shared memory. We could step it out of the syscall,
669 two instructions, let it go, and then single-step the
670 parent once. When we have hardware single-step, this
671 would work; with software single-step it could still
672 be made to work but we'd have to be able to insert
673 single-step breakpoints in the child, and we'd have
674 to insert -just- the single-step breakpoint in the
675 parent. Very awkward.
677 In the end, the best we can do is to make sure it
678 runs for a little while. Hopefully it will be out of
679 range of any breakpoints we reinsert. Usually this
680 is only the single-step breakpoint at vfork's return
686 /* Since we vforked, breakpoints were removed in the parent
687 too. Put them back. */
688 reattach_breakpoints (parent_pid
);
693 struct thread_info
*last_tp
= find_thread_pid (last_ptid
);
694 struct thread_info
*tp
;
695 char child_pid_spelling
[40];
696 struct inferior
*parent_inf
, *child_inf
;
698 /* Copy user stepping state to the new inferior thread. */
699 struct breakpoint
*step_resume_breakpoint
= last_tp
->step_resume_breakpoint
;
700 CORE_ADDR step_range_start
= last_tp
->step_range_start
;
701 CORE_ADDR step_range_end
= last_tp
->step_range_end
;
702 struct frame_id step_frame_id
= last_tp
->step_frame_id
;
704 /* Otherwise, deleting the parent would get rid of this
706 last_tp
->step_resume_breakpoint
= NULL
;
708 /* Before detaching from the parent, remove all breakpoints from it. */
709 remove_breakpoints ();
711 if (info_verbose
|| debug_linux_nat
)
713 target_terminal_ours ();
714 fprintf_filtered (gdb_stdlog
,
715 "Attaching after fork to child process %d.\n",
719 /* Add the new inferior first, so that the target_detach below
720 doesn't unpush the target. */
722 child_inf
= add_inferior (child_pid
);
724 parent_inf
= find_inferior_pid (GET_PID (last_ptid
));
725 child_inf
->attach_flag
= parent_inf
->attach_flag
;
727 /* If we're vforking, we may want to hold on to the parent until
728 the child exits or execs. At exec time we can remove the old
729 breakpoints from the parent and detach it; at exit time we
730 could do the same (or even, sneakily, resume debugging it - the
731 child's exec has failed, or something similar).
733 This doesn't clean up "properly", because we can't call
734 target_detach, but that's OK; if the current target is "child",
735 then it doesn't need any further cleanups, and lin_lwp will
736 generally not encounter vfork (vfork is defined to fork
739 The holding part is very easy if we have VFORKDONE events;
740 but keeping track of both processes is beyond GDB at the
741 moment. So we don't expose the parent to the rest of GDB.
742 Instead we quietly hold onto it until such time as we can
747 linux_parent_pid
= parent_pid
;
748 detach_inferior (parent_pid
);
750 else if (!detach_fork
)
752 struct fork_info
*fp
;
753 /* Retain parent fork in ptrace (stopped) state. */
754 fp
= find_fork_pid (parent_pid
);
756 fp
= add_fork (parent_pid
);
757 fork_save_infrun_state (fp
, 0);
759 /* Also add an entry for the child fork. */
760 fp
= find_fork_pid (child_pid
);
762 fp
= add_fork (child_pid
);
763 fork_save_infrun_state (fp
, 0);
766 target_detach (NULL
, 0);
768 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
770 linux_nat_switch_fork (inferior_ptid
);
771 check_for_thread_db ();
773 tp
= inferior_thread ();
774 tp
->step_resume_breakpoint
= step_resume_breakpoint
;
775 tp
->step_range_start
= step_range_start
;
776 tp
->step_range_end
= step_range_end
;
777 tp
->step_frame_id
= step_frame_id
;
779 /* Reset breakpoints in the child as appropriate. */
780 follow_inferior_reset_breakpoints ();
783 restore_child_signals_mask (&prev_mask
);
/* target_ops hook: fork catchpoints require PTRACE_O_TRACEFORK.  */
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}
/* target_ops hook: vfork catchpoints require PTRACE_O_TRACEVFORK.  */
static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}
/* target_ops hook: exec catchpoints require PTRACE_O_TRACEEXEC.  */
static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}
809 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
810 are processes sharing the same VM space. A multi-threaded process
811 is basically a group of such processes. However, such a grouping
812 is almost entirely a user-space issue; the kernel doesn't enforce
813 such a grouping at all (this might change in the future). In
814 general, we'll rely on the threads library (i.e. the GNU/Linux
815 Threads library) to provide such a grouping.
817 It is perfectly well possible to write a multi-threaded application
818 without the assistance of a threads library, by using the clone
819 system call directly. This module should be able to give some
820 rudimentary support for debugging such applications if developers
821 specify the CLONE_PTRACE flag in the clone system call, and are
822 using the Linux kernel 2.4 or above.
824 Note that there are some peculiarities in GNU/Linux that affect
827 - In general one should specify the __WCLONE flag to waitpid in
828 order to make it report events for any of the cloned processes
829 (and leave it out for the initial process). However, if a cloned
830 process has exited the exit status is only reported if the
831 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
832 we cannot use it since GDB must work on older systems too.
834 - When a traced, cloned process exits and is waited for by the
835 debugger, the kernel reassigns it to the original parent and
836 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
837 library doesn't notice this, which leads to the "zombie problem":
838 When debugged a multi-threaded process that spawns a lot of
839 threads will run out of processes, even if the threads exit,
840 because the "zombies" stay around. */
842 /* List of known LWPs. */
843 struct lwp_info
*lwp_list
;
845 /* Number of LWPs in the list. */
849 /* Original signal mask. */
850 static sigset_t normal_mask
;
852 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
853 _initialize_linux_nat. */
854 static sigset_t suspend_mask
;
856 /* Signals to block to make that sigsuspend work. */
857 static sigset_t blocked_mask
;
859 /* SIGCHLD action. */
860 struct sigaction sigchld_action
;
862 /* Block child signals (SIGCHLD and linux threads signals), and store
863 the previous mask in PREV_MASK. */
866 block_child_signals (sigset_t
*prev_mask
)
868 /* Make sure SIGCHLD is blocked. */
869 if (!sigismember (&blocked_mask
, SIGCHLD
))
870 sigaddset (&blocked_mask
, SIGCHLD
);
872 sigprocmask (SIG_BLOCK
, &blocked_mask
, prev_mask
);
875 /* Restore child signals mask, previously returned by
876 block_child_signals. */
879 restore_child_signals_mask (sigset_t
*prev_mask
)
881 sigprocmask (SIG_SETMASK
, prev_mask
, NULL
);
885 /* Prototypes for local functions. */
886 static int stop_wait_callback (struct lwp_info
*lp
, void *data
);
887 static int linux_thread_alive (ptid_t ptid
);
888 static char *linux_child_pid_to_exec_file (int pid
);
889 static int cancel_breakpoint (struct lwp_info
*lp
);
892 /* Convert wait status STATUS to a string. Used for printing debug
static char *
status_to_str (int status)
{
  /* Static buffer: the returned string is only valid until the next
     call, which is fine for the debug-logging callers.  */
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
	      strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    /* Bug fix: a terminated process's signal is extracted with
       WTERMSIG, not WSTOPSIG.  */
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}
912 /* Initialize the list of LWPs. Note that this module, contrary to
913 what GDB's generic threads layer does for its thread list,
914 re-initializes the LWP lists whenever we mourn or detach (which
915 doesn't involve mourning) the inferior. */
920 struct lwp_info
*lp
, *lpnext
;
922 for (lp
= lwp_list
; lp
; lp
= lpnext
)
932 /* Add the LWP specified by PID to the list. Return a pointer to the
933 structure describing the new LWP. The LWP should already be stopped
934 (with an exception for the very first LWP). */
936 static struct lwp_info
*
937 add_lwp (ptid_t ptid
)
941 gdb_assert (is_lwp (ptid
));
943 lp
= (struct lwp_info
*) xmalloc (sizeof (struct lwp_info
));
945 memset (lp
, 0, sizeof (struct lwp_info
));
947 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
955 if (num_lwps
> 1 && linux_nat_new_thread
!= NULL
)
956 linux_nat_new_thread (ptid
);
961 /* Remove the LWP specified by PID from the list. */
964 delete_lwp (ptid_t ptid
)
966 struct lwp_info
*lp
, *lpprev
;
970 for (lp
= lwp_list
; lp
; lpprev
= lp
, lp
= lp
->next
)
971 if (ptid_equal (lp
->ptid
, ptid
))
980 lpprev
->next
= lp
->next
;
987 /* Return a pointer to the structure describing the LWP corresponding
988 to PID. If no corresponding LWP could be found, return NULL. */
990 static struct lwp_info
*
991 find_lwp_pid (ptid_t ptid
)
997 lwp
= GET_LWP (ptid
);
999 lwp
= GET_PID (ptid
);
1001 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
1002 if (lwp
== GET_LWP (lp
->ptid
))
1008 /* Call CALLBACK with its second argument set to DATA for every LWP in
1009 the list. If CALLBACK returns 1 for a particular LWP, return a
1010 pointer to the structure describing that LWP immediately.
1011 Otherwise return NULL. */
1014 iterate_over_lwps (int (*callback
) (struct lwp_info
*, void *), void *data
)
1016 struct lwp_info
*lp
, *lpnext
;
1018 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1021 if ((*callback
) (lp
, data
))
1028 /* Update our internal state when changing from one fork (checkpoint,
1029 et cetera) to another indicated by NEW_PTID. We can only switch
1030 single-threaded applications, so we only create one new LWP, and
1031 the previous list is discarded. */
1034 linux_nat_switch_fork (ptid_t new_ptid
)
1036 struct lwp_info
*lp
;
1039 lp
= add_lwp (new_ptid
);
1042 init_thread_list ();
1043 add_thread_silent (new_ptid
);
1046 /* Handle the exit of a single thread LP. */
1049 exit_lwp (struct lwp_info
*lp
)
1051 struct thread_info
*th
= find_thread_pid (lp
->ptid
);
1055 if (print_thread_events
)
1056 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp
->ptid
));
1058 delete_thread (lp
->ptid
);
1061 delete_lwp (lp
->ptid
);
1064 /* Detect `T (stopped)' in `/proc/PID/status'.
1065 Other states including `T (tracing stop)' are reported as false. */
static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      /* Scan for the "State:" line; BUF holds it after the loop.  */
      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "State:", 6) == 0)
	    {
	      have_state = 1;
	      break;
	    }
	}
      /* Only a job-control stop counts; `T (tracing stop)' does not
	 match this string and is reported as false.  */
      if (have_state && strstr (buf, "T (stopped)") != NULL)
	retval = 1;
      fclose (status_file);
    }
  return retval;
}
1095 /* Wait for the LWP specified by LP, which we have just attached to.
1096 Returns a wait status for that LWP, to cache. */
1099 linux_nat_post_attach_wait (ptid_t ptid
, int first
, int *cloned
,
1102 pid_t new_pid
, pid
= GET_LWP (ptid
);
1105 if (pid_is_stopped (pid
))
1107 if (debug_linux_nat
)
1108 fprintf_unfiltered (gdb_stdlog
,
1109 "LNPAW: Attaching to a stopped process\n");
1111 /* The process is definitely stopped. It is in a job control
1112 stop, unless the kernel predates the TASK_STOPPED /
1113 TASK_TRACED distinction, in which case it might be in a
1114 ptrace stop. Make sure it is in a ptrace stop; from there we
1115 can kill it, signal it, et cetera.
1117 First make sure there is a pending SIGSTOP. Since we are
1118 already attached, the process can not transition from stopped
1119 to running without a PTRACE_CONT; so we know this signal will
1120 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1121 probably already in the queue (unless this kernel is old
1122 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1123 is not an RT signal, it can only be queued once. */
1124 kill_lwp (pid
, SIGSTOP
);
1126 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1127 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1128 ptrace (PTRACE_CONT
, pid
, 0, 0);
1131 /* Make sure the initial process is stopped. The user-level threads
1132 layer might want to poke around in the inferior, and that won't
1133 work if things haven't stabilized yet. */
1134 new_pid
= my_waitpid (pid
, &status
, 0);
1135 if (new_pid
== -1 && errno
== ECHILD
)
1138 warning (_("%s is a cloned process"), target_pid_to_str (ptid
));
1140 /* Try again with __WCLONE to check cloned processes. */
1141 new_pid
= my_waitpid (pid
, &status
, __WCLONE
);
1145 gdb_assert (pid
== new_pid
&& WIFSTOPPED (status
));
1147 if (WSTOPSIG (status
) != SIGSTOP
)
1150 if (debug_linux_nat
)
1151 fprintf_unfiltered (gdb_stdlog
,
1152 "LNPAW: Received %s after attaching\n",
1153 status_to_str (status
));
1159 /* Attach to the LWP specified by PID. Return 0 if successful or -1
1160 if the new LWP could not be attached. */
1163 lin_lwp_attach_lwp (ptid_t ptid
)
1165 struct lwp_info
*lp
;
1168 gdb_assert (is_lwp (ptid
));
1170 block_child_signals (&prev_mask
);
1172 lp
= find_lwp_pid (ptid
);
1174 /* We assume that we're already attached to any LWP that has an id
1175 equal to the overall process id, and to any LWP that is already
1176 in our list of LWPs. If we're not seeing exit events from threads
1177 and we've had PID wraparound since we last tried to stop all threads,
1178 this assumption might be wrong; fortunately, this is very unlikely
1180 if (GET_LWP (ptid
) != GET_PID (ptid
) && lp
== NULL
)
1182 int status
, cloned
= 0, signalled
= 0;
1184 if (ptrace (PTRACE_ATTACH
, GET_LWP (ptid
), 0, 0) < 0)
1186 /* If we fail to attach to the thread, issue a warning,
1187 but continue. One way this can happen is if thread
1188 creation is interrupted; as of Linux kernel 2.6.19, a
1189 bug may place threads in the thread list and then fail
1191 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid
),
1192 safe_strerror (errno
));
1193 restore_child_signals_mask (&prev_mask
);
1197 if (debug_linux_nat
)
1198 fprintf_unfiltered (gdb_stdlog
,
1199 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1200 target_pid_to_str (ptid
));
1202 status
= linux_nat_post_attach_wait (ptid
, 0, &cloned
, &signalled
);
1203 lp
= add_lwp (ptid
);
1205 lp
->cloned
= cloned
;
1206 lp
->signalled
= signalled
;
1207 if (WSTOPSIG (status
) != SIGSTOP
)
1210 lp
->status
= status
;
1213 target_post_attach (GET_LWP (lp
->ptid
));
1215 if (debug_linux_nat
)
1217 fprintf_unfiltered (gdb_stdlog
,
1218 "LLAL: waitpid %s received %s\n",
1219 target_pid_to_str (ptid
),
1220 status_to_str (status
));
1225 /* We assume that the LWP representing the original process is
1226 already stopped. Mark it as stopped in the data structure
1227 that the GNU/linux ptrace layer uses to keep track of
1228 threads. Note that this won't have already been done since
1229 the main thread will have, we assume, been stopped by an
1230 attach from a different layer. */
1232 lp
= add_lwp (ptid
);
1236 restore_child_signals_mask (&prev_mask
);
1241 linux_nat_create_inferior (struct target_ops
*ops
,
1242 char *exec_file
, char *allargs
, char **env
,
1245 int saved_async
= 0;
1246 #ifdef HAVE_PERSONALITY
1247 int personality_orig
= 0, personality_set
= 0;
1248 #endif /* HAVE_PERSONALITY */
1250 /* The fork_child mechanism is synchronous and calls target_wait, so
1251 we have to mask the async mode. */
1253 if (target_can_async_p ())
1254 /* Mask async mode. Creating a child requires a loop calling
1255 wait_for_inferior currently. */
1256 saved_async
= linux_nat_async_mask (0);
1258 #ifdef HAVE_PERSONALITY
1259 if (disable_randomization
)
1262 personality_orig
= personality (0xffffffff);
1263 if (errno
== 0 && !(personality_orig
& ADDR_NO_RANDOMIZE
))
1265 personality_set
= 1;
1266 personality (personality_orig
| ADDR_NO_RANDOMIZE
);
1268 if (errno
!= 0 || (personality_set
1269 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE
)))
1270 warning (_("Error disabling address space randomization: %s"),
1271 safe_strerror (errno
));
1273 #endif /* HAVE_PERSONALITY */
1275 linux_ops
->to_create_inferior (ops
, exec_file
, allargs
, env
, from_tty
);
1277 #ifdef HAVE_PERSONALITY
1278 if (personality_set
)
1281 personality (personality_orig
);
1283 warning (_("Error restoring address space randomization: %s"),
1284 safe_strerror (errno
));
1286 #endif /* HAVE_PERSONALITY */
1289 linux_nat_async_mask (saved_async
);
1293 linux_nat_attach (struct target_ops
*ops
, char *args
, int from_tty
)
1295 struct lwp_info
*lp
;
1299 /* FIXME: We should probably accept a list of process id's, and
1300 attach all of them. */
1301 linux_ops
->to_attach (ops
, args
, from_tty
);
1303 /* The ptrace base target adds the main thread with (pid,0,0)
1304 format. Decorate it with lwp info. */
1305 ptid
= BUILD_LWP (GET_PID (inferior_ptid
), GET_PID (inferior_ptid
));
1306 thread_change_ptid (inferior_ptid
, ptid
);
1308 /* Add the initial process as the first LWP to the list. */
1309 lp
= add_lwp (ptid
);
1311 status
= linux_nat_post_attach_wait (lp
->ptid
, 1, &lp
->cloned
,
1315 /* Save the wait status to report later. */
1317 if (debug_linux_nat
)
1318 fprintf_unfiltered (gdb_stdlog
,
1319 "LNA: waitpid %ld, saving status %s\n",
1320 (long) GET_PID (lp
->ptid
), status_to_str (status
));
1322 lp
->status
= status
;
1324 if (target_can_async_p ())
1325 target_async (inferior_event_handler
, 0);
1328 /* Get pending status of LP. */
1330 get_pending_status (struct lwp_info
*lp
, int *status
)
1332 struct target_waitstatus last
;
1335 get_last_target_status (&last_ptid
, &last
);
1337 /* If this lwp is the ptid that GDB is processing an event from, the
1338 signal will be in stop_signal. Otherwise, we may cache pending
1339 events in lp->status while trying to stop all threads (see
1340 stop_wait_callback). */
1346 enum target_signal signo
= TARGET_SIGNAL_0
;
1348 if (is_executing (lp
->ptid
))
1350 /* If the core thought this lwp was executing --- e.g., the
1351 executing property hasn't been updated yet, but the
1352 thread has been stopped with a stop_callback /
1353 stop_wait_callback sequence (see linux_nat_detach for
1354 example) --- we can only have pending events in the local
1356 signo
= target_signal_from_host (WSTOPSIG (lp
->status
));
1360 /* If the core knows the thread is not executing, then we
1361 have the last signal recorded in
1362 thread_info->stop_signal. */
1364 struct thread_info
*tp
= find_thread_pid (lp
->ptid
);
1365 signo
= tp
->stop_signal
;
1368 if (signo
!= TARGET_SIGNAL_0
1369 && !signal_pass_state (signo
))
1371 if (debug_linux_nat
)
1372 fprintf_unfiltered (gdb_stdlog
, "\
1373 GPT: lwp %s had signal %s, but it is in no pass state\n",
1374 target_pid_to_str (lp
->ptid
),
1375 target_signal_to_string (signo
));
1379 if (signo
!= TARGET_SIGNAL_0
)
1380 *status
= W_STOPCODE (target_signal_to_host (signo
));
1382 if (debug_linux_nat
)
1383 fprintf_unfiltered (gdb_stdlog
,
1384 "GPT: lwp %s as pending signal %s\n",
1385 target_pid_to_str (lp
->ptid
),
1386 target_signal_to_string (signo
));
1391 if (GET_LWP (lp
->ptid
) == GET_LWP (last_ptid
))
1393 struct thread_info
*tp
= find_thread_pid (lp
->ptid
);
1394 if (tp
->stop_signal
!= TARGET_SIGNAL_0
1395 && signal_pass_state (tp
->stop_signal
))
1396 *status
= W_STOPCODE (target_signal_to_host (tp
->stop_signal
));
1399 *status
= lp
->status
;
1406 detach_callback (struct lwp_info
*lp
, void *data
)
1408 gdb_assert (lp
->status
== 0 || WIFSTOPPED (lp
->status
));
1410 if (debug_linux_nat
&& lp
->status
)
1411 fprintf_unfiltered (gdb_stdlog
, "DC: Pending %s for %s on detach.\n",
1412 strsignal (WSTOPSIG (lp
->status
)),
1413 target_pid_to_str (lp
->ptid
));
1415 /* If there is a pending SIGSTOP, get rid of it. */
1418 if (debug_linux_nat
)
1419 fprintf_unfiltered (gdb_stdlog
,
1420 "DC: Sending SIGCONT to %s\n",
1421 target_pid_to_str (lp
->ptid
));
1423 kill_lwp (GET_LWP (lp
->ptid
), SIGCONT
);
1427 /* We don't actually detach from the LWP that has an id equal to the
1428 overall process id just yet. */
1429 if (GET_LWP (lp
->ptid
) != GET_PID (lp
->ptid
))
1433 /* Pass on any pending signal for this LWP. */
1434 get_pending_status (lp
, &status
);
1437 if (ptrace (PTRACE_DETACH
, GET_LWP (lp
->ptid
), 0,
1438 WSTOPSIG (status
)) < 0)
1439 error (_("Can't detach %s: %s"), target_pid_to_str (lp
->ptid
),
1440 safe_strerror (errno
));
1442 if (debug_linux_nat
)
1443 fprintf_unfiltered (gdb_stdlog
,
1444 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1445 target_pid_to_str (lp
->ptid
),
1446 strsignal (WSTOPSIG (status
)));
1448 delete_lwp (lp
->ptid
);
1455 linux_nat_detach (struct target_ops
*ops
, char *args
, int from_tty
)
1459 enum target_signal sig
;
1461 if (target_can_async_p ())
1462 linux_nat_async (NULL
, 0);
1464 /* Stop all threads before detaching. ptrace requires that the
1465 thread is stopped to sucessfully detach. */
1466 iterate_over_lwps (stop_callback
, NULL
);
1467 /* ... and wait until all of them have reported back that
1468 they're no longer running. */
1469 iterate_over_lwps (stop_wait_callback
, NULL
);
1471 iterate_over_lwps (detach_callback
, NULL
);
1473 /* Only the initial process should be left right now. */
1474 gdb_assert (num_lwps
== 1);
1476 /* Pass on any pending signal for the last LWP. */
1477 if ((args
== NULL
|| *args
== '\0')
1478 && get_pending_status (lwp_list
, &status
) != -1
1479 && WIFSTOPPED (status
))
1481 /* Put the signal number in ARGS so that inf_ptrace_detach will
1482 pass it along with PTRACE_DETACH. */
1484 sprintf (args
, "%d", (int) WSTOPSIG (status
));
1485 fprintf_unfiltered (gdb_stdlog
,
1486 "LND: Sending signal %s to %s\n",
1488 target_pid_to_str (lwp_list
->ptid
));
1491 /* Destroy LWP info; it's no longer valid. */
1494 pid
= ptid_get_pid (inferior_ptid
);
1496 if (forks_exist_p ())
1498 /* Multi-fork case. The current inferior_ptid is being detached
1499 from, but there are other viable forks to debug. Detach from
1500 the current fork, and context-switch to the first
1502 linux_fork_detach (args
, from_tty
);
1504 if (non_stop
&& target_can_async_p ())
1505 target_async (inferior_event_handler
, 0);
1508 linux_ops
->to_detach (ops
, args
, from_tty
);
1514 resume_callback (struct lwp_info
*lp
, void *data
)
1516 if (lp
->stopped
&& lp
->status
== 0)
1518 linux_ops
->to_resume (linux_ops
,
1519 pid_to_ptid (GET_LWP (lp
->ptid
)),
1520 0, TARGET_SIGNAL_0
);
1521 if (debug_linux_nat
)
1522 fprintf_unfiltered (gdb_stdlog
,
1523 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1524 target_pid_to_str (lp
->ptid
));
1527 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1529 else if (lp
->stopped
&& debug_linux_nat
)
1530 fprintf_unfiltered (gdb_stdlog
, "RC: Not resuming sibling %s (has pending)\n",
1531 target_pid_to_str (lp
->ptid
));
1532 else if (debug_linux_nat
)
1533 fprintf_unfiltered (gdb_stdlog
, "RC: Not resuming sibling %s (not stopped)\n",
1534 target_pid_to_str (lp
->ptid
));
1540 resume_clear_callback (struct lwp_info
*lp
, void *data
)
1547 resume_set_callback (struct lwp_info
*lp
, void *data
)
1554 linux_nat_resume (struct target_ops
*ops
,
1555 ptid_t ptid
, int step
, enum target_signal signo
)
1558 struct lwp_info
*lp
;
1561 if (debug_linux_nat
)
1562 fprintf_unfiltered (gdb_stdlog
,
1563 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1564 step
? "step" : "resume",
1565 target_pid_to_str (ptid
),
1566 signo
? strsignal (signo
) : "0",
1567 target_pid_to_str (inferior_ptid
));
1569 block_child_signals (&prev_mask
);
1571 /* A specific PTID means `step only this process id'. */
1572 resume_all
= (PIDGET (ptid
) == -1);
1574 if (non_stop
&& resume_all
)
1575 internal_error (__FILE__
, __LINE__
,
1576 "can't resume all in non-stop mode");
1581 iterate_over_lwps (resume_set_callback
, NULL
);
1583 iterate_over_lwps (resume_clear_callback
, NULL
);
1586 /* If PID is -1, it's the current inferior that should be
1587 handled specially. */
1588 if (PIDGET (ptid
) == -1)
1589 ptid
= inferior_ptid
;
1591 lp
= find_lwp_pid (ptid
);
1592 gdb_assert (lp
!= NULL
);
1594 /* Convert to something the lower layer understands. */
1595 ptid
= pid_to_ptid (GET_LWP (lp
->ptid
));
1597 /* Remember if we're stepping. */
1600 /* Mark this LWP as resumed. */
1603 /* If we have a pending wait status for this thread, there is no
1604 point in resuming the process. But first make sure that
1605 linux_nat_wait won't preemptively handle the event - we
1606 should never take this short-circuit if we are going to
1607 leave LP running, since we have skipped resuming all the
1608 other threads. This bit of code needs to be synchronized
1609 with linux_nat_wait. */
1611 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1614 struct inferior
*inf
;
1616 inf
= find_inferior_pid (ptid_get_pid (ptid
));
1618 saved_signo
= target_signal_from_host (WSTOPSIG (lp
->status
));
1620 /* Defer to common code if we're gaining control of the
1622 if (inf
->stop_soon
== NO_STOP_QUIETLY
1623 && signal_stop_state (saved_signo
) == 0
1624 && signal_print_state (saved_signo
) == 0
1625 && signal_pass_state (saved_signo
) == 1)
1627 if (debug_linux_nat
)
1628 fprintf_unfiltered (gdb_stdlog
,
1629 "LLR: Not short circuiting for ignored "
1630 "status 0x%x\n", lp
->status
);
1632 /* FIXME: What should we do if we are supposed to continue
1633 this thread with a signal? */
1634 gdb_assert (signo
== TARGET_SIGNAL_0
);
1635 signo
= saved_signo
;
1642 /* FIXME: What should we do if we are supposed to continue
1643 this thread with a signal? */
1644 gdb_assert (signo
== TARGET_SIGNAL_0
);
1646 if (debug_linux_nat
)
1647 fprintf_unfiltered (gdb_stdlog
,
1648 "LLR: Short circuiting for status 0x%x\n",
1651 restore_child_signals_mask (&prev_mask
);
1652 if (target_can_async_p ())
1654 target_async (inferior_event_handler
, 0);
1655 /* Tell the event loop we have something to process. */
1661 /* Mark LWP as not stopped to prevent it from being continued by
1666 iterate_over_lwps (resume_callback
, NULL
);
1668 linux_ops
->to_resume (linux_ops
, ptid
, step
, signo
);
1669 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1671 if (debug_linux_nat
)
1672 fprintf_unfiltered (gdb_stdlog
,
1673 "LLR: %s %s, %s (resume event thread)\n",
1674 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1675 target_pid_to_str (ptid
),
1676 signo
? strsignal (signo
) : "0");
1678 restore_child_signals_mask (&prev_mask
);
1679 if (target_can_async_p ())
1680 target_async (inferior_event_handler
, 0);
/* Issue kill to specified lwp.  */

/* Set once the tkill syscall is known to be unavailable (ENOSYS), so
   we stop retrying it and fall back to kill(2).  */
static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);

      if (errno != ENOSYS)
	return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}
1709 /* Handle a GNU/Linux extended wait response. If we see a clone
1710 event, we need to add the new LWP to our list (and not report the
1711 trap to higher layers). This function returns non-zero if the
1712 event should be ignored and we should wait again. If STOPPING is
1713 true, the new LWP remains stopped, otherwise it is continued. */
1716 linux_handle_extended_wait (struct lwp_info
*lp
, int status
,
1719 int pid
= GET_LWP (lp
->ptid
);
1720 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1721 struct lwp_info
*new_lp
= NULL
;
1722 int event
= status
>> 16;
1724 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
1725 || event
== PTRACE_EVENT_CLONE
)
1727 unsigned long new_pid
;
1730 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
1732 /* If we haven't already seen the new PID stop, wait for it now. */
1733 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
1735 /* The new child has a pending SIGSTOP. We can't affect it until it
1736 hits the SIGSTOP, but we're already attached. */
1737 ret
= my_waitpid (new_pid
, &status
,
1738 (event
== PTRACE_EVENT_CLONE
) ? __WCLONE
: 0);
1740 perror_with_name (_("waiting for new child"));
1741 else if (ret
!= new_pid
)
1742 internal_error (__FILE__
, __LINE__
,
1743 _("wait returned unexpected PID %d"), ret
);
1744 else if (!WIFSTOPPED (status
))
1745 internal_error (__FILE__
, __LINE__
,
1746 _("wait returned unexpected status 0x%x"), status
);
1749 ourstatus
->value
.related_pid
= ptid_build (new_pid
, new_pid
, 0);
1751 if (event
== PTRACE_EVENT_FORK
)
1752 ourstatus
->kind
= TARGET_WAITKIND_FORKED
;
1753 else if (event
== PTRACE_EVENT_VFORK
)
1754 ourstatus
->kind
= TARGET_WAITKIND_VFORKED
;
1757 struct cleanup
*old_chain
;
1759 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
1760 new_lp
= add_lwp (BUILD_LWP (new_pid
, GET_PID (inferior_ptid
)));
1762 new_lp
->stopped
= 1;
1764 if (WSTOPSIG (status
) != SIGSTOP
)
1766 /* This can happen if someone starts sending signals to
1767 the new thread before it gets a chance to run, which
1768 have a lower number than SIGSTOP (e.g. SIGUSR1).
1769 This is an unlikely case, and harder to handle for
1770 fork / vfork than for clone, so we do not try - but
1771 we handle it for clone events here. We'll send
1772 the other signal on to the thread below. */
1774 new_lp
->signalled
= 1;
1781 /* Add the new thread to GDB's lists as soon as possible
1784 1) the frontend doesn't have to wait for a stop to
1787 2) we tag it with the correct running state. */
1789 /* If the thread_db layer is active, let it know about
1790 this new thread, and add it to GDB's list. */
1791 if (!thread_db_attach_lwp (new_lp
->ptid
))
1793 /* We're not using thread_db. Add it to GDB's
1795 target_post_attach (GET_LWP (new_lp
->ptid
));
1796 add_thread (new_lp
->ptid
);
1801 set_running (new_lp
->ptid
, 1);
1802 set_executing (new_lp
->ptid
, 1);
1808 new_lp
->stopped
= 0;
1809 new_lp
->resumed
= 1;
1810 ptrace (PTRACE_CONT
, new_pid
, 0,
1811 status
? WSTOPSIG (status
) : 0);
1814 if (debug_linux_nat
)
1815 fprintf_unfiltered (gdb_stdlog
,
1816 "LHEW: Got clone event from LWP %ld, resuming\n",
1817 GET_LWP (lp
->ptid
));
1818 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
1826 if (event
== PTRACE_EVENT_EXEC
)
1828 ourstatus
->kind
= TARGET_WAITKIND_EXECD
;
1829 ourstatus
->value
.execd_pathname
1830 = xstrdup (linux_child_pid_to_exec_file (pid
));
1832 if (linux_parent_pid
)
1834 detach_breakpoints (linux_parent_pid
);
1835 ptrace (PTRACE_DETACH
, linux_parent_pid
, 0, 0);
1837 linux_parent_pid
= 0;
1840 /* At this point, all inserted breakpoints are gone. Doing this
1841 as soon as we detect an exec prevents the badness of deleting
1842 a breakpoint writing the current "shadow contents" to lift
1843 the bp. That shadow is NOT valid after an exec.
1845 Note that we have to do this after the detach_breakpoints
1846 call above, otherwise breakpoints wouldn't be lifted from the
1847 parent on a vfork, because detach_breakpoints would think
1848 that breakpoints are not inserted. */
1849 mark_breakpoints_out ();
1853 internal_error (__FILE__
, __LINE__
,
1854 _("unknown ptrace event %d"), event
);
1857 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1861 wait_lwp (struct lwp_info
*lp
)
1865 int thread_dead
= 0;
1867 gdb_assert (!lp
->stopped
);
1868 gdb_assert (lp
->status
== 0);
1870 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, 0);
1871 if (pid
== -1 && errno
== ECHILD
)
1873 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, __WCLONE
);
1874 if (pid
== -1 && errno
== ECHILD
)
1876 /* The thread has previously exited. We need to delete it
1877 now because, for some vendor 2.4 kernels with NPTL
1878 support backported, there won't be an exit event unless
1879 it is the main thread. 2.6 kernels will report an exit
1880 event for each thread that exits, as expected. */
1882 if (debug_linux_nat
)
1883 fprintf_unfiltered (gdb_stdlog
, "WL: %s vanished.\n",
1884 target_pid_to_str (lp
->ptid
));
1890 gdb_assert (pid
== GET_LWP (lp
->ptid
));
1892 if (debug_linux_nat
)
1894 fprintf_unfiltered (gdb_stdlog
,
1895 "WL: waitpid %s received %s\n",
1896 target_pid_to_str (lp
->ptid
),
1897 status_to_str (status
));
1901 /* Check if the thread has exited. */
1902 if (WIFEXITED (status
) || WIFSIGNALED (status
))
1905 if (debug_linux_nat
)
1906 fprintf_unfiltered (gdb_stdlog
, "WL: %s exited.\n",
1907 target_pid_to_str (lp
->ptid
));
1916 gdb_assert (WIFSTOPPED (status
));
1918 /* Handle GNU/Linux's extended waitstatus for trace events. */
1919 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
1921 if (debug_linux_nat
)
1922 fprintf_unfiltered (gdb_stdlog
,
1923 "WL: Handling extended status 0x%06x\n",
1925 if (linux_handle_extended_wait (lp
, status
, 1))
1926 return wait_lwp (lp
);
1932 /* Save the most recent siginfo for LP. This is currently only called
1933 for SIGTRAP; some ports use the si_addr field for
1934 target_stopped_data_address. In the future, it may also be used to
1935 restore the siginfo of requeued signals. */
1938 save_siginfo (struct lwp_info
*lp
)
1941 ptrace (PTRACE_GETSIGINFO
, GET_LWP (lp
->ptid
),
1942 (PTRACE_TYPE_ARG3
) 0, &lp
->siginfo
);
1945 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1948 /* Send a SIGSTOP to LP. */
1951 stop_callback (struct lwp_info
*lp
, void *data
)
1953 if (!lp
->stopped
&& !lp
->signalled
)
1957 if (debug_linux_nat
)
1959 fprintf_unfiltered (gdb_stdlog
,
1960 "SC: kill %s **<SIGSTOP>**\n",
1961 target_pid_to_str (lp
->ptid
));
1964 ret
= kill_lwp (GET_LWP (lp
->ptid
), SIGSTOP
);
1965 if (debug_linux_nat
)
1967 fprintf_unfiltered (gdb_stdlog
,
1968 "SC: lwp kill %d %s\n",
1970 errno
? safe_strerror (errno
) : "ERRNO-OK");
1974 gdb_assert (lp
->status
== 0);
1980 /* Return non-zero if LWP PID has a pending SIGINT. */
1983 linux_nat_has_pending_sigint (int pid
)
1985 sigset_t pending
, blocked
, ignored
;
1988 linux_proc_pending_signals (pid
, &pending
, &blocked
, &ignored
);
1990 if (sigismember (&pending
, SIGINT
)
1991 && !sigismember (&ignored
, SIGINT
))
1997 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2000 set_ignore_sigint (struct lwp_info
*lp
, void *data
)
2002 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2003 flag to consume the next one. */
2004 if (lp
->stopped
&& lp
->status
!= 0 && WIFSTOPPED (lp
->status
)
2005 && WSTOPSIG (lp
->status
) == SIGINT
)
2008 lp
->ignore_sigint
= 1;
2013 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2014 This function is called after we know the LWP has stopped; if the LWP
2015 stopped before the expected SIGINT was delivered, then it will never have
2016 arrived. Also, if the signal was delivered to a shared queue and consumed
2017 by a different thread, it will never be delivered to this LWP. */
2020 maybe_clear_ignore_sigint (struct lwp_info
*lp
)
2022 if (!lp
->ignore_sigint
)
2025 if (!linux_nat_has_pending_sigint (GET_LWP (lp
->ptid
)))
2027 if (debug_linux_nat
)
2028 fprintf_unfiltered (gdb_stdlog
,
2029 "MCIS: Clearing bogus flag for %s\n",
2030 target_pid_to_str (lp
->ptid
));
2031 lp
->ignore_sigint
= 0;
2035 /* Wait until LP is stopped. */
2038 stop_wait_callback (struct lwp_info
*lp
, void *data
)
2044 status
= wait_lwp (lp
);
2048 if (lp
->ignore_sigint
&& WIFSTOPPED (status
)
2049 && WSTOPSIG (status
) == SIGINT
)
2051 lp
->ignore_sigint
= 0;
2054 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2055 if (debug_linux_nat
)
2056 fprintf_unfiltered (gdb_stdlog
,
2057 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
2058 target_pid_to_str (lp
->ptid
),
2059 errno
? safe_strerror (errno
) : "OK");
2061 return stop_wait_callback (lp
, NULL
);
2064 maybe_clear_ignore_sigint (lp
);
2066 if (WSTOPSIG (status
) != SIGSTOP
)
2068 if (WSTOPSIG (status
) == SIGTRAP
)
2070 /* If a LWP other than the LWP that we're reporting an
2071 event for has hit a GDB breakpoint (as opposed to
2072 some random trap signal), then just arrange for it to
2073 hit it again later. We don't keep the SIGTRAP status
2074 and don't forward the SIGTRAP signal to the LWP. We
2075 will handle the current event, eventually we will
2076 resume all LWPs, and this one will get its breakpoint
2079 If we do not do this, then we run the risk that the
2080 user will delete or disable the breakpoint, but the
2081 thread will have already tripped on it. */
2083 /* Save the trap's siginfo in case we need it later. */
2086 /* Now resume this LWP and get the SIGSTOP event. */
2088 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2089 if (debug_linux_nat
)
2091 fprintf_unfiltered (gdb_stdlog
,
2092 "PTRACE_CONT %s, 0, 0 (%s)\n",
2093 target_pid_to_str (lp
->ptid
),
2094 errno
? safe_strerror (errno
) : "OK");
2096 fprintf_unfiltered (gdb_stdlog
,
2097 "SWC: Candidate SIGTRAP event in %s\n",
2098 target_pid_to_str (lp
->ptid
));
2100 /* Hold this event/waitstatus while we check to see if
2101 there are any more (we still want to get that SIGSTOP). */
2102 stop_wait_callback (lp
, NULL
);
2104 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2105 there's another event, throw it back into the
2109 if (debug_linux_nat
)
2110 fprintf_unfiltered (gdb_stdlog
,
2111 "SWC: kill %s, %s\n",
2112 target_pid_to_str (lp
->ptid
),
2113 status_to_str ((int) status
));
2114 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (lp
->status
));
2117 /* Save the sigtrap event. */
2118 lp
->status
= status
;
2123 /* The thread was stopped with a signal other than
2124 SIGSTOP, and didn't accidentally trip a breakpoint. */
2126 if (debug_linux_nat
)
2128 fprintf_unfiltered (gdb_stdlog
,
2129 "SWC: Pending event %s in %s\n",
2130 status_to_str ((int) status
),
2131 target_pid_to_str (lp
->ptid
));
2133 /* Now resume this LWP and get the SIGSTOP event. */
2135 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2136 if (debug_linux_nat
)
2137 fprintf_unfiltered (gdb_stdlog
,
2138 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2139 target_pid_to_str (lp
->ptid
),
2140 errno
? safe_strerror (errno
) : "OK");
2142 /* Hold this event/waitstatus while we check to see if
2143 there are any more (we still want to get that SIGSTOP). */
2144 stop_wait_callback (lp
, NULL
);
2146 /* If the lp->status field is still empty, use it to
2147 hold this event. If not, then this event must be
2148 returned to the event queue of the LWP. */
2151 if (debug_linux_nat
)
2153 fprintf_unfiltered (gdb_stdlog
,
2154 "SWC: kill %s, %s\n",
2155 target_pid_to_str (lp
->ptid
),
2156 status_to_str ((int) status
));
2158 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (status
));
2161 lp
->status
= status
;
2167 /* We caught the SIGSTOP that we intended to catch, so
2168 there's no SIGSTOP pending. */
2177 /* Return non-zero if LP has a wait status pending. */
2180 status_callback (struct lwp_info
*lp
, void *data
)
2182 /* Only report a pending wait status if we pretend that this has
2183 indeed been resumed. */
2184 return (lp
->status
!= 0 && lp
->resumed
);
2187 /* Return non-zero if LP isn't stopped. */
2190 running_callback (struct lwp_info
*lp
, void *data
)
2192 return (lp
->stopped
== 0 || (lp
->status
!= 0 && lp
->resumed
));
2195 /* Count the LWP's that have had events. */
2198 count_events_callback (struct lwp_info
*lp
, void *data
)
2202 gdb_assert (count
!= NULL
);
2204 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2205 if (lp
->status
!= 0 && lp
->resumed
2206 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2212 /* Select the LWP (if any) that is currently being single-stepped. */
2215 select_singlestep_lwp_callback (struct lwp_info
*lp
, void *data
)
2217 if (lp
->step
&& lp
->status
!= 0)
2223 /* Select the Nth LWP that has had a SIGTRAP event. */
2226 select_event_lwp_callback (struct lwp_info
*lp
, void *data
)
2228 int *selector
= data
;
2230 gdb_assert (selector
!= NULL
);
2232 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2233 if (lp
->status
!= 0 && lp
->resumed
2234 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2235 if ((*selector
)-- == 0)
2242 cancel_breakpoint (struct lwp_info
*lp
)
2244 /* Arrange for a breakpoint to be hit again later. We don't keep
2245 the SIGTRAP status and don't forward the SIGTRAP signal to the
2246 LWP. We will handle the current event, eventually we will resume
2247 this LWP, and this breakpoint will trap again.
2249 If we do not do this, then we run the risk that the user will
2250 delete or disable the breakpoint, but the LWP will have already
2253 struct regcache
*regcache
= get_thread_regcache (lp
->ptid
);
2254 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2257 pc
= regcache_read_pc (regcache
) - gdbarch_decr_pc_after_break (gdbarch
);
2258 if (breakpoint_inserted_here_p (pc
))
2260 if (debug_linux_nat
)
2261 fprintf_unfiltered (gdb_stdlog
,
2262 "CB: Push back breakpoint for %s\n",
2263 target_pid_to_str (lp
->ptid
));
2265 /* Back up the PC if necessary. */
2266 if (gdbarch_decr_pc_after_break (gdbarch
))
2267 regcache_write_pc (regcache
, pc
);
2275 cancel_breakpoints_callback (struct lwp_info
*lp
, void *data
)
2277 struct lwp_info
*event_lp
= data
;
2279 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2283 /* If a LWP other than the LWP that we're reporting an event for has
2284 hit a GDB breakpoint (as opposed to some random trap signal),
2285 then just arrange for it to hit it again later. We don't keep
2286 the SIGTRAP status and don't forward the SIGTRAP signal to the
2287 LWP. We will handle the current event, eventually we will resume
2288 all LWPs, and this one will get its breakpoint trap again.
2290 If we do not do this, then we run the risk that the user will
2291 delete or disable the breakpoint, but the LWP will have already
2295 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
2296 && cancel_breakpoint (lp
))
2297 /* Throw away the SIGTRAP. */
2303 /* Select one LWP out of those that have events pending. */
2306 select_event_lwp (struct lwp_info
**orig_lp
, int *status
)
2309 int random_selector
;
2310 struct lwp_info
*event_lp
;
2312 /* Record the wait status for the original LWP. */
2313 (*orig_lp
)->status
= *status
;
2315 /* Give preference to any LWP that is being single-stepped. */
2316 event_lp
= iterate_over_lwps (select_singlestep_lwp_callback
, NULL
);
2317 if (event_lp
!= NULL
)
2319 if (debug_linux_nat
)
2320 fprintf_unfiltered (gdb_stdlog
,
2321 "SEL: Select single-step %s\n",
2322 target_pid_to_str (event_lp
->ptid
));
2326 /* No single-stepping LWP. Select one at random, out of those
2327 which have had SIGTRAP events. */
2329 /* First see how many SIGTRAP events we have. */
2330 iterate_over_lwps (count_events_callback
, &num_events
);
2332 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2333 random_selector
= (int)
2334 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2336 if (debug_linux_nat
&& num_events
> 1)
2337 fprintf_unfiltered (gdb_stdlog
,
2338 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2339 num_events
, random_selector
);
2341 event_lp
= iterate_over_lwps (select_event_lwp_callback
,
2345 if (event_lp
!= NULL
)
2347 /* Switch the event LWP. */
2348 *orig_lp
= event_lp
;
2349 *status
= event_lp
->status
;
2352 /* Flush the wait status for the event LWP. */
2353 (*orig_lp
)->status
= 0;
2356 /* Return non-zero if LP has been resumed. */
2359 resumed_callback (struct lwp_info
*lp
, void *data
)
2364 /* Stop an active thread, verify it still exists, then resume it. */
2367 stop_and_resume_callback (struct lwp_info
*lp
, void *data
)
2369 struct lwp_info
*ptr
;
2371 if (!lp
->stopped
&& !lp
->signalled
)
2373 stop_callback (lp
, NULL
);
2374 stop_wait_callback (lp
, NULL
);
2375 /* Resume if the lwp still exists. */
2376 for (ptr
= lwp_list
; ptr
; ptr
= ptr
->next
)
2379 resume_callback (lp
, NULL
);
2380 resume_set_callback (lp
, NULL
);
2386 /* Check if we should go on and pass this event to common code.
2387 Return the affected lwp if we are, or NULL otherwise. */
2388 static struct lwp_info
*
2389 linux_nat_filter_event (int lwpid
, int status
, int options
)
2391 struct lwp_info
*lp
;
2393 lp
= find_lwp_pid (pid_to_ptid (lwpid
));
2395 /* Check for stop events reported by a process we didn't already
2396 know about - anything not already in our LWP list.
2398 If we're expecting to receive stopped processes after
2399 fork, vfork, and clone events, then we'll just add the
2400 new one to our list and go back to waiting for the event
2401 to be reported - the stopped process might be returned
2402 from waitpid before or after the event is. */
2403 if (WIFSTOPPED (status
) && !lp
)
2405 linux_record_stopped_pid (lwpid
, status
);
2409 /* Make sure we don't report an event for the exit of an LWP not in
2410 our list, i.e. not part of the current process. This can happen
2411 if we detach from a program we original forked and then it
2413 if (!WIFSTOPPED (status
) && !lp
)
2416 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2417 CLONE_PTRACE processes which do not use the thread library -
2418 otherwise we wouldn't find the new LWP this way. That doesn't
2419 currently work, and the following code is currently unreachable
2420 due to the two blocks above. If it's fixed some day, this code
2421 should be broken out into a function so that we can also pick up
2422 LWPs from the new interface. */
2425 lp
= add_lwp (BUILD_LWP (lwpid
, GET_PID (inferior_ptid
)));
2426 if (options
& __WCLONE
)
2429 gdb_assert (WIFSTOPPED (status
)
2430 && WSTOPSIG (status
) == SIGSTOP
);
2433 if (!in_thread_list (inferior_ptid
))
2435 inferior_ptid
= BUILD_LWP (GET_PID (inferior_ptid
),
2436 GET_PID (inferior_ptid
));
2437 add_thread (inferior_ptid
);
2440 add_thread (lp
->ptid
);
2443 /* Save the trap's siginfo in case we need it later. */
2444 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
2447 /* Handle GNU/Linux's extended waitstatus for trace events. */
2448 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
2450 if (debug_linux_nat
)
2451 fprintf_unfiltered (gdb_stdlog
,
2452 "LLW: Handling extended status 0x%06x\n",
2454 if (linux_handle_extended_wait (lp
, status
, 0))
2458 /* Check if the thread has exited. */
2459 if ((WIFEXITED (status
) || WIFSIGNALED (status
)) && num_lwps
> 1)
2461 /* If this is the main thread, we must stop all threads and verify
2462 if they are still alive. This is because in the nptl thread model
2463 on Linux 2.4, there is no signal issued for exiting LWPs
2464 other than the main thread. We only get the main thread exit
2465 signal once all child threads have already exited. If we
2466 stop all the threads and use the stop_wait_callback to check
2467 if they have exited we can determine whether this signal
2468 should be ignored or whether it means the end of the debugged
2469 application, regardless of which threading model is being
2471 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
))
2474 iterate_over_lwps (stop_and_resume_callback
, NULL
);
2477 if (debug_linux_nat
)
2478 fprintf_unfiltered (gdb_stdlog
,
2479 "LLW: %s exited.\n",
2480 target_pid_to_str (lp
->ptid
));
2484 /* If there is at least one more LWP, then the exit signal
2485 was not the end of the debugged application and should be
2492 /* Check if the current LWP has previously exited. In the nptl
2493 thread model, LWPs other than the main thread do not issue
2494 signals when they exit so we must check whenever the thread has
2495 stopped. A similar check is made in stop_wait_callback(). */
2496 if (num_lwps
> 1 && !linux_thread_alive (lp
->ptid
))
2498 if (debug_linux_nat
)
2499 fprintf_unfiltered (gdb_stdlog
,
2500 "LLW: %s exited.\n",
2501 target_pid_to_str (lp
->ptid
));
2505 /* Make sure there is at least one thread running. */
2506 gdb_assert (iterate_over_lwps (running_callback
, NULL
));
2508 /* Discard the event. */
2512 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2513 an attempt to stop an LWP. */
2515 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
2517 if (debug_linux_nat
)
2518 fprintf_unfiltered (gdb_stdlog
,
2519 "LLW: Delayed SIGSTOP caught for %s.\n",
2520 target_pid_to_str (lp
->ptid
));
2522 /* This is a delayed SIGSTOP. */
2525 registers_changed ();
2527 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2528 lp
->step
, TARGET_SIGNAL_0
);
2529 if (debug_linux_nat
)
2530 fprintf_unfiltered (gdb_stdlog
,
2531 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2533 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2534 target_pid_to_str (lp
->ptid
));
2537 gdb_assert (lp
->resumed
);
2539 /* Discard the event. */
2543 /* Make sure we don't report a SIGINT that we have already displayed
2544 for another thread. */
2545 if (lp
->ignore_sigint
2546 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGINT
)
2548 if (debug_linux_nat
)
2549 fprintf_unfiltered (gdb_stdlog
,
2550 "LLW: Delayed SIGINT caught for %s.\n",
2551 target_pid_to_str (lp
->ptid
));
2553 /* This is a delayed SIGINT. */
2554 lp
->ignore_sigint
= 0;
2556 registers_changed ();
2557 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2558 lp
->step
, TARGET_SIGNAL_0
);
2559 if (debug_linux_nat
)
2560 fprintf_unfiltered (gdb_stdlog
,
2561 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2563 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2564 target_pid_to_str (lp
->ptid
));
2567 gdb_assert (lp
->resumed
);
2569 /* Discard the event. */
2573 /* An interesting event. */
2579 linux_nat_wait_1 (struct target_ops
*ops
,
2580 ptid_t ptid
, struct target_waitstatus
*ourstatus
)
2582 static sigset_t prev_mask
;
2583 struct lwp_info
*lp
= NULL
;
2586 pid_t pid
= PIDGET (ptid
);
2588 if (debug_linux_nat_async
)
2589 fprintf_unfiltered (gdb_stdlog
, "LLW: enter\n");
2591 /* The first time we get here after starting a new inferior, we may
2592 not have added it to the LWP list yet - this is the earliest
2593 moment at which we know its PID. */
2596 gdb_assert (!is_lwp (inferior_ptid
));
2598 /* Upgrade the main thread's ptid. */
2599 thread_change_ptid (inferior_ptid
,
2600 BUILD_LWP (GET_PID (inferior_ptid
),
2601 GET_PID (inferior_ptid
)));
2603 lp
= add_lwp (inferior_ptid
);
2607 /* Make sure SIGCHLD is blocked. */
2608 block_child_signals (&prev_mask
);
2612 /* Make sure there is at least one LWP that has been resumed. */
2613 gdb_assert (iterate_over_lwps (resumed_callback
, NULL
));
2615 /* First check if there is a LWP with a wait status pending. */
2618 /* Any LWP that's been resumed will do. */
2619 lp
= iterate_over_lwps (status_callback
, NULL
);
2622 status
= lp
->status
;
2625 if (debug_linux_nat
&& status
)
2626 fprintf_unfiltered (gdb_stdlog
,
2627 "LLW: Using pending wait status %s for %s.\n",
2628 status_to_str (status
),
2629 target_pid_to_str (lp
->ptid
));
2632 /* But if we don't find one, we'll have to wait, and check both
2633 cloned and uncloned processes. We start with the cloned
2635 options
= __WCLONE
| WNOHANG
;
2637 else if (is_lwp (ptid
))
2639 if (debug_linux_nat
)
2640 fprintf_unfiltered (gdb_stdlog
,
2641 "LLW: Waiting for specific LWP %s.\n",
2642 target_pid_to_str (ptid
));
2644 /* We have a specific LWP to check. */
2645 lp
= find_lwp_pid (ptid
);
2647 status
= lp
->status
;
2650 if (debug_linux_nat
&& status
)
2651 fprintf_unfiltered (gdb_stdlog
,
2652 "LLW: Using pending wait status %s for %s.\n",
2653 status_to_str (status
),
2654 target_pid_to_str (lp
->ptid
));
2656 /* If we have to wait, take into account whether PID is a cloned
2657 process or not. And we have to convert it to something that
2658 the layer beneath us can understand. */
2659 options
= lp
->cloned
? __WCLONE
: 0;
2660 pid
= GET_LWP (ptid
);
2663 if (status
&& lp
->signalled
)
2665 /* A pending SIGSTOP may interfere with the normal stream of
2666 events. In a typical case where interference is a problem,
2667 we have a SIGSTOP signal pending for LWP A while
2668 single-stepping it, encounter an event in LWP B, and take the
2669 pending SIGSTOP while trying to stop LWP A. After processing
2670 the event in LWP B, LWP A is continued, and we'll never see
2671 the SIGTRAP associated with the last time we were
2672 single-stepping LWP A. */
2674 /* Resume the thread. It should halt immediately returning the
2676 registers_changed ();
2677 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2678 lp
->step
, TARGET_SIGNAL_0
);
2679 if (debug_linux_nat
)
2680 fprintf_unfiltered (gdb_stdlog
,
2681 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2682 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2683 target_pid_to_str (lp
->ptid
));
2685 gdb_assert (lp
->resumed
);
2687 /* This should catch the pending SIGSTOP. */
2688 stop_wait_callback (lp
, NULL
);
2691 if (!target_can_async_p ())
2693 /* Causes SIGINT to be passed on to the attached process. */
2697 if (target_can_async_p ())
2698 options
|= WNOHANG
; /* In async mode, don't block. */
2704 lwpid
= my_waitpid (pid
, &status
, options
);
2708 gdb_assert (pid
== -1 || lwpid
== pid
);
2710 if (debug_linux_nat
)
2712 fprintf_unfiltered (gdb_stdlog
,
2713 "LLW: waitpid %ld received %s\n",
2714 (long) lwpid
, status_to_str (status
));
2717 lp
= linux_nat_filter_event (lwpid
, status
, options
);
2720 /* A discarded event. */
2730 /* Alternate between checking cloned and uncloned processes. */
2731 options
^= __WCLONE
;
2733 /* And every time we have checked both:
2734 In async mode, return to event loop;
2735 In sync mode, suspend waiting for a SIGCHLD signal. */
2736 if (options
& __WCLONE
)
2738 if (target_can_async_p ())
2740 /* No interesting event. */
2741 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2743 if (debug_linux_nat_async
)
2744 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (ignore)\n");
2746 restore_child_signals_mask (&prev_mask
);
2747 return minus_one_ptid
;
2750 sigsuspend (&suspend_mask
);
2754 /* We shouldn't end up here unless we want to try again. */
2755 gdb_assert (status
== 0);
2758 if (!target_can_async_p ())
2759 clear_sigint_trap ();
2763 /* Don't report signals that GDB isn't interested in, such as
2764 signals that are neither printed nor stopped upon. Stopping all
2765 threads can be a bit time-consuming so if we want decent
2766 performance with heavily multi-threaded programs, especially when
2767 they're using a high frequency timer, we'd better avoid it if we
2770 if (WIFSTOPPED (status
))
2772 int signo
= target_signal_from_host (WSTOPSIG (status
));
2773 struct inferior
*inf
;
2775 inf
= find_inferior_pid (ptid_get_pid (lp
->ptid
));
2778 /* Defer to common code if we get a signal while
2779 single-stepping, since that may need special care, e.g. to
2780 skip the signal handler, or, if we're gaining control of the
2783 && inf
->stop_soon
== NO_STOP_QUIETLY
2784 && signal_stop_state (signo
) == 0
2785 && signal_print_state (signo
) == 0
2786 && signal_pass_state (signo
) == 1)
2788 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
2789 here? It is not clear we should. GDB may not expect
2790 other threads to run. On the other hand, not resuming
2791 newly attached threads may cause an unwanted delay in
2792 getting them running. */
2793 registers_changed ();
2794 linux_ops
->to_resume (linux_ops
, pid_to_ptid (GET_LWP (lp
->ptid
)),
2796 if (debug_linux_nat
)
2797 fprintf_unfiltered (gdb_stdlog
,
2798 "LLW: %s %s, %s (preempt 'handle')\n",
2800 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2801 target_pid_to_str (lp
->ptid
),
2802 signo
? strsignal (signo
) : "0");
2810 /* Only do the below in all-stop, as we currently use SIGINT
2811 to implement target_stop (see linux_nat_stop) in
2813 if (signo
== TARGET_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
2815 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2816 forwarded to the entire process group, that is, all LWPs
2817 will receive it - unless they're using CLONE_THREAD to
2818 share signals. Since we only want to report it once, we
2819 mark it as ignored for all LWPs except this one. */
2820 iterate_over_lwps (set_ignore_sigint
, NULL
);
2821 lp
->ignore_sigint
= 0;
2824 maybe_clear_ignore_sigint (lp
);
2828 /* This LWP is stopped now. */
2831 if (debug_linux_nat
)
2832 fprintf_unfiltered (gdb_stdlog
, "LLW: Candidate event %s in %s.\n",
2833 status_to_str (status
), target_pid_to_str (lp
->ptid
));
2837 /* Now stop all other LWP's ... */
2838 iterate_over_lwps (stop_callback
, NULL
);
2840 /* ... and wait until all of them have reported back that
2841 they're no longer running. */
2842 iterate_over_lwps (stop_wait_callback
, NULL
);
2844 /* If we're not waiting for a specific LWP, choose an event LWP
2845 from among those that have had events. Giving equal priority
2846 to all LWPs that have had events helps prevent
2849 select_event_lwp (&lp
, &status
);
2852 /* Now that we've selected our final event LWP, cancel any
2853 breakpoints in other LWPs that have hit a GDB breakpoint. See
2854 the comment in cancel_breakpoints_callback to find out why. */
2855 iterate_over_lwps (cancel_breakpoints_callback
, lp
);
2857 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
2859 if (debug_linux_nat
)
2860 fprintf_unfiltered (gdb_stdlog
,
2861 "LLW: trap ptid is %s.\n",
2862 target_pid_to_str (lp
->ptid
));
2865 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
2867 *ourstatus
= lp
->waitstatus
;
2868 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
2871 store_waitstatus (ourstatus
, status
);
2873 if (debug_linux_nat_async
)
2874 fprintf_unfiltered (gdb_stdlog
, "LLW: exit\n");
2876 restore_child_signals_mask (&prev_mask
);
2881 linux_nat_wait (struct target_ops
*ops
,
2882 ptid_t ptid
, struct target_waitstatus
*ourstatus
)
2886 if (debug_linux_nat
)
2887 fprintf_unfiltered (gdb_stdlog
, "linux_nat_wait: [%s]\n", target_pid_to_str (ptid
));
2889 /* Flush the async file first. */
2890 if (target_can_async_p ())
2891 async_file_flush ();
2893 event_ptid
= linux_nat_wait_1 (ops
, ptid
, ourstatus
);
2895 /* If we requested any event, and something came out, assume there
2896 may be more. If we requested a specific lwp or process, also
2897 assume there may be more. */
2898 if (target_can_async_p ()
2899 && (ourstatus
->kind
!= TARGET_WAITKIND_IGNORE
2900 || !ptid_equal (ptid
, minus_one_ptid
)))
2903 /* Get ready for the next event. */
2904 if (target_can_async_p ())
2905 target_async (inferior_event_handler
, 0);
2911 kill_callback (struct lwp_info
*lp
, void *data
)
2914 ptrace (PTRACE_KILL
, GET_LWP (lp
->ptid
), 0, 0);
2915 if (debug_linux_nat
)
2916 fprintf_unfiltered (gdb_stdlog
,
2917 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2918 target_pid_to_str (lp
->ptid
),
2919 errno
? safe_strerror (errno
) : "OK");
2925 kill_wait_callback (struct lwp_info
*lp
, void *data
)
2929 /* We must make sure that there are no pending events (delayed
2930 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
2931 program doesn't interfere with any following debugging session. */
2933 /* For cloned processes we must check both with __WCLONE and
2934 without, since the exit status of a cloned process isn't reported
2940 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, __WCLONE
);
2941 if (pid
!= (pid_t
) -1)
2943 if (debug_linux_nat
)
2944 fprintf_unfiltered (gdb_stdlog
,
2945 "KWC: wait %s received unknown.\n",
2946 target_pid_to_str (lp
->ptid
));
2947 /* The Linux kernel sometimes fails to kill a thread
2948 completely after PTRACE_KILL; that goes from the stop
2949 point in do_fork out to the one in
2950 get_signal_to_deliever and waits again. So kill it
2952 kill_callback (lp
, NULL
);
2955 while (pid
== GET_LWP (lp
->ptid
));
2957 gdb_assert (pid
== -1 && errno
== ECHILD
);
2962 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, 0);
2963 if (pid
!= (pid_t
) -1)
2965 if (debug_linux_nat
)
2966 fprintf_unfiltered (gdb_stdlog
,
2967 "KWC: wait %s received unk.\n",
2968 target_pid_to_str (lp
->ptid
));
2969 /* See the call to kill_callback above. */
2970 kill_callback (lp
, NULL
);
2973 while (pid
== GET_LWP (lp
->ptid
));
2975 gdb_assert (pid
== -1 && errno
== ECHILD
);
2980 linux_nat_kill (struct target_ops
*ops
)
2982 struct target_waitstatus last
;
2986 /* If we're stopped while forking and we haven't followed yet,
2987 kill the other task. We need to do this first because the
2988 parent will be sleeping if this is a vfork. */
2990 get_last_target_status (&last_ptid
, &last
);
2992 if (last
.kind
== TARGET_WAITKIND_FORKED
2993 || last
.kind
== TARGET_WAITKIND_VFORKED
)
2995 ptrace (PT_KILL
, PIDGET (last
.value
.related_pid
), 0, 0);
2999 if (forks_exist_p ())
3000 linux_fork_killall ();
3003 /* Stop all threads before killing them, since ptrace requires
3004 that the thread is stopped to sucessfully PTRACE_KILL. */
3005 iterate_over_lwps (stop_callback
, NULL
);
3006 /* ... and wait until all of them have reported back that
3007 they're no longer running. */
3008 iterate_over_lwps (stop_wait_callback
, NULL
);
3010 /* Kill all LWP's ... */
3011 iterate_over_lwps (kill_callback
, NULL
);
3013 /* ... and wait until we've flushed all events. */
3014 iterate_over_lwps (kill_wait_callback
, NULL
);
3017 target_mourn_inferior ();
3021 linux_nat_mourn_inferior (struct target_ops
*ops
)
3023 /* Destroy LWP info; it's no longer valid. */
3026 if (! forks_exist_p ())
3028 /* Normal case, no other forks available. */
3029 linux_ops
->to_mourn_inferior (ops
);
3031 if (target_can_async_p ())
3032 linux_nat_async (NULL
, 0);
3035 /* Multi-fork case. The current inferior_ptid has exited, but
3036 there are other viable forks to debug. Delete the exiting
3037 one and context-switch to the first available. */
3038 linux_fork_mourn_inferior ();
3041 /* Convert a native/host siginfo object, into/from the siginfo in the
3042 layout of the inferiors' architecture. */
3045 siginfo_fixup (struct siginfo
*siginfo
, gdb_byte
*inf_siginfo
, int direction
)
3049 if (linux_nat_siginfo_fixup
!= NULL
)
3050 done
= linux_nat_siginfo_fixup (siginfo
, inf_siginfo
, direction
);
3052 /* If there was no callback, or the callback didn't do anything,
3053 then just do a straight memcpy. */
3057 memcpy (siginfo
, inf_siginfo
, sizeof (struct siginfo
));
3059 memcpy (inf_siginfo
, siginfo
, sizeof (struct siginfo
));
3064 linux_xfer_siginfo (struct target_ops
*ops
, enum target_object object
,
3065 const char *annex
, gdb_byte
*readbuf
,
3066 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
3069 struct siginfo siginfo
;
3070 gdb_byte inf_siginfo
[sizeof (struct siginfo
)];
3072 gdb_assert (object
== TARGET_OBJECT_SIGNAL_INFO
);
3073 gdb_assert (readbuf
|| writebuf
);
3075 pid
= GET_LWP (inferior_ptid
);
3077 pid
= GET_PID (inferior_ptid
);
3079 if (offset
> sizeof (siginfo
))
3083 ptrace (PTRACE_GETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
3087 /* When GDB is built as a 64-bit application, ptrace writes into
3088 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3089 inferior with a 64-bit GDB should look the same as debugging it
3090 with a 32-bit GDB, we need to convert it. GDB core always sees
3091 the converted layout, so any read/write will have to be done
3093 siginfo_fixup (&siginfo
, inf_siginfo
, 0);
3095 if (offset
+ len
> sizeof (siginfo
))
3096 len
= sizeof (siginfo
) - offset
;
3098 if (readbuf
!= NULL
)
3099 memcpy (readbuf
, inf_siginfo
+ offset
, len
);
3102 memcpy (inf_siginfo
+ offset
, writebuf
, len
);
3104 /* Convert back to ptrace layout before flushing it out. */
3105 siginfo_fixup (&siginfo
, inf_siginfo
, 1);
3108 ptrace (PTRACE_SETSIGINFO
, pid
, (PTRACE_TYPE_ARG3
) 0, &siginfo
);
3117 linux_nat_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3118 const char *annex
, gdb_byte
*readbuf
,
3119 const gdb_byte
*writebuf
,
3120 ULONGEST offset
, LONGEST len
)
3122 struct cleanup
*old_chain
;
3125 if (object
== TARGET_OBJECT_SIGNAL_INFO
)
3126 return linux_xfer_siginfo (ops
, object
, annex
, readbuf
, writebuf
,
3129 old_chain
= save_inferior_ptid ();
3131 if (is_lwp (inferior_ptid
))
3132 inferior_ptid
= pid_to_ptid (GET_LWP (inferior_ptid
));
3134 xfer
= linux_ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
3137 do_cleanups (old_chain
);
3142 linux_thread_alive (ptid_t ptid
)
3146 gdb_assert (is_lwp (ptid
));
3148 /* Send signal 0 instead of anything ptrace, because ptracing a
3149 running thread errors out claiming that the thread doesn't
3151 err
= kill_lwp (GET_LWP (ptid
), 0);
3153 if (debug_linux_nat
)
3154 fprintf_unfiltered (gdb_stdlog
,
3155 "LLTA: KILL(SIG0) %s (%s)\n",
3156 target_pid_to_str (ptid
),
3157 err
? safe_strerror (err
) : "OK");
3166 linux_nat_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
3168 return linux_thread_alive (ptid
);
3172 linux_nat_pid_to_str (struct target_ops
*ops
, ptid_t ptid
)
3174 static char buf
[64];
3177 && ((lwp_list
&& lwp_list
->next
)
3178 || GET_PID (ptid
) != GET_LWP (ptid
)))
3180 snprintf (buf
, sizeof (buf
), "LWP %ld", GET_LWP (ptid
));
3184 return normal_pid_to_str (ptid
);
3187 /* Accepts an integer PID; Returns a string representing a file that
3188 can be opened to get the symbols for the child process. */
3191 linux_child_pid_to_exec_file (int pid
)
3193 char *name1
, *name2
;
3195 name1
= xmalloc (MAXPATHLEN
);
3196 name2
= xmalloc (MAXPATHLEN
);
3197 make_cleanup (xfree
, name1
);
3198 make_cleanup (xfree
, name2
);
3199 memset (name2
, 0, MAXPATHLEN
);
3201 sprintf (name1
, "/proc/%d/exe", pid
);
3202 if (readlink (name1
, name2
, MAXPATHLEN
) > 0)
/* Service function for corefiles and info proc.

   Parse one line of /proc/PID/maps from MAPFILE into the out
   parameters.  PERMISSIONS and DEVICE must hold at least 8 bytes
   (the callers pass char[8]); the %7s widths below keep fscanf from
   overflowing them.  FILENAME receives the trailing path, or "" when
   the mapping is anonymous.  Returns non-zero if a mapping was
   parsed.

   NOTE(review): INODE is scanned with %llx, i.e. as hexadecimal,
   although /proc/PID/maps prints it in decimal — preserved here
   because the value is informational only; confirm before relying on
   it.  */

static int
read_mapping (FILE *mapfile,
	      long long *addr,
	      long long *endaddr,
	      char *permissions,
	      long long *offset,
	      char *device, long long *inode, char *filename)
{
  /* Bound the %s conversions: the callers' buffers are char[8].  */
  int ret = fscanf (mapfile, "%llx-%llx %7s %llx %7s %llx",
		    addr, endaddr, permissions, offset, device, inode);

  /* Anonymous mappings have no filename field; make sure the callers'
     filename[0] test sees an empty string rather than garbage.  */
  filename[0] = '\0';
  if (ret > 0 && ret != EOF)
    {
      /* Eat everything up to EOL for the filename.  This will prevent
	 weird filenames (such as one with embedded whitespace) from
	 confusing this code.  It also makes this code more robust in
	 respect to annotations the kernel may add after the filename.

	 Note the filename is used for informational purposes
	 only.  */
      ret += fscanf (mapfile, "%[^\n]\n", filename);
    }

  return (ret != 0 && ret != EOF);
}
3237 /* Fills the "to_find_memory_regions" target vector. Lists the memory
3238 regions in the inferior for a corefile. */
3241 linux_nat_find_memory_regions (int (*func
) (CORE_ADDR
,
3243 int, int, int, void *), void *obfd
)
3245 int pid
= PIDGET (inferior_ptid
);
3246 char mapsfilename
[MAXPATHLEN
];
3248 long long addr
, endaddr
, size
, offset
, inode
;
3249 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3250 int read
, write
, exec
;
3252 struct cleanup
*cleanup
;
3254 /* Compose the filename for the /proc memory map, and open it. */
3255 sprintf (mapsfilename
, "/proc/%d/maps", pid
);
3256 if ((mapsfile
= fopen (mapsfilename
, "r")) == NULL
)
3257 error (_("Could not open %s."), mapsfilename
);
3258 cleanup
= make_cleanup_fclose (mapsfile
);
3261 fprintf_filtered (gdb_stdout
,
3262 "Reading memory regions from %s\n", mapsfilename
);
3264 /* Now iterate until end-of-file. */
3265 while (read_mapping (mapsfile
, &addr
, &endaddr
, &permissions
[0],
3266 &offset
, &device
[0], &inode
, &filename
[0]))
3268 size
= endaddr
- addr
;
3270 /* Get the segment's permissions. */
3271 read
= (strchr (permissions
, 'r') != 0);
3272 write
= (strchr (permissions
, 'w') != 0);
3273 exec
= (strchr (permissions
, 'x') != 0);
3277 fprintf_filtered (gdb_stdout
,
3278 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3279 size
, paddr_nz (addr
),
3281 write
? 'w' : ' ', exec
? 'x' : ' ');
3283 fprintf_filtered (gdb_stdout
, " for %s", filename
);
3284 fprintf_filtered (gdb_stdout
, "\n");
3287 /* Invoke the callback function to create the corefile
3289 func (addr
, size
, read
, write
, exec
, obfd
);
3291 do_cleanups (cleanup
);
3296 find_signalled_thread (struct thread_info
*info
, void *data
)
3298 if (info
->stop_signal
!= TARGET_SIGNAL_0
3299 && ptid_get_pid (info
->ptid
) == ptid_get_pid (inferior_ptid
))
3305 static enum target_signal
3306 find_stop_signal (void)
3308 struct thread_info
*info
=
3309 iterate_over_threads (find_signalled_thread
, NULL
);
3312 return info
->stop_signal
;
3314 return TARGET_SIGNAL_0
;
3317 /* Records the thread's register state for the corefile note
3321 linux_nat_do_thread_registers (bfd
*obfd
, ptid_t ptid
,
3322 char *note_data
, int *note_size
,
3323 enum target_signal stop_signal
)
3325 gdb_gregset_t gregs
;
3326 gdb_fpregset_t fpregs
;
3327 unsigned long lwp
= ptid_get_lwp (ptid
);
3328 struct regcache
*regcache
= get_thread_regcache (ptid
);
3329 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3330 const struct regset
*regset
;
3332 struct cleanup
*old_chain
;
3333 struct core_regset_section
*sect_list
;
3336 old_chain
= save_inferior_ptid ();
3337 inferior_ptid
= ptid
;
3338 target_fetch_registers (regcache
, -1);
3339 do_cleanups (old_chain
);
3341 core_regset_p
= gdbarch_regset_from_core_section_p (gdbarch
);
3342 sect_list
= gdbarch_core_regset_sections (gdbarch
);
3345 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg",
3346 sizeof (gregs
))) != NULL
3347 && regset
->collect_regset
!= NULL
)
3348 regset
->collect_regset (regset
, regcache
, -1,
3349 &gregs
, sizeof (gregs
));
3351 fill_gregset (regcache
, &gregs
, -1);
3353 note_data
= (char *) elfcore_write_prstatus (obfd
,
3357 stop_signal
, &gregs
);
3359 /* The loop below uses the new struct core_regset_section, which stores
3360 the supported section names and sizes for the core file. Note that
3361 note PRSTATUS needs to be treated specially. But the other notes are
3362 structurally the same, so they can benefit from the new struct. */
3363 if (core_regset_p
&& sect_list
!= NULL
)
3364 while (sect_list
->sect_name
!= NULL
)
3366 /* .reg was already handled above. */
3367 if (strcmp (sect_list
->sect_name
, ".reg") == 0)
3372 regset
= gdbarch_regset_from_core_section (gdbarch
,
3373 sect_list
->sect_name
,
3375 gdb_assert (regset
&& regset
->collect_regset
);
3376 gdb_regset
= xmalloc (sect_list
->size
);
3377 regset
->collect_regset (regset
, regcache
, -1,
3378 gdb_regset
, sect_list
->size
);
3379 note_data
= (char *) elfcore_write_register_note (obfd
,
3382 sect_list
->sect_name
,
3389 /* For architectures that does not have the struct core_regset_section
3390 implemented, we use the old method. When all the architectures have
3391 the new support, the code below should be deleted. */
3395 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg2",
3396 sizeof (fpregs
))) != NULL
3397 && regset
->collect_regset
!= NULL
)
3398 regset
->collect_regset (regset
, regcache
, -1,
3399 &fpregs
, sizeof (fpregs
));
3401 fill_fpregset (regcache
, &fpregs
, -1);
3403 note_data
= (char *) elfcore_write_prfpreg (obfd
,
3406 &fpregs
, sizeof (fpregs
));
3412 struct linux_nat_corefile_thread_data
3418 enum target_signal stop_signal
;
3421 /* Called by gdbthread.c once per thread. Records the thread's
3422 register state for the corefile note section. */
3425 linux_nat_corefile_thread_callback (struct lwp_info
*ti
, void *data
)
3427 struct linux_nat_corefile_thread_data
*args
= data
;
3429 args
->note_data
= linux_nat_do_thread_registers (args
->obfd
,
3439 /* Fills the "to_make_corefile_note" target vector. Builds the note
3440 section for a corefile, and returns it in a malloc buffer. */
3443 linux_nat_make_corefile_notes (bfd
*obfd
, int *note_size
)
3445 struct linux_nat_corefile_thread_data thread_args
;
3446 struct cleanup
*old_chain
;
3447 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3448 char fname
[16] = { '\0' };
3449 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3450 char psargs
[80] = { '\0' };
3451 char *note_data
= NULL
;
3452 ptid_t current_ptid
= inferior_ptid
;
3456 if (get_exec_file (0))
3458 strncpy (fname
, strrchr (get_exec_file (0), '/') + 1, sizeof (fname
));
3459 strncpy (psargs
, get_exec_file (0), sizeof (psargs
));
3460 if (get_inferior_args ())
3463 char *psargs_end
= psargs
+ sizeof (psargs
);
3465 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3467 string_end
= memchr (psargs
, 0, sizeof (psargs
));
3468 if (string_end
!= NULL
)
3470 *string_end
++ = ' ';
3471 strncpy (string_end
, get_inferior_args (),
3472 psargs_end
- string_end
);
3475 note_data
= (char *) elfcore_write_prpsinfo (obfd
,
3477 note_size
, fname
, psargs
);
3480 /* Dump information for threads. */
3481 thread_args
.obfd
= obfd
;
3482 thread_args
.note_data
= note_data
;
3483 thread_args
.note_size
= note_size
;
3484 thread_args
.num_notes
= 0;
3485 thread_args
.stop_signal
= find_stop_signal ();
3486 iterate_over_lwps (linux_nat_corefile_thread_callback
, &thread_args
);
3487 gdb_assert (thread_args
.num_notes
!= 0);
3488 note_data
= thread_args
.note_data
;
3490 auxv_len
= target_read_alloc (¤t_target
, TARGET_OBJECT_AUXV
,
3494 note_data
= elfcore_write_note (obfd
, note_data
, note_size
,
3495 "CORE", NT_AUXV
, auxv
, auxv_len
);
3499 make_cleanup (xfree
, note_data
);
3503 /* Implement the "info proc" command. */
3506 linux_nat_info_proc_cmd (char *args
, int from_tty
)
3508 /* A long is used for pid instead of an int to avoid a loss of precision
3509 compiler warning from the output of strtoul. */
3510 long pid
= PIDGET (inferior_ptid
);
3513 char buffer
[MAXPATHLEN
];
3514 char fname1
[MAXPATHLEN
], fname2
[MAXPATHLEN
];
3527 /* Break up 'args' into an argv array. */
3528 argv
= gdb_buildargv (args
);
3529 make_cleanup_freeargv (argv
);
3531 while (argv
!= NULL
&& *argv
!= NULL
)
3533 if (isdigit (argv
[0][0]))
3535 pid
= strtoul (argv
[0], NULL
, 10);
3537 else if (strncmp (argv
[0], "mappings", strlen (argv
[0])) == 0)
3541 else if (strcmp (argv
[0], "status") == 0)
3545 else if (strcmp (argv
[0], "stat") == 0)
3549 else if (strcmp (argv
[0], "cmd") == 0)
3553 else if (strncmp (argv
[0], "exe", strlen (argv
[0])) == 0)
3557 else if (strcmp (argv
[0], "cwd") == 0)
3561 else if (strncmp (argv
[0], "all", strlen (argv
[0])) == 0)
3567 /* [...] (future options here) */
3572 error (_("No current process: you must name one."));
3574 sprintf (fname1
, "/proc/%ld", pid
);
3575 if (stat (fname1
, &dummy
) != 0)
3576 error (_("No /proc directory: '%s'"), fname1
);
3578 printf_filtered (_("process %ld\n"), pid
);
3579 if (cmdline_f
|| all
)
3581 sprintf (fname1
, "/proc/%ld/cmdline", pid
);
3582 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3584 struct cleanup
*cleanup
= make_cleanup_fclose (procfile
);
3585 if (fgets (buffer
, sizeof (buffer
), procfile
))
3586 printf_filtered ("cmdline = '%s'\n", buffer
);
3588 warning (_("unable to read '%s'"), fname1
);
3589 do_cleanups (cleanup
);
3592 warning (_("unable to open /proc file '%s'"), fname1
);
3596 sprintf (fname1
, "/proc/%ld/cwd", pid
);
3597 memset (fname2
, 0, sizeof (fname2
));
3598 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3599 printf_filtered ("cwd = '%s'\n", fname2
);
3601 warning (_("unable to read link '%s'"), fname1
);
3605 sprintf (fname1
, "/proc/%ld/exe", pid
);
3606 memset (fname2
, 0, sizeof (fname2
));
3607 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3608 printf_filtered ("exe = '%s'\n", fname2
);
3610 warning (_("unable to read link '%s'"), fname1
);
3612 if (mappings_f
|| all
)
3614 sprintf (fname1
, "/proc/%ld/maps", pid
);
3615 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3617 long long addr
, endaddr
, size
, offset
, inode
;
3618 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3619 struct cleanup
*cleanup
;
3621 cleanup
= make_cleanup_fclose (procfile
);
3622 printf_filtered (_("Mapped address spaces:\n\n"));
3623 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3625 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3628 " Size", " Offset", "objfile");
3632 printf_filtered (" %18s %18s %10s %10s %7s\n",
3635 " Size", " Offset", "objfile");
3638 while (read_mapping (procfile
, &addr
, &endaddr
, &permissions
[0],
3639 &offset
, &device
[0], &inode
, &filename
[0]))
3641 size
= endaddr
- addr
;
3643 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3644 calls here (and possibly above) should be abstracted
3645 out into their own functions? Andrew suggests using
3646 a generic local_address_string instead to print out
3647 the addresses; that makes sense to me, too. */
3649 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3651 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3652 (unsigned long) addr
, /* FIXME: pr_addr */
3653 (unsigned long) endaddr
,
3655 (unsigned int) offset
,
3656 filename
[0] ? filename
: "");
3660 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3661 (unsigned long) addr
, /* FIXME: pr_addr */
3662 (unsigned long) endaddr
,
3664 (unsigned int) offset
,
3665 filename
[0] ? filename
: "");
3669 do_cleanups (cleanup
);
3672 warning (_("unable to open /proc file '%s'"), fname1
);
3674 if (status_f
|| all
)
3676 sprintf (fname1
, "/proc/%ld/status", pid
);
3677 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3679 struct cleanup
*cleanup
= make_cleanup_fclose (procfile
);
3680 while (fgets (buffer
, sizeof (buffer
), procfile
) != NULL
)
3681 puts_filtered (buffer
);
3682 do_cleanups (cleanup
);
3685 warning (_("unable to open /proc file '%s'"), fname1
);
3689 sprintf (fname1
, "/proc/%ld/stat", pid
);
3690 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3695 struct cleanup
*cleanup
= make_cleanup_fclose (procfile
);
3697 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3698 printf_filtered (_("Process: %d\n"), itmp
);
3699 if (fscanf (procfile
, "(%[^)]) ", &buffer
[0]) > 0)
3700 printf_filtered (_("Exec file: %s\n"), buffer
);
3701 if (fscanf (procfile
, "%c ", &ctmp
) > 0)
3702 printf_filtered (_("State: %c\n"), ctmp
);
3703 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3704 printf_filtered (_("Parent process: %d\n"), itmp
);
3705 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3706 printf_filtered (_("Process group: %d\n"), itmp
);
3707 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3708 printf_filtered (_("Session id: %d\n"), itmp
);
3709 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3710 printf_filtered (_("TTY: %d\n"), itmp
);
3711 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3712 printf_filtered (_("TTY owner process group: %d\n"), itmp
);
3713 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3714 printf_filtered (_("Flags: 0x%lx\n"), ltmp
);
3715 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3716 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3717 (unsigned long) ltmp
);
3718 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3719 printf_filtered (_("Minor faults, children: %lu\n"),
3720 (unsigned long) ltmp
);
3721 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3722 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3723 (unsigned long) ltmp
);
3724 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3725 printf_filtered (_("Major faults, children: %lu\n"),
3726 (unsigned long) ltmp
);
3727 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3728 printf_filtered (_("utime: %ld\n"), ltmp
);
3729 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3730 printf_filtered (_("stime: %ld\n"), ltmp
);
3731 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3732 printf_filtered (_("utime, children: %ld\n"), ltmp
);
3733 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3734 printf_filtered (_("stime, children: %ld\n"), ltmp
);
3735 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3736 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3738 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3739 printf_filtered (_("'nice' value: %ld\n"), ltmp
);
3740 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3741 printf_filtered (_("jiffies until next timeout: %lu\n"),
3742 (unsigned long) ltmp
);
3743 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3744 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3745 (unsigned long) ltmp
);
3746 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3747 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3749 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3750 printf_filtered (_("Virtual memory size: %lu\n"),
3751 (unsigned long) ltmp
);
3752 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3753 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp
);
3754 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3755 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp
);
3756 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3757 printf_filtered (_("Start of text: 0x%lx\n"), ltmp
);
3758 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3759 printf_filtered (_("End of text: 0x%lx\n"), ltmp
);
3760 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3761 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp
);
3762 #if 0 /* Don't know how architecture-dependent the rest is...
3763 Anyway the signal bitmap info is available from "status". */
3764 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3765 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp
);
3766 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3767 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp
);
3768 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3769 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp
);
3770 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3771 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp
);
3772 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3773 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp
);
3774 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3775 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp
);
3776 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3777 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp
);
3779 do_cleanups (cleanup
);
3782 warning (_("unable to open /proc file '%s'"), fname1
);
3786 /* Implement the to_xfer_partial interface for memory reads using the /proc
3787 filesystem. Because we can use a single read() call for /proc, this
3788 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3789 but it doesn't support writes. */
3792 linux_proc_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3793 const char *annex
, gdb_byte
*readbuf
,
3794 const gdb_byte
*writebuf
,
3795 ULONGEST offset
, LONGEST len
)
3801 if (object
!= TARGET_OBJECT_MEMORY
|| !readbuf
)
3804 /* Don't bother for one word. */
3805 if (len
< 3 * sizeof (long))
3808 /* We could keep this file open and cache it - possibly one per
3809 thread. That requires some juggling, but is even faster. */
3810 sprintf (filename
, "/proc/%d/mem", PIDGET (inferior_ptid
));
3811 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
3815 /* If pread64 is available, use it. It's faster if the kernel
3816 supports it (only one syscall), and it's 64-bit safe even on
3817 32-bit platforms (for instance, SPARC debugging a SPARC64
3820 if (pread64 (fd
, readbuf
, len
, offset
) != len
)
3822 if (lseek (fd
, offset
, SEEK_SET
) == -1 || read (fd
, readbuf
, len
) != len
)
3832 /* Parse LINE as a signal set and add its set bits to SIGS. */
3835 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
3837 int len
= strlen (line
) - 1;
3841 if (line
[len
] != '\n')
3842 error (_("Could not parse signal set: %s"), line
);
3850 if (*p
>= '0' && *p
<= '9')
3852 else if (*p
>= 'a' && *p
<= 'f')
3853 digit
= *p
- 'a' + 10;
3855 error (_("Could not parse signal set: %s"), line
);
3860 sigaddset (sigs
, signum
+ 1);
3862 sigaddset (sigs
, signum
+ 2);
3864 sigaddset (sigs
, signum
+ 3);
3866 sigaddset (sigs
, signum
+ 4);
3872 /* Find process PID's pending signals from /proc/pid/status and set
3876 linux_proc_pending_signals (int pid
, sigset_t
*pending
, sigset_t
*blocked
, sigset_t
*ignored
)
3879 char buffer
[MAXPATHLEN
], fname
[MAXPATHLEN
];
3881 struct cleanup
*cleanup
;
3883 sigemptyset (pending
);
3884 sigemptyset (blocked
);
3885 sigemptyset (ignored
);
3886 sprintf (fname
, "/proc/%d/status", pid
);
3887 procfile
= fopen (fname
, "r");
3888 if (procfile
== NULL
)
3889 error (_("Could not open %s"), fname
);
3890 cleanup
= make_cleanup_fclose (procfile
);
3892 while (fgets (buffer
, MAXPATHLEN
, procfile
) != NULL
)
3894 /* Normal queued signals are on the SigPnd line in the status
3895 file. However, 2.6 kernels also have a "shared" pending
3896 queue for delivering signals to a thread group, so check for
3899 Unfortunately some Red Hat kernels include the shared pending
3900 queue but not the ShdPnd status field. */
3902 if (strncmp (buffer
, "SigPnd:\t", 8) == 0)
3903 add_line_to_sigset (buffer
+ 8, pending
);
3904 else if (strncmp (buffer
, "ShdPnd:\t", 8) == 0)
3905 add_line_to_sigset (buffer
+ 8, pending
);
3906 else if (strncmp (buffer
, "SigBlk:\t", 8) == 0)
3907 add_line_to_sigset (buffer
+ 8, blocked
);
3908 else if (strncmp (buffer
, "SigIgn:\t", 8) == 0)
3909 add_line_to_sigset (buffer
+ 8, ignored
);
3912 do_cleanups (cleanup
);
3916 linux_nat_xfer_osdata (struct target_ops
*ops
, enum target_object object
,
3917 const char *annex
, gdb_byte
*readbuf
,
3918 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
3920 /* We make the process list snapshot when the object starts to be
3922 static const char *buf
;
3923 static LONGEST len_avail
= -1;
3924 static struct obstack obstack
;
3928 gdb_assert (object
== TARGET_OBJECT_OSDATA
);
3930 if (strcmp (annex
, "processes") != 0)
3933 gdb_assert (readbuf
&& !writebuf
);
3937 if (len_avail
!= -1 && len_avail
!= 0)
3938 obstack_free (&obstack
, NULL
);
3941 obstack_init (&obstack
);
3942 obstack_grow_str (&obstack
, "<osdata type=\"processes\">\n");
3944 dirp
= opendir ("/proc");
3948 while ((dp
= readdir (dirp
)) != NULL
)
3950 struct stat statbuf
;
3951 char procentry
[sizeof ("/proc/4294967295")];
3953 if (!isdigit (dp
->d_name
[0])
3954 || NAMELEN (dp
) > sizeof ("4294967295") - 1)
3957 sprintf (procentry
, "/proc/%s", dp
->d_name
);
3958 if (stat (procentry
, &statbuf
) == 0
3959 && S_ISDIR (statbuf
.st_mode
))
3963 char cmd
[MAXPATHLEN
+ 1];
3964 struct passwd
*entry
;
3966 pathname
= xstrprintf ("/proc/%s/cmdline", dp
->d_name
);
3967 entry
= getpwuid (statbuf
.st_uid
);
3969 if ((f
= fopen (pathname
, "r")) != NULL
)
3971 size_t len
= fread (cmd
, 1, sizeof (cmd
) - 1, f
);
3975 for (i
= 0; i
< len
; i
++)
3980 obstack_xml_printf (
3983 "<column name=\"pid\">%s</column>"
3984 "<column name=\"user\">%s</column>"
3985 "<column name=\"command\">%s</column>"
3988 entry
? entry
->pw_name
: "?",
4001 obstack_grow_str0 (&obstack
, "</osdata>\n");
4002 buf
= obstack_finish (&obstack
);
4003 len_avail
= strlen (buf
);
4006 if (offset
>= len_avail
)
4008 /* Done. Get rid of the obstack. */
4009 obstack_free (&obstack
, NULL
);
4015 if (len
> len_avail
- offset
)
4016 len
= len_avail
- offset
;
4017 memcpy (readbuf
, buf
+ offset
, len
);
4023 linux_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4024 const char *annex
, gdb_byte
*readbuf
,
4025 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4029 if (object
== TARGET_OBJECT_AUXV
)
4030 return procfs_xfer_auxv (ops
, object
, annex
, readbuf
, writebuf
,
4033 if (object
== TARGET_OBJECT_OSDATA
)
4034 return linux_nat_xfer_osdata (ops
, object
, annex
, readbuf
, writebuf
,
4037 xfer
= linux_proc_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4042 return super_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4046 /* Create a prototype generic GNU/Linux target. The client can override
4047 it with local methods. */
4050 linux_target_install_ops (struct target_ops
*t
)
4052 t
->to_insert_fork_catchpoint
= linux_child_insert_fork_catchpoint
;
4053 t
->to_insert_vfork_catchpoint
= linux_child_insert_vfork_catchpoint
;
4054 t
->to_insert_exec_catchpoint
= linux_child_insert_exec_catchpoint
;
4055 t
->to_pid_to_exec_file
= linux_child_pid_to_exec_file
;
4056 t
->to_post_startup_inferior
= linux_child_post_startup_inferior
;
4057 t
->to_post_attach
= linux_child_post_attach
;
4058 t
->to_follow_fork
= linux_child_follow_fork
;
4059 t
->to_find_memory_regions
= linux_nat_find_memory_regions
;
4060 t
->to_make_corefile_notes
= linux_nat_make_corefile_notes
;
4062 super_xfer_partial
= t
->to_xfer_partial
;
4063 t
->to_xfer_partial
= linux_xfer_partial
;
/* Build the generic GNU/Linux ptrace target and install the common
   Linux method overrides on it.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}
4078 linux_trad_target (CORE_ADDR (*register_u_offset
)(struct gdbarch
*, int, int))
4080 struct target_ops
*t
;
4082 t
= inf_ptrace_trad_target (register_u_offset
);
4083 linux_target_install_ops (t
);
4088 /* target_is_async_p implementation. */
4091 linux_nat_is_async_p (void)
4093 /* NOTE: palves 2008-03-21: We're only async when the user requests
4094 it explicitly with the "set target-async" command.
4095 Someday, linux will always be async. */
4096 if (!target_async_permitted
)
4102 /* target_can_async_p implementation. */
4105 linux_nat_can_async_p (void)
4107 /* NOTE: palves 2008-03-21: We're only async when the user requests
4108 it explicitly with the "set target-async" command.
4109 Someday, linux will always be async. */
4110 if (!target_async_permitted
)
4113 /* See target.h/target_async_mask. */
4114 return linux_nat_async_mask_value
;
/* This native target knows how to operate in non-stop mode.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
4123 /* target_async_mask implementation. */
4126 linux_nat_async_mask (int new_mask
)
4128 int curr_mask
= linux_nat_async_mask_value
;
4130 if (curr_mask
!= new_mask
)
4134 linux_nat_async (NULL
, 0);
4135 linux_nat_async_mask_value
= new_mask
;
4139 linux_nat_async_mask_value
= new_mask
;
4141 /* If we're going out of async-mask in all-stop, then the
4142 inferior is stopped. The next resume will call
4143 target_async. In non-stop, the target event source
4144 should be always registered in the event loop. Do so
4147 linux_nat_async (inferior_event_handler
, 0);
4154 static int async_terminal_is_ours
= 1;
4156 /* target_terminal_inferior implementation. */
4159 linux_nat_terminal_inferior (void)
4161 if (!target_is_async_p ())
4163 /* Async mode is disabled. */
4164 terminal_inferior ();
4168 /* GDB should never give the terminal to the inferior, if the
4169 inferior is running in the background (run&, continue&, etc.).
4170 This check can be removed when the common code is fixed. */
4171 if (!sync_execution
)
4174 terminal_inferior ();
4176 if (!async_terminal_is_ours
)
4179 delete_file_handler (input_fd
);
4180 async_terminal_is_ours
= 0;
4184 /* target_terminal_ours implementation. */
4187 linux_nat_terminal_ours (void)
4189 if (!target_is_async_p ())
4191 /* Async mode is disabled. */
4196 /* GDB should never give the terminal to the inferior if the
4197 inferior is running in the background (run&, continue&, etc.),
4198 but claiming it sure should. */
4201 if (!sync_execution
)
4204 if (async_terminal_is_ours
)
4207 clear_sigint_trap ();
4208 add_file_handler (input_fd
, stdin_event_handler
, 0);
4209 async_terminal_is_ours
= 1;
4212 static void (*async_client_callback
) (enum inferior_event_type event_type
,
4214 static void *async_client_context
;
4216 /* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4217 so we notice when any child changes state, and notify the
4218 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4219 above to wait for the arrival of a SIGCHLD. */
4222 sigchld_handler (int signo
)
4224 int old_errno
= errno
;
4226 if (debug_linux_nat_async
)
4227 fprintf_unfiltered (gdb_stdlog
, "sigchld\n");
4229 if (signo
== SIGCHLD
4230 && linux_nat_event_pipe
[0] != -1)
4231 async_file_mark (); /* Let the event loop know that there are
4232 events to handle. */
4237 /* Callback registered with the target events file descriptor. */
4240 handle_target_event (int error
, gdb_client_data client_data
)
4242 (*async_client_callback
) (INF_REG_EVENT
, async_client_context
);
4245 /* Create/destroy the target events pipe. Returns previous state. */
4248 linux_async_pipe (int enable
)
4250 int previous
= (linux_nat_event_pipe
[0] != -1);
4252 if (previous
!= enable
)
4256 block_child_signals (&prev_mask
);
4260 if (pipe (linux_nat_event_pipe
) == -1)
4261 internal_error (__FILE__
, __LINE__
,
4262 "creating event pipe failed.");
4264 fcntl (linux_nat_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4265 fcntl (linux_nat_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4269 close (linux_nat_event_pipe
[0]);
4270 close (linux_nat_event_pipe
[1]);
4271 linux_nat_event_pipe
[0] = -1;
4272 linux_nat_event_pipe
[1] = -1;
4275 restore_child_signals_mask (&prev_mask
);
4281 /* target_async implementation. */
4284 linux_nat_async (void (*callback
) (enum inferior_event_type event_type
,
4285 void *context
), void *context
)
4287 if (linux_nat_async_mask_value
== 0 || !target_async_permitted
)
4288 internal_error (__FILE__
, __LINE__
,
4289 "Calling target_async when async is masked");
4291 if (callback
!= NULL
)
4293 async_client_callback
= callback
;
4294 async_client_context
= context
;
4295 if (!linux_async_pipe (1))
4297 add_file_handler (linux_nat_event_pipe
[0],
4298 handle_target_event
, NULL
);
4299 /* There may be pending events to handle. Tell the event loop
4306 async_client_callback
= callback
;
4307 async_client_context
= context
;
4308 delete_file_handler (linux_nat_event_pipe
[0]);
4309 linux_async_pipe (0);
4314 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
4318 linux_nat_stop_lwp (struct lwp_info
*lwp
, void *data
)
4320 ptid_t ptid
= * (ptid_t
*) data
;
4322 if (ptid_equal (lwp
->ptid
, ptid
)
4323 || ptid_equal (minus_one_ptid
, ptid
)
4324 || (ptid_is_pid (ptid
)
4325 && ptid_get_pid (ptid
) == ptid_get_pid (lwp
->ptid
)))
4330 ptid_t ptid
= lwp
->ptid
;
4332 if (debug_linux_nat
)
4333 fprintf_unfiltered (gdb_stdlog
,
4334 "LNSL: running -> suspending %s\n",
4335 target_pid_to_str (lwp
->ptid
));
4338 stop_callback (lwp
, NULL
);
4339 stop_wait_callback (lwp
, NULL
);
4341 /* If the lwp exits while we try to stop it, there's nothing
4343 lwp
= find_lwp_pid (ptid
);
4347 /* If we didn't collect any signal other than SIGSTOP while
4348 stopping the LWP, push a SIGNAL_0 event. In either case,
4349 the event-loop will end up calling target_wait which will
4351 if (lwp
->status
== 0)
4352 lwp
->status
= W_STOPCODE (0);
4357 /* Already known to be stopped; do nothing. */
4359 if (debug_linux_nat
)
4361 if (find_thread_pid (lwp
->ptid
)->stop_requested
)
4362 fprintf_unfiltered (gdb_stdlog
, "\
4363 LNSL: already stopped/stop_requested %s\n",
4364 target_pid_to_str (lwp
->ptid
));
4366 fprintf_unfiltered (gdb_stdlog
, "\
4367 LNSL: already stopped/no stop_requested yet %s\n",
4368 target_pid_to_str (lwp
->ptid
));
4376 linux_nat_stop (ptid_t ptid
)
4379 iterate_over_lwps (linux_nat_stop_lwp
, &ptid
);
4381 linux_ops
->to_stop (ptid
);
4385 linux_nat_add_target (struct target_ops
*t
)
4387 /* Save the provided single-threaded target. We save this in a separate
4388 variable because another target we've inherited from (e.g. inf-ptrace)
4389 may have saved a pointer to T; we want to use it for the final
4390 process stratum target. */
4391 linux_ops_saved
= *t
;
4392 linux_ops
= &linux_ops_saved
;
4394 /* Override some methods for multithreading. */
4395 t
->to_create_inferior
= linux_nat_create_inferior
;
4396 t
->to_attach
= linux_nat_attach
;
4397 t
->to_detach
= linux_nat_detach
;
4398 t
->to_resume
= linux_nat_resume
;
4399 t
->to_wait
= linux_nat_wait
;
4400 t
->to_xfer_partial
= linux_nat_xfer_partial
;
4401 t
->to_kill
= linux_nat_kill
;
4402 t
->to_mourn_inferior
= linux_nat_mourn_inferior
;
4403 t
->to_thread_alive
= linux_nat_thread_alive
;
4404 t
->to_pid_to_str
= linux_nat_pid_to_str
;
4405 t
->to_has_thread_control
= tc_schedlock
;
4407 t
->to_can_async_p
= linux_nat_can_async_p
;
4408 t
->to_is_async_p
= linux_nat_is_async_p
;
4409 t
->to_supports_non_stop
= linux_nat_supports_non_stop
;
4410 t
->to_async
= linux_nat_async
;
4411 t
->to_async_mask
= linux_nat_async_mask
;
4412 t
->to_terminal_inferior
= linux_nat_terminal_inferior
;
4413 t
->to_terminal_ours
= linux_nat_terminal_ours
;
4415 /* Methods for non-stop support. */
4416 t
->to_stop
= linux_nat_stop
;
4418 /* We don't change the stratum; this target will sit at
4419 process_stratum and thread_db will set at thread_stratum. This
4420 is a little strange, since this is a multi-threaded-capable
4421 target, but we want to be on the stack below thread_db, and we
4422 also want to be used for single-threaded processes. */
4427 /* Register a method to call whenever a new thread is attached. */
4429 linux_nat_set_new_thread (struct target_ops
*t
, void (*new_thread
) (ptid_t
))
4431 /* Save the pointer. We only support a single registered instance
4432 of the GNU/Linux native target, so we do not need to map this to
4434 linux_nat_new_thread
= new_thread
;
4437 /* Register a method that converts a siginfo object between the layout
4438 that ptrace returns, and the layout in the architecture of the
4441 linux_nat_set_siginfo_fixup (struct target_ops
*t
,
4442 int (*siginfo_fixup
) (struct siginfo
*,
4446 /* Save the pointer. */
4447 linux_nat_siginfo_fixup
= siginfo_fixup
;
4450 /* Return the saved siginfo associated with PTID. */
4452 linux_nat_get_siginfo (ptid_t ptid
)
4454 struct lwp_info
*lp
= find_lwp_pid (ptid
);
4456 gdb_assert (lp
!= NULL
);
4458 return &lp
->siginfo
;
4461 /* Provide a prototype to silence -Wmissing-prototypes. */
4462 extern initialize_file_ftype _initialize_linux_nat
;
4465 _initialize_linux_nat (void)
4469 add_info ("proc", linux_nat_info_proc_cmd
, _("\
4470 Show /proc process information about any running process.\n\
4471 Specify any process id, or use the program being debugged by default.\n\
4472 Specify any of the following keywords for detailed info:\n\
4473 mappings -- list of mapped memory regions.\n\
4474 stat -- list a bunch of random process info.\n\
4475 status -- list a different bunch of random process info.\n\
4476 all -- list all available /proc info."));
4478 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance
,
4479 &debug_linux_nat
, _("\
4480 Set debugging of GNU/Linux lwp module."), _("\
4481 Show debugging of GNU/Linux lwp module."), _("\
4482 Enables printf debugging output."),
4484 show_debug_linux_nat
,
4485 &setdebuglist
, &showdebuglist
);
4487 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance
,
4488 &debug_linux_nat_async
, _("\
4489 Set debugging of GNU/Linux async lwp module."), _("\
4490 Show debugging of GNU/Linux async lwp module."), _("\
4491 Enables printf debugging output."),
4493 show_debug_linux_nat_async
,
4494 &setdebuglist
, &showdebuglist
);
4496 /* Save this mask as the default. */
4497 sigprocmask (SIG_SETMASK
, NULL
, &normal_mask
);
4499 /* Install a SIGCHLD handler. */
4500 sigchld_action
.sa_handler
= sigchld_handler
;
4501 sigemptyset (&sigchld_action
.sa_mask
);
4502 sigchld_action
.sa_flags
= SA_RESTART
;
4504 /* Make it the default. */
4505 sigaction (SIGCHLD
, &sigchld_action
, NULL
);
4507 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4508 sigprocmask (SIG_SETMASK
, NULL
, &suspend_mask
);
4509 sigdelset (&suspend_mask
, SIGCHLD
);
4511 sigemptyset (&blocked_mask
);
4513 add_setshow_boolean_cmd ("disable-randomization", class_support
,
4514 &disable_randomization
, _("\
4515 Set disabling of debuggee's virtual address space randomization."), _("\
4516 Show disabling of debuggee's virtual address space randomization."), _("\
4517 When this mode is on (which is the default), randomization of the virtual\n\
4518 address space is disabled. Standalone programs run with the randomization\n\
4519 enabled by default on some platforms."),
4520 &set_disable_randomization
,
4521 &show_disable_randomization
,
4522 &setlist
, &showlist
);
4526 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4527 the GNU/Linux Threads library and therefore doesn't really belong
4530 /* Read variable NAME in the target and return its value if found.
4531 Otherwise return zero. It is assumed that the type of the variable
4535 get_signo (const char *name
)
4537 struct minimal_symbol
*ms
;
4540 ms
= lookup_minimal_symbol (name
, NULL
, NULL
);
4544 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms
), (gdb_byte
*) &signo
,
4545 sizeof (signo
)) != 0)
4551 /* Return the set of signals used by the threads library in *SET. */
4554 lin_thread_get_thread_signals (sigset_t
*set
)
4556 struct sigaction action
;
4557 int restart
, cancel
;
4559 sigemptyset (&blocked_mask
);
4562 restart
= get_signo ("__pthread_sig_restart");
4563 cancel
= get_signo ("__pthread_sig_cancel");
4565 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4566 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4567 not provide any way for the debugger to query the signal numbers -
4568 fortunately they don't change! */
4571 restart
= __SIGRTMIN
;
4574 cancel
= __SIGRTMIN
+ 1;
4576 sigaddset (set
, restart
);
4577 sigaddset (set
, cancel
);
4579 /* The GNU/Linux Threads library makes terminating threads send a
4580 special "cancel" signal instead of SIGCHLD. Make sure we catch
4581 those (to prevent them from terminating GDB itself, which is
4582 likely to be their default action) and treat them the same way as
4585 action
.sa_handler
= sigchld_handler
;
4586 sigemptyset (&action
.sa_mask
);
4587 action
.sa_flags
= SA_RESTART
;
4588 sigaction (cancel
, &action
, NULL
);
4590 /* We block the "cancel" signal throughout this code ... */
4591 sigaddset (&blocked_mask
, cancel
);
4592 sigprocmask (SIG_BLOCK
, &blocked_mask
, NULL
);
4594 /* ... except during a sigsuspend. */
4595 sigdelset (&suspend_mask
, cancel
);