1 /* GNU/Linux native-dependent code common to multiple platforms.
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
6 This file is part of GDB.
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdb_string.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
29 #include <sys/syscall.h>
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
38 #include "inf-ptrace.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
50 #include "event-loop.h"
51 #include "event-top.h"
53 #include <sys/types.h>
54 #include "gdb_dirent.h"
55 #include "xml-support.h"
57 #ifdef HAVE_PERSONALITY
58 # include <sys/personality.h>
59 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
60 # define ADDR_NO_RANDOMIZE 0x0040000
62 #endif /* HAVE_PERSONALITY */
64 /* This comment documents high-level logic of this file.
66 Waiting for events in sync mode
67 ===============================
69 When waiting for an event in a specific thread, we just use waitpid, passing
70 the specific pid, and not passing WNOHANG.
72 When waiting for an event in all threads, waitpid is not quite good. Prior to
73 version 2.4, Linux can either wait for event in main thread, or in secondary
74 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
75 miss an event. The solution is to use non-blocking waitpid, together with
76 sigsuspend. First, we use non-blocking waitpid to get an event in the main
77 process, if any. Second, we use non-blocking waitpid with the __WCLONE
78 flag to check for events in cloned processes. If nothing is found, we use
79 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
80 happened to a child process -- and SIGCHLD will be delivered both for events
81 in main debugged process and in cloned processes. As soon as we know there's
82 an event, we get back to calling nonblocking waitpid with and without __WCLONE.
84 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
85 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
86 blocked, the signal becomes pending and sigsuspend immediately
87 notices it and returns.
89 Waiting for events in async mode
90 ================================
92 In async mode, GDB should always be ready to handle both user input and target
93 events, so neither blocking waitpid nor sigsuspend are viable
94 options. Instead, we should notify the GDB main event loop whenever there's
95 unprocessed event from the target. The only way to notify this event loop is
96 to make it wait on input from a pipe, and write something to the pipe whenever
97 there's event. Obviously, if we fail to notify the event loop if there's
98 target event, it's bad. If we notify the event loop when there's no event
99 from target, linux-nat.c will detect that there's no event, actually, and
100 report event of type TARGET_WAITKIND_IGNORE, but it will waste time and
103 The main design point is that every time GDB is outside linux-nat.c, we have a
104 SIGCHLD handler installed that is called when something happens to the target
105 and notifies the GDB event loop. Also, the event is extracted from the target
106 using waitpid and stored for future use. Whenever GDB core decides to handle
107 the event, and calls into linux-nat.c, we disable SIGCHLD and process things
108 as in sync mode, except that before waitpid call we check if there are any
109 previously read events.
111 It could happen that during event processing, we'll try to get more events
112 than there are events in the local queue, which will result in a waitpid call.
113 Those waitpid calls, while blocking, are guaranteed to always have
114 something for waitpid to return. E.g., stopping a thread with SIGSTOP, and
115 waiting for the lwp to stop.
117 The event loop is notified about new events using a pipe. SIGCHLD handler does
118 waitpid and writes the results in to a pipe. GDB event loop has the other end
119 of the pipe among the sources. When event loop starts to process the event
120 and calls a function in linux-nat.c, all events from the pipe are transferred
121 into a local queue and SIGCHLD is blocked. Further processing goes as in sync
122 mode. Before we return from linux_nat_wait, we transfer all unprocessed events
123 from local queue back to the pipe, so that when we get back to event loop,
124 event loop will notice there's something more to do.
126 SIGCHLD is blocked when we're inside target_wait, so that should we actually
127 want to wait for some more events, SIGCHLD handler does not steal them from
128 us. Technically, it would be possible to add new events to the local queue but
129 it's about the same amount of work as blocking SIGCHLD.
131 This moving of events from pipe into local queue and back into pipe when we
132 enter/leave linux-nat.c is somewhat ugly. Unfortunately, GDB event loop is
133 home-grown and incapable of waiting on any queue.
138 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
139 signal is not entirely significant; we just need for a signal to be delivered,
140 so that we can intercept it. SIGSTOP's advantage is that it can not be
141 blocked. A disadvantage is that it is not a real-time signal, so it can only
142 be queued once; we do not keep track of other sources of SIGSTOP.
144 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
145 use them, because they have special behavior when the signal is generated -
146 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
147 kills the entire thread group.
149 A delivered SIGSTOP would stop the entire thread group, not just the thread we
150 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
151 cancel it (by PTRACE_CONT without passing SIGSTOP).
153 We could use a real-time signal instead. This would solve those problems; we
154 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
155 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
156 generates it, and there are races with trying to find a signal that is not
160 #define O_LARGEFILE 0
163 /* If the system headers did not provide the constants, hard-code the normal
165 #ifndef PTRACE_EVENT_FORK
167 #define PTRACE_SETOPTIONS 0x4200
168 #define PTRACE_GETEVENTMSG 0x4201
170 /* options set using PTRACE_SETOPTIONS */
171 #define PTRACE_O_TRACESYSGOOD 0x00000001
172 #define PTRACE_O_TRACEFORK 0x00000002
173 #define PTRACE_O_TRACEVFORK 0x00000004
174 #define PTRACE_O_TRACECLONE 0x00000008
175 #define PTRACE_O_TRACEEXEC 0x00000010
176 #define PTRACE_O_TRACEVFORKDONE 0x00000020
177 #define PTRACE_O_TRACEEXIT 0x00000040
179 /* Wait extended result codes for the above trace options. */
180 #define PTRACE_EVENT_FORK 1
181 #define PTRACE_EVENT_VFORK 2
182 #define PTRACE_EVENT_CLONE 3
183 #define PTRACE_EVENT_EXEC 4
184 #define PTRACE_EVENT_VFORK_DONE 5
185 #define PTRACE_EVENT_EXIT 6
187 #endif /* PTRACE_EVENT_FORK */
189 /* We can't always assume that this flag is available, but all systems
190 with the ptrace event handlers also have __WALL, so it's safe to use
193 #define __WALL 0x40000000 /* Wait for any child. */
196 #ifndef PTRACE_GETSIGINFO
197 #define PTRACE_GETSIGINFO 0x4202
200 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
201 the use of the multi-threaded target. */
202 static struct target_ops
*linux_ops
;
203 static struct target_ops linux_ops_saved
;
205 /* The method to call, if any, when a new thread is attached. */
206 static void (*linux_nat_new_thread
) (ptid_t
);
208 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
209 Called by our to_xfer_partial. */
210 static LONGEST (*super_xfer_partial
) (struct target_ops
*,
212 const char *, gdb_byte
*,
216 static int debug_linux_nat
;
218 show_debug_linux_nat (struct ui_file
*file
, int from_tty
,
219 struct cmd_list_element
*c
, const char *value
)
221 fprintf_filtered (file
, _("Debugging of GNU/Linux lwp module is %s.\n"),
225 static int debug_linux_nat_async
= 0;
227 show_debug_linux_nat_async (struct ui_file
*file
, int from_tty
,
228 struct cmd_list_element
*c
, const char *value
)
230 fprintf_filtered (file
, _("Debugging of GNU/Linux async lwp module is %s.\n"),
234 static int disable_randomization
= 1;
237 show_disable_randomization (struct ui_file
*file
, int from_tty
,
238 struct cmd_list_element
*c
, const char *value
)
240 #ifdef HAVE_PERSONALITY
241 fprintf_filtered (file
, _("\
242 Disabling randomization of debuggee's virtual address space is %s.\n"),
244 #else /* !HAVE_PERSONALITY */
246 Disabling randomization of debuggee's virtual address space is unsupported on\n\
247 this platform.\n"), file
);
248 #endif /* !HAVE_PERSONALITY */
252 set_disable_randomization (char *args
, int from_tty
, struct cmd_list_element
*c
)
254 #ifndef HAVE_PERSONALITY
256 Disabling randomization of debuggee's virtual address space is unsupported on\n\
258 #endif /* !HAVE_PERSONALITY */
261 static int linux_parent_pid
;
263 struct simple_pid_list
267 struct simple_pid_list
*next
;
269 struct simple_pid_list
*stopped_pids
;
271 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
272 can not be used, 1 if it can. */
274 static int linux_supports_tracefork_flag
= -1;
276 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
277 PTRACE_O_TRACEVFORKDONE. */
279 static int linux_supports_tracevforkdone_flag
= -1;
281 /* Async mode support */
283 /* Zero if the async mode, although enabled, is masked, which means
284 linux_nat_wait should behave as if async mode was off. */
285 static int linux_nat_async_mask_value
= 1;
287 /* The read/write ends of the pipe registered as waitable file in the
289 static int linux_nat_event_pipe
[2] = { -1, -1 };
291 /* Number of queued events in the pipe. */
292 static volatile int linux_nat_num_queued_events
;
294 /* The possible SIGCHLD handling states. */
298 /* SIGCHLD disabled, with action set to sigchld_handler, for the
299 sigsuspend in linux_nat_wait. */
301 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
303 /* Set SIGCHLD to default action. Used while creating an
308 /* The current SIGCHLD handling state. */
309 static enum sigchld_state linux_nat_async_events_state
;
311 static enum sigchld_state
linux_nat_async_events (enum sigchld_state enable
);
312 static void pipe_to_local_event_queue (void);
313 static void local_event_queue_to_pipe (void);
314 static void linux_nat_event_pipe_push (int pid
, int status
, int options
);
315 static int linux_nat_event_pipe_pop (int* ptr_status
, int* ptr_options
);
316 static void linux_nat_set_async_mode (int on
);
317 static void linux_nat_async (void (*callback
)
318 (enum inferior_event_type event_type
, void *context
),
320 static int linux_nat_async_mask (int mask
);
321 static int kill_lwp (int lwpid
, int signo
);
323 static int stop_callback (struct lwp_info
*lp
, void *data
);
325 /* Captures the result of a successful waitpid call, along with the
326 options used in that call. */
327 struct waitpid_result
332 struct waitpid_result
*next
;
335 /* A singly-linked list of the results of the waitpid calls performed
336 in the async SIGCHLD handler. */
337 static struct waitpid_result
*waitpid_queue
= NULL
;
339 /* Similarly to `waitpid', but check the local event queue instead of
340 querying the kernel queue. If PEEK, don't remove the event found
344 queued_waitpid_1 (int pid
, int *status
, int flags
, int peek
)
346 struct waitpid_result
*msg
= waitpid_queue
, *prev
= NULL
;
348 if (debug_linux_nat_async
)
349 fprintf_unfiltered (gdb_stdlog
,
351 QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
352 linux_nat_async_events_state
,
353 linux_nat_num_queued_events
);
357 for (; msg
; prev
= msg
, msg
= msg
->next
)
358 if (pid
== -1 || pid
== msg
->pid
)
361 else if (flags
& __WCLONE
)
363 for (; msg
; prev
= msg
, msg
= msg
->next
)
364 if (msg
->options
& __WCLONE
365 && (pid
== -1 || pid
== msg
->pid
))
370 for (; msg
; prev
= msg
, msg
= msg
->next
)
371 if ((msg
->options
& __WCLONE
) == 0
372 && (pid
== -1 || pid
== msg
->pid
))
381 *status
= msg
->status
;
384 if (debug_linux_nat_async
)
385 fprintf_unfiltered (gdb_stdlog
, "QWPID: pid(%d), status(%x)\n",
391 prev
->next
= msg
->next
;
393 waitpid_queue
= msg
->next
;
402 if (debug_linux_nat_async
)
403 fprintf_unfiltered (gdb_stdlog
, "QWPID: miss\n");
410 /* Similarly to `waitpid', but check the local event queue. */
413 queued_waitpid (int pid
, int *status
, int flags
)
415 return queued_waitpid_1 (pid
, status
, flags
, 0);
419 push_waitpid (int pid
, int status
, int options
)
421 struct waitpid_result
*event
, *new_event
;
423 new_event
= xmalloc (sizeof (*new_event
));
424 new_event
->pid
= pid
;
425 new_event
->status
= status
;
426 new_event
->options
= options
;
427 new_event
->next
= NULL
;
431 for (event
= waitpid_queue
;
432 event
&& event
->next
;
436 event
->next
= new_event
;
439 waitpid_queue
= new_event
;
442 /* Drain all queued events of PID. If PID is -1, the effect is of
443 draining all events. */
445 drain_queued_events (int pid
)
447 while (queued_waitpid (pid
, NULL
, __WALL
) != -1)
452 /* Trivial list manipulation functions to keep track of a list of
453 new stopped processes. */
455 add_to_pid_list (struct simple_pid_list
**listp
, int pid
, int status
)
457 struct simple_pid_list
*new_pid
= xmalloc (sizeof (struct simple_pid_list
));
459 new_pid
->status
= status
;
460 new_pid
->next
= *listp
;
465 pull_pid_from_list (struct simple_pid_list
**listp
, int pid
, int *status
)
467 struct simple_pid_list
**p
;
469 for (p
= listp
; *p
!= NULL
; p
= &(*p
)->next
)
470 if ((*p
)->pid
== pid
)
472 struct simple_pid_list
*next
= (*p
)->next
;
473 *status
= (*p
)->status
;
482 linux_record_stopped_pid (int pid
, int status
)
484 add_to_pid_list (&stopped_pids
, pid
, status
);
488 /* A helper function for linux_test_for_tracefork, called after fork (). */
491 linux_tracefork_child (void)
495 ptrace (PTRACE_TRACEME
, 0, 0, 0);
496 kill (getpid (), SIGSTOP
);
501 /* Wrapper function for waitpid which handles EINTR, and checks for
502 locally queued events. */
505 my_waitpid (int pid
, int *status
, int flags
)
509 /* There should be no concurrent calls to waitpid. */
510 gdb_assert (linux_nat_async_events_state
== sigchld_sync
);
512 ret
= queued_waitpid (pid
, status
, flags
);
518 ret
= waitpid (pid
, status
, flags
);
520 while (ret
== -1 && errno
== EINTR
);
525 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
527 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
528 we know that the feature is not available. This may change the tracing
529 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
531 However, if it succeeds, we don't know for sure that the feature is
532 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
533 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
534 fork tracing, and let it fork. If the process exits, we assume that we
535 can't use TRACEFORK; if we get the fork notification, and we can extract
536 the new child's PID, then we assume that we can. */
539 linux_test_for_tracefork (int original_pid
)
541 int child_pid
, ret
, status
;
543 enum sigchld_state async_events_original_state
;
545 async_events_original_state
= linux_nat_async_events (sigchld_sync
);
547 linux_supports_tracefork_flag
= 0;
548 linux_supports_tracevforkdone_flag
= 0;
550 ret
= ptrace (PTRACE_SETOPTIONS
, original_pid
, 0, PTRACE_O_TRACEFORK
);
556 perror_with_name (("fork"));
559 linux_tracefork_child ();
561 ret
= my_waitpid (child_pid
, &status
, 0);
563 perror_with_name (("waitpid"));
564 else if (ret
!= child_pid
)
565 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret
);
566 if (! WIFSTOPPED (status
))
567 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status
);
569 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0, PTRACE_O_TRACEFORK
);
572 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
575 warning (_("linux_test_for_tracefork: failed to kill child"));
576 linux_nat_async_events (async_events_original_state
);
580 ret
= my_waitpid (child_pid
, &status
, 0);
581 if (ret
!= child_pid
)
582 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
583 else if (!WIFSIGNALED (status
))
584 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
585 "killed child"), status
);
587 linux_nat_async_events (async_events_original_state
);
591 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
592 ret
= ptrace (PTRACE_SETOPTIONS
, child_pid
, 0,
593 PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORKDONE
);
594 linux_supports_tracevforkdone_flag
= (ret
== 0);
596 ret
= ptrace (PTRACE_CONT
, child_pid
, 0, 0);
598 warning (_("linux_test_for_tracefork: failed to resume child"));
600 ret
= my_waitpid (child_pid
, &status
, 0);
602 if (ret
== child_pid
&& WIFSTOPPED (status
)
603 && status
>> 16 == PTRACE_EVENT_FORK
)
606 ret
= ptrace (PTRACE_GETEVENTMSG
, child_pid
, 0, &second_pid
);
607 if (ret
== 0 && second_pid
!= 0)
611 linux_supports_tracefork_flag
= 1;
612 my_waitpid (second_pid
, &second_status
, 0);
613 ret
= ptrace (PTRACE_KILL
, second_pid
, 0, 0);
615 warning (_("linux_test_for_tracefork: failed to kill second child"));
616 my_waitpid (second_pid
, &status
, 0);
620 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
621 "(%d, status 0x%x)"), ret
, status
);
623 ret
= ptrace (PTRACE_KILL
, child_pid
, 0, 0);
625 warning (_("linux_test_for_tracefork: failed to kill child"));
626 my_waitpid (child_pid
, &status
, 0);
628 linux_nat_async_events (async_events_original_state
);
631 /* Return non-zero iff we have tracefork functionality available.
632 This function also sets linux_supports_tracefork_flag. */
635 linux_supports_tracefork (int pid
)
637 if (linux_supports_tracefork_flag
== -1)
638 linux_test_for_tracefork (pid
);
639 return linux_supports_tracefork_flag
;
643 linux_supports_tracevforkdone (int pid
)
645 if (linux_supports_tracefork_flag
== -1)
646 linux_test_for_tracefork (pid
);
647 return linux_supports_tracevforkdone_flag
;
652 linux_enable_event_reporting (ptid_t ptid
)
654 int pid
= ptid_get_lwp (ptid
);
658 pid
= ptid_get_pid (ptid
);
660 if (! linux_supports_tracefork (pid
))
663 options
= PTRACE_O_TRACEFORK
| PTRACE_O_TRACEVFORK
| PTRACE_O_TRACEEXEC
664 | PTRACE_O_TRACECLONE
;
665 if (linux_supports_tracevforkdone (pid
))
666 options
|= PTRACE_O_TRACEVFORKDONE
;
668 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
669 read-only process state. */
671 ptrace (PTRACE_SETOPTIONS
, pid
, 0, options
);
675 linux_child_post_attach (int pid
)
677 linux_enable_event_reporting (pid_to_ptid (pid
));
678 check_for_thread_db ();
682 linux_child_post_startup_inferior (ptid_t ptid
)
684 linux_enable_event_reporting (ptid
);
685 check_for_thread_db ();
689 linux_child_follow_fork (struct target_ops
*ops
, int follow_child
)
692 struct target_waitstatus last_status
;
694 int parent_pid
, child_pid
;
696 if (target_can_async_p ())
697 target_async (NULL
, 0);
699 get_last_target_status (&last_ptid
, &last_status
);
700 has_vforked
= (last_status
.kind
== TARGET_WAITKIND_VFORKED
);
701 parent_pid
= ptid_get_lwp (last_ptid
);
703 parent_pid
= ptid_get_pid (last_ptid
);
704 child_pid
= PIDGET (last_status
.value
.related_pid
);
708 /* We're already attached to the parent, by default. */
710 /* Before detaching from the child, remove all breakpoints from
711 it. (This won't actually modify the breakpoint list, but will
712 physically remove the breakpoints from the child.) */
713 /* If we vforked this will remove the breakpoints from the parent
714 also, but they'll be reinserted below. */
715 detach_breakpoints (child_pid
);
717 /* Detach new forked process? */
720 if (info_verbose
|| debug_linux_nat
)
722 target_terminal_ours ();
723 fprintf_filtered (gdb_stdlog
,
724 "Detaching after fork from child process %d.\n",
728 ptrace (PTRACE_DETACH
, child_pid
, 0, 0);
732 struct fork_info
*fp
;
734 /* Add process to GDB's tables. */
735 add_inferior (child_pid
);
737 /* Retain child fork in ptrace (stopped) state. */
738 fp
= find_fork_pid (child_pid
);
740 fp
= add_fork (child_pid
);
741 fork_save_infrun_state (fp
, 0);
746 gdb_assert (linux_supports_tracefork_flag
>= 0);
747 if (linux_supports_tracevforkdone (0))
751 ptrace (PTRACE_CONT
, parent_pid
, 0, 0);
752 my_waitpid (parent_pid
, &status
, __WALL
);
753 if ((status
>> 16) != PTRACE_EVENT_VFORK_DONE
)
754 warning (_("Unexpected waitpid result %06x when waiting for "
755 "vfork-done"), status
);
759 /* We can't insert breakpoints until the child has
760 finished with the shared memory region. We need to
761 wait until that happens. Ideal would be to just
763 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
764 - waitpid (parent_pid, &status, __WALL);
765 However, most architectures can't handle a syscall
766 being traced on the way out if it wasn't traced on
769 We might also think to loop, continuing the child
770 until it exits or gets a SIGTRAP. One problem is
771 that the child might call ptrace with PTRACE_TRACEME.
773 There's no simple and reliable way to figure out when
774 the vforked child will be done with its copy of the
775 shared memory. We could step it out of the syscall,
776 two instructions, let it go, and then single-step the
777 parent once. When we have hardware single-step, this
778 would work; with software single-step it could still
779 be made to work but we'd have to be able to insert
780 single-step breakpoints in the child, and we'd have
781 to insert -just- the single-step breakpoint in the
782 parent. Very awkward.
784 In the end, the best we can do is to make sure it
785 runs for a little while. Hopefully it will be out of
786 range of any breakpoints we reinsert. Usually this
787 is only the single-step breakpoint at vfork's return
793 /* Since we vforked, breakpoints were removed in the parent
794 too. Put them back. */
795 reattach_breakpoints (parent_pid
);
800 struct thread_info
*last_tp
= find_thread_pid (last_ptid
);
801 struct thread_info
*tp
;
802 char child_pid_spelling
[40];
804 /* Copy user stepping state to the new inferior thread. */
805 struct breakpoint
*step_resume_breakpoint
= last_tp
->step_resume_breakpoint
;
806 CORE_ADDR step_range_start
= last_tp
->step_range_start
;
807 CORE_ADDR step_range_end
= last_tp
->step_range_end
;
808 struct frame_id step_frame_id
= last_tp
->step_frame_id
;
810 /* Otherwise, deleting the parent would get rid of this
812 last_tp
->step_resume_breakpoint
= NULL
;
814 /* Needed to keep the breakpoint lists in sync. */
816 detach_breakpoints (child_pid
);
818 /* Before detaching from the parent, remove all breakpoints from it. */
819 remove_breakpoints ();
821 if (info_verbose
|| debug_linux_nat
)
823 target_terminal_ours ();
824 fprintf_filtered (gdb_stdlog
,
825 "Attaching after fork to child process %d.\n",
829 /* If we're vforking, we may want to hold on to the parent until
830 the child exits or execs. At exec time we can remove the old
831 breakpoints from the parent and detach it; at exit time we
832 could do the same (or even, sneakily, resume debugging it - the
833 child's exec has failed, or something similar).
835 This doesn't clean up "properly", because we can't call
836 target_detach, but that's OK; if the current target is "child",
837 then it doesn't need any further cleanups, and lin_lwp will
838 generally not encounter vfork (vfork is defined to fork
841 The holding part is very easy if we have VFORKDONE events;
842 but keeping track of both processes is beyond GDB at the
843 moment. So we don't expose the parent to the rest of GDB.
844 Instead we quietly hold onto it until such time as we can
849 linux_parent_pid
= parent_pid
;
850 detach_inferior (parent_pid
);
852 else if (!detach_fork
)
854 struct fork_info
*fp
;
855 /* Retain parent fork in ptrace (stopped) state. */
856 fp
= find_fork_pid (parent_pid
);
858 fp
= add_fork (parent_pid
);
859 fork_save_infrun_state (fp
, 0);
862 target_detach (NULL
, 0);
864 inferior_ptid
= ptid_build (child_pid
, child_pid
, 0);
865 add_inferior (child_pid
);
867 /* Reinstall ourselves, since we might have been removed in
868 target_detach (which does other necessary cleanup). */
871 linux_nat_switch_fork (inferior_ptid
);
872 check_for_thread_db ();
874 tp
= inferior_thread ();
875 tp
->step_resume_breakpoint
= step_resume_breakpoint
;
876 tp
->step_range_start
= step_range_start
;
877 tp
->step_range_end
= step_range_end
;
878 tp
->step_frame_id
= step_frame_id
;
880 /* Reset breakpoints in the child as appropriate. */
881 follow_inferior_reset_breakpoints ();
884 if (target_can_async_p ())
885 target_async (inferior_event_handler
, 0);
892 linux_child_insert_fork_catchpoint (int pid
)
894 if (! linux_supports_tracefork (pid
))
895 error (_("Your system does not support fork catchpoints."));
899 linux_child_insert_vfork_catchpoint (int pid
)
901 if (!linux_supports_tracefork (pid
))
902 error (_("Your system does not support vfork catchpoints."));
906 linux_child_insert_exec_catchpoint (int pid
)
908 if (!linux_supports_tracefork (pid
))
909 error (_("Your system does not support exec catchpoints."));
912 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
913 are processes sharing the same VM space. A multi-threaded process
914 is basically a group of such processes. However, such a grouping
915 is almost entirely a user-space issue; the kernel doesn't enforce
916 such a grouping at all (this might change in the future). In
917 general, we'll rely on the threads library (i.e. the GNU/Linux
918 Threads library) to provide such a grouping.
920 It is perfectly well possible to write a multi-threaded application
921 without the assistance of a threads library, by using the clone
922 system call directly. This module should be able to give some
923 rudimentary support for debugging such applications if developers
924 specify the CLONE_PTRACE flag in the clone system call, and are
925 using the Linux kernel 2.4 or above.
927 Note that there are some peculiarities in GNU/Linux that affect
930 - In general one should specify the __WCLONE flag to waitpid in
931 order to make it report events for any of the cloned processes
932 (and leave it out for the initial process). However, if a cloned
933 process has exited the exit status is only reported if the
934 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
935 we cannot use it since GDB must work on older systems too.
937 - When a traced, cloned process exits and is waited for by the
938 debugger, the kernel reassigns it to the original parent and
939 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
940 library doesn't notice this, which leads to the "zombie problem":
941 When debugged a multi-threaded process that spawns a lot of
942 threads will run out of processes, even if the threads exit,
943 because the "zombies" stay around. */
945 /* List of known LWPs. */
946 struct lwp_info
*lwp_list
;
948 /* Number of LWPs in the list. */
952 /* Original signal mask. */
953 static sigset_t normal_mask
;
955 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
956 _initialize_linux_nat. */
957 static sigset_t suspend_mask
;
959 /* SIGCHLD action for synchronous mode. */
960 struct sigaction sync_sigchld_action
;
962 /* SIGCHLD action for asynchronous mode. */
963 static struct sigaction async_sigchld_action
;
965 /* SIGCHLD default action, to pass to new inferiors. */
966 static struct sigaction sigchld_default_action
;
969 /* Prototypes for local functions. */
970 static int stop_wait_callback (struct lwp_info
*lp
, void *data
);
971 static int linux_nat_thread_alive (ptid_t ptid
);
972 static char *linux_child_pid_to_exec_file (int pid
);
973 static int cancel_breakpoint (struct lwp_info
*lp
);
976 /* Convert wait status STATUS to a string. Used for printing debug
980 status_to_str (int status
)
984 if (WIFSTOPPED (status
))
985 snprintf (buf
, sizeof (buf
), "%s (stopped)",
986 strsignal (WSTOPSIG (status
)));
987 else if (WIFSIGNALED (status
))
988 snprintf (buf
, sizeof (buf
), "%s (terminated)",
989 strsignal (WSTOPSIG (status
)));
991 snprintf (buf
, sizeof (buf
), "%d (exited)", WEXITSTATUS (status
));
996 /* Initialize the list of LWPs. Note that this module, contrary to
997 what GDB's generic threads layer does for its thread list,
998 re-initializes the LWP lists whenever we mourn or detach (which
999 doesn't involve mourning) the inferior. */
1002 init_lwp_list (void)
1004 struct lwp_info
*lp
, *lpnext
;
1006 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1016 /* Add the LWP specified by PID to the list. Return a pointer to the
1017 structure describing the new LWP. The LWP should already be stopped
1018 (with an exception for the very first LWP). */
1020 static struct lwp_info
*
1021 add_lwp (ptid_t ptid
)
1023 struct lwp_info
*lp
;
1025 gdb_assert (is_lwp (ptid
));
1027 lp
= (struct lwp_info
*) xmalloc (sizeof (struct lwp_info
));
1029 memset (lp
, 0, sizeof (struct lwp_info
));
1031 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
1035 lp
->next
= lwp_list
;
1039 if (num_lwps
> 1 && linux_nat_new_thread
!= NULL
)
1040 linux_nat_new_thread (ptid
);
1045 /* Remove the LWP specified by PID from the list. */
1048 delete_lwp (ptid_t ptid
)
1050 struct lwp_info
*lp
, *lpprev
;
1054 for (lp
= lwp_list
; lp
; lpprev
= lp
, lp
= lp
->next
)
1055 if (ptid_equal (lp
->ptid
, ptid
))
1064 lpprev
->next
= lp
->next
;
1066 lwp_list
= lp
->next
;
1071 /* Return a pointer to the structure describing the LWP corresponding
1072 to PID. If no corresponding LWP could be found, return NULL. */
1074 static struct lwp_info
*
1075 find_lwp_pid (ptid_t ptid
)
1077 struct lwp_info
*lp
;
1081 lwp
= GET_LWP (ptid
);
1083 lwp
= GET_PID (ptid
);
1085 for (lp
= lwp_list
; lp
; lp
= lp
->next
)
1086 if (lwp
== GET_LWP (lp
->ptid
))
1092 /* Call CALLBACK with its second argument set to DATA for every LWP in
1093 the list. If CALLBACK returns 1 for a particular LWP, return a
1094 pointer to the structure describing that LWP immediately.
1095 Otherwise return NULL. */
1098 iterate_over_lwps (int (*callback
) (struct lwp_info
*, void *), void *data
)
1100 struct lwp_info
*lp
, *lpnext
;
1102 for (lp
= lwp_list
; lp
; lp
= lpnext
)
1105 if ((*callback
) (lp
, data
))
1112 /* Update our internal state when changing from one fork (checkpoint,
1113 et cetera) to another indicated by NEW_PTID. We can only switch
1114 single-threaded applications, so we only create one new LWP, and
1115 the previous list is discarded. */
1118 linux_nat_switch_fork (ptid_t new_ptid
)
1120 struct lwp_info
*lp
;
1123 lp
= add_lwp (new_ptid
);
1126 init_thread_list ();
1127 add_thread_silent (new_ptid
);
1130 /* Handle the exit of a single thread LP. */
1133 exit_lwp (struct lwp_info
*lp
)
1135 struct thread_info
*th
= find_thread_pid (lp
->ptid
);
1139 if (print_thread_events
)
1140 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp
->ptid
));
1142 delete_thread (lp
->ptid
);
1145 delete_lwp (lp
->ptid
);
/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file != NULL)
    {
      int have_state = 0;

      /* Scan for the "State:" line; BUF holds it when the loop
	 terminates via the break.  */
      while (fgets (buf, sizeof (buf), status_file))
	{
	  if (strncmp (buf, "State:", 6) == 0)
	    {
	      have_state = 1;
	      break;
	    }
	}
      if (have_state && strstr (buf, "T (stopped)") != NULL)
	retval = 1;
      fclose (status_file);
    }
  return retval;
}
1179 /* Wait for the LWP specified by LP, which we have just attached to.
1180 Returns a wait status for that LWP, to cache. */
1183 linux_nat_post_attach_wait (ptid_t ptid
, int first
, int *cloned
,
1186 pid_t new_pid
, pid
= GET_LWP (ptid
);
1189 if (pid_is_stopped (pid
))
1191 if (debug_linux_nat
)
1192 fprintf_unfiltered (gdb_stdlog
,
1193 "LNPAW: Attaching to a stopped process\n");
1195 /* The process is definitely stopped. It is in a job control
1196 stop, unless the kernel predates the TASK_STOPPED /
1197 TASK_TRACED distinction, in which case it might be in a
1198 ptrace stop. Make sure it is in a ptrace stop; from there we
1199 can kill it, signal it, et cetera.
1201 First make sure there is a pending SIGSTOP. Since we are
1202 already attached, the process can not transition from stopped
1203 to running without a PTRACE_CONT; so we know this signal will
1204 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1205 probably already in the queue (unless this kernel is old
1206 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1207 is not an RT signal, it can only be queued once. */
1208 kill_lwp (pid
, SIGSTOP
);
1210 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1211 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1212 ptrace (PTRACE_CONT
, pid
, 0, 0);
1215 /* Make sure the initial process is stopped. The user-level threads
1216 layer might want to poke around in the inferior, and that won't
1217 work if things haven't stabilized yet. */
1218 new_pid
= my_waitpid (pid
, &status
, 0);
1219 if (new_pid
== -1 && errno
== ECHILD
)
1222 warning (_("%s is a cloned process"), target_pid_to_str (ptid
));
1224 /* Try again with __WCLONE to check cloned processes. */
1225 new_pid
= my_waitpid (pid
, &status
, __WCLONE
);
1229 gdb_assert (pid
== new_pid
&& WIFSTOPPED (status
));
1231 if (WSTOPSIG (status
) != SIGSTOP
)
1234 if (debug_linux_nat
)
1235 fprintf_unfiltered (gdb_stdlog
,
1236 "LNPAW: Received %s after attaching\n",
1237 status_to_str (status
));
1243 /* Attach to the LWP specified by PID. Return 0 if successful or -1
1244 if the new LWP could not be attached. */
1247 lin_lwp_attach_lwp (ptid_t ptid
)
1249 struct lwp_info
*lp
;
1250 enum sigchld_state async_events_original_state
;
1252 gdb_assert (is_lwp (ptid
));
1254 async_events_original_state
= linux_nat_async_events (sigchld_sync
);
1256 lp
= find_lwp_pid (ptid
);
1258 /* We assume that we're already attached to any LWP that has an id
1259 equal to the overall process id, and to any LWP that is already
1260 in our list of LWPs. If we're not seeing exit events from threads
1261 and we've had PID wraparound since we last tried to stop all threads,
1262 this assumption might be wrong; fortunately, this is very unlikely
1264 if (GET_LWP (ptid
) != GET_PID (ptid
) && lp
== NULL
)
1266 int status
, cloned
= 0, signalled
= 0;
1268 if (ptrace (PTRACE_ATTACH
, GET_LWP (ptid
), 0, 0) < 0)
1270 /* If we fail to attach to the thread, issue a warning,
1271 but continue. One way this can happen is if thread
1272 creation is interrupted; as of Linux kernel 2.6.19, a
1273 bug may place threads in the thread list and then fail
1275 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid
),
1276 safe_strerror (errno
));
1280 if (debug_linux_nat
)
1281 fprintf_unfiltered (gdb_stdlog
,
1282 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1283 target_pid_to_str (ptid
));
1285 status
= linux_nat_post_attach_wait (ptid
, 0, &cloned
, &signalled
);
1286 lp
= add_lwp (ptid
);
1288 lp
->cloned
= cloned
;
1289 lp
->signalled
= signalled
;
1290 if (WSTOPSIG (status
) != SIGSTOP
)
1293 lp
->status
= status
;
1296 target_post_attach (GET_LWP (lp
->ptid
));
1298 if (debug_linux_nat
)
1300 fprintf_unfiltered (gdb_stdlog
,
1301 "LLAL: waitpid %s received %s\n",
1302 target_pid_to_str (ptid
),
1303 status_to_str (status
));
1308 /* We assume that the LWP representing the original process is
1309 already stopped. Mark it as stopped in the data structure
1310 that the GNU/linux ptrace layer uses to keep track of
1311 threads. Note that this won't have already been done since
1312 the main thread will have, we assume, been stopped by an
1313 attach from a different layer. */
1315 lp
= add_lwp (ptid
);
1319 linux_nat_async_events (async_events_original_state
);
1324 linux_nat_create_inferior (struct target_ops
*ops
,
1325 char *exec_file
, char *allargs
, char **env
,
1328 int saved_async
= 0;
1329 #ifdef HAVE_PERSONALITY
1330 int personality_orig
= 0, personality_set
= 0;
1331 #endif /* HAVE_PERSONALITY */
1333 /* The fork_child mechanism is synchronous and calls target_wait, so
1334 we have to mask the async mode. */
1336 if (target_can_async_p ())
1337 /* Mask async mode. Creating a child requires a loop calling
1338 wait_for_inferior currently. */
1339 saved_async
= linux_nat_async_mask (0);
1342 /* Restore the original signal mask. */
1343 sigprocmask (SIG_SETMASK
, &normal_mask
, NULL
);
1344 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1345 suspend_mask
= normal_mask
;
1346 sigdelset (&suspend_mask
, SIGCHLD
);
1349 /* Set SIGCHLD to the default action, until after execing the child,
1350 since the inferior inherits the superior's signal mask. It will
1351 be blocked again in linux_nat_wait, which is only reached after
1352 the inferior execing. */
1353 linux_nat_async_events (sigchld_default
);
1355 #ifdef HAVE_PERSONALITY
1356 if (disable_randomization
)
1359 personality_orig
= personality (0xffffffff);
1360 if (errno
== 0 && !(personality_orig
& ADDR_NO_RANDOMIZE
))
1362 personality_set
= 1;
1363 personality (personality_orig
| ADDR_NO_RANDOMIZE
);
1365 if (errno
!= 0 || (personality_set
1366 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE
)))
1367 warning (_("Error disabling address space randomization: %s"),
1368 safe_strerror (errno
));
1370 #endif /* HAVE_PERSONALITY */
1372 linux_ops
->to_create_inferior (ops
, exec_file
, allargs
, env
, from_tty
);
1374 #ifdef HAVE_PERSONALITY
1375 if (personality_set
)
1378 personality (personality_orig
);
1380 warning (_("Error restoring address space randomization: %s"),
1381 safe_strerror (errno
));
1383 #endif /* HAVE_PERSONALITY */
1386 linux_nat_async_mask (saved_async
);
1390 linux_nat_attach (struct target_ops
*ops
, char *args
, int from_tty
)
1392 struct lwp_info
*lp
;
1396 /* FIXME: We should probably accept a list of process id's, and
1397 attach all of them. */
1398 linux_ops
->to_attach (ops
, args
, from_tty
);
1400 if (!target_can_async_p ())
1402 /* Restore the original signal mask. */
1403 sigprocmask (SIG_SETMASK
, &normal_mask
, NULL
);
1404 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1405 suspend_mask
= normal_mask
;
1406 sigdelset (&suspend_mask
, SIGCHLD
);
1409 /* The ptrace base target adds the main thread with (pid,0,0)
1410 format. Decorate it with lwp info. */
1411 ptid
= BUILD_LWP (GET_PID (inferior_ptid
), GET_PID (inferior_ptid
));
1412 thread_change_ptid (inferior_ptid
, ptid
);
1414 /* Add the initial process as the first LWP to the list. */
1415 lp
= add_lwp (ptid
);
1417 status
= linux_nat_post_attach_wait (lp
->ptid
, 1, &lp
->cloned
,
1421 /* Save the wait status to report later. */
1423 if (debug_linux_nat
)
1424 fprintf_unfiltered (gdb_stdlog
,
1425 "LNA: waitpid %ld, saving status %s\n",
1426 (long) GET_PID (lp
->ptid
), status_to_str (status
));
1428 if (!target_can_async_p ())
1429 lp
->status
= status
;
1432 /* We already waited for this LWP, so put the wait result on the
1433 pipe. The event loop will wake up and gets us to handling
1435 linux_nat_event_pipe_push (GET_PID (lp
->ptid
), status
,
1436 lp
->cloned
? __WCLONE
: 0);
1437 /* Register in the event loop. */
1438 target_async (inferior_event_handler
, 0);
1442 /* Get pending status of LP. */
1444 get_pending_status (struct lwp_info
*lp
, int *status
)
1446 struct target_waitstatus last
;
1449 get_last_target_status (&last_ptid
, &last
);
1451 /* If this lwp is the ptid that GDB is processing an event from, the
1452 signal will be in stop_signal. Otherwise, in all-stop + sync
1453 mode, we may cache pending events in lp->status while trying to
1454 stop all threads (see stop_wait_callback). In async mode, the
1455 events are always cached in waitpid_queue. */
1461 enum target_signal signo
= TARGET_SIGNAL_0
;
1463 if (is_executing (lp
->ptid
))
1465 /* If the core thought this lwp was executing --- e.g., the
1466 executing property hasn't been updated yet, but the
1467 thread has been stopped with a stop_callback /
1468 stop_wait_callback sequence (see linux_nat_detach for
1469 example) --- we can only have pending events in the local
1471 if (queued_waitpid (GET_LWP (lp
->ptid
), status
, __WALL
) != -1)
1473 if (WIFSTOPPED (*status
))
1474 signo
= target_signal_from_host (WSTOPSIG (*status
));
1476 /* If not stopped, then the lwp is gone, no use in
1477 resending a signal. */
1482 /* If the core knows the thread is not executing, then we
1483 have the last signal recorded in
1484 thread_info->stop_signal. */
1486 struct thread_info
*tp
= find_thread_pid (lp
->ptid
);
1487 signo
= tp
->stop_signal
;
1490 if (signo
!= TARGET_SIGNAL_0
1491 && !signal_pass_state (signo
))
1493 if (debug_linux_nat
)
1494 fprintf_unfiltered (gdb_stdlog
, "\
1495 GPT: lwp %s had signal %s, but it is in no pass state\n",
1496 target_pid_to_str (lp
->ptid
),
1497 target_signal_to_string (signo
));
1501 if (signo
!= TARGET_SIGNAL_0
)
1502 *status
= W_STOPCODE (target_signal_to_host (signo
));
1504 if (debug_linux_nat
)
1505 fprintf_unfiltered (gdb_stdlog
,
1506 "GPT: lwp %s as pending signal %s\n",
1507 target_pid_to_str (lp
->ptid
),
1508 target_signal_to_string (signo
));
1513 if (GET_LWP (lp
->ptid
) == GET_LWP (last_ptid
))
1515 struct thread_info
*tp
= find_thread_pid (lp
->ptid
);
1516 if (tp
->stop_signal
!= TARGET_SIGNAL_0
1517 && signal_pass_state (tp
->stop_signal
))
1518 *status
= W_STOPCODE (target_signal_to_host (tp
->stop_signal
));
1520 else if (target_can_async_p ())
1521 queued_waitpid (GET_LWP (lp
->ptid
), status
, __WALL
);
1523 *status
= lp
->status
;
1530 detach_callback (struct lwp_info
*lp
, void *data
)
1532 gdb_assert (lp
->status
== 0 || WIFSTOPPED (lp
->status
));
1534 if (debug_linux_nat
&& lp
->status
)
1535 fprintf_unfiltered (gdb_stdlog
, "DC: Pending %s for %s on detach.\n",
1536 strsignal (WSTOPSIG (lp
->status
)),
1537 target_pid_to_str (lp
->ptid
));
1539 /* If there is a pending SIGSTOP, get rid of it. */
1542 if (debug_linux_nat
)
1543 fprintf_unfiltered (gdb_stdlog
,
1544 "DC: Sending SIGCONT to %s\n",
1545 target_pid_to_str (lp
->ptid
));
1547 kill_lwp (GET_LWP (lp
->ptid
), SIGCONT
);
1551 /* We don't actually detach from the LWP that has an id equal to the
1552 overall process id just yet. */
1553 if (GET_LWP (lp
->ptid
) != GET_PID (lp
->ptid
))
1557 /* Pass on any pending signal for this LWP. */
1558 get_pending_status (lp
, &status
);
1561 if (ptrace (PTRACE_DETACH
, GET_LWP (lp
->ptid
), 0,
1562 WSTOPSIG (status
)) < 0)
1563 error (_("Can't detach %s: %s"), target_pid_to_str (lp
->ptid
),
1564 safe_strerror (errno
));
1566 if (debug_linux_nat
)
1567 fprintf_unfiltered (gdb_stdlog
,
1568 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1569 target_pid_to_str (lp
->ptid
),
1570 strsignal (WSTOPSIG (lp
->status
)));
1572 delete_lwp (lp
->ptid
);
1579 linux_nat_detach (struct target_ops
*ops
, char *args
, int from_tty
)
1583 enum target_signal sig
;
1585 if (target_can_async_p ())
1586 linux_nat_async (NULL
, 0);
1588 /* Stop all threads before detaching. ptrace requires that the
1589 thread is stopped to sucessfully detach. */
1590 iterate_over_lwps (stop_callback
, NULL
);
1591 /* ... and wait until all of them have reported back that
1592 they're no longer running. */
1593 iterate_over_lwps (stop_wait_callback
, NULL
);
1595 iterate_over_lwps (detach_callback
, NULL
);
1597 /* Only the initial process should be left right now. */
1598 gdb_assert (num_lwps
== 1);
1600 /* Pass on any pending signal for the last LWP. */
1601 if ((args
== NULL
|| *args
== '\0')
1602 && get_pending_status (lwp_list
, &status
) != -1
1603 && WIFSTOPPED (status
))
1605 /* Put the signal number in ARGS so that inf_ptrace_detach will
1606 pass it along with PTRACE_DETACH. */
1608 sprintf (args
, "%d", (int) WSTOPSIG (status
));
1609 fprintf_unfiltered (gdb_stdlog
,
1610 "LND: Sending signal %s to %s\n",
1612 target_pid_to_str (lwp_list
->ptid
));
1615 /* Destroy LWP info; it's no longer valid. */
1618 pid
= GET_PID (inferior_ptid
);
1619 inferior_ptid
= pid_to_ptid (pid
);
1620 linux_ops
->to_detach (ops
, args
, from_tty
);
1622 if (target_can_async_p ())
1623 drain_queued_events (pid
);
1629 resume_callback (struct lwp_info
*lp
, void *data
)
1631 if (lp
->stopped
&& lp
->status
== 0)
1633 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
1634 0, TARGET_SIGNAL_0
);
1635 if (debug_linux_nat
)
1636 fprintf_unfiltered (gdb_stdlog
,
1637 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1638 target_pid_to_str (lp
->ptid
));
1641 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1643 else if (lp
->stopped
&& debug_linux_nat
)
1644 fprintf_unfiltered (gdb_stdlog
, "RC: Not resuming sibling %s (has pending)\n",
1645 target_pid_to_str (lp
->ptid
));
1646 else if (debug_linux_nat
)
1647 fprintf_unfiltered (gdb_stdlog
, "RC: Not resuming sibling %s (not stopped)\n",
1648 target_pid_to_str (lp
->ptid
));
1654 resume_clear_callback (struct lwp_info
*lp
, void *data
)
1661 resume_set_callback (struct lwp_info
*lp
, void *data
)
1668 linux_nat_resume (ptid_t ptid
, int step
, enum target_signal signo
)
1670 struct lwp_info
*lp
;
1673 if (debug_linux_nat
)
1674 fprintf_unfiltered (gdb_stdlog
,
1675 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1676 step
? "step" : "resume",
1677 target_pid_to_str (ptid
),
1678 signo
? strsignal (signo
) : "0",
1679 target_pid_to_str (inferior_ptid
));
1681 if (target_can_async_p ())
1682 /* Block events while we're here. */
1683 linux_nat_async_events (sigchld_sync
);
1685 /* A specific PTID means `step only this process id'. */
1686 resume_all
= (PIDGET (ptid
) == -1);
1688 if (non_stop
&& resume_all
)
1689 internal_error (__FILE__
, __LINE__
,
1690 "can't resume all in non-stop mode");
1695 iterate_over_lwps (resume_set_callback
, NULL
);
1697 iterate_over_lwps (resume_clear_callback
, NULL
);
1700 /* If PID is -1, it's the current inferior that should be
1701 handled specially. */
1702 if (PIDGET (ptid
) == -1)
1703 ptid
= inferior_ptid
;
1705 lp
= find_lwp_pid (ptid
);
1706 gdb_assert (lp
!= NULL
);
1708 /* Convert to something the lower layer understands. */
1709 ptid
= pid_to_ptid (GET_LWP (lp
->ptid
));
1711 /* Remember if we're stepping. */
1714 /* Mark this LWP as resumed. */
1717 /* If we have a pending wait status for this thread, there is no
1718 point in resuming the process. But first make sure that
1719 linux_nat_wait won't preemptively handle the event - we
1720 should never take this short-circuit if we are going to
1721 leave LP running, since we have skipped resuming all the
1722 other threads. This bit of code needs to be synchronized
1723 with linux_nat_wait. */
1725 /* In async mode, we never have pending wait status. */
1726 if (target_can_async_p () && lp
->status
)
1727 internal_error (__FILE__
, __LINE__
, "Pending status in async mode");
1729 if (lp
->status
&& WIFSTOPPED (lp
->status
))
1732 struct inferior
*inf
;
1734 inf
= find_inferior_pid (ptid_get_pid (ptid
));
1736 saved_signo
= target_signal_from_host (WSTOPSIG (lp
->status
));
1738 /* Defer to common code if we're gaining control of the
1740 if (inf
->stop_soon
== NO_STOP_QUIETLY
1741 && signal_stop_state (saved_signo
) == 0
1742 && signal_print_state (saved_signo
) == 0
1743 && signal_pass_state (saved_signo
) == 1)
1745 if (debug_linux_nat
)
1746 fprintf_unfiltered (gdb_stdlog
,
1747 "LLR: Not short circuiting for ignored "
1748 "status 0x%x\n", lp
->status
);
1750 /* FIXME: What should we do if we are supposed to continue
1751 this thread with a signal? */
1752 gdb_assert (signo
== TARGET_SIGNAL_0
);
1753 signo
= saved_signo
;
1760 /* FIXME: What should we do if we are supposed to continue
1761 this thread with a signal? */
1762 gdb_assert (signo
== TARGET_SIGNAL_0
);
1764 if (debug_linux_nat
)
1765 fprintf_unfiltered (gdb_stdlog
,
1766 "LLR: Short circuiting for status 0x%x\n",
1772 /* Mark LWP as not stopped to prevent it from being continued by
1777 iterate_over_lwps (resume_callback
, NULL
);
1779 linux_ops
->to_resume (ptid
, step
, signo
);
1780 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
1782 if (debug_linux_nat
)
1783 fprintf_unfiltered (gdb_stdlog
,
1784 "LLR: %s %s, %s (resume event thread)\n",
1785 step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1786 target_pid_to_str (ptid
),
1787 signo
? strsignal (signo
) : "0");
1789 if (target_can_async_p ())
1790 target_async (inferior_event_handler
, 0);
/* Issue kill to specified lwp.  */

/* Set once tkill is known to be unavailable, so we stop retrying it.  */
static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);

      /* ENOSYS means the kernel lacks tkill; anything else is a real
	 answer from tkill that we can return directly.  */
      if (errno != ENOSYS)
	return ret;
      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}
1819 /* Handle a GNU/Linux extended wait response. If we see a clone
1820 event, we need to add the new LWP to our list (and not report the
1821 trap to higher layers). This function returns non-zero if the
1822 event should be ignored and we should wait again. If STOPPING is
1823 true, the new LWP remains stopped, otherwise it is continued. */
1826 linux_handle_extended_wait (struct lwp_info
*lp
, int status
,
1829 int pid
= GET_LWP (lp
->ptid
);
1830 struct target_waitstatus
*ourstatus
= &lp
->waitstatus
;
1831 struct lwp_info
*new_lp
= NULL
;
1832 int event
= status
>> 16;
1834 if (event
== PTRACE_EVENT_FORK
|| event
== PTRACE_EVENT_VFORK
1835 || event
== PTRACE_EVENT_CLONE
)
1837 unsigned long new_pid
;
1840 ptrace (PTRACE_GETEVENTMSG
, pid
, 0, &new_pid
);
1842 /* If we haven't already seen the new PID stop, wait for it now. */
1843 if (! pull_pid_from_list (&stopped_pids
, new_pid
, &status
))
1845 /* The new child has a pending SIGSTOP. We can't affect it until it
1846 hits the SIGSTOP, but we're already attached. */
1847 ret
= my_waitpid (new_pid
, &status
,
1848 (event
== PTRACE_EVENT_CLONE
) ? __WCLONE
: 0);
1850 perror_with_name (_("waiting for new child"));
1851 else if (ret
!= new_pid
)
1852 internal_error (__FILE__
, __LINE__
,
1853 _("wait returned unexpected PID %d"), ret
);
1854 else if (!WIFSTOPPED (status
))
1855 internal_error (__FILE__
, __LINE__
,
1856 _("wait returned unexpected status 0x%x"), status
);
1859 ourstatus
->value
.related_pid
= ptid_build (new_pid
, new_pid
, 0);
1861 if (event
== PTRACE_EVENT_FORK
)
1862 ourstatus
->kind
= TARGET_WAITKIND_FORKED
;
1863 else if (event
== PTRACE_EVENT_VFORK
)
1864 ourstatus
->kind
= TARGET_WAITKIND_VFORKED
;
1867 struct cleanup
*old_chain
;
1869 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
1870 new_lp
= add_lwp (BUILD_LWP (new_pid
, GET_PID (inferior_ptid
)));
1872 new_lp
->stopped
= 1;
1874 if (WSTOPSIG (status
) != SIGSTOP
)
1876 /* This can happen if someone starts sending signals to
1877 the new thread before it gets a chance to run, which
1878 have a lower number than SIGSTOP (e.g. SIGUSR1).
1879 This is an unlikely case, and harder to handle for
1880 fork / vfork than for clone, so we do not try - but
1881 we handle it for clone events here. We'll send
1882 the other signal on to the thread below. */
1884 new_lp
->signalled
= 1;
1891 /* Add the new thread to GDB's lists as soon as possible
1894 1) the frontend doesn't have to wait for a stop to
1897 2) we tag it with the correct running state. */
1899 /* If the thread_db layer is active, let it know about
1900 this new thread, and add it to GDB's list. */
1901 if (!thread_db_attach_lwp (new_lp
->ptid
))
1903 /* We're not using thread_db. Add it to GDB's
1905 target_post_attach (GET_LWP (new_lp
->ptid
));
1906 add_thread (new_lp
->ptid
);
1911 set_running (new_lp
->ptid
, 1);
1912 set_executing (new_lp
->ptid
, 1);
1918 new_lp
->stopped
= 0;
1919 new_lp
->resumed
= 1;
1920 ptrace (PTRACE_CONT
, new_pid
, 0,
1921 status
? WSTOPSIG (status
) : 0);
1924 if (debug_linux_nat
)
1925 fprintf_unfiltered (gdb_stdlog
,
1926 "LHEW: Got clone event from LWP %ld, resuming\n",
1927 GET_LWP (lp
->ptid
));
1928 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
1936 if (event
== PTRACE_EVENT_EXEC
)
1938 ourstatus
->kind
= TARGET_WAITKIND_EXECD
;
1939 ourstatus
->value
.execd_pathname
1940 = xstrdup (linux_child_pid_to_exec_file (pid
));
1942 if (linux_parent_pid
)
1944 detach_breakpoints (linux_parent_pid
);
1945 ptrace (PTRACE_DETACH
, linux_parent_pid
, 0, 0);
1947 linux_parent_pid
= 0;
1950 /* At this point, all inserted breakpoints are gone. Doing this
1951 as soon as we detect an exec prevents the badness of deleting
1952 a breakpoint writing the current "shadow contents" to lift
1953 the bp. That shadow is NOT valid after an exec.
1955 Note that we have to do this after the detach_breakpoints
1956 call above, otherwise breakpoints wouldn't be lifted from the
1957 parent on a vfork, because detach_breakpoints would think
1958 that breakpoints are not inserted. */
1959 mark_breakpoints_out ();
1963 internal_error (__FILE__
, __LINE__
,
1964 _("unknown ptrace event %d"), event
);
1967 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1971 wait_lwp (struct lwp_info
*lp
)
1975 int thread_dead
= 0;
1977 gdb_assert (!lp
->stopped
);
1978 gdb_assert (lp
->status
== 0);
1980 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, 0);
1981 if (pid
== -1 && errno
== ECHILD
)
1983 pid
= my_waitpid (GET_LWP (lp
->ptid
), &status
, __WCLONE
);
1984 if (pid
== -1 && errno
== ECHILD
)
1986 /* The thread has previously exited. We need to delete it
1987 now because, for some vendor 2.4 kernels with NPTL
1988 support backported, there won't be an exit event unless
1989 it is the main thread. 2.6 kernels will report an exit
1990 event for each thread that exits, as expected. */
1992 if (debug_linux_nat
)
1993 fprintf_unfiltered (gdb_stdlog
, "WL: %s vanished.\n",
1994 target_pid_to_str (lp
->ptid
));
2000 gdb_assert (pid
== GET_LWP (lp
->ptid
));
2002 if (debug_linux_nat
)
2004 fprintf_unfiltered (gdb_stdlog
,
2005 "WL: waitpid %s received %s\n",
2006 target_pid_to_str (lp
->ptid
),
2007 status_to_str (status
));
2011 /* Check if the thread has exited. */
2012 if (WIFEXITED (status
) || WIFSIGNALED (status
))
2015 if (debug_linux_nat
)
2016 fprintf_unfiltered (gdb_stdlog
, "WL: %s exited.\n",
2017 target_pid_to_str (lp
->ptid
));
2026 gdb_assert (WIFSTOPPED (status
));
2028 /* Handle GNU/Linux's extended waitstatus for trace events. */
2029 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
2031 if (debug_linux_nat
)
2032 fprintf_unfiltered (gdb_stdlog
,
2033 "WL: Handling extended status 0x%06x\n",
2035 if (linux_handle_extended_wait (lp
, status
, 1))
2036 return wait_lwp (lp
);
2042 /* Save the most recent siginfo for LP. This is currently only called
2043 for SIGTRAP; some ports use the si_addr field for
2044 target_stopped_data_address. In the future, it may also be used to
2045 restore the siginfo of requeued signals. */
2048 save_siginfo (struct lwp_info
*lp
)
2051 ptrace (PTRACE_GETSIGINFO
, GET_LWP (lp
->ptid
),
2052 (PTRACE_TYPE_ARG3
) 0, &lp
->siginfo
);
2055 memset (&lp
->siginfo
, 0, sizeof (lp
->siginfo
));
2058 /* Send a SIGSTOP to LP. */
2061 stop_callback (struct lwp_info
*lp
, void *data
)
2063 if (!lp
->stopped
&& !lp
->signalled
)
2067 if (debug_linux_nat
)
2069 fprintf_unfiltered (gdb_stdlog
,
2070 "SC: kill %s **<SIGSTOP>**\n",
2071 target_pid_to_str (lp
->ptid
));
2074 ret
= kill_lwp (GET_LWP (lp
->ptid
), SIGSTOP
);
2075 if (debug_linux_nat
)
2077 fprintf_unfiltered (gdb_stdlog
,
2078 "SC: lwp kill %d %s\n",
2080 errno
? safe_strerror (errno
) : "ERRNO-OK");
2084 gdb_assert (lp
->status
== 0);
2090 /* Return non-zero if LWP PID has a pending SIGINT. */
2093 linux_nat_has_pending_sigint (int pid
)
2095 sigset_t pending
, blocked
, ignored
;
2098 linux_proc_pending_signals (pid
, &pending
, &blocked
, &ignored
);
2100 if (sigismember (&pending
, SIGINT
)
2101 && !sigismember (&ignored
, SIGINT
))
2107 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2110 set_ignore_sigint (struct lwp_info
*lp
, void *data
)
2112 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2113 flag to consume the next one. */
2114 if (lp
->stopped
&& lp
->status
!= 0 && WIFSTOPPED (lp
->status
)
2115 && WSTOPSIG (lp
->status
) == SIGINT
)
2118 lp
->ignore_sigint
= 1;
2123 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2124 This function is called after we know the LWP has stopped; if the LWP
2125 stopped before the expected SIGINT was delivered, then it will never have
2126 arrived. Also, if the signal was delivered to a shared queue and consumed
2127 by a different thread, it will never be delivered to this LWP. */
2130 maybe_clear_ignore_sigint (struct lwp_info
*lp
)
2132 if (!lp
->ignore_sigint
)
2135 if (!linux_nat_has_pending_sigint (GET_LWP (lp
->ptid
)))
2137 if (debug_linux_nat
)
2138 fprintf_unfiltered (gdb_stdlog
,
2139 "MCIS: Clearing bogus flag for %s\n",
2140 target_pid_to_str (lp
->ptid
));
2141 lp
->ignore_sigint
= 0;
2145 /* Wait until LP is stopped. */
2148 stop_wait_callback (struct lwp_info
*lp
, void *data
)
2154 status
= wait_lwp (lp
);
2158 if (lp
->ignore_sigint
&& WIFSTOPPED (status
)
2159 && WSTOPSIG (status
) == SIGINT
)
2161 lp
->ignore_sigint
= 0;
2164 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2165 if (debug_linux_nat
)
2166 fprintf_unfiltered (gdb_stdlog
,
2167 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
2168 target_pid_to_str (lp
->ptid
),
2169 errno
? safe_strerror (errno
) : "OK");
2171 return stop_wait_callback (lp
, NULL
);
2174 maybe_clear_ignore_sigint (lp
);
2176 if (WSTOPSIG (status
) != SIGSTOP
)
2178 if (WSTOPSIG (status
) == SIGTRAP
)
2180 /* If a LWP other than the LWP that we're reporting an
2181 event for has hit a GDB breakpoint (as opposed to
2182 some random trap signal), then just arrange for it to
2183 hit it again later. We don't keep the SIGTRAP status
2184 and don't forward the SIGTRAP signal to the LWP. We
2185 will handle the current event, eventually we will
2186 resume all LWPs, and this one will get its breakpoint
2189 If we do not do this, then we run the risk that the
2190 user will delete or disable the breakpoint, but the
2191 thread will have already tripped on it. */
2193 /* Save the trap's siginfo in case we need it later. */
2196 /* Now resume this LWP and get the SIGSTOP event. */
2198 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2199 if (debug_linux_nat
)
2201 fprintf_unfiltered (gdb_stdlog
,
2202 "PTRACE_CONT %s, 0, 0 (%s)\n",
2203 target_pid_to_str (lp
->ptid
),
2204 errno
? safe_strerror (errno
) : "OK");
2206 fprintf_unfiltered (gdb_stdlog
,
2207 "SWC: Candidate SIGTRAP event in %s\n",
2208 target_pid_to_str (lp
->ptid
));
2210 /* Hold this event/waitstatus while we check to see if
2211 there are any more (we still want to get that SIGSTOP). */
2212 stop_wait_callback (lp
, NULL
);
2214 if (target_can_async_p ())
2216 /* Don't leave a pending wait status in async mode.
2217 Retrigger the breakpoint. */
2218 if (!cancel_breakpoint (lp
))
2220 /* There was no gdb breakpoint set at pc. Put
2221 the event back in the queue. */
2222 if (debug_linux_nat
)
2223 fprintf_unfiltered (gdb_stdlog
, "\
2224 SWC: leaving SIGTRAP in local queue of %s\n", target_pid_to_str (lp
->ptid
));
2225 push_waitpid (GET_LWP (lp
->ptid
),
2226 W_STOPCODE (SIGTRAP
),
2227 lp
->cloned
? __WCLONE
: 0);
2232 /* Hold the SIGTRAP for handling by
2234 /* If there's another event, throw it back into the
2238 if (debug_linux_nat
)
2239 fprintf_unfiltered (gdb_stdlog
,
2240 "SWC: kill %s, %s\n",
2241 target_pid_to_str (lp
->ptid
),
2242 status_to_str ((int) status
));
2243 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (lp
->status
));
2245 /* Save the sigtrap event. */
2246 lp
->status
= status
;
2252 /* The thread was stopped with a signal other than
2253 SIGSTOP, and didn't accidentally trip a breakpoint. */
2255 if (debug_linux_nat
)
2257 fprintf_unfiltered (gdb_stdlog
,
2258 "SWC: Pending event %s in %s\n",
2259 status_to_str ((int) status
),
2260 target_pid_to_str (lp
->ptid
));
2262 /* Now resume this LWP and get the SIGSTOP event. */
2264 ptrace (PTRACE_CONT
, GET_LWP (lp
->ptid
), 0, 0);
2265 if (debug_linux_nat
)
2266 fprintf_unfiltered (gdb_stdlog
,
2267 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2268 target_pid_to_str (lp
->ptid
),
2269 errno
? safe_strerror (errno
) : "OK");
2271 /* Hold this event/waitstatus while we check to see if
2272 there are any more (we still want to get that SIGSTOP). */
2273 stop_wait_callback (lp
, NULL
);
2275 /* If the lp->status field is still empty, use it to
2276 hold this event. If not, then this event must be
2277 returned to the event queue of the LWP. */
2278 if (lp
->status
|| target_can_async_p ())
2280 if (debug_linux_nat
)
2282 fprintf_unfiltered (gdb_stdlog
,
2283 "SWC: kill %s, %s\n",
2284 target_pid_to_str (lp
->ptid
),
2285 status_to_str ((int) status
));
2287 kill_lwp (GET_LWP (lp
->ptid
), WSTOPSIG (status
));
2290 lp
->status
= status
;
2296 /* We caught the SIGSTOP that we intended to catch, so
2297 there's no SIGSTOP pending. */
2306 /* Return non-zero if LP has a wait status pending. */
2309 status_callback (struct lwp_info
*lp
, void *data
)
2311 /* Only report a pending wait status if we pretend that this has
2312 indeed been resumed. */
2313 return (lp
->status
!= 0 && lp
->resumed
);
2316 /* Return non-zero if LP isn't stopped. */
2319 running_callback (struct lwp_info
*lp
, void *data
)
2321 return (lp
->stopped
== 0 || (lp
->status
!= 0 && lp
->resumed
));
2324 /* Count the LWP's that have had events. */
2327 count_events_callback (struct lwp_info
*lp
, void *data
)
2331 gdb_assert (count
!= NULL
);
2333 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2334 if (lp
->status
!= 0 && lp
->resumed
2335 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2341 /* Select the LWP (if any) that is currently being single-stepped. */
2344 select_singlestep_lwp_callback (struct lwp_info
*lp
, void *data
)
2346 if (lp
->step
&& lp
->status
!= 0)
2352 /* Select the Nth LWP that has had a SIGTRAP event. */
2355 select_event_lwp_callback (struct lwp_info
*lp
, void *data
)
2357 int *selector
= data
;
2359 gdb_assert (selector
!= NULL
);
2361 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2362 if (lp
->status
!= 0 && lp
->resumed
2363 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
)
2364 if ((*selector
)-- == 0)
2371 cancel_breakpoint (struct lwp_info
*lp
)
2373 /* Arrange for a breakpoint to be hit again later. We don't keep
2374 the SIGTRAP status and don't forward the SIGTRAP signal to the
2375 LWP. We will handle the current event, eventually we will resume
2376 this LWP, and this breakpoint will trap again.
2378 If we do not do this, then we run the risk that the user will
2379 delete or disable the breakpoint, but the LWP will have already
2382 struct regcache
*regcache
= get_thread_regcache (lp
->ptid
);
2383 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
2386 pc
= regcache_read_pc (regcache
) - gdbarch_decr_pc_after_break (gdbarch
);
2387 if (breakpoint_inserted_here_p (pc
))
2389 if (debug_linux_nat
)
2390 fprintf_unfiltered (gdb_stdlog
,
2391 "CB: Push back breakpoint for %s\n",
2392 target_pid_to_str (lp
->ptid
));
2394 /* Back up the PC if necessary. */
2395 if (gdbarch_decr_pc_after_break (gdbarch
))
2396 regcache_write_pc (regcache
, pc
);
2404 cancel_breakpoints_callback (struct lwp_info
*lp
, void *data
)
2406 struct lwp_info
*event_lp
= data
;
2408 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2412 /* If a LWP other than the LWP that we're reporting an event for has
2413 hit a GDB breakpoint (as opposed to some random trap signal),
2414 then just arrange for it to hit it again later. We don't keep
2415 the SIGTRAP status and don't forward the SIGTRAP signal to the
2416 LWP. We will handle the current event, eventually we will resume
2417 all LWPs, and this one will get its breakpoint trap again.
2419 If we do not do this, then we run the risk that the user will
2420 delete or disable the breakpoint, but the LWP will have already
2424 && WIFSTOPPED (lp
->status
) && WSTOPSIG (lp
->status
) == SIGTRAP
2425 && cancel_breakpoint (lp
))
2426 /* Throw away the SIGTRAP. */
2432 /* Select one LWP out of those that have events pending. */
2435 select_event_lwp (struct lwp_info
**orig_lp
, int *status
)
2438 int random_selector
;
2439 struct lwp_info
*event_lp
;
2441 /* Record the wait status for the original LWP. */
2442 (*orig_lp
)->status
= *status
;
2444 /* Give preference to any LWP that is being single-stepped. */
2445 event_lp
= iterate_over_lwps (select_singlestep_lwp_callback
, NULL
);
2446 if (event_lp
!= NULL
)
2448 if (debug_linux_nat
)
2449 fprintf_unfiltered (gdb_stdlog
,
2450 "SEL: Select single-step %s\n",
2451 target_pid_to_str (event_lp
->ptid
));
2455 /* No single-stepping LWP. Select one at random, out of those
2456 which have had SIGTRAP events. */
2458 /* First see how many SIGTRAP events we have. */
2459 iterate_over_lwps (count_events_callback
, &num_events
);
2461 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2462 random_selector
= (int)
2463 ((num_events
* (double) rand ()) / (RAND_MAX
+ 1.0));
2465 if (debug_linux_nat
&& num_events
> 1)
2466 fprintf_unfiltered (gdb_stdlog
,
2467 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2468 num_events
, random_selector
);
2470 event_lp
= iterate_over_lwps (select_event_lwp_callback
,
2474 if (event_lp
!= NULL
)
2476 /* Switch the event LWP. */
2477 *orig_lp
= event_lp
;
2478 *status
= event_lp
->status
;
2481 /* Flush the wait status for the event LWP. */
2482 (*orig_lp
)->status
= 0;
2485 /* Return non-zero if LP has been resumed. */
2488 resumed_callback (struct lwp_info
*lp
, void *data
)
2493 /* Stop an active thread, verify it still exists, then resume it. */
2496 stop_and_resume_callback (struct lwp_info
*lp
, void *data
)
2498 struct lwp_info
*ptr
;
2500 if (!lp
->stopped
&& !lp
->signalled
)
2502 stop_callback (lp
, NULL
);
2503 stop_wait_callback (lp
, NULL
);
2504 /* Resume if the lwp still exists. */
2505 for (ptr
= lwp_list
; ptr
; ptr
= ptr
->next
)
2508 resume_callback (lp
, NULL
);
2509 resume_set_callback (lp
, NULL
);
2515 /* Check if we should go on and pass this event to common code.
2516 Return the affected lwp if we are, or NULL otherwise. */
2517 static struct lwp_info
*
2518 linux_nat_filter_event (int lwpid
, int status
, int options
)
2520 struct lwp_info
*lp
;
2522 lp
= find_lwp_pid (pid_to_ptid (lwpid
));
2524 /* Check for stop events reported by a process we didn't already
2525 know about - anything not already in our LWP list.
2527 If we're expecting to receive stopped processes after
2528 fork, vfork, and clone events, then we'll just add the
2529 new one to our list and go back to waiting for the event
2530 to be reported - the stopped process might be returned
2531 from waitpid before or after the event is. */
2532 if (WIFSTOPPED (status
) && !lp
)
2534 linux_record_stopped_pid (lwpid
, status
);
2538 /* Make sure we don't report an event for the exit of an LWP not in
2539 our list, i.e. not part of the current process. This can happen
2540 if we detach from a program we original forked and then it
2542 if (!WIFSTOPPED (status
) && !lp
)
2545 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2546 CLONE_PTRACE processes which do not use the thread library -
2547 otherwise we wouldn't find the new LWP this way. That doesn't
2548 currently work, and the following code is currently unreachable
2549 due to the two blocks above. If it's fixed some day, this code
2550 should be broken out into a function so that we can also pick up
2551 LWPs from the new interface. */
2554 lp
= add_lwp (BUILD_LWP (lwpid
, GET_PID (inferior_ptid
)));
2555 if (options
& __WCLONE
)
2558 gdb_assert (WIFSTOPPED (status
)
2559 && WSTOPSIG (status
) == SIGSTOP
);
2562 if (!in_thread_list (inferior_ptid
))
2564 inferior_ptid
= BUILD_LWP (GET_PID (inferior_ptid
),
2565 GET_PID (inferior_ptid
));
2566 add_thread (inferior_ptid
);
2569 add_thread (lp
->ptid
);
2572 /* Save the trap's siginfo in case we need it later. */
2573 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
2576 /* Handle GNU/Linux's extended waitstatus for trace events. */
2577 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
&& status
>> 16 != 0)
2579 if (debug_linux_nat
)
2580 fprintf_unfiltered (gdb_stdlog
,
2581 "LLW: Handling extended status 0x%06x\n",
2583 if (linux_handle_extended_wait (lp
, status
, 0))
2587 /* Check if the thread has exited. */
2588 if ((WIFEXITED (status
) || WIFSIGNALED (status
)) && num_lwps
> 1)
2590 /* If this is the main thread, we must stop all threads and
2591 verify if they are still alive. This is because in the nptl
2592 thread model, there is no signal issued for exiting LWPs
2593 other than the main thread. We only get the main thread exit
2594 signal once all child threads have already exited. If we
2595 stop all the threads and use the stop_wait_callback to check
2596 if they have exited we can determine whether this signal
2597 should be ignored or whether it means the end of the debugged
2598 application, regardless of which threading model is being
2600 if (GET_PID (lp
->ptid
) == GET_LWP (lp
->ptid
))
2603 iterate_over_lwps (stop_and_resume_callback
, NULL
);
2606 if (debug_linux_nat
)
2607 fprintf_unfiltered (gdb_stdlog
,
2608 "LLW: %s exited.\n",
2609 target_pid_to_str (lp
->ptid
));
2613 /* If there is at least one more LWP, then the exit signal was
2614 not the end of the debugged application and should be
2620 /* Check if the current LWP has previously exited. In the nptl
2621 thread model, LWPs other than the main thread do not issue
2622 signals when they exit so we must check whenever the thread has
2623 stopped. A similar check is made in stop_wait_callback(). */
2624 if (num_lwps
> 1 && !linux_nat_thread_alive (lp
->ptid
))
2626 if (debug_linux_nat
)
2627 fprintf_unfiltered (gdb_stdlog
,
2628 "LLW: %s exited.\n",
2629 target_pid_to_str (lp
->ptid
));
2633 /* Make sure there is at least one thread running. */
2634 gdb_assert (iterate_over_lwps (running_callback
, NULL
));
2636 /* Discard the event. */
2640 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2641 an attempt to stop an LWP. */
2643 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGSTOP
)
2645 if (debug_linux_nat
)
2646 fprintf_unfiltered (gdb_stdlog
,
2647 "LLW: Delayed SIGSTOP caught for %s.\n",
2648 target_pid_to_str (lp
->ptid
));
2650 /* This is a delayed SIGSTOP. */
2653 registers_changed ();
2655 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2656 lp
->step
, TARGET_SIGNAL_0
);
2657 if (debug_linux_nat
)
2658 fprintf_unfiltered (gdb_stdlog
,
2659 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2661 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2662 target_pid_to_str (lp
->ptid
));
2665 gdb_assert (lp
->resumed
);
2667 /* Discard the event. */
2671 /* Make sure we don't report a SIGINT that we have already displayed
2672 for another thread. */
2673 if (lp
->ignore_sigint
2674 && WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGINT
)
2676 if (debug_linux_nat
)
2677 fprintf_unfiltered (gdb_stdlog
,
2678 "LLW: Delayed SIGINT caught for %s.\n",
2679 target_pid_to_str (lp
->ptid
));
2681 /* This is a delayed SIGINT. */
2682 lp
->ignore_sigint
= 0;
2684 registers_changed ();
2685 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2686 lp
->step
, TARGET_SIGNAL_0
);
2687 if (debug_linux_nat
)
2688 fprintf_unfiltered (gdb_stdlog
,
2689 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2691 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2692 target_pid_to_str (lp
->ptid
));
2695 gdb_assert (lp
->resumed
);
2697 /* Discard the event. */
2701 /* An interesting event. */
2706 /* Get the events stored in the pipe into the local queue, so they are
2707 accessible to queued_waitpid. We need to do this, since it is not
2708 always the case that the event at the head of the pipe is the event
2712 pipe_to_local_event_queue (void)
2714 if (debug_linux_nat_async
)
2715 fprintf_unfiltered (gdb_stdlog
,
2716 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2717 linux_nat_num_queued_events
);
2718 while (linux_nat_num_queued_events
)
2720 int lwpid
, status
, options
;
2721 lwpid
= linux_nat_event_pipe_pop (&status
, &options
);
2722 gdb_assert (lwpid
> 0);
2723 push_waitpid (lwpid
, status
, options
);
2727 /* Get the unprocessed events stored in the local queue back into the
2728 pipe, so the event loop realizes there's something else to
2732 local_event_queue_to_pipe (void)
2734 struct waitpid_result
*w
= waitpid_queue
;
2737 struct waitpid_result
*next
= w
->next
;
2738 linux_nat_event_pipe_push (w
->pid
,
2744 waitpid_queue
= NULL
;
2746 if (debug_linux_nat_async
)
2747 fprintf_unfiltered (gdb_stdlog
,
2748 "LEQTP: linux_nat_num_queued_events(%d)\n",
2749 linux_nat_num_queued_events
);
2753 linux_nat_wait (ptid_t ptid
, struct target_waitstatus
*ourstatus
)
2755 struct lwp_info
*lp
= NULL
;
2758 pid_t pid
= PIDGET (ptid
);
2760 if (debug_linux_nat_async
)
2761 fprintf_unfiltered (gdb_stdlog
, "LLW: enter\n");
2763 /* The first time we get here after starting a new inferior, we may
2764 not have added it to the LWP list yet - this is the earliest
2765 moment at which we know its PID. */
2768 gdb_assert (!is_lwp (inferior_ptid
));
2770 /* Upgrade the main thread's ptid. */
2771 thread_change_ptid (inferior_ptid
,
2772 BUILD_LWP (GET_PID (inferior_ptid
),
2773 GET_PID (inferior_ptid
)));
2775 lp
= add_lwp (inferior_ptid
);
2779 /* Block events while we're here. */
2780 linux_nat_async_events (sigchld_sync
);
2784 /* Make sure there is at least one LWP that has been resumed. */
2785 gdb_assert (iterate_over_lwps (resumed_callback
, NULL
));
2787 /* First check if there is a LWP with a wait status pending. */
2790 /* Any LWP that's been resumed will do. */
2791 lp
= iterate_over_lwps (status_callback
, NULL
);
2794 if (target_can_async_p ())
2795 internal_error (__FILE__
, __LINE__
,
2796 "Found an LWP with a pending status in async mode.");
2798 status
= lp
->status
;
2801 if (debug_linux_nat
&& status
)
2802 fprintf_unfiltered (gdb_stdlog
,
2803 "LLW: Using pending wait status %s for %s.\n",
2804 status_to_str (status
),
2805 target_pid_to_str (lp
->ptid
));
2808 /* But if we don't find one, we'll have to wait, and check both
2809 cloned and uncloned processes. We start with the cloned
2811 options
= __WCLONE
| WNOHANG
;
2813 else if (is_lwp (ptid
))
2815 if (debug_linux_nat
)
2816 fprintf_unfiltered (gdb_stdlog
,
2817 "LLW: Waiting for specific LWP %s.\n",
2818 target_pid_to_str (ptid
));
2820 /* We have a specific LWP to check. */
2821 lp
= find_lwp_pid (ptid
);
2823 status
= lp
->status
;
2826 if (debug_linux_nat
&& status
)
2827 fprintf_unfiltered (gdb_stdlog
,
2828 "LLW: Using pending wait status %s for %s.\n",
2829 status_to_str (status
),
2830 target_pid_to_str (lp
->ptid
));
2832 /* If we have to wait, take into account whether PID is a cloned
2833 process or not. And we have to convert it to something that
2834 the layer beneath us can understand. */
2835 options
= lp
->cloned
? __WCLONE
: 0;
2836 pid
= GET_LWP (ptid
);
2839 if (status
&& lp
->signalled
)
2841 /* A pending SIGSTOP may interfere with the normal stream of
2842 events. In a typical case where interference is a problem,
2843 we have a SIGSTOP signal pending for LWP A while
2844 single-stepping it, encounter an event in LWP B, and take the
2845 pending SIGSTOP while trying to stop LWP A. After processing
2846 the event in LWP B, LWP A is continued, and we'll never see
2847 the SIGTRAP associated with the last time we were
2848 single-stepping LWP A. */
2850 /* Resume the thread. It should halt immediately returning the
2852 registers_changed ();
2853 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2854 lp
->step
, TARGET_SIGNAL_0
);
2855 if (debug_linux_nat
)
2856 fprintf_unfiltered (gdb_stdlog
,
2857 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2858 lp
->step
? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2859 target_pid_to_str (lp
->ptid
));
2861 gdb_assert (lp
->resumed
);
2863 /* This should catch the pending SIGSTOP. */
2864 stop_wait_callback (lp
, NULL
);
2867 if (!target_can_async_p ())
2869 /* Causes SIGINT to be passed on to the attached process. */
2878 if (target_can_async_p ())
2879 /* In async mode, don't ever block. Only look at the locally
2881 lwpid
= queued_waitpid (pid
, &status
, options
);
2883 lwpid
= my_waitpid (pid
, &status
, options
);
2887 gdb_assert (pid
== -1 || lwpid
== pid
);
2889 if (debug_linux_nat
)
2891 fprintf_unfiltered (gdb_stdlog
,
2892 "LLW: waitpid %ld received %s\n",
2893 (long) lwpid
, status_to_str (status
));
2896 lp
= linux_nat_filter_event (lwpid
, status
, options
);
2899 /* A discarded event. */
2909 /* Alternate between checking cloned and uncloned processes. */
2910 options
^= __WCLONE
;
2912 /* And every time we have checked both:
2913 In async mode, return to event loop;
2914 In sync mode, suspend waiting for a SIGCHLD signal. */
2915 if (options
& __WCLONE
)
2917 if (target_can_async_p ())
2919 /* No interesting event. */
2920 ourstatus
->kind
= TARGET_WAITKIND_IGNORE
;
2922 /* Get ready for the next event. */
2923 target_async (inferior_event_handler
, 0);
2925 if (debug_linux_nat_async
)
2926 fprintf_unfiltered (gdb_stdlog
, "LLW: exit (ignore)\n");
2928 return minus_one_ptid
;
2931 sigsuspend (&suspend_mask
);
2935 /* We shouldn't end up here unless we want to try again. */
2936 gdb_assert (status
== 0);
2939 if (!target_can_async_p ())
2941 clear_sigio_trap ();
2942 clear_sigint_trap ();
2947 /* Don't report signals that GDB isn't interested in, such as
2948 signals that are neither printed nor stopped upon. Stopping all
2949 threads can be a bit time-consuming so if we want decent
2950 performance with heavily multi-threaded programs, especially when
2951 they're using a high frequency timer, we'd better avoid it if we
2954 if (WIFSTOPPED (status
))
2956 int signo
= target_signal_from_host (WSTOPSIG (status
));
2957 struct inferior
*inf
;
2959 inf
= find_inferior_pid (ptid_get_pid (lp
->ptid
));
2962 /* Defer to common code if we get a signal while
2963 single-stepping, since that may need special care, e.g. to
2964 skip the signal handler, or, if we're gaining control of the
2967 && inf
->stop_soon
== NO_STOP_QUIETLY
2968 && signal_stop_state (signo
) == 0
2969 && signal_print_state (signo
) == 0
2970 && signal_pass_state (signo
) == 1)
2972 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
2973 here? It is not clear we should. GDB may not expect
2974 other threads to run. On the other hand, not resuming
2975 newly attached threads may cause an unwanted delay in
2976 getting them running. */
2977 registers_changed ();
2978 linux_ops
->to_resume (pid_to_ptid (GET_LWP (lp
->ptid
)),
2980 if (debug_linux_nat
)
2981 fprintf_unfiltered (gdb_stdlog
,
2982 "LLW: %s %s, %s (preempt 'handle')\n",
2984 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2985 target_pid_to_str (lp
->ptid
),
2986 signo
? strsignal (signo
) : "0");
2994 /* Only do the below in all-stop, as we currently use SIGINT
2995 to implement target_stop (see linux_nat_stop) in
2997 if (signo
== TARGET_SIGNAL_INT
&& signal_pass_state (signo
) == 0)
2999 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3000 forwarded to the entire process group, that is, all LWPs
3001 will receive it - unless they're using CLONE_THREAD to
3002 share signals. Since we only want to report it once, we
3003 mark it as ignored for all LWPs except this one. */
3004 iterate_over_lwps (set_ignore_sigint
, NULL
);
3005 lp
->ignore_sigint
= 0;
3008 maybe_clear_ignore_sigint (lp
);
3012 /* This LWP is stopped now. */
3015 if (debug_linux_nat
)
3016 fprintf_unfiltered (gdb_stdlog
, "LLW: Candidate event %s in %s.\n",
3017 status_to_str (status
), target_pid_to_str (lp
->ptid
));
3021 /* Now stop all other LWP's ... */
3022 iterate_over_lwps (stop_callback
, NULL
);
3024 /* ... and wait until all of them have reported back that
3025 they're no longer running. */
3026 iterate_over_lwps (stop_wait_callback
, NULL
);
3028 /* If we're not waiting for a specific LWP, choose an event LWP
3029 from among those that have had events. Giving equal priority
3030 to all LWPs that have had events helps prevent
3033 select_event_lwp (&lp
, &status
);
3036 /* Now that we've selected our final event LWP, cancel any
3037 breakpoints in other LWPs that have hit a GDB breakpoint. See
3038 the comment in cancel_breakpoints_callback to find out why. */
3039 iterate_over_lwps (cancel_breakpoints_callback
, lp
);
3041 if (WIFSTOPPED (status
) && WSTOPSIG (status
) == SIGTRAP
)
3043 if (debug_linux_nat
)
3044 fprintf_unfiltered (gdb_stdlog
,
3045 "LLW: trap ptid is %s.\n",
3046 target_pid_to_str (lp
->ptid
));
3049 if (lp
->waitstatus
.kind
!= TARGET_WAITKIND_IGNORE
)
3051 *ourstatus
= lp
->waitstatus
;
3052 lp
->waitstatus
.kind
= TARGET_WAITKIND_IGNORE
;
3055 store_waitstatus (ourstatus
, status
);
3057 /* Get ready for the next event. */
3058 if (target_can_async_p ())
3059 target_async (inferior_event_handler
, 0);
3061 if (debug_linux_nat_async
)
3062 fprintf_unfiltered (gdb_stdlog
, "LLW: exit\n");
3068 kill_callback (struct lwp_info
*lp
, void *data
)
3071 ptrace (PTRACE_KILL
, GET_LWP (lp
->ptid
), 0, 0);
3072 if (debug_linux_nat
)
3073 fprintf_unfiltered (gdb_stdlog
,
3074 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3075 target_pid_to_str (lp
->ptid
),
3076 errno
? safe_strerror (errno
) : "OK");
3082 kill_wait_callback (struct lwp_info
*lp
, void *data
)
3086 /* We must make sure that there are no pending events (delayed
3087 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3088 program doesn't interfere with any following debugging session. */
3090 /* For cloned processes we must check both with __WCLONE and
3091 without, since the exit status of a cloned process isn't reported
3097 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, __WCLONE
);
3098 if (pid
!= (pid_t
) -1)
3100 if (debug_linux_nat
)
3101 fprintf_unfiltered (gdb_stdlog
,
3102 "KWC: wait %s received unknown.\n",
3103 target_pid_to_str (lp
->ptid
));
3104 /* The Linux kernel sometimes fails to kill a thread
3105 completely after PTRACE_KILL; that goes from the stop
3106 point in do_fork out to the one in
3107 get_signal_to_deliever and waits again. So kill it
3109 kill_callback (lp
, NULL
);
3112 while (pid
== GET_LWP (lp
->ptid
));
3114 gdb_assert (pid
== -1 && errno
== ECHILD
);
3119 pid
= my_waitpid (GET_LWP (lp
->ptid
), NULL
, 0);
3120 if (pid
!= (pid_t
) -1)
3122 if (debug_linux_nat
)
3123 fprintf_unfiltered (gdb_stdlog
,
3124 "KWC: wait %s received unk.\n",
3125 target_pid_to_str (lp
->ptid
));
3126 /* See the call to kill_callback above. */
3127 kill_callback (lp
, NULL
);
3130 while (pid
== GET_LWP (lp
->ptid
));
3132 gdb_assert (pid
== -1 && errno
== ECHILD
);
3137 linux_nat_kill (void)
3139 struct target_waitstatus last
;
3143 if (target_can_async_p ())
3144 target_async (NULL
, 0);
3146 /* If we're stopped while forking and we haven't followed yet,
3147 kill the other task. We need to do this first because the
3148 parent will be sleeping if this is a vfork. */
3150 get_last_target_status (&last_ptid
, &last
);
3152 if (last
.kind
== TARGET_WAITKIND_FORKED
3153 || last
.kind
== TARGET_WAITKIND_VFORKED
)
3155 ptrace (PT_KILL
, PIDGET (last
.value
.related_pid
), 0, 0);
3159 if (forks_exist_p ())
3161 linux_fork_killall ();
3162 drain_queued_events (-1);
3166 /* Stop all threads before killing them, since ptrace requires
3167 that the thread is stopped to sucessfully PTRACE_KILL. */
3168 iterate_over_lwps (stop_callback
, NULL
);
3169 /* ... and wait until all of them have reported back that
3170 they're no longer running. */
3171 iterate_over_lwps (stop_wait_callback
, NULL
);
3173 /* Kill all LWP's ... */
3174 iterate_over_lwps (kill_callback
, NULL
);
3176 /* ... and wait until we've flushed all events. */
3177 iterate_over_lwps (kill_wait_callback
, NULL
);
3180 target_mourn_inferior ();
3184 linux_nat_mourn_inferior (struct target_ops
*ops
)
3186 /* Destroy LWP info; it's no longer valid. */
3189 if (! forks_exist_p ())
3191 /* Normal case, no other forks available. */
3192 if (target_can_async_p ())
3193 linux_nat_async (NULL
, 0);
3194 linux_ops
->to_mourn_inferior (ops
);
3197 /* Multi-fork case. The current inferior_ptid has exited, but
3198 there are other viable forks to debug. Delete the exiting
3199 one and context-switch to the first available. */
3200 linux_fork_mourn_inferior ();
3204 linux_nat_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3205 const char *annex
, gdb_byte
*readbuf
,
3206 const gdb_byte
*writebuf
,
3207 ULONGEST offset
, LONGEST len
)
3209 struct cleanup
*old_chain
= save_inferior_ptid ();
3212 if (is_lwp (inferior_ptid
))
3213 inferior_ptid
= pid_to_ptid (GET_LWP (inferior_ptid
));
3215 xfer
= linux_ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
3218 do_cleanups (old_chain
);
3223 linux_nat_thread_alive (ptid_t ptid
)
3227 gdb_assert (is_lwp (ptid
));
3229 /* Send signal 0 instead of anything ptrace, because ptracing a
3230 running thread errors out claiming that the thread doesn't
3232 err
= kill_lwp (GET_LWP (ptid
), 0);
3234 if (debug_linux_nat
)
3235 fprintf_unfiltered (gdb_stdlog
,
3236 "LLTA: KILL(SIG0) %s (%s)\n",
3237 target_pid_to_str (ptid
),
3238 err
? safe_strerror (err
) : "OK");
3247 linux_nat_pid_to_str (ptid_t ptid
)
3249 static char buf
[64];
3252 && ((lwp_list
&& lwp_list
->next
)
3253 || GET_PID (ptid
) != GET_LWP (ptid
)))
3255 snprintf (buf
, sizeof (buf
), "LWP %ld", GET_LWP (ptid
));
3259 return normal_pid_to_str (ptid
);
3263 sigchld_handler (int signo
)
3265 if (target_async_permitted
3266 && linux_nat_async_events_state
!= sigchld_sync
3267 && signo
== SIGCHLD
)
3268 /* It is *always* a bug to hit this. */
3269 internal_error (__FILE__
, __LINE__
,
3270 "sigchld_handler called when async events are enabled");
3272 /* Do nothing. The only reason for this handler is that it allows
3273 us to use sigsuspend in linux_nat_wait above to wait for the
3274 arrival of a SIGCHLD. */
3277 /* Accepts an integer PID; Returns a string representing a file that
3278 can be opened to get the symbols for the child process. */
3281 linux_child_pid_to_exec_file (int pid
)
3283 char *name1
, *name2
;
3285 name1
= xmalloc (MAXPATHLEN
);
3286 name2
= xmalloc (MAXPATHLEN
);
3287 make_cleanup (xfree
, name1
);
3288 make_cleanup (xfree
, name2
);
3289 memset (name2
, 0, MAXPATHLEN
);
3291 sprintf (name1
, "/proc/%d/exe", pid
);
3292 if (readlink (name1
, name2
, MAXPATHLEN
) > 0)
/* Service function for corefiles and info proc.  Parse one line of a
   Linux /proc/PID/maps file from MAPFILE into the output parameters.
   FILENAME is set to the empty string when the mapping is anonymous.
   Returns non-zero if a mapping was parsed, zero at end of file.
   Note the inode field is scanned with %llx, matching the historical
   behavior of this parser.  */

static int
read_mapping (FILE *mapfile,
	      long long *addr,
	      long long *endaddr,
	      char *permissions,
	      long long *offset,
	      char *device, long long *inode, char *filename)
{
  int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
		    addr, endaddr, permissions, offset, device, inode);

  filename[0] = '\0';
  if (ret > 0 && ret != EOF)
    {
      /* Eat everything up to EOL for the filename.  This will prevent
	 weird filenames (such as one with embedded whitespace) from
	 confusing this code.  It also makes this code more robust in
	 respect to annotations the kernel may add after the filename.

	 Note the filename is used for informational purposes
	 only.  */
      ret += fscanf (mapfile, "%[^\n]\n", filename);
    }

  return (ret != 0 && ret != EOF);
}
3327 /* Fills the "to_find_memory_regions" target vector. Lists the memory
3328 regions in the inferior for a corefile. */
3331 linux_nat_find_memory_regions (int (*func
) (CORE_ADDR
,
3333 int, int, int, void *), void *obfd
)
3335 long long pid
= PIDGET (inferior_ptid
);
3336 char mapsfilename
[MAXPATHLEN
];
3338 long long addr
, endaddr
, size
, offset
, inode
;
3339 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3340 int read
, write
, exec
;
3342 struct cleanup
*cleanup
;
3344 /* Compose the filename for the /proc memory map, and open it. */
3345 sprintf (mapsfilename
, "/proc/%lld/maps", pid
);
3346 if ((mapsfile
= fopen (mapsfilename
, "r")) == NULL
)
3347 error (_("Could not open %s."), mapsfilename
);
3348 cleanup
= make_cleanup_fclose (mapsfile
);
3351 fprintf_filtered (gdb_stdout
,
3352 "Reading memory regions from %s\n", mapsfilename
);
3354 /* Now iterate until end-of-file. */
3355 while (read_mapping (mapsfile
, &addr
, &endaddr
, &permissions
[0],
3356 &offset
, &device
[0], &inode
, &filename
[0]))
3358 size
= endaddr
- addr
;
3360 /* Get the segment's permissions. */
3361 read
= (strchr (permissions
, 'r') != 0);
3362 write
= (strchr (permissions
, 'w') != 0);
3363 exec
= (strchr (permissions
, 'x') != 0);
3367 fprintf_filtered (gdb_stdout
,
3368 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3369 size
, paddr_nz (addr
),
3371 write
? 'w' : ' ', exec
? 'x' : ' ');
3373 fprintf_filtered (gdb_stdout
, " for %s", filename
);
3374 fprintf_filtered (gdb_stdout
, "\n");
3377 /* Invoke the callback function to create the corefile
3379 func (addr
, size
, read
, write
, exec
, obfd
);
3381 do_cleanups (cleanup
);
3386 find_signalled_thread (struct thread_info
*info
, void *data
)
3388 if (info
->stop_signal
!= TARGET_SIGNAL_0
3389 && ptid_get_pid (info
->ptid
) == ptid_get_pid (inferior_ptid
))
3395 static enum target_signal
3396 find_stop_signal (void)
3398 struct thread_info
*info
=
3399 iterate_over_threads (find_signalled_thread
, NULL
);
3402 return info
->stop_signal
;
3404 return TARGET_SIGNAL_0
;
3407 /* Records the thread's register state for the corefile note
3411 linux_nat_do_thread_registers (bfd
*obfd
, ptid_t ptid
,
3412 char *note_data
, int *note_size
,
3413 enum target_signal stop_signal
)
3415 gdb_gregset_t gregs
;
3416 gdb_fpregset_t fpregs
;
3417 unsigned long lwp
= ptid_get_lwp (ptid
);
3418 struct regcache
*regcache
= get_thread_regcache (ptid
);
3419 struct gdbarch
*gdbarch
= get_regcache_arch (regcache
);
3420 const struct regset
*regset
;
3422 struct cleanup
*old_chain
;
3423 struct core_regset_section
*sect_list
;
3426 old_chain
= save_inferior_ptid ();
3427 inferior_ptid
= ptid
;
3428 target_fetch_registers (regcache
, -1);
3429 do_cleanups (old_chain
);
3431 core_regset_p
= gdbarch_regset_from_core_section_p (gdbarch
);
3432 sect_list
= gdbarch_core_regset_sections (gdbarch
);
3435 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg",
3436 sizeof (gregs
))) != NULL
3437 && regset
->collect_regset
!= NULL
)
3438 regset
->collect_regset (regset
, regcache
, -1,
3439 &gregs
, sizeof (gregs
));
3441 fill_gregset (regcache
, &gregs
, -1);
3443 note_data
= (char *) elfcore_write_prstatus (obfd
,
3447 stop_signal
, &gregs
);
3449 /* The loop below uses the new struct core_regset_section, which stores
3450 the supported section names and sizes for the core file. Note that
3451 note PRSTATUS needs to be treated specially. But the other notes are
3452 structurally the same, so they can benefit from the new struct. */
3453 if (core_regset_p
&& sect_list
!= NULL
)
3454 while (sect_list
->sect_name
!= NULL
)
3456 /* .reg was already handled above. */
3457 if (strcmp (sect_list
->sect_name
, ".reg") == 0)
3462 regset
= gdbarch_regset_from_core_section (gdbarch
,
3463 sect_list
->sect_name
,
3465 gdb_assert (regset
&& regset
->collect_regset
);
3466 gdb_regset
= xmalloc (sect_list
->size
);
3467 regset
->collect_regset (regset
, regcache
, -1,
3468 gdb_regset
, sect_list
->size
);
3469 note_data
= (char *) elfcore_write_register_note (obfd
,
3472 sect_list
->sect_name
,
3479 /* For architectures that does not have the struct core_regset_section
3480 implemented, we use the old method. When all the architectures have
3481 the new support, the code below should be deleted. */
3485 && (regset
= gdbarch_regset_from_core_section (gdbarch
, ".reg2",
3486 sizeof (fpregs
))) != NULL
3487 && regset
->collect_regset
!= NULL
)
3488 regset
->collect_regset (regset
, regcache
, -1,
3489 &fpregs
, sizeof (fpregs
));
3491 fill_fpregset (regcache
, &fpregs
, -1);
3493 note_data
= (char *) elfcore_write_prfpreg (obfd
,
3496 &fpregs
, sizeof (fpregs
));
3502 struct linux_nat_corefile_thread_data
3508 enum target_signal stop_signal
;
3511 /* Called by gdbthread.c once per thread. Records the thread's
3512 register state for the corefile note section. */
3515 linux_nat_corefile_thread_callback (struct lwp_info
*ti
, void *data
)
3517 struct linux_nat_corefile_thread_data
*args
= data
;
3519 args
->note_data
= linux_nat_do_thread_registers (args
->obfd
,
3529 /* Fills the "to_make_corefile_note" target vector. Builds the note
3530 section for a corefile, and returns it in a malloc buffer. */
3533 linux_nat_make_corefile_notes (bfd
*obfd
, int *note_size
)
3535 struct linux_nat_corefile_thread_data thread_args
;
3536 struct cleanup
*old_chain
;
3537 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3538 char fname
[16] = { '\0' };
3539 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3540 char psargs
[80] = { '\0' };
3541 char *note_data
= NULL
;
3542 ptid_t current_ptid
= inferior_ptid
;
3546 if (get_exec_file (0))
3548 strncpy (fname
, strrchr (get_exec_file (0), '/') + 1, sizeof (fname
));
3549 strncpy (psargs
, get_exec_file (0), sizeof (psargs
));
3550 if (get_inferior_args ())
3553 char *psargs_end
= psargs
+ sizeof (psargs
);
3555 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3557 string_end
= memchr (psargs
, 0, sizeof (psargs
));
3558 if (string_end
!= NULL
)
3560 *string_end
++ = ' ';
3561 strncpy (string_end
, get_inferior_args (),
3562 psargs_end
- string_end
);
3565 note_data
= (char *) elfcore_write_prpsinfo (obfd
,
3567 note_size
, fname
, psargs
);
3570 /* Dump information for threads. */
3571 thread_args
.obfd
= obfd
;
3572 thread_args
.note_data
= note_data
;
3573 thread_args
.note_size
= note_size
;
3574 thread_args
.num_notes
= 0;
3575 thread_args
.stop_signal
= find_stop_signal ();
3576 iterate_over_lwps (linux_nat_corefile_thread_callback
, &thread_args
);
3577 gdb_assert (thread_args
.num_notes
!= 0);
3578 note_data
= thread_args
.note_data
;
3580 auxv_len
= target_read_alloc (¤t_target
, TARGET_OBJECT_AUXV
,
3584 note_data
= elfcore_write_note (obfd
, note_data
, note_size
,
3585 "CORE", NT_AUXV
, auxv
, auxv_len
);
3589 make_cleanup (xfree
, note_data
);
3593 /* Implement the "info proc" command. */
3596 linux_nat_info_proc_cmd (char *args
, int from_tty
)
3598 long long pid
= PIDGET (inferior_ptid
);
3601 char buffer
[MAXPATHLEN
];
3602 char fname1
[MAXPATHLEN
], fname2
[MAXPATHLEN
];
3615 /* Break up 'args' into an argv array. */
3616 argv
= gdb_buildargv (args
);
3617 make_cleanup_freeargv (argv
);
3619 while (argv
!= NULL
&& *argv
!= NULL
)
3621 if (isdigit (argv
[0][0]))
3623 pid
= strtoul (argv
[0], NULL
, 10);
3625 else if (strncmp (argv
[0], "mappings", strlen (argv
[0])) == 0)
3629 else if (strcmp (argv
[0], "status") == 0)
3633 else if (strcmp (argv
[0], "stat") == 0)
3637 else if (strcmp (argv
[0], "cmd") == 0)
3641 else if (strncmp (argv
[0], "exe", strlen (argv
[0])) == 0)
3645 else if (strcmp (argv
[0], "cwd") == 0)
3649 else if (strncmp (argv
[0], "all", strlen (argv
[0])) == 0)
3655 /* [...] (future options here) */
3660 error (_("No current process: you must name one."));
3662 sprintf (fname1
, "/proc/%lld", pid
);
3663 if (stat (fname1
, &dummy
) != 0)
3664 error (_("No /proc directory: '%s'"), fname1
);
3666 printf_filtered (_("process %lld\n"), pid
);
3667 if (cmdline_f
|| all
)
3669 sprintf (fname1
, "/proc/%lld/cmdline", pid
);
3670 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3672 struct cleanup
*cleanup
= make_cleanup_fclose (procfile
);
3673 fgets (buffer
, sizeof (buffer
), procfile
);
3674 printf_filtered ("cmdline = '%s'\n", buffer
);
3675 do_cleanups (cleanup
);
3678 warning (_("unable to open /proc file '%s'"), fname1
);
3682 sprintf (fname1
, "/proc/%lld/cwd", pid
);
3683 memset (fname2
, 0, sizeof (fname2
));
3684 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3685 printf_filtered ("cwd = '%s'\n", fname2
);
3687 warning (_("unable to read link '%s'"), fname1
);
3691 sprintf (fname1
, "/proc/%lld/exe", pid
);
3692 memset (fname2
, 0, sizeof (fname2
));
3693 if (readlink (fname1
, fname2
, sizeof (fname2
)) > 0)
3694 printf_filtered ("exe = '%s'\n", fname2
);
3696 warning (_("unable to read link '%s'"), fname1
);
3698 if (mappings_f
|| all
)
3700 sprintf (fname1
, "/proc/%lld/maps", pid
);
3701 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3703 long long addr
, endaddr
, size
, offset
, inode
;
3704 char permissions
[8], device
[8], filename
[MAXPATHLEN
];
3705 struct cleanup
*cleanup
;
3707 cleanup
= make_cleanup_fclose (procfile
);
3708 printf_filtered (_("Mapped address spaces:\n\n"));
3709 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3711 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3714 " Size", " Offset", "objfile");
3718 printf_filtered (" %18s %18s %10s %10s %7s\n",
3721 " Size", " Offset", "objfile");
3724 while (read_mapping (procfile
, &addr
, &endaddr
, &permissions
[0],
3725 &offset
, &device
[0], &inode
, &filename
[0]))
3727 size
= endaddr
- addr
;
3729 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3730 calls here (and possibly above) should be abstracted
3731 out into their own functions? Andrew suggests using
3732 a generic local_address_string instead to print out
3733 the addresses; that makes sense to me, too. */
3735 if (gdbarch_addr_bit (current_gdbarch
) == 32)
3737 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3738 (unsigned long) addr
, /* FIXME: pr_addr */
3739 (unsigned long) endaddr
,
3741 (unsigned int) offset
,
3742 filename
[0] ? filename
: "");
3746 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3747 (unsigned long) addr
, /* FIXME: pr_addr */
3748 (unsigned long) endaddr
,
3750 (unsigned int) offset
,
3751 filename
[0] ? filename
: "");
3755 do_cleanups (cleanup
);
3758 warning (_("unable to open /proc file '%s'"), fname1
);
3760 if (status_f
|| all
)
3762 sprintf (fname1
, "/proc/%lld/status", pid
);
3763 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3765 struct cleanup
*cleanup
= make_cleanup_fclose (procfile
);
3766 while (fgets (buffer
, sizeof (buffer
), procfile
) != NULL
)
3767 puts_filtered (buffer
);
3768 do_cleanups (cleanup
);
3771 warning (_("unable to open /proc file '%s'"), fname1
);
3775 sprintf (fname1
, "/proc/%lld/stat", pid
);
3776 if ((procfile
= fopen (fname1
, "r")) != NULL
)
3781 struct cleanup
*cleanup
= make_cleanup_fclose (procfile
);
3783 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3784 printf_filtered (_("Process: %d\n"), itmp
);
3785 if (fscanf (procfile
, "(%[^)]) ", &buffer
[0]) > 0)
3786 printf_filtered (_("Exec file: %s\n"), buffer
);
3787 if (fscanf (procfile
, "%c ", &ctmp
) > 0)
3788 printf_filtered (_("State: %c\n"), ctmp
);
3789 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3790 printf_filtered (_("Parent process: %d\n"), itmp
);
3791 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3792 printf_filtered (_("Process group: %d\n"), itmp
);
3793 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3794 printf_filtered (_("Session id: %d\n"), itmp
);
3795 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3796 printf_filtered (_("TTY: %d\n"), itmp
);
3797 if (fscanf (procfile
, "%d ", &itmp
) > 0)
3798 printf_filtered (_("TTY owner process group: %d\n"), itmp
);
3799 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3800 printf_filtered (_("Flags: 0x%lx\n"), ltmp
);
3801 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3802 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3803 (unsigned long) ltmp
);
3804 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3805 printf_filtered (_("Minor faults, children: %lu\n"),
3806 (unsigned long) ltmp
);
3807 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3808 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3809 (unsigned long) ltmp
);
3810 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3811 printf_filtered (_("Major faults, children: %lu\n"),
3812 (unsigned long) ltmp
);
3813 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3814 printf_filtered (_("utime: %ld\n"), ltmp
);
3815 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3816 printf_filtered (_("stime: %ld\n"), ltmp
);
3817 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3818 printf_filtered (_("utime, children: %ld\n"), ltmp
);
3819 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3820 printf_filtered (_("stime, children: %ld\n"), ltmp
);
3821 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3822 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3824 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3825 printf_filtered (_("'nice' value: %ld\n"), ltmp
);
3826 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3827 printf_filtered (_("jiffies until next timeout: %lu\n"),
3828 (unsigned long) ltmp
);
3829 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3830 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3831 (unsigned long) ltmp
);
3832 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3833 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3835 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3836 printf_filtered (_("Virtual memory size: %lu\n"),
3837 (unsigned long) ltmp
);
3838 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3839 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp
);
3840 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3841 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp
);
3842 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3843 printf_filtered (_("Start of text: 0x%lx\n"), ltmp
);
3844 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3845 printf_filtered (_("End of text: 0x%lx\n"), ltmp
);
3846 if (fscanf (procfile
, "%lu ", <mp
) > 0)
3847 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp
);
3848 #if 0 /* Don't know how architecture-dependent the rest is...
3849 Anyway the signal bitmap info is available from "status". */
3850 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3851 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp
);
3852 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3853 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp
);
3854 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3855 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp
);
3856 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3857 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp
);
3858 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3859 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp
);
3860 if (fscanf (procfile
, "%ld ", <mp
) > 0)
3861 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp
);
3862 if (fscanf (procfile
, "%lu ", <mp
) > 0) /* FIXME arch? */
3863 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp
);
3865 do_cleanups (cleanup
);
3868 warning (_("unable to open /proc file '%s'"), fname1
);
3872 /* Implement the to_xfer_partial interface for memory reads using the /proc
3873 filesystem. Because we can use a single read() call for /proc, this
3874 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3875 but it doesn't support writes. */
3878 linux_proc_xfer_partial (struct target_ops
*ops
, enum target_object object
,
3879 const char *annex
, gdb_byte
*readbuf
,
3880 const gdb_byte
*writebuf
,
3881 ULONGEST offset
, LONGEST len
)
3887 if (object
!= TARGET_OBJECT_MEMORY
|| !readbuf
)
3890 /* Don't bother for one word. */
3891 if (len
< 3 * sizeof (long))
3894 /* We could keep this file open and cache it - possibly one per
3895 thread. That requires some juggling, but is even faster. */
3896 sprintf (filename
, "/proc/%d/mem", PIDGET (inferior_ptid
));
3897 fd
= open (filename
, O_RDONLY
| O_LARGEFILE
);
3901 /* If pread64 is available, use it. It's faster if the kernel
3902 supports it (only one syscall), and it's 64-bit safe even on
3903 32-bit platforms (for instance, SPARC debugging a SPARC64
3906 if (pread64 (fd
, readbuf
, len
, offset
) != len
)
3908 if (lseek (fd
, offset
, SEEK_SET
) == -1 || read (fd
, readbuf
, len
) != len
)
3918 /* Parse LINE as a signal set and add its set bits to SIGS. */
3921 add_line_to_sigset (const char *line
, sigset_t
*sigs
)
3923 int len
= strlen (line
) - 1;
3927 if (line
[len
] != '\n')
3928 error (_("Could not parse signal set: %s"), line
);
3936 if (*p
>= '0' && *p
<= '9')
3938 else if (*p
>= 'a' && *p
<= 'f')
3939 digit
= *p
- 'a' + 10;
3941 error (_("Could not parse signal set: %s"), line
);
3946 sigaddset (sigs
, signum
+ 1);
3948 sigaddset (sigs
, signum
+ 2);
3950 sigaddset (sigs
, signum
+ 3);
3952 sigaddset (sigs
, signum
+ 4);
3958 /* Find process PID's pending signals from /proc/pid/status and set
3962 linux_proc_pending_signals (int pid
, sigset_t
*pending
, sigset_t
*blocked
, sigset_t
*ignored
)
3965 char buffer
[MAXPATHLEN
], fname
[MAXPATHLEN
];
3967 struct cleanup
*cleanup
;
3969 sigemptyset (pending
);
3970 sigemptyset (blocked
);
3971 sigemptyset (ignored
);
3972 sprintf (fname
, "/proc/%d/status", pid
);
3973 procfile
= fopen (fname
, "r");
3974 if (procfile
== NULL
)
3975 error (_("Could not open %s"), fname
);
3976 cleanup
= make_cleanup_fclose (procfile
);
3978 while (fgets (buffer
, MAXPATHLEN
, procfile
) != NULL
)
3980 /* Normal queued signals are on the SigPnd line in the status
3981 file. However, 2.6 kernels also have a "shared" pending
3982 queue for delivering signals to a thread group, so check for
3985 Unfortunately some Red Hat kernels include the shared pending
3986 queue but not the ShdPnd status field. */
3988 if (strncmp (buffer
, "SigPnd:\t", 8) == 0)
3989 add_line_to_sigset (buffer
+ 8, pending
);
3990 else if (strncmp (buffer
, "ShdPnd:\t", 8) == 0)
3991 add_line_to_sigset (buffer
+ 8, pending
);
3992 else if (strncmp (buffer
, "SigBlk:\t", 8) == 0)
3993 add_line_to_sigset (buffer
+ 8, blocked
);
3994 else if (strncmp (buffer
, "SigIgn:\t", 8) == 0)
3995 add_line_to_sigset (buffer
+ 8, ignored
);
3998 do_cleanups (cleanup
);
4002 linux_nat_xfer_osdata (struct target_ops
*ops
, enum target_object object
,
4003 const char *annex
, gdb_byte
*readbuf
,
4004 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4006 /* We make the process list snapshot when the object starts to be
4008 static const char *buf
;
4009 static LONGEST len_avail
= -1;
4010 static struct obstack obstack
;
4014 gdb_assert (object
== TARGET_OBJECT_OSDATA
);
4016 if (strcmp (annex
, "processes") != 0)
4019 gdb_assert (readbuf
&& !writebuf
);
4023 if (len_avail
!= -1 && len_avail
!= 0)
4024 obstack_free (&obstack
, NULL
);
4027 obstack_init (&obstack
);
4028 obstack_grow_str (&obstack
, "<osdata type=\"processes\">\n");
4030 dirp
= opendir ("/proc");
4034 while ((dp
= readdir (dirp
)) != NULL
)
4036 struct stat statbuf
;
4037 char procentry
[sizeof ("/proc/4294967295")];
4039 if (!isdigit (dp
->d_name
[0])
4040 || strlen (dp
->d_name
) > sizeof ("4294967295") - 1)
4043 sprintf (procentry
, "/proc/%s", dp
->d_name
);
4044 if (stat (procentry
, &statbuf
) == 0
4045 && S_ISDIR (statbuf
.st_mode
))
4049 char cmd
[MAXPATHLEN
+ 1];
4050 struct passwd
*entry
;
4052 pathname
= xstrprintf ("/proc/%s/cmdline", dp
->d_name
);
4053 entry
= getpwuid (statbuf
.st_uid
);
4055 if ((f
= fopen (pathname
, "r")) != NULL
)
4057 size_t len
= fread (cmd
, 1, sizeof (cmd
) - 1, f
);
4061 for (i
= 0; i
< len
; i
++)
4066 obstack_xml_printf (
4069 "<column name=\"pid\">%s</column>"
4070 "<column name=\"user\">%s</column>"
4071 "<column name=\"command\">%s</column>"
4074 entry
? entry
->pw_name
: "?",
4087 obstack_grow_str0 (&obstack
, "</osdata>\n");
4088 buf
= obstack_finish (&obstack
);
4089 len_avail
= strlen (buf
);
4092 if (offset
>= len_avail
)
4094 /* Done. Get rid of the obstack. */
4095 obstack_free (&obstack
, NULL
);
4101 if (len
> len_avail
- offset
)
4102 len
= len_avail
- offset
;
4103 memcpy (readbuf
, buf
+ offset
, len
);
4109 linux_xfer_partial (struct target_ops
*ops
, enum target_object object
,
4110 const char *annex
, gdb_byte
*readbuf
,
4111 const gdb_byte
*writebuf
, ULONGEST offset
, LONGEST len
)
4115 if (object
== TARGET_OBJECT_AUXV
)
4116 return procfs_xfer_auxv (ops
, object
, annex
, readbuf
, writebuf
,
4119 if (object
== TARGET_OBJECT_OSDATA
)
4120 return linux_nat_xfer_osdata (ops
, object
, annex
, readbuf
, writebuf
,
4123 xfer
= linux_proc_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4128 return super_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
4132 /* Create a prototype generic GNU/Linux target. The client can override
4133 it with local methods. */
4136 linux_target_install_ops (struct target_ops
*t
)
4138 t
->to_insert_fork_catchpoint
= linux_child_insert_fork_catchpoint
;
4139 t
->to_insert_vfork_catchpoint
= linux_child_insert_vfork_catchpoint
;
4140 t
->to_insert_exec_catchpoint
= linux_child_insert_exec_catchpoint
;
4141 t
->to_pid_to_exec_file
= linux_child_pid_to_exec_file
;
4142 t
->to_post_startup_inferior
= linux_child_post_startup_inferior
;
4143 t
->to_post_attach
= linux_child_post_attach
;
4144 t
->to_follow_fork
= linux_child_follow_fork
;
4145 t
->to_find_memory_regions
= linux_nat_find_memory_regions
;
4146 t
->to_make_corefile_notes
= linux_nat_make_corefile_notes
;
4148 super_xfer_partial
= t
->to_xfer_partial
;
4149 t
->to_xfer_partial
= linux_xfer_partial
;
/* Return a fresh generic GNU/Linux target vector.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}
4164 linux_trad_target (CORE_ADDR (*register_u_offset
)(struct gdbarch
*, int, int))
4166 struct target_ops
*t
;
4168 t
= inf_ptrace_trad_target (register_u_offset
);
4169 linux_target_install_ops (t
);
4174 /* target_is_async_p implementation. */
4177 linux_nat_is_async_p (void)
4179 /* NOTE: palves 2008-03-21: We're only async when the user requests
4180 it explicitly with the "maintenance set target-async" command.
4181 Someday, linux will always be async. */
4182 if (!target_async_permitted
)
4188 /* target_can_async_p implementation. */
4191 linux_nat_can_async_p (void)
4193 /* NOTE: palves 2008-03-21: We're only async when the user requests
4194 it explicitly with the "maintenance set target-async" command.
4195 Someday, linux will always be async. */
4196 if (!target_async_permitted
)
4199 /* See target.h/target_async_mask. */
4200 return linux_nat_async_mask_value
;
/* target_supports_non_stop implementation: this target supports
   non-stop mode.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
4209 /* target_async_mask implementation. */
4212 linux_nat_async_mask (int mask
)
4215 current_state
= linux_nat_async_mask_value
;
4217 if (current_state
!= mask
)
4221 linux_nat_async (NULL
, 0);
4222 linux_nat_async_mask_value
= mask
;
4226 linux_nat_async_mask_value
= mask
;
4227 linux_nat_async (inferior_event_handler
, 0);
4231 return current_state
;
4234 /* Pop an event from the event pipe. */
4237 linux_nat_event_pipe_pop (int* ptr_status
, int* ptr_options
)
4239 struct waitpid_result event
= {0};
4244 ret
= read (linux_nat_event_pipe
[0], &event
, sizeof (event
));
4246 while (ret
== -1 && errno
== EINTR
);
4248 gdb_assert (ret
== sizeof (event
));
4250 *ptr_status
= event
.status
;
4251 *ptr_options
= event
.options
;
4253 linux_nat_num_queued_events
--;
4258 /* Push an event into the event pipe. */
4261 linux_nat_event_pipe_push (int pid
, int status
, int options
)
4264 struct waitpid_result event
= {0};
4266 event
.status
= status
;
4267 event
.options
= options
;
4271 ret
= write (linux_nat_event_pipe
[1], &event
, sizeof (event
));
4272 gdb_assert ((ret
== -1 && errno
== EINTR
) || ret
== sizeof (event
));
4273 } while (ret
== -1 && errno
== EINTR
);
4275 linux_nat_num_queued_events
++;
4279 get_pending_events (void)
4281 int status
, options
, pid
;
4283 if (!target_async_permitted
4284 || linux_nat_async_events_state
!= sigchld_async
)
4285 internal_error (__FILE__
, __LINE__
,
4286 "get_pending_events called with async masked");
4291 options
= __WCLONE
| WNOHANG
;
4295 pid
= waitpid (-1, &status
, options
);
4297 while (pid
== -1 && errno
== EINTR
);
4304 pid
= waitpid (-1, &status
, options
);
4306 while (pid
== -1 && errno
== EINTR
);
4310 /* No more children reporting events. */
4313 if (debug_linux_nat_async
)
4314 fprintf_unfiltered (gdb_stdlog
, "\
4315 get_pending_events: pid(%d), status(%x), options (%x)\n",
4316 pid
, status
, options
);
4318 linux_nat_event_pipe_push (pid
, status
, options
);
4321 if (debug_linux_nat_async
)
4322 fprintf_unfiltered (gdb_stdlog
, "\
4323 get_pending_events: linux_nat_num_queued_events(%d)\n",
4324 linux_nat_num_queued_events
);
4327 /* SIGCHLD handler for async mode. */
4330 async_sigchld_handler (int signo
)
4332 if (debug_linux_nat_async
)
4333 fprintf_unfiltered (gdb_stdlog
, "async_sigchld_handler\n");
4335 get_pending_events ();
4338 /* Set SIGCHLD handling state to STATE. Returns previous state. */
4340 static enum sigchld_state
4341 linux_nat_async_events (enum sigchld_state state
)
4343 enum sigchld_state current_state
= linux_nat_async_events_state
;
4345 if (debug_linux_nat_async
)
4346 fprintf_unfiltered (gdb_stdlog
,
4347 "LNAE: state(%d): linux_nat_async_events_state(%d), "
4348 "linux_nat_num_queued_events(%d)\n",
4349 state
, linux_nat_async_events_state
,
4350 linux_nat_num_queued_events
);
4352 if (current_state
!= state
)
4355 sigemptyset (&mask
);
4356 sigaddset (&mask
, SIGCHLD
);
4358 /* Always block before changing state. */
4359 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4361 /* Set new state. */
4362 linux_nat_async_events_state
= state
;
4368 /* Block target events. */
4369 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4370 sigaction (SIGCHLD
, &sync_sigchld_action
, NULL
);
4371 /* Get events out of queue, and make them available to
4372 queued_waitpid / my_waitpid. */
4373 pipe_to_local_event_queue ();
4378 /* Unblock target events for async mode. */
4380 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4382 /* Put events we already waited on, in the pipe first, so
4384 local_event_queue_to_pipe ();
4385 /* While in masked async, we may have not collected all
4386 the pending events. Get them out now. */
4387 get_pending_events ();
4390 sigaction (SIGCHLD
, &async_sigchld_action
, NULL
);
4391 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
4394 case sigchld_default
:
4396 /* SIGCHLD default mode. */
4397 sigaction (SIGCHLD
, &sigchld_default_action
, NULL
);
4399 /* Get events out of queue, and make them available to
4400 queued_waitpid / my_waitpid. */
4401 pipe_to_local_event_queue ();
4403 /* Unblock SIGCHLD. */
4404 sigprocmask (SIG_UNBLOCK
, &mask
, NULL
);
4410 return current_state
;
4413 static int async_terminal_is_ours
= 1;
4415 /* target_terminal_inferior implementation. */
4418 linux_nat_terminal_inferior (void)
4420 if (!target_is_async_p ())
4422 /* Async mode is disabled. */
4423 terminal_inferior ();
4427 /* GDB should never give the terminal to the inferior, if the
4428 inferior is running in the background (run&, continue&, etc.).
4429 This check can be removed when the common code is fixed. */
4430 if (!sync_execution
)
4433 terminal_inferior ();
4435 if (!async_terminal_is_ours
)
4438 delete_file_handler (input_fd
);
4439 async_terminal_is_ours
= 0;
4443 /* target_terminal_ours implementation. */
4446 linux_nat_terminal_ours (void)
4448 if (!target_is_async_p ())
4450 /* Async mode is disabled. */
4455 /* GDB should never give the terminal to the inferior if the
4456 inferior is running in the background (run&, continue&, etc.),
4457 but claiming it sure should. */
4460 if (!sync_execution
)
4463 if (async_terminal_is_ours
)
4466 clear_sigint_trap ();
4467 add_file_handler (input_fd
, stdin_event_handler
, 0);
4468 async_terminal_is_ours
= 1;
4471 static void (*async_client_callback
) (enum inferior_event_type event_type
,
4473 static void *async_client_context
;
4476 linux_nat_async_file_handler (int error
, gdb_client_data client_data
)
4478 async_client_callback (INF_REG_EVENT
, async_client_context
);
4481 /* target_async implementation. */
4484 linux_nat_async (void (*callback
) (enum inferior_event_type event_type
,
4485 void *context
), void *context
)
4487 if (linux_nat_async_mask_value
== 0 || !target_async_permitted
)
4488 internal_error (__FILE__
, __LINE__
,
4489 "Calling target_async when async is masked");
4491 if (callback
!= NULL
)
4493 async_client_callback
= callback
;
4494 async_client_context
= context
;
4495 add_file_handler (linux_nat_event_pipe
[0],
4496 linux_nat_async_file_handler
, NULL
);
4498 linux_nat_async_events (sigchld_async
);
4502 async_client_callback
= callback
;
4503 async_client_context
= context
;
4505 linux_nat_async_events (sigchld_sync
);
4506 delete_file_handler (linux_nat_event_pipe
[0]);
4511 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
4515 linux_nat_stop_lwp (struct lwp_info
*lwp
, void *data
)
4517 ptid_t ptid
= * (ptid_t
*) data
;
4519 if (ptid_equal (lwp
->ptid
, ptid
)
4520 || ptid_equal (minus_one_ptid
, ptid
)
4521 || (ptid_is_pid (ptid
)
4522 && ptid_get_pid (ptid
) == ptid_get_pid (lwp
->ptid
)))
4528 if (debug_linux_nat
)
4529 fprintf_unfiltered (gdb_stdlog
,
4530 "LNSL: running -> suspending %s\n",
4531 target_pid_to_str (lwp
->ptid
));
4533 /* Peek once, to check if we've already waited for this
4535 pid
= queued_waitpid_1 (ptid_get_lwp (lwp
->ptid
), &status
,
4536 lwp
->cloned
? __WCLONE
: 0, 1 /* peek */);
4540 ptid_t ptid
= lwp
->ptid
;
4542 stop_callback (lwp
, NULL
);
4543 stop_wait_callback (lwp
, NULL
);
4545 /* If the lwp exits while we try to stop it, there's
4546 nothing else to do. */
4547 lwp
= find_lwp_pid (ptid
);
4551 pid
= queued_waitpid_1 (ptid_get_lwp (lwp
->ptid
), &status
,
4552 lwp
->cloned
? __WCLONE
: 0,
4556 /* If we didn't collect any signal other than SIGSTOP while
4557 stopping the LWP, push a SIGNAL_0 event. In either case,
4558 the event-loop will end up calling target_wait which will
4561 push_waitpid (ptid_get_lwp (lwp
->ptid
), W_STOPCODE (0),
4562 lwp
->cloned
? __WCLONE
: 0);
4566 /* Already known to be stopped; do nothing. */
4568 if (debug_linux_nat
)
4570 if (find_thread_pid (lwp
->ptid
)->stop_requested
)
4571 fprintf_unfiltered (gdb_stdlog
, "\
4572 LNSL: already stopped/stop_requested %s\n",
4573 target_pid_to_str (lwp
->ptid
));
4575 fprintf_unfiltered (gdb_stdlog
, "\
4576 LNSL: already stopped/no stop_requested yet %s\n",
4577 target_pid_to_str (lwp
->ptid
));
4585 linux_nat_stop (ptid_t ptid
)
4589 linux_nat_async_events (sigchld_sync
);
4590 iterate_over_lwps (linux_nat_stop_lwp
, &ptid
);
4591 target_async (inferior_event_handler
, 0);
4594 linux_ops
->to_stop (ptid
);
4598 linux_nat_add_target (struct target_ops
*t
)
4600 /* Save the provided single-threaded target. We save this in a separate
4601 variable because another target we've inherited from (e.g. inf-ptrace)
4602 may have saved a pointer to T; we want to use it for the final
4603 process stratum target. */
4604 linux_ops_saved
= *t
;
4605 linux_ops
= &linux_ops_saved
;
4607 /* Override some methods for multithreading. */
4608 t
->to_create_inferior
= linux_nat_create_inferior
;
4609 t
->to_attach
= linux_nat_attach
;
4610 t
->to_detach
= linux_nat_detach
;
4611 t
->to_resume
= linux_nat_resume
;
4612 t
->to_wait
= linux_nat_wait
;
4613 t
->to_xfer_partial
= linux_nat_xfer_partial
;
4614 t
->to_kill
= linux_nat_kill
;
4615 t
->to_mourn_inferior
= linux_nat_mourn_inferior
;
4616 t
->to_thread_alive
= linux_nat_thread_alive
;
4617 t
->to_pid_to_str
= linux_nat_pid_to_str
;
4618 t
->to_has_thread_control
= tc_schedlock
;
4620 t
->to_can_async_p
= linux_nat_can_async_p
;
4621 t
->to_is_async_p
= linux_nat_is_async_p
;
4622 t
->to_supports_non_stop
= linux_nat_supports_non_stop
;
4623 t
->to_async
= linux_nat_async
;
4624 t
->to_async_mask
= linux_nat_async_mask
;
4625 t
->to_terminal_inferior
= linux_nat_terminal_inferior
;
4626 t
->to_terminal_ours
= linux_nat_terminal_ours
;
4628 /* Methods for non-stop support. */
4629 t
->to_stop
= linux_nat_stop
;
4631 /* We don't change the stratum; this target will sit at
4632 process_stratum and thread_db will set at thread_stratum. This
4633 is a little strange, since this is a multi-threaded-capable
4634 target, but we want to be on the stack below thread_db, and we
4635 also want to be used for single-threaded processes. */
4639 /* TODO: Eliminate this and have libthread_db use
4640 find_target_beneath. */
4644 /* Register a method to call whenever a new thread is attached. */
4646 linux_nat_set_new_thread (struct target_ops
*t
, void (*new_thread
) (ptid_t
))
4648 /* Save the pointer. We only support a single registered instance
4649 of the GNU/Linux native target, so we do not need to map this to
4651 linux_nat_new_thread
= new_thread
;
4654 /* Return the saved siginfo associated with PTID. */
4656 linux_nat_get_siginfo (ptid_t ptid
)
4658 struct lwp_info
*lp
= find_lwp_pid (ptid
);
4660 gdb_assert (lp
!= NULL
);
4662 return &lp
->siginfo
;
4665 /* Enable/Disable async mode. */
4668 linux_nat_setup_async (void)
4670 if (pipe (linux_nat_event_pipe
) == -1)
4671 internal_error (__FILE__
, __LINE__
,
4672 "creating event pipe failed.");
4673 fcntl (linux_nat_event_pipe
[0], F_SETFL
, O_NONBLOCK
);
4674 fcntl (linux_nat_event_pipe
[1], F_SETFL
, O_NONBLOCK
);
4678 _initialize_linux_nat (void)
4682 add_info ("proc", linux_nat_info_proc_cmd
, _("\
4683 Show /proc process information about any running process.\n\
4684 Specify any process id, or use the program being debugged by default.\n\
4685 Specify any of the following keywords for detailed info:\n\
4686 mappings -- list of mapped memory regions.\n\
4687 stat -- list a bunch of random process info.\n\
4688 status -- list a different bunch of random process info.\n\
4689 all -- list all available /proc info."));
4691 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance
,
4692 &debug_linux_nat
, _("\
4693 Set debugging of GNU/Linux lwp module."), _("\
4694 Show debugging of GNU/Linux lwp module."), _("\
4695 Enables printf debugging output."),
4697 show_debug_linux_nat
,
4698 &setdebuglist
, &showdebuglist
);
4700 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance
,
4701 &debug_linux_nat_async
, _("\
4702 Set debugging of GNU/Linux async lwp module."), _("\
4703 Show debugging of GNU/Linux async lwp module."), _("\
4704 Enables printf debugging output."),
4706 show_debug_linux_nat_async
,
4707 &setdebuglist
, &showdebuglist
);
4709 /* Get the default SIGCHLD action. Used while forking an inferior
4710 (see linux_nat_create_inferior/linux_nat_async_events). */
4711 sigaction (SIGCHLD
, NULL
, &sigchld_default_action
);
4713 /* Block SIGCHLD by default. Doing this early prevents it getting
4714 unblocked if an exception is thrown due to an error while the
4715 inferior is starting (sigsetjmp/siglongjmp). */
4716 sigemptyset (&mask
);
4717 sigaddset (&mask
, SIGCHLD
);
4718 sigprocmask (SIG_BLOCK
, &mask
, NULL
);
4720 /* Save this mask as the default. */
4721 sigprocmask (SIG_SETMASK
, NULL
, &normal_mask
);
4723 /* The synchronous SIGCHLD handler. */
4724 sync_sigchld_action
.sa_handler
= sigchld_handler
;
4725 sigemptyset (&sync_sigchld_action
.sa_mask
);
4726 sync_sigchld_action
.sa_flags
= SA_RESTART
;
4728 /* Make it the default. */
4729 sigaction (SIGCHLD
, &sync_sigchld_action
, NULL
);
4731 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4732 sigprocmask (SIG_SETMASK
, NULL
, &suspend_mask
);
4733 sigdelset (&suspend_mask
, SIGCHLD
);
4735 /* SIGCHLD handler for async mode. */
4736 async_sigchld_action
.sa_handler
= async_sigchld_handler
;
4737 sigemptyset (&async_sigchld_action
.sa_mask
);
4738 async_sigchld_action
.sa_flags
= SA_RESTART
;
4740 linux_nat_setup_async ();
4742 add_setshow_boolean_cmd ("disable-randomization", class_support
,
4743 &disable_randomization
, _("\
4744 Set disabling of debuggee's virtual address space randomization."), _("\
4745 Show disabling of debuggee's virtual address space randomization."), _("\
4746 When this mode is on (which is the default), randomization of the virtual\n\
4747 address space is disabled. Standalone programs run with the randomization\n\
4748 enabled by default on some platforms."),
4749 &set_disable_randomization
,
4750 &show_disable_randomization
,
4751 &setlist
, &showlist
);
4755 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4756 the GNU/Linux Threads library and therefore doesn't really belong
4759 /* Read variable NAME in the target and return its value if found.
4760 Otherwise return zero. It is assumed that the type of the variable
4764 get_signo (const char *name
)
4766 struct minimal_symbol
*ms
;
4769 ms
= lookup_minimal_symbol (name
, NULL
, NULL
);
4773 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms
), (gdb_byte
*) &signo
,
4774 sizeof (signo
)) != 0)
4780 /* Return the set of signals used by the threads library in *SET. */
4783 lin_thread_get_thread_signals (sigset_t
*set
)
4785 struct sigaction action
;
4786 int restart
, cancel
;
4787 sigset_t blocked_mask
;
4789 sigemptyset (&blocked_mask
);
4792 restart
= get_signo ("__pthread_sig_restart");
4793 cancel
= get_signo ("__pthread_sig_cancel");
4795 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4796 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4797 not provide any way for the debugger to query the signal numbers -
4798 fortunately they don't change! */
4801 restart
= __SIGRTMIN
;
4804 cancel
= __SIGRTMIN
+ 1;
4806 sigaddset (set
, restart
);
4807 sigaddset (set
, cancel
);
4809 /* The GNU/Linux Threads library makes terminating threads send a
4810 special "cancel" signal instead of SIGCHLD. Make sure we catch
4811 those (to prevent them from terminating GDB itself, which is
4812 likely to be their default action) and treat them the same way as
4815 action
.sa_handler
= sigchld_handler
;
4816 sigemptyset (&action
.sa_mask
);
4817 action
.sa_flags
= SA_RESTART
;
4818 sigaction (cancel
, &action
, NULL
);
4820 /* We block the "cancel" signal throughout this code ... */
4821 sigaddset (&blocked_mask
, cancel
);
4822 sigprocmask (SIG_BLOCK
, &blocked_mask
, NULL
);
4824 /* ... except during a sigsuspend. */
4825 sigdelset (&suspend_mask
, cancel
);