1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 2 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place - Suite 330,
20 Boston, MA 02111-1307, USA. */
21
22 #include "defs.h"
23 #include "inferior.h"
24 #include "target.h"
25 #include "gdb_string.h"
26 #include "gdb_wait.h"
27 #include "gdb_assert.h"
28 #ifdef HAVE_TKILL_SYSCALL
29 #include <unistd.h>
30 #include <sys/syscall.h>
31 #endif
32 #include <sys/ptrace.h>
33 #include "linux-nat.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include <sys/param.h> /* for MAXPATHLEN */
38 #include <sys/procfs.h> /* for elf_gregset etc. */
39 #include "elf-bfd.h" /* for elfcore_write_* */
40 #include "gregset.h" /* for gregset */
41 #include "gdbcore.h" /* for get_exec_file */
42 #include <ctype.h> /* for isdigit */
43 #include "gdbthread.h" /* for struct thread_info etc. */
44 #include "gdb_stat.h" /* for struct stat */
45 #include <fcntl.h> /* for O_RDONLY */
46
47 #ifndef O_LARGEFILE
48 #define O_LARGEFILE 0
49 #endif
50
51 /* If the system headers did not provide the constants, hard-code the normal
52 values. */
53 #ifndef PTRACE_EVENT_FORK
54
55 #define PTRACE_SETOPTIONS 0x4200
56 #define PTRACE_GETEVENTMSG 0x4201
57
58 /* options set using PTRACE_SETOPTIONS */
59 #define PTRACE_O_TRACESYSGOOD 0x00000001
60 #define PTRACE_O_TRACEFORK 0x00000002
61 #define PTRACE_O_TRACEVFORK 0x00000004
62 #define PTRACE_O_TRACECLONE 0x00000008
63 #define PTRACE_O_TRACEEXEC 0x00000010
64 #define PTRACE_O_TRACEVFORKDONE 0x00000020
65 #define PTRACE_O_TRACEEXIT 0x00000040
66
67 /* Wait extended result codes for the above trace options. */
68 #define PTRACE_EVENT_FORK 1
69 #define PTRACE_EVENT_VFORK 2
70 #define PTRACE_EVENT_CLONE 3
71 #define PTRACE_EVENT_EXEC 4
72 #define PTRACE_EVENT_VFORK_DONE 5
73 #define PTRACE_EVENT_EXIT 6
74
75 #endif /* PTRACE_EVENT_FORK */
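/* Illustrative sketch (an assumption about typical ptrace usage, not code
   this file runs here; the real decoding is in linux_handle_extended_wait
   below): when one of the PTRACE_O_TRACE* options is set and the matching
   event fires, waitpid reports a SIGTRAP stop with the event code in the
   high bits of the status word, and PTRACE_GETEVENTMSG retrieves the
   associated message (e.g. the new child's PID):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
         && (status >> 16) != 0)
       {
         int event = status >> 16;           /* PTRACE_EVENT_FORK, etc.  */
         unsigned long msg;

         ptrace (PTRACE_GETEVENTMSG, pid, 0, &msg);
       }
 */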
76
77 /* We can't always assume that this flag is available, but all systems
78 with the ptrace event handlers also have __WALL, so it's safe to use
79 here. */
80 #ifndef __WALL
81 #define __WALL 0x40000000 /* Wait for any child. */
82 #endif
83
84 static int debug_linux_nat;
85 static void
86 show_debug_linux_nat (struct ui_file *file, int from_tty,
87 struct cmd_list_element *c, const char *value)
88 {
89 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
90 value);
91 }
92
93 static int linux_parent_pid;
94
95 struct simple_pid_list
96 {
97 int pid;
98 struct simple_pid_list *next;
99 };
100 struct simple_pid_list *stopped_pids;
101
102 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
103 cannot be used, 1 if it can. */
104
105 static int linux_supports_tracefork_flag = -1;
106
107 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
108 PTRACE_O_TRACEVFORKDONE. */
109
110 static int linux_supports_tracevforkdone_flag = -1;
111
112 \f
113 /* Trivial list manipulation functions to keep track of a list of
114 new stopped processes. */
115 static void
116 add_to_pid_list (struct simple_pid_list **listp, int pid)
117 {
118 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
119 new_pid->pid = pid;
120 new_pid->next = *listp;
121 *listp = new_pid;
122 }
123
124 static int
125 pull_pid_from_list (struct simple_pid_list **listp, int pid)
126 {
127 struct simple_pid_list **p;
128
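  /* Walk the list through a pointer-to-pointer so the matching node can
     be unlinked without special-casing the head of the list.  */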
129 for (p = listp; *p != NULL; p = &(*p)->next)
130 if ((*p)->pid == pid)
131 {
132 struct simple_pid_list *next = (*p)->next;
133 xfree (*p);
134 *p = next;
135 return 1;
136 }
137 return 0;
138 }
139
140 void
141 linux_record_stopped_pid (int pid)
142 {
143 add_to_pid_list (&stopped_pids, pid);
144 }
145
146 \f
147 /* A helper function for linux_test_for_tracefork, called after fork (). */
148
149 static void
150 linux_tracefork_child (void)
151 {
152 int ret;
153
154 ptrace (PTRACE_TRACEME, 0, 0, 0);
155 kill (getpid (), SIGSTOP);
156 fork ();
157 _exit (0);
158 }
159
160 /* Wrapper function for waitpid which handles EINTR. */
161
162 static int
163 my_waitpid (int pid, int *status, int flags)
164 {
165 int ret;
166 do
167 {
168 ret = waitpid (pid, status, flags);
169 }
170 while (ret == -1 && errno == EINTR);
171
172 return ret;
173 }
174
175 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
176
177 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
178 we know that the feature is not available. This may change the tracing
179 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
180
181 However, if it succeeds, we don't know for sure that the feature is
182 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
183 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
184 fork tracing, and let it fork. If the process exits, we assume that we
185 can't use TRACEFORK; if we get the fork notification, and we can extract
186 the new child's PID, then we assume that we can. */
187
188 static void
189 linux_test_for_tracefork (int original_pid)
190 {
191 int child_pid, ret, status;
192 long second_pid;
193
194 linux_supports_tracefork_flag = 0;
195 linux_supports_tracevforkdone_flag = 0;
196
197 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
198 if (ret != 0)
199 return;
200
201 child_pid = fork ();
202 if (child_pid == -1)
203 perror_with_name (("fork"));
204
205 if (child_pid == 0)
206 linux_tracefork_child ();
207
208 ret = my_waitpid (child_pid, &status, 0);
209 if (ret == -1)
210 perror_with_name (("waitpid"));
211 else if (ret != child_pid)
212 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
213 if (! WIFSTOPPED (status))
214 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
215
216 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
217 if (ret != 0)
218 {
219 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
220 if (ret != 0)
221 {
222 warning (_("linux_test_for_tracefork: failed to kill child"));
223 return;
224 }
225
226 ret = my_waitpid (child_pid, &status, 0);
227 if (ret != child_pid)
228 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
229 else if (!WIFSIGNALED (status))
230 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
231 "killed child"), status);
232
233 return;
234 }
235
236 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
237 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
238 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
239 linux_supports_tracevforkdone_flag = (ret == 0);
240
241 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
242 if (ret != 0)
243 warning (_("linux_test_for_tracefork: failed to resume child"));
244
245 ret = my_waitpid (child_pid, &status, 0);
246
247 if (ret == child_pid && WIFSTOPPED (status)
248 && status >> 16 == PTRACE_EVENT_FORK)
249 {
250 second_pid = 0;
251 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
252 if (ret == 0 && second_pid != 0)
253 {
254 int second_status;
255
256 linux_supports_tracefork_flag = 1;
257 my_waitpid (second_pid, &second_status, 0);
258 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
259 if (ret != 0)
260 warning (_("linux_test_for_tracefork: failed to kill second child"));
261 }
262 }
263 else
264 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
265 "(%d, status 0x%x)"), ret, status);
266
267 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
268 if (ret != 0)
269 warning (_("linux_test_for_tracefork: failed to kill child"));
270 my_waitpid (child_pid, &status, 0);
271 }
272
273 /* Return non-zero iff we have tracefork functionality available.
274 This function also sets linux_supports_tracefork_flag. */
275
276 static int
277 linux_supports_tracefork (int pid)
278 {
279 if (linux_supports_tracefork_flag == -1)
280 linux_test_for_tracefork (pid);
281 return linux_supports_tracefork_flag;
282 }
283
284 static int
285 linux_supports_tracevforkdone (int pid)
286 {
287 if (linux_supports_tracefork_flag == -1)
288 linux_test_for_tracefork (pid);
289 return linux_supports_tracevforkdone_flag;
290 }
291
292 \f
293 void
294 linux_enable_event_reporting (ptid_t ptid)
295 {
296 int pid = ptid_get_lwp (ptid);
297 int options;
298
299 if (pid == 0)
300 pid = ptid_get_pid (ptid);
301
302 if (! linux_supports_tracefork (pid))
303 return;
304
305 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
306 | PTRACE_O_TRACECLONE;
307 if (linux_supports_tracevforkdone (pid))
308 options |= PTRACE_O_TRACEVFORKDONE;
309
310 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
311 read-only process state. */
312
313 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
314 }
315
316 void
317 child_post_attach (int pid)
318 {
319 linux_enable_event_reporting (pid_to_ptid (pid));
320 }
321
322 void
323 linux_child_post_startup_inferior (ptid_t ptid)
324 {
325 linux_enable_event_reporting (ptid);
326 }
327
328 #ifndef LINUX_CHILD_POST_STARTUP_INFERIOR
329 void
330 child_post_startup_inferior (ptid_t ptid)
331 {
332 linux_child_post_startup_inferior (ptid);
333 }
334 #endif
335
336 int
337 child_follow_fork (int follow_child)
338 {
339 ptid_t last_ptid;
340 struct target_waitstatus last_status;
341 int has_vforked;
342 int parent_pid, child_pid;
343
344 get_last_target_status (&last_ptid, &last_status);
345 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
346 parent_pid = ptid_get_lwp (last_ptid);
347 if (parent_pid == 0)
348 parent_pid = ptid_get_pid (last_ptid);
349 child_pid = last_status.value.related_pid;
350
351 if (! follow_child)
352 {
353 /* We're already attached to the parent, by default. */
354
355 /* Before detaching from the child, remove all breakpoints from
356 it. (This won't actually modify the breakpoint list, but will
357 physically remove the breakpoints from the child.) */
358 /* If we vforked this will remove the breakpoints from the parent
359 also, but they'll be reinserted below. */
360 detach_breakpoints (child_pid);
361
362 if (debug_linux_nat)
363 {
364 target_terminal_ours ();
365 fprintf_unfiltered (gdb_stdlog,
366 "Detaching after fork from child process %d.\n",
367 child_pid);
368 }
369
370 ptrace (PTRACE_DETACH, child_pid, 0, 0);
371
372 if (has_vforked)
373 {
374 gdb_assert (linux_supports_tracefork_flag >= 0);
375 if (linux_supports_tracevforkdone (0))
376 {
377 int status;
378
379 ptrace (PTRACE_CONT, parent_pid, 0, 0);
380 my_waitpid (parent_pid, &status, __WALL);
381 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
382 warning (_("Unexpected waitpid result %06x when waiting for "
383 "vfork-done"), status);
384 }
385 else
386 {
387 /* We can't insert breakpoints until the child has
388 finished with the shared memory region. We need to
389 wait until that happens. Ideal would be to just
390 call:
391 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
392 - waitpid (parent_pid, &status, __WALL);
393 However, most architectures can't handle a syscall
394 being traced on the way out if it wasn't traced on
395 the way in.
396
397 We might also think to loop, continuing the child
398 until it exits or gets a SIGTRAP. One problem is
399 that the child might call ptrace with PTRACE_TRACEME.
400
401 There's no simple and reliable way to figure out when
402 the vforked child will be done with its copy of the
403 shared memory. We could step it out of the syscall,
404 two instructions, let it go, and then single-step the
405 parent once. When we have hardware single-step, this
406 would work; with software single-step it could still
407 be made to work but we'd have to be able to insert
408 single-step breakpoints in the child, and we'd have
409 to insert -just- the single-step breakpoint in the
410 parent. Very awkward.
411
412 In the end, the best we can do is to make sure it
413 runs for a little while. Hopefully it will be out of
414 range of any breakpoints we reinsert. Usually this
415 is only the single-step breakpoint at vfork's return
416 point. */
417
418 usleep (10000);
419 }
420
421 /* Since we vforked, breakpoints were removed in the parent
422 too. Put them back. */
423 reattach_breakpoints (parent_pid);
424 }
425 }
426 else
427 {
428 char child_pid_spelling[40];
429
430 /* Needed to keep the breakpoint lists in sync. */
431 if (! has_vforked)
432 detach_breakpoints (child_pid);
433
434 /* Before detaching from the parent, remove all breakpoints from it. */
435 remove_breakpoints ();
436
437 if (debug_linux_nat)
438 {
439 target_terminal_ours ();
440 fprintf_unfiltered (gdb_stdlog,
441 "Attaching after fork to child process %d.\n",
442 child_pid);
443 }
444
445 /* If we're vforking, we may want to hold on to the parent until
446 the child exits or execs. At exec time we can remove the old
447 breakpoints from the parent and detach it; at exit time we
448 could do the same (or even, sneakily, resume debugging it - the
449 child's exec has failed, or something similar).
450
451 This doesn't clean up "properly", because we can't call
452 target_detach, but that's OK; if the current target is "child",
453 then it doesn't need any further cleanups, and lin_lwp will
454 generally not encounter vfork (vfork is defined to fork
455 in libpthread.so).
456
457 The holding part is very easy if we have VFORKDONE events;
458 but keeping track of both processes is beyond GDB at the
459 moment. So we don't expose the parent to the rest of GDB.
460 Instead we quietly hold onto it until such time as we can
461 safely resume it. */
462
463 if (has_vforked)
464 linux_parent_pid = parent_pid;
465 else
466 target_detach (NULL, 0);
467
468 inferior_ptid = pid_to_ptid (child_pid);
469 push_target (&deprecated_child_ops);
470
471 /* Reset breakpoints in the child as appropriate. */
472 follow_inferior_reset_breakpoints ();
473 }
474
475 return 0;
476 }
477
478 ptid_t
479 linux_handle_extended_wait (int pid, int status,
480 struct target_waitstatus *ourstatus)
481 {
482 int event = status >> 16;
483
484 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
485 || event == PTRACE_EVENT_CLONE)
486 {
487 unsigned long new_pid;
488 int ret;
489
490 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
491
492 /* If we haven't already seen the new PID stop, wait for it now. */
493 if (! pull_pid_from_list (&stopped_pids, new_pid))
494 {
495 /* The new child has a pending SIGSTOP. We can't affect it until it
496 hits the SIGSTOP, but we're already attached. */
497 ret = my_waitpid (new_pid, &status,
498 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
499 if (ret == -1)
500 perror_with_name (_("waiting for new child"));
501 else if (ret != new_pid)
502 internal_error (__FILE__, __LINE__,
503 _("wait returned unexpected PID %d"), ret);
504 else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
505 internal_error (__FILE__, __LINE__,
506 _("wait returned unexpected status 0x%x"), status);
507 }
508
509 if (event == PTRACE_EVENT_FORK)
510 ourstatus->kind = TARGET_WAITKIND_FORKED;
511 else if (event == PTRACE_EVENT_VFORK)
512 ourstatus->kind = TARGET_WAITKIND_VFORKED;
513 else
514 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
515
516 ourstatus->value.related_pid = new_pid;
517 return inferior_ptid;
518 }
519
520 if (event == PTRACE_EVENT_EXEC)
521 {
522 ourstatus->kind = TARGET_WAITKIND_EXECD;
523 ourstatus->value.execd_pathname
524 = xstrdup (child_pid_to_exec_file (pid));
525
526 if (linux_parent_pid)
527 {
528 detach_breakpoints (linux_parent_pid);
529 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
530
531 linux_parent_pid = 0;
532 }
533
534 return inferior_ptid;
535 }
536
537 internal_error (__FILE__, __LINE__,
538 _("unknown ptrace event %d"), event);
539 }
540
541 \f
542 void
543 child_insert_fork_catchpoint (int pid)
544 {
545 if (! linux_supports_tracefork (pid))
546 error (_("Your system does not support fork catchpoints."));
547 }
548
549 void
550 child_insert_vfork_catchpoint (int pid)
551 {
552 if (!linux_supports_tracefork (pid))
553 error (_("Your system does not support vfork catchpoints."));
554 }
555
556 void
557 child_insert_exec_catchpoint (int pid)
558 {
559 if (!linux_supports_tracefork (pid))
560 error (_("Your system does not support exec catchpoints."));
561 }
562
563 void
564 kill_inferior (void)
565 {
566 int status;
567 int pid = PIDGET (inferior_ptid);
568 struct target_waitstatus last;
569 ptid_t last_ptid;
570 int ret;
571
572 if (pid == 0)
573 return;
574
575 /* If we're stopped while forking and we haven't followed yet, kill the
576 other task. We need to do this first because the parent will be
577 sleeping if this is a vfork. */
578
579 get_last_target_status (&last_ptid, &last);
580
581 if (last.kind == TARGET_WAITKIND_FORKED
582 || last.kind == TARGET_WAITKIND_VFORKED)
583 {
584 ptrace (PT_KILL, last.value.related_pid, 0, 0);
585 wait (&status);
586 }
587
588 /* Kill the current process. */
589 ptrace (PT_KILL, pid, 0, 0);
590 ret = wait (&status);
591
592 /* We might get a SIGCHLD instead of an exit status. This is
593 aggravated by the first kill above - a child has just died. */
594
595 while (ret == pid && WIFSTOPPED (status))
596 {
597 ptrace (PT_KILL, pid, 0, 0);
598 ret = wait (&status);
599 }
600
601 target_mourn_inferior ();
602 }
603
604 /* On GNU/Linux there are no real LWPs. The closest thing to LWPs
605 are processes sharing the same VM space. A multi-threaded process
606 is basically a group of such processes. However, such a grouping
607 is almost entirely a user-space issue; the kernel doesn't enforce
608 such a grouping at all (this might change in the future). In
609 general, we'll rely on the threads library (i.e. the GNU/Linux
610 Threads library) to provide such a grouping.
611
612 It is perfectly possible to write a multi-threaded application
613 without the assistance of a threads library, by using the clone
614 system call directly. This module should be able to give some
615 rudimentary support for debugging such applications if developers
616 specify the CLONE_PTRACE flag in the clone system call, and are
617 using the Linux kernel 2.4 or above.
618
619 Note that there are some peculiarities in GNU/Linux that affect
620 this code:
621
622 - In general one should specify the __WCLONE flag to waitpid in
623 order to make it report events for any of the cloned processes
624 (and leave it out for the initial process). However, if a cloned
625 process has exited the exit status is only reported if the
626 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
627 we cannot use it since GDB must work on older systems too.
628
629 - When a traced, cloned process exits and is waited for by the
630 debugger, the kernel reassigns it to the original parent and
631 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
632 library doesn't notice this, which leads to the "zombie problem":
633 When debugged, a multi-threaded process that spawns a lot of
634 threads will run out of processes, even if the threads exit,
635 because the "zombies" stay around. */
636
637 /* List of known LWPs. */
638 static struct lwp_info *lwp_list;
639
640 /* Number of LWPs in the list. */
641 static int num_lwps;
642
643 /* Non-zero if we're running in "threaded" mode. */
644 static int threaded;
645 \f
646
647 #define GET_LWP(ptid) ptid_get_lwp (ptid)
648 #define GET_PID(ptid) ptid_get_pid (ptid)
649 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
650 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
651
652 /* If the last reported event was a SIGTRAP, this variable is set to
653 the process id of the LWP/thread that got it. */
654 ptid_t trap_ptid;
655 \f
656
657 /* This module's target-specific operations. */
658 static struct target_ops linux_nat_ops;
659
660 /* Since we cannot wait (in linux_nat_wait) for the initial process and
661 any cloned processes with a single call to waitpid, we have to use
662 the WNOHANG flag and call waitpid in a loop. To optimize
663 things a bit we use `sigsuspend' to wake us up when a process has
664 something to report (it will send us a SIGCHLD if it has). To make
665 this work we have to juggle with the signal mask. We save the
666 original signal mask such that we can restore it before creating a
667 new process in order to avoid blocking certain signals in the
668 inferior. We then block SIGCHLD during the waitpid/sigsuspend
669 loop. */
670
671 /* Original signal mask. */
672 static sigset_t normal_mask;
673
674 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
675 _initialize_linux_nat. */
676 static sigset_t suspend_mask;
677
678 /* Signals to block to make sigsuspend work. */
679 static sigset_t blocked_mask;
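/* A minimal sketch of the loop described above (assumptions: SIGCHLD is
   already a member of blocked_mask and has a handler installed; variables
   are elided; this is not GDB's actual event loop, which lives in
   linux_nat_wait below):

     sigprocmask (SIG_BLOCK, &blocked_mask, NULL);   /* block SIGCHLD */
     for (;;)
       {
         pid = waitpid (-1, &status, WNOHANG | __WCLONE);
         if (pid > 0)
           break;                          /* an event is available */
         sigsuspend (&suspend_mask);       /* sleep until SIGCHLD arrives */
       }
 */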
680 \f
681
682 /* Prototypes for local functions. */
683 static int stop_wait_callback (struct lwp_info *lp, void *data);
684 static int linux_nat_thread_alive (ptid_t ptid);
685 \f
686 /* Convert wait status STATUS to a string. Used for printing debug
687 messages only. */
688
689 static char *
690 status_to_str (int status)
691 {
692 static char buf[64];
693
694 if (WIFSTOPPED (status))
695 snprintf (buf, sizeof (buf), "%s (stopped)",
696 strsignal (WSTOPSIG (status)));
697 else if (WIFSIGNALED (status))
698 snprintf (buf, sizeof (buf), "%s (terminated)",
699 strsignal (WSTOPSIG (status)));
700 else
701 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
702
703 return buf;
704 }
705
706 /* Initialize the list of LWPs. Note that this module, contrary to
707 what GDB's generic threads layer does for its thread list,
708 re-initializes the LWP list whenever we mourn the inferior or
709 detach from it (which doesn't involve mourning).
710
711 static void
712 init_lwp_list (void)
713 {
714 struct lwp_info *lp, *lpnext;
715
716 for (lp = lwp_list; lp; lp = lpnext)
717 {
718 lpnext = lp->next;
719 xfree (lp);
720 }
721
722 lwp_list = NULL;
723 num_lwps = 0;
724 threaded = 0;
725 }
726
727 /* Add the LWP specified by PTID to the list. If this causes the
728 number of LWPs to become larger than one, go into "threaded" mode.
729 Return a pointer to the structure describing the new LWP. */
730
731 static struct lwp_info *
732 add_lwp (ptid_t ptid)
733 {
734 struct lwp_info *lp;
735
736 gdb_assert (is_lwp (ptid));
737
738 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
739
740 memset (lp, 0, sizeof (struct lwp_info));
741
742 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
743
744 lp->ptid = ptid;
745
746 lp->next = lwp_list;
747 lwp_list = lp;
748 if (++num_lwps > 1)
749 threaded = 1;
750
751 return lp;
752 }
753
754 /* Remove the LWP specified by PTID from the list. */
755
756 static void
757 delete_lwp (ptid_t ptid)
758 {
759 struct lwp_info *lp, *lpprev;
760
761 lpprev = NULL;
762
763 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
764 if (ptid_equal (lp->ptid, ptid))
765 break;
766
767 if (!lp)
768 return;
769
770 /* We don't go back to "non-threaded" mode if the number of threads
771 becomes less than two. */
772 num_lwps--;
773
774 if (lpprev)
775 lpprev->next = lp->next;
776 else
777 lwp_list = lp->next;
778
779 xfree (lp);
780 }
781
782 /* Return a pointer to the structure describing the LWP corresponding
783 to PID. If no corresponding LWP could be found, return NULL. */
784
785 static struct lwp_info *
786 find_lwp_pid (ptid_t ptid)
787 {
788 struct lwp_info *lp;
789 int lwp;
790
791 if (is_lwp (ptid))
792 lwp = GET_LWP (ptid);
793 else
794 lwp = GET_PID (ptid);
795
796 for (lp = lwp_list; lp; lp = lp->next)
797 if (lwp == GET_LWP (lp->ptid))
798 return lp;
799
800 return NULL;
801 }
802
803 /* Call CALLBACK with its second argument set to DATA for every LWP in
804 the list. If CALLBACK returns 1 for a particular LWP, return a
805 pointer to the structure describing that LWP immediately.
806 Otherwise return NULL. */
807
808 struct lwp_info *
809 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
810 {
811 struct lwp_info *lp, *lpnext;
812
813 for (lp = lwp_list; lp; lp = lpnext)
814 {
815 lpnext = lp->next;
816 if ((*callback) (lp, data))
817 return lp;
818 }
819
820 return NULL;
821 }
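/* Usage sketch (illustrative; the callbacks this file actually installs,
   such as stop_callback and resume_callback, are defined further below):

     static int
     count_stopped_callback (struct lwp_info *lp, void *data)
     {
       if (lp->stopped)
         (*(int *) data)++;
       return 0;                  /* returning non-zero stops the walk */
     }

     int n = 0;
     iterate_over_lwps (count_stopped_callback, &n);
 */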
822
823 /* Attach to the LWP specified by PTID. If VERBOSE is non-zero, print
824 a message telling the user that a new LWP has been added to the
825 process. */
826
827 void
828 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
829 {
830 struct lwp_info *lp, *found_lp;
831
832 gdb_assert (is_lwp (ptid));
833
834 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
835 to interrupt either the ptrace() or waitpid() calls below. */
836 if (!sigismember (&blocked_mask, SIGCHLD))
837 {
838 sigaddset (&blocked_mask, SIGCHLD);
839 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
840 }
841
842 if (verbose)
843 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
844
845 found_lp = lp = find_lwp_pid (ptid);
846 if (lp == NULL)
847 lp = add_lwp (ptid);
848
849 /* We assume that we're already attached to any LWP that has an id
850 equal to the overall process id, and to any LWP that is already
851 in our list of LWPs. If we're not seeing exit events from threads
852 and we've had PID wraparound since we last tried to stop all threads,
853 this assumption might be wrong; fortunately, this is very unlikely
854 to happen. */
855 if (GET_LWP (ptid) != GET_PID (ptid) && found_lp == NULL)
856 {
857 pid_t pid;
858 int status;
859
860 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
861 error (_("Can't attach %s: %s"), target_pid_to_str (ptid),
862 safe_strerror (errno));
863
864 if (debug_linux_nat)
865 fprintf_unfiltered (gdb_stdlog,
866 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
867 target_pid_to_str (ptid));
868
869 pid = my_waitpid (GET_LWP (ptid), &status, 0);
870 if (pid == -1 && errno == ECHILD)
871 {
872 /* Try again with __WCLONE to check cloned processes. */
873 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
874 lp->cloned = 1;
875 }
876
877 gdb_assert (pid == GET_LWP (ptid)
878 && WIFSTOPPED (status) && WSTOPSIG (status));
879
880 child_post_attach (pid);
881
882 lp->stopped = 1;
883
884 if (debug_linux_nat)
885 {
886 fprintf_unfiltered (gdb_stdlog,
887 "LLAL: waitpid %s received %s\n",
888 target_pid_to_str (ptid),
889 status_to_str (status));
890 }
891 }
892 else
893 {
894 /* We assume that the LWP representing the original process is
895 already stopped. Mark it as stopped in the data structure
896 that the linux ptrace layer uses to keep track of threads.
897 Note that this won't have already been done since the main
898 thread will have, we assume, been stopped by an attach from a
899 different layer. */
900 lp->stopped = 1;
901 }
902 }
903
904 static void
905 linux_nat_attach (char *args, int from_tty)
906 {
907 struct lwp_info *lp;
908 pid_t pid;
909 int status;
910
911 /* FIXME: We should probably accept a list of process id's, and
912 attach all of them. */
913 deprecated_child_ops.to_attach (args, from_tty);
914
915 /* Add the initial process as the first LWP to the list. */
916 lp = add_lwp (BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid)));
917
918 /* Make sure the initial process is stopped. The user-level threads
919 layer might want to poke around in the inferior, and that won't
920 work if things haven't stabilized yet. */
921 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
922 if (pid == -1 && errno == ECHILD)
923 {
924 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
925
926 /* Try again with __WCLONE to check cloned processes. */
927 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
928 lp->cloned = 1;
929 }
930
931 gdb_assert (pid == GET_PID (inferior_ptid)
932 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
933
934 lp->stopped = 1;
935
936 /* Fake the SIGSTOP that core GDB expects. */
937 lp->status = W_STOPCODE (SIGSTOP);
938 lp->resumed = 1;
939 if (debug_linux_nat)
940 {
941 fprintf_unfiltered (gdb_stdlog,
942 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
943 }
944 }
945
946 static int
947 detach_callback (struct lwp_info *lp, void *data)
948 {
949 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
950
951 if (debug_linux_nat && lp->status)
952 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
953 strsignal (WSTOPSIG (lp->status)),
954 target_pid_to_str (lp->ptid));
955
956 while (lp->signalled && lp->stopped)
957 {
958 errno = 0;
959 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
960 WSTOPSIG (lp->status)) < 0)
961 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
962 safe_strerror (errno));
963
964 if (debug_linux_nat)
965 fprintf_unfiltered (gdb_stdlog,
966 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
967 target_pid_to_str (lp->ptid),
968 status_to_str (lp->status));
969
970 lp->stopped = 0;
971 lp->signalled = 0;
972 lp->status = 0;
973 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
974 here. But since lp->signalled was cleared above,
975 stop_wait_callback didn't do anything; the process was left
976 running. Shouldn't we be waiting for it to stop?
977 I've removed the call, since stop_wait_callback now does do
978 something when called with lp->signalled == 0. */
979
980 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
981 }
982
983 /* We don't actually detach from the LWP that has an id equal to the
984 overall process id just yet. */
985 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
986 {
987 errno = 0;
988 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
989 WSTOPSIG (lp->status)) < 0)
990 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
991 safe_strerror (errno));
992
993 if (debug_linux_nat)
994 fprintf_unfiltered (gdb_stdlog,
995 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
996 target_pid_to_str (lp->ptid),
997 strsignal (WSTOPSIG (lp->status)));
998
999 delete_lwp (lp->ptid);
1000 }
1001
1002 return 0;
1003 }
1004
1005 static void
1006 linux_nat_detach (char *args, int from_tty)
1007 {
1008 iterate_over_lwps (detach_callback, NULL);
1009
1010 /* Only the initial process should be left right now. */
1011 gdb_assert (num_lwps == 1);
1012
1013 trap_ptid = null_ptid;
1014
1015 /* Destroy LWP info; it's no longer valid. */
1016 init_lwp_list ();
1017
1018 /* Restore the original signal mask. */
1019 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1020 sigemptyset (&blocked_mask);
1021
1022 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1023 deprecated_child_ops.to_detach (args, from_tty);
1024 }
1025
1026 /* Resume LP. */
1027
1028 static int
1029 resume_callback (struct lwp_info *lp, void *data)
1030 {
1031 if (lp->stopped && lp->status == 0)
1032 {
1033 struct thread_info *tp;
1034
1035 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), 0, TARGET_SIGNAL_0);
1036 if (debug_linux_nat)
1037 fprintf_unfiltered (gdb_stdlog,
1038 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1039 target_pid_to_str (lp->ptid));
1040 lp->stopped = 0;
1041 lp->step = 0;
1042 }
1043
1044 return 0;
1045 }
1046
1047 static int
1048 resume_clear_callback (struct lwp_info *lp, void *data)
1049 {
1050 lp->resumed = 0;
1051 return 0;
1052 }
1053
1054 static int
1055 resume_set_callback (struct lwp_info *lp, void *data)
1056 {
1057 lp->resumed = 1;
1058 return 0;
1059 }
1060
1061 static void
1062 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1063 {
1064 struct lwp_info *lp;
1065 int resume_all;
1066
1067 /* A specific PTID means `step only this process id'. */
1068 resume_all = (PIDGET (ptid) == -1);
1069
1070 if (resume_all)
1071 iterate_over_lwps (resume_set_callback, NULL);
1072 else
1073 iterate_over_lwps (resume_clear_callback, NULL);
1074
1075 /* If PID is -1, it's the current inferior that should be
1076 handled specially. */
1077 if (PIDGET (ptid) == -1)
1078 ptid = inferior_ptid;
1079
1080 lp = find_lwp_pid (ptid);
1081 if (lp)
1082 {
1083 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1084
1085 /* Remember if we're stepping. */
1086 lp->step = step;
1087
1088 /* Mark this LWP as resumed. */
1089 lp->resumed = 1;
1090
1091 /* If we have a pending wait status for this thread, there is no
1092 point in resuming the process. */
1093 if (lp->status)
1094 {
1095 /* FIXME: What should we do if we are supposed to continue
1096 this thread with a signal? */
1097 gdb_assert (signo == TARGET_SIGNAL_0);
1098 return;
1099 }
1100
1101 /* Mark LWP as not stopped to prevent it from being continued by
1102 resume_callback. */
1103 lp->stopped = 0;
1104 }
1105
1106 if (resume_all)
1107 iterate_over_lwps (resume_callback, NULL);
1108
1109 child_resume (ptid, step, signo);
1110 if (debug_linux_nat)
1111 fprintf_unfiltered (gdb_stdlog,
1112 "LLR: %s %s, %s (resume event thread)\n",
1113 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1114 target_pid_to_str (ptid),
1115 signo ? strsignal (signo) : "0");
1116 }
1117
1118 /* Issue kill to specified lwp. */
1119
1120 static int tkill_failed;
1121
1122 static int
1123 kill_lwp (int lwpid, int signo)
1124 {
1125 errno = 0;
1126
1127 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1128 fails, then we are not using nptl threads and we should be using kill. */
1129
1130 #ifdef HAVE_TKILL_SYSCALL
1131 if (!tkill_failed)
1132 {
1133 int ret = syscall (__NR_tkill, lwpid, signo);
1134 if (errno != ENOSYS)
1135 return ret;
1136 errno = 0;
1137 tkill_failed = 1;
1138 }
1139 #endif
1140
1141 return kill (lwpid, signo);
1142 }
1143
1144 /* Handle a GNU/Linux extended wait response. Most of the work we
1145 just pass off to linux_handle_extended_wait, but if it reports a
1146 clone event we need to add the new LWP to our list (and not report
1147 the trap to higher layers). This function returns non-zero if
1148 the event should be ignored and we should wait again. */
1149
1150 static int
1151 linux_nat_handle_extended (struct lwp_info *lp, int status)
1152 {
1153 linux_handle_extended_wait (GET_LWP (lp->ptid), status,
1154 &lp->waitstatus);
1155
1156 /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events. */
1157 if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS)
1158 {
1159 struct lwp_info *new_lp;
1160 new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid,
1161 GET_PID (inferior_ptid)));
1162 new_lp->cloned = 1;
1163 new_lp->stopped = 1;
1164
1165 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1166
1167 if (debug_linux_nat)
1168 fprintf_unfiltered (gdb_stdlog,
1169 "LLHE: Got clone event from LWP %ld, resuming\n",
1170 GET_LWP (lp->ptid));
1171 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1172
1173 return 1;
1174 }
1175
1176 return 0;
1177 }
1178
1179 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1180 exited. */
1181
1182 static int
1183 wait_lwp (struct lwp_info *lp)
1184 {
1185 pid_t pid;
1186 int status;
1187 int thread_dead = 0;
1188
1189 gdb_assert (!lp->stopped);
1190 gdb_assert (lp->status == 0);
1191
1192 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1193 if (pid == -1 && errno == ECHILD)
1194 {
1195 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1196 if (pid == -1 && errno == ECHILD)
1197 {
1198 /* The thread has previously exited. We need to delete it
1199 now because, for some vendor 2.4 kernels with NPTL
1200 support backported, there won't be an exit event unless
1201 it is the main thread. 2.6 kernels will report an exit
1202 event for each thread that exits, as expected. */
1203 thread_dead = 1;
1204 if (debug_linux_nat)
1205 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1206 target_pid_to_str (lp->ptid));
1207 }
1208 }
1209
1210 if (!thread_dead)
1211 {
1212 gdb_assert (pid == GET_LWP (lp->ptid));
1213
1214 if (debug_linux_nat)
1215 {
1216 fprintf_unfiltered (gdb_stdlog,
1217 "WL: waitpid %s received %s\n",
1218 target_pid_to_str (lp->ptid),
1219 status_to_str (status));
1220 }
1221 }
1222
1223 /* Check if the thread has exited. */
1224 if (WIFEXITED (status) || WIFSIGNALED (status))
1225 {
1226 thread_dead = 1;
1227 if (debug_linux_nat)
1228 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1229 target_pid_to_str (lp->ptid));
1230 }
1231
1232 if (thread_dead)
1233 {
1234 if (in_thread_list (lp->ptid))
1235 {
1236 /* Core GDB cannot deal with us deleting the current thread. */
1237 if (!ptid_equal (lp->ptid, inferior_ptid))
1238 delete_thread (lp->ptid);
1239 printf_unfiltered (_("[%s exited]\n"),
1240 target_pid_to_str (lp->ptid));
1241 }
1242
1243 delete_lwp (lp->ptid);
1244 return 0;
1245 }
1246
1247 gdb_assert (WIFSTOPPED (status));
1248
1249 /* Handle GNU/Linux's extended waitstatus for trace events. */
1250 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1251 {
1252 if (debug_linux_nat)
1253 fprintf_unfiltered (gdb_stdlog,
1254 "WL: Handling extended status 0x%06x\n",
1255 status);
1256 if (linux_nat_handle_extended (lp, status))
1257 return wait_lwp (lp);
1258 }
1259
1260 return status;
1261 }
1262
1263 /* Send a SIGSTOP to LP. */
1264
1265 static int
1266 stop_callback (struct lwp_info *lp, void *data)
1267 {
1268 if (!lp->stopped && !lp->signalled)
1269 {
1270 int ret;
1271
1272 if (debug_linux_nat)
1273 {
1274 fprintf_unfiltered (gdb_stdlog,
1275 "SC: kill %s **<SIGSTOP>**\n",
1276 target_pid_to_str (lp->ptid));
1277 }
1278 errno = 0;
1279 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1280 if (debug_linux_nat)
1281 {
1282 fprintf_unfiltered (gdb_stdlog,
1283 "SC: lwp kill %d %s\n",
1284 ret,
1285 errno ? safe_strerror (errno) : "ERRNO-OK");
1286 }
1287
1288 lp->signalled = 1;
1289 gdb_assert (lp->status == 0);
1290 }
1291
1292 return 0;
1293 }
1294
1295 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1296 a pointer to a set of signals to be flushed immediately. */
1297
1298 static int
1299 stop_wait_callback (struct lwp_info *lp, void *data)
1300 {
1301 sigset_t *flush_mask = data;
1302
1303 if (!lp->stopped)
1304 {
1305 int status;
1306
1307 status = wait_lwp (lp);
1308 if (status == 0)
1309 return 0;
1310
1311 /* Ignore any signals in FLUSH_MASK. */
1312 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1313 {
1314 if (!lp->signalled)
1315 {
1316 lp->stopped = 1;
1317 return 0;
1318 }
1319
1320 errno = 0;
1321 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1322 if (debug_linux_nat)
1323 fprintf_unfiltered (gdb_stdlog,
1324 "PTRACE_CONT %s, 0, 0 (%s)\n",
1325 target_pid_to_str (lp->ptid),
1326 errno ? safe_strerror (errno) : "OK");
1327
1328 return stop_wait_callback (lp, flush_mask);
1329 }
1330
1331 if (WSTOPSIG (status) != SIGSTOP)
1332 {
1333 if (WSTOPSIG (status) == SIGTRAP)
1334 {
1335 /* If a LWP other than the LWP that we're reporting an
1336 event for has hit a GDB breakpoint (as opposed to
1337 some random trap signal), then just arrange for it to
1338 hit it again later. We don't keep the SIGTRAP status
1339 and don't forward the SIGTRAP signal to the LWP. We
1340 will handle the current event, eventually we will
1341 resume all LWPs, and this one will get its breakpoint
1342 trap again.
1343
1344 If we do not do this, then we run the risk that the
1345 user will delete or disable the breakpoint, but the
1346 thread will have already tripped on it. */
1347
1348 /* Now resume this LWP and get the SIGSTOP event. */
1349 errno = 0;
1350 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1351 if (debug_linux_nat)
1352 {
1353 fprintf_unfiltered (gdb_stdlog,
1354 "PTRACE_CONT %s, 0, 0 (%s)\n",
1355 target_pid_to_str (lp->ptid),
1356 errno ? safe_strerror (errno) : "OK");
1357
1358 fprintf_unfiltered (gdb_stdlog,
1359 "SWC: Candidate SIGTRAP event in %s\n",
1360 target_pid_to_str (lp->ptid));
1361 }
1362 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1363 stop_wait_callback (lp, data);
1364 /* If there's another event, throw it back into the queue. */
1365 if (lp->status)
1366 {
1367 if (debug_linux_nat)
1368 {
1369 fprintf_unfiltered (gdb_stdlog,
1370 "SWC: kill %s, %s\n",
1371 target_pid_to_str (lp->ptid),
1372 status_to_str ((int) status));
1373 }
1374 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1375 }
1376 /* Save the sigtrap event. */
1377 lp->status = status;
1378 return 0;
1379 }
1380 else
1381 {
1382 /* The thread was stopped with a signal other than
1383 SIGSTOP, and didn't accidentally trip a breakpoint. */
1384
1385 if (debug_linux_nat)
1386 {
1387 fprintf_unfiltered (gdb_stdlog,
1388 "SWC: Pending event %s in %s\n",
1389 status_to_str ((int) status),
1390 target_pid_to_str (lp->ptid));
1391 }
1392 /* Now resume this LWP and get the SIGSTOP event. */
1393 errno = 0;
1394 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1395 if (debug_linux_nat)
1396 fprintf_unfiltered (gdb_stdlog,
1397 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1398 target_pid_to_str (lp->ptid),
1399 errno ? safe_strerror (errno) : "OK");
1400
1401 /* Hold this event/waitstatus while we check to see if
1402 there are any more (we still want to get that SIGSTOP). */
1403 stop_wait_callback (lp, data);
1404 /* If the lp->status field is still empty, use it to hold
1405 this event. If not, then this event must be returned
1406 to the event queue of the LWP. */
1407 if (lp->status == 0)
1408 lp->status = status;
1409 else
1410 {
1411 if (debug_linux_nat)
1412 {
1413 fprintf_unfiltered (gdb_stdlog,
1414 "SWC: kill %s, %s\n",
1415 target_pid_to_str (lp->ptid),
1416 status_to_str ((int) status));
1417 }
1418 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1419 }
1420 return 0;
1421 }
1422 }
1423 else
1424 {
1425 /* We caught the SIGSTOP that we intended to catch, so
1426 there's no SIGSTOP pending. */
1427 lp->stopped = 1;
1428 lp->signalled = 0;
1429 }
1430 }
1431
1432 return 0;
1433 }
1434
1435 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1436 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1437
1438 static int
1439 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1440 {
1441 sigset_t blocked, ignored;
1442 int i;
1443
1444 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1445
1446 if (!flush_mask)
1447 return 0;
1448
1449 for (i = 1; i < NSIG; i++)
1450 if (sigismember (pending, i))
1451 if (!sigismember (flush_mask, i)
1452 || sigismember (&blocked, i)
1453 || sigismember (&ignored, i))
1454 sigdelset (pending, i);
1455
1456 if (sigisemptyset (pending))
1457 return 0;
1458
1459 return 1;
1460 }
1461
1462 /* DATA is interpreted as a mask of signals to flush. If LP has
1463 signals pending, and they are all in the flush mask, then arrange
1464 to flush them. LP should be stopped, as should all other threads
1465 it might share a signal queue with. */
1466
1467 static int
1468 flush_callback (struct lwp_info *lp, void *data)
1469 {
1470 sigset_t *flush_mask = data;
1471 sigset_t pending, intersection, blocked, ignored;
1472 int pid, status;
1473
1474 /* Normally, when an LWP exits, it is removed from the LWP list. The
1475 last LWP isn't removed till later, however. So if there is only
1476 one LWP on the list, make sure it's alive. */
1477 if (lwp_list == lp && lp->next == NULL)
1478 if (!linux_nat_thread_alive (lp->ptid))
1479 return 0;
1480
1481 /* Just because the LWP is stopped doesn't mean that new signals
1482 can't arrive from outside, so this function must be careful of
1483 race conditions. However, because all threads are stopped, we
1484 can assume that the pending mask will not shrink unless we resume
1485 the LWP, and that it will then get another signal. We can't
1486 control which one, however. */
1487
1488 if (lp->status)
1489 {
1490 if (debug_linux_nat)
1491 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1492 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1493 lp->status = 0;
1494 }
1495
1496 while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1497 {
1498 int ret;
1499
1500 errno = 0;
1501 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1502 if (debug_linux_nat)
1503 fprintf_unfiltered (gdb_stderr,
1504 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1505
1506 lp->stopped = 0;
1507 stop_wait_callback (lp, flush_mask);
1508 if (debug_linux_nat)
1509 fprintf_unfiltered (gdb_stderr,
1510 "FC: Wait finished; saved status is %d\n",
1511 lp->status);
1512 }
1513
1514 return 0;
1515 }
1516
1517 /* Return non-zero if LP has a wait status pending. */
1518
1519 static int
1520 status_callback (struct lwp_info *lp, void *data)
1521 {
1522 /* Only report a pending wait status if we pretend that this has
1523 indeed been resumed. */
1524 return (lp->status != 0 && lp->resumed);
1525 }
1526
1527 /* Return non-zero if LP isn't stopped. */
1528
1529 static int
1530 running_callback (struct lwp_info *lp, void *data)
1531 {
1532 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1533 }
1534
1535 /* Count the LWPs that have had events. */
1536
1537 static int
1538 count_events_callback (struct lwp_info *lp, void *data)
1539 {
1540 int *count = data;
1541
1542 gdb_assert (count != NULL);
1543
1544 /* Count only LWPs that have a SIGTRAP event pending. */
1545 if (lp->status != 0
1546 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1547 (*count)++;
1548
1549 return 0;
1550 }
1551
1552 /* Select the LWP (if any) that is currently being single-stepped. */
1553
1554 static int
1555 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1556 {
1557 if (lp->step && lp->status != 0)
1558 return 1;
1559 else
1560 return 0;
1561 }
1562
1563 /* Select the Nth LWP that has had a SIGTRAP event. */
1564
1565 static int
1566 select_event_lwp_callback (struct lwp_info *lp, void *data)
1567 {
1568 int *selector = data;
1569
1570 gdb_assert (selector != NULL);
1571
1572 /* Select only LWPs that have a SIGTRAP event pending. */
1573 if (lp->status != 0
1574 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1575 if ((*selector)-- == 0)
1576 return 1;
1577
1578 return 0;
1579 }
1580
1581 static int
1582 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1583 {
1584 struct lwp_info *event_lp = data;
1585
1586 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1587 if (lp == event_lp)
1588 return 0;
1589
1590 /* If a LWP other than the LWP that we're reporting an event for has
1591 hit a GDB breakpoint (as opposed to some random trap signal),
1592 then just arrange for it to hit it again later. We don't keep
1593 the SIGTRAP status and don't forward the SIGTRAP signal to the
1594 LWP. We will handle the current event, eventually we will resume
1595 all LWPs, and this one will get its breakpoint trap again.
1596
1597 If we do not do this, then we run the risk that the user will
1598 delete or disable the breakpoint, but the LWP will have already
1599 tripped on it. */
1600
1601 if (lp->status != 0
1602 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1603 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1604 DECR_PC_AFTER_BREAK))
1605 {
1606 if (debug_linux_nat)
1607 fprintf_unfiltered (gdb_stdlog,
1608 "CBC: Push back breakpoint for %s\n",
1609 target_pid_to_str (lp->ptid));
1610
1611 /* Back up the PC if necessary. */
1612 if (DECR_PC_AFTER_BREAK)
1613 write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);
1614
1615 /* Throw away the SIGTRAP. */
1616 lp->status = 0;
1617 }
1618
1619 return 0;
1620 }
1621
1622 /* Select one LWP out of those that have events pending. */
1623
1624 static void
1625 select_event_lwp (struct lwp_info **orig_lp, int *status)
1626 {
1627 int num_events = 0;
1628 int random_selector;
1629 struct lwp_info *event_lp;
1630
1631 /* Record the wait status for the original LWP. */
1632 (*orig_lp)->status = *status;
1633
1634 /* Give preference to any LWP that is being single-stepped. */
1635 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1636 if (event_lp != NULL)
1637 {
1638 if (debug_linux_nat)
1639 fprintf_unfiltered (gdb_stdlog,
1640 "SEL: Select single-step %s\n",
1641 target_pid_to_str (event_lp->ptid));
1642 }
1643 else
1644 {
1645 /* No single-stepping LWP. Select one at random, out of those
1646 which have had SIGTRAP events. */
1647
1648 /* First see how many SIGTRAP events we have. */
1649 iterate_over_lwps (count_events_callback, &num_events);
1650
1651 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1652 random_selector = (int)
1653 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
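      /* The expression above scales rand () into the range
         0 .. num_events - 1, so each LWP with a pending SIGTRAP is
         selected with (roughly) equal probability.  */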
1654
1655 if (debug_linux_nat && num_events > 1)
1656 fprintf_unfiltered (gdb_stdlog,
1657 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1658 num_events, random_selector);
1659
1660 event_lp = iterate_over_lwps (select_event_lwp_callback,
1661 &random_selector);
1662 }
1663
1664 if (event_lp != NULL)
1665 {
1666 /* Switch the event LWP. */
1667 *orig_lp = event_lp;
1668 *status = event_lp->status;
1669 }
1670
1671 /* Flush the wait status for the event LWP. */
1672 (*orig_lp)->status = 0;
1673 }
1674
1675 /* Return non-zero if LP has been resumed. */
1676
1677 static int
1678 resumed_callback (struct lwp_info *lp, void *data)
1679 {
1680 return lp->resumed;
1681 }
1682
1683 #ifdef CHILD_WAIT
1684
1685 /* We need to override child_wait to support attaching to cloned
1686 processes, since a normal wait (as done by the default version)
1687 ignores those processes. */
1688
1689 /* Wait for child PTID to do something. Return id of the child,
1690 minus_one_ptid in case of error; store status into *OURSTATUS. */
1691
1692 ptid_t
1693 child_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1694 {
1695 int save_errno;
1696 int status;
1697 pid_t pid;
1698
1699 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1700
1701 do
1702 {
1703 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1704 attached process. */
1705 set_sigio_trap ();
1706
1707 pid = my_waitpid (GET_PID (ptid), &status, 0);
1708 if (pid == -1 && errno == ECHILD)
1709 /* Try again with __WCLONE to check cloned processes. */
1710 pid = my_waitpid (GET_PID (ptid), &status, __WCLONE);
1711
1712 if (debug_linux_nat)
1713 {
1714 fprintf_unfiltered (gdb_stdlog,
1715 "CW: waitpid %ld received %s\n",
1716 (long) pid, status_to_str (status));
1717 }
1718
1719 save_errno = errno;
1720
1721 /* Make sure we don't report an event for the exit of the
1722 original program, if we've detached from it. */
1723 if (pid != -1 && !WIFSTOPPED (status) && pid != GET_PID (inferior_ptid))
1724 {
1725 pid = -1;
1726 save_errno = EINTR;
1727 }
1728
1729 /* Check for stop events reported by a process we didn't already
1730 know about - in this case, anything other than inferior_ptid.
1731
1732 If we're expecting to receive stopped processes after fork,
1733 vfork, and clone events, then we'll just add the new one to
1734 our list and go back to waiting for the event to be reported
1735 - the stopped process might be returned from waitpid before
1736 or after the event is. If we want to handle debugging of
1737 CLONE_PTRACE processes we need to do more here, i.e. switch
1738 to multi-threaded mode. */
1739 if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP
1740 && pid != GET_PID (inferior_ptid))
1741 {
1742 linux_record_stopped_pid (pid);
1743 pid = -1;
1744 save_errno = EINTR;
1745 }
1746
1747 /* Handle GNU/Linux's extended waitstatus for trace events. */
1748 if (pid != -1 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
1749 && status >> 16 != 0)
1750 {
1751 linux_handle_extended_wait (pid, status, ourstatus);
1752
1753 /* If we see a clone event, detach the child, and don't
1754 report the event. It would be nice to offer some way to
1755 switch into a non-thread-db based threaded mode at this
1756 point. */
1757 if (ourstatus->kind == TARGET_WAITKIND_SPURIOUS)
1758 {
1759 ptrace (PTRACE_DETACH, ourstatus->value.related_pid, 0, 0);
1760 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1761 ptrace (PTRACE_CONT, pid, 0, 0);
1762 pid = -1;
1763 save_errno = EINTR;
1764 }
1765 }
1766
1767 clear_sigio_trap ();
1768 clear_sigint_trap ();
1769 }
1770 while (pid == -1 && save_errno == EINTR);
1771
1772 if (pid == -1)
1773 {
1774 warning (_("Child process unexpectedly missing: %s"),
1775 safe_strerror (errno));
1776
1777 /* Claim it exited with unknown signal. */
1778 ourstatus->kind = TARGET_WAITKIND_SIGNALLED;
1779 ourstatus->value.sig = TARGET_SIGNAL_UNKNOWN;
1780 return minus_one_ptid;
1781 }
1782
1783 if (ourstatus->kind == TARGET_WAITKIND_IGNORE)
1784 store_waitstatus (ourstatus, status);
1785
1786 return pid_to_ptid (pid);
1787 }
1788
1789 #endif
1790
1791 /* Stop an active thread, verify it still exists, then resume it. */
1792
1793 static int
1794 stop_and_resume_callback (struct lwp_info *lp, void *data)
1795 {
1796 struct lwp_info *ptr;
1797
1798 if (!lp->stopped && !lp->signalled)
1799 {
1800 stop_callback (lp, NULL);
1801 stop_wait_callback (lp, NULL);
1802 /* Resume if the lwp still exists. */
1803 for (ptr = lwp_list; ptr; ptr = ptr->next)
1804 if (lp == ptr)
1805 {
1806 resume_callback (lp, NULL);
1807 resume_set_callback (lp, NULL);
1808 }
1809 }
1810 return 0;
1811 }
1812
1813 static ptid_t
1814 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1815 {
1816 struct lwp_info *lp = NULL;
1817 int options = 0;
1818 int status = 0;
1819 pid_t pid = PIDGET (ptid);
1820 sigset_t flush_mask;
1821
1822 sigemptyset (&flush_mask);
1823
1824 /* Make sure SIGCHLD is blocked. */
1825 if (!sigismember (&blocked_mask, SIGCHLD))
1826 {
1827 sigaddset (&blocked_mask, SIGCHLD);
1828 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1829 }
1830
1831 retry:
1832
1833 /* Make sure there is at least one LWP that has been resumed, at
1834 least if there are any LWPs at all. */
1835 gdb_assert (num_lwps == 0 || iterate_over_lwps (resumed_callback, NULL));
1836
1837 /* First check if there is a LWP with a wait status pending. */
1838 if (pid == -1)
1839 {
1840 /* Any LWP that's been resumed will do. */
1841 lp = iterate_over_lwps (status_callback, NULL);
1842 if (lp)
1843 {
1844 status = lp->status;
1845 lp->status = 0;
1846
1847 if (debug_linux_nat && status)
1848 fprintf_unfiltered (gdb_stdlog,
1849 "LLW: Using pending wait status %s for %s.\n",
1850 status_to_str (status),
1851 target_pid_to_str (lp->ptid));
1852 }
1853
1854 /* But if we don't find one, we'll have to wait, and check both
1855 cloned and uncloned processes. We start with the cloned
1856 processes. */
1857 options = __WCLONE | WNOHANG;
1858 }
1859 else if (is_lwp (ptid))
1860 {
1861 if (debug_linux_nat)
1862 fprintf_unfiltered (gdb_stdlog,
1863 "LLW: Waiting for specific LWP %s.\n",
1864 target_pid_to_str (ptid));
1865
1866 /* We have a specific LWP to check. */
1867 lp = find_lwp_pid (ptid);
1868 gdb_assert (lp);
1869 status = lp->status;
1870 lp->status = 0;
1871
1872 if (debug_linux_nat && status)
1873 fprintf_unfiltered (gdb_stdlog,
1874 "LLW: Using pending wait status %s for %s.\n",
1875 status_to_str (status),
1876 target_pid_to_str (lp->ptid));
1877
1878 /* If we have to wait, take into account whether PID is a cloned
1879 process or not. And we have to convert it to something that
1880 the layer beneath us can understand. */
1881 options = lp->cloned ? __WCLONE : 0;
1882 pid = GET_LWP (ptid);
1883 }
1884
1885 if (status && lp->signalled)
1886 {
1887 /* A pending SIGSTOP may interfere with the normal stream of
1888 events. In a typical case where interference is a problem,
1889 we have a SIGSTOP signal pending for LWP A while
1890 single-stepping it, encounter an event in LWP B, and take the
1891 pending SIGSTOP while trying to stop LWP A. After processing
1892 the event in LWP B, LWP A is continued, and we'll never see
1893 the SIGTRAP associated with the last time we were
1894 single-stepping LWP A. */
1895
1896 /* Resume the thread. It should halt immediately returning the
1897 pending SIGSTOP. */
1898 registers_changed ();
1899 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step,
1900 TARGET_SIGNAL_0);
1901 if (debug_linux_nat)
1902 fprintf_unfiltered (gdb_stdlog,
1903 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
1904 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1905 target_pid_to_str (lp->ptid));
1906 lp->stopped = 0;
1907 gdb_assert (lp->resumed);
1908
1909 /* This should catch the pending SIGSTOP. */
1910 stop_wait_callback (lp, NULL);
1911 }
1912
1913 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1914 attached process. */
1915 set_sigio_trap ();
1916
1917 while (status == 0)
1918 {
1919 pid_t lwpid;
1920
1921 lwpid = my_waitpid (pid, &status, options);
1922 if (lwpid > 0)
1923 {
1924 gdb_assert (pid == -1 || lwpid == pid);
1925
1926 if (debug_linux_nat)
1927 {
1928 fprintf_unfiltered (gdb_stdlog,
1929 "LLW: waitpid %ld received %s\n",
1930 (long) lwpid, status_to_str (status));
1931 }
1932
1933 lp = find_lwp_pid (pid_to_ptid (lwpid));
1934
1935 /* Check for stop events reported by a process we didn't
1936 already know about - anything not already in our LWP
1937 list.
1938
1939 If we're expecting to receive stopped processes after
1940 fork, vfork, and clone events, then we'll just add the
1941 new one to our list and go back to waiting for the event
1942 to be reported - the stopped process might be returned
1943 from waitpid before or after the event is. */
1944 if (WIFSTOPPED (status) && !lp)
1945 {
1946 linux_record_stopped_pid (lwpid);
1947 status = 0;
1948 continue;
1949 }
1950
1951 /* Make sure we don't report an event for the exit of an LWP not in
1952 our list, i.e. not part of the current process. This can happen
1953 if we detach from a program we originally forked and then it
1954 exits. */
1955 if (!WIFSTOPPED (status) && !lp)
1956 {
1957 status = 0;
1958 continue;
1959 }
1960
1961 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
1962 CLONE_PTRACE processes which do not use the thread library -
1963 otherwise we wouldn't find the new LWP this way. That doesn't
1964 currently work, and the following code is currently unreachable
1965 due to the two blocks above. If it's fixed some day, this code
1966 should be broken out into a function so that we can also pick up
1967 LWPs from the new interface. */
1968 if (!lp)
1969 {
1970 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
1971 if (options & __WCLONE)
1972 lp->cloned = 1;
1973
1974 if (threaded)
1975 {
1976 gdb_assert (WIFSTOPPED (status)
1977 && WSTOPSIG (status) == SIGSTOP);
1978 lp->signalled = 1;
1979
1980 if (!in_thread_list (inferior_ptid))
1981 {
1982 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1983 GET_PID (inferior_ptid));
1984 add_thread (inferior_ptid);
1985 }
1986
1987 add_thread (lp->ptid);
1988 printf_unfiltered (_("[New %s]\n"),
1989 target_pid_to_str (lp->ptid));
1990 }
1991 }
1992
1993 /* Handle GNU/Linux's extended waitstatus for trace events. */
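      /* With the PTRACE_O_TRACE* options enabled, the kernel reports
         the PTRACE_EVENT_* code in bits 16 and up of the wait status,
         which is what the "status >> 16" test below checks for.  */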
1994 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1995 {
1996 if (debug_linux_nat)
1997 fprintf_unfiltered (gdb_stdlog,
1998 "LLW: Handling extended status 0x%06x\n",
1999 status);
2000 if (linux_nat_handle_extended (lp, status))
2001 {
2002 status = 0;
2003 continue;
2004 }
2005 }
2006
2007 /* Check if the thread has exited. */
2008 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2009 {
2010 if (in_thread_list (lp->ptid))
2011 {
2012 /* Core GDB cannot deal with us deleting the current
2013 thread. */
2014 if (!ptid_equal (lp->ptid, inferior_ptid))
2015 delete_thread (lp->ptid);
2016 printf_unfiltered (_("[%s exited]\n"),
2017 target_pid_to_str (lp->ptid));
2018 }
2019
2020 /* If this is the main thread, we must stop all threads and
2021 verify if they are still alive. This is because in the nptl
2022 thread model, there is no signal issued for exiting LWPs
2023 other than the main thread. We only get the main thread
2024 exit signal once all child threads have already exited.
2025 If we stop all the threads and use the stop_wait_callback
2026 to check if they have exited we can determine whether this
2027 signal should be ignored or whether it means the end of the
2028 debugged application, regardless of which threading model
2029 is being used. */
2030 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2031 {
2032 lp->stopped = 1;
2033 iterate_over_lwps (stop_and_resume_callback, NULL);
2034 }
2035
2036 if (debug_linux_nat)
2037 fprintf_unfiltered (gdb_stdlog,
2038 "LLW: %s exited.\n",
2039 target_pid_to_str (lp->ptid));
2040
2041 delete_lwp (lp->ptid);
2042
2043 /* If there is at least one more LWP, then the exit signal
2044 was not the end of the debugged application and should be
2045 ignored. */
2046 if (num_lwps > 0)
2047 {
2048 /* Make sure there is at least one thread running. */
2049 gdb_assert (iterate_over_lwps (running_callback, NULL));
2050
2051 /* Discard the event. */
2052 status = 0;
2053 continue;
2054 }
2055 }
2056
2057 /* Check if the current LWP has previously exited. In the nptl
2058 thread model, LWPs other than the main thread do not issue
2059 signals when they exit so we must check whenever the thread
2060 has stopped. A similar check is made in stop_wait_callback(). */
2061 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2062 {
2063 if (in_thread_list (lp->ptid))
2064 {
2065 /* Core GDB cannot deal with us deleting the current
2066 thread. */
2067 if (!ptid_equal (lp->ptid, inferior_ptid))
2068 delete_thread (lp->ptid);
2069 printf_unfiltered (_("[%s exited]\n"),
2070 target_pid_to_str (lp->ptid));
2071 }
2072 if (debug_linux_nat)
2073 fprintf_unfiltered (gdb_stdlog,
2074 "LLW: %s exited.\n",
2075 target_pid_to_str (lp->ptid));
2076
2077 delete_lwp (lp->ptid);
2078
2079 /* Make sure there is at least one thread running. */
2080 gdb_assert (iterate_over_lwps (running_callback, NULL));
2081
2082 /* Discard the event. */
2083 status = 0;
2084 continue;
2085 }
2086
2087 /* Make sure we don't report a SIGSTOP that we sent
2088 ourselves in an attempt to stop an LWP. */
2089 if (lp->signalled
2090 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2091 {
2092 if (debug_linux_nat)
2093 fprintf_unfiltered (gdb_stdlog,
2094 "LLW: Delayed SIGSTOP caught for %s.\n",
2095 target_pid_to_str (lp->ptid));
2096
2097 /* This is a delayed SIGSTOP. */
2098 lp->signalled = 0;
2099
2100 registers_changed ();
2101 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step,
2102 TARGET_SIGNAL_0);
2103 if (debug_linux_nat)
2104 fprintf_unfiltered (gdb_stdlog,
2105 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2106 lp->step ?
2107 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2108 target_pid_to_str (lp->ptid));
2109
2110 lp->stopped = 0;
2111 gdb_assert (lp->resumed);
2112
2113 /* Discard the event. */
2114 status = 0;
2115 continue;
2116 }
2117
2118 break;
2119 }
2120
2121 if (pid == -1)
2122 {
2123 /* Alternate between checking cloned and uncloned processes. */
2124 options ^= __WCLONE;
2125
2126 /* And suspend every time we have checked both. */
2127 if (options & __WCLONE)
2128 sigsuspend (&suspend_mask);
2129 }
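      /* Note that sigsuspend atomically unblocks SIGCHLD while it
         waits (SIGCHLD was removed from suspend_mask in
         _initialize_linux_nat), so a SIGCHLD that arrived after the
         waitpid call above is not lost; it just makes sigsuspend
         return immediately.  */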
2130
2131 /* We shouldn't end up here unless we want to try again. */
2132 gdb_assert (status == 0);
2133 }
2134
2135 clear_sigio_trap ();
2136 clear_sigint_trap ();
2137
2138 gdb_assert (lp);
2139
2140 /* Don't report signals that GDB isn't interested in, such as
2141 signals that are neither printed nor stopped upon. Stopping all
2142 threads can be a bit time-consuming, so if we want decent
2143 performance with heavily multi-threaded programs, especially when
2144 they're using a high-frequency timer, we'd better avoid it if we
2145 can. */
2146
2147 if (WIFSTOPPED (status))
2148 {
2149 int signo = target_signal_from_host (WSTOPSIG (status));
2150
2151 if (signal_stop_state (signo) == 0
2152 && signal_print_state (signo) == 0
2153 && signal_pass_state (signo) == 1)
2154 {
2155 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2156 here? It is not clear we should. GDB may not expect
2157 other threads to run. On the other hand, not resuming
2158 newly attached threads may cause an unwanted delay in
2159 getting them running. */
2160 registers_changed ();
2161 child_resume (pid_to_ptid (GET_LWP (lp->ptid)), lp->step, signo);
2162 if (debug_linux_nat)
2163 fprintf_unfiltered (gdb_stdlog,
2164 "LLW: %s %s, %s (preempt 'handle')\n",
2165 lp->step ?
2166 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2167 target_pid_to_str (lp->ptid),
2168 signo ? strsignal (signo) : "0");
2169 lp->stopped = 0;
2170 status = 0;
2171 goto retry;
2172 }
2173
2174 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2175 {
2176 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2177 forwarded to the entire process group, that is, all LWP's
2178 will receive it. Since we only want to report it once,
2179 we try to flush it from all LWPs except this one. */
2180 sigaddset (&flush_mask, SIGINT);
2181 }
2182 }
2183
2184 /* This LWP is stopped now. */
2185 lp->stopped = 1;
2186
2187 if (debug_linux_nat)
2188 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2189 status_to_str (status), target_pid_to_str (lp->ptid));
2190
2191 /* Now stop all other LWP's ... */
2192 iterate_over_lwps (stop_callback, NULL);
2193
2194 /* ... and wait until all of them have reported back that they're no
2195 longer running. */
2196 iterate_over_lwps (stop_wait_callback, &flush_mask);
2197 iterate_over_lwps (flush_callback, &flush_mask);
2198
2199 /* If we're not waiting for a specific LWP, choose an event LWP from
2200 among those that have had events. Giving equal priority to all
2201 LWPs that have had events helps prevent starvation. */
2202 if (pid == -1)
2203 select_event_lwp (&lp, &status);
2204
2205 /* Now that we've selected our final event LWP, cancel any
2206 breakpoints in other LWPs that have hit a GDB breakpoint. See
2207 the comment in cancel_breakpoints_callback to find out why. */
2208 iterate_over_lwps (cancel_breakpoints_callback, lp);
2209
2210 /* If we're not running in "threaded" mode, we'll report the bare
2211 process id. */
2212
2213 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2214 {
2215 trap_ptid = (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
2216 if (debug_linux_nat)
2217 fprintf_unfiltered (gdb_stdlog,
2218 "LLW: trap_ptid is %s.\n",
2219 target_pid_to_str (trap_ptid));
2220 }
2221 else
2222 trap_ptid = null_ptid;
2223
2224 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2225 {
2226 *ourstatus = lp->waitstatus;
2227 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2228 }
2229 else
2230 store_waitstatus (ourstatus, status);
2231
2232 return (threaded ? lp->ptid : pid_to_ptid (GET_LWP (lp->ptid)));
2233 }
2234
2235 static int
2236 kill_callback (struct lwp_info *lp, void *data)
2237 {
2238 errno = 0;
2239 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2240 if (debug_linux_nat)
2241 fprintf_unfiltered (gdb_stdlog,
2242 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2243 target_pid_to_str (lp->ptid),
2244 errno ? safe_strerror (errno) : "OK");
2245
2246 return 0;
2247 }
2248
2249 static int
2250 kill_wait_callback (struct lwp_info *lp, void *data)
2251 {
2252 pid_t pid;
2253
2254 /* We must make sure that there are no pending events (delayed
2255 SIGSTOPs, pending SIGTRAPs, etc.) left behind, so that the current
2256 program doesn't interfere with any following debugging session. */
2257
2258 /* For cloned processes we must check both with __WCLONE and
2259 without, since the exit status of a cloned process isn't reported
2260 with __WCLONE. */
2261 if (lp->cloned)
2262 {
2263 do
2264 {
2265 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2266 if (pid != (pid_t) -1 && debug_linux_nat)
2267 {
2268 fprintf_unfiltered (gdb_stdlog,
2269 "KWC: wait %s received unknown.\n",
2270 target_pid_to_str (lp->ptid));
2271 }
2272 }
2273 while (pid == GET_LWP (lp->ptid));
2274
2275 gdb_assert (pid == -1 && errno == ECHILD);
2276 }
2277
2278 do
2279 {
2280 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2281 if (pid != (pid_t) -1 && debug_linux_nat)
2282 {
2283 fprintf_unfiltered (gdb_stdlog,
2284 "KWC: wait %s received unk.\n",
2285 target_pid_to_str (lp->ptid));
2286 }
2287 }
2288 while (pid == GET_LWP (lp->ptid));
2289
2290 gdb_assert (pid == -1 && errno == ECHILD);
2291 return 0;
2292 }
2293
2294 static void
2295 linux_nat_kill (void)
2296 {
2297 /* Kill all LWP's ... */
2298 iterate_over_lwps (kill_callback, NULL);
2299
2300 /* ... and wait until we've flushed all events. */
2301 iterate_over_lwps (kill_wait_callback, NULL);
2302
2303 target_mourn_inferior ();
2304 }
2305
2306 static void
2307 linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
2308 int from_tty)
2309 {
2310 deprecated_child_ops.to_create_inferior (exec_file, allargs, env, from_tty);
2311 }
2312
2313 static void
2314 linux_nat_mourn_inferior (void)
2315 {
2316 trap_ptid = null_ptid;
2317
2318 /* Destroy LWP info; it's no longer valid. */
2319 init_lwp_list ();
2320
2321 /* Restore the original signal mask. */
2322 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2323 sigemptyset (&blocked_mask);
2324
2325 deprecated_child_ops.to_mourn_inferior ();
2326 }
2327
2328 static int
2329 linux_nat_xfer_memory (CORE_ADDR memaddr, gdb_byte *myaddr, int len,
2330 int write, struct mem_attrib *attrib,
2331 struct target_ops *target)
2332 {
2333 struct cleanup *old_chain = save_inferior_ptid ();
2334 int xfer;
2335
2336 if (is_lwp (inferior_ptid))
2337 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2338
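  /* Try the (usually faster) /proc/PID/mem path first; if it cannot
     handle the transfer, fall back to the ptrace-based
     child_xfer_memory.  */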
2339 xfer = linux_proc_xfer_memory (memaddr, myaddr, len, write, attrib, target);
2340 if (xfer == 0)
2341 xfer = child_xfer_memory (memaddr, myaddr, len, write, attrib, target);
2342
2343 do_cleanups (old_chain);
2344 return xfer;
2345 }
2346
2347 static int
2348 linux_nat_thread_alive (ptid_t ptid)
2349 {
2350 gdb_assert (is_lwp (ptid));
2351
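  /* Use a harmless ptrace request as a liveness probe; it fails
     (typically with ESRCH) once the LWP is gone.  */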
2352 errno = 0;
2353 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2354 if (debug_linux_nat)
2355 fprintf_unfiltered (gdb_stdlog,
2356 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2357 target_pid_to_str (ptid),
2358 errno ? safe_strerror (errno) : "OK");
2359 if (errno)
2360 return 0;
2361
2362 return 1;
2363 }
2364
2365 static char *
2366 linux_nat_pid_to_str (ptid_t ptid)
2367 {
2368 static char buf[64];
2369
2370 if (is_lwp (ptid))
2371 {
2372 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2373 return buf;
2374 }
2375
2376 return normal_pid_to_str (ptid);
2377 }
2378
2379 static void
2380 init_linux_nat_ops (void)
2381 {
2382 #if 0
2383 linux_nat_ops.to_open = linux_nat_open;
2384 #endif
2385 linux_nat_ops.to_shortname = "lwp-layer";
2386 linux_nat_ops.to_longname = "lwp-layer";
2387 linux_nat_ops.to_doc = "Low level threads support (LWP layer)";
2388 linux_nat_ops.to_attach = linux_nat_attach;
2389 linux_nat_ops.to_detach = linux_nat_detach;
2390 linux_nat_ops.to_resume = linux_nat_resume;
2391 linux_nat_ops.to_wait = linux_nat_wait;
2392 /* fetch_inferior_registers and store_inferior_registers will
2393 honor the LWP id, so we can use them directly. */
2394 linux_nat_ops.to_fetch_registers = fetch_inferior_registers;
2395 linux_nat_ops.to_store_registers = store_inferior_registers;
2396 linux_nat_ops.deprecated_xfer_memory = linux_nat_xfer_memory;
2397 linux_nat_ops.to_kill = linux_nat_kill;
2398 linux_nat_ops.to_create_inferior = linux_nat_create_inferior;
2399 linux_nat_ops.to_mourn_inferior = linux_nat_mourn_inferior;
2400 linux_nat_ops.to_thread_alive = linux_nat_thread_alive;
2401 linux_nat_ops.to_pid_to_str = linux_nat_pid_to_str;
2402 linux_nat_ops.to_post_startup_inferior = child_post_startup_inferior;
2403 linux_nat_ops.to_post_attach = child_post_attach;
2404 linux_nat_ops.to_insert_fork_catchpoint = child_insert_fork_catchpoint;
2405 linux_nat_ops.to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
2406 linux_nat_ops.to_insert_exec_catchpoint = child_insert_exec_catchpoint;
2407
2408 linux_nat_ops.to_stratum = thread_stratum;
2409 linux_nat_ops.to_has_thread_control = tc_schedlock;
2410 linux_nat_ops.to_magic = OPS_MAGIC;
2411 }
2412
2413 static void
2414 sigchld_handler (int signo)
2415 {
2416 /* Do nothing. The only reason for this handler is that it allows
2417 us to use sigsuspend in linux_nat_wait above to wait for the
2418 arrival of a SIGCHLD. */
2419 }
2420
2421 /* Accepts an integer PID; returns a string representing a file that
2422 can be opened to get the symbols for the child process. */
2423
2424 char *
2425 child_pid_to_exec_file (int pid)
2426 {
2427 char *name1, *name2;
2428
2429 name1 = xmalloc (MAXPATHLEN);
2430 name2 = xmalloc (MAXPATHLEN);
2431 make_cleanup (xfree, name1);
2432 make_cleanup (xfree, name2);
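  /* Zero NAME2 first, since readlink does not NUL-terminate its
     result.  */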
2433 memset (name2, 0, MAXPATHLEN);
2434
2435 sprintf (name1, "/proc/%d/exe", pid);
2436 if (readlink (name1, name2, MAXPATHLEN) > 0)
2437 return name2;
2438 else
2439 return name1;
2440 }
2441
2442 /* Service function for corefiles and info proc. */
2443
2444 static int
2445 read_mapping (FILE *mapfile,
2446 long long *addr,
2447 long long *endaddr,
2448 char *permissions,
2449 long long *offset,
2450 char *device, long long *inode, char *filename)
2451 {
2452 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2453 addr, endaddr, permissions, offset, device, inode);
2454
2455 filename[0] = '\0';
2456 if (ret > 0 && ret != EOF)
2457 {
2458 /* Eat everything up to EOL for the filename. This will prevent
2459 weird filenames (such as one with embedded whitespace) from
2460 confusing this code.  It also makes this code more robust with
2461 respect to annotations the kernel may add after the filename.
2462
2463 Note the filename is used for informational purposes
2464 only. */
2465 ret += fscanf (mapfile, "%[^\n]\n", filename);
2466 }
2467
2468 return (ret != 0 && ret != EOF);
2469 }
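/* For reference, a typical line from /proc/PID/maps, as parsed by
   read_mapping above, looks roughly like

       08048000-08056000 r-xp 00000000 03:0c 64593     /usr/sbin/gpm

   giving the start and end addresses, the permissions, the file
   offset, the device, the inode, and (optionally) the name of the
   backing file.  */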
2470
2471 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2472 regions in the inferior for a corefile. */
2473
2474 static int
2475 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2476 unsigned long,
2477 int, int, int, void *), void *obfd)
2478 {
2479 long long pid = PIDGET (inferior_ptid);
2480 char mapsfilename[MAXPATHLEN];
2481 FILE *mapsfile;
2482 long long addr, endaddr, size, offset, inode;
2483 char permissions[8], device[8], filename[MAXPATHLEN];
2484 int read, write, exec;
2485 int ret;
2486
2487 /* Compose the filename for the /proc memory map, and open it. */
2488 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2489 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2490 error (_("Could not open %s."), mapsfilename);
2491
2492 if (info_verbose)
2493 fprintf_filtered (gdb_stdout,
2494 "Reading memory regions from %s\n", mapsfilename);
2495
2496 /* Now iterate until end-of-file. */
2497 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2498 &offset, &device[0], &inode, &filename[0]))
2499 {
2500 size = endaddr - addr;
2501
2502 /* Get the segment's permissions. */
2503 read = (strchr (permissions, 'r') != 0);
2504 write = (strchr (permissions, 'w') != 0);
2505 exec = (strchr (permissions, 'x') != 0);
2506
2507 if (info_verbose)
2508 {
2509 fprintf_filtered (gdb_stdout,
2510 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2511 size, paddr_nz (addr),
2512 read ? 'r' : ' ',
2513 write ? 'w' : ' ', exec ? 'x' : ' ');
2514 if (filename && filename[0])
2515 fprintf_filtered (gdb_stdout, " for %s", filename);
2516 fprintf_filtered (gdb_stdout, "\n");
2517 }
2518
2519 /* Invoke the callback function to create the corefile
2520 segment. */
2521 func (addr, size, read, write, exec, obfd);
2522 }
2523 fclose (mapsfile);
2524 return 0;
2525 }
2526
2527 /* Records the thread's register state for the corefile note
2528 section. */
2529
2530 static char *
2531 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2532 char *note_data, int *note_size)
2533 {
2534 gdb_gregset_t gregs;
2535 gdb_fpregset_t fpregs;
2536 #ifdef FILL_FPXREGSET
2537 gdb_fpxregset_t fpxregs;
2538 #endif
2539 unsigned long lwp = ptid_get_lwp (ptid);
2540
2541 fill_gregset (&gregs, -1);
2542 note_data = (char *) elfcore_write_prstatus (obfd,
2543 note_data,
2544 note_size,
2545 lwp,
2546 stop_signal, &gregs);
2547
2548 fill_fpregset (&fpregs, -1);
2549 note_data = (char *) elfcore_write_prfpreg (obfd,
2550 note_data,
2551 note_size,
2552 &fpregs, sizeof (fpregs));
2553 #ifdef FILL_FPXREGSET
2554 fill_fpxregset (&fpxregs, -1);
2555 note_data = (char *) elfcore_write_prxfpreg (obfd,
2556 note_data,
2557 note_size,
2558 &fpxregs, sizeof (fpxregs));
2559 #endif
2560 return note_data;
2561 }
2562
2563 struct linux_nat_corefile_thread_data
2564 {
2565 bfd *obfd;
2566 char *note_data;
2567 int *note_size;
2568 int num_notes;
2569 };
2570
2571 /* Called once per LWP via iterate_over_lwps.  Records the thread's
2572 register state for the corefile note section. */
2573
2574 static int
2575 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2576 {
2577 struct linux_nat_corefile_thread_data *args = data;
2578 ptid_t saved_ptid = inferior_ptid;
2579
2580 inferior_ptid = ti->ptid;
2581 registers_changed ();
2582 target_fetch_registers (-1); /* FIXME should not be necessary;
2583 fill_gregset should do it automatically. */
2584 args->note_data = linux_nat_do_thread_registers (args->obfd,
2585 ti->ptid,
2586 args->note_data,
2587 args->note_size);
2588 args->num_notes++;
2589 inferior_ptid = saved_ptid;
2590 registers_changed ();
2591 target_fetch_registers (-1); /* FIXME should not be necessary;
2592 fill_gregset should do it automatically. */
2593 return 0;
2594 }
2595
2596 /* Records the register state for the corefile note section. */
2597
2598 static char *
2599 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2600 char *note_data, int *note_size)
2601 {
2602 registers_changed ();
2603 target_fetch_registers (-1); /* FIXME should not be necessary;
2604 fill_gregset should do it automatically. */
2605 return linux_nat_do_thread_registers (obfd,
2606 ptid_build (ptid_get_pid (inferior_ptid),
2607 ptid_get_pid (inferior_ptid),
2608 0),
2609 note_data, note_size);
2611 }
2612
2613 /* Fills the "to_make_corefile_note" target vector. Builds the note
2614 section for a corefile, and returns it in a malloc buffer. */
2615
2616 static char *
2617 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2618 {
2619 struct linux_nat_corefile_thread_data thread_args;
2620 struct cleanup *old_chain;
2621 char fname[16] = { '\0' };
2622 char psargs[80] = { '\0' };
2623 char *note_data = NULL;
2624 ptid_t current_ptid = inferior_ptid;
2625 gdb_byte *auxv;
2626 int auxv_len;
2627
2628 if (get_exec_file (0))
2629 {
2630 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2631 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2632 if (get_inferior_args ())
2633 {
2634 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2635 strncat (psargs, get_inferior_args (),
2636 sizeof (psargs) - strlen (psargs));
2637 }
2638 note_data = (char *) elfcore_write_prpsinfo (obfd,
2639 note_data,
2640 note_size, fname, psargs);
2641 }
2642
2643 /* Dump information for threads. */
2644 thread_args.obfd = obfd;
2645 thread_args.note_data = note_data;
2646 thread_args.note_size = note_size;
2647 thread_args.num_notes = 0;
2648 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2649 if (thread_args.num_notes == 0)
2650 {
2651 /* iterate_over_lwps didn't come up with any LWPs; just
2652 use inferior_ptid. */
2653 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2654 note_data, note_size);
2655 }
2656 else
2657 {
2658 note_data = thread_args.note_data;
2659 }
2660
2661 auxv_len = target_auxv_read (&current_target, &auxv);
2662 if (auxv_len > 0)
2663 {
2664 note_data = elfcore_write_note (obfd, note_data, note_size,
2665 "CORE", NT_AUXV, auxv, auxv_len);
2666 xfree (auxv);
2667 }
2668
2669 make_cleanup (xfree, note_data);
2670 return note_data;
2671 }
2672
2673 /* Implement the "info proc" command. */
2674
2675 static void
2676 linux_nat_info_proc_cmd (char *args, int from_tty)
2677 {
2678 long long pid = PIDGET (inferior_ptid);
2679 FILE *procfile;
2680 char **argv = NULL;
2681 char buffer[MAXPATHLEN];
2682 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2683 int cmdline_f = 1;
2684 int cwd_f = 1;
2685 int exe_f = 1;
2686 int mappings_f = 0;
2687 int environ_f = 0;
2688 int status_f = 0;
2689 int stat_f = 0;
2690 int all = 0;
2691 struct stat dummy;
2692
2693 if (args)
2694 {
2695 /* Break up 'args' into an argv array. */
2696 if ((argv = buildargv (args)) == NULL)
2697 nomem (0);
2698 else
2699 make_cleanup_freeargv (argv);
2700 }
2701 while (argv != NULL && *argv != NULL)
2702 {
2703 if (isdigit (argv[0][0]))
2704 {
2705 pid = strtoul (argv[0], NULL, 10);
2706 }
2707 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2708 {
2709 mappings_f = 1;
2710 }
2711 else if (strcmp (argv[0], "status") == 0)
2712 {
2713 status_f = 1;
2714 }
2715 else if (strcmp (argv[0], "stat") == 0)
2716 {
2717 stat_f = 1;
2718 }
2719 else if (strcmp (argv[0], "cmd") == 0)
2720 {
2721 cmdline_f = 1;
2722 }
2723 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2724 {
2725 exe_f = 1;
2726 }
2727 else if (strcmp (argv[0], "cwd") == 0)
2728 {
2729 cwd_f = 1;
2730 }
2731 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2732 {
2733 all = 1;
2734 }
2735 else
2736 {
2737 /* [...] (future options here) */
2738 }
2739 argv++;
2740 }
2741 if (pid == 0)
2742 error (_("No current process: you must name one."));
2743
2744 sprintf (fname1, "/proc/%lld", pid);
2745 if (stat (fname1, &dummy) != 0)
2746 error (_("No /proc directory: '%s'"), fname1);
2747
2748 printf_filtered (_("process %lld\n"), pid);
2749 if (cmdline_f || all)
2750 {
2751 sprintf (fname1, "/proc/%lld/cmdline", pid);
2752 if ((procfile = fopen (fname1, "r")) != NULL)
2753 {
2754 fgets (buffer, sizeof (buffer), procfile);
2755 printf_filtered ("cmdline = '%s'\n", buffer);
2756 fclose (procfile);
2757 }
2758 else
2759 warning (_("unable to open /proc file '%s'"), fname1);
2760 }
2761 if (cwd_f || all)
2762 {
2763 sprintf (fname1, "/proc/%lld/cwd", pid);
2764 memset (fname2, 0, sizeof (fname2));
2765 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2766 printf_filtered ("cwd = '%s'\n", fname2);
2767 else
2768 warning (_("unable to read link '%s'"), fname1);
2769 }
2770 if (exe_f || all)
2771 {
2772 sprintf (fname1, "/proc/%lld/exe", pid);
2773 memset (fname2, 0, sizeof (fname2));
2774 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2775 printf_filtered ("exe = '%s'\n", fname2);
2776 else
2777 warning (_("unable to read link '%s'"), fname1);
2778 }
2779 if (mappings_f || all)
2780 {
2781 sprintf (fname1, "/proc/%lld/maps", pid);
2782 if ((procfile = fopen (fname1, "r")) != NULL)
2783 {
2784 long long addr, endaddr, size, offset, inode;
2785 char permissions[8], device[8], filename[MAXPATHLEN];
2786
2787 printf_filtered (_("Mapped address spaces:\n\n"));
2788 if (TARGET_ADDR_BIT == 32)
2789 {
2790 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2791 "Start Addr",
2792 " End Addr",
2793 " Size", " Offset", "objfile");
2794 }
2795 else
2796 {
2797 printf_filtered (" %18s %18s %10s %10s %7s\n",
2798 "Start Addr",
2799 " End Addr",
2800 " Size", " Offset", "objfile");
2801 }
2802
2803 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2804 &offset, &device[0], &inode, &filename[0]))
2805 {
2806 size = endaddr - addr;
2807
2808 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2809 calls here (and possibly above) should be abstracted
2810 out into their own functions? Andrew suggests using
2811 a generic local_address_string instead to print out
2812 the addresses; that makes sense to me, too. */
2813
2814 if (TARGET_ADDR_BIT == 32)
2815 {
2816 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2817 (unsigned long) addr, /* FIXME: pr_addr */
2818 (unsigned long) endaddr,
2819 (int) size,
2820 (unsigned int) offset,
2821 filename[0] ? filename : "");
2822 }
2823 else
2824 {
2825 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2826 (unsigned long) addr, /* FIXME: pr_addr */
2827 (unsigned long) endaddr,
2828 (int) size,
2829 (unsigned int) offset,
2830 filename[0] ? filename : "");
2831 }
2832 }
2833
2834 fclose (procfile);
2835 }
2836 else
2837 warning (_("unable to open /proc file '%s'"), fname1);
2838 }
2839 if (status_f || all)
2840 {
2841 sprintf (fname1, "/proc/%lld/status", pid);
2842 if ((procfile = fopen (fname1, "r")) != NULL)
2843 {
2844 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2845 puts_filtered (buffer);
2846 fclose (procfile);
2847 }
2848 else
2849 warning (_("unable to open /proc file '%s'"), fname1);
2850 }
2851 if (stat_f || all)
2852 {
2853 sprintf (fname1, "/proc/%lld/stat", pid);
2854 if ((procfile = fopen (fname1, "r")) != NULL)
2855 {
2856 int itmp;
2857 char ctmp;
2858
2859 if (fscanf (procfile, "%d ", &itmp) > 0)
2860 printf_filtered (_("Process: %d\n"), itmp);
2861 if (fscanf (procfile, "%s ", &buffer[0]) > 0)
2862 printf_filtered (_("Exec file: %s\n"), buffer);
2863 if (fscanf (procfile, "%c ", &ctmp) > 0)
2864 printf_filtered (_("State: %c\n"), ctmp);
2865 if (fscanf (procfile, "%d ", &itmp) > 0)
2866 printf_filtered (_("Parent process: %d\n"), itmp);
2867 if (fscanf (procfile, "%d ", &itmp) > 0)
2868 printf_filtered (_("Process group: %d\n"), itmp);
2869 if (fscanf (procfile, "%d ", &itmp) > 0)
2870 printf_filtered (_("Session id: %d\n"), itmp);
2871 if (fscanf (procfile, "%d ", &itmp) > 0)
2872 printf_filtered (_("TTY: %d\n"), itmp);
2873 if (fscanf (procfile, "%d ", &itmp) > 0)
2874 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2875 if (fscanf (procfile, "%u ", &itmp) > 0)
2876 printf_filtered (_("Flags: 0x%x\n"), itmp);
2877 if (fscanf (procfile, "%u ", &itmp) > 0)
2878 printf_filtered (_("Minor faults (no memory page): %u\n"),
2879 (unsigned int) itmp);
2880 if (fscanf (procfile, "%u ", &itmp) > 0)
2881 printf_filtered (_("Minor faults, children: %u\n"),
2882 (unsigned int) itmp);
2883 if (fscanf (procfile, "%u ", &itmp) > 0)
2884 printf_filtered (_("Major faults (memory page faults): %u\n"),
2885 (unsigned int) itmp);
2886 if (fscanf (procfile, "%u ", &itmp) > 0)
2887 printf_filtered (_("Major faults, children: %u\n"),
2888 (unsigned int) itmp);
2889 if (fscanf (procfile, "%d ", &itmp) > 0)
2890 printf_filtered ("utime: %d\n", itmp);
2891 if (fscanf (procfile, "%d ", &itmp) > 0)
2892 printf_filtered ("stime: %d\n", itmp);
2893 if (fscanf (procfile, "%d ", &itmp) > 0)
2894 printf_filtered ("utime, children: %d\n", itmp);
2895 if (fscanf (procfile, "%d ", &itmp) > 0)
2896 printf_filtered ("stime, children: %d\n", itmp);
2897 if (fscanf (procfile, "%d ", &itmp) > 0)
2898 printf_filtered (_("jiffies remaining in current time slice: %d\n"),
2899 itmp);
2900 if (fscanf (procfile, "%d ", &itmp) > 0)
2901 printf_filtered ("'nice' value: %d\n", itmp);
2902 if (fscanf (procfile, "%u ", &itmp) > 0)
2903 printf_filtered (_("jiffies until next timeout: %u\n"),
2904 (unsigned int) itmp);
2905 if (fscanf (procfile, "%u ", &itmp) > 0)
2906 printf_filtered ("jiffies until next SIGALRM: %u\n",
2907 (unsigned int) itmp);
2908 if (fscanf (procfile, "%d ", &itmp) > 0)
2909 printf_filtered (_("start time (jiffies since system boot): %d\n"),
2910 itmp);
2911 if (fscanf (procfile, "%u ", &itmp) > 0)
2912 printf_filtered (_("Virtual memory size: %u\n"),
2913 (unsigned int) itmp);
2914 if (fscanf (procfile, "%u ", &itmp) > 0)
2915 printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp);
2916 if (fscanf (procfile, "%u ", &itmp) > 0)
2917 printf_filtered ("rlim: %u\n", (unsigned int) itmp);
2918 if (fscanf (procfile, "%u ", &itmp) > 0)
2919 printf_filtered (_("Start of text: 0x%x\n"), itmp);
2920 if (fscanf (procfile, "%u ", &itmp) > 0)
2921 printf_filtered (_("End of text: 0x%x\n"), itmp);
2922 if (fscanf (procfile, "%u ", &itmp) > 0)
2923 printf_filtered (_("Start of stack: 0x%x\n"), itmp);
2924 #if 0 /* Don't know how architecture-dependent the rest is...
2925 Anyway the signal bitmap info is available from "status". */
2926 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2927 printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp);
2928 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2929 printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp);
2930 if (fscanf (procfile, "%d ", &itmp) > 0)
2931 printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp);
2932 if (fscanf (procfile, "%d ", &itmp) > 0)
2933 printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp);
2934 if (fscanf (procfile, "%d ", &itmp) > 0)
2935 printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp);
2936 if (fscanf (procfile, "%d ", &itmp) > 0)
2937 printf_filtered (_("Catched signals bitmap: 0x%x\n"), itmp);
2938 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2939 printf_filtered (_("wchan (system call): 0x%x\n"), itmp);
2940 #endif
2941 fclose (procfile);
2942 }
2943 else
2944 warning (_("unable to open /proc file '%s'"), fname1);
2945 }
2946 }
2947
2948 int
2949 linux_proc_xfer_memory (CORE_ADDR addr, char *myaddr, int len, int write,
2950 struct mem_attrib *attrib, struct target_ops *target)
2951 {
2952 int fd, ret;
2953 char filename[64];
2954
2955 if (write)
2956 return 0;
2957
2958 /* Don't bother for small transfers (less than three words); the
2958    overhead of going through /proc isn't worth it for those. */
2959 if (len < 3 * sizeof (long))
2960 return 0;
2961
2962 /* We could keep this file open and cache it - possibly one per
2963 thread. That requires some juggling, but is even faster. */
2964 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
2965 fd = open (filename, O_RDONLY | O_LARGEFILE);
2966 if (fd == -1)
2967 return 0;
2968
2969 /* If pread64 is available, use it. It's faster if the kernel
2970 supports it (only one syscall), and it's 64-bit safe even on
2971 32-bit platforms (for instance, SPARC debugging a SPARC64
2972 application). */
2973 #ifdef HAVE_PREAD64
2974 if (pread64 (fd, myaddr, len, addr) != len)
2975 #else
2976 if (lseek (fd, addr, SEEK_SET) == -1 || read (fd, myaddr, len) != len)
2977 #endif
2978 ret = 0;
2979 else
2980 ret = len;
2981
2982 close (fd);
2983 return ret;
2984 }
2985
2986 /* Parse LINE as a signal set and add its set bits to SIGS. */
2987
2988 static void
2989 add_line_to_sigset (const char *line, sigset_t *sigs)
2990 {
2991 int len = strlen (line) - 1;
2992 const char *p;
2993 int signum;
2994
2995 if (line[len] != '\n')
2996 error (_("Could not parse signal set: %s"), line);
2997
2998 p = line;
2999 signum = len * 4;
3000 while (len-- > 0)
3001 {
3002 int digit;
3003
3004 if (*p >= '0' && *p <= '9')
3005 digit = *p - '0';
3006 else if (*p >= 'a' && *p <= 'f')
3007 digit = *p - 'a' + 10;
3008 else
3009 error (_("Could not parse signal set: %s"), line);
3010
3011 signum -= 4;
3012
3013 if (digit & 1)
3014 sigaddset (sigs, signum + 1);
3015 if (digit & 2)
3016 sigaddset (sigs, signum + 2);
3017 if (digit & 4)
3018 sigaddset (sigs, signum + 3);
3019 if (digit & 8)
3020 sigaddset (sigs, signum + 4);
3021
3022 p++;
3023 }
3024 }
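/* For example, if the portion of a "SigPnd:" status line passed in
   as LINE is "0000000000000200\n", the mask has only bit 9 set;
   since bit N-1 of the mask corresponds to signal N, signal 10
   (SIGUSR1 on most Linux targets) is added to SIGS.  */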
3025
3026 /* Find process PID's pending, blocked, and ignored signals from
3027 /proc/pid/status, and set PENDING, BLOCKED, and IGNORED to match. */
3028
3029 void
3030 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3031 {
3032 FILE *procfile;
3033 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3034 int signum;
3035
3036 sigemptyset (pending);
3037 sigemptyset (blocked);
3038 sigemptyset (ignored);
3039 sprintf (fname, "/proc/%d/status", pid);
3040 procfile = fopen (fname, "r");
3041 if (procfile == NULL)
3042 error (_("Could not open %s"), fname);
3043
3044 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3045 {
3046 /* Normal queued signals are on the SigPnd line in the status
3047 file. However, 2.6 kernels also have a "shared" pending
3048 queue for delivering signals to a thread group, so check for
3049 a ShdPnd line also.
3050
3051 Unfortunately some Red Hat kernels include the shared pending
3052 queue but not the ShdPnd status field. */
3053
3054 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3055 add_line_to_sigset (buffer + 8, pending);
3056 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3057 add_line_to_sigset (buffer + 8, pending);
3058 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3059 add_line_to_sigset (buffer + 8, blocked);
3060 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3061 add_line_to_sigset (buffer + 8, ignored);
3062 }
3063
3064 fclose (procfile);
3065 }
3066
3067 void
3068 _initialize_linux_nat (void)
3069 {
3070 struct sigaction action;
3071 extern void thread_db_init (struct target_ops *);
3072
3073 deprecated_child_ops.to_find_memory_regions = linux_nat_find_memory_regions;
3074 deprecated_child_ops.to_make_corefile_notes = linux_nat_make_corefile_notes;
3075
3076 add_info ("proc", linux_nat_info_proc_cmd, _("\
3077 Show /proc process information about any running process.\n\
3078 Specify any process id, or use the program being debugged by default.\n\
3079 Specify any of the following keywords for detailed info:\n\
3080 mappings -- list of mapped memory regions.\n\
3081 stat -- list process information from /proc/PID/stat.\n\
3082 status -- list process information from /proc/PID/status.\n\
3083 all -- list all available /proc info."));
3084
3085 init_linux_nat_ops ();
3086 add_target (&linux_nat_ops);
3087 thread_db_init (&linux_nat_ops);
3088
3089 /* Save the original signal mask. */
3090 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3091
3092 action.sa_handler = sigchld_handler;
3093 sigemptyset (&action.sa_mask);
3094 action.sa_flags = SA_RESTART;
3095 sigaction (SIGCHLD, &action, NULL);
3096
3097 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3098 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3099 sigdelset (&suspend_mask, SIGCHLD);
3100
3101 sigemptyset (&blocked_mask);
3102
3103 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3104 Set debugging of GNU/Linux lwp module."), _("\
3105 Show debugging of GNU/Linux lwp module."), _("\
3106 Enables printf debugging output."),
3107 NULL,
3108 show_debug_linux_nat,
3109 &setdebuglist, &showdebuglist);
3110 }
3111 \f
3112
3113 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3114 the GNU/Linux Threads library and therefore doesn't really belong
3115 here. */
3116
3117 /* Read variable NAME in the target and return its value if found.
3118 Otherwise return zero. It is assumed that the type of the variable
3119 is `int'. */
3120
3121 static int
3122 get_signo (const char *name)
3123 {
3124 struct minimal_symbol *ms;
3125 int signo;
3126
3127 ms = lookup_minimal_symbol (name, NULL, NULL);
3128 if (ms == NULL)
3129 return 0;
3130
3131 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (char *) &signo,
3132 sizeof (signo)) != 0)
3133 return 0;
3134
3135 return signo;
3136 }
3137
3138 /* Return the set of signals used by the threads library in *SET. */
3139
3140 void
3141 lin_thread_get_thread_signals (sigset_t *set)
3142 {
3143 struct sigaction action;
3144 int restart, cancel;
3145
3146 sigemptyset (set);
3147
3148 restart = get_signo ("__pthread_sig_restart");
3149 if (restart == 0)
3150 return;
3151
3152 cancel = get_signo ("__pthread_sig_cancel");
3153 if (cancel == 0)
3154 return;
3155
3156 sigaddset (set, restart);
3157 sigaddset (set, cancel);
3158
3159 /* The GNU/Linux Threads library makes terminating threads send a
3160 special "cancel" signal instead of SIGCHLD. Make sure we catch
3161 those (to prevent them from terminating GDB itself, which is
3162 likely to be their default action) and treat them the same way as
3163 SIGCHLD. */
3164
3165 action.sa_handler = sigchld_handler;
3166 sigemptyset (&action.sa_mask);
3167 action.sa_flags = SA_RESTART;
3168 sigaction (cancel, &action, NULL);
3169
3170 /* We block the "cancel" signal throughout this code ... */
3171 sigaddset (&blocked_mask, cancel);
3172 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3173
3174 /* ... except during a sigsuspend. */
3175 sigdelset (&suspend_mask, cancel);
3176 }