1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 2 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor,
21 Boston, MA 02110-1301, USA. */
22
23 #include "defs.h"
24 #include "inferior.h"
25 #include "target.h"
26 #include "gdb_string.h"
27 #include "gdb_wait.h"
28 #include "gdb_assert.h"
29 #ifdef HAVE_TKILL_SYSCALL
30 #include <unistd.h>
31 #include <sys/syscall.h>
32 #endif
33 #include <sys/ptrace.h>
34 #include "linux-nat.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
37 #include "gdbcmd.h"
38 #include "regcache.h"
39 #include "regset.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51
52 #ifndef O_LARGEFILE
53 #define O_LARGEFILE 0
54 #endif
55
56 /* If the system headers did not provide the constants, hard-code the normal
57 values. */
58 #ifndef PTRACE_EVENT_FORK
59
60 #define PTRACE_SETOPTIONS 0x4200
61 #define PTRACE_GETEVENTMSG 0x4201
62
63 /* options set using PTRACE_SETOPTIONS */
64 #define PTRACE_O_TRACESYSGOOD 0x00000001
65 #define PTRACE_O_TRACEFORK 0x00000002
66 #define PTRACE_O_TRACEVFORK 0x00000004
67 #define PTRACE_O_TRACECLONE 0x00000008
68 #define PTRACE_O_TRACEEXEC 0x00000010
69 #define PTRACE_O_TRACEVFORKDONE 0x00000020
70 #define PTRACE_O_TRACEEXIT 0x00000040
71
72 /* Wait extended result codes for the above trace options. */
73 #define PTRACE_EVENT_FORK 1
74 #define PTRACE_EVENT_VFORK 2
75 #define PTRACE_EVENT_CLONE 3
76 #define PTRACE_EVENT_EXEC 4
77 #define PTRACE_EVENT_VFORK_DONE 5
78 #define PTRACE_EVENT_EXIT 6
79
80 #endif /* PTRACE_EVENT_FORK */
81
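/* Illustrative sketch only (not part of the original file): once the
   options above are set, an extended event is delivered as a SIGTRAP
   stop whose event code occupies the high bits of the waitpid status.
   Decoding it looks roughly like this, for a stopped LWP `pid':

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
         && (status >> 16) == PTRACE_EVENT_FORK)
       {
         unsigned long new_pid;
         ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
         // new_pid is the child created by the fork.
       }

   linux_handle_extended_wait below implements the real decoding.  */
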
82 /* We can't always assume that this flag is available, but all systems
83 with the ptrace event handlers also have __WALL, so it's safe to use
84 here. */
85 #ifndef __WALL
86 #define __WALL 0x40000000 /* Wait for any child. */
87 #endif
88
89 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
90 the use of the multi-threaded target. */
91 static struct target_ops *linux_ops;
92 static struct target_ops linux_ops_saved;
93
94 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
95 Called by our to_xfer_partial. */
96 static LONGEST (*super_xfer_partial) (struct target_ops *,
97 enum target_object,
98 const char *, gdb_byte *,
99 const gdb_byte *,
100 ULONGEST, LONGEST);
101
102 static int debug_linux_nat;
103 static void
104 show_debug_linux_nat (struct ui_file *file, int from_tty,
105 struct cmd_list_element *c, const char *value)
106 {
107 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
108 value);
109 }
110
111 static int linux_parent_pid;
112
113 struct simple_pid_list
114 {
115 int pid;
116 struct simple_pid_list *next;
117 };
118 struct simple_pid_list *stopped_pids;
119
120 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
121 cannot be used, 1 if it can. */
122
123 static int linux_supports_tracefork_flag = -1;
124
125 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
126 PTRACE_O_TRACEVFORKDONE. */
127
128 static int linux_supports_tracevforkdone_flag = -1;
129
130 \f
131 /* Trivial list manipulation functions to keep track of a list of
132 new stopped processes. */
133 static void
134 add_to_pid_list (struct simple_pid_list **listp, int pid)
135 {
136 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
137 new_pid->pid = pid;
138 new_pid->next = *listp;
139 *listp = new_pid;
140 }
141
142 static int
143 pull_pid_from_list (struct simple_pid_list **listp, int pid)
144 {
145 struct simple_pid_list **p;
146
147 for (p = listp; *p != NULL; p = &(*p)->next)
148 if ((*p)->pid == pid)
149 {
150 struct simple_pid_list *next = (*p)->next;
151 xfree (*p);
152 *p = next;
153 return 1;
154 }
155 return 0;
156 }
157
158 void
159 linux_record_stopped_pid (int pid)
160 {
161 add_to_pid_list (&stopped_pids, pid);
162 }
163
164 \f
165 /* A helper function for linux_test_for_tracefork, called after fork (). */
166
167 static void
168 linux_tracefork_child (void)
169 {
170 int ret;
171
172 ptrace (PTRACE_TRACEME, 0, 0, 0);
173 kill (getpid (), SIGSTOP);
174 fork ();
175 _exit (0);
176 }
177
178 /* Wrapper function for waitpid which handles EINTR. */
179
180 static int
181 my_waitpid (int pid, int *status, int flags)
182 {
183 int ret;
184 do
185 {
186 ret = waitpid (pid, status, flags);
187 }
188 while (ret == -1 && errno == EINTR);
189
190 return ret;
191 }
192
193 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
194
195 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
196 we know that the feature is not available. This may change the tracing
197 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
198
199 However, if it succeeds, we don't know for sure that the feature is
200 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
201 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
202 fork tracing, and let it fork. If the process exits, we assume that we
203 can't use TRACEFORK; if we get the fork notification, and we can extract
204 the new child's PID, then we assume that we can. */
205
206 static void
207 linux_test_for_tracefork (int original_pid)
208 {
209 int child_pid, ret, status;
210 long second_pid;
211
212 linux_supports_tracefork_flag = 0;
213 linux_supports_tracevforkdone_flag = 0;
214
215 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
216 if (ret != 0)
217 return;
218
219 child_pid = fork ();
220 if (child_pid == -1)
221 perror_with_name (("fork"));
222
223 if (child_pid == 0)
224 linux_tracefork_child ();
225
226 ret = my_waitpid (child_pid, &status, 0);
227 if (ret == -1)
228 perror_with_name (("waitpid"));
229 else if (ret != child_pid)
230 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
231 if (! WIFSTOPPED (status))
232 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
233
234 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
235 if (ret != 0)
236 {
237 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
238 if (ret != 0)
239 {
240 warning (_("linux_test_for_tracefork: failed to kill child"));
241 return;
242 }
243
244 ret = my_waitpid (child_pid, &status, 0);
245 if (ret != child_pid)
246 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
247 else if (!WIFSIGNALED (status))
248 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
249 "killed child"), status);
250
251 return;
252 }
253
254 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
255 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
256 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
257 linux_supports_tracevforkdone_flag = (ret == 0);
258
259 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
260 if (ret != 0)
261 warning (_("linux_test_for_tracefork: failed to resume child"));
262
263 ret = my_waitpid (child_pid, &status, 0);
264
265 if (ret == child_pid && WIFSTOPPED (status)
266 && status >> 16 == PTRACE_EVENT_FORK)
267 {
268 second_pid = 0;
269 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
270 if (ret == 0 && second_pid != 0)
271 {
272 int second_status;
273
274 linux_supports_tracefork_flag = 1;
275 my_waitpid (second_pid, &second_status, 0);
276 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
277 if (ret != 0)
278 warning (_("linux_test_for_tracefork: failed to kill second child"));
279 my_waitpid (second_pid, &status, 0);
280 }
281 }
282 else
283 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
284 "(%d, status 0x%x)"), ret, status);
285
286 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
287 if (ret != 0)
288 warning (_("linux_test_for_tracefork: failed to kill child"));
289 my_waitpid (child_pid, &status, 0);
290 }
291
292 /* Return non-zero iff we have tracefork functionality available.
293 This function also sets linux_supports_tracefork_flag. */
294
295 static int
296 linux_supports_tracefork (int pid)
297 {
298 if (linux_supports_tracefork_flag == -1)
299 linux_test_for_tracefork (pid);
300 return linux_supports_tracefork_flag;
301 }
302
303 static int
304 linux_supports_tracevforkdone (int pid)
305 {
306 if (linux_supports_tracefork_flag == -1)
307 linux_test_for_tracefork (pid);
308 return linux_supports_tracevforkdone_flag;
309 }
310
311 \f
312 void
313 linux_enable_event_reporting (ptid_t ptid)
314 {
315 int pid = ptid_get_lwp (ptid);
316 int options;
317
318 if (pid == 0)
319 pid = ptid_get_pid (ptid);
320
321 if (! linux_supports_tracefork (pid))
322 return;
323
324 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
325 | PTRACE_O_TRACECLONE;
326 if (linux_supports_tracevforkdone (pid))
327 options |= PTRACE_O_TRACEVFORKDONE;
328
329 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
330 read-only process state. */
331
332 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
333 }
334
335 void
336 child_post_attach (int pid)
337 {
338 linux_enable_event_reporting (pid_to_ptid (pid));
339 check_for_thread_db ();
340 }
341
342 static void
343 linux_child_post_startup_inferior (ptid_t ptid)
344 {
345 linux_enable_event_reporting (ptid);
346 check_for_thread_db ();
347 }
348
349 int
350 child_follow_fork (struct target_ops *ops, int follow_child)
351 {
352 ptid_t last_ptid;
353 struct target_waitstatus last_status;
354 int has_vforked;
355 int parent_pid, child_pid;
356
357 get_last_target_status (&last_ptid, &last_status);
358 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
359 parent_pid = ptid_get_lwp (last_ptid);
360 if (parent_pid == 0)
361 parent_pid = ptid_get_pid (last_ptid);
362 child_pid = last_status.value.related_pid;
363
364 if (! follow_child)
365 {
366 /* We're already attached to the parent, by default. */
367
368 /* Before detaching from the child, remove all breakpoints from
369 it. (This won't actually modify the breakpoint list, but will
370 physically remove the breakpoints from the child.) */
371 /* If we vforked this will remove the breakpoints from the parent
372 also, but they'll be reinserted below. */
373 detach_breakpoints (child_pid);
374
375 /* Detach new forked process? */
376 if (detach_fork)
377 {
378 if (debug_linux_nat)
379 {
380 target_terminal_ours ();
381 fprintf_filtered (gdb_stdlog,
382 "Detaching after fork from child process %d.\n",
383 child_pid);
384 }
385
386 ptrace (PTRACE_DETACH, child_pid, 0, 0);
387 }
388 else
389 {
390 struct fork_info *fp;
391 /* Retain child fork in ptrace (stopped) state. */
392 fp = find_fork_pid (child_pid);
393 if (!fp)
394 fp = add_fork (child_pid);
395 fork_save_infrun_state (fp, 0);
396 }
397
398 if (has_vforked)
399 {
400 gdb_assert (linux_supports_tracefork_flag >= 0);
401 if (linux_supports_tracevforkdone (0))
402 {
403 int status;
404
405 ptrace (PTRACE_CONT, parent_pid, 0, 0);
406 my_waitpid (parent_pid, &status, __WALL);
407 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
408 warning (_("Unexpected waitpid result %06x when waiting for "
409 "vfork-done"), status);
410 }
411 else
412 {
413 /* We can't insert breakpoints until the child has
414 finished with the shared memory region. We need to
415 wait until that happens. Ideal would be to just
416 call:
417 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
418 - waitpid (parent_pid, &status, __WALL);
419 However, most architectures can't handle a syscall
420 being traced on the way out if it wasn't traced on
421 the way in.
422
423 We might also think to loop, continuing the child
424 until it exits or gets a SIGTRAP. One problem is
425 that the child might call ptrace with PTRACE_TRACEME.
426
427 There's no simple and reliable way to figure out when
428 the vforked child will be done with its copy of the
429 shared memory. We could step it out of the syscall,
430 two instructions, let it go, and then single-step the
431 parent once. When we have hardware single-step, this
432 would work; with software single-step it could still
433 be made to work but we'd have to be able to insert
434 single-step breakpoints in the child, and we'd have
435 to insert -just- the single-step breakpoint in the
436 parent. Very awkward.
437
438 In the end, the best we can do is to make sure it
439 runs for a little while. Hopefully it will be out of
440 range of any breakpoints we reinsert. Usually this
441 is only the single-step breakpoint at vfork's return
442 point. */
443
444 usleep (10000);
445 }
446
447 /* Since we vforked, breakpoints were removed in the parent
448 too. Put them back. */
449 reattach_breakpoints (parent_pid);
450 }
451 }
452 else
453 {
454 char child_pid_spelling[40];
455
456 /* Needed to keep the breakpoint lists in sync. */
457 if (! has_vforked)
458 detach_breakpoints (child_pid);
459
460 /* Before detaching from the parent, remove all breakpoints from it. */
461 remove_breakpoints ();
462
463 if (debug_linux_nat)
464 {
465 target_terminal_ours ();
466 fprintf_filtered (gdb_stdlog,
467 "Attaching after fork to child process %d.\n",
468 child_pid);
469 }
470
471 /* If we're vforking, we may want to hold on to the parent until
472 the child exits or execs. At exec time we can remove the old
473 breakpoints from the parent and detach it; at exit time we
474 could do the same (or even, sneakily, resume debugging it - the
475 child's exec has failed, or something similar).
476
477 This doesn't clean up "properly", because we can't call
478 target_detach, but that's OK; if the current target is "child",
479 then it doesn't need any further cleanups, and lin_lwp will
480 generally not encounter vfork (vfork is defined to fork
481 in libpthread.so).
482
483 The holding part is very easy if we have VFORKDONE events;
484 but keeping track of both processes is beyond GDB at the
485 moment. So we don't expose the parent to the rest of GDB.
486 Instead we quietly hold onto it until such time as we can
487 safely resume it. */
488
489 if (has_vforked)
490 linux_parent_pid = parent_pid;
491 else if (!detach_fork)
492 {
493 struct fork_info *fp;
494 /* Retain parent fork in ptrace (stopped) state. */
495 fp = find_fork_pid (parent_pid);
496 if (!fp)
497 fp = add_fork (parent_pid);
498 fork_save_infrun_state (fp, 0);
499 }
500 else
501 {
502 target_detach (NULL, 0);
503 }
504
505 inferior_ptid = pid_to_ptid (child_pid);
506
507 /* Reinstall ourselves, since we might have been removed in
508 target_detach (which does other necessary cleanup). */
509
510 push_target (ops);
511
512 /* Reset breakpoints in the child as appropriate. */
513 follow_inferior_reset_breakpoints ();
514 }
515
516 return 0;
517 }
518
519 ptid_t
520 linux_handle_extended_wait (int pid, int status,
521 struct target_waitstatus *ourstatus)
522 {
523 int event = status >> 16;
524
525 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
526 || event == PTRACE_EVENT_CLONE)
527 {
528 unsigned long new_pid;
529 int ret;
530
531 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
532
533 /* If we haven't already seen the new PID stop, wait for it now. */
534 if (! pull_pid_from_list (&stopped_pids, new_pid))
535 {
536 /* The new child has a pending SIGSTOP. We can't affect it until it
537 hits the SIGSTOP, but we're already attached. */
538 ret = my_waitpid (new_pid, &status,
539 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
540 if (ret == -1)
541 perror_with_name (_("waiting for new child"));
542 else if (ret != new_pid)
543 internal_error (__FILE__, __LINE__,
544 _("wait returned unexpected PID %d"), ret);
545 else if (!WIFSTOPPED (status) || WSTOPSIG (status) != SIGSTOP)
546 internal_error (__FILE__, __LINE__,
547 _("wait returned unexpected status 0x%x"), status);
548 }
549
550 if (event == PTRACE_EVENT_FORK)
551 ourstatus->kind = TARGET_WAITKIND_FORKED;
552 else if (event == PTRACE_EVENT_VFORK)
553 ourstatus->kind = TARGET_WAITKIND_VFORKED;
554 else
555 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
556
557 ourstatus->value.related_pid = new_pid;
558 return inferior_ptid;
559 }
560
561 if (event == PTRACE_EVENT_EXEC)
562 {
563 ourstatus->kind = TARGET_WAITKIND_EXECD;
564 ourstatus->value.execd_pathname
565 = xstrdup (child_pid_to_exec_file (pid));
566
567 if (linux_parent_pid)
568 {
569 detach_breakpoints (linux_parent_pid);
570 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
571
572 linux_parent_pid = 0;
573 }
574
575 return inferior_ptid;
576 }
577
578 internal_error (__FILE__, __LINE__,
579 _("unknown ptrace event %d"), event);
580 }
581
582 \f
583 void
584 child_insert_fork_catchpoint (int pid)
585 {
586 if (! linux_supports_tracefork (pid))
587 error (_("Your system does not support fork catchpoints."));
588 }
589
590 void
591 child_insert_vfork_catchpoint (int pid)
592 {
593 if (!linux_supports_tracefork (pid))
594 error (_("Your system does not support vfork catchpoints."));
595 }
596
597 void
598 child_insert_exec_catchpoint (int pid)
599 {
600 if (!linux_supports_tracefork (pid))
601 error (_("Your system does not support exec catchpoints."));
602 }
603
604 /* On GNU/Linux there are no real LWPs. The closest thing to LWPs
605 are processes sharing the same VM space. A multi-threaded process
606 is basically a group of such processes. However, such a grouping
607 is almost entirely a user-space issue; the kernel doesn't enforce
608 such a grouping at all (this might change in the future). In
609 general, we'll rely on the threads library (i.e. the GNU/Linux
610 Threads library) to provide such a grouping.
611
612 It is perfectly possible to write a multi-threaded application
613 without the assistance of a threads library, by using the clone
614 system call directly. This module should be able to give some
615 rudimentary support for debugging such applications if developers
616 specify the CLONE_PTRACE flag in the clone system call, and are
617 using the Linux kernel 2.4 or above.
618
619 Note that there are some peculiarities in GNU/Linux that affect
620 this code:
621
622 - In general one should specify the __WCLONE flag to waitpid in
623 order to make it report events for any of the cloned processes
624 (and leave it out for the initial process). However, if a cloned
625 process has exited the exit status is only reported if the
626 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
627 we cannot use it since GDB must work on older systems too.
628
629 - When a traced, cloned process exits and is waited for by the
630 debugger, the kernel reassigns it to the original parent and
631 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
632 library doesn't notice this, which leads to the "zombie problem":
633 When debugged, a multi-threaded process that spawns a lot of
634 threads will run out of processes, even if the threads exit,
635 because the "zombies" stay around. */
636
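/* Illustrative sketch only (not part of the original file): because of
   the __WCLONE restriction described above, polling for events from
   both the initial process and its cloned LWPs takes two waitpid
   calls, roughly:

     pid = waitpid (-1, &status, WNOHANG);                // initial process
     if (pid <= 0)
       pid = waitpid (-1, &status, WNOHANG | __WCLONE);   // cloned LWPs

   The waitpid loop in linux_nat_wait follows essentially this pattern,
   alternating between the two flag settings.  */
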
637 /* List of known LWPs. */
638 static struct lwp_info *lwp_list;
639
640 /* Number of LWPs in the list. */
641 static int num_lwps;
642 \f
643
644 #define GET_LWP(ptid) ptid_get_lwp (ptid)
645 #define GET_PID(ptid) ptid_get_pid (ptid)
646 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
647 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
648
649 /* If the last reported event was a SIGTRAP, this variable is set to
650 the process id of the LWP/thread that got it. */
651 ptid_t trap_ptid;
652 \f
653
654 /* Since we cannot wait (in linux_nat_wait) for the initial process and
655 any cloned processes with a single call to waitpid, we have to use
656 the WNOHANG flag and call waitpid in a loop. To optimize
657 things a bit we use `sigsuspend' to wake us up when a process has
658 something to report (it will send us a SIGCHLD if it has). To make
659 this work we have to juggle with the signal mask. We save the
660 original signal mask such that we can restore it before creating a
661 new process in order to avoid blocking certain signals in the
662 inferior. We then block SIGCHLD during the waitpid/sigsuspend
663 loop. */
664
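/* Illustrative sketch only (hypothetical names, not taken from this
   file): the general shape of such a waitpid/sigsuspend loop is:

     sigset_t chld_mask, prev_mask;

     sigemptyset (&chld_mask);
     sigaddset (&chld_mask, SIGCHLD);
     sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask);

     for (;;)
       {
         pid_t pid = waitpid (-1, &status, WNOHANG | __WCLONE);
         if (pid > 0)
           break;                    // An event is available.
         sigsuspend (&prev_mask);    // Sleep until SIGCHLD is delivered.
       }

   Blocking SIGCHLD everywhere except inside sigsuspend closes the race
   where the signal could arrive between waitpid returning 0 and the
   call to sigsuspend.  */
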
665 /* Original signal mask. */
666 static sigset_t normal_mask;
667
668 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
669 _initialize_linux_nat. */
670 static sigset_t suspend_mask;
671
672 /* Signals to block so that sigsuspend works. */
673 static sigset_t blocked_mask;
674 \f
675
676 /* Prototypes for local functions. */
677 static int stop_wait_callback (struct lwp_info *lp, void *data);
678 static int linux_nat_thread_alive (ptid_t ptid);
679 \f
680 /* Convert wait status STATUS to a string. Used for printing debug
681 messages only. */
682
683 static char *
684 status_to_str (int status)
685 {
686 static char buf[64];
687
688 if (WIFSTOPPED (status))
689 snprintf (buf, sizeof (buf), "%s (stopped)",
690 strsignal (WSTOPSIG (status)));
691 else if (WIFSIGNALED (status))
692 snprintf (buf, sizeof (buf), "%s (terminated)",
693 strsignal (WTERMSIG (status)));
694 else
695 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
696
697 return buf;
698 }
699
700 /* Initialize the list of LWPs. Note that this module, contrary to
701 what GDB's generic threads layer does for its thread list,
702 re-initializes the LWP list whenever we mourn or detach the
703 inferior (detaching does not involve mourning). */
704
705 static void
706 init_lwp_list (void)
707 {
708 struct lwp_info *lp, *lpnext;
709
710 for (lp = lwp_list; lp; lp = lpnext)
711 {
712 lpnext = lp->next;
713 xfree (lp);
714 }
715
716 lwp_list = NULL;
717 num_lwps = 0;
718 }
719
720 /* Add the LWP specified by PID to the list. Return a pointer to the
721 structure describing the new LWP. */
722
723 static struct lwp_info *
724 add_lwp (ptid_t ptid)
725 {
726 struct lwp_info *lp;
727
728 gdb_assert (is_lwp (ptid));
729
730 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
731
732 memset (lp, 0, sizeof (struct lwp_info));
733
734 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
735
736 lp->ptid = ptid;
737
738 lp->next = lwp_list;
739 lwp_list = lp;
740 ++num_lwps;
741
742 return lp;
743 }
744
745 /* Remove the LWP specified by PID from the list. */
746
747 static void
748 delete_lwp (ptid_t ptid)
749 {
750 struct lwp_info *lp, *lpprev;
751
752 lpprev = NULL;
753
754 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
755 if (ptid_equal (lp->ptid, ptid))
756 break;
757
758 if (!lp)
759 return;
760
761 num_lwps--;
762
763 if (lpprev)
764 lpprev->next = lp->next;
765 else
766 lwp_list = lp->next;
767
768 xfree (lp);
769 }
770
771 /* Return a pointer to the structure describing the LWP corresponding
772 to PID. If no corresponding LWP could be found, return NULL. */
773
774 static struct lwp_info *
775 find_lwp_pid (ptid_t ptid)
776 {
777 struct lwp_info *lp;
778 int lwp;
779
780 if (is_lwp (ptid))
781 lwp = GET_LWP (ptid);
782 else
783 lwp = GET_PID (ptid);
784
785 for (lp = lwp_list; lp; lp = lp->next)
786 if (lwp == GET_LWP (lp->ptid))
787 return lp;
788
789 return NULL;
790 }
791
792 /* Call CALLBACK with its second argument set to DATA for every LWP in
793 the list. If CALLBACK returns 1 for a particular LWP, return a
794 pointer to the structure describing that LWP immediately.
795 Otherwise return NULL. */
796
797 struct lwp_info *
798 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
799 {
800 struct lwp_info *lp, *lpnext;
801
802 for (lp = lwp_list; lp; lp = lpnext)
803 {
804 lpnext = lp->next;
805 if ((*callback) (lp, data))
806 return lp;
807 }
808
809 return NULL;
810 }
811
812 /* Update our internal state when changing from one fork (checkpoint,
813 et cetera) to another indicated by NEW_PTID. We can only switch
814 single-threaded applications, so we only create one new LWP, and
815 the previous list is discarded. */
816
817 void
818 linux_nat_switch_fork (ptid_t new_ptid)
819 {
820 struct lwp_info *lp;
821
822 init_lwp_list ();
823 lp = add_lwp (new_ptid);
824 lp->stopped = 1;
825 }
826
827 /* Record a PTID for later deletion. */
828
829 struct saved_ptids
830 {
831 ptid_t ptid;
832 struct saved_ptids *next;
833 };
834 static struct saved_ptids *threads_to_delete;
835
836 static void
837 record_dead_thread (ptid_t ptid)
838 {
839 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
840 p->ptid = ptid;
841 p->next = threads_to_delete;
842 threads_to_delete = p;
843 }
844
845 /* Delete any dead threads which are not the current thread. */
846
847 static void
848 prune_lwps (void)
849 {
850 struct saved_ptids **p = &threads_to_delete;
851
852 while (*p)
853 if (! ptid_equal ((*p)->ptid, inferior_ptid))
854 {
855 struct saved_ptids *tmp = *p;
856 delete_thread (tmp->ptid);
857 *p = tmp->next;
858 xfree (tmp);
859 }
860 else
861 p = &(*p)->next;
862 }
863
864 /* Callback for iterate_over_threads that finds a thread corresponding
865 to the given LWP. */
866
867 static int
868 find_thread_from_lwp (struct thread_info *thr, void *dummy)
869 {
870 ptid_t *ptid_p = dummy;
871
872 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
873 return 1;
874 else
875 return 0;
876 }
877
878 /* Handle the exit of a single thread LP. */
879
880 static void
881 exit_lwp (struct lwp_info *lp)
882 {
883 if (in_thread_list (lp->ptid))
884 {
885 /* Core GDB cannot deal with us deleting the current thread. */
886 if (!ptid_equal (lp->ptid, inferior_ptid))
887 delete_thread (lp->ptid);
888 else
889 record_dead_thread (lp->ptid);
890 printf_unfiltered (_("[%s exited]\n"),
891 target_pid_to_str (lp->ptid));
892 }
893 else
894 {
895 /* Even if LP->PTID is not in the global GDB thread list, the
896 LWP may be - with an additional thread ID. We don't need
897 to print anything in this case; thread_db is in use and
898 already took care of that. But it didn't delete the thread
899 in order to handle zombies correctly. */
900
901 struct thread_info *thr;
902
903 thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
904 if (thr)
905 {
906 if (!ptid_equal (thr->ptid, inferior_ptid))
907 delete_thread (thr->ptid);
908 else
909 record_dead_thread (thr->ptid);
910 }
911 }
912
913 delete_lwp (lp->ptid);
914 }
915
916 /* Attach to the LWP specified by PID. If VERBOSE is non-zero, print
917 a message telling the user that a new LWP has been added to the
918 process. Return 0 if successful or -1 if the new LWP could not
919 be attached. */
920
921 int
922 lin_lwp_attach_lwp (ptid_t ptid, int verbose)
923 {
924 struct lwp_info *lp;
925
926 gdb_assert (is_lwp (ptid));
927
928 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
929 to interrupt either the ptrace() or waitpid() calls below. */
930 if (!sigismember (&blocked_mask, SIGCHLD))
931 {
932 sigaddset (&blocked_mask, SIGCHLD);
933 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
934 }
935
936 lp = find_lwp_pid (ptid);
937
938 /* We assume that we're already attached to any LWP that has an id
939 equal to the overall process id, and to any LWP that is already
940 in our list of LWPs. If we're not seeing exit events from threads
941 and we've had PID wraparound since we last tried to stop all threads,
942 this assumption might be wrong; fortunately, this is very unlikely
943 to happen. */
944 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
945 {
946 pid_t pid;
947 int status;
948
949 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
950 {
951 /* If we fail to attach to the thread, issue a warning,
952 but continue. One way this can happen is if thread
953 creation is interrupted; as of Linux 2.6.19, a kernel
954 bug may place threads in the thread list and then fail
955 to create them. */
956 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
957 safe_strerror (errno));
958 return -1;
959 }
960
961 if (lp == NULL)
962 lp = add_lwp (ptid);
963
964 if (debug_linux_nat)
965 fprintf_unfiltered (gdb_stdlog,
966 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
967 target_pid_to_str (ptid));
968
969 pid = my_waitpid (GET_LWP (ptid), &status, 0);
970 if (pid == -1 && errno == ECHILD)
971 {
972 /* Try again with __WCLONE to check cloned processes. */
973 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
974 lp->cloned = 1;
975 }
976
977 gdb_assert (pid == GET_LWP (ptid)
978 && WIFSTOPPED (status) && WSTOPSIG (status));
979
980 target_post_attach (pid);
981
982 lp->stopped = 1;
983
984 if (debug_linux_nat)
985 {
986 fprintf_unfiltered (gdb_stdlog,
987 "LLAL: waitpid %s received %s\n",
988 target_pid_to_str (ptid),
989 status_to_str (status));
990 }
991 }
992 else
993 {
994 /* We assume that the LWP representing the original process is
995 already stopped. Mark it as stopped in the data structure
996 that the GNU/Linux ptrace layer uses to keep track of
997 threads. Note that this won't have been done already, since
998 the main thread will, we assume, have been stopped by an
999 attach from a different layer. */
1000 if (lp == NULL)
1001 lp = add_lwp (ptid);
1002 lp->stopped = 1;
1003 }
1004
1005 if (verbose)
1006 printf_filtered (_("[New %s]\n"), target_pid_to_str (ptid));
1007
1008 return 0;
1009 }
1010
1011 static void
1012 linux_nat_attach (char *args, int from_tty)
1013 {
1014 struct lwp_info *lp;
1015 pid_t pid;
1016 int status;
1017
1018 /* FIXME: We should probably accept a list of process ids, and
1019 attach all of them. */
1020 linux_ops->to_attach (args, from_tty);
1021
1022 /* Add the initial process as the first LWP to the list. */
1023 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1024 lp = add_lwp (inferior_ptid);
1025
1026 /* Make sure the initial process is stopped. The user-level threads
1027 layer might want to poke around in the inferior, and that won't
1028 work if things haven't stabilized yet. */
1029 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
1030 if (pid == -1 && errno == ECHILD)
1031 {
1032 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
1033
1034 /* Try again with __WCLONE to check cloned processes. */
1035 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
1036 lp->cloned = 1;
1037 }
1038
1039 gdb_assert (pid == GET_PID (inferior_ptid)
1040 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
1041
1042 lp->stopped = 1;
1043
1044 /* Fake the SIGSTOP that core GDB expects. */
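/* (W_STOPCODE (SIGSTOP) builds a waitpid-style status word that looks
   as if the process had just stopped with SIGSTOP; on glibc it expands
   to roughly (SIGSTOP << 8) | 0x7f.)  */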
1045 lp->status = W_STOPCODE (SIGSTOP);
1046 lp->resumed = 1;
1047 if (debug_linux_nat)
1048 {
1049 fprintf_unfiltered (gdb_stdlog,
1050 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1051 }
1052 }
1053
1054 static int
1055 detach_callback (struct lwp_info *lp, void *data)
1056 {
1057 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1058
1059 if (debug_linux_nat && lp->status)
1060 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1061 strsignal (WSTOPSIG (lp->status)),
1062 target_pid_to_str (lp->ptid));
1063
1064 while (lp->signalled && lp->stopped)
1065 {
1066 errno = 0;
1067 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1068 WSTOPSIG (lp->status)) < 0)
1069 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1070 safe_strerror (errno));
1071
1072 if (debug_linux_nat)
1073 fprintf_unfiltered (gdb_stdlog,
1074 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1075 target_pid_to_str (lp->ptid),
1076 status_to_str (lp->status));
1077
1078 lp->stopped = 0;
1079 lp->signalled = 0;
1080 lp->status = 0;
1081 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1082 here. But since lp->signalled was cleared above,
1083 stop_wait_callback didn't do anything; the process was left
1084 running. Shouldn't we be waiting for it to stop?
1085 I've removed the call, since stop_wait_callback now does do
1086 something when called with lp->signalled == 0. */
1087
1088 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1089 }
1090
1091 /* We don't actually detach from the LWP that has an id equal to the
1092 overall process id just yet. */
1093 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1094 {
1095 errno = 0;
1096 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1097 WSTOPSIG (lp->status)) < 0)
1098 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1099 safe_strerror (errno));
1100
1101 if (debug_linux_nat)
1102 fprintf_unfiltered (gdb_stdlog,
1103 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1104 target_pid_to_str (lp->ptid),
1105 strsignal (WSTOPSIG (lp->status)));
1106
1107 delete_lwp (lp->ptid);
1108 }
1109
1110 return 0;
1111 }
1112
1113 static void
1114 linux_nat_detach (char *args, int from_tty)
1115 {
1116 iterate_over_lwps (detach_callback, NULL);
1117
1118 /* Only the initial process should be left right now. */
1119 gdb_assert (num_lwps == 1);
1120
1121 trap_ptid = null_ptid;
1122
1123 /* Destroy LWP info; it's no longer valid. */
1124 init_lwp_list ();
1125
1126 /* Restore the original signal mask. */
1127 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1128 sigemptyset (&blocked_mask);
1129
1130 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1131 linux_ops->to_detach (args, from_tty);
1132 }
1133
1134 /* Resume LP. */
1135
1136 static int
1137 resume_callback (struct lwp_info *lp, void *data)
1138 {
1139 if (lp->stopped && lp->status == 0)
1140 {
1141 struct thread_info *tp;
1142
1143 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1144 0, TARGET_SIGNAL_0);
1145 if (debug_linux_nat)
1146 fprintf_unfiltered (gdb_stdlog,
1147 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1148 target_pid_to_str (lp->ptid));
1149 lp->stopped = 0;
1150 lp->step = 0;
1151 }
1152
1153 return 0;
1154 }
1155
1156 static int
1157 resume_clear_callback (struct lwp_info *lp, void *data)
1158 {
1159 lp->resumed = 0;
1160 return 0;
1161 }
1162
1163 static int
1164 resume_set_callback (struct lwp_info *lp, void *data)
1165 {
1166 lp->resumed = 1;
1167 return 0;
1168 }
1169
1170 static void
1171 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1172 {
1173 struct lwp_info *lp;
1174 int resume_all;
1175
1176 if (debug_linux_nat)
1177 fprintf_unfiltered (gdb_stdlog,
1178 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1179 step ? "step" : "resume",
1180 target_pid_to_str (ptid),
1181 signo ? strsignal (signo) : "0",
1182 target_pid_to_str (inferior_ptid));
1183
1184 prune_lwps ();
1185
1186 /* A specific PTID means `step only this process id'. */
1187 resume_all = (PIDGET (ptid) == -1);
1188
1189 if (resume_all)
1190 iterate_over_lwps (resume_set_callback, NULL);
1191 else
1192 iterate_over_lwps (resume_clear_callback, NULL);
1193
1194 /* If PID is -1, it's the current inferior that should be
1195 handled specially. */
1196 if (PIDGET (ptid) == -1)
1197 ptid = inferior_ptid;
1198
1199 lp = find_lwp_pid (ptid);
1200 if (lp)
1201 {
1202 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1203
1204 /* Remember if we're stepping. */
1205 lp->step = step;
1206
1207 /* Mark this LWP as resumed. */
1208 lp->resumed = 1;
1209
1210 /* If we have a pending wait status for this thread, there is no
1211 point in resuming the process. But first make sure that
1212 linux_nat_wait won't preemptively handle the event - we
1213 should never take this short-circuit if we are going to
1214 leave LP running, since we have skipped resuming all the
1215 other threads. This bit of code needs to be synchronized
1216 with linux_nat_wait. */
1217
1218 if (lp->status && WIFSTOPPED (lp->status))
1219 {
1220 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1221
1222 if (signal_stop_state (saved_signo) == 0
1223 && signal_print_state (saved_signo) == 0
1224 && signal_pass_state (saved_signo) == 1)
1225 {
1226 if (debug_linux_nat)
1227 fprintf_unfiltered (gdb_stdlog,
1228 "LLR: Not short circuiting for ignored "
1229 "status 0x%x\n", lp->status);
1230
1231 /* FIXME: What should we do if we are supposed to continue
1232 this thread with a signal? */
1233 gdb_assert (signo == TARGET_SIGNAL_0);
1234 signo = saved_signo;
1235 lp->status = 0;
1236 }
1237 }
1238
1239 if (lp->status)
1240 {
1241 /* FIXME: What should we do if we are supposed to continue
1242 this thread with a signal? */
1243 gdb_assert (signo == TARGET_SIGNAL_0);
1244
1245 if (debug_linux_nat)
1246 fprintf_unfiltered (gdb_stdlog,
1247 "LLR: Short circuiting for status 0x%x\n",
1248 lp->status);
1249
1250 return;
1251 }
1252
1253 /* Mark LWP as not stopped to prevent it from being continued by
1254 resume_callback. */
1255 lp->stopped = 0;
1256 }
1257
1258 if (resume_all)
1259 iterate_over_lwps (resume_callback, NULL);
1260
1261 linux_ops->to_resume (ptid, step, signo);
1262 if (debug_linux_nat)
1263 fprintf_unfiltered (gdb_stdlog,
1264 "LLR: %s %s, %s (resume event thread)\n",
1265 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1266 target_pid_to_str (ptid),
1267 signo ? strsignal (signo) : "0");
1268 }
1269
1270 /* Issue kill to specified lwp. */
1271
1272 static int tkill_failed;
1273
1274 static int
1275 kill_lwp (int lwpid, int signo)
1276 {
1277 errno = 0;
1278
1279 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1280 fails, then we are not using nptl threads and we should be using kill. */
1281
1282 #ifdef HAVE_TKILL_SYSCALL
1283 if (!tkill_failed)
1284 {
1285 int ret = syscall (__NR_tkill, lwpid, signo);
1286 if (errno != ENOSYS)
1287 return ret;
1288 errno = 0;
1289 tkill_failed = 1;
1290 }
1291 #endif
1292
1293 return kill (lwpid, signo);
1294 }
1295
1296 /* Handle a GNU/Linux extended wait response. Most of the work we
1297 just pass off to linux_handle_extended_wait, but if it reports a
1298 clone event we need to add the new LWP to our list (and not report
1299 the trap to higher layers). This function returns non-zero if
1300 the event should be ignored and we should wait again. If STOPPING
1301 is true, the new LWP remains stopped, otherwise it is continued. */
1302
1303 static int
1304 linux_nat_handle_extended (struct lwp_info *lp, int status, int stopping)
1305 {
1306 linux_handle_extended_wait (GET_LWP (lp->ptid), status,
1307 &lp->waitstatus);
1308
1309 /* TARGET_WAITKIND_SPURIOUS is used to indicate clone events. */
1310 if (lp->waitstatus.kind == TARGET_WAITKIND_SPURIOUS)
1311 {
1312 struct lwp_info *new_lp;
1313 new_lp = add_lwp (BUILD_LWP (lp->waitstatus.value.related_pid,
1314 GET_PID (inferior_ptid)));
1315 new_lp->cloned = 1;
1316
1317 if (stopping)
1318 new_lp->stopped = 1;
1319 else
1320 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0, 0);
1321
1322 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1323
1324 if (debug_linux_nat)
1325 fprintf_unfiltered (gdb_stdlog,
1326 "LLHE: Got clone event from LWP %ld, resuming\n",
1327 GET_LWP (lp->ptid));
1328 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1329
1330 return 1;
1331 }
1332
1333 return 0;
1334 }
1335
1336 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1337 exited. */
1338
1339 static int
1340 wait_lwp (struct lwp_info *lp)
1341 {
1342 pid_t pid;
1343 int status;
1344 int thread_dead = 0;
1345
1346 gdb_assert (!lp->stopped);
1347 gdb_assert (lp->status == 0);
1348
1349 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1350 if (pid == -1 && errno == ECHILD)
1351 {
1352 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1353 if (pid == -1 && errno == ECHILD)
1354 {
1355 /* The thread has previously exited. We need to delete it
1356 now because, for some vendor 2.4 kernels with NPTL
1357 support backported, there won't be an exit event unless
1358 it is the main thread. 2.6 kernels will report an exit
1359 event for each thread that exits, as expected. */
1360 thread_dead = 1;
1361 if (debug_linux_nat)
1362 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1363 target_pid_to_str (lp->ptid));
1364 }
1365 }
1366
1367 if (!thread_dead)
1368 {
1369 gdb_assert (pid == GET_LWP (lp->ptid));
1370
1371 if (debug_linux_nat)
1372 {
1373 fprintf_unfiltered (gdb_stdlog,
1374 "WL: waitpid %s received %s\n",
1375 target_pid_to_str (lp->ptid),
1376 status_to_str (status));
1377 }
1378 }
1379
1380 /* Check if the thread has exited. */
1381 if (WIFEXITED (status) || WIFSIGNALED (status))
1382 {
1383 thread_dead = 1;
1384 if (debug_linux_nat)
1385 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1386 target_pid_to_str (lp->ptid));
1387 }
1388
1389 if (thread_dead)
1390 {
1391 exit_lwp (lp);
1392 return 0;
1393 }
1394
1395 gdb_assert (WIFSTOPPED (status));
1396
1397 /* Handle GNU/Linux's extended waitstatus for trace events. */
1398 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1399 {
1400 if (debug_linux_nat)
1401 fprintf_unfiltered (gdb_stdlog,
1402 "WL: Handling extended status 0x%06x\n",
1403 status);
1404 if (linux_nat_handle_extended (lp, status, 1))
1405 return wait_lwp (lp);
1406 }
1407
1408 return status;
1409 }
1410
1411 /* Send a SIGSTOP to LP. */
1412
1413 static int
1414 stop_callback (struct lwp_info *lp, void *data)
1415 {
1416 if (!lp->stopped && !lp->signalled)
1417 {
1418 int ret;
1419
1420 if (debug_linux_nat)
1421 {
1422 fprintf_unfiltered (gdb_stdlog,
1423 "SC: kill %s **<SIGSTOP>**\n",
1424 target_pid_to_str (lp->ptid));
1425 }
1426 errno = 0;
1427 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1428 if (debug_linux_nat)
1429 {
1430 fprintf_unfiltered (gdb_stdlog,
1431 "SC: lwp kill %d %s\n",
1432 ret,
1433 errno ? safe_strerror (errno) : "ERRNO-OK");
1434 }
1435
1436 lp->signalled = 1;
1437 gdb_assert (lp->status == 0);
1438 }
1439
1440 return 0;
1441 }
1442
1443 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1444 a pointer to a set of signals to be flushed immediately. */
1445
1446 static int
1447 stop_wait_callback (struct lwp_info *lp, void *data)
1448 {
1449 sigset_t *flush_mask = data;
1450
1451 if (!lp->stopped)
1452 {
1453 int status;
1454
1455 status = wait_lwp (lp);
1456 if (status == 0)
1457 return 0;
1458
1459 /* Ignore any signals in FLUSH_MASK. */
1460 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1461 {
1462 if (!lp->signalled)
1463 {
1464 lp->stopped = 1;
1465 return 0;
1466 }
1467
1468 errno = 0;
1469 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1470 if (debug_linux_nat)
1471 fprintf_unfiltered (gdb_stdlog,
1472 "PTRACE_CONT %s, 0, 0 (%s)\n",
1473 target_pid_to_str (lp->ptid),
1474 errno ? safe_strerror (errno) : "OK");
1475
1476 return stop_wait_callback (lp, flush_mask);
1477 }
1478
1479 if (WSTOPSIG (status) != SIGSTOP)
1480 {
1481 if (WSTOPSIG (status) == SIGTRAP)
1482 {
1483 /* If a LWP other than the LWP that we're reporting an
1484 event for has hit a GDB breakpoint (as opposed to
1485 some random trap signal), then just arrange for it to
1486 hit it again later. We don't keep the SIGTRAP status
1487 and don't forward the SIGTRAP signal to the LWP. We
1488 will handle the current event, eventually we will
1489 resume all LWPs, and this one will get its breakpoint
1490 trap again.
1491
1492 If we do not do this, then we run the risk that the
1493 user will delete or disable the breakpoint, but the
1494 thread will have already tripped on it. */
1495
1496 /* Now resume this LWP and get the SIGSTOP event. */
1497 errno = 0;
1498 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1499 if (debug_linux_nat)
1500 {
1501 fprintf_unfiltered (gdb_stdlog,
1502 "PTRACE_CONT %s, 0, 0 (%s)\n",
1503 target_pid_to_str (lp->ptid),
1504 errno ? safe_strerror (errno) : "OK");
1505
1506 fprintf_unfiltered (gdb_stdlog,
1507 "SWC: Candidate SIGTRAP event in %s\n",
1508 target_pid_to_str (lp->ptid));
1509 }
1510 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1511 stop_wait_callback (lp, data);
1512 /* If there's another event, throw it back into the queue. */
1513 if (lp->status)
1514 {
1515 if (debug_linux_nat)
1516 {
1517 fprintf_unfiltered (gdb_stdlog,
1518 "SWC: kill %s, %s\n",
1519 target_pid_to_str (lp->ptid),
1520 status_to_str ((int) status));
1521 }
1522 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1523 }
1524 /* Save the sigtrap event. */
1525 lp->status = status;
1526 return 0;
1527 }
1528 else
1529 {
1530 /* The thread was stopped with a signal other than
1531 SIGSTOP, and didn't accidentally trip a breakpoint. */
1532
1533 if (debug_linux_nat)
1534 {
1535 fprintf_unfiltered (gdb_stdlog,
1536 "SWC: Pending event %s in %s\n",
1537 status_to_str ((int) status),
1538 target_pid_to_str (lp->ptid));
1539 }
1540 /* Now resume this LWP and get the SIGSTOP event. */
1541 errno = 0;
1542 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1543 if (debug_linux_nat)
1544 fprintf_unfiltered (gdb_stdlog,
1545 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1546 target_pid_to_str (lp->ptid),
1547 errno ? safe_strerror (errno) : "OK");
1548
1549 /* Hold this event/waitstatus while we check to see if
1550 there are any more (we still want to get that SIGSTOP). */
1551 stop_wait_callback (lp, data);
1552 /* If the lp->status field is still empty, use it to hold
1553 this event. If not, then this event must be returned
1554 to the event queue of the LWP. */
1555 if (lp->status == 0)
1556 lp->status = status;
1557 else
1558 {
1559 if (debug_linux_nat)
1560 {
1561 fprintf_unfiltered (gdb_stdlog,
1562 "SWC: kill %s, %s\n",
1563 target_pid_to_str (lp->ptid),
1564 status_to_str ((int) status));
1565 }
1566 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1567 }
1568 return 0;
1569 }
1570 }
1571 else
1572 {
1573 /* We caught the SIGSTOP that we intended to catch, so
1574 there's no SIGSTOP pending. */
1575 lp->stopped = 1;
1576 lp->signalled = 0;
1577 }
1578 }
1579
1580 return 0;
1581 }
1582
1583 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1584 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1585
1586 static int
1587 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1588 {
1589 sigset_t blocked, ignored;
1590 int i;
1591
1592 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1593
1594 if (!flush_mask)
1595 return 0;
1596
1597 for (i = 1; i < NSIG; i++)
1598 if (sigismember (pending, i))
1599 if (!sigismember (flush_mask, i)
1600 || sigismember (&blocked, i)
1601 || sigismember (&ignored, i))
1602 sigdelset (pending, i);
1603
1604 if (sigisemptyset (pending))
1605 return 0;
1606
1607 return 1;
1608 }
1609
1610 /* DATA is interpreted as a mask of signals to flush. If LP has
1611 signals pending, and they are all in the flush mask, then arrange
1612 to flush them. LP should be stopped, as should all other threads
1613 it might share a signal queue with. */
1614
1615 static int
1616 flush_callback (struct lwp_info *lp, void *data)
1617 {
1618 sigset_t *flush_mask = data;
1619 sigset_t pending, intersection, blocked, ignored;
1620 int pid, status;
1621
1622 /* Normally, when an LWP exits, it is removed from the LWP list. The
1623 last LWP isn't removed till later, however. So if there is only
1624 one LWP on the list, make sure it's alive. */
1625 if (lwp_list == lp && lp->next == NULL)
1626 if (!linux_nat_thread_alive (lp->ptid))
1627 return 0;
1628
1629 /* Just because the LWP is stopped doesn't mean that new signals
1630 can't arrive from outside, so this function must be careful of
1631 race conditions. However, because all threads are stopped, we
1632 can assume that the pending mask will not shrink unless we resume
1633 the LWP, and that it will then get another signal. We can't
1634 control which one, however. */
1635
1636 if (lp->status)
1637 {
1638 if (debug_linux_nat)
1639 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1640 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1641 lp->status = 0;
1642 }
1643
1644 while (linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1645 {
1646 int ret;
1647
1648 errno = 0;
1649 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1650 if (debug_linux_nat)
1651 fprintf_unfiltered (gdb_stderr,
1652 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1653
1654 lp->stopped = 0;
1655 stop_wait_callback (lp, flush_mask);
1656 if (debug_linux_nat)
1657 fprintf_unfiltered (gdb_stderr,
1658 "FC: Wait finished; saved status is %d\n",
1659 lp->status);
1660 }
1661
1662 return 0;
1663 }
1664
1665 /* Return non-zero if LP has a wait status pending. */
1666
1667 static int
1668 status_callback (struct lwp_info *lp, void *data)
1669 {
1670 /* Only report a pending wait status if we pretend that this has
1671 indeed been resumed. */
1672 return (lp->status != 0 && lp->resumed);
1673 }
1674
1675 /* Return non-zero if LP isn't stopped. */
1676
1677 static int
1678 running_callback (struct lwp_info *lp, void *data)
1679 {
1680 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1681 }
1682
1683 /* Count the LWPs that have had events. */
1684
1685 static int
1686 count_events_callback (struct lwp_info *lp, void *data)
1687 {
1688 int *count = data;
1689
1690 gdb_assert (count != NULL);
1691
1692 /* Count only LWPs that have a SIGTRAP event pending. */
1693 if (lp->status != 0
1694 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1695 (*count)++;
1696
1697 return 0;
1698 }
1699
1700 /* Select the LWP (if any) that is currently being single-stepped. */
1701
1702 static int
1703 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1704 {
1705 if (lp->step && lp->status != 0)
1706 return 1;
1707 else
1708 return 0;
1709 }
1710
1711 /* Select the Nth LWP that has had a SIGTRAP event. */
1712
1713 static int
1714 select_event_lwp_callback (struct lwp_info *lp, void *data)
1715 {
1716 int *selector = data;
1717
1718 gdb_assert (selector != NULL);
1719
1720 /* Select only LWPs that have a SIGTRAP event pending. */
1721 if (lp->status != 0
1722 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1723 if ((*selector)-- == 0)
1724 return 1;
1725
1726 return 0;
1727 }
1728
1729 static int
1730 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1731 {
1732 struct lwp_info *event_lp = data;
1733
1734 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1735 if (lp == event_lp)
1736 return 0;
1737
1738 /* If a LWP other than the LWP that we're reporting an event for has
1739 hit a GDB breakpoint (as opposed to some random trap signal),
1740 then just arrange for it to hit it again later. We don't keep
1741 the SIGTRAP status and don't forward the SIGTRAP signal to the
1742 LWP. We will handle the current event, eventually we will resume
1743 all LWPs, and this one will get its breakpoint trap again.
1744
1745 If we do not do this, then we run the risk that the user will
1746 delete or disable the breakpoint, but the LWP will have already
1747 tripped on it. */
1748
1749 if (lp->status != 0
1750 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1751 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1752 DECR_PC_AFTER_BREAK))
1753 {
1754 if (debug_linux_nat)
1755 fprintf_unfiltered (gdb_stdlog,
1756 "CBC: Push back breakpoint for %s\n",
1757 target_pid_to_str (lp->ptid));
1758
1759 /* Back up the PC if necessary. */
1760 if (DECR_PC_AFTER_BREAK)
1761 write_pc_pid (read_pc_pid (lp->ptid) - DECR_PC_AFTER_BREAK, lp->ptid);
1762
1763 /* Throw away the SIGTRAP. */
1764 lp->status = 0;
1765 }
1766
1767 return 0;
1768 }
1769
1770 /* Select one LWP out of those that have events pending. */
1771
1772 static void
1773 select_event_lwp (struct lwp_info **orig_lp, int *status)
1774 {
1775 int num_events = 0;
1776 int random_selector;
1777 struct lwp_info *event_lp;
1778
1779 /* Record the wait status for the original LWP. */
1780 (*orig_lp)->status = *status;
1781
1782 /* Give preference to any LWP that is being single-stepped. */
1783 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1784 if (event_lp != NULL)
1785 {
1786 if (debug_linux_nat)
1787 fprintf_unfiltered (gdb_stdlog,
1788 "SEL: Select single-step %s\n",
1789 target_pid_to_str (event_lp->ptid));
1790 }
1791 else
1792 {
1793 /* No single-stepping LWP. Select one at random, out of those
1794 which have had SIGTRAP events. */
1795
1796 /* First see how many SIGTRAP events we have. */
1797 iterate_over_lwps (count_events_callback, &num_events);
1798
1799 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1800 random_selector = (int)
1801 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1802
1803 if (debug_linux_nat && num_events > 1)
1804 fprintf_unfiltered (gdb_stdlog,
1805 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1806 num_events, random_selector);
1807
1808 event_lp = iterate_over_lwps (select_event_lwp_callback,
1809 &random_selector);
1810 }
1811
1812 if (event_lp != NULL)
1813 {
1814 /* Switch the event LWP. */
1815 *orig_lp = event_lp;
1816 *status = event_lp->status;
1817 }
1818
1819 /* Flush the wait status for the event LWP. */
1820 (*orig_lp)->status = 0;
1821 }
1822
1823 /* Return non-zero if LP has been resumed. */
1824
1825 static int
1826 resumed_callback (struct lwp_info *lp, void *data)
1827 {
1828 return lp->resumed;
1829 }
1830
1831 /* Stop an active thread, verify it still exists, then resume it. */
1832
1833 static int
1834 stop_and_resume_callback (struct lwp_info *lp, void *data)
1835 {
1836 struct lwp_info *ptr;
1837
1838 if (!lp->stopped && !lp->signalled)
1839 {
1840 stop_callback (lp, NULL);
1841 stop_wait_callback (lp, NULL);
1842 /* Resume if the lwp still exists. */
1843 for (ptr = lwp_list; ptr; ptr = ptr->next)
1844 if (lp == ptr)
1845 {
1846 resume_callback (lp, NULL);
1847 resume_set_callback (lp, NULL);
1848 }
1849 }
1850 return 0;
1851 }
1852
1853 static ptid_t
1854 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1855 {
1856 struct lwp_info *lp = NULL;
1857 int options = 0;
1858 int status = 0;
1859 pid_t pid = PIDGET (ptid);
1860 sigset_t flush_mask;
1861
1862 /* The first time we get here after starting a new inferior, we may
1863 not have added it to the LWP list yet - this is the earliest
1864 moment at which we know its PID. */
1865 if (num_lwps == 0)
1866 {
1867 gdb_assert (!is_lwp (inferior_ptid));
1868
1869 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1870 GET_PID (inferior_ptid));
1871 lp = add_lwp (inferior_ptid);
1872 lp->resumed = 1;
1873 }
1874
1875 sigemptyset (&flush_mask);
1876
1877 /* Make sure SIGCHLD is blocked. */
1878 if (!sigismember (&blocked_mask, SIGCHLD))
1879 {
1880 sigaddset (&blocked_mask, SIGCHLD);
1881 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1882 }
1883
1884 retry:
1885
1886 /* Make sure there is at least one LWP that has been resumed. */
1887 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1888
1889 /* First check if there is a LWP with a wait status pending. */
1890 if (pid == -1)
1891 {
1892 /* Any LWP that's been resumed will do. */
1893 lp = iterate_over_lwps (status_callback, NULL);
1894 if (lp)
1895 {
1896 status = lp->status;
1897 lp->status = 0;
1898
1899 if (debug_linux_nat && status)
1900 fprintf_unfiltered (gdb_stdlog,
1901 "LLW: Using pending wait status %s for %s.\n",
1902 status_to_str (status),
1903 target_pid_to_str (lp->ptid));
1904 }
1905
1906 /* But if we don't find one, we'll have to wait, and check both
1907 cloned and uncloned processes. We start with the cloned
1908 processes. */
1909 options = __WCLONE | WNOHANG;
1910 }
1911 else if (is_lwp (ptid))
1912 {
1913 if (debug_linux_nat)
1914 fprintf_unfiltered (gdb_stdlog,
1915 "LLW: Waiting for specific LWP %s.\n",
1916 target_pid_to_str (ptid));
1917
1918 /* We have a specific LWP to check. */
1919 lp = find_lwp_pid (ptid);
1920 gdb_assert (lp);
1921 status = lp->status;
1922 lp->status = 0;
1923
1924 if (debug_linux_nat && status)
1925 fprintf_unfiltered (gdb_stdlog,
1926 "LLW: Using pending wait status %s for %s.\n",
1927 status_to_str (status),
1928 target_pid_to_str (lp->ptid));
1929
1930 /* If we have to wait, take into account whether PID is a cloned
1931 process or not. And we have to convert it to something that
1932 the layer beneath us can understand. */
1933 options = lp->cloned ? __WCLONE : 0;
1934 pid = GET_LWP (ptid);
1935 }
1936
1937 if (status && lp->signalled)
1938 {
1939 /* A pending SIGSTOP may interfere with the normal stream of
1940 events. In a typical case where interference is a problem,
1941 we have a SIGSTOP signal pending for LWP A while
1942 single-stepping it, encounter an event in LWP B, and take the
1943 pending SIGSTOP while trying to stop LWP A. After processing
1944 the event in LWP B, LWP A is continued, and we'll never see
1945 the SIGTRAP associated with the last time we were
1946 single-stepping LWP A. */
1947
1948 /* Resume the thread. It should halt immediately, returning the
1949 pending SIGSTOP. */
1950 registers_changed ();
1951 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1952 lp->step, TARGET_SIGNAL_0);
1953 if (debug_linux_nat)
1954 fprintf_unfiltered (gdb_stdlog,
1955 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
1956 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1957 target_pid_to_str (lp->ptid));
1958 lp->stopped = 0;
1959 gdb_assert (lp->resumed);
1960
1961 /* This should catch the pending SIGSTOP. */
1962 stop_wait_callback (lp, NULL);
1963 }
1964
1965 set_sigint_trap (); /* Causes SIGINT to be passed on to the
1966 attached process. */
1967 set_sigio_trap ();
1968
1969 while (status == 0)
1970 {
1971 pid_t lwpid;
1972
1973 lwpid = my_waitpid (pid, &status, options);
1974 if (lwpid > 0)
1975 {
1976 gdb_assert (pid == -1 || lwpid == pid);
1977
1978 if (debug_linux_nat)
1979 {
1980 fprintf_unfiltered (gdb_stdlog,
1981 "LLW: waitpid %ld received %s\n",
1982 (long) lwpid, status_to_str (status));
1983 }
1984
1985 lp = find_lwp_pid (pid_to_ptid (lwpid));
1986
1987 /* Check for stop events reported by a process we didn't
1988 already know about - anything not already in our LWP
1989 list.
1990
1991 If we're expecting to receive stopped processes after
1992 fork, vfork, and clone events, then we'll just add the
1993 new one to our list and go back to waiting for the event
1994 to be reported - the stopped process might be returned
1995 from waitpid before or after the event is. */
1996 if (WIFSTOPPED (status) && !lp)
1997 {
1998 linux_record_stopped_pid (lwpid);
1999 status = 0;
2000 continue;
2001 }
2002
2003 /* Make sure we don't report an event for the exit of an LWP not in
2004 our list, i.e. not part of the current process. This can happen
2005 if we detach from a program we originally forked and then it
2006 exits. */
2007 if (!WIFSTOPPED (status) && !lp)
2008 {
2009 status = 0;
2010 continue;
2011 }
2012
2013 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2014 CLONE_PTRACE processes which do not use the thread library -
2015 otherwise we wouldn't find the new LWP this way. That doesn't
2016 currently work, and the following code is currently unreachable
2017 due to the two blocks above. If it's fixed some day, this code
2018 should be broken out into a function so that we can also pick up
2019 LWPs from the new interface. */
2020 if (!lp)
2021 {
2022 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2023 if (options & __WCLONE)
2024 lp->cloned = 1;
2025
2026 gdb_assert (WIFSTOPPED (status)
2027 && WSTOPSIG (status) == SIGSTOP);
2028 lp->signalled = 1;
2029
2030 if (!in_thread_list (inferior_ptid))
2031 {
2032 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2033 GET_PID (inferior_ptid));
2034 add_thread (inferior_ptid);
2035 }
2036
2037 add_thread (lp->ptid);
2038 printf_unfiltered (_("[New %s]\n"),
2039 target_pid_to_str (lp->ptid));
2040 }
2041
2042 /* Handle GNU/Linux's extended waitstatus for trace events. */
2043 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2044 {
2045 if (debug_linux_nat)
2046 fprintf_unfiltered (gdb_stdlog,
2047 "LLW: Handling extended status 0x%06x\n",
2048 status);
2049 if (linux_nat_handle_extended (lp, status, 0))
2050 {
2051 status = 0;
2052 continue;
2053 }
2054 }
2055
2056 /* Check if the thread has exited. */
2057 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2058 {
2059 /* If this is the main thread, we must stop all threads and
2060 verify if they are still alive. This is because in the nptl
2061 thread model, there is no signal issued for exiting LWPs
2062 other than the main thread. We only get the main thread
2063 exit signal once all child threads have already exited.
2064 If we stop all the threads and use the stop_wait_callback
2065 to check if they have exited we can determine whether this
2066 signal should be ignored or whether it means the end of the
2067 debugged application, regardless of which threading model
2068 is being used. */
2069 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2070 {
2071 lp->stopped = 1;
2072 iterate_over_lwps (stop_and_resume_callback, NULL);
2073 }
2074
2075 if (debug_linux_nat)
2076 fprintf_unfiltered (gdb_stdlog,
2077 "LLW: %s exited.\n",
2078 target_pid_to_str (lp->ptid));
2079
2080 exit_lwp (lp);
2081
2082 /* If there is at least one more LWP, then the exit signal
2083 was not the end of the debugged application and should be
2084 ignored. */
2085 if (num_lwps > 0)
2086 {
2087 /* Make sure there is at least one thread running. */
2088 gdb_assert (iterate_over_lwps (running_callback, NULL));
2089
2090 /* Discard the event. */
2091 status = 0;
2092 continue;
2093 }
2094 }
2095
2096 /* Check if the current LWP has previously exited. In the nptl
2097 thread model, LWPs other than the main thread do not issue
2098 signals when they exit so we must check whenever the thread
2099 has stopped. A similar check is made in stop_wait_callback(). */
2100 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2101 {
2102 if (debug_linux_nat)
2103 fprintf_unfiltered (gdb_stdlog,
2104 "LLW: %s exited.\n",
2105 target_pid_to_str (lp->ptid));
2106
2107 exit_lwp (lp);
2108
2109 /* Make sure there is at least one thread running. */
2110 gdb_assert (iterate_over_lwps (running_callback, NULL));
2111
2112 /* Discard the event. */
2113 status = 0;
2114 continue;
2115 }
2116
2117 /* Make sure we don't report a SIGSTOP that we sent
2118 ourselves in an attempt to stop an LWP. */
2119 if (lp->signalled
2120 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2121 {
2122 if (debug_linux_nat)
2123 fprintf_unfiltered (gdb_stdlog,
2124 "LLW: Delayed SIGSTOP caught for %s.\n",
2125 target_pid_to_str (lp->ptid));
2126
2127 /* This is a delayed SIGSTOP. */
2128 lp->signalled = 0;
2129
2130 registers_changed ();
2131 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2132 lp->step, TARGET_SIGNAL_0);
2133 if (debug_linux_nat)
2134 fprintf_unfiltered (gdb_stdlog,
2135 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2136 lp->step ?
2137 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2138 target_pid_to_str (lp->ptid));
2139
2140 lp->stopped = 0;
2141 gdb_assert (lp->resumed);
2142
2143 /* Discard the event. */
2144 status = 0;
2145 continue;
2146 }
2147
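/* This is an event we want to report; leave the waitpid loop.  */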
2148 break;
2149 }
2150
2151 if (pid == -1)
2152 {
2153 /* Alternate between checking cloned and uncloned processes. */
2154 options ^= __WCLONE;
2155
2156 /* And suspend every time we have checked both. */
2157 if (options & __WCLONE)
2158 sigsuspend (&suspend_mask);
2159 }
2160
2161 /* We shouldn't end up here unless we want to try again. */
2162 gdb_assert (status == 0);
2163 }
2164
2165 clear_sigio_trap ();
2166 clear_sigint_trap ();
2167
2168 gdb_assert (lp);
2169
2170 /* Don't report signals that GDB isn't interested in, such as
2171 signals that are neither printed nor stopped upon. Stopping all
2172 threads can be a bit time-consuming, so if we want decent
2173 performance with heavily multi-threaded programs, especially when
2174 they're using a high-frequency timer, we'd better avoid it if we
2175 can. */
2176
2177 if (WIFSTOPPED (status))
2178 {
2179 int signo = target_signal_from_host (WSTOPSIG (status));
2180
2181 /* If we get a signal while single-stepping, we may need special
2182 care, e.g. to skip the signal handler. Defer to common code. */
2183 if (!lp->step
2184 && signal_stop_state (signo) == 0
2185 && signal_print_state (signo) == 0
2186 && signal_pass_state (signo) == 1)
2187 {
2188 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2189 here? It is not clear we should. GDB may not expect
2190 other threads to run. On the other hand, not resuming
2191 newly attached threads may cause an unwanted delay in
2192 getting them running. */
2193 registers_changed ();
2194 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2195 lp->step, signo);
2196 if (debug_linux_nat)
2197 fprintf_unfiltered (gdb_stdlog,
2198 "LLW: %s %s, %s (preempt 'handle')\n",
2199 lp->step ?
2200 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2201 target_pid_to_str (lp->ptid),
2202 signo ? strsignal (signo) : "0");
2203 lp->stopped = 0;
2204 status = 0;
2205 goto retry;
2206 }
2207
2208 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2209 {
2210 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2211 forwarded to the entire process group, that is, all LWP's
2212 will receive it. Since we only want to report it once,
2213 we try to flush it from all LWPs except this one. */
2214 sigaddset (&flush_mask, SIGINT);
2215 }
2216 }
2217
2218 /* This LWP is stopped now. */
2219 lp->stopped = 1;
2220
2221 if (debug_linux_nat)
2222 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2223 status_to_str (status), target_pid_to_str (lp->ptid));
2224
2225 /* Now stop all other LWP's ... */
2226 iterate_over_lwps (stop_callback, NULL);
2227
2228 /* ... and wait until all of them have reported back that they're no
2229 longer running. */
2230 iterate_over_lwps (stop_wait_callback, &flush_mask);
2231 iterate_over_lwps (flush_callback, &flush_mask);
2232
2233 /* If we're not waiting for a specific LWP, choose an event LWP from
2234 among those that have had events. Giving equal priority to all
2235 LWPs that have had events helps prevent starvation. */
2236 if (pid == -1)
2237 select_event_lwp (&lp, &status);
2238
2239 /* Now that we've selected our final event LWP, cancel any
2240 breakpoints in other LWPs that have hit a GDB breakpoint. See
2241 the comment in cancel_breakpoints_callback to find out why. */
2242 iterate_over_lwps (cancel_breakpoints_callback, lp);
2243
2244 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2245 {
2246 trap_ptid = lp->ptid;
2247 if (debug_linux_nat)
2248 fprintf_unfiltered (gdb_stdlog,
2249 "LLW: trap_ptid is %s.\n",
2250 target_pid_to_str (trap_ptid));
2251 }
2252 else
2253 trap_ptid = null_ptid;
2254
2255 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2256 {
2257 *ourstatus = lp->waitstatus;
2258 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2259 }
2260 else
2261 store_waitstatus (ourstatus, status);
2262
2263 return lp->ptid;
2264 }
2265
2266 static int
2267 kill_callback (struct lwp_info *lp, void *data)
2268 {
2269 errno = 0;
2270 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2271 if (debug_linux_nat)
2272 fprintf_unfiltered (gdb_stdlog,
2273 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2274 target_pid_to_str (lp->ptid),
2275 errno ? safe_strerror (errno) : "OK");
2276
2277 return 0;
2278 }
2279
2280 static int
2281 kill_wait_callback (struct lwp_info *lp, void *data)
2282 {
2283 pid_t pid;
2284
2285 /* We must make sure that there are no pending events (delayed
2286 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
2287 program doesn't interfere with any following debugging session. */
2288
2289 /* For cloned processes we must check both with __WCLONE and
2290 without, since the exit status of a cloned process isn't reported
2291 with __WCLONE. */
2292 if (lp->cloned)
2293 {
2294 do
2295 {
2296 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2297 if (pid != (pid_t) -1 && debug_linux_nat)
2298 {
2299 fprintf_unfiltered (gdb_stdlog,
2300 "KWC: wait %s received unknown.\n",
2301 target_pid_to_str (lp->ptid));
2302 }
2303 }
2304 while (pid == GET_LWP (lp->ptid));
2305
2306 gdb_assert (pid == -1 && errno == ECHILD);
2307 }
2308
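/* Now wait without __WCLONE; keep reaping until waitpid no longer
   returns this LWP, at which point it should have failed with ECHILD.  */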
2309 do
2310 {
2311 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2312 if (pid != (pid_t) -1 && debug_linux_nat)
2313 {
2314 fprintf_unfiltered (gdb_stdlog,
2315 "KWC: wait %s received unk.\n",
2316 target_pid_to_str (lp->ptid));
2317 }
2318 }
2319 while (pid == GET_LWP (lp->ptid));
2320
2321 gdb_assert (pid == -1 && errno == ECHILD);
2322 return 0;
2323 }
2324
2325 static void
2326 linux_nat_kill (void)
2327 {
2328 struct target_waitstatus last;
2329 ptid_t last_ptid;
2330 int status;
2331
2332 /* If we're stopped while forking and we haven't followed yet,
2333 kill the other task. We need to do this first because the
2334 parent will be sleeping if this is a vfork. */
2335
2336 get_last_target_status (&last_ptid, &last);
2337
2338 if (last.kind == TARGET_WAITKIND_FORKED
2339 || last.kind == TARGET_WAITKIND_VFORKED)
2340 {
2341 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2342 wait (&status);
2343 }
2344
2345 if (forks_exist_p ())
2346 linux_fork_killall ();
2347 else
2348 {
2349 /* Kill all LWP's ... */
2350 iterate_over_lwps (kill_callback, NULL);
2351
2352 /* ... and wait until we've flushed all events. */
2353 iterate_over_lwps (kill_wait_callback, NULL);
2354 }
2355
2356 target_mourn_inferior ();
2357 }
2358
2359 static void
2360 linux_nat_mourn_inferior (void)
2361 {
2362 trap_ptid = null_ptid;
2363
2364 /* Destroy LWP info; it's no longer valid. */
2365 init_lwp_list ();
2366
2367 /* Restore the original signal mask. */
2368 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2369 sigemptyset (&blocked_mask);
2370
2371 if (! forks_exist_p ())
2372 /* Normal case, no other forks available. */
2373 linux_ops->to_mourn_inferior ();
2374 else
2375 /* Multi-fork case. The current inferior_ptid has exited, but
2376 there are other viable forks to debug. Delete the exiting
2377 one and context-switch to the first available. */
2378 linux_fork_mourn_inferior ();
2379 }
2380
2381 static LONGEST
2382 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2383 const char *annex, gdb_byte *readbuf,
2384 const gdb_byte *writebuf,
2385 ULONGEST offset, LONGEST len)
2386 {
2387 struct cleanup *old_chain = save_inferior_ptid ();
2388 LONGEST xfer;
2389
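/* The target beneath us works with plain process ids, so temporarily
   strip the LWP encoding from inferior_ptid; the cleanup installed
   above restores the original value.  */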
2390 if (is_lwp (inferior_ptid))
2391 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2392
2393 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2394 offset, len);
2395
2396 do_cleanups (old_chain);
2397 return xfer;
2398 }
2399
2400 static int
2401 linux_nat_thread_alive (ptid_t ptid)
2402 {
2403 gdb_assert (is_lwp (ptid));
2404
2405 errno = 0;
2406 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2407 if (debug_linux_nat)
2408 fprintf_unfiltered (gdb_stdlog,
2409 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2410 target_pid_to_str (ptid),
2411 errno ? safe_strerror (errno) : "OK");
2412
2413 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2414 handle that case gracefully since ptrace will first do a lookup
2415 for the process based upon the passed-in pid. If that fails we
2416 will get either -ESRCH or -EPERM, otherwise the child exists and
2417 is alive. */
2418 if (errno == ESRCH || errno == EPERM)
2419 return 0;
2420
2421 return 1;
2422 }
2423
2424 static char *
2425 linux_nat_pid_to_str (ptid_t ptid)
2426 {
2427 static char buf[64];
2428
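/* Use the "LWP" notation only when more than one LWP is being
   debugged; otherwise fall back to the plain process form.  */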
2429 if (lwp_list && lwp_list->next && is_lwp (ptid))
2430 {
2431 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2432 return buf;
2433 }
2434
2435 return normal_pid_to_str (ptid);
2436 }
2437
2438 static void
2439 sigchld_handler (int signo)
2440 {
2441 /* Do nothing. The only reason for this handler is that it allows
2442 us to use sigsuspend in linux_nat_wait above to wait for the
2443 arrival of a SIGCHLD. */
2444 }
2445
2446 /* Accepts an integer PID; returns a string representing a file that
2447 can be opened to get the symbols for the child process. */
2448
2449 char *
2450 child_pid_to_exec_file (int pid)
2451 {
2452 char *name1, *name2;
2453
2454 name1 = xmalloc (MAXPATHLEN);
2455 name2 = xmalloc (MAXPATHLEN);
2456 make_cleanup (xfree, name1);
2457 make_cleanup (xfree, name2);
2458 memset (name2, 0, MAXPATHLEN);
2459
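/* Return the resolved link target if readlink succeeds; otherwise
   fall back to the /proc/PID/exe path itself.  */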
2460 sprintf (name1, "/proc/%d/exe", pid);
2461 if (readlink (name1, name2, MAXPATHLEN) > 0)
2462 return name2;
2463 else
2464 return name1;
2465 }
2466
2467 /* Service function for corefiles and info proc. */
2468
2469 static int
2470 read_mapping (FILE *mapfile,
2471 long long *addr,
2472 long long *endaddr,
2473 char *permissions,
2474 long long *offset,
2475 char *device, long long *inode, char *filename)
2476 {
2477 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2478 addr, endaddr, permissions, offset, device, inode);
2479
2480 filename[0] = '\0';
2481 if (ret > 0 && ret != EOF)
2482 {
2483 /* Eat everything up to EOL for the filename. This will prevent
2484 weird filenames (such as one with embedded whitespace) from
2485 confusing this code. It also makes this code more robust with
2486 respect to annotations the kernel may add after the filename.
2487
2488 Note the filename is used for informational purposes
2489 only. */
2490 ret += fscanf (mapfile, "%[^\n]\n", filename);
2491 }
2492
2493 return (ret != 0 && ret != EOF);
2494 }
2495
2496 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2497 regions in the inferior for a corefile. */
2498
2499 static int
2500 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2501 unsigned long,
2502 int, int, int, void *), void *obfd)
2503 {
2504 long long pid = PIDGET (inferior_ptid);
2505 char mapsfilename[MAXPATHLEN];
2506 FILE *mapsfile;
2507 long long addr, endaddr, size, offset, inode;
2508 char permissions[8], device[8], filename[MAXPATHLEN];
2509 int read, write, exec;
2510 int ret;
2511
2512 /* Compose the filename for the /proc memory map, and open it. */
2513 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2514 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2515 error (_("Could not open %s."), mapsfilename);
2516
2517 if (info_verbose)
2518 fprintf_filtered (gdb_stdout,
2519 "Reading memory regions from %s\n", mapsfilename);
2520
2521 /* Now iterate until end-of-file. */
2522 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2523 &offset, &device[0], &inode, &filename[0]))
2524 {
2525 size = endaddr - addr;
2526
2527 /* Get the segment's permissions. */
2528 read = (strchr (permissions, 'r') != 0);
2529 write = (strchr (permissions, 'w') != 0);
2530 exec = (strchr (permissions, 'x') != 0);
2531
2532 if (info_verbose)
2533 {
2534 fprintf_filtered (gdb_stdout,
2535 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2536 size, paddr_nz (addr),
2537 read ? 'r' : ' ',
2538 write ? 'w' : ' ', exec ? 'x' : ' ');
2539 if (filename && filename[0])
2540 fprintf_filtered (gdb_stdout, " for %s", filename);
2541 fprintf_filtered (gdb_stdout, "\n");
2542 }
2543
2544 /* Invoke the callback function to create the corefile
2545 segment. */
2546 func (addr, size, read, write, exec, obfd);
2547 }
2548 fclose (mapsfile);
2549 return 0;
2550 }
2551
2552 /* Records the thread's register state for the corefile note
2553 section. */
2554
2555 static char *
2556 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2557 char *note_data, int *note_size)
2558 {
2559 gdb_gregset_t gregs;
2560 gdb_fpregset_t fpregs;
2561 #ifdef FILL_FPXREGSET
2562 gdb_fpxregset_t fpxregs;
2563 #endif
2564 unsigned long lwp = ptid_get_lwp (ptid);
2565 struct gdbarch *gdbarch = current_gdbarch;
2566 const struct regset *regset;
2567 int core_regset_p;
2568
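/* Prefer the architecture's core-file regset collectors when they
   are available; otherwise fall back to the generic fill_* routines.  */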
2569 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
2570 if (core_regset_p
2571 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
2572 sizeof (gregs))) != NULL
2573 && regset->collect_regset != NULL)
2574 regset->collect_regset (regset, current_regcache, -1,
2575 &gregs, sizeof (gregs));
2576 else
2577 fill_gregset (&gregs, -1);
2578
2579 note_data = (char *) elfcore_write_prstatus (obfd,
2580 note_data,
2581 note_size,
2582 lwp,
2583 stop_signal, &gregs);
2584
2585 if (core_regset_p
2586 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
2587 sizeof (fpregs))) != NULL
2588 && regset->collect_regset != NULL)
2589 regset->collect_regset (regset, current_regcache, -1,
2590 &fpregs, sizeof (fpregs));
2591 else
2592 fill_fpregset (&fpregs, -1);
2593
2594 note_data = (char *) elfcore_write_prfpreg (obfd,
2595 note_data,
2596 note_size,
2597 &fpregs, sizeof (fpregs));
2598
2599 #ifdef FILL_FPXREGSET
2600 if (core_regset_p
2601 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
2602 sizeof (fpxregs))) != NULL
2603 && regset->collect_regset != NULL)
2604 regset->collect_regset (regset, current_regcache, -1,
2605 &fpxregs, sizeof (fpxregs));
2606 else
2607 fill_fpxregset (&fpxregs, -1);
2608
2609 note_data = (char *) elfcore_write_prxfpreg (obfd,
2610 note_data,
2611 note_size,
2612 &fpxregs, sizeof (fpxregs));
2613 #endif
2614 return note_data;
2615 }
2616
2617 struct linux_nat_corefile_thread_data
2618 {
2619 bfd *obfd;
2620 char *note_data;
2621 int *note_size;
2622 int num_notes;
2623 };
2624
2625 /* Called once per LWP via iterate_over_lwps. Records the thread's
2626 register state for the corefile note section. */
2627
2628 static int
2629 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2630 {
2631 struct linux_nat_corefile_thread_data *args = data;
2632 ptid_t saved_ptid = inferior_ptid;
2633
2634 inferior_ptid = ti->ptid;
2635 registers_changed ();
2636 target_fetch_registers (-1); /* FIXME should not be necessary;
2637 fill_gregset should do it automatically. */
2638 args->note_data = linux_nat_do_thread_registers (args->obfd,
2639 ti->ptid,
2640 args->note_data,
2641 args->note_size);
2642 args->num_notes++;
2643 inferior_ptid = saved_ptid;
2644 registers_changed ();
2645 target_fetch_registers (-1); /* FIXME should not be necessary;
2646 fill_gregset should do it automatically. */
2647 return 0;
2648 }
2649
2650 /* Records the register state for the corefile note section. */
2651
2652 static char *
2653 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2654 char *note_data, int *note_size)
2655 {
2656 registers_changed ();
2657 target_fetch_registers (-1); /* FIXME should not be necessary;
2658 fill_gregset should do it automatically. */
2659 return linux_nat_do_thread_registers (obfd,
2660 ptid_build (ptid_get_pid (inferior_ptid),
2661 ptid_get_pid (inferior_ptid),
2662 0),
2663 note_data, note_size);
2665 }
2666
2667 /* Fills the "to_make_corefile_note" target vector. Builds the note
2668 section for a corefile, and returns it in a malloc buffer. */
2669
2670 static char *
2671 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2672 {
2673 struct linux_nat_corefile_thread_data thread_args;
2674 struct cleanup *old_chain;
2675 char fname[16] = { '\0' };
2676 char psargs[80] = { '\0' };
2677 char *note_data = NULL;
2678 ptid_t current_ptid = inferior_ptid;
2679 gdb_byte *auxv;
2680 int auxv_len;
2681
2682 if (get_exec_file (0))
2683 {
2684 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2685 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2686 if (get_inferior_args ())
2687 {
2688 strncat (psargs, " ", sizeof (psargs) - strlen (psargs));
2689 strncat (psargs, get_inferior_args (),
2690 sizeof (psargs) - strlen (psargs));
2691 }
2692 note_data = (char *) elfcore_write_prpsinfo (obfd,
2693 note_data,
2694 note_size, fname, psargs);
2695 }
2696
2697 /* Dump information for threads. */
2698 thread_args.obfd = obfd;
2699 thread_args.note_data = note_data;
2700 thread_args.note_size = note_size;
2701 thread_args.num_notes = 0;
2702 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2703 if (thread_args.num_notes == 0)
2704 {
2705 /* iterate_over_lwps didn't come up with any LWPs; just
2706 use inferior_ptid. */
2707 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2708 note_data, note_size);
2709 }
2710 else
2711 {
2712 note_data = thread_args.note_data;
2713 }
2714
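/* Append the target's auxiliary vector, if we can read it, as an
   NT_AUXV core note.  */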
2715 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
2716 NULL, &auxv);
2717 if (auxv_len > 0)
2718 {
2719 note_data = elfcore_write_note (obfd, note_data, note_size,
2720 "CORE", NT_AUXV, auxv, auxv_len);
2721 xfree (auxv);
2722 }
2723
2724 make_cleanup (xfree, note_data);
2725 return note_data;
2726 }
2727
2728 /* Implement the "info proc" command. */
2729
2730 static void
2731 linux_nat_info_proc_cmd (char *args, int from_tty)
2732 {
2733 long long pid = PIDGET (inferior_ptid);
2734 FILE *procfile;
2735 char **argv = NULL;
2736 char buffer[MAXPATHLEN];
2737 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2738 int cmdline_f = 1;
2739 int cwd_f = 1;
2740 int exe_f = 1;
2741 int mappings_f = 0;
2742 int environ_f = 0;
2743 int status_f = 0;
2744 int stat_f = 0;
2745 int all = 0;
2746 struct stat dummy;
2747
2748 if (args)
2749 {
2750 /* Break up 'args' into an argv array. */
2751 if ((argv = buildargv (args)) == NULL)
2752 nomem (0);
2753 else
2754 make_cleanup_freeargv (argv);
2755 }
2756 while (argv != NULL && *argv != NULL)
2757 {
2758 if (isdigit (argv[0][0]))
2759 {
2760 pid = strtoul (argv[0], NULL, 10);
2761 }
2762 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2763 {
2764 mappings_f = 1;
2765 }
2766 else if (strcmp (argv[0], "status") == 0)
2767 {
2768 status_f = 1;
2769 }
2770 else if (strcmp (argv[0], "stat") == 0)
2771 {
2772 stat_f = 1;
2773 }
2774 else if (strcmp (argv[0], "cmd") == 0)
2775 {
2776 cmdline_f = 1;
2777 }
2778 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2779 {
2780 exe_f = 1;
2781 }
2782 else if (strcmp (argv[0], "cwd") == 0)
2783 {
2784 cwd_f = 1;
2785 }
2786 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2787 {
2788 all = 1;
2789 }
2790 else
2791 {
2792 /* [...] (future options here) */
2793 }
2794 argv++;
2795 }
2796 if (pid == 0)
2797 error (_("No current process: you must name one."));
2798
2799 sprintf (fname1, "/proc/%lld", pid);
2800 if (stat (fname1, &dummy) != 0)
2801 error (_("No /proc directory: '%s'"), fname1);
2802
2803 printf_filtered (_("process %lld\n"), pid);
2804 if (cmdline_f || all)
2805 {
2806 sprintf (fname1, "/proc/%lld/cmdline", pid);
2807 if ((procfile = fopen (fname1, "r")) != NULL)
2808 {
2809 fgets (buffer, sizeof (buffer), procfile);
2810 printf_filtered ("cmdline = '%s'\n", buffer);
2811 fclose (procfile);
2812 }
2813 else
2814 warning (_("unable to open /proc file '%s'"), fname1);
2815 }
2816 if (cwd_f || all)
2817 {
2818 sprintf (fname1, "/proc/%lld/cwd", pid);
2819 memset (fname2, 0, sizeof (fname2));
2820 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2821 printf_filtered ("cwd = '%s'\n", fname2);
2822 else
2823 warning (_("unable to read link '%s'"), fname1);
2824 }
2825 if (exe_f || all)
2826 {
2827 sprintf (fname1, "/proc/%lld/exe", pid);
2828 memset (fname2, 0, sizeof (fname2));
2829 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2830 printf_filtered ("exe = '%s'\n", fname2);
2831 else
2832 warning (_("unable to read link '%s'"), fname1);
2833 }
2834 if (mappings_f || all)
2835 {
2836 sprintf (fname1, "/proc/%lld/maps", pid);
2837 if ((procfile = fopen (fname1, "r")) != NULL)
2838 {
2839 long long addr, endaddr, size, offset, inode;
2840 char permissions[8], device[8], filename[MAXPATHLEN];
2841
2842 printf_filtered (_("Mapped address spaces:\n\n"));
2843 if (TARGET_ADDR_BIT == 32)
2844 {
2845 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2846 "Start Addr",
2847 " End Addr",
2848 " Size", " Offset", "objfile");
2849 }
2850 else
2851 {
2852 printf_filtered (" %18s %18s %10s %10s %7s\n",
2853 "Start Addr",
2854 " End Addr",
2855 " Size", " Offset", "objfile");
2856 }
2857
2858 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2859 &offset, &device[0], &inode, &filename[0]))
2860 {
2861 size = endaddr - addr;
2862
2863 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2864 calls here (and possibly above) should be abstracted
2865 out into their own functions? Andrew suggests using
2866 a generic local_address_string instead to print out
2867 the addresses; that makes sense to me, too. */
2868
2869 if (TARGET_ADDR_BIT == 32)
2870 {
2871 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2872 (unsigned long) addr, /* FIXME: pr_addr */
2873 (unsigned long) endaddr,
2874 (int) size,
2875 (unsigned int) offset,
2876 filename[0] ? filename : "");
2877 }
2878 else
2879 {
2880 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2881 (unsigned long) addr, /* FIXME: pr_addr */
2882 (unsigned long) endaddr,
2883 (int) size,
2884 (unsigned int) offset,
2885 filename[0] ? filename : "");
2886 }
2887 }
2888
2889 fclose (procfile);
2890 }
2891 else
2892 warning (_("unable to open /proc file '%s'"), fname1);
2893 }
2894 if (status_f || all)
2895 {
2896 sprintf (fname1, "/proc/%lld/status", pid);
2897 if ((procfile = fopen (fname1, "r")) != NULL)
2898 {
2899 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2900 puts_filtered (buffer);
2901 fclose (procfile);
2902 }
2903 else
2904 warning (_("unable to open /proc file '%s'"), fname1);
2905 }
2906 if (stat_f || all)
2907 {
2908 sprintf (fname1, "/proc/%lld/stat", pid);
2909 if ((procfile = fopen (fname1, "r")) != NULL)
2910 {
2911 int itmp;
2912 char ctmp;
2913
2914 if (fscanf (procfile, "%d ", &itmp) > 0)
2915 printf_filtered (_("Process: %d\n"), itmp);
2916 if (fscanf (procfile, "%s ", &buffer[0]) > 0)
2917 printf_filtered (_("Exec file: %s\n"), buffer);
2918 if (fscanf (procfile, "%c ", &ctmp) > 0)
2919 printf_filtered (_("State: %c\n"), ctmp);
2920 if (fscanf (procfile, "%d ", &itmp) > 0)
2921 printf_filtered (_("Parent process: %d\n"), itmp);
2922 if (fscanf (procfile, "%d ", &itmp) > 0)
2923 printf_filtered (_("Process group: %d\n"), itmp);
2924 if (fscanf (procfile, "%d ", &itmp) > 0)
2925 printf_filtered (_("Session id: %d\n"), itmp);
2926 if (fscanf (procfile, "%d ", &itmp) > 0)
2927 printf_filtered (_("TTY: %d\n"), itmp);
2928 if (fscanf (procfile, "%d ", &itmp) > 0)
2929 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2930 if (fscanf (procfile, "%u ", &itmp) > 0)
2931 printf_filtered (_("Flags: 0x%x\n"), itmp);
2932 if (fscanf (procfile, "%u ", &itmp) > 0)
2933 printf_filtered (_("Minor faults (no memory page): %u\n"),
2934 (unsigned int) itmp);
2935 if (fscanf (procfile, "%u ", &itmp) > 0)
2936 printf_filtered (_("Minor faults, children: %u\n"),
2937 (unsigned int) itmp);
2938 if (fscanf (procfile, "%u ", &itmp) > 0)
2939 printf_filtered (_("Major faults (memory page faults): %u\n"),
2940 (unsigned int) itmp);
2941 if (fscanf (procfile, "%u ", &itmp) > 0)
2942 printf_filtered (_("Major faults, children: %u\n"),
2943 (unsigned int) itmp);
2944 if (fscanf (procfile, "%d ", &itmp) > 0)
2945 printf_filtered ("utime: %d\n", itmp);
2946 if (fscanf (procfile, "%d ", &itmp) > 0)
2947 printf_filtered ("stime: %d\n", itmp);
2948 if (fscanf (procfile, "%d ", &itmp) > 0)
2949 printf_filtered ("utime, children: %d\n", itmp);
2950 if (fscanf (procfile, "%d ", &itmp) > 0)
2951 printf_filtered ("stime, children: %d\n", itmp);
2952 if (fscanf (procfile, "%d ", &itmp) > 0)
2953 printf_filtered (_("jiffies remaining in current time slice: %d\n"),
2954 itmp);
2955 if (fscanf (procfile, "%d ", &itmp) > 0)
2956 printf_filtered ("'nice' value: %d\n", itmp);
2957 if (fscanf (procfile, "%u ", &itmp) > 0)
2958 printf_filtered (_("jiffies until next timeout: %u\n"),
2959 (unsigned int) itmp);
2960 if (fscanf (procfile, "%u ", &itmp) > 0)
2961 printf_filtered ("jiffies until next SIGALRM: %u\n",
2962 (unsigned int) itmp);
2963 if (fscanf (procfile, "%d ", &itmp) > 0)
2964 printf_filtered (_("start time (jiffies since system boot): %d\n"),
2965 itmp);
2966 if (fscanf (procfile, "%u ", &itmp) > 0)
2967 printf_filtered (_("Virtual memory size: %u\n"),
2968 (unsigned int) itmp);
2969 if (fscanf (procfile, "%u ", &itmp) > 0)
2970 printf_filtered (_("Resident set size: %u\n"), (unsigned int) itmp);
2971 if (fscanf (procfile, "%u ", &itmp) > 0)
2972 printf_filtered ("rlim: %u\n", (unsigned int) itmp);
2973 if (fscanf (procfile, "%u ", &itmp) > 0)
2974 printf_filtered (_("Start of text: 0x%x\n"), itmp);
2975 if (fscanf (procfile, "%u ", &itmp) > 0)
2976 printf_filtered (_("End of text: 0x%x\n"), itmp);
2977 if (fscanf (procfile, "%u ", &itmp) > 0)
2978 printf_filtered (_("Start of stack: 0x%x\n"), itmp);
2979 #if 0 /* Don't know how architecture-dependent the rest is...
2980 Anyway the signal bitmap info is available from "status". */
2981 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2982 printf_filtered (_("Kernel stack pointer: 0x%x\n"), itmp);
2983 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2984 printf_filtered (_("Kernel instr pointer: 0x%x\n"), itmp);
2985 if (fscanf (procfile, "%d ", &itmp) > 0)
2986 printf_filtered (_("Pending signals bitmap: 0x%x\n"), itmp);
2987 if (fscanf (procfile, "%d ", &itmp) > 0)
2988 printf_filtered (_("Blocked signals bitmap: 0x%x\n"), itmp);
2989 if (fscanf (procfile, "%d ", &itmp) > 0)
2990 printf_filtered (_("Ignored signals bitmap: 0x%x\n"), itmp);
2991 if (fscanf (procfile, "%d ", &itmp) > 0)
2992 printf_filtered (_("Catched signals bitmap: 0x%x\n"), itmp);
2993 if (fscanf (procfile, "%u ", &itmp) > 0) /* FIXME arch? */
2994 printf_filtered (_("wchan (system call): 0x%x\n"), itmp);
2995 #endif
2996 fclose (procfile);
2997 }
2998 else
2999 warning (_("unable to open /proc file '%s'"), fname1);
3000 }
3001 }
3002
3003 /* Implement the to_xfer_partial interface for memory reads using the /proc
3004 filesystem. Because we can use a single read() call for /proc, this
3005 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3006 but it doesn't support writes. */
3007
3008 static LONGEST
3009 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3010 const char *annex, gdb_byte *readbuf,
3011 const gdb_byte *writebuf,
3012 ULONGEST offset, LONGEST len)
3013 {
3014 LONGEST ret;
3015 int fd;
3016 char filename[64];
3017
3018 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3019 return 0;
3020
3021 /* Don't bother for transfers shorter than three words. */
3022 if (len < 3 * sizeof (long))
3023 return 0;
3024
3025 /* We could keep this file open and cache it - possibly one per
3026 thread. That requires some juggling, but is even faster. */
3027 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3028 fd = open (filename, O_RDONLY | O_LARGEFILE);
3029 if (fd == -1)
3030 return 0;
3031
3032 /* If pread64 is available, use it. It's faster if the kernel
3033 supports it (only one syscall), and it's 64-bit safe even on
3034 32-bit platforms (for instance, SPARC debugging a SPARC64
3035 application). */
3036 #ifdef HAVE_PREAD64
3037 if (pread64 (fd, readbuf, len, offset) != len)
3038 #else
3039 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3040 #endif
3041 ret = 0;
3042 else
3043 ret = len;
3044
3045 close (fd);
3046 return ret;
3047 }
3048
3049 /* Parse LINE as a signal set and add its set bits to SIGS. */
3050
3051 static void
3052 add_line_to_sigset (const char *line, sigset_t *sigs)
3053 {
3054 int len = strlen (line) - 1;
3055 const char *p;
3056 int signum;
3057
3058 if (line[len] != '\n')
3059 error (_("Could not parse signal set: %s"), line);
3060
3061 p = line;
3062 signum = len * 4;
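/* The mask is printed with its most significant hex digit first, so
   count signal numbers down from the top as we scan left to right;
   each digit covers four signals.  */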
3063 while (len-- > 0)
3064 {
3065 int digit;
3066
3067 if (*p >= '0' && *p <= '9')
3068 digit = *p - '0';
3069 else if (*p >= 'a' && *p <= 'f')
3070 digit = *p - 'a' + 10;
3071 else
3072 error (_("Could not parse signal set: %s"), line);
3073
3074 signum -= 4;
3075
3076 if (digit & 1)
3077 sigaddset (sigs, signum + 1);
3078 if (digit & 2)
3079 sigaddset (sigs, signum + 2);
3080 if (digit & 4)
3081 sigaddset (sigs, signum + 3);
3082 if (digit & 8)
3083 sigaddset (sigs, signum + 4);
3084
3085 p++;
3086 }
3087 }
3088
3089 /* Find process PID's pending, blocked, and ignored signals from
3090 /proc/pid/status, and set PENDING, BLOCKED, and IGNORED to match. */
3091
3092 void
3093 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3094 {
3095 FILE *procfile;
3096 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3097 int signum;
3098
3099 sigemptyset (pending);
3100 sigemptyset (blocked);
3101 sigemptyset (ignored);
3102 sprintf (fname, "/proc/%d/status", pid);
3103 procfile = fopen (fname, "r");
3104 if (procfile == NULL)
3105 error (_("Could not open %s"), fname);
3106
3107 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3108 {
3109 /* Normal queued signals are on the SigPnd line in the status
3110 file. However, 2.6 kernels also have a "shared" pending
3111 queue for delivering signals to a thread group, so check for
3112 a ShdPnd line also.
3113
3114 Unfortunately some Red Hat kernels include the shared pending
3115 queue but not the ShdPnd status field. */
3116
3117 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3118 add_line_to_sigset (buffer + 8, pending);
3119 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3120 add_line_to_sigset (buffer + 8, pending);
3121 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3122 add_line_to_sigset (buffer + 8, blocked);
3123 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3124 add_line_to_sigset (buffer + 8, ignored);
3125 }
3126
3127 fclose (procfile);
3128 }
3129
3130 static LONGEST
3131 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3132 const char *annex, gdb_byte *readbuf,
3133 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3134 {
3135 LONGEST xfer;
3136
3137 if (object == TARGET_OBJECT_AUXV)
3138 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3139 offset, len);
3140
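/* Try the /proc-based read first; it returns zero for writes and on
   failure, in which case fall back to the method inherited from
   inf-ptrace.  */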
3141 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3142 offset, len);
3143 if (xfer != 0)
3144 return xfer;
3145
3146 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3147 offset, len);
3148 }
3149
3150 #ifndef FETCH_INFERIOR_REGISTERS
3151
3152 /* Return the address in the core dump or inferior of register
3153 REGNO. */
3154
3155 static CORE_ADDR
3156 linux_register_u_offset (int regno)
3157 {
3158 /* FIXME drow/2005-09-04: The hardcoded use of register_addr should go
3159 away. This requires disentangling the various definitions of it
3160 (particularly alpha-nat.c's). */
3161 return register_addr (regno, 0);
3162 }
3163
3164 #endif
3165
3166 /* Create a prototype generic Linux target. The client can override
3167 it with local methods. */
3168
3169 struct target_ops *
3170 linux_target (void)
3171 {
3172 struct target_ops *t;
3173
3174 #ifdef FETCH_INFERIOR_REGISTERS
3175 t = inf_ptrace_target ();
3176 #else
3177 t = inf_ptrace_trad_target (linux_register_u_offset);
3178 #endif
3179 t->to_insert_fork_catchpoint = child_insert_fork_catchpoint;
3180 t->to_insert_vfork_catchpoint = child_insert_vfork_catchpoint;
3181 t->to_insert_exec_catchpoint = child_insert_exec_catchpoint;
3182 t->to_pid_to_exec_file = child_pid_to_exec_file;
3183 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3184 t->to_post_attach = child_post_attach;
3185 t->to_follow_fork = child_follow_fork;
3186 t->to_find_memory_regions = linux_nat_find_memory_regions;
3187 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3188
3189 super_xfer_partial = t->to_xfer_partial;
3190 t->to_xfer_partial = linux_xfer_partial;
3191
3192 return t;
3193 }
3194
3195 void
3196 linux_nat_add_target (struct target_ops *t)
3197 {
3198 /* Save the provided single-threaded target. We save this in a separate
3199 variable because another target we've inherited from (e.g. inf-ptrace)
3200 may have saved a pointer to T; we want to use it for the final
3201 process stratum target. */
3202 linux_ops_saved = *t;
3203 linux_ops = &linux_ops_saved;
3204
3205 /* Override some methods for multithreading. */
3206 t->to_attach = linux_nat_attach;
3207 t->to_detach = linux_nat_detach;
3208 t->to_resume = linux_nat_resume;
3209 t->to_wait = linux_nat_wait;
3210 t->to_xfer_partial = linux_nat_xfer_partial;
3211 t->to_kill = linux_nat_kill;
3212 t->to_mourn_inferior = linux_nat_mourn_inferior;
3213 t->to_thread_alive = linux_nat_thread_alive;
3214 t->to_pid_to_str = linux_nat_pid_to_str;
3215 t->to_has_thread_control = tc_schedlock;
3216
3217 /* We don't change the stratum; this target will sit at
3218 process_stratum and thread_db will sit at thread_stratum. This
3219 is a little strange, since this is a multi-threaded-capable
3220 target, but we want to be on the stack below thread_db, and we
3221 also want to be used for single-threaded processes. */
3222
3223 add_target (t);
3224
3225 /* TODO: Eliminate this and have libthread_db use
3226 find_target_beneath. */
3227 thread_db_init (t);
3228 }
3229
3230 void
3231 _initialize_linux_nat (void)
3232 {
3233 struct sigaction action;
3234
3235 add_info ("proc", linux_nat_info_proc_cmd, _("\
3236 Show /proc process information about any running process.\n\
3237 Specify any process id, or use the program being debugged by default.\n\
3238 Specify any of the following keywords for detailed info:\n\
3239 mappings -- list of mapped memory regions.\n\
3240 stat -- list process information from /proc/PID/stat.\n\
3241 status -- list process information from /proc/PID/status.\n\
3242 all -- list all available /proc info."));
3243
3244 /* Save the original signal mask. */
3245 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3246
3247 action.sa_handler = sigchld_handler;
3248 sigemptyset (&action.sa_mask);
3249 action.sa_flags = SA_RESTART;
3250 sigaction (SIGCHLD, &action, NULL);
3251
3252 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3253 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3254 sigdelset (&suspend_mask, SIGCHLD);
3255
3256 sigemptyset (&blocked_mask);
3257
3258 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3259 Set debugging of GNU/Linux lwp module."), _("\
3260 Show debugging of GNU/Linux lwp module."), _("\
3261 Enables printf debugging output."),
3262 NULL,
3263 show_debug_linux_nat,
3264 &setdebuglist, &showdebuglist);
3265 }
3266 \f
3267
3268 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3269 the GNU/Linux Threads library and therefore doesn't really belong
3270 here. */
3271
3272 /* Read variable NAME in the target and return its value if found.
3273 Otherwise return zero. It is assumed that the type of the variable
3274 is `int'. */
3275
3276 static int
3277 get_signo (const char *name)
3278 {
3279 struct minimal_symbol *ms;
3280 int signo;
3281
3282 ms = lookup_minimal_symbol (name, NULL, NULL);
3283 if (ms == NULL)
3284 return 0;
3285
3286 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3287 sizeof (signo)) != 0)
3288 return 0;
3289
3290 return signo;
3291 }
3292
3293 /* Return the set of signals used by the threads library in *SET. */
3294
3295 void
3296 lin_thread_get_thread_signals (sigset_t *set)
3297 {
3298 struct sigaction action;
3299 int restart, cancel;
3300
3301 sigemptyset (set);
3302
3303 restart = get_signo ("__pthread_sig_restart");
3304 cancel = get_signo ("__pthread_sig_cancel");
3305
3306 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3307 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3308 not provide any way for the debugger to query the signal numbers -
3309 fortunately they don't change! */
3310
3311 if (restart == 0)
3312 restart = __SIGRTMIN;
3313
3314 if (cancel == 0)
3315 cancel = __SIGRTMIN + 1;
3316
3317 sigaddset (set, restart);
3318 sigaddset (set, cancel);
3319
3320 /* The GNU/Linux Threads library makes terminating threads send a
3321 special "cancel" signal instead of SIGCHLD. Make sure we catch
3322 those (to prevent them from terminating GDB itself, which is
3323 likely to be their default action) and treat them the same way as
3324 SIGCHLD. */
3325
3326 action.sa_handler = sigchld_handler;
3327 sigemptyset (&action.sa_mask);
3328 action.sa_flags = SA_RESTART;
3329 sigaction (cancel, &action, NULL);
3330
3331 /* We block the "cancel" signal throughout this code ... */
3332 sigaddset (&blocked_mask, cancel);
3333 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3334
3335 /* ... except during a sigsuspend. */
3336 sigdelset (&suspend_mask, cancel);
3337 }
3338