1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49
50 #ifndef O_LARGEFILE
51 #define O_LARGEFILE 0
52 #endif
53
54 /* If the system headers did not provide the constants, hard-code the normal
55 values. */
56 #ifndef PTRACE_EVENT_FORK
57
58 #define PTRACE_SETOPTIONS 0x4200
59 #define PTRACE_GETEVENTMSG 0x4201
60
61 /* options set using PTRACE_SETOPTIONS */
62 #define PTRACE_O_TRACESYSGOOD 0x00000001
63 #define PTRACE_O_TRACEFORK 0x00000002
64 #define PTRACE_O_TRACEVFORK 0x00000004
65 #define PTRACE_O_TRACECLONE 0x00000008
66 #define PTRACE_O_TRACEEXEC 0x00000010
67 #define PTRACE_O_TRACEVFORKDONE 0x00000020
68 #define PTRACE_O_TRACEEXIT 0x00000040
69
70 /* Wait extended result codes for the above trace options. */
71 #define PTRACE_EVENT_FORK 1
72 #define PTRACE_EVENT_VFORK 2
73 #define PTRACE_EVENT_CLONE 3
74 #define PTRACE_EVENT_EXEC 4
75 #define PTRACE_EVENT_VFORK_DONE 5
76 #define PTRACE_EVENT_EXIT 6
77
78 #endif /* PTRACE_EVENT_FORK */
79
80 /* We can't always assume that this flag is available, but all systems
81 with the ptrace event handlers also have __WALL, so it's safe to use
82 here. */
83 #ifndef __WALL
84 #define __WALL 0x40000000 /* Wait for any child. */
85 #endif
86
87 #ifndef PTRACE_GETSIGINFO
88 #define PTRACE_GETSIGINFO 0x4202
89 #endif
90
91 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
92 the use of the multi-threaded target. */
93 static struct target_ops *linux_ops;
94 static struct target_ops linux_ops_saved;
95
96 /* The method to call, if any, when a new thread is attached. */
97 static void (*linux_nat_new_thread) (ptid_t);
98
99 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
100 Called by our to_xfer_partial. */
101 static LONGEST (*super_xfer_partial) (struct target_ops *,
102 enum target_object,
103 const char *, gdb_byte *,
104 const gdb_byte *,
105 ULONGEST, LONGEST);
106
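/* When non-zero, print debug messages about low-level LWP handling
   to gdb_stdlog.  */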
107 static int debug_linux_nat;
108 static void
109 show_debug_linux_nat (struct ui_file *file, int from_tty,
110 struct cmd_list_element *c, const char *value)
111 {
112 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
113 value);
114 }
115
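/* When following a vfork, the process ID of the parent that we are
   quietly holding on to until it is safe to detach from it; zero
   when unused.  See linux_child_follow_fork and the
   PTRACE_EVENT_EXEC handling in linux_handle_extended_wait.  */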
116 static int linux_parent_pid;
117
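/* A simple list of (PID, wait status) pairs.  stopped_pids below
   records children whose initial stop was seen before the
   corresponding fork/clone event was processed;
   linux_record_stopped_pid adds entries and
   linux_handle_extended_wait consumes them.  */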
118 struct simple_pid_list
119 {
120 int pid;
121 int status;
122 struct simple_pid_list *next;
123 };
124 struct simple_pid_list *stopped_pids;
125
126 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
127 	   cannot be used, 1 if it can.  */
128
129 static int linux_supports_tracefork_flag = -1;
130
131 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
132 PTRACE_O_TRACEVFORKDONE. */
133
134 static int linux_supports_tracevforkdone_flag = -1;
135
136 \f
137 /* Trivial list manipulation functions to keep track of a list of
138 	   newly stopped processes.  */
139 static void
140 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
141 {
142 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
143 new_pid->pid = pid;
144 new_pid->status = status;
145 new_pid->next = *listp;
146 *listp = new_pid;
147 }
148
149 static int
150 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
151 {
152 struct simple_pid_list **p;
153
154 for (p = listp; *p != NULL; p = &(*p)->next)
155 if ((*p)->pid == pid)
156 {
157 struct simple_pid_list *next = (*p)->next;
158 *status = (*p)->status;
159 xfree (*p);
160 *p = next;
161 return 1;
162 }
163 return 0;
164 }
165
166 static void
167 linux_record_stopped_pid (int pid, int status)
168 {
169 add_to_pid_list (&stopped_pids, pid, status);
170 }
171
172 \f
173 /* A helper function for linux_test_for_tracefork, called after fork (). */
174
175 static void
176 linux_tracefork_child (void)
177 {
178 int ret;
179
180 ptrace (PTRACE_TRACEME, 0, 0, 0);
181 kill (getpid (), SIGSTOP);
182 fork ();
183 _exit (0);
184 }
185
186 /* Wrapper function for waitpid which handles EINTR. */
187
188 static int
189 my_waitpid (int pid, int *status, int flags)
190 {
191 int ret;
192 do
193 {
194 ret = waitpid (pid, status, flags);
195 }
196 while (ret == -1 && errno == EINTR);
197
198 return ret;
199 }
200
201 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
202
203 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
204 we know that the feature is not available. This may change the tracing
205 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
206
207 However, if it succeeds, we don't know for sure that the feature is
208 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
209 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
210 fork tracing, and let it fork. If the process exits, we assume that we
211 can't use TRACEFORK; if we get the fork notification, and we can extract
212 the new child's PID, then we assume that we can. */
213
214 static void
215 linux_test_for_tracefork (int original_pid)
216 {
217 int child_pid, ret, status;
218 long second_pid;
219
220 linux_supports_tracefork_flag = 0;
221 linux_supports_tracevforkdone_flag = 0;
222
223 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
224 if (ret != 0)
225 return;
226
227 child_pid = fork ();
228 if (child_pid == -1)
229 perror_with_name (("fork"));
230
231 if (child_pid == 0)
232 linux_tracefork_child ();
233
234 ret = my_waitpid (child_pid, &status, 0);
235 if (ret == -1)
236 perror_with_name (("waitpid"));
237 else if (ret != child_pid)
238 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
239 if (! WIFSTOPPED (status))
240 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
241
242 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
243 if (ret != 0)
244 {
245 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
246 if (ret != 0)
247 {
248 warning (_("linux_test_for_tracefork: failed to kill child"));
249 return;
250 }
251
252 ret = my_waitpid (child_pid, &status, 0);
253 if (ret != child_pid)
254 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
255 else if (!WIFSIGNALED (status))
256 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
257 "killed child"), status);
258
259 return;
260 }
261
262 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
263 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
264 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
265 linux_supports_tracevforkdone_flag = (ret == 0);
266
267 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
268 if (ret != 0)
269 warning (_("linux_test_for_tracefork: failed to resume child"));
270
271 ret = my_waitpid (child_pid, &status, 0);
272
273 if (ret == child_pid && WIFSTOPPED (status)
274 && status >> 16 == PTRACE_EVENT_FORK)
275 {
276 second_pid = 0;
277 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
278 if (ret == 0 && second_pid != 0)
279 {
280 int second_status;
281
282 linux_supports_tracefork_flag = 1;
283 my_waitpid (second_pid, &second_status, 0);
284 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
285 if (ret != 0)
286 warning (_("linux_test_for_tracefork: failed to kill second child"));
287 my_waitpid (second_pid, &status, 0);
288 }
289 }
290 else
291 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
292 "(%d, status 0x%x)"), ret, status);
293
294 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
295 if (ret != 0)
296 warning (_("linux_test_for_tracefork: failed to kill child"));
297 my_waitpid (child_pid, &status, 0);
298 }
299
300 /* Return non-zero iff we have tracefork functionality available.
301 This function also sets linux_supports_tracefork_flag. */
302
303 static int
304 linux_supports_tracefork (int pid)
305 {
306 if (linux_supports_tracefork_flag == -1)
307 linux_test_for_tracefork (pid);
308 return linux_supports_tracefork_flag;
309 }
310
311 static int
312 linux_supports_tracevforkdone (int pid)
313 {
314 if (linux_supports_tracefork_flag == -1)
315 linux_test_for_tracefork (pid);
316 return linux_supports_tracevforkdone_flag;
317 }
318
319 \f
320 void
321 linux_enable_event_reporting (ptid_t ptid)
322 {
323 int pid = ptid_get_lwp (ptid);
324 int options;
325
326 if (pid == 0)
327 pid = ptid_get_pid (ptid);
328
329 if (! linux_supports_tracefork (pid))
330 return;
331
332 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
333 | PTRACE_O_TRACECLONE;
334 if (linux_supports_tracevforkdone (pid))
335 options |= PTRACE_O_TRACEVFORKDONE;
336
337 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
338 read-only process state. */
339
340 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
341 }
342
343 static void
344 linux_child_post_attach (int pid)
345 {
346 linux_enable_event_reporting (pid_to_ptid (pid));
347 check_for_thread_db ();
348 }
349
350 static void
351 linux_child_post_startup_inferior (ptid_t ptid)
352 {
353 linux_enable_event_reporting (ptid);
354 check_for_thread_db ();
355 }
356
357 static int
358 linux_child_follow_fork (struct target_ops *ops, int follow_child)
359 {
360 ptid_t last_ptid;
361 struct target_waitstatus last_status;
362 int has_vforked;
363 int parent_pid, child_pid;
364
365 get_last_target_status (&last_ptid, &last_status);
366 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
367 parent_pid = ptid_get_lwp (last_ptid);
368 if (parent_pid == 0)
369 parent_pid = ptid_get_pid (last_ptid);
370 child_pid = last_status.value.related_pid;
371
372 if (! follow_child)
373 {
374 /* We're already attached to the parent, by default. */
375
376 /* Before detaching from the child, remove all breakpoints from
377 it. (This won't actually modify the breakpoint list, but will
378 physically remove the breakpoints from the child.) */
379 /* If we vforked this will remove the breakpoints from the parent
380 also, but they'll be reinserted below. */
381 detach_breakpoints (child_pid);
382
383 /* Detach new forked process? */
384 if (detach_fork)
385 {
386 if (debug_linux_nat)
387 {
388 target_terminal_ours ();
389 fprintf_filtered (gdb_stdlog,
390 "Detaching after fork from child process %d.\n",
391 child_pid);
392 }
393
394 ptrace (PTRACE_DETACH, child_pid, 0, 0);
395 }
396 else
397 {
398 struct fork_info *fp;
399 /* Retain child fork in ptrace (stopped) state. */
400 fp = find_fork_pid (child_pid);
401 if (!fp)
402 fp = add_fork (child_pid);
403 fork_save_infrun_state (fp, 0);
404 }
405
406 if (has_vforked)
407 {
408 gdb_assert (linux_supports_tracefork_flag >= 0);
409 if (linux_supports_tracevforkdone (0))
410 {
411 int status;
412
413 ptrace (PTRACE_CONT, parent_pid, 0, 0);
414 my_waitpid (parent_pid, &status, __WALL);
415 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
416 warning (_("Unexpected waitpid result %06x when waiting for "
417 "vfork-done"), status);
418 }
419 else
420 {
421 /* We can't insert breakpoints until the child has
422 finished with the shared memory region. We need to
423 wait until that happens. Ideal would be to just
424 call:
425 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
426 - waitpid (parent_pid, &status, __WALL);
427 However, most architectures can't handle a syscall
428 being traced on the way out if it wasn't traced on
429 the way in.
430
431 We might also think to loop, continuing the child
432 until it exits or gets a SIGTRAP. One problem is
433 that the child might call ptrace with PTRACE_TRACEME.
434
435 There's no simple and reliable way to figure out when
436 the vforked child will be done with its copy of the
437 shared memory. We could step it out of the syscall,
438 two instructions, let it go, and then single-step the
439 parent once. When we have hardware single-step, this
440 would work; with software single-step it could still
441 be made to work but we'd have to be able to insert
442 single-step breakpoints in the child, and we'd have
443 to insert -just- the single-step breakpoint in the
444 parent. Very awkward.
445
446 In the end, the best we can do is to make sure it
447 runs for a little while. Hopefully it will be out of
448 range of any breakpoints we reinsert. Usually this
449 is only the single-step breakpoint at vfork's return
450 point. */
451
452 usleep (10000);
453 }
454
455 /* Since we vforked, breakpoints were removed in the parent
456 too. Put them back. */
457 reattach_breakpoints (parent_pid);
458 }
459 }
460 else
461 {
462 char child_pid_spelling[40];
463
464 /* Needed to keep the breakpoint lists in sync. */
465 if (! has_vforked)
466 detach_breakpoints (child_pid);
467
468 /* Before detaching from the parent, remove all breakpoints from it. */
469 remove_breakpoints ();
470
471 if (debug_linux_nat)
472 {
473 target_terminal_ours ();
474 fprintf_filtered (gdb_stdlog,
475 "Attaching after fork to child process %d.\n",
476 child_pid);
477 }
478
479 /* If we're vforking, we may want to hold on to the parent until
480 the child exits or execs. At exec time we can remove the old
481 breakpoints from the parent and detach it; at exit time we
482 could do the same (or even, sneakily, resume debugging it - the
483 child's exec has failed, or something similar).
484
485 This doesn't clean up "properly", because we can't call
486 target_detach, but that's OK; if the current target is "child",
487 then it doesn't need any further cleanups, and lin_lwp will
488 generally not encounter vfork (vfork is defined to fork
489 in libpthread.so).
490
491 The holding part is very easy if we have VFORKDONE events;
492 but keeping track of both processes is beyond GDB at the
493 moment. So we don't expose the parent to the rest of GDB.
494 Instead we quietly hold onto it until such time as we can
495 safely resume it. */
496
497 if (has_vforked)
498 linux_parent_pid = parent_pid;
499 else if (!detach_fork)
500 {
501 struct fork_info *fp;
502 /* Retain parent fork in ptrace (stopped) state. */
503 fp = find_fork_pid (parent_pid);
504 if (!fp)
505 fp = add_fork (parent_pid);
506 fork_save_infrun_state (fp, 0);
507 }
508 else
509 {
510 target_detach (NULL, 0);
511 }
512
513 inferior_ptid = ptid_build (child_pid, child_pid, 0);
514
515 /* Reinstall ourselves, since we might have been removed in
516 target_detach (which does other necessary cleanup). */
517
518 push_target (ops);
519 linux_nat_switch_fork (inferior_ptid);
520 check_for_thread_db ();
521
522 /* Reset breakpoints in the child as appropriate. */
523 follow_inferior_reset_breakpoints ();
524 }
525
526 return 0;
527 }
528
529 \f
530 static void
531 linux_child_insert_fork_catchpoint (int pid)
532 {
533 if (! linux_supports_tracefork (pid))
534 error (_("Your system does not support fork catchpoints."));
535 }
536
537 static void
538 linux_child_insert_vfork_catchpoint (int pid)
539 {
540 if (!linux_supports_tracefork (pid))
541 error (_("Your system does not support vfork catchpoints."));
542 }
543
544 static void
545 linux_child_insert_exec_catchpoint (int pid)
546 {
547 if (!linux_supports_tracefork (pid))
548 error (_("Your system does not support exec catchpoints."));
549 }
550
551 	/* On GNU/Linux there are no real LWPs.  The closest thing to LWPs
552 are processes sharing the same VM space. A multi-threaded process
553 is basically a group of such processes. However, such a grouping
554 is almost entirely a user-space issue; the kernel doesn't enforce
555 such a grouping at all (this might change in the future). In
556 general, we'll rely on the threads library (i.e. the GNU/Linux
557 Threads library) to provide such a grouping.
558
559 	   It is perfectly possible to write a multi-threaded application
560 without the assistance of a threads library, by using the clone
561 system call directly. This module should be able to give some
562 rudimentary support for debugging such applications if developers
563 specify the CLONE_PTRACE flag in the clone system call, and are
564 using the Linux kernel 2.4 or above.
565
566 Note that there are some peculiarities in GNU/Linux that affect
567 this code:
568
569 - In general one should specify the __WCLONE flag to waitpid in
570 order to make it report events for any of the cloned processes
571 (and leave it out for the initial process). However, if a cloned
572 	   process has exited, the exit status is only reported if the
573 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
574 we cannot use it since GDB must work on older systems too.
575
576 - When a traced, cloned process exits and is waited for by the
577 debugger, the kernel reassigns it to the original parent and
578 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
579 library doesn't notice this, which leads to the "zombie problem":
580 	   When debugged, a multi-threaded process that spawns a lot of
581 threads will run out of processes, even if the threads exit,
582 because the "zombies" stay around. */
583
584 /* List of known LWPs. */
585 struct lwp_info *lwp_list;
586
587 /* Number of LWPs in the list. */
588 static int num_lwps;
589 \f
590
591 #define GET_LWP(ptid) ptid_get_lwp (ptid)
592 #define GET_PID(ptid) ptid_get_pid (ptid)
593 #define is_lwp(ptid) (GET_LWP (ptid) != 0)
594 #define BUILD_LWP(lwp, pid) ptid_build (pid, lwp, 0)
595
596 /* If the last reported event was a SIGTRAP, this variable is set to
597 the process id of the LWP/thread that got it. */
598 ptid_t trap_ptid;
599 \f
600
601 /* Since we cannot wait (in linux_nat_wait) for the initial process and
602 any cloned processes with a single call to waitpid, we have to use
603 the WNOHANG flag and call waitpid in a loop. To optimize
604 things a bit we use `sigsuspend' to wake us up when a process has
605 something to report (it will send us a SIGCHLD if it has). To make
606 	   this work we have to juggle the signal mask.  We save the
607 	   original signal mask so that we can restore it before creating a
608 new process in order to avoid blocking certain signals in the
609 inferior. We then block SIGCHLD during the waitpid/sigsuspend
610 loop. */
611
612 /* Original signal mask. */
613 static sigset_t normal_mask;
614
615 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
616 _initialize_linux_nat. */
617 static sigset_t suspend_mask;
618
619 	/* Signals to block to make sigsuspend work.  */
620 static sigset_t blocked_mask;
621 \f
622
623 /* Prototypes for local functions. */
624 static int stop_wait_callback (struct lwp_info *lp, void *data);
625 static int linux_nat_thread_alive (ptid_t ptid);
626 static char *linux_child_pid_to_exec_file (int pid);
627 \f
628 /* Convert wait status STATUS to a string. Used for printing debug
629 messages only. */
630
631 static char *
632 status_to_str (int status)
633 {
634 static char buf[64];
635
636 if (WIFSTOPPED (status))
637 snprintf (buf, sizeof (buf), "%s (stopped)",
638 strsignal (WSTOPSIG (status)));
639 else if (WIFSIGNALED (status))
640 snprintf (buf, sizeof (buf), "%s (terminated)",
641 	              strsignal (WTERMSIG (status)));
642 else
643 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
644
645 return buf;
646 }
647
648 	/* Initialize the list of LWPs.  Note that this module, contrary to
649 	   what GDB's generic threads layer does for its thread list,
650 	   re-initializes the LWP list whenever we mourn the inferior or
651 	   detach from it (detaching does not involve mourning).  */
652
653 static void
654 init_lwp_list (void)
655 {
656 struct lwp_info *lp, *lpnext;
657
658 for (lp = lwp_list; lp; lp = lpnext)
659 {
660 lpnext = lp->next;
661 xfree (lp);
662 }
663
664 lwp_list = NULL;
665 num_lwps = 0;
666 }
667
668 	/* Add the LWP specified by PTID to the list.  Return a pointer to the
669 structure describing the new LWP. The LWP should already be stopped
670 (with an exception for the very first LWP). */
671
672 static struct lwp_info *
673 add_lwp (ptid_t ptid)
674 {
675 struct lwp_info *lp;
676
677 gdb_assert (is_lwp (ptid));
678
679 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
680
681 memset (lp, 0, sizeof (struct lwp_info));
682
683 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
684
685 lp->ptid = ptid;
686
687 lp->next = lwp_list;
688 lwp_list = lp;
689 ++num_lwps;
690
691 if (num_lwps > 1 && linux_nat_new_thread != NULL)
692 linux_nat_new_thread (ptid);
693
694 return lp;
695 }
696
697 	/* Remove the LWP specified by PTID from the list.  */
698
699 static void
700 delete_lwp (ptid_t ptid)
701 {
702 struct lwp_info *lp, *lpprev;
703
704 lpprev = NULL;
705
706 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
707 if (ptid_equal (lp->ptid, ptid))
708 break;
709
710 if (!lp)
711 return;
712
713 num_lwps--;
714
715 if (lpprev)
716 lpprev->next = lp->next;
717 else
718 lwp_list = lp->next;
719
720 xfree (lp);
721 }
722
723 /* Return a pointer to the structure describing the LWP corresponding
724 	   to PTID.  If no corresponding LWP could be found, return NULL.  */
725
726 static struct lwp_info *
727 find_lwp_pid (ptid_t ptid)
728 {
729 struct lwp_info *lp;
730 int lwp;
731
732 if (is_lwp (ptid))
733 lwp = GET_LWP (ptid);
734 else
735 lwp = GET_PID (ptid);
736
737 for (lp = lwp_list; lp; lp = lp->next)
738 if (lwp == GET_LWP (lp->ptid))
739 return lp;
740
741 return NULL;
742 }
743
744 /* Call CALLBACK with its second argument set to DATA for every LWP in
745 the list. If CALLBACK returns 1 for a particular LWP, return a
746 pointer to the structure describing that LWP immediately.
747 Otherwise return NULL. */
748
749 struct lwp_info *
750 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
751 {
752 struct lwp_info *lp, *lpnext;
753
754 for (lp = lwp_list; lp; lp = lpnext)
755 {
756 lpnext = lp->next;
757 if ((*callback) (lp, data))
758 return lp;
759 }
760
761 return NULL;
762 }
763
764 /* Update our internal state when changing from one fork (checkpoint,
765 et cetera) to another indicated by NEW_PTID. We can only switch
766 single-threaded applications, so we only create one new LWP, and
767 the previous list is discarded. */
768
769 void
770 linux_nat_switch_fork (ptid_t new_ptid)
771 {
772 struct lwp_info *lp;
773
774 init_lwp_list ();
775 lp = add_lwp (new_ptid);
776 lp->stopped = 1;
777 }
778
779 /* Record a PTID for later deletion. */
780
781 struct saved_ptids
782 {
783 ptid_t ptid;
784 struct saved_ptids *next;
785 };
786 static struct saved_ptids *threads_to_delete;
787
788 static void
789 record_dead_thread (ptid_t ptid)
790 {
791 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
792 p->ptid = ptid;
793 p->next = threads_to_delete;
794 threads_to_delete = p;
795 }
796
797 /* Delete any dead threads which are not the current thread. */
798
799 static void
800 prune_lwps (void)
801 {
802 struct saved_ptids **p = &threads_to_delete;
803
804 while (*p)
805 if (! ptid_equal ((*p)->ptid, inferior_ptid))
806 {
807 struct saved_ptids *tmp = *p;
808 delete_thread (tmp->ptid);
809 *p = tmp->next;
810 xfree (tmp);
811 }
812 else
813 p = &(*p)->next;
814 }
815
816 /* Callback for iterate_over_threads that finds a thread corresponding
817 to the given LWP. */
818
819 static int
820 find_thread_from_lwp (struct thread_info *thr, void *dummy)
821 {
822 ptid_t *ptid_p = dummy;
823
824 if (GET_LWP (thr->ptid) && GET_LWP (thr->ptid) == GET_LWP (*ptid_p))
825 return 1;
826 else
827 return 0;
828 }
829
830 /* Handle the exit of a single thread LP. */
831
832 static void
833 exit_lwp (struct lwp_info *lp)
834 {
835 if (in_thread_list (lp->ptid))
836 {
837 /* Core GDB cannot deal with us deleting the current thread. */
838 if (!ptid_equal (lp->ptid, inferior_ptid))
839 delete_thread (lp->ptid);
840 else
841 record_dead_thread (lp->ptid);
842 printf_unfiltered (_("[%s exited]\n"),
843 target_pid_to_str (lp->ptid));
844 }
845 else
846 {
847 /* Even if LP->PTID is not in the global GDB thread list, the
848 LWP may be - with an additional thread ID. We don't need
849 to print anything in this case; thread_db is in use and
850 already took care of that. But it didn't delete the thread
851 in order to handle zombies correctly. */
852
853 struct thread_info *thr;
854
855 thr = iterate_over_threads (find_thread_from_lwp, &lp->ptid);
856 if (thr)
857 {
858 if (!ptid_equal (thr->ptid, inferior_ptid))
859 delete_thread (thr->ptid);
860 else
861 record_dead_thread (thr->ptid);
862 }
863 }
864
865 delete_lwp (lp->ptid);
866 }
867
868 	/* Attach to the LWP specified by PTID.  Return 0 if successful or
869 	   -1 if the new LWP could not be attached.  */
872
873 int
874 lin_lwp_attach_lwp (ptid_t ptid)
875 {
876 struct lwp_info *lp;
877
878 gdb_assert (is_lwp (ptid));
879
880 /* Make sure SIGCHLD is blocked. We don't want SIGCHLD events
881 to interrupt either the ptrace() or waitpid() calls below. */
882 if (!sigismember (&blocked_mask, SIGCHLD))
883 {
884 sigaddset (&blocked_mask, SIGCHLD);
885 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
886 }
887
888 lp = find_lwp_pid (ptid);
889
890 /* We assume that we're already attached to any LWP that has an id
891 equal to the overall process id, and to any LWP that is already
892 in our list of LWPs. If we're not seeing exit events from threads
893 and we've had PID wraparound since we last tried to stop all threads,
894 this assumption might be wrong; fortunately, this is very unlikely
895 to happen. */
896 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
897 {
898 pid_t pid;
899 int status;
900 int cloned = 0;
901
902 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
903 {
904 /* If we fail to attach to the thread, issue a warning,
905 but continue. One way this can happen is if thread
906 creation is interrupted; as of Linux kernel 2.6.19, a
907 bug may place threads in the thread list and then fail
908 to create them. */
909 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
910 safe_strerror (errno));
911 return -1;
912 }
913
914 if (debug_linux_nat)
915 fprintf_unfiltered (gdb_stdlog,
916 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
917 target_pid_to_str (ptid));
918
919 pid = my_waitpid (GET_LWP (ptid), &status, 0);
920 if (pid == -1 && errno == ECHILD)
921 {
922 /* Try again with __WCLONE to check cloned processes. */
923 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
924 cloned = 1;
925 }
926
927 gdb_assert (pid == GET_LWP (ptid)
928 && WIFSTOPPED (status) && WSTOPSIG (status));
929
930 if (lp == NULL)
931 lp = add_lwp (ptid);
932 lp->cloned = cloned;
933
934 target_post_attach (pid);
935
936 lp->stopped = 1;
937
938 if (debug_linux_nat)
939 {
940 fprintf_unfiltered (gdb_stdlog,
941 "LLAL: waitpid %s received %s\n",
942 target_pid_to_str (ptid),
943 status_to_str (status));
944 }
945 }
946 else
947 {
948 	      /* We assume that the LWP representing the original process is
949 	         already stopped.  Mark it as stopped in the data structure
950 	         that the GNU/Linux ptrace layer uses to keep track of
951 	         threads.  Note that this won't have been done yet, since the
952 	         main thread will presumably have been stopped by an attach
953 	         from a different layer.  */
954 if (lp == NULL)
955 lp = add_lwp (ptid);
956 lp->stopped = 1;
957 }
958
959 return 0;
960 }
961
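/* Attach to the process specified in ARGS via the layer below, wait
   for its initial stop, add it to the LWP list, and fake the SIGSTOP
   status that core GDB expects to see.  */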
962 static void
963 linux_nat_attach (char *args, int from_tty)
964 {
965 struct lwp_info *lp;
966 pid_t pid;
967 int status;
968 int cloned = 0;
969
970 /* FIXME: We should probably accept a list of process id's, and
971 attach all of them. */
972 linux_ops->to_attach (args, from_tty);
973
974 /* Make sure the initial process is stopped. The user-level threads
975 layer might want to poke around in the inferior, and that won't
976 work if things haven't stabilized yet. */
977 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
978 if (pid == -1 && errno == ECHILD)
979 {
980 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
981
982 /* Try again with __WCLONE to check cloned processes. */
983 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
984 cloned = 1;
985 }
986
987 gdb_assert (pid == GET_PID (inferior_ptid)
988 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
989
990 /* Add the initial process as the first LWP to the list. */
991 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
992 lp = add_lwp (inferior_ptid);
993 lp->cloned = cloned;
994
995 lp->stopped = 1;
996
997 /* Fake the SIGSTOP that core GDB expects. */
998 lp->status = W_STOPCODE (SIGSTOP);
999 lp->resumed = 1;
1000 if (debug_linux_nat)
1001 {
1002 fprintf_unfiltered (gdb_stdlog,
1003 "LLA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1004 }
1005 }
1006
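/* Callback for iterate_over_lwps, used by linux_nat_detach: resume
   LP if we stopped it with a SIGSTOP, then detach from it, except
   when its id equals the overall process id; that LWP is detached
   later, by linux_nat_detach itself.  */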
1007 static int
1008 detach_callback (struct lwp_info *lp, void *data)
1009 {
1010 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1011
1012 if (debug_linux_nat && lp->status)
1013 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1014 strsignal (WSTOPSIG (lp->status)),
1015 target_pid_to_str (lp->ptid));
1016
1017 while (lp->signalled && lp->stopped)
1018 {
1019 errno = 0;
1020 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1021 WSTOPSIG (lp->status)) < 0)
1022 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
1023 safe_strerror (errno));
1024
1025 if (debug_linux_nat)
1026 fprintf_unfiltered (gdb_stdlog,
1027 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1028 target_pid_to_str (lp->ptid),
1029 status_to_str (lp->status));
1030
1031 lp->stopped = 0;
1032 lp->signalled = 0;
1033 lp->status = 0;
1034 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1035 here. But since lp->signalled was cleared above,
1036 stop_wait_callback didn't do anything; the process was left
1037 running. Shouldn't we be waiting for it to stop?
1038 I've removed the call, since stop_wait_callback now does do
1039 something when called with lp->signalled == 0. */
1040
1041 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1042 }
1043
1044 /* We don't actually detach from the LWP that has an id equal to the
1045 overall process id just yet. */
1046 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1047 {
1048 errno = 0;
1049 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1050 WSTOPSIG (lp->status)) < 0)
1051 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1052 safe_strerror (errno));
1053
1054 if (debug_linux_nat)
1055 fprintf_unfiltered (gdb_stdlog,
1056 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1057 target_pid_to_str (lp->ptid),
1058 strsignal (WSTOPSIG (lp->status)));
1059
1060 delete_lwp (lp->ptid);
1061 }
1062
1063 return 0;
1064 }
1065
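/* Detach from the inferior: detach every other LWP via
   detach_callback, reset the LWP list and the signal masks, and let
   the layer below detach the initial process.  */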
1066 static void
1067 linux_nat_detach (char *args, int from_tty)
1068 {
1069 iterate_over_lwps (detach_callback, NULL);
1070
1071 /* Only the initial process should be left right now. */
1072 gdb_assert (num_lwps == 1);
1073
1074 trap_ptid = null_ptid;
1075
1076 /* Destroy LWP info; it's no longer valid. */
1077 init_lwp_list ();
1078
1079 /* Restore the original signal mask. */
1080 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1081 sigemptyset (&blocked_mask);
1082
1083 inferior_ptid = pid_to_ptid (GET_PID (inferior_ptid));
1084 linux_ops->to_detach (args, from_tty);
1085 }
1086
1087 /* Resume LP. */
1088
1089 static int
1090 resume_callback (struct lwp_info *lp, void *data)
1091 {
1092 if (lp->stopped && lp->status == 0)
1093 {
1094 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1095 0, TARGET_SIGNAL_0);
1096 if (debug_linux_nat)
1097 fprintf_unfiltered (gdb_stdlog,
1098 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1099 target_pid_to_str (lp->ptid));
1100 lp->stopped = 0;
1101 lp->step = 0;
1102 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1103 }
1104
1105 return 0;
1106 }
1107
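/* Callbacks for iterate_over_lwps that clear or set the `resumed'
   flag of an LWP.  */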
1108 static int
1109 resume_clear_callback (struct lwp_info *lp, void *data)
1110 {
1111 lp->resumed = 0;
1112 return 0;
1113 }
1114
1115 static int
1116 resume_set_callback (struct lwp_info *lp, void *data)
1117 {
1118 lp->resumed = 1;
1119 return 0;
1120 }
1121
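/* Resume execution of the inferior: resume PTID (or, if its pid is
   -1, every resumed LWP) with signal SIGNO, single-stepping if STEP
   is non-zero.  If the event thread already has a wait status
   pending, short-circuit the resume and leave the status for
   linux_nat_wait to report.  */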
1122 static void
1123 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1124 {
1125 struct lwp_info *lp;
1126 int resume_all;
1127
1128 if (debug_linux_nat)
1129 fprintf_unfiltered (gdb_stdlog,
1130 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1131 step ? "step" : "resume",
1132 target_pid_to_str (ptid),
1133 signo ? strsignal (signo) : "0",
1134 target_pid_to_str (inferior_ptid));
1135
1136 prune_lwps ();
1137
1138 	  /* A specific PTID means `resume only this LWP'.  */
1139 resume_all = (PIDGET (ptid) == -1);
1140
1141 if (resume_all)
1142 iterate_over_lwps (resume_set_callback, NULL);
1143 else
1144 iterate_over_lwps (resume_clear_callback, NULL);
1145
1146 /* If PID is -1, it's the current inferior that should be
1147 handled specially. */
1148 if (PIDGET (ptid) == -1)
1149 ptid = inferior_ptid;
1150
1151 lp = find_lwp_pid (ptid);
1152 gdb_assert (lp != NULL);
1153
1154 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1155
1156 /* Remember if we're stepping. */
1157 lp->step = step;
1158
1159 /* Mark this LWP as resumed. */
1160 lp->resumed = 1;
1161
1162 /* If we have a pending wait status for this thread, there is no
1163 point in resuming the process. But first make sure that
1164 linux_nat_wait won't preemptively handle the event - we
1165 should never take this short-circuit if we are going to
1166 leave LP running, since we have skipped resuming all the
1167 other threads. This bit of code needs to be synchronized
1168 with linux_nat_wait. */
1169
1170 if (lp->status && WIFSTOPPED (lp->status))
1171 {
1172 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1173
1174 if (signal_stop_state (saved_signo) == 0
1175 && signal_print_state (saved_signo) == 0
1176 && signal_pass_state (saved_signo) == 1)
1177 {
1178 if (debug_linux_nat)
1179 fprintf_unfiltered (gdb_stdlog,
1180 "LLR: Not short circuiting for ignored "
1181 "status 0x%x\n", lp->status);
1182
1183 /* FIXME: What should we do if we are supposed to continue
1184 this thread with a signal? */
1185 gdb_assert (signo == TARGET_SIGNAL_0);
1186 signo = saved_signo;
1187 lp->status = 0;
1188 }
1189 }
1190
1191 if (lp->status)
1192 {
1193 /* FIXME: What should we do if we are supposed to continue
1194 this thread with a signal? */
1195 gdb_assert (signo == TARGET_SIGNAL_0);
1196
1197 if (debug_linux_nat)
1198 fprintf_unfiltered (gdb_stdlog,
1199 "LLR: Short circuiting for status 0x%x\n",
1200 lp->status);
1201
1202 return;
1203 }
1204
1205 /* Mark LWP as not stopped to prevent it from being continued by
1206 resume_callback. */
1207 lp->stopped = 0;
1208
1209 if (resume_all)
1210 iterate_over_lwps (resume_callback, NULL);
1211
1212 linux_ops->to_resume (ptid, step, signo);
1213 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1214
1215 if (debug_linux_nat)
1216 fprintf_unfiltered (gdb_stdlog,
1217 "LLR: %s %s, %s (resume event thread)\n",
1218 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1219 target_pid_to_str (ptid),
1220 signo ? strsignal (signo) : "0");
1221 }
1222
1223 /* Issue kill to specified lwp. */
1224
1225 static int tkill_failed;
1226
1227 static int
1228 kill_lwp (int lwpid, int signo)
1229 {
1230 errno = 0;
1231
1232 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1233 fails, then we are not using nptl threads and we should be using kill. */
1234
1235 #ifdef HAVE_TKILL_SYSCALL
1236 if (!tkill_failed)
1237 {
1238 int ret = syscall (__NR_tkill, lwpid, signo);
1239 if (errno != ENOSYS)
1240 return ret;
1241 errno = 0;
1242 tkill_failed = 1;
1243 }
1244 #endif
1245
1246 return kill (lwpid, signo);
1247 }
1248
1249 /* Handle a GNU/Linux extended wait response. If we see a clone
1250 event, we need to add the new LWP to our list (and not report the
1251 trap to higher layers). This function returns non-zero if the
1252 event should be ignored and we should wait again. If STOPPING is
1253 true, the new LWP remains stopped, otherwise it is continued. */
1254
1255 static int
1256 linux_handle_extended_wait (struct lwp_info *lp, int status,
1257 int stopping)
1258 {
1259 int pid = GET_LWP (lp->ptid);
1260 struct target_waitstatus *ourstatus = &lp->waitstatus;
1261 struct lwp_info *new_lp = NULL;
1262 int event = status >> 16;
1263
1264 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1265 || event == PTRACE_EVENT_CLONE)
1266 {
1267 unsigned long new_pid;
1268 int ret;
1269
1270 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1271
1272 /* If we haven't already seen the new PID stop, wait for it now. */
1273 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1274 {
1275 /* The new child has a pending SIGSTOP. We can't affect it until it
1276 hits the SIGSTOP, but we're already attached. */
1277 ret = my_waitpid (new_pid, &status,
1278 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1279 if (ret == -1)
1280 perror_with_name (_("waiting for new child"));
1281 else if (ret != new_pid)
1282 internal_error (__FILE__, __LINE__,
1283 _("wait returned unexpected PID %d"), ret);
1284 else if (!WIFSTOPPED (status))
1285 internal_error (__FILE__, __LINE__,
1286 _("wait returned unexpected status 0x%x"), status);
1287 }
1288
1289 ourstatus->value.related_pid = new_pid;
1290
1291 if (event == PTRACE_EVENT_FORK)
1292 ourstatus->kind = TARGET_WAITKIND_FORKED;
1293 else if (event == PTRACE_EVENT_VFORK)
1294 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1295 else
1296 {
1297 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1298 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1299 new_lp->cloned = 1;
1300
1301 if (WSTOPSIG (status) != SIGSTOP)
1302 {
1303 	          /* This can happen if someone starts sending signals with
1304 	             a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1305 	             thread before it gets a chance to run.  This is an
1306 	             unlikely case, and harder to handle for fork / vfork
1307 	             than for clone, so we do not try - but we handle it
1308 	             for clone events here.  We'll send the other signal on
1309 	             to the thread below.  */
1310
1311 new_lp->signalled = 1;
1312 }
1313 else
1314 status = 0;
1315
1316 if (stopping)
1317 new_lp->stopped = 1;
1318 else
1319 {
1320 new_lp->resumed = 1;
1321 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
1322 status ? WSTOPSIG (status) : 0);
1323 }
1324
1325 if (debug_linux_nat)
1326 fprintf_unfiltered (gdb_stdlog,
1327 "LHEW: Got clone event from LWP %ld, resuming\n",
1328 GET_LWP (lp->ptid));
1329 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1330
1331 return 1;
1332 }
1333
1334 return 0;
1335 }
1336
1337 if (event == PTRACE_EVENT_EXEC)
1338 {
1339 ourstatus->kind = TARGET_WAITKIND_EXECD;
1340 ourstatus->value.execd_pathname
1341 = xstrdup (linux_child_pid_to_exec_file (pid));
1342
1343 if (linux_parent_pid)
1344 {
1345 detach_breakpoints (linux_parent_pid);
1346 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1347
1348 linux_parent_pid = 0;
1349 }
1350
1351 return 0;
1352 }
1353
1354 internal_error (__FILE__, __LINE__,
1355 _("unknown ptrace event %d"), event);
1356 }
1357
1358 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1359 exited. */
1360
1361 static int
1362 wait_lwp (struct lwp_info *lp)
1363 {
1364 pid_t pid;
1365 int status;
1366 int thread_dead = 0;
1367
1368 gdb_assert (!lp->stopped);
1369 gdb_assert (lp->status == 0);
1370
1371 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1372 if (pid == -1 && errno == ECHILD)
1373 {
1374 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1375 if (pid == -1 && errno == ECHILD)
1376 {
1377 /* The thread has previously exited. We need to delete it
1378 now because, for some vendor 2.4 kernels with NPTL
1379 support backported, there won't be an exit event unless
1380 it is the main thread. 2.6 kernels will report an exit
1381 event for each thread that exits, as expected. */
1382 thread_dead = 1;
1383 if (debug_linux_nat)
1384 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1385 target_pid_to_str (lp->ptid));
1386 }
1387 }
1388
1389 if (!thread_dead)
1390 {
1391 gdb_assert (pid == GET_LWP (lp->ptid));
1392
1393 if (debug_linux_nat)
1394 {
1395 fprintf_unfiltered (gdb_stdlog,
1396 "WL: waitpid %s received %s\n",
1397 target_pid_to_str (lp->ptid),
1398 status_to_str (status));
1399 }
1400 }
1401
1402 /* Check if the thread has exited. */
1403 if (WIFEXITED (status) || WIFSIGNALED (status))
1404 {
1405 thread_dead = 1;
1406 if (debug_linux_nat)
1407 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1408 target_pid_to_str (lp->ptid));
1409 }
1410
1411 if (thread_dead)
1412 {
1413 exit_lwp (lp);
1414 return 0;
1415 }
1416
1417 gdb_assert (WIFSTOPPED (status));
1418
1419 /* Handle GNU/Linux's extended waitstatus for trace events. */
1420 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1421 {
1422 if (debug_linux_nat)
1423 fprintf_unfiltered (gdb_stdlog,
1424 "WL: Handling extended status 0x%06x\n",
1425 status);
1426 if (linux_handle_extended_wait (lp, status, 1))
1427 return wait_lwp (lp);
1428 }
1429
1430 return status;
1431 }
1432
1433 /* Save the most recent siginfo for LP. This is currently only called
1434 for SIGTRAP; some ports use the si_addr field for
1435 target_stopped_data_address. In the future, it may also be used to
1436 restore the siginfo of requeued signals. */
1437
1438 static void
1439 save_siginfo (struct lwp_info *lp)
1440 {
1441 errno = 0;
1442 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
1443 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
1444
1445 if (errno != 0)
1446 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1447 }
1448
1449 /* Send a SIGSTOP to LP. */
1450
1451 static int
1452 stop_callback (struct lwp_info *lp, void *data)
1453 {
1454 if (!lp->stopped && !lp->signalled)
1455 {
1456 int ret;
1457
1458 if (debug_linux_nat)
1459 {
1460 fprintf_unfiltered (gdb_stdlog,
1461 "SC: kill %s **<SIGSTOP>**\n",
1462 target_pid_to_str (lp->ptid));
1463 }
1464 errno = 0;
1465 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1466 if (debug_linux_nat)
1467 {
1468 fprintf_unfiltered (gdb_stdlog,
1469 "SC: lwp kill %d %s\n",
1470 ret,
1471 errno ? safe_strerror (errno) : "ERRNO-OK");
1472 }
1473
1474 lp->signalled = 1;
1475 gdb_assert (lp->status == 0);
1476 }
1477
1478 return 0;
1479 }
1480
1481 /* Wait until LP is stopped. If DATA is non-null it is interpreted as
1482 a pointer to a set of signals to be flushed immediately. */
1483
1484 static int
1485 stop_wait_callback (struct lwp_info *lp, void *data)
1486 {
1487 sigset_t *flush_mask = data;
1488
1489 if (!lp->stopped)
1490 {
1491 int status;
1492
1493 status = wait_lwp (lp);
1494 if (status == 0)
1495 return 0;
1496
1497 /* Ignore any signals in FLUSH_MASK. */
1498 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1499 {
1500 if (!lp->signalled)
1501 {
1502 lp->stopped = 1;
1503 return 0;
1504 }
1505
1506 errno = 0;
1507 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1508 if (debug_linux_nat)
1509 fprintf_unfiltered (gdb_stdlog,
1510 "PTRACE_CONT %s, 0, 0 (%s)\n",
1511 target_pid_to_str (lp->ptid),
1512 errno ? safe_strerror (errno) : "OK");
1513
1514 return stop_wait_callback (lp, flush_mask);
1515 }
1516
1517 if (WSTOPSIG (status) != SIGSTOP)
1518 {
1519 if (WSTOPSIG (status) == SIGTRAP)
1520 {
1521 /* If a LWP other than the LWP that we're reporting an
1522 event for has hit a GDB breakpoint (as opposed to
1523 some random trap signal), then just arrange for it to
1524 hit it again later. We don't keep the SIGTRAP status
1525 and don't forward the SIGTRAP signal to the LWP. We
1526 will handle the current event, eventually we will
1527 resume all LWPs, and this one will get its breakpoint
1528 trap again.
1529
1530 If we do not do this, then we run the risk that the
1531 user will delete or disable the breakpoint, but the
1532 thread will have already tripped on it. */
1533
1534 /* Save the trap's siginfo in case we need it later. */
1535 save_siginfo (lp);
1536
1537 /* Now resume this LWP and get the SIGSTOP event. */
1538 errno = 0;
1539 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1540 if (debug_linux_nat)
1541 {
1542 fprintf_unfiltered (gdb_stdlog,
1543 "PTRACE_CONT %s, 0, 0 (%s)\n",
1544 target_pid_to_str (lp->ptid),
1545 errno ? safe_strerror (errno) : "OK");
1546
1547 fprintf_unfiltered (gdb_stdlog,
1548 "SWC: Candidate SIGTRAP event in %s\n",
1549 target_pid_to_str (lp->ptid));
1550 }
1551 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1552 stop_wait_callback (lp, data);
1553 /* If there's another event, throw it back into the queue. */
1554 if (lp->status)
1555 {
1556 if (debug_linux_nat)
1557 {
1558 fprintf_unfiltered (gdb_stdlog,
1559 "SWC: kill %s, %s\n",
1560 target_pid_to_str (lp->ptid),
1561 status_to_str ((int) status));
1562 }
1563 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1564 }
1565 /* Save the sigtrap event. */
1566 lp->status = status;
1567 return 0;
1568 }
1569 else
1570 {
1571 /* The thread was stopped with a signal other than
1572 SIGSTOP, and didn't accidentally trip a breakpoint. */
1573
1574 if (debug_linux_nat)
1575 {
1576 fprintf_unfiltered (gdb_stdlog,
1577 "SWC: Pending event %s in %s\n",
1578 status_to_str ((int) status),
1579 target_pid_to_str (lp->ptid));
1580 }
1581 /* Now resume this LWP and get the SIGSTOP event. */
1582 errno = 0;
1583 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1584 if (debug_linux_nat)
1585 fprintf_unfiltered (gdb_stdlog,
1586 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1587 target_pid_to_str (lp->ptid),
1588 errno ? safe_strerror (errno) : "OK");
1589
1590 /* Hold this event/waitstatus while we check to see if
1591 there are any more (we still want to get that SIGSTOP). */
1592 stop_wait_callback (lp, data);
1593 /* If the lp->status field is still empty, use it to hold
1594 this event. If not, then this event must be returned
1595 to the event queue of the LWP. */
1596 if (lp->status == 0)
1597 lp->status = status;
1598 else
1599 {
1600 if (debug_linux_nat)
1601 {
1602 fprintf_unfiltered (gdb_stdlog,
1603 "SWC: kill %s, %s\n",
1604 target_pid_to_str (lp->ptid),
1605 status_to_str ((int) status));
1606 }
1607 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1608 }
1609 return 0;
1610 }
1611 }
1612 else
1613 {
1614 /* We caught the SIGSTOP that we intended to catch, so
1615 there's no SIGSTOP pending. */
1616 lp->stopped = 1;
1617 lp->signalled = 0;
1618 }
1619 }
1620
1621 return 0;
1622 }
1623
1624 /* Check whether PID has any pending signals in FLUSH_MASK. If so set
1625 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1626
1627 static int
1628 linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1629 {
1630 sigset_t blocked, ignored;
1631 int i;
1632
1633 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1634
1635 if (!flush_mask)
1636 return 0;
1637
1638 for (i = 1; i < NSIG; i++)
1639 if (sigismember (pending, i))
1640 if (!sigismember (flush_mask, i)
1641 || sigismember (&blocked, i)
1642 || sigismember (&ignored, i))
1643 sigdelset (pending, i);
1644
1645 if (sigisemptyset (pending))
1646 return 0;
1647
1648 return 1;
1649 }
1650
1651 /* DATA is interpreted as a mask of signals to flush. If LP has
1652 signals pending, and they are all in the flush mask, then arrange
1653 to flush them. LP should be stopped, as should all other threads
1654 it might share a signal queue with. */
1655
1656 static int
1657 flush_callback (struct lwp_info *lp, void *data)
1658 {
1659 sigset_t *flush_mask = data;
1660 sigset_t pending, intersection, blocked, ignored;
1661 int pid, status;
1662
1663 /* Normally, when an LWP exits, it is removed from the LWP list. The
1664 last LWP isn't removed till later, however. So if there is only
1665 one LWP on the list, make sure it's alive. */
1666 if (lwp_list == lp && lp->next == NULL)
1667 if (!linux_nat_thread_alive (lp->ptid))
1668 return 0;
1669
1670 /* Just because the LWP is stopped doesn't mean that new signals
1671 can't arrive from outside, so this function must be careful of
1672 race conditions. However, because all threads are stopped, we
1673 can assume that the pending mask will not shrink unless we resume
1674 the LWP, and that it will then get another signal. We can't
1675 control which one, however. */
1676
1677 if (lp->status)
1678 {
1679 if (debug_linux_nat)
1680 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
1681 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1682 lp->status = 0;
1683 }
1684
1685 /* While there is a pending signal we would like to flush, continue
1686 the inferior and collect another signal. But if there's already
1687 a saved status that we don't want to flush, we can't resume the
1688 inferior - if it stopped for some other reason we wouldn't have
1689 anywhere to save the new status. In that case, we must leave the
1690 signal unflushed (and possibly generate an extra SIGINT stop).
1691 That's much less bad than losing a signal. */
1692 while (lp->status == 0
1693 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
1694 {
1695 int ret;
1696
1697 errno = 0;
1698 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1699 if (debug_linux_nat)
1700 fprintf_unfiltered (gdb_stderr,
1701 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1702
1703 lp->stopped = 0;
1704 stop_wait_callback (lp, flush_mask);
1705 if (debug_linux_nat)
1706 fprintf_unfiltered (gdb_stderr,
1707 "FC: Wait finished; saved status is %d\n",
1708 lp->status);
1709 }
1710
1711 return 0;
1712 }
1713
1714 /* Return non-zero if LP has a wait status pending. */
1715
1716 static int
1717 status_callback (struct lwp_info *lp, void *data)
1718 {
1719 /* Only report a pending wait status if we pretend that this has
1720 indeed been resumed. */
1721 return (lp->status != 0 && lp->resumed);
1722 }
1723
1724 /* Return non-zero if LP isn't stopped. */
1725
1726 static int
1727 running_callback (struct lwp_info *lp, void *data)
1728 {
1729 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1730 }
1731
1732 	/* Count the LWPs that have had events.  */
1733
1734 static int
1735 count_events_callback (struct lwp_info *lp, void *data)
1736 {
1737 int *count = data;
1738
1739 gdb_assert (count != NULL);
1740
1741 /* Count only LWPs that have a SIGTRAP event pending. */
1742 if (lp->status != 0
1743 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1744 (*count)++;
1745
1746 return 0;
1747 }
1748
1749 /* Select the LWP (if any) that is currently being single-stepped. */
1750
1751 static int
1752 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1753 {
1754 if (lp->step && lp->status != 0)
1755 return 1;
1756 else
1757 return 0;
1758 }
1759
1760 /* Select the Nth LWP that has had a SIGTRAP event. */
1761
1762 static int
1763 select_event_lwp_callback (struct lwp_info *lp, void *data)
1764 {
1765 int *selector = data;
1766
1767 gdb_assert (selector != NULL);
1768
1769 /* Select only LWPs that have a SIGTRAP event pending. */
1770 if (lp->status != 0
1771 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1772 if ((*selector)-- == 0)
1773 return 1;
1774
1775 return 0;
1776 }
1777
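/* Callback for iterate_over_lwps: if LP, an LWP other than the event
   LWP passed in DATA, has stopped at a GDB breakpoint, back its PC up
   and discard the SIGTRAP so that it hits the breakpoint again when
   resumed.  */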
1778 static int
1779 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1780 {
1781 struct lwp_info *event_lp = data;
1782
1783 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1784 if (lp == event_lp)
1785 return 0;
1786
1787 /* If a LWP other than the LWP that we're reporting an event for has
1788 hit a GDB breakpoint (as opposed to some random trap signal),
1789 then just arrange for it to hit it again later. We don't keep
1790 the SIGTRAP status and don't forward the SIGTRAP signal to the
1791 LWP. We will handle the current event, eventually we will resume
1792 all LWPs, and this one will get its breakpoint trap again.
1793
1794 If we do not do this, then we run the risk that the user will
1795 delete or disable the breakpoint, but the LWP will have already
1796 tripped on it. */
1797
1798 if (lp->status != 0
1799 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
1800 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
1801 gdbarch_decr_pc_after_break
1802 (current_gdbarch)))
1803 {
1804 if (debug_linux_nat)
1805 fprintf_unfiltered (gdb_stdlog,
1806 "CBC: Push back breakpoint for %s\n",
1807 target_pid_to_str (lp->ptid));
1808
1809 /* Back up the PC if necessary. */
1810 if (gdbarch_decr_pc_after_break (current_gdbarch))
1811 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
1812 (current_gdbarch),
1813 lp->ptid);
1814
1815 /* Throw away the SIGTRAP. */
1816 lp->status = 0;
1817 }
1818
1819 return 0;
1820 }
1821
1822 /* Select one LWP out of those that have events pending. */
1823
1824 static void
1825 select_event_lwp (struct lwp_info **orig_lp, int *status)
1826 {
1827 int num_events = 0;
1828 int random_selector;
1829 struct lwp_info *event_lp;
1830
1831 /* Record the wait status for the original LWP. */
1832 (*orig_lp)->status = *status;
1833
1834 /* Give preference to any LWP that is being single-stepped. */
1835 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
1836 if (event_lp != NULL)
1837 {
1838 if (debug_linux_nat)
1839 fprintf_unfiltered (gdb_stdlog,
1840 "SEL: Select single-step %s\n",
1841 target_pid_to_str (event_lp->ptid));
1842 }
1843 else
1844 {
1845 /* No single-stepping LWP. Select one at random, out of those
1846 which have had SIGTRAP events. */
1847
1848 /* First see how many SIGTRAP events we have. */
1849 iterate_over_lwps (count_events_callback, &num_events);
1850
1851 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
1852 random_selector = (int)
1853 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
1854
1855 if (debug_linux_nat && num_events > 1)
1856 fprintf_unfiltered (gdb_stdlog,
1857 "SEL: Found %d SIGTRAP events, selecting #%d\n",
1858 num_events, random_selector);
1859
1860 event_lp = iterate_over_lwps (select_event_lwp_callback,
1861 &random_selector);
1862 }
1863
1864 if (event_lp != NULL)
1865 {
1866 /* Switch the event LWP. */
1867 *orig_lp = event_lp;
1868 *status = event_lp->status;
1869 }
1870
1871 /* Flush the wait status for the event LWP. */
1872 (*orig_lp)->status = 0;
1873 }
1874
1875 /* Return non-zero if LP has been resumed. */
1876
1877 static int
1878 resumed_callback (struct lwp_info *lp, void *data)
1879 {
1880 return lp->resumed;
1881 }
1882
1883 /* Stop an active thread, verify it still exists, then resume it. */
1884
1885 static int
1886 stop_and_resume_callback (struct lwp_info *lp, void *data)
1887 {
1888 struct lwp_info *ptr;
1889
1890 if (!lp->stopped && !lp->signalled)
1891 {
1892 stop_callback (lp, NULL);
1893 stop_wait_callback (lp, NULL);
1894 /* Resume if the lwp still exists. */
1895 for (ptr = lwp_list; ptr; ptr = ptr->next)
1896 if (lp == ptr)
1897 {
1898 resume_callback (lp, NULL);
1899 resume_set_callback (lp, NULL);
1900 }
1901 }
1902 return 0;
1903 }
1904
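/* Wait for an event from the LWPs indicated by PTID and report it in
   OURSTATUS.  Pending wait statuses recorded on the LWP list are
   reported before calling waitpid.  */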
1905 static ptid_t
1906 linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
1907 {
1908 struct lwp_info *lp = NULL;
1909 int options = 0;
1910 int status = 0;
1911 pid_t pid = PIDGET (ptid);
1912 sigset_t flush_mask;
1913
1914 /* The first time we get here after starting a new inferior, we may
1915 not have added it to the LWP list yet - this is the earliest
1916 moment at which we know its PID. */
1917 if (num_lwps == 0)
1918 {
1919 gdb_assert (!is_lwp (inferior_ptid));
1920
1921 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
1922 GET_PID (inferior_ptid));
1923 lp = add_lwp (inferior_ptid);
1924 lp->resumed = 1;
1925 }
1926
1927 sigemptyset (&flush_mask);
1928
1929 /* Make sure SIGCHLD is blocked. */
1930 if (!sigismember (&blocked_mask, SIGCHLD))
1931 {
1932 sigaddset (&blocked_mask, SIGCHLD);
1933 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
1934 }
1935
1936 retry:
1937
1938 /* Make sure there is at least one LWP that has been resumed. */
1939 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
1940
1941 /* First check if there is a LWP with a wait status pending. */
1942 if (pid == -1)
1943 {
1944 /* Any LWP that's been resumed will do. */
1945 lp = iterate_over_lwps (status_callback, NULL);
1946 if (lp)
1947 {
1948 status = lp->status;
1949 lp->status = 0;
1950
1951 if (debug_linux_nat && status)
1952 fprintf_unfiltered (gdb_stdlog,
1953 "LLW: Using pending wait status %s for %s.\n",
1954 status_to_str (status),
1955 target_pid_to_str (lp->ptid));
1956 }
1957
1958 /* But if we don't find one, we'll have to wait, and check both
1959 cloned and uncloned processes. We start with the cloned
1960 processes. */
1961 options = __WCLONE | WNOHANG;
1962 }
1963 else if (is_lwp (ptid))
1964 {
1965 if (debug_linux_nat)
1966 fprintf_unfiltered (gdb_stdlog,
1967 "LLW: Waiting for specific LWP %s.\n",
1968 target_pid_to_str (ptid));
1969
1970 /* We have a specific LWP to check. */
1971 lp = find_lwp_pid (ptid);
1972 gdb_assert (lp);
1973 status = lp->status;
1974 lp->status = 0;
1975
1976 if (debug_linux_nat && status)
1977 fprintf_unfiltered (gdb_stdlog,
1978 "LLW: Using pending wait status %s for %s.\n",
1979 status_to_str (status),
1980 target_pid_to_str (lp->ptid));
1981
1982 /* If we have to wait, take into account whether PID is a cloned
1983 process or not. And we have to convert it to something that
1984 the layer beneath us can understand. */
1985 options = lp->cloned ? __WCLONE : 0;
1986 pid = GET_LWP (ptid);
1987 }
1988
1989 if (status && lp->signalled)
1990 {
1991 /* A pending SIGSTOP may interfere with the normal stream of
1992 events. In a typical case where interference is a problem,
1993 we have a SIGSTOP signal pending for LWP A while
1994 single-stepping it, encounter an event in LWP B, and take the
1995 pending SIGSTOP while trying to stop LWP A. After processing
1996 the event in LWP B, LWP A is continued, and we'll never see
1997 the SIGTRAP associated with the last time we were
1998 single-stepping LWP A. */
1999
2000 /* Resume the thread. It should halt immediately returning the
2001 pending SIGSTOP. */
2002 registers_changed ();
2003 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2004 lp->step, TARGET_SIGNAL_0);
2005 if (debug_linux_nat)
2006 fprintf_unfiltered (gdb_stdlog,
2007 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2008 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2009 target_pid_to_str (lp->ptid));
2010 lp->stopped = 0;
2011 gdb_assert (lp->resumed);
2012
2013 /* This should catch the pending SIGSTOP. */
2014 stop_wait_callback (lp, NULL);
2015 }
2016
2017 set_sigint_trap (); /* Causes SIGINT to be passed on to the
2018 attached process. */
2019 set_sigio_trap ();
2020
2021 while (status == 0)
2022 {
2023 pid_t lwpid;
2024
2025 lwpid = my_waitpid (pid, &status, options);
2026 if (lwpid > 0)
2027 {
2028 gdb_assert (pid == -1 || lwpid == pid);
2029
2030 if (debug_linux_nat)
2031 {
2032 fprintf_unfiltered (gdb_stdlog,
2033 "LLW: waitpid %ld received %s\n",
2034 (long) lwpid, status_to_str (status));
2035 }
2036
2037 lp = find_lwp_pid (pid_to_ptid (lwpid));
2038
2039 /* Check for stop events reported by a process we didn't
2040 already know about - anything not already in our LWP
2041 list.
2042
2043 If we're expecting to receive stopped processes after
2044 fork, vfork, and clone events, then we'll just add the
2045 new one to our list and go back to waiting for the event
2046 to be reported - the stopped process might be returned
2047 from waitpid before or after the event is. */
2048 if (WIFSTOPPED (status) && !lp)
2049 {
2050 linux_record_stopped_pid (lwpid, status);
2051 status = 0;
2052 continue;
2053 }
2054
2055 /* Make sure we don't report an event for the exit of an LWP not in
2056 our list, i.e. not part of the current process. This can happen
2057 if we detach from a program we originally forked and then it
2058 exits. */
2059 if (!WIFSTOPPED (status) && !lp)
2060 {
2061 status = 0;
2062 continue;
2063 }
2064
2065 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2066 CLONE_PTRACE processes which do not use the thread library -
2067 otherwise we wouldn't find the new LWP this way. That doesn't
2068 currently work, and the following code is currently unreachable
2069 due to the two blocks above. If it's fixed some day, this code
2070 should be broken out into a function so that we can also pick up
2071 LWPs from the new interface. */
2072 if (!lp)
2073 {
2074 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2075 if (options & __WCLONE)
2076 lp->cloned = 1;
2077
2078 gdb_assert (WIFSTOPPED (status)
2079 && WSTOPSIG (status) == SIGSTOP);
2080 lp->signalled = 1;
2081
2082 if (!in_thread_list (inferior_ptid))
2083 {
2084 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2085 GET_PID (inferior_ptid));
2086 add_thread (inferior_ptid);
2087 }
2088
2089 add_thread (lp->ptid);
2090 }
2091
2092 /* Save the trap's siginfo in case we need it later. */
2093 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2094 save_siginfo (lp);
2095
2096 /* Handle GNU/Linux's extended waitstatus for trace events. */
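/* With any PTRACE_O_TRACE* option enabled, the kernel stores the
   event code (fork, vfork, clone, exec, ...) in the bits above the
   low 16 bits of the wait status, which is what the "status >> 16"
   test below picks up.  */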
2097 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2098 {
2099 if (debug_linux_nat)
2100 fprintf_unfiltered (gdb_stdlog,
2101 "LLW: Handling extended status 0x%06x\n",
2102 status);
2103 if (linux_handle_extended_wait (lp, status, 0))
2104 {
2105 status = 0;
2106 continue;
2107 }
2108 }
2109
2110 /* Check if the thread has exited. */
2111 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2112 {
2113 /* If this is the main thread, we must stop all threads and
2114 verify if they are still alive. This is because in the nptl
2115 thread model, there is no signal issued for exiting LWPs
2116 other than the main thread. We only get the main thread
2117 exit signal once all child threads have already exited.
2118 If we stop all the threads and use the stop_wait_callback
2119 to check if they have exited we can determine whether this
2120 signal should be ignored or whether it means the end of the
2121 debugged application, regardless of which threading model
2122 is being used. */
2123 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2124 {
2125 lp->stopped = 1;
2126 iterate_over_lwps (stop_and_resume_callback, NULL);
2127 }
2128
2129 if (debug_linux_nat)
2130 fprintf_unfiltered (gdb_stdlog,
2131 "LLW: %s exited.\n",
2132 target_pid_to_str (lp->ptid));
2133
2134 exit_lwp (lp);
2135
2136 /* If there is at least one more LWP, then the exit signal
2137 was not the end of the debugged application and should be
2138 ignored. */
2139 if (num_lwps > 0)
2140 {
2141 /* Make sure there is at least one thread running. */
2142 gdb_assert (iterate_over_lwps (running_callback, NULL));
2143
2144 /* Discard the event. */
2145 status = 0;
2146 continue;
2147 }
2148 }
2149
2150 /* Check if the current LWP has previously exited. In the nptl
2151 thread model, LWPs other than the main thread do not issue
2152 signals when they exit so we must check whenever the thread
2153 has stopped. A similar check is made in stop_wait_callback(). */
2154 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2155 {
2156 if (debug_linux_nat)
2157 fprintf_unfiltered (gdb_stdlog,
2158 "LLW: %s exited.\n",
2159 target_pid_to_str (lp->ptid));
2160
2161 exit_lwp (lp);
2162
2163 /* Make sure there is at least one thread running. */
2164 gdb_assert (iterate_over_lwps (running_callback, NULL));
2165
2166 /* Discard the event. */
2167 status = 0;
2168 continue;
2169 }
2170
2171 /* Make sure we don't report a SIGSTOP that we sent
2172 ourselves in an attempt to stop an LWP. */
2173 if (lp->signalled
2174 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2175 {
2176 if (debug_linux_nat)
2177 fprintf_unfiltered (gdb_stdlog,
2178 "LLW: Delayed SIGSTOP caught for %s.\n",
2179 target_pid_to_str (lp->ptid));
2180
2181 /* This is a delayed SIGSTOP. */
2182 lp->signalled = 0;
2183
2184 registers_changed ();
2185 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2186 lp->step, TARGET_SIGNAL_0);
2187 if (debug_linux_nat)
2188 fprintf_unfiltered (gdb_stdlog,
2189 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2190 lp->step ?
2191 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2192 target_pid_to_str (lp->ptid));
2193
2194 lp->stopped = 0;
2195 gdb_assert (lp->resumed);
2196
2197 /* Discard the event. */
2198 status = 0;
2199 continue;
2200 }
2201
2202 break;
2203 }
2204
2205 if (pid == -1)
2206 {
2207 /* Alternate between checking cloned and uncloned processes. */
2208 options ^= __WCLONE;
2209
2210 /* And suspend every time we have checked both. */
2211 if (options & __WCLONE)
2212 sigsuspend (&suspend_mask);
2213 }
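/* A single waitpid call here only sees one flavor of child
   (__WCLONE or not), so the loop polls each flavor with WNOHANG and,
   once both have been tried, sleeps in sigsuspend until the no-op
   SIGCHLD handler installed below wakes it up again.  */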
2214
2215 /* We shouldn't end up here unless we want to try again. */
2216 gdb_assert (status == 0);
2217 }
2218
2219 clear_sigio_trap ();
2220 clear_sigint_trap ();
2221
2222 gdb_assert (lp);
2223
2224 /* Don't report signals that GDB isn't interested in, such as
2225 signals that are neither printed nor stopped upon. Stopping all
2226 threads can be a bit time-consuming, so if we want decent
2227 performance with heavily multi-threaded programs, especially when
2228 they're using a high-frequency timer, we'd better avoid it if we
2229 can. */
2230
2231 if (WIFSTOPPED (status))
2232 {
2233 int signo = target_signal_from_host (WSTOPSIG (status));
2234
2235 /* If we get a signal while single-stepping, we may need special
2236 care, e.g. to skip the signal handler. Defer to common code. */
2237 if (!lp->step
2238 && signal_stop_state (signo) == 0
2239 && signal_print_state (signo) == 0
2240 && signal_pass_state (signo) == 1)
2241 {
2242 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2243 here? It is not clear we should. GDB may not expect
2244 other threads to run. On the other hand, not resuming
2245 newly attached threads may cause an unwanted delay in
2246 getting them running. */
2247 registers_changed ();
2248 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2249 lp->step, signo);
2250 if (debug_linux_nat)
2251 fprintf_unfiltered (gdb_stdlog,
2252 "LLW: %s %s, %s (preempt 'handle')\n",
2253 lp->step ?
2254 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2255 target_pid_to_str (lp->ptid),
2256 signo ? strsignal (signo) : "0");
2257 lp->stopped = 0;
2258 status = 0;
2259 goto retry;
2260 }
2261
2262 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2263 {
2264 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2265 forwarded to the entire process group, that is, all LWP's
2266 will receive it. Since we only want to report it once,
2267 we try to flush it from all LWPs except this one. */
2268 sigaddset (&flush_mask, SIGINT);
2269 }
2270 }
2271
2272 /* This LWP is stopped now. */
2273 lp->stopped = 1;
2274
2275 if (debug_linux_nat)
2276 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2277 status_to_str (status), target_pid_to_str (lp->ptid));
2278
2279 /* Now stop all other LWP's ... */
2280 iterate_over_lwps (stop_callback, NULL);
2281
2282 /* ... and wait until all of them have reported back that they're no
2283 longer running. */
2284 iterate_over_lwps (stop_wait_callback, &flush_mask);
2285 iterate_over_lwps (flush_callback, &flush_mask);
2286
2287 /* If we're not waiting for a specific LWP, choose an event LWP from
2288 among those that have had events. Giving equal priority to all
2289 LWPs that have had events helps prevent starvation. */
2290 if (pid == -1)
2291 select_event_lwp (&lp, &status);
2292
2293 /* Now that we've selected our final event LWP, cancel any
2294 breakpoints in other LWPs that have hit a GDB breakpoint. See
2295 the comment in cancel_breakpoints_callback to find out why. */
2296 iterate_over_lwps (cancel_breakpoints_callback, lp);
2297
2298 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2299 {
2300 trap_ptid = lp->ptid;
2301 if (debug_linux_nat)
2302 fprintf_unfiltered (gdb_stdlog,
2303 "LLW: trap_ptid is %s.\n",
2304 target_pid_to_str (trap_ptid));
2305 }
2306 else
2307 trap_ptid = null_ptid;
2308
2309 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2310 {
2311 *ourstatus = lp->waitstatus;
2312 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
2313 }
2314 else
2315 store_waitstatus (ourstatus, status);
2316
2317 return lp->ptid;
2318 }
2319
2320 static int
2321 kill_callback (struct lwp_info *lp, void *data)
2322 {
2323 errno = 0;
2324 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
2325 if (debug_linux_nat)
2326 fprintf_unfiltered (gdb_stdlog,
2327 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
2328 target_pid_to_str (lp->ptid),
2329 errno ? safe_strerror (errno) : "OK");
2330
2331 return 0;
2332 }
2333
2334 static int
2335 kill_wait_callback (struct lwp_info *lp, void *data)
2336 {
2337 pid_t pid;
2338
2339 /* We must make sure that there are no pending events (delayed
2340 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
2341 program doesn't interfere with any following debugging session. */
2342
2343 /* For cloned processes we must check both with __WCLONE and
2344 without, since the exit status of a cloned process isn't reported
2345 with __WCLONE. */
2346 if (lp->cloned)
2347 {
2348 do
2349 {
2350 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
2351 if (pid != (pid_t) -1 && debug_linux_nat)
2352 {
2353 fprintf_unfiltered (gdb_stdlog,
2354 "KWC: wait %s received unknown.\n",
2355 target_pid_to_str (lp->ptid));
2356 }
2357 }
2358 while (pid == GET_LWP (lp->ptid));
2359
2360 gdb_assert (pid == -1 && errno == ECHILD);
2361 }
2362
2363 do
2364 {
2365 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
2366 if (pid != (pid_t) -1 && debug_linux_nat)
2367 {
2368 fprintf_unfiltered (gdb_stdlog,
2369 "KWC: wait %s received unk.\n",
2370 target_pid_to_str (lp->ptid));
2371 }
2372 }
2373 while (pid == GET_LWP (lp->ptid));
2374
2375 gdb_assert (pid == -1 && errno == ECHILD);
2376 return 0;
2377 }
2378
2379 static void
2380 linux_nat_kill (void)
2381 {
2382 struct target_waitstatus last;
2383 ptid_t last_ptid;
2384 int status;
2385
2386 /* If we're stopped while forking and we haven't followed yet,
2387 kill the other task. We need to do this first because the
2388 parent will be sleeping if this is a vfork. */
2389
2390 get_last_target_status (&last_ptid, &last);
2391
2392 if (last.kind == TARGET_WAITKIND_FORKED
2393 || last.kind == TARGET_WAITKIND_VFORKED)
2394 {
2395 ptrace (PT_KILL, last.value.related_pid, 0, 0);
2396 wait (&status);
2397 }
2398
2399 if (forks_exist_p ())
2400 linux_fork_killall ();
2401 else
2402 {
2403 /* Kill all LWP's ... */
2404 iterate_over_lwps (kill_callback, NULL);
2405
2406 /* ... and wait until we've flushed all events. */
2407 iterate_over_lwps (kill_wait_callback, NULL);
2408 }
2409
2410 target_mourn_inferior ();
2411 }
2412
2413 static void
2414 linux_nat_mourn_inferior (void)
2415 {
2416 trap_ptid = null_ptid;
2417
2418 /* Destroy LWP info; it's no longer valid. */
2419 init_lwp_list ();
2420
2421 /* Restore the original signal mask. */
2422 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
2423 sigemptyset (&blocked_mask);
2424
2425 if (! forks_exist_p ())
2426 /* Normal case, no other forks available. */
2427 linux_ops->to_mourn_inferior ();
2428 else
2429 /* Multi-fork case. The current inferior_ptid has exited, but
2430 there are other viable forks to debug. Delete the exiting
2431 one and context-switch to the first available. */
2432 linux_fork_mourn_inferior ();
2433 }
2434
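/* to_xfer_partial for the multi-threaded target.  The ptrace-based
   layer beneath us keys its requests off the pid slot of
   inferior_ptid, so temporarily move the LWP id into that slot
   before delegating.  */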
2435 static LONGEST
2436 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
2437 const char *annex, gdb_byte *readbuf,
2438 const gdb_byte *writebuf,
2439 ULONGEST offset, LONGEST len)
2440 {
2441 struct cleanup *old_chain = save_inferior_ptid ();
2442 LONGEST xfer;
2443
2444 if (is_lwp (inferior_ptid))
2445 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
2446
2447 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
2448 offset, len);
2449
2450 do_cleanups (old_chain);
2451 return xfer;
2452 }
2453
2454 static int
2455 linux_nat_thread_alive (ptid_t ptid)
2456 {
2457 gdb_assert (is_lwp (ptid));
2458
2459 errno = 0;
2460 ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
2461 if (debug_linux_nat)
2462 fprintf_unfiltered (gdb_stdlog,
2463 "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
2464 target_pid_to_str (ptid),
2465 errno ? safe_strerror (errno) : "OK");
2466
2467 /* Not every Linux kernel implements PTRACE_PEEKUSER. But we can
2468 handle that case gracefully since ptrace will first do a lookup
2469 for the process based upon the passed-in pid. If that fails we
2470 will get either -ESRCH or -EPERM, otherwise the child exists and
2471 is alive. */
2472 if (errno == ESRCH || errno == EPERM)
2473 return 0;
2474
2475 return 1;
2476 }
2477
2478 static char *
2479 linux_nat_pid_to_str (ptid_t ptid)
2480 {
2481 static char buf[64];
2482
2483 if (lwp_list && lwp_list->next && is_lwp (ptid))
2484 {
2485 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2486 return buf;
2487 }
2488
2489 return normal_pid_to_str (ptid);
2490 }
2491
2492 static void
2493 sigchld_handler (int signo)
2494 {
2495 /* Do nothing. The only reason for this handler is that it allows
2496 us to use sigsuspend in linux_nat_wait above to wait for the
2497 arrival of a SIGCHLD. */
2498 }
2499
2500 /* Accepts an integer PID; returns a string representing a file that
2501 can be opened to get the symbols for the child process. */
2502
2503 static char *
2504 linux_child_pid_to_exec_file (int pid)
2505 {
2506 char *name1, *name2;
2507
2508 name1 = xmalloc (MAXPATHLEN);
2509 name2 = xmalloc (MAXPATHLEN);
2510 make_cleanup (xfree, name1);
2511 make_cleanup (xfree, name2);
2512 memset (name2, 0, MAXPATHLEN);
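/* readlink does not NUL-terminate its result; the zero fill above is
   what guarantees name2 ends up as a proper string, provided the
   link target is shorter than MAXPATHLEN.  */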
2513
2514 sprintf (name1, "/proc/%d/exe", pid);
2515 if (readlink (name1, name2, MAXPATHLEN) > 0)
2516 return name2;
2517 else
2518 return name1;
2519 }
2520
2521 /* Service function for corefiles and info proc. */
2522
2523 static int
2524 read_mapping (FILE *mapfile,
2525 long long *addr,
2526 long long *endaddr,
2527 char *permissions,
2528 long long *offset,
2529 char *device, long long *inode, char *filename)
2530 {
2531 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2532 addr, endaddr, permissions, offset, device, inode);
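/* For reference, a /proc/PID/maps line looks roughly like
     08048000-08056000 r-xp 00000000 03:0c 64593   /usr/bin/foo
   (address range, permissions, offset, device, inode, optional
   filename); the filename part is picked up separately below.  */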
2533
2534 filename[0] = '\0';
2535 if (ret > 0 && ret != EOF)
2536 {
2537 /* Eat everything up to EOL for the filename. This will prevent
2538 weird filenames (such as one with embedded whitespace) from
2539 confusing this code. It also makes this code more robust with
2540 respect to annotations the kernel may add after the filename.
2541
2542 Note the filename is used for informational purposes
2543 only. */
2544 ret += fscanf (mapfile, "%[^\n]\n", filename);
2545 }
2546
2547 return (ret != 0 && ret != EOF);
2548 }
2549
2550 /* Fills the "to_find_memory_regions" target vector. Lists the memory
2551 regions in the inferior for a corefile. */
2552
2553 static int
2554 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
2555 unsigned long,
2556 int, int, int, void *), void *obfd)
2557 {
2558 long long pid = PIDGET (inferior_ptid);
2559 char mapsfilename[MAXPATHLEN];
2560 FILE *mapsfile;
2561 long long addr, endaddr, size, offset, inode;
2562 char permissions[8], device[8], filename[MAXPATHLEN];
2563 int read, write, exec;
2564 int ret;
2565
2566 /* Compose the filename for the /proc memory map, and open it. */
2567 sprintf (mapsfilename, "/proc/%lld/maps", pid);
2568 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
2569 error (_("Could not open %s."), mapsfilename);
2570
2571 if (info_verbose)
2572 fprintf_filtered (gdb_stdout,
2573 "Reading memory regions from %s\n", mapsfilename);
2574
2575 /* Now iterate until end-of-file. */
2576 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
2577 &offset, &device[0], &inode, &filename[0]))
2578 {
2579 size = endaddr - addr;
2580
2581 /* Get the segment's permissions. */
2582 read = (strchr (permissions, 'r') != 0);
2583 write = (strchr (permissions, 'w') != 0);
2584 exec = (strchr (permissions, 'x') != 0);
2585
2586 if (info_verbose)
2587 {
2588 fprintf_filtered (gdb_stdout,
2589 "Save segment, %lld bytes at 0x%s (%c%c%c)",
2590 size, paddr_nz (addr),
2591 read ? 'r' : ' ',
2592 write ? 'w' : ' ', exec ? 'x' : ' ');
2593 if (filename[0])
2594 fprintf_filtered (gdb_stdout, " for %s", filename);
2595 fprintf_filtered (gdb_stdout, "\n");
2596 }
2597
2598 /* Invoke the callback function to create the corefile
2599 segment. */
2600 func (addr, size, read, write, exec, obfd);
2601 }
2602 fclose (mapsfile);
2603 return 0;
2604 }
2605
2606 /* Records the thread's register state for the corefile note
2607 section. */
2608
2609 static char *
2610 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2611 char *note_data, int *note_size)
2612 {
2613 gdb_gregset_t gregs;
2614 gdb_fpregset_t fpregs;
2615 #ifdef FILL_FPXREGSET
2616 gdb_fpxregset_t fpxregs;
2617 #endif
2618 unsigned long lwp = ptid_get_lwp (ptid);
2619 struct regcache *regcache = get_thread_regcache (ptid);
2620 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2621 const struct regset *regset;
2622 int core_regset_p;
2623 struct cleanup *old_chain;
2624
2625 old_chain = save_inferior_ptid ();
2626 inferior_ptid = ptid;
2627 target_fetch_registers (regcache, -1);
2628 do_cleanups (old_chain);
2629
2630 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
2631 if (core_regset_p
2632 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
2633 sizeof (gregs))) != NULL
2634 && regset->collect_regset != NULL)
2635 regset->collect_regset (regset, regcache, -1,
2636 &gregs, sizeof (gregs));
2637 else
2638 fill_gregset (regcache, &gregs, -1);
2639
2640 note_data = (char *) elfcore_write_prstatus (obfd,
2641 note_data,
2642 note_size,
2643 lwp,
2644 stop_signal, &gregs);
2645
2646 if (core_regset_p
2647 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
2648 sizeof (fpregs))) != NULL
2649 && regset->collect_regset != NULL)
2650 regset->collect_regset (regset, regcache, -1,
2651 &fpregs, sizeof (fpregs));
2652 else
2653 fill_fpregset (regcache, &fpregs, -1);
2654
2655 note_data = (char *) elfcore_write_prfpreg (obfd,
2656 note_data,
2657 note_size,
2658 &fpregs, sizeof (fpregs));
2659
2660 #ifdef FILL_FPXREGSET
2661 if (core_regset_p
2662 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
2663 sizeof (fpxregs))) != NULL
2664 && regset->collect_regset != NULL)
2665 regset->collect_regset (regset, regcache, -1,
2666 &fpxregs, sizeof (fpxregs));
2667 else
2668 fill_fpxregset (regcache, &fpxregs, -1);
2669
2670 note_data = (char *) elfcore_write_prxfpreg (obfd,
2671 note_data,
2672 note_size,
2673 &fpxregs, sizeof (fpxregs));
2674 #endif
2675 return note_data;
2676 }
2677
2678 struct linux_nat_corefile_thread_data
2679 {
2680 bfd *obfd;
2681 char *note_data;
2682 int *note_size;
2683 int num_notes;
2684 };
2685
2686 /* Called via iterate_over_lwps once per LWP. Records the thread's
2687 register state for the corefile note section. */
2688
2689 static int
2690 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
2691 {
2692 struct linux_nat_corefile_thread_data *args = data;
2693
2694 args->note_data = linux_nat_do_thread_registers (args->obfd,
2695 ti->ptid,
2696 args->note_data,
2697 args->note_size);
2698 args->num_notes++;
2699
2700 return 0;
2701 }
2702
2703 /* Records the register state for the corefile note section. */
2704
2705 static char *
2706 linux_nat_do_registers (bfd *obfd, ptid_t ptid,
2707 char *note_data, int *note_size)
2708 {
2709 return linux_nat_do_thread_registers (obfd,
2710 ptid_build (ptid_get_pid (inferior_ptid),
2711 ptid_get_pid (inferior_ptid),
2712 0),
2713 note_data, note_size);
2714 }
2715
2716 /* Fills the "to_make_corefile_note" target vector. Builds the note
2717 section for a corefile, and returns it in a malloc buffer. */
2718
2719 static char *
2720 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
2721 {
2722 struct linux_nat_corefile_thread_data thread_args;
2723 struct cleanup *old_chain;
2724 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
2725 char fname[16] = { '\0' };
2726 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
2727 char psargs[80] = { '\0' };
2728 char *note_data = NULL;
2729 ptid_t current_ptid = inferior_ptid;
2730 gdb_byte *auxv;
2731 int auxv_len;
2732
2733 if (get_exec_file (0))
2734 {
2735 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
2736 strncpy (psargs, get_exec_file (0), sizeof (psargs));
2737 if (get_inferior_args ())
2738 {
2739 char *string_end;
2740 char *psargs_end = psargs + sizeof (psargs);
2741
2742 /* linux_elfcore_write_prpsinfo () copes fine with strings that
2743 are not zero-terminated. */
2744 string_end = memchr (psargs, 0, sizeof (psargs));
2745 if (string_end != NULL)
2746 {
2747 *string_end++ = ' ';
2748 strncpy (string_end, get_inferior_args (),
2749 psargs_end - string_end);
2750 }
2751 }
2752 note_data = (char *) elfcore_write_prpsinfo (obfd,
2753 note_data,
2754 note_size, fname, psargs);
2755 }
2756
2757 /* Dump information for threads. */
2758 thread_args.obfd = obfd;
2759 thread_args.note_data = note_data;
2760 thread_args.note_size = note_size;
2761 thread_args.num_notes = 0;
2762 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2763 if (thread_args.num_notes == 0)
2764 {
2765 /* iterate_over_lwps didn't come up with any threads; just
2766 use inferior_ptid. */
2767 note_data = linux_nat_do_registers (obfd, inferior_ptid,
2768 note_data, note_size);
2769 }
2770 else
2771 {
2772 note_data = thread_args.note_data;
2773 }
2774
2775 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
2776 NULL, &auxv);
2777 if (auxv_len > 0)
2778 {
2779 note_data = elfcore_write_note (obfd, note_data, note_size,
2780 "CORE", NT_AUXV, auxv, auxv_len);
2781 xfree (auxv);
2782 }
2783
2784 make_cleanup (xfree, note_data);
2785 return note_data;
2786 }
2787
2788 /* Implement the "info proc" command. */
2789
2790 static void
2791 linux_nat_info_proc_cmd (char *args, int from_tty)
2792 {
2793 long long pid = PIDGET (inferior_ptid);
2794 FILE *procfile;
2795 char **argv = NULL;
2796 char buffer[MAXPATHLEN];
2797 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
2798 int cmdline_f = 1;
2799 int cwd_f = 1;
2800 int exe_f = 1;
2801 int mappings_f = 0;
2802 int environ_f = 0;
2803 int status_f = 0;
2804 int stat_f = 0;
2805 int all = 0;
2806 struct stat dummy;
2807
2808 if (args)
2809 {
2810 /* Break up 'args' into an argv array. */
2811 if ((argv = buildargv (args)) == NULL)
2812 nomem (0);
2813 else
2814 make_cleanup_freeargv (argv);
2815 }
2816 while (argv != NULL && *argv != NULL)
2817 {
2818 if (isdigit (argv[0][0]))
2819 {
2820 pid = strtoul (argv[0], NULL, 10);
2821 }
2822 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
2823 {
2824 mappings_f = 1;
2825 }
2826 else if (strcmp (argv[0], "status") == 0)
2827 {
2828 status_f = 1;
2829 }
2830 else if (strcmp (argv[0], "stat") == 0)
2831 {
2832 stat_f = 1;
2833 }
2834 else if (strcmp (argv[0], "cmd") == 0)
2835 {
2836 cmdline_f = 1;
2837 }
2838 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
2839 {
2840 exe_f = 1;
2841 }
2842 else if (strcmp (argv[0], "cwd") == 0)
2843 {
2844 cwd_f = 1;
2845 }
2846 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
2847 {
2848 all = 1;
2849 }
2850 else
2851 {
2852 /* [...] (future options here) */
2853 }
2854 argv++;
2855 }
2856 if (pid == 0)
2857 error (_("No current process: you must name one."));
2858
2859 sprintf (fname1, "/proc/%lld", pid);
2860 if (stat (fname1, &dummy) != 0)
2861 error (_("No /proc directory: '%s'"), fname1);
2862
2863 printf_filtered (_("process %lld\n"), pid);
2864 if (cmdline_f || all)
2865 {
2866 sprintf (fname1, "/proc/%lld/cmdline", pid);
2867 if ((procfile = fopen (fname1, "r")) != NULL)
2868 {
2869 fgets (buffer, sizeof (buffer), procfile);
2870 printf_filtered ("cmdline = '%s'\n", buffer);
2871 fclose (procfile);
2872 }
2873 else
2874 warning (_("unable to open /proc file '%s'"), fname1);
2875 }
2876 if (cwd_f || all)
2877 {
2878 sprintf (fname1, "/proc/%lld/cwd", pid);
2879 memset (fname2, 0, sizeof (fname2));
2880 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2881 printf_filtered ("cwd = '%s'\n", fname2);
2882 else
2883 warning (_("unable to read link '%s'"), fname1);
2884 }
2885 if (exe_f || all)
2886 {
2887 sprintf (fname1, "/proc/%lld/exe", pid);
2888 memset (fname2, 0, sizeof (fname2));
2889 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
2890 printf_filtered ("exe = '%s'\n", fname2);
2891 else
2892 warning (_("unable to read link '%s'"), fname1);
2893 }
2894 if (mappings_f || all)
2895 {
2896 sprintf (fname1, "/proc/%lld/maps", pid);
2897 if ((procfile = fopen (fname1, "r")) != NULL)
2898 {
2899 long long addr, endaddr, size, offset, inode;
2900 char permissions[8], device[8], filename[MAXPATHLEN];
2901
2902 printf_filtered (_("Mapped address spaces:\n\n"));
2903 if (gdbarch_addr_bit (current_gdbarch) == 32)
2904 {
2905 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
2906 "Start Addr",
2907 " End Addr",
2908 " Size", " Offset", "objfile");
2909 }
2910 else
2911 {
2912 printf_filtered (" %18s %18s %10s %10s %7s\n",
2913 "Start Addr",
2914 " End Addr",
2915 " Size", " Offset", "objfile");
2916 }
2917
2918 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
2919 &offset, &device[0], &inode, &filename[0]))
2920 {
2921 size = endaddr - addr;
2922
2923 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
2924 calls here (and possibly above) should be abstracted
2925 out into their own functions? Andrew suggests using
2926 a generic local_address_string instead to print out
2927 the addresses; that makes sense to me, too. */
2928
2929 if (gdbarch_addr_bit (current_gdbarch) == 32)
2930 {
2931 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
2932 (unsigned long) addr, /* FIXME: pr_addr */
2933 (unsigned long) endaddr,
2934 (int) size,
2935 (unsigned int) offset,
2936 filename[0] ? filename : "");
2937 }
2938 else
2939 {
2940 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
2941 (unsigned long) addr, /* FIXME: pr_addr */
2942 (unsigned long) endaddr,
2943 (int) size,
2944 (unsigned int) offset,
2945 filename[0] ? filename : "");
2946 }
2947 }
2948
2949 fclose (procfile);
2950 }
2951 else
2952 warning (_("unable to open /proc file '%s'"), fname1);
2953 }
2954 if (status_f || all)
2955 {
2956 sprintf (fname1, "/proc/%lld/status", pid);
2957 if ((procfile = fopen (fname1, "r")) != NULL)
2958 {
2959 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2960 puts_filtered (buffer);
2961 fclose (procfile);
2962 }
2963 else
2964 warning (_("unable to open /proc file '%s'"), fname1);
2965 }
2966 if (stat_f || all)
2967 {
2968 sprintf (fname1, "/proc/%lld/stat", pid);
2969 if ((procfile = fopen (fname1, "r")) != NULL)
2970 {
2971 int itmp;
2972 char ctmp;
2973 long ltmp;
2974
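/* The fields below are read in the order proc(5) documents for
   /proc/PID/stat: pid, comm, state, ppid, pgrp, session, tty_nr,
   tpgid, flags, minflt, cminflt, majflt, cmajflt, utime, stime,
   and so on.  */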
2975 if (fscanf (procfile, "%d ", &itmp) > 0)
2976 printf_filtered (_("Process: %d\n"), itmp);
2977 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
2978 printf_filtered (_("Exec file: %s\n"), buffer);
2979 if (fscanf (procfile, "%c ", &ctmp) > 0)
2980 printf_filtered (_("State: %c\n"), ctmp);
2981 if (fscanf (procfile, "%d ", &itmp) > 0)
2982 printf_filtered (_("Parent process: %d\n"), itmp);
2983 if (fscanf (procfile, "%d ", &itmp) > 0)
2984 printf_filtered (_("Process group: %d\n"), itmp);
2985 if (fscanf (procfile, "%d ", &itmp) > 0)
2986 printf_filtered (_("Session id: %d\n"), itmp);
2987 if (fscanf (procfile, "%d ", &itmp) > 0)
2988 printf_filtered (_("TTY: %d\n"), itmp);
2989 if (fscanf (procfile, "%d ", &itmp) > 0)
2990 printf_filtered (_("TTY owner process group: %d\n"), itmp);
2991 if (fscanf (procfile, "%lu ", &ltmp) > 0)
2992 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
2993 if (fscanf (procfile, "%lu ", &ltmp) > 0)
2994 printf_filtered (_("Minor faults (no memory page): %lu\n"),
2995 (unsigned long) ltmp);
2996 if (fscanf (procfile, "%lu ", &ltmp) > 0)
2997 printf_filtered (_("Minor faults, children: %lu\n"),
2998 (unsigned long) ltmp);
2999 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3000 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3001 (unsigned long) ltmp);
3002 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3003 printf_filtered (_("Major faults, children: %lu\n"),
3004 (unsigned long) ltmp);
3005 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3006 printf_filtered (_("utime: %ld\n"), ltmp);
3007 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3008 printf_filtered (_("stime: %ld\n"), ltmp);
3009 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3010 printf_filtered (_("utime, children: %ld\n"), ltmp);
3011 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3012 printf_filtered (_("stime, children: %ld\n"), ltmp);
3013 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3014 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3015 ltmp);
3016 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3017 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3018 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3019 printf_filtered (_("jiffies until next timeout: %lu\n"),
3020 (unsigned long) ltmp);
3021 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3022 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3023 (unsigned long) ltmp);
3024 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3025 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3026 ltmp);
3027 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3028 printf_filtered (_("Virtual memory size: %lu\n"),
3029 (unsigned long) ltmp);
3030 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3031 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3032 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3033 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3034 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3035 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3036 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3037 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3038 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3039 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3040 #if 0 /* Don't know how architecture-dependent the rest is...
3041 Anyway the signal bitmap info is available from "status". */
3042 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3043 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3044 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3045 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3046 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3047 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3048 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3049 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3050 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3051 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3052 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3053 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3054 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3055 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
3056 #endif
3057 fclose (procfile);
3058 }
3059 else
3060 warning (_("unable to open /proc file '%s'"), fname1);
3061 }
3062 }
3063
3064 /* Implement the to_xfer_partial interface for memory reads using the /proc
3065 filesystem. Because we can use a single read() call for /proc, this
3066 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3067 but it doesn't support writes. */
3068
3069 static LONGEST
3070 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3071 const char *annex, gdb_byte *readbuf,
3072 const gdb_byte *writebuf,
3073 ULONGEST offset, LONGEST len)
3074 {
3075 LONGEST ret;
3076 int fd;
3077 char filename[64];
3078
3079 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3080 return 0;
3081
3082 /* Don't bother for transfers shorter than three words. */
3083 if (len < 3 * sizeof (long))
3084 return 0;
3085
3086 /* We could keep this file open and cache it - possibly one per
3087 thread. That requires some juggling, but is even faster. */
3088 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3089 fd = open (filename, O_RDONLY | O_LARGEFILE);
3090 if (fd == -1)
3091 return 0;
3092
3093 /* If pread64 is available, use it. It's faster if the kernel
3094 supports it (only one syscall), and it's 64-bit safe even on
3095 32-bit platforms (for instance, SPARC debugging a SPARC64
3096 application). */
3097 #ifdef HAVE_PREAD64
3098 if (pread64 (fd, readbuf, len, offset) != len)
3099 #else
3100 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3101 #endif
3102 ret = 0;
3103 else
3104 ret = len;
3105
3106 close (fd);
3107 return ret;
3108 }
3109
3110 /* Parse LINE as a signal set and add its set bits to SIGS. */
3111
3112 static void
3113 add_line_to_sigset (const char *line, sigset_t *sigs)
3114 {
3115 int len = strlen (line) - 1;
3116 const char *p;
3117 int signum;
3118
3119 if (line[len] != '\n')
3120 error (_("Could not parse signal set: %s"), line);
3121
3122 p = line;
3123 signum = len * 4;
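/* Example: for the 64-bit sigset line "0000000000000002\n", len is 16
   and signum starts at 64; the rightmost digit is processed once
   signum has dropped to 0, and its set bit 2 adds signal 2 (SIGINT)
   to SIGS.  */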
3124 while (len-- > 0)
3125 {
3126 int digit;
3127
3128 if (*p >= '0' && *p <= '9')
3129 digit = *p - '0';
3130 else if (*p >= 'a' && *p <= 'f')
3131 digit = *p - 'a' + 10;
3132 else
3133 error (_("Could not parse signal set: %s"), line);
3134
3135 signum -= 4;
3136
3137 if (digit & 1)
3138 sigaddset (sigs, signum + 1);
3139 if (digit & 2)
3140 sigaddset (sigs, signum + 2);
3141 if (digit & 4)
3142 sigaddset (sigs, signum + 3);
3143 if (digit & 8)
3144 sigaddset (sigs, signum + 4);
3145
3146 p++;
3147 }
3148 }
3149
3150 /* Find process PID's pending signals from /proc/pid/status and set
3151 SIGS to match. */
3152
3153 void
3154 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3155 {
3156 FILE *procfile;
3157 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3158 int signum;
3159
3160 sigemptyset (pending);
3161 sigemptyset (blocked);
3162 sigemptyset (ignored);
3163 sprintf (fname, "/proc/%d/status", pid);
3164 procfile = fopen (fname, "r");
3165 if (procfile == NULL)
3166 error (_("Could not open %s"), fname);
3167
3168 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3169 {
3170 /* Normal queued signals are on the SigPnd line in the status
3171 file. However, 2.6 kernels also have a "shared" pending
3172 queue for delivering signals to a thread group, so check for
3173 a ShdPnd line also.
3174
3175 Unfortunately some Red Hat kernels include the shared pending
3176 queue but not the ShdPnd status field. */
3177
3178 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3179 add_line_to_sigset (buffer + 8, pending);
3180 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3181 add_line_to_sigset (buffer + 8, pending);
3182 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3183 add_line_to_sigset (buffer + 8, blocked);
3184 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3185 add_line_to_sigset (buffer + 8, ignored);
3186 }
3187
3188 fclose (procfile);
3189 }
3190
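/* to_xfer_partial for the single-threaded target: AUXV requests go
   to the /proc-based auxv reader, memory reads are first attempted
   through /proc/PID/mem, and anything not handled that way falls
   back to the inherited ptrace-based method.  */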
3191 static LONGEST
3192 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3193 const char *annex, gdb_byte *readbuf,
3194 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3195 {
3196 LONGEST xfer;
3197
3198 if (object == TARGET_OBJECT_AUXV)
3199 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3200 offset, len);
3201
3202 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3203 offset, len);
3204 if (xfer != 0)
3205 return xfer;
3206
3207 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3208 offset, len);
3209 }
3210
3211 /* Create a prototype generic GNU/Linux target. The client can override
3212 it with local methods. */
3213
3214 static void
3215 linux_target_install_ops (struct target_ops *t)
3216 {
3217 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3218 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3219 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3220 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
3221 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3222 t->to_post_attach = linux_child_post_attach;
3223 t->to_follow_fork = linux_child_follow_fork;
3224 t->to_find_memory_regions = linux_nat_find_memory_regions;
3225 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3226
3227 super_xfer_partial = t->to_xfer_partial;
3228 t->to_xfer_partial = linux_xfer_partial;
3229 }
3230
3231 struct target_ops *
3232 linux_target (void)
3233 {
3234 struct target_ops *t;
3235
3236 t = inf_ptrace_target ();
3237 linux_target_install_ops (t);
3238
3239 return t;
3240 }
3241
3242 struct target_ops *
3243 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
3244 {
3245 struct target_ops *t;
3246
3247 t = inf_ptrace_trad_target (register_u_offset);
3248 linux_target_install_ops (t);
3249
3250 return t;
3251 }
3252
3253 void
3254 linux_nat_add_target (struct target_ops *t)
3255 {
3256 /* Save the provided single-threaded target. We save this in a separate
3257 variable because another target we've inherited from (e.g. inf-ptrace)
3258 may have saved a pointer to T; we want to use it for the final
3259 process stratum target. */
3260 linux_ops_saved = *t;
3261 linux_ops = &linux_ops_saved;
3262
3263 /* Override some methods for multithreading. */
3264 t->to_attach = linux_nat_attach;
3265 t->to_detach = linux_nat_detach;
3266 t->to_resume = linux_nat_resume;
3267 t->to_wait = linux_nat_wait;
3268 t->to_xfer_partial = linux_nat_xfer_partial;
3269 t->to_kill = linux_nat_kill;
3270 t->to_mourn_inferior = linux_nat_mourn_inferior;
3271 t->to_thread_alive = linux_nat_thread_alive;
3272 t->to_pid_to_str = linux_nat_pid_to_str;
3273 t->to_has_thread_control = tc_schedlock;
3274
3275 /* We don't change the stratum; this target will sit at
3276 process_stratum and thread_db will sit at thread_stratum. This
3277 is a little strange, since this is a multi-threaded-capable
3278 target, but we want to be on the stack below thread_db, and we
3279 also want to be used for single-threaded processes. */
3280
3281 add_target (t);
3282
3283 /* TODO: Eliminate this and have libthread_db use
3284 find_target_beneath. */
3285 thread_db_init (t);
3286 }
3287
3288 /* Register a method to call whenever a new thread is attached. */
3289 void
3290 linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
3291 {
3292 /* Save the pointer. We only support a single registered instance
3293 of the GNU/Linux native target, so we do not need to map this to
3294 T. */
3295 linux_nat_new_thread = new_thread;
3296 }
3297
3298 /* Return the saved siginfo associated with PTID. */
3299 struct siginfo *
3300 linux_nat_get_siginfo (ptid_t ptid)
3301 {
3302 struct lwp_info *lp = find_lwp_pid (ptid);
3303
3304 gdb_assert (lp != NULL);
3305
3306 return &lp->siginfo;
3307 }
3308
3309 void
3310 _initialize_linux_nat (void)
3311 {
3312 struct sigaction action;
3313
3314 add_info ("proc", linux_nat_info_proc_cmd, _("\
3315 Show /proc process information about any running process.\n\
3316 Specify any process id, or use the program being debugged by default.\n\
3317 Specify any of the following keywords for detailed info:\n\
3318 mappings -- list of mapped memory regions.\n\
3319 stat -- list a bunch of random process info.\n\
3320 status -- list a different bunch of random process info.\n\
3321 all -- list all available /proc info."));
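/* For example, "info proc mappings" lists the memory map of the
   current inferior, and "info proc 1234 all" prints everything
   available for process 1234.  */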
3322
3323 /* Save the original signal mask. */
3324 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
3325
3326 action.sa_handler = sigchld_handler;
3327 sigemptyset (&action.sa_mask);
3328 action.sa_flags = SA_RESTART;
3329 sigaction (SIGCHLD, &action, NULL);
3330
3331 /* Make sure we don't block SIGCHLD during a sigsuspend. */
3332 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
3333 sigdelset (&suspend_mask, SIGCHLD);
3334
3335 sigemptyset (&blocked_mask);
3336
3337 add_setshow_zinteger_cmd ("lin-lwp", no_class, &debug_linux_nat, _("\
3338 Set debugging of GNU/Linux lwp module."), _("\
3339 Show debugging of GNU/Linux lwp module."), _("\
3340 Enables printf debugging output."),
3341 NULL,
3342 show_debug_linux_nat,
3343 &setdebuglist, &showdebuglist);
3344 }
3345 \f
3346
3347 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
3348 the GNU/Linux Threads library and therefore doesn't really belong
3349 here. */
3350
3351 /* Read variable NAME in the target and return its value if found.
3352 Otherwise return zero. It is assumed that the type of the variable
3353 is `int'. */
3354
3355 static int
3356 get_signo (const char *name)
3357 {
3358 struct minimal_symbol *ms;
3359 int signo;
3360
3361 ms = lookup_minimal_symbol (name, NULL, NULL);
3362 if (ms == NULL)
3363 return 0;
3364
3365 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
3366 sizeof (signo)) != 0)
3367 return 0;
3368
3369 return signo;
3370 }
3371
3372 /* Return the set of signals used by the threads library in *SET. */
3373
3374 void
3375 lin_thread_get_thread_signals (sigset_t *set)
3376 {
3377 struct sigaction action;
3378 int restart, cancel;
3379
3380 sigemptyset (set);
3381
3382 restart = get_signo ("__pthread_sig_restart");
3383 cancel = get_signo ("__pthread_sig_cancel");
3384
3385 /* LinuxThreads normally uses the first two RT signals, but in some legacy
3386 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
3387 not provide any way for the debugger to query the signal numbers -
3388 fortunately they don't change! */
3389
3390 if (restart == 0)
3391 restart = __SIGRTMIN;
3392
3393 if (cancel == 0)
3394 cancel = __SIGRTMIN + 1;
3395
3396 sigaddset (set, restart);
3397 sigaddset (set, cancel);
3398
3399 /* The GNU/Linux Threads library makes terminating threads send a
3400 special "cancel" signal instead of SIGCHLD. Make sure we catch
3401 those (to prevent them from terminating GDB itself, which is
3402 likely to be their default action) and treat them the same way as
3403 SIGCHLD. */
3404
3405 action.sa_handler = sigchld_handler;
3406 sigemptyset (&action.sa_mask);
3407 action.sa_flags = SA_RESTART;
3408 sigaction (cancel, &action, NULL);
3409
3410 /* We block the "cancel" signal throughout this code ... */
3411 sigaddset (&blocked_mask, cancel);
3412 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
3413
3414 /* ... except during a sigsuspend. */
3415 sigdelset (&suspend_mask, cancel);
3416 }
3417