2011-12-05 Pedro Alves <pedro@codesourcery.com>
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
4 2011 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-ptrace.h"
34 #include "linux-procfs.h"
35 #include "linux-fork.h"
36 #include "gdbthread.h"
37 #include "gdbcmd.h"
38 #include "regcache.h"
39 #include "regset.h"
40 #include "inf-ptrace.h"
41 #include "auxv.h"
42 #include <sys/param.h> /* for MAXPATHLEN */
43 #include <sys/procfs.h> /* for elf_gregset etc. */
44 #include "elf-bfd.h" /* for elfcore_write_* */
45 #include "gregset.h" /* for gregset */
46 #include "gdbcore.h" /* for get_exec_file */
47 #include <ctype.h> /* for isdigit */
48 #include "gdbthread.h" /* for struct thread_info etc. */
49 #include "gdb_stat.h" /* for struct stat */
50 #include <fcntl.h> /* for O_RDONLY */
51 #include "inf-loop.h"
52 #include "event-loop.h"
53 #include "event-top.h"
54 #include <pwd.h>
55 #include <sys/types.h>
56 #include "gdb_dirent.h"
57 #include "xml-support.h"
58 #include "terminal.h"
59 #include <sys/vfs.h>
60 #include "solib.h"
61 #include "linux-osdata.h"
62 #include "cli/cli-utils.h"
63
64 #ifndef SPUFS_MAGIC
65 #define SPUFS_MAGIC 0x23c9b64e
66 #endif
67
68 #ifdef HAVE_PERSONALITY
69 # include <sys/personality.h>
70 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
71 # define ADDR_NO_RANDOMIZE 0x0040000
72 # endif
73 #endif /* HAVE_PERSONALITY */
74
75 /* This comment documents high-level logic of this file.
76
77 Waiting for events in sync mode
78 ===============================
79
80 When waiting for an event in a specific thread, we just use waitpid, passing
81 the specific pid, and not passing WNOHANG.
82
83 When waiting for an event in all threads, waitpid is not quite good. Prior to
84 version 2.4, Linux can either wait for event in main thread, or in secondary
85 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
86 miss an event. The solution is to use non-blocking waitpid, together with
87 sigsuspend. First, we use non-blocking waitpid to get an event in the main
   process, if any.  Second, we use non-blocking waitpid with the __WCLONE
89 flag to check for events in cloned processes. If nothing is found, we use
90 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
91 happened to a child process -- and SIGCHLD will be delivered both for events
92 in main debugged process and in cloned processes. As soon as we know there's
93 an event, we get back to calling nonblocking waitpid with and without
   __WCLONE.
95
96 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
97 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
98 blocked, the signal becomes pending and sigsuspend immediately
99 notices it and returns.
100
101 Waiting for events in async mode
102 ================================
103
104 In async mode, GDB should always be ready to handle both user input
105 and target events, so neither blocking waitpid nor sigsuspend are
106 viable options. Instead, we should asynchronously notify the GDB main
107 event loop whenever there's an unprocessed event from the target. We
108 detect asynchronous target events by handling SIGCHLD signals. To
109 notify the event loop about target events, the self-pipe trick is used
110 --- a pipe is registered as waitable event source in the event loop,
111 the event loop select/poll's on the read end of this pipe (as well on
112 other event sources, e.g., stdin), and the SIGCHLD handler writes a
113 byte to this pipe. This is more portable than relying on
114 pselect/ppoll, since on kernels that lack those syscalls, libc
115 emulates them with select/poll+sigprocmask, and that is racy
116 (a.k.a. plain broken).
117
118 Obviously, if we fail to notify the event loop if there's a target
119 event, it's bad. OTOH, if we notify the event loop when there's no
120 event from the target, linux_nat_wait will detect that there's no real
121 event to report, and return event of type TARGET_WAITKIND_IGNORE.
122 This is mostly harmless, but it will waste time and is better avoided.
123
124 The main design point is that every time GDB is outside linux-nat.c,
125 we have a SIGCHLD handler installed that is called when something
126 happens to the target and notifies the GDB event loop. Whenever GDB
127 core decides to handle the event, and calls into linux-nat.c, we
   process things as in sync mode, except that we never block in
129 sigsuspend.
130
131 While processing an event, we may end up momentarily blocked in
   waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
133 return quickly. E.g., in all-stop mode, before reporting to the core
134 that an LWP hit a breakpoint, all LWPs are stopped by sending them
135 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
136 Note that this is different from blocking indefinitely waiting for the
137 next event --- here, we're already handling an event.
138
139 Use of signals
140 ==============
141
142 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
143 signal is not entirely significant; we just need for a signal to be delivered,
144 so that we can intercept it. SIGSTOP's advantage is that it can not be
145 blocked. A disadvantage is that it is not a real-time signal, so it can only
146 be queued once; we do not keep track of other sources of SIGSTOP.
147
148 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
149 use them, because they have special behavior when the signal is generated -
150 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
151 kills the entire thread group.
152
153 A delivered SIGSTOP would stop the entire thread group, not just the thread we
154 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
155 cancel it (by PTRACE_CONT without passing SIGSTOP).
156
157 We could use a real-time signal instead. This would solve those problems; we
158 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
159 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
160 generates it, and there are races with trying to find a signal that is not
161 blocked. */
162
163 #ifndef O_LARGEFILE
164 #define O_LARGEFILE 0
165 #endif
166
167 /* Unlike other extended result codes, WSTOPSIG (status) on
168 PTRACE_O_TRACESYSGOOD syscall events doesn't return SIGTRAP, but
169 instead SIGTRAP with bit 7 set. */
170 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
171
172 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
173 the use of the multi-threaded target. */
174 static struct target_ops *linux_ops;
175 static struct target_ops linux_ops_saved;
176
177 /* The method to call, if any, when a new thread is attached. */
178 static void (*linux_nat_new_thread) (ptid_t);
179
180 /* The method to call, if any, when the siginfo object needs to be
181 converted between the layout returned by ptrace, and the layout in
182 the architecture of the inferior. */
183 static int (*linux_nat_siginfo_fixup) (struct siginfo *,
184 gdb_byte *,
185 int);
186
187 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
188 Called by our to_xfer_partial. */
189 static LONGEST (*super_xfer_partial) (struct target_ops *,
190 enum target_object,
191 const char *, gdb_byte *,
192 const gdb_byte *,
193 ULONGEST, LONGEST);
194
195 static int debug_linux_nat;
/* Implementation of the "show debug lin-lwp" command: print VALUE,
   the current textual setting of the debug_linux_nat flag, to FILE.
   FROM_TTY and C are unused, as is usual for show hooks.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
203
204 struct simple_pid_list
205 {
206 int pid;
207 int status;
208 struct simple_pid_list *next;
209 };
210 struct simple_pid_list *stopped_pids;
211
212 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
213 can not be used, 1 if it can. */
214
215 static int linux_supports_tracefork_flag = -1;
216
217 /* This variable is a tri-state flag: -1 for unknown, 0 if
218 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
219
220 static int linux_supports_tracesysgood_flag = -1;
221
222 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
223 PTRACE_O_TRACEVFORKDONE. */
224
225 static int linux_supports_tracevforkdone_flag = -1;
226
227 /* Stores the current used ptrace() options. */
228 static int current_ptrace_options = 0;
229
230 /* Async mode support. */
231
232 /* The read/write ends of the pipe registered as waitable file in the
233 event loop. */
234 static int linux_nat_event_pipe[2] = { -1, -1 };
235
236 /* Flush the event pipe. */
237
238 static void
239 async_file_flush (void)
240 {
241 int ret;
242 char buf;
243
244 do
245 {
246 ret = read (linux_nat_event_pipe[0], &buf, 1);
247 }
248 while (ret >= 0 || (ret == -1 && errno == EINTR));
249 }
250
251 /* Put something (anything, doesn't matter what, or how much) in event
252 pipe, so that the select/poll in the event-loop realizes we have
253 something to process. */
254
255 static void
256 async_file_mark (void)
257 {
258 int ret;
259
260 /* It doesn't really matter what the pipe contains, as long we end
261 up with something in it. Might as well flush the previous
262 left-overs. */
263 async_file_flush ();
264
265 do
266 {
267 ret = write (linux_nat_event_pipe[1], "+", 1);
268 }
269 while (ret == -1 && errno == EINTR);
270
271 /* Ignore EAGAIN. If the pipe is full, the event loop will already
272 be awakened anyway. */
273 }
274
275 static void linux_nat_async (void (*callback)
276 (enum inferior_event_type event_type,
277 void *context),
278 void *context);
279 static int kill_lwp (int lwpid, int signo);
280
281 static int stop_callback (struct lwp_info *lp, void *data);
282
283 static void block_child_signals (sigset_t *prev_mask);
284 static void restore_child_signals_mask (sigset_t *prev_mask);
285
286 struct lwp_info;
287 static struct lwp_info *add_lwp (ptid_t ptid);
288 static void purge_lwp_list (int pid);
289 static struct lwp_info *find_lwp_pid (ptid_t ptid);
290
291 \f
292 /* Trivial list manipulation functions to keep track of a list of
293 new stopped processes. */
294 static void
295 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
296 {
297 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
298
299 new_pid->pid = pid;
300 new_pid->status = status;
301 new_pid->next = *listp;
302 *listp = new_pid;
303 }
304
305 static int
306 in_pid_list_p (struct simple_pid_list *list, int pid)
307 {
308 struct simple_pid_list *p;
309
310 for (p = list; p != NULL; p = p->next)
311 if (p->pid == pid)
312 return 1;
313 return 0;
314 }
315
316 static int
317 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
318 {
319 struct simple_pid_list **p;
320
321 for (p = listp; *p != NULL; p = &(*p)->next)
322 if ((*p)->pid == pid)
323 {
324 struct simple_pid_list *next = (*p)->next;
325
326 *statusp = (*p)->status;
327 xfree (*p);
328 *p = next;
329 return 1;
330 }
331 return 0;
332 }
333
334 \f
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the throw-away child.  The order is significant: request
   tracing, stop so the parent can observe us and set ptrace options,
   then fork once (so the parent can check whether a fork event is
   reported) and exit without flushing stdio.  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);	/* Make the parent our tracer.  */
  kill (getpid (), SIGSTOP);		/* Parent waits for this stop.  */
  fork ();				/* May produce a fork event.  */
  _exit (0);
}
345
/* Wrapper around waitpid that retries transparently whenever the
   call is interrupted by a signal (EINTR).  Arguments and return
   value are exactly those of waitpid.  */

static int
my_waitpid (int pid, int *statusp, int flags)
{
  int result;

  for (;;)
    {
      result = waitpid (pid, statusp, flags);
      if (result != -1 || errno != EINTR)
	return result;
    }
}
361
362 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
363
364 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
365 we know that the feature is not available. This may change the tracing
366 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
367
368 However, if it succeeds, we don't know for sure that the feature is
369 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
370 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
371 fork tracing, and let it fork. If the process exits, we assume that we
372 can't use TRACEFORK; if we get the fork notification, and we can extract
373 the new child's PID, then we assume that we can. */
374
static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  sigset_t prev_mask;

  /* We don't want those ptrace calls to be interrupted.  */
  block_child_signals (&prev_mask);

  /* Assume the worst until proven otherwise.  */
  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* First probe on the existing inferior: an outright error means
     this kernel definitely does not know PTRACE_O_TRACEFORK.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Success above is inconclusive (old kernels silently ignored
     unknown options), so run the real experiment on a scratch
     child.  */
  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  /* Wait for the child's self-delivered SIGSTOP.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
	   status);

  /* Request fork events on the stopped child.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* Option rejected: dispose of the child and report the feature
	 as unavailable (the flags were cleared above).  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  restore_child_signals_mask (&prev_mask);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed "
		   "to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected "
		   "wait status 0x%x from killed child"), status);

      restore_child_signals_mask (&prev_mask);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child proceed to its fork () call.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* A PTRACE_EVENT_FORK code in the upper halfword of the wait
     status means the kernel honored PTRACE_O_TRACEFORK.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  /* We saw the event and could extract the grandchild's PID:
	     the feature is fully usable.  */
	  linux_supports_tracefork_flag = 1;

	  /* The grandchild was auto-attached by the kernel; reap its
	     stop, kill it, and reap its exit too.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: "
		       "failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  /* Dispose of the scratch child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  restore_child_signals_mask (&prev_mask);
}
474
475 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
476
477 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
478 we know that the feature is not available. This may change the tracing
479 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
480
481 static void
482 linux_test_for_tracesysgood (int original_pid)
483 {
484 int ret;
485 sigset_t prev_mask;
486
487 /* We don't want those ptrace calls to be interrupted. */
488 block_child_signals (&prev_mask);
489
490 linux_supports_tracesysgood_flag = 0;
491
492 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
493 if (ret != 0)
494 goto out;
495
496 linux_supports_tracesysgood_flag = 1;
497 out:
498 restore_child_signals_mask (&prev_mask);
499 }
500
/* Return non-zero iff the PTRACE_O_TRACESYSGOOD option is available.
   This function also sets linux_supports_tracesysgood_flag.  */
503
504 static int
505 linux_supports_tracesysgood (int pid)
506 {
507 if (linux_supports_tracesysgood_flag == -1)
508 linux_test_for_tracesysgood (pid);
509 return linux_supports_tracesysgood_flag;
510 }
511
512 /* Return non-zero iff we have tracefork functionality available.
513 This function also sets linux_supports_tracefork_flag. */
514
515 static int
516 linux_supports_tracefork (int pid)
517 {
518 if (linux_supports_tracefork_flag == -1)
519 linux_test_for_tracefork (pid);
520 return linux_supports_tracefork_flag;
521 }
522
523 static int
524 linux_supports_tracevforkdone (int pid)
525 {
526 if (linux_supports_tracefork_flag == -1)
527 linux_test_for_tracefork (pid);
528 return linux_supports_tracevforkdone_flag;
529 }
530
531 static void
532 linux_enable_tracesysgood (ptid_t ptid)
533 {
534 int pid = ptid_get_lwp (ptid);
535
536 if (pid == 0)
537 pid = ptid_get_pid (ptid);
538
539 if (linux_supports_tracesysgood (pid) == 0)
540 return;
541
542 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
543
544 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
545 }
546
547 \f
548 void
549 linux_enable_event_reporting (ptid_t ptid)
550 {
551 int pid = ptid_get_lwp (ptid);
552
553 if (pid == 0)
554 pid = ptid_get_pid (ptid);
555
556 if (! linux_supports_tracefork (pid))
557 return;
558
559 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
560 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
561
562 if (linux_supports_tracevforkdone (pid))
563 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
564
565 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
566 read-only process state. */
567
568 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
569 }
570
571 static void
572 linux_child_post_attach (int pid)
573 {
574 linux_enable_event_reporting (pid_to_ptid (pid));
575 linux_enable_tracesysgood (pid_to_ptid (pid));
576 }
577
/* target_ops hook run right after a new inferior has been started:
   turn on extended ptrace event reporting for it.  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  linux_enable_tracesysgood (ptid);
}
584
585 static int
586 linux_child_follow_fork (struct target_ops *ops, int follow_child)
587 {
588 sigset_t prev_mask;
589 int has_vforked;
590 int parent_pid, child_pid;
591
592 block_child_signals (&prev_mask);
593
594 has_vforked = (inferior_thread ()->pending_follow.kind
595 == TARGET_WAITKIND_VFORKED);
596 parent_pid = ptid_get_lwp (inferior_ptid);
597 if (parent_pid == 0)
598 parent_pid = ptid_get_pid (inferior_ptid);
599 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
600
601 if (!detach_fork)
602 linux_enable_event_reporting (pid_to_ptid (child_pid));
603
604 if (has_vforked
605 && !non_stop /* Non-stop always resumes both branches. */
606 && (!target_is_async_p () || sync_execution)
607 && !(follow_child || detach_fork || sched_multi))
608 {
609 /* The parent stays blocked inside the vfork syscall until the
610 child execs or exits. If we don't let the child run, then
611 the parent stays blocked. If we're telling the parent to run
612 in the foreground, the user will not be able to ctrl-c to get
613 back the terminal, effectively hanging the debug session. */
614 fprintf_filtered (gdb_stderr, _("\
615 Can not resume the parent process over vfork in the foreground while\n\
616 holding the child stopped. Try \"set detach-on-fork\" or \
617 \"set schedule-multiple\".\n"));
618 /* FIXME output string > 80 columns. */
619 return 1;
620 }
621
622 if (! follow_child)
623 {
624 struct lwp_info *child_lp = NULL;
625
626 /* We're already attached to the parent, by default. */
627
628 /* Detach new forked process? */
629 if (detach_fork)
630 {
631 /* Before detaching from the child, remove all breakpoints
632 from it. If we forked, then this has already been taken
633 care of by infrun.c. If we vforked however, any
634 breakpoint inserted in the parent is visible in the
635 child, even those added while stopped in a vfork
636 catchpoint. This will remove the breakpoints from the
637 parent also, but they'll be reinserted below. */
638 if (has_vforked)
639 {
640 /* keep breakpoints list in sync. */
641 remove_breakpoints_pid (GET_PID (inferior_ptid));
642 }
643
644 if (info_verbose || debug_linux_nat)
645 {
646 target_terminal_ours ();
647 fprintf_filtered (gdb_stdlog,
648 "Detaching after fork from "
649 "child process %d.\n",
650 child_pid);
651 }
652
653 ptrace (PTRACE_DETACH, child_pid, 0, 0);
654 }
655 else
656 {
657 struct inferior *parent_inf, *child_inf;
658 struct cleanup *old_chain;
659
660 /* Add process to GDB's tables. */
661 child_inf = add_inferior (child_pid);
662
663 parent_inf = current_inferior ();
664 child_inf->attach_flag = parent_inf->attach_flag;
665 copy_terminal_info (child_inf, parent_inf);
666
667 old_chain = save_inferior_ptid ();
668 save_current_program_space ();
669
670 inferior_ptid = ptid_build (child_pid, child_pid, 0);
671 add_thread (inferior_ptid);
672 child_lp = add_lwp (inferior_ptid);
673 child_lp->stopped = 1;
674 child_lp->last_resume_kind = resume_stop;
675
676 /* If this is a vfork child, then the address-space is
677 shared with the parent. */
678 if (has_vforked)
679 {
680 child_inf->pspace = parent_inf->pspace;
681 child_inf->aspace = parent_inf->aspace;
682
683 /* The parent will be frozen until the child is done
684 with the shared region. Keep track of the
685 parent. */
686 child_inf->vfork_parent = parent_inf;
687 child_inf->pending_detach = 0;
688 parent_inf->vfork_child = child_inf;
689 parent_inf->pending_detach = 0;
690 }
691 else
692 {
693 child_inf->aspace = new_address_space ();
694 child_inf->pspace = add_program_space (child_inf->aspace);
695 child_inf->removable = 1;
696 set_current_program_space (child_inf->pspace);
697 clone_program_space (child_inf->pspace, parent_inf->pspace);
698
699 /* Let the shared library layer (solib-svr4) learn about
700 this new process, relocate the cloned exec, pull in
701 shared libraries, and install the solib event
702 breakpoint. If a "cloned-VM" event was propagated
703 better throughout the core, this wouldn't be
704 required. */
705 solib_create_inferior_hook (0);
706 }
707
708 /* Let the thread_db layer learn about this new process. */
709 check_for_thread_db ();
710
711 do_cleanups (old_chain);
712 }
713
714 if (has_vforked)
715 {
716 struct lwp_info *parent_lp;
717 struct inferior *parent_inf;
718
719 parent_inf = current_inferior ();
720
721 /* If we detached from the child, then we have to be careful
722 to not insert breakpoints in the parent until the child
723 is done with the shared memory region. However, if we're
724 staying attached to the child, then we can and should
725 insert breakpoints, so that we can debug it. A
726 subsequent child exec or exit is enough to know when does
727 the child stops using the parent's address space. */
728 parent_inf->waiting_for_vfork_done = detach_fork;
729 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
730
731 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
732 gdb_assert (linux_supports_tracefork_flag >= 0);
733
734 if (linux_supports_tracevforkdone (0))
735 {
736 if (debug_linux_nat)
737 fprintf_unfiltered (gdb_stdlog,
738 "LCFF: waiting for VFORK_DONE on %d\n",
739 parent_pid);
740 parent_lp->stopped = 1;
741
742 /* We'll handle the VFORK_DONE event like any other
743 event, in target_wait. */
744 }
745 else
746 {
747 /* We can't insert breakpoints until the child has
748 finished with the shared memory region. We need to
749 wait until that happens. Ideal would be to just
750 call:
751 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
752 - waitpid (parent_pid, &status, __WALL);
753 However, most architectures can't handle a syscall
754 being traced on the way out if it wasn't traced on
755 the way in.
756
757 We might also think to loop, continuing the child
758 until it exits or gets a SIGTRAP. One problem is
759 that the child might call ptrace with PTRACE_TRACEME.
760
761 There's no simple and reliable way to figure out when
762 the vforked child will be done with its copy of the
763 shared memory. We could step it out of the syscall,
764 two instructions, let it go, and then single-step the
765 parent once. When we have hardware single-step, this
766 would work; with software single-step it could still
767 be made to work but we'd have to be able to insert
768 single-step breakpoints in the child, and we'd have
769 to insert -just- the single-step breakpoint in the
770 parent. Very awkward.
771
772 In the end, the best we can do is to make sure it
773 runs for a little while. Hopefully it will be out of
774 range of any breakpoints we reinsert. Usually this
775 is only the single-step breakpoint at vfork's return
776 point. */
777
778 if (debug_linux_nat)
779 fprintf_unfiltered (gdb_stdlog,
780 "LCFF: no VFORK_DONE "
781 "support, sleeping a bit\n");
782
783 usleep (10000);
784
785 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
786 and leave it pending. The next linux_nat_resume call
787 will notice a pending event, and bypasses actually
788 resuming the inferior. */
789 parent_lp->status = 0;
790 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
791 parent_lp->stopped = 1;
792
793 /* If we're in async mode, need to tell the event loop
794 there's something here to process. */
795 if (target_can_async_p ())
796 async_file_mark ();
797 }
798 }
799 }
800 else
801 {
802 struct inferior *parent_inf, *child_inf;
803 struct lwp_info *child_lp;
804 struct program_space *parent_pspace;
805
806 if (info_verbose || debug_linux_nat)
807 {
808 target_terminal_ours ();
809 if (has_vforked)
810 fprintf_filtered (gdb_stdlog,
811 _("Attaching after process %d "
812 "vfork to child process %d.\n"),
813 parent_pid, child_pid);
814 else
815 fprintf_filtered (gdb_stdlog,
816 _("Attaching after process %d "
817 "fork to child process %d.\n"),
818 parent_pid, child_pid);
819 }
820
821 /* Add the new inferior first, so that the target_detach below
822 doesn't unpush the target. */
823
824 child_inf = add_inferior (child_pid);
825
826 parent_inf = current_inferior ();
827 child_inf->attach_flag = parent_inf->attach_flag;
828 copy_terminal_info (child_inf, parent_inf);
829
830 parent_pspace = parent_inf->pspace;
831
832 /* If we're vforking, we want to hold on to the parent until the
833 child exits or execs. At child exec or exit time we can
834 remove the old breakpoints from the parent and detach or
835 resume debugging it. Otherwise, detach the parent now; we'll
836 want to reuse it's program/address spaces, but we can't set
837 them to the child before removing breakpoints from the
838 parent, otherwise, the breakpoints module could decide to
839 remove breakpoints from the wrong process (since they'd be
840 assigned to the same address space). */
841
842 if (has_vforked)
843 {
844 gdb_assert (child_inf->vfork_parent == NULL);
845 gdb_assert (parent_inf->vfork_child == NULL);
846 child_inf->vfork_parent = parent_inf;
847 child_inf->pending_detach = 0;
848 parent_inf->vfork_child = child_inf;
849 parent_inf->pending_detach = detach_fork;
850 parent_inf->waiting_for_vfork_done = 0;
851 }
852 else if (detach_fork)
853 target_detach (NULL, 0);
854
855 /* Note that the detach above makes PARENT_INF dangling. */
856
857 /* Add the child thread to the appropriate lists, and switch to
858 this new thread, before cloning the program space, and
859 informing the solib layer about this new process. */
860
861 inferior_ptid = ptid_build (child_pid, child_pid, 0);
862 add_thread (inferior_ptid);
863 child_lp = add_lwp (inferior_ptid);
864 child_lp->stopped = 1;
865 child_lp->last_resume_kind = resume_stop;
866
867 /* If this is a vfork child, then the address-space is shared
868 with the parent. If we detached from the parent, then we can
869 reuse the parent's program/address spaces. */
870 if (has_vforked || detach_fork)
871 {
872 child_inf->pspace = parent_pspace;
873 child_inf->aspace = child_inf->pspace->aspace;
874 }
875 else
876 {
877 child_inf->aspace = new_address_space ();
878 child_inf->pspace = add_program_space (child_inf->aspace);
879 child_inf->removable = 1;
880 set_current_program_space (child_inf->pspace);
881 clone_program_space (child_inf->pspace, parent_pspace);
882
883 /* Let the shared library layer (solib-svr4) learn about
884 this new process, relocate the cloned exec, pull in
885 shared libraries, and install the solib event breakpoint.
886 If a "cloned-VM" event was propagated better throughout
887 the core, this wouldn't be required. */
888 solib_create_inferior_hook (0);
889 }
890
891 /* Let the thread_db layer learn about this new process. */
892 check_for_thread_db ();
893 }
894
895 restore_child_signals_mask (&prev_mask);
896 return 0;
897 }
898
899 \f
/* target_ops hook for fork catchpoints.  No per-catchpoint setup is
   needed when PTRACE_O_TRACEFORK works; fail (nonzero) only when the
   kernel lacks fork tracing.  */
static int
linux_child_insert_fork_catchpoint (int pid)
{
  return linux_supports_tracefork (pid) ? 0 : 1;
}
905
/* target_ops hook: removing a fork catchpoint needs no kernel-side
   work, so this always succeeds.  */
static int
linux_child_remove_fork_catchpoint (int pid)
{
  /* Nothing to undo.  */
  return 0;
}
911
/* target_ops hook for vfork catchpoints.  Like fork catchpoints,
   these only require kernel fork-tracing support.  */
static int
linux_child_insert_vfork_catchpoint (int pid)
{
  return linux_supports_tracefork (pid) ? 0 : 1;
}
917
/* target_ops hook: removing a vfork catchpoint needs no kernel-side
   work, so this always succeeds.  */
static int
linux_child_remove_vfork_catchpoint (int pid)
{
  /* Nothing to undo.  */
  return 0;
}
923
/* target_ops hook for exec catchpoints.  Exec events ride on the same
   PTRACE_O_TRACE* machinery, so only kernel support matters.  */
static int
linux_child_insert_exec_catchpoint (int pid)
{
  return linux_supports_tracefork (pid) ? 0 : 1;
}
929
/* target_ops hook: removing an exec catchpoint needs no kernel-side
   work, so this always succeeds.  */
static int
linux_child_remove_exec_catchpoint (int pid)
{
  /* Nothing to undo.  */
  return 0;
}
935
/* target_ops hook: enable syscall event reporting for PID.  Returns
   nonzero (failure) when PTRACE_O_TRACESYSGOOD is unavailable.  */
static int
linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood (pid))
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}
950
951 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
952 are processes sharing the same VM space. A multi-threaded process
953 is basically a group of such processes. However, such a grouping
954 is almost entirely a user-space issue; the kernel doesn't enforce
955 such a grouping at all (this might change in the future). In
956 general, we'll rely on the threads library (i.e. the GNU/Linux
957 Threads library) to provide such a grouping.
958
959 It is perfectly well possible to write a multi-threaded application
960 without the assistance of a threads library, by using the clone
961 system call directly. This module should be able to give some
962 rudimentary support for debugging such applications if developers
963 specify the CLONE_PTRACE flag in the clone system call, and are
964 using the Linux kernel 2.4 or above.
965
966 Note that there are some peculiarities in GNU/Linux that affect
967 this code:
968
969 - In general one should specify the __WCLONE flag to waitpid in
970 order to make it report events for any of the cloned processes
971 (and leave it out for the initial process). However, if a cloned
972 process has exited the exit status is only reported if the
973 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
974 we cannot use it since GDB must work on older systems too.
975
976 - When a traced, cloned process exits and is waited for by the
977 debugger, the kernel reassigns it to the original parent and
978 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
979 library doesn't notice this, which leads to the "zombie problem":
     When being debugged, a multi-threaded process that spawns a lot
     of threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */
983
/* List of known LWPs, linked through each lwp_info's `next' field.  */
struct lwp_info *lwp_list;
986 \f
987
/* Original signal mask, saved before we start blocking anything.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;
1000
/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  Paired with
   restore_child_signals_mask.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is in BLOCKED_MASK; it may not have been added
     yet on the first call.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}
1013
/* Restore the signal mask to PREV_MASK, as previously filled in by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}
1022
/* Mask of signals to pass directly to the inferior; rebuilt by
   linux_nat_pass_signals.  */
static sigset_t pass_mask;
1025
1026 /* Update signals to pass to the inferior. */
1027 static void
1028 linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1029 {
1030 int signo;
1031
1032 sigemptyset (&pass_mask);
1033
1034 for (signo = 1; signo < NSIG; signo++)
1035 {
1036 int target_signo = target_signal_from_host (signo);
1037 if (target_signo < numsigs && pass_signals[target_signo])
1038 sigaddset (&pass_mask, signo);
1039 }
1040 }
1041
1042 \f
1043
1044 /* Prototypes for local functions. */
1045 static int stop_wait_callback (struct lwp_info *lp, void *data);
1046 static int linux_thread_alive (ptid_t ptid);
1047 static char *linux_child_pid_to_exec_file (int pid);
1048
1049 \f
1050 /* Convert wait status STATUS to a string. Used for printing debug
1051 messages only. */
1052
1053 static char *
1054 status_to_str (int status)
1055 {
1056 static char buf[64];
1057
1058 if (WIFSTOPPED (status))
1059 {
1060 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1061 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1062 strsignal (SIGTRAP));
1063 else
1064 snprintf (buf, sizeof (buf), "%s (stopped)",
1065 strsignal (WSTOPSIG (status)));
1066 }
1067 else if (WIFSIGNALED (status))
1068 snprintf (buf, sizeof (buf), "%s (terminated)",
1069 strsignal (WTERMSIG (status)));
1070 else
1071 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1072
1073 return buf;
1074 }
1075
1076 /* Remove all LWPs belong to PID from the lwp list. */
1077
1078 static void
1079 purge_lwp_list (int pid)
1080 {
1081 struct lwp_info *lp, *lpprev, *lpnext;
1082
1083 lpprev = NULL;
1084
1085 for (lp = lwp_list; lp; lp = lpnext)
1086 {
1087 lpnext = lp->next;
1088
1089 if (ptid_get_pid (lp->ptid) == pid)
1090 {
1091 if (lp == lwp_list)
1092 lwp_list = lp->next;
1093 else
1094 lpprev->next = lp->next;
1095
1096 xfree (lp);
1097 }
1098 else
1099 lpprev = lp;
1100 }
1101 }
1102
1103 /* Return the number of known LWPs in the tgid given by PID. */
1104
1105 static int
1106 num_lwps (int pid)
1107 {
1108 int count = 0;
1109 struct lwp_info *lp;
1110
1111 for (lp = lwp_list; lp; lp = lp->next)
1112 if (ptid_get_pid (lp->ptid) == pid)
1113 count++;
1114
1115 return count;
1116 }
1117
1118 /* Add the LWP specified by PID to the list. Return a pointer to the
1119 structure describing the new LWP. The LWP should already be stopped
1120 (with an exception for the very first LWP). */
1121
1122 static struct lwp_info *
1123 add_lwp (ptid_t ptid)
1124 {
1125 struct lwp_info *lp;
1126
1127 gdb_assert (is_lwp (ptid));
1128
1129 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1130
1131 memset (lp, 0, sizeof (struct lwp_info));
1132
1133 lp->last_resume_kind = resume_continue;
1134 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1135
1136 lp->ptid = ptid;
1137 lp->core = -1;
1138
1139 lp->next = lwp_list;
1140 lwp_list = lp;
1141
1142 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
1143 linux_nat_new_thread (ptid);
1144
1145 return lp;
1146 }
1147
1148 /* Remove the LWP specified by PID from the list. */
1149
1150 static void
1151 delete_lwp (ptid_t ptid)
1152 {
1153 struct lwp_info *lp, *lpprev;
1154
1155 lpprev = NULL;
1156
1157 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1158 if (ptid_equal (lp->ptid, ptid))
1159 break;
1160
1161 if (!lp)
1162 return;
1163
1164 if (lpprev)
1165 lpprev->next = lp->next;
1166 else
1167 lwp_list = lp->next;
1168
1169 xfree (lp);
1170 }
1171
1172 /* Return a pointer to the structure describing the LWP corresponding
1173 to PID. If no corresponding LWP could be found, return NULL. */
1174
1175 static struct lwp_info *
1176 find_lwp_pid (ptid_t ptid)
1177 {
1178 struct lwp_info *lp;
1179 int lwp;
1180
1181 if (is_lwp (ptid))
1182 lwp = GET_LWP (ptid);
1183 else
1184 lwp = GET_PID (ptid);
1185
1186 for (lp = lwp_list; lp; lp = lp->next)
1187 if (lwp == GET_LWP (lp->ptid))
1188 return lp;
1189
1190 return NULL;
1191 }
1192
1193 /* Call CALLBACK with its second argument set to DATA for every LWP in
1194 the list. If CALLBACK returns 1 for a particular LWP, return a
1195 pointer to the structure describing that LWP immediately.
1196 Otherwise return NULL. */
1197
1198 struct lwp_info *
1199 iterate_over_lwps (ptid_t filter,
1200 int (*callback) (struct lwp_info *, void *),
1201 void *data)
1202 {
1203 struct lwp_info *lp, *lpnext;
1204
1205 for (lp = lwp_list; lp; lp = lpnext)
1206 {
1207 lpnext = lp->next;
1208
1209 if (ptid_match (lp->ptid, filter))
1210 {
1211 if ((*callback) (lp, data))
1212 return lp;
1213 }
1214 }
1215
1216 return NULL;
1217 }
1218
1219 /* Update our internal state when changing from one checkpoint to
1220 another indicated by NEW_PTID. We can only switch single-threaded
1221 applications, so we only create one new LWP, and the previous list
1222 is discarded. */
1223
1224 void
1225 linux_nat_switch_fork (ptid_t new_ptid)
1226 {
1227 struct lwp_info *lp;
1228
1229 purge_lwp_list (GET_PID (inferior_ptid));
1230
1231 lp = add_lwp (new_ptid);
1232 lp->stopped = 1;
1233
1234 /* This changes the thread's ptid while preserving the gdb thread
1235 num. Also changes the inferior pid, while preserving the
1236 inferior num. */
1237 thread_change_ptid (inferior_ptid, new_ptid);
1238
1239 /* We've just told GDB core that the thread changed target id, but,
1240 in fact, it really is a different thread, with different register
1241 contents. */
1242 registers_changed ();
1243 }
1244
1245 /* Handle the exit of a single thread LP. */
1246
1247 static void
1248 exit_lwp (struct lwp_info *lp)
1249 {
1250 struct thread_info *th = find_thread_ptid (lp->ptid);
1251
1252 if (th)
1253 {
1254 if (print_thread_events)
1255 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1256
1257 delete_thread (lp->ptid);
1258 }
1259
1260 delete_lwp (lp->ptid);
1261 }
1262
/* Return nonzero iff /proc/PID/status reports state `T (stopped)'.
   Other states, including `T (tracing stop)', yield zero, as does a
   missing or unreadable status file.  */

static int
pid_is_stopped (pid_t pid)
{
  char path[100];
  char line[100];
  FILE *status_file;
  int stopped = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", (int) pid);
  status_file = fopen (path, "r");
  if (status_file == NULL)
    return 0;

  /* Scan for the "State:" line and inspect only that one.  */
  while (fgets (line, sizeof (line), status_file) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	if (strstr (line, "T (stopped)") != NULL)
	  stopped = 1;
	break;
      }

  fclose (status_file);
  return stopped;
}
1293
/* Wait for the LWP specified by PTID, which we have just attached to.
   FIRST is nonzero if this is the first (main) LWP of its process.
   On return, *CLONED is set if the LWP had to be waited for with
   __WCLONE, and *SIGNALLED is set if it stopped with a signal other
   than SIGSTOP.  Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* Stopped with some other signal; note it for the caller.  */
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}
1366
/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existance, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  sigset_t prev_mask;
  int lwpid;

  gdb_assert (is_lwp (ptid));

  block_child_signals (&prev_mask);

  lp = find_lwp_pid (ptid);
  lwpid = GET_LWP (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lwpid != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork_flag)
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  restore_child_signals_mask (&prev_mask);
		  return 0;
		}
	      else
		{
		  int new_pid;
		  int status;

		  /* See if we've got a stop for this new child
		     pending.  If so, we're already attached.  */
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (WIFSTOPPED (status))
			add_to_pid_list (&stopped_pids, lwpid, status);

		      restore_child_signals_mask (&prev_mask);
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  restore_child_signals_mask (&prev_mask);
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	{
	  /* The LWP exited before we managed to stop it; nothing else
	     to do here.  */
	  restore_child_signals_mask (&prev_mask);
	  return 1;
	}

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The LWP stopped with a signal other than our SIGSTOP;
	     keep the status pending so the core sees it later.  */
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
	 already stopped.  Mark it as stopped in the data structure
	 that the GNU/linux ptrace layer uses to keep track of
	 threads.  Note that this won't have already been done since
	 the main thread will have, we assume, been stopped by an
	 attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  lp->last_resume_kind = resume_stop;
  restore_child_signals_mask (&prev_mask);
  return 0;
}
1495
/* Start a new inferior running EXEC_FILE with arguments ALLARGS and
   environment ENV, reporting to the user if FROM_TTY.  Wraps the
   lower layer's to_create_inferior, optionally disabling address
   space randomization around the fork/exec.  */

static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* personality (0xffffffff) queries the current flags without
	 changing them; add ADDR_NO_RANDOMIZE only if not already in
	 effect.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Restore the original personality now that the child has been
	 spawned.  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */
}
1541
/* Attach to the process specified by ARGS, then wait for the initial
   LWP to stop and save its wait status so linux_nat_wait can report
   it later.  Errors out if the process dies before we can stop it.  */

static void
linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (0, NULL);

  linux_ops->to_attach (ops, args, from_tty);

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      /* The process died before we could stop it; tell the user
	 how.  */
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum target_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = target_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 target_signal_to_name (signo),
		 target_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) GET_LWP (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  lp->status = status;

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1611
/* Store in *STATUS a wait status representing LP's pending signal (in
   a form suitable for handing to PTRACE_DETACH), or 0 when there is
   no signal to pass on.  Always returns 0.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum target_signal signo = TARGET_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = TARGET_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = target_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      /* In non-stop mode a stopped thread's last signal lives in its
	 thread object.  */
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      /* Only the thread that last reported an event has a meaningful
	 stop_signal in all-stop mode.  */
      if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == TARGET_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (target_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    target_signal_to_string (signo));
    }

  return 0;
}
1695
/* Iterator callback: detach from LP, delivering any pending signal.
   The LWP whose id equals the overall process id is skipped here; it
   is detached later by linux_nat_detach itself.  Always returns 0 so
   iteration continues.  */

static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC:  Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (GET_LWP (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      errno = 0;
      if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}
1744
/* target_ops to_detach implementation: stop all LWPs, detach each
   non-main LWP (delivering pending signals), then detach the main LWP
   via the lower layer, passing on its pending signal unless ARGS
   names one explicitly.  */

static void
linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = GET_PID (inferior_ptid);

  if (target_can_async_p ())
    linux_nat_async (NULL, 0);

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to sucessfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      args = alloca (8);
      sprintf (args, "%d", (int) WSTOPSIG (status));
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);

      if (non_stop && target_can_async_p ())
	target_async (inferior_event_handler, 0);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}
1803
/* Resume LP, single-stepping it if STEP is nonzero.  LP is left
   alone when it is not stopped, has a pending event to report, or is
   the parent of an in-progress vfork.  */

static void
resume_lwp (struct lwp_info *lp, int step)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));

      if (inf->vfork_child != NULL)
	{
	  /* A vfork parent must stay stopped until the child is
	     done with the shared address space.  */
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (lp->status == 0
	       && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC:  PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
				target_pid_to_str (lp->ptid));

	  linux_ops->to_resume (linux_ops,
				pid_to_ptid (GET_LWP (lp->ptid)),
				step, TARGET_SIGNAL_0);
	  lp->stopped = 0;
	  lp->step = step;
	  /* Stale per-stop state; clear it now that the LWP runs.  */
	  memset (&lp->siginfo, 0, sizeof (lp->siginfo));
	  lp->stopped_by_watchpoint = 0;
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}
1852
/* Iterator callback: resume LP without stepping.  Always returns 0 so
   iteration continues.  */

static int
resume_callback (struct lwp_info *lp, void *data)
{
  resume_lwp (lp, /* step= */ 0);
  return 0;
}
1859
1860 static int
1861 resume_clear_callback (struct lwp_info *lp, void *data)
1862 {
1863 lp->resumed = 0;
1864 lp->last_resume_kind = resume_stop;
1865 return 0;
1866 }
1867
1868 static int
1869 resume_set_callback (struct lwp_info *lp, void *data)
1870 {
1871 lp->resumed = 1;
1872 lp->last_resume_kind = resume_continue;
1873 return 0;
1874 }
1875
/* target_ops to_resume implementation: resume the LWPs selected by
   PTID (minus_one_ptid or a pid-only ptid selects a whole set),
   stepping the event thread if STEP is nonzero and delivering SIGNO
   to it.  If the event thread already has a pending status, no LWP is
   actually resumed; the pending event is left for linux_nat_wait.  */

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum target_signal signo)
{
  sigset_t prev_mask;
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != TARGET_SIGNAL_0
			 ? strsignal (target_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  block_child_signals (&prev_mask);

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->step = step;
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      /* A pending signal that is in pass state is simply delivered
	 with the resumption instead of short-circuiting.  */
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == TARGET_SIGNAL_0);
	  signo = target_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      restore_child_signals_mask (&prev_mask);
      if (target_can_async_p ())
	{
	  target_async (inferior_event_handler, 0);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback.  */
  lp->stopped = 0;

  if (resume_many)
    iterate_over_lwps (ptid, resume_callback, NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  linux_ops->to_resume (linux_ops, ptid, step, signo);
  /* Clear per-stop state now that the LWP is running again.  */
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));
  lp->stopped_by_watchpoint = 0;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			(signo != TARGET_SIGNAL_0
			 ? strsignal (target_signal_to_host (signo)) : "0"));

  restore_child_signals_mask (&prev_mask);
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1988
/* Send signal SIGNO to the LWP with id LWPID.  Prefer the tkill
   syscall (needed to target an individual thread under NPTL); once
   tkill is seen to be unavailable (ENOSYS), fall back to kill and
   remember that for subsequent calls.  */

static int
kill_lwp (int lwpid, int signo)
{
#ifdef HAVE_TKILL_SYSCALL
  static int tkill_unavailable;

  if (!tkill_unavailable)
    {
      int result;

      errno = 0;
      result = syscall (__NR_tkill, lwpid, signo);
      if (errno != ENOSYS)
	return result;

      /* tkill is missing on this kernel; don't try it again.  */
      tkill_unavailable = 1;
    }
#endif

  return kill (lwpid, signo);
}
2016
/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.

   LP is the LWP that reported the syscall trap.  STOPPING is non-zero
   while we are stopping all threads, in which case the event is
   always discarded.  Returns non-zero if the event was consumed and
   the caller should keep waiting; zero if the event should be
   reported to the core (LP->WAITSTATUS then holds it).  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  /* NOTE: reading the syscall number may access registers, hence fill
     a regcache -- see the registers_changed call below.  */
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later when the thread is resumed, a new syscall
	 entry event will be generated.  If we didn't do this (and
	 returned 0), we'd leave a syscall entry pending, and our
	 caller, by using PTRACE_CONT to collect the SIGSTOP, skips
	 the syscall return itself.  Later, when the user re-resumes
	 this LWP, we'd see another syscall entry event and we'd
	 mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring syscall %d "
			    "for LWP %ld (stopping threads), "
			    "resuming with PTRACE_CONT for SIGSTOP\n",
			    syscall_number,
			    GET_LWP (lp->ptid));

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  if (catch_syscall_enabled ())
    {
      /* Always update the entry/return state, even if this particular
	 syscall isn't interesting to the core now.  In async mode,
	 the user could install a new catchpoint for this syscall
	 between syscall enter/return, and we'll need to know to
	 report a syscall return if that happens.  */
      lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			   ? TARGET_WAITKIND_SYSCALL_RETURN
			   : TARGET_WAITKIND_SYSCALL_ENTRY);

      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  ourstatus->kind = lp->syscall_state;
	  ourstatus->value.syscall_number = syscall_number;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHST: stopping for %s of syscall %d"
				" for LWP %ld\n",
				lp->syscall_state
				== TARGET_WAITKIND_SYSCALL_ENTRY
				? "entry" : "return",
				syscall_number,
				GET_LWP (lp->ptid));
	  return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: ignoring %s of syscall %d "
			    "for LWP %ld\n",
			    lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
			    ? "entry" : "return",
			    syscall_number,
			    GET_LWP (lp->ptid));
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter; it has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHST: caught syscall event "
			    "with no syscall catchpoints."
			    " %d for LWP %ld, ignoring\n",
			    syscall_number,
			    GET_LWP (lp->ptid));
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  /* Note that gdbarch_get_syscall_number may access registers, hence
     fill a regcache.  */
  registers_changed ();
  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			lp->step, TARGET_SIGNAL_0);
  return 1;
}
2145
/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  If STOPPING is
   true, the new LWP remains stopped, otherwise it is continued.

   STATUS is the raw waitpid status; the ptrace event code is encoded
   in its upper 16 bits.  On a zero return, LP->WAITSTATUS describes
   the event to report to the core.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status,
			    int stopping)
{
  int pid = GET_LWP (lp->ptid);
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  /* Extract the PTRACE_EVENT_* code from the wait status.  */
  int event = status >> 16;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      /* Retrieve the new child's PID from the event message.  */
      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status,
			    (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (__FILE__, __LINE__,
			    _("wait returned unexpected status 0x%x"), status);
	}

      ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (new_pid);

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (!find_fork_pid (new_pid))
	    add_fork (new_pid);

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
	  linux_enable_event_reporting (pid_to_ptid (new_pid));

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->kind = TARGET_WAITKIND_FORKED;
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->kind = TARGET_WAITKIND_VFORKED;
      else
	{
	  /* A clone event: record the new LWP ourselves and do not
	     report it to the core.  */
	  struct lwp_info *new_lp;

	  ourstatus->kind = TARGET_WAITKIND_IGNORE;

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got clone event "
				"from LWP %d, new child is LWP %ld\n",
				pid, new_pid);

	  new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
	  new_lp->cloned = 1;
	  new_lp->stopped = 1;

	  if (WSTOPSIG (status) != SIGSTOP)
	    {
	      /* This can happen if someone starts sending signals to
		 the new thread before it gets a chance to run, which
		 have a lower number than SIGSTOP (e.g. SIGUSR1).
		 This is an unlikely case, and harder to handle for
		 fork / vfork than for clone, so we do not try - but
		 we handle it for clone events here.  We'll send
		 the other signal on to the thread below.  */

	      new_lp->signalled = 1;
	    }
	  else
	    {
	      struct thread_info *tp;

	      /* When we stop for an event in some other thread, and
		 pull the thread list just as this thread has cloned,
		 we'll have seen the new thread in the thread_db list
		 before handling the CLONE event (glibc's
		 pthread_create adds the new thread to the thread list
		 before clone'ing, and has the kernel fill in the
		 thread's tid on the clone call with
		 CLONE_PARENT_SETTID).  If that happened, and the core
		 had requested the new thread to stop, we'll have
		 killed it with SIGSTOP.  But since SIGSTOP is not an
		 RT signal, it can only be queued once.  We need to be
		 careful to not resume the LWP if we wanted it to
		 stop.  In that case, we'll leave the SIGSTOP pending.
		 It will later be reported as TARGET_SIGNAL_0.  */
	      tp = find_thread_ptid (new_lp->ptid);
	      if (tp != NULL && tp->stop_requested)
		new_lp->last_resume_kind = resume_stop;
	      else
		status = 0;
	    }

	  if (non_stop)
	    {
	      /* Add the new thread to GDB's lists as soon as possible
		 so that:

		 1) the frontend doesn't have to wait for a stop to
		 display them, and,

		 2) we tag it with the correct running state.  */

	      /* If the thread_db layer is active, let it know about
		 this new thread, and add it to GDB's list.  */
	      if (!thread_db_attach_lwp (new_lp->ptid))
		{
		  /* We're not using thread_db.  Add it to GDB's
		     list.  */
		  target_post_attach (GET_LWP (new_lp->ptid));
		  add_thread (new_lp->ptid);
		}

	      if (!stopping)
		{
		  set_running (new_lp->ptid, 1);
		  set_executing (new_lp->ptid, 1);
		  /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
		     resume_stop.  */
		  new_lp->last_resume_kind = resume_continue;
		}
	    }

	  if (status != 0)
	    {
	      /* We created NEW_LP so it cannot yet contain STATUS.  */
	      gdb_assert (new_lp->status == 0);

	      /* Save the wait status to report later.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LHEW: waitpid of new LWP %ld, "
				    "saving status %s\n",
				    (long) GET_LWP (new_lp->ptid),
				    status_to_str (status));
	      new_lp->status = status;
	    }

	  /* Note the need to use the low target ops to resume, to
	     handle resuming with PT_SYSCALL if we have syscall
	     catchpoints.  */
	  if (!stopping)
	    {
	      new_lp->resumed = 1;

	      if (status == 0)
		{
		  gdb_assert (new_lp->last_resume_kind == resume_continue);
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LHEW: resuming new LWP %ld\n",
					GET_LWP (new_lp->ptid));
		  linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
					0, TARGET_SIGNAL_0);
		  new_lp->stopped = 0;
		}
	    }

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: resuming parent LWP %d\n", pid);
	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
				0, TARGET_SIGNAL_0);

	  /* The clone event has been fully handled; keep waiting.  */
	  return 1;
	}

      /* Fork/vfork: report the event to the core.  */
      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got exec event from LWP %ld\n",
			    GET_LWP (lp->ptid));

      ourstatus->kind = TARGET_WAITKIND_EXECD;
      /* The core takes ownership of this string and frees it.  */
      ourstatus->value.execd_pathname
	= xstrdup (linux_child_pid_to_exec_file (pid));

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      if (current_inferior ()->waiting_for_vfork_done)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LHEW: Got expected PTRACE_EVENT_"
				"VFORK_DONE from LWP %ld: stopping\n",
				GET_LWP (lp->ptid));

	  ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
	  return 0;
	}

      /* Nobody is waiting for this vfork to finish; just continue
	 the LWP and keep waiting.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LHEW: Got PTRACE_EVENT_VFORK_DONE "
			    "from LWP %ld: resuming\n",
			    GET_LWP (lp->ptid));
      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
      return 1;
    }

  internal_error (__FILE__, __LINE__,
		  _("unknown ptrace event %d"), event);
}
2386
/* Return non-zero if the LWP with id LWP is a zombie, according to
   the "State:" line of its /proc/<lwp>/status file.  Returns zero if
   the file cannot be opened (with a warning) or the state is
   anything else.  */

static int
linux_lwp_is_zombie (long lwp)
{
  char buffer[MAXPATHLEN];
  FILE *procfile;
  int found_state_line = 0;
  int is_zombie = 0;

  xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
  procfile = fopen (buffer, "r");
  if (procfile == NULL)
    {
      warning (_("unable to open /proc file '%s'"), buffer);
      return 0;
    }

  /* Scan for the "State:" line.  BUFFER is reused to hold each line
     read from the file.  */
  while (fgets (buffer, sizeof (buffer), procfile) != NULL)
    if (strncmp (buffer, "State:", 6) == 0)
      {
	found_state_line = 1;
	break;
      }

  if (found_state_line)
    is_zombie = (strcmp (buffer, "State:\tZ (zombie)\n") == 0);

  fclose (procfile);
  return is_zombie;
}
2417
/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited (in which case the LWP has also been deleted from our
   lists).  On entry LP must be running with no status pending;
   SIGCHLD is temporarily blocked so sigsuspend below cannot race.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
	 was right and we should just call sigsuspend.  */

      /* Try first without __WCLONE, then with it; a cloned LWP is
	 only reported with __WCLONE, and we may not know which kind
	 this LWP is.  */
      pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
      if (pid == -1 && errno == ECHILD)
	pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because, for some vendor 2.4 kernels with NPTL
	     support backported, there won't be an exit event unless
	     it is the main thread.  2.6 kernels will report an exit
	     event for each thread that exits, as expected.  */
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
				target_pid_to_str (lp->ptid));
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 Thread group leader may have exited in which case we'll lock up in
	 waitpid if there are other threads, even if they are all zombies too.
	 Basically, we're not supposed to use waitpid this way.
	 __WCLONE is not applicable for the leader so we can't use that.
	 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
	 process; it gets ESRCH both for the zombie and for running processes.

	 As a workaround, check if we're waiting for the thread group leader and
	 if it's a zombie, and avoid calling waitpid if it is.

	 This is racy, what if the tgl becomes a zombie right after we check?
	 Therefore always use WNOHANG with sigsuspend - it is equivalent to
	 waiting waitpid but the linux_lwp_is_zombie is safe this way.  */

      /* The thread group leader is the LWP whose id equals the
	 process id.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
	  && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"WL: Thread group leader %s vanished.\n",
				target_pid_to_str (lp->ptid));
	  break;
	}

      /* Wait for next SIGCHLD and try again.  This may let SIGCHLD handlers
	 get invoked despite our caller had them intentionally blocked by
	 block_child_signals.  This is sensitive only to the loop of
	 linux_nat_wait_1 and there if we get called my_waitpid gets called
	 again before it gets to sigsuspend so we can safely let the handlers
	 get executed here.  */

      sigsuspend (&suspend_mask);
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "WL: waitpid %s received %s\n",
			      target_pid_to_str (lp->ptid),
			      status_to_str (status));
	}

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  thread_dead = 1;
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
				target_pid_to_str (lp->ptid));
	}
    }

  if (thread_dead)
    {
      /* Remove the LWP from our bookkeeping and report "exited".  */
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      /* If the syscall trap was consumed, wait for the real stop.  */
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "WL: Handling extended status 0x%06x\n",
			    status);
      /* If the extended event was consumed, wait for the real stop.  */
      if (linux_handle_extended_wait (lp, status, 1))
	return wait_lwp (lp);
    }

  return status;
}
2551
/* Save the most recent siginfo for LP.  This is currently only called
   for SIGTRAP; some ports use the si_addr field for
   target_stopped_data_address.  In the future, it may also be used to
   restore the siginfo of requeued signals.  */

static void
save_siginfo (struct lwp_info *lp)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
	  (PTRACE_TYPE_ARG3) 0, &lp->siginfo);

  /* If the siginfo could not be fetched, clear the cached copy so
     that stale data is never consulted later.  */
  if (errno != 0)
    memset (&lp->siginfo, 0, sizeof (lp->siginfo));
}
2567
2568 /* Send a SIGSTOP to LP. */
2569
2570 static int
2571 stop_callback (struct lwp_info *lp, void *data)
2572 {
2573 if (!lp->stopped && !lp->signalled)
2574 {
2575 int ret;
2576
2577 if (debug_linux_nat)
2578 {
2579 fprintf_unfiltered (gdb_stdlog,
2580 "SC: kill %s **<SIGSTOP>**\n",
2581 target_pid_to_str (lp->ptid));
2582 }
2583 errno = 0;
2584 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2585 if (debug_linux_nat)
2586 {
2587 fprintf_unfiltered (gdb_stdlog,
2588 "SC: lwp kill %d %s\n",
2589 ret,
2590 errno ? safe_strerror (errno) : "ERRNO-OK");
2591 }
2592
2593 lp->signalled = 1;
2594 gdb_assert (lp->status == 0);
2595 }
2596
2597 return 0;
2598 }
2599
2600 /* Return non-zero if LWP PID has a pending SIGINT. */
2601
2602 static int
2603 linux_nat_has_pending_sigint (int pid)
2604 {
2605 sigset_t pending, blocked, ignored;
2606
2607 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2608
2609 if (sigismember (&pending, SIGINT)
2610 && !sigismember (&ignored, SIGINT))
2611 return 1;
2612
2613 return 0;
2614 }
2615
2616 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2617
2618 static int
2619 set_ignore_sigint (struct lwp_info *lp, void *data)
2620 {
2621 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2622 flag to consume the next one. */
2623 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2624 && WSTOPSIG (lp->status) == SIGINT)
2625 lp->status = 0;
2626 else
2627 lp->ignore_sigint = 1;
2628
2629 return 0;
2630 }
2631
2632 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2633 This function is called after we know the LWP has stopped; if the LWP
2634 stopped before the expected SIGINT was delivered, then it will never have
2635 arrived. Also, if the signal was delivered to a shared queue and consumed
2636 by a different thread, it will never be delivered to this LWP. */
2637
2638 static void
2639 maybe_clear_ignore_sigint (struct lwp_info *lp)
2640 {
2641 if (!lp->ignore_sigint)
2642 return;
2643
2644 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2645 {
2646 if (debug_linux_nat)
2647 fprintf_unfiltered (gdb_stdlog,
2648 "MCIS: Clearing bogus flag for %s\n",
2649 target_pid_to_str (lp->ptid));
2650 lp->ignore_sigint = 0;
2651 }
2652 }
2653
/* Fetch the possible triggered data watchpoint info and store it in
   LP.

   On some archs, like x86, that use debug registers to set
   watchpoints, it's possible that the way to know which watched
   address trapped, is to check the register that is used to select
   which address to watch.  Problem is, between setting the watchpoint
   and reading back which data address trapped, the user may change
   the set of watchpoints, and, as a consequence, GDB changes the
   debug registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we have the cached data we can rely on.  */

static void
save_sigtrap (struct lwp_info *lp)
{
  struct cleanup *old_chain;

  /* If the low target has no watchpoint support at all, there is
     nothing to cache.  */
  if (linux_ops->to_stopped_by_watchpoint == NULL)
    {
      lp->stopped_by_watchpoint = 0;
      return;
    }

  /* The to_stopped_by_watchpoint hook implicitly operates on
     inferior_ptid, so temporarily point it at LP.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = lp->ptid;

  lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();

  if (lp->stopped_by_watchpoint)
    {
      if (linux_ops->to_stopped_data_address != NULL)
	lp->stopped_data_address_p =
	  linux_ops->to_stopped_data_address (&current_target,
					      &lp->stopped_data_address);
      else
	lp->stopped_data_address_p = 0;
    }

  /* Restore the original inferior_ptid.  */
  do_cleanups (old_chain);
}
2697
2698 /* See save_sigtrap. */
2699
2700 static int
2701 linux_nat_stopped_by_watchpoint (void)
2702 {
2703 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2704
2705 gdb_assert (lp != NULL);
2706
2707 return lp->stopped_by_watchpoint;
2708 }
2709
2710 static int
2711 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2712 {
2713 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2714
2715 gdb_assert (lp != NULL);
2716
2717 *addr_p = lp->stopped_data_address;
2718
2719 return lp->stopped_data_address_p;
2720 }
2721
/* Commonly any breakpoint / watchpoint generate only SIGTRAP.  Return
   non-zero if STATUS is a stop with SIGTRAP.  */

static int
sigtrap_is_event (int status)
{
  if (!WIFSTOPPED (status))
    return 0;
  return WSTOPSIG (status) == SIGTRAP;
}

/* SIGTRAP-like events recognizer.  Ports may override this via
   linux_nat_set_status_is_event.  */

static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2733
2734 /* Check for SIGTRAP-like events in LP. */
2735
2736 static int
2737 linux_nat_lp_status_is_event (struct lwp_info *lp)
2738 {
2739 /* We check for lp->waitstatus in addition to lp->status, because we can
2740 have pending process exits recorded in lp->status
2741 and W_EXITCODE(0,0) == 0. We should probably have an additional
2742 lp->status_p flag. */
2743
2744 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2745 && linux_nat_status_is_event (lp->status));
2746 }
2747
/* Set alternative SIGTRAP-like events recognizer.  If
   breakpoint_inserted_here_p there then gdbarch_decr_pc_after_break will be
   applied.  */

void
linux_nat_set_status_is_event (struct target_ops *t,
			       int (*status_is_event) (int status))
{
  /* T is unused; the recognizer hook is global to this file.  */
  linux_nat_status_is_event = status_is_event;
}
2758
/* Wait until LP is stopped.  Any non-SIGSTOP event collected on the
   way is either held in LP->STATUS for later reporting, or re-queued
   to the LWP with kill_lwp.  Always returns 0 (an iterate_over_lwps
   callback).  */

static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));

  /* If this is a vfork parent, bail out, it is not going to report
     any SIGSTOP until the vfork is done with.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      /* Zero status means the LWP exited; nothing more to do.  */
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  /* This is the SIGINT that set_ignore_sigint asked us to
	     discard; consume it and keep waiting for the SIGSTOP.  */
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s) "
				"(discarding SIGINT)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, NULL);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  if (linux_nat_status_is_event (status))
	    {
	      /* If a LWP other than the LWP that we're reporting an
		 event for has hit a GDB breakpoint (as opposed to
		 some random trap signal), then just arrange for it to
		 hit it again later.  We don't keep the SIGTRAP status
		 and don't forward the SIGTRAP signal to the LWP.  We
		 will handle the current event, eventually we will
		 resume all LWPs, and this one will get its breakpoint
		 trap again.

		 If we do not do this, then we run the risk that the
		 user will delete or disable the breakpoint, but the
		 thread will have already tripped on it.  */

	      /* Save the trap's siginfo in case we need it later.  */
	      save_siginfo (lp);

	      save_sigtrap (lp);

	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "PTRACE_CONT %s, 0, 0 (%s)\n",
				      target_pid_to_str (lp->ptid),
				      errno ? safe_strerror (errno) : "OK");

		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Candidate SIGTRAP event in %s\n",
				      target_pid_to_str (lp->ptid));
		}
	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP).  */
	      stop_wait_callback (lp, NULL);

	      /* Hold the SIGTRAP for handling by linux_nat_wait.  If
		 there's another event, throw it back into the
		 queue.  Note the SIGTRAP takes priority: whatever the
		 recursive call stored in lp->status is re-queued.  */
	      if (lp->status)
		{
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"SWC: kill %s, %s\n",
					target_pid_to_str (lp->ptid),
					status_to_str ((int) status));
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
		}

	      /* Save the sigtrap event.  */
	      lp->status = status;
	      return 0;
	    }
	  else
	    {
	      /* The thread was stopped with a signal other than
		 SIGSTOP, and didn't accidentally trip a breakpoint.  */

	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Pending event %s in %s\n",
				      status_to_str ((int) status),
				      target_pid_to_str (lp->ptid));
		}
	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
				    target_pid_to_str (lp->ptid),
				    errno ? safe_strerror (errno) : "OK");

	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP).  */
	      stop_wait_callback (lp, NULL);

	      /* If the lp->status field is still empty, use it to
		 hold this event.  If not, then this event must be
		 returned to the event queue of the LWP.  Unlike the
		 SIGTRAP case above, the earlier event keeps priority
		 here.  */
	      if (lp->status)
		{
		  if (debug_linux_nat)
		    {
		      fprintf_unfiltered (gdb_stdlog,
					  "SWC: kill %s, %s\n",
					  target_pid_to_str (lp->ptid),
					  status_to_str ((int) status));
		    }
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
		}
	      else
		lp->status = status;
	      return 0;
	    }
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */
	  lp->stopped = 1;
	  lp->signalled = 0;
	}
    }

  return 0;
}
2910
2911 /* Return non-zero if LP has a wait status pending. */
2912
2913 static int
2914 status_callback (struct lwp_info *lp, void *data)
2915 {
2916 /* Only report a pending wait status if we pretend that this has
2917 indeed been resumed. */
2918 if (!lp->resumed)
2919 return 0;
2920
2921 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2922 {
2923 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2924 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2925 0', so a clean process exit can not be stored pending in
2926 lp->status, it is indistinguishable from
2927 no-pending-status. */
2928 return 1;
2929 }
2930
2931 if (lp->status != 0)
2932 return 1;
2933
2934 return 0;
2935 }
2936
2937 /* Return non-zero if LP isn't stopped. */
2938
2939 static int
2940 running_callback (struct lwp_info *lp, void *data)
2941 {
2942 return (!lp->stopped
2943 || ((lp->status != 0
2944 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2945 && lp->resumed));
2946 }
2947
2948 /* Count the LWP's that have had events. */
2949
2950 static int
2951 count_events_callback (struct lwp_info *lp, void *data)
2952 {
2953 int *count = data;
2954
2955 gdb_assert (count != NULL);
2956
2957 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2958 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2959 (*count)++;
2960
2961 return 0;
2962 }
2963
2964 /* Select the LWP (if any) that is currently being single-stepped. */
2965
2966 static int
2967 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2968 {
2969 if (lp->last_resume_kind == resume_step
2970 && lp->status != 0)
2971 return 1;
2972 else
2973 return 0;
2974 }
2975
2976 /* Select the Nth LWP that has had a SIGTRAP event. */
2977
2978 static int
2979 select_event_lwp_callback (struct lwp_info *lp, void *data)
2980 {
2981 int *selector = data;
2982
2983 gdb_assert (selector != NULL);
2984
2985 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2986 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2987 if ((*selector)-- == 0)
2988 return 1;
2989
2990 return 0;
2991 }
2992
2993 static int
2994 cancel_breakpoint (struct lwp_info *lp)
2995 {
2996 /* Arrange for a breakpoint to be hit again later. We don't keep
2997 the SIGTRAP status and don't forward the SIGTRAP signal to the
2998 LWP. We will handle the current event, eventually we will resume
2999 this LWP, and this breakpoint will trap again.
3000
3001 If we do not do this, then we run the risk that the user will
3002 delete or disable the breakpoint, but the LWP will have already
3003 tripped on it. */
3004
3005 struct regcache *regcache = get_thread_regcache (lp->ptid);
3006 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3007 CORE_ADDR pc;
3008
3009 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3010 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3011 {
3012 if (debug_linux_nat)
3013 fprintf_unfiltered (gdb_stdlog,
3014 "CB: Push back breakpoint for %s\n",
3015 target_pid_to_str (lp->ptid));
3016
3017 /* Back up the PC if necessary. */
3018 if (gdbarch_decr_pc_after_break (gdbarch))
3019 regcache_write_pc (regcache, pc);
3020
3021 return 1;
3022 }
3023 return 0;
3024 }
3025
3026 static int
3027 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3028 {
3029 struct lwp_info *event_lp = data;
3030
3031 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3032 if (lp == event_lp)
3033 return 0;
3034
3035 /* If a LWP other than the LWP that we're reporting an event for has
3036 hit a GDB breakpoint (as opposed to some random trap signal),
3037 then just arrange for it to hit it again later. We don't keep
3038 the SIGTRAP status and don't forward the SIGTRAP signal to the
3039 LWP. We will handle the current event, eventually we will resume
3040 all LWPs, and this one will get its breakpoint trap again.
3041
3042 If we do not do this, then we run the risk that the user will
3043 delete or disable the breakpoint, but the LWP will have already
3044 tripped on it. */
3045
3046 if (linux_nat_lp_status_is_event (lp)
3047 && cancel_breakpoint (lp))
3048 /* Throw away the SIGTRAP. */
3049 lp->status = 0;
3050
3051 return 0;
3052 }
3053
/* Select one LWP out of those that have events pending, restricted to
   LWPs matching FILTER.  On entry *ORIG_LP/*STATUS describe the event
   that was originally reported; on exit they describe the chosen
   event, and the chosen LWP's pending status field is cleared (the
   status now lives in *STATUS).  Preference is given to a
   single-stepping LWP; otherwise one is picked at random from those
   with SIGTRAP events, to avoid starving any thread.  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* Give preference to any LWP that is being single-stepped.  */
  event_lp = iterate_over_lwps (filter,
				select_singlestep_lwp_callback, NULL);
  if (event_lp != NULL)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Select single-step %s\n",
			    target_pid_to_str (event_lp->ptid));
    }
  else
    {
      /* No single-stepping LWP.  Select one at random, out of those
	 which have had SIGTRAP events.  */

      /* First see how many SIGTRAP events we have.  */
      iterate_over_lwps (filter, count_events_callback, &num_events);

      /* Now randomly pick a LWP out of those that have had a SIGTRAP.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));

      if (debug_linux_nat && num_events > 1)
	fprintf_unfiltered (gdb_stdlog,
			    "SEL: Found %d SIGTRAP events, selecting #%d\n",
			    num_events, random_selector);

      event_lp = iterate_over_lwps (filter,
				    select_event_lwp_callback,
				    &random_selector);
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  Note *ORIG_LP may have
     been switched above; the status now lives in *STATUS only.  */
  (*orig_lp)->status = 0;
}
3108
3109 /* Return non-zero if LP has been resumed. */
3110
3111 static int
3112 resumed_callback (struct lwp_info *lp, void *data)
3113 {
3114 return lp->resumed;
3115 }
3116
/* Stop an active thread, verify it still exists, then resume it.  If
   the thread ends up with a pending status, then it is not resumed,
   and *DATA (really a pointer to int), is set.  Iterator callback;
   always returns 0 so iteration continues over all LWPs.  */

static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
  int *new_pending_p = data;

  if (!lp->stopped)
    {
      /* Remember the ptid before stopping; the LWP may die while we
	 wait for it to stop, so we must re-look it up afterwards.  */
      ptid_t ptid = lp->ptid;

      stop_callback (lp, NULL);
      stop_wait_callback (lp, NULL);

      /* Resume if the lwp still exists, and the core wanted it
	 running.  */
      lp = find_lwp_pid (ptid);
      if (lp != NULL)
	{
	  if (lp->last_resume_kind == resume_stop
	      && lp->status == 0)
	    {
	      /* The core wanted the LWP to stop.  Even if it stopped
		 cleanly (with SIGSTOP), leave the event pending.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: core wanted LWP %ld stopped "
				    "(leaving SIGSTOP pending)\n",
				    GET_LWP (lp->ptid));
	      lp->status = W_STOPCODE (SIGSTOP);
	    }

	  if (lp->status == 0)
	    {
	      /* No event ended up pending while stopping; put the
		 LWP back in motion.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: re-resuming LWP %ld\n",
				    GET_LWP (lp->ptid));
	      resume_lwp (lp, lp->step);
	    }
	  else
	    {
	      /* Stopping the LWP surfaced an event; leave it pending
		 and tell the caller via *NEW_PENDING_P.  */
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SARC: not re-resuming LWP %ld "
				    "(has pending)\n",
				    GET_LWP (lp->ptid));
	      if (new_pending_p)
		*new_pending_p = 1;
	    }
	}
    }
  return 0;
}
3173
/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.  If we stop
   all lwps temporarily, we may end up with new pending events in some
   other lwp.  In that case set *NEW_PENDING_P to true.

   LWPID and STATUS are the raw results from waitpid; events that are
   internal bookkeeping (delayed SIGSTOPs/SIGINTs, LWP exits that
   don't end the program, ptrace extended events we handle here) are
   consumed and NULL is returned.  */

static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
{
  struct lwp_info *lp;

  *new_pending_p = 0;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.

     But note the case of a non-leader thread exec'ing after the
     leader having exited, and gone from our lists.  The non-leader
     thread changes its tid to the tgid.  */

  if (WIFSTOPPED (status) && lp == NULL
      && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
    {
      /* A multi-thread exec after we had seen the leader exiting.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Re-adding thread group leader LWP %d.\n",
			    lwpid);

      lp = add_lwp (BUILD_LWP (lwpid, lwpid));
      lp->stopped = 1;
      lp->resumed = 1;
      add_thread (lp->ptid);
    }

  if (WIFSTOPPED (status) && !lp)
    {
      /* An unknown LWP stopped: stash the status so that a later
	 fork/vfork/clone event can pair it up with its parent.  */
      add_to_pid_list (&stopped_pids, lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* From this point on LP is necessarily non-NULL: the two early
     returns above cover every way it could still be missing.  */

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return NULL;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  if (linux_nat_status_is_event (status))
    {
      /* Save the trap's siginfo in case we need it later.  */
      save_siginfo (lp);

      save_sigtrap (lp);
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status))
      && num_lwps (GET_PID (lp->ptid)) > 1)
    {
      /* If this is the main thread, we must stop all threads and verify
	 if they are still alive.  This is because in the nptl thread model
	 on Linux 2.4, there is no signal issued for exiting LWPs
	 other than the main thread.  We only get the main thread exit
	 signal once all child threads have already exited.  If we
	 stop all the threads and use the stop_wait_callback to check
	 if they have exited we can determine whether this signal
	 should be ignored or whether it means the end of the debugged
	 application, regardless of which threading model is being
	 used.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
	{
	  lp->stopped = 1;
	  iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
			     stop_and_resume_callback, new_pending_p);
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      if (num_lwps (GET_PID (lp->ptid)) > 1)
	{
	  /* If there is at least one more LWP, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return NULL;
	}
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
    {
      ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      lp->signalled = 0;

      if (lp->last_resume_kind != resume_stop)
	{
	  /* This is a delayed SIGSTOP.  */

	  registers_changed ();

	  linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid));

	  lp->stopped = 0;
	  gdb_assert (lp->resumed);

	  /* Discard the event.  */
	  return NULL;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGINT caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      registers_changed ();
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGINT)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  lp->status = status;
  return lp;
}
3388
/* Detect zombie thread group leaders, and "exit" them.  We can't reap
   their exits until all other threads in the group have exited.
   Called from the wait loop when waitpid returns nothing, to make
   progress when the leader is dead but unreapable.  */

static void
check_zombie_leaders (void)
{
  struct inferior *inf;

  ALL_INFERIORS (inf)
    {
      struct lwp_info *leader_lp;

      /* Skip inferiors not bound to a live process.  */
      if (inf->pid == 0)
	continue;

      /* The leader LWP shares the inferior's pid.  */
      leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
      if (leader_lp != NULL
	  /* Check if there are other threads in the group, as we may
	     have raced with the inferior simply exiting.  */
	  && num_lwps (inf->pid) > 1
	  && linux_lwp_is_zombie (inf->pid))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"CZL: Thread group leader %d zombie "
				"(it exited, or another thread execd).\n",
				inf->pid);

	  /* A leader zombie can mean one of two things:

	     - It exited, and there's an exit status pending
	     available, or only the leader exited (not the whole
	     program).  In the latter case, we can't waitpid the
	     leader's exit status until all other threads are gone.

	     - There are 3 or more threads in the group, and a thread
	     other than the leader exec'd.  On an exec, the Linux
	     kernel destroys all other threads (except the execing
	     one) in the thread group, and resets the execing thread's
	     tid to the tgid.  No exit notification is sent for the
	     execing thread -- from the ptracer's perspective, it
	     appears as though the execing thread just vanishes.
	     Until we reap all other threads except the leader and the
	     execing thread, the leader will be zombie, and the
	     execing thread will be in `D (disc sleep)'.  As soon as
	     all other threads are reaped, the execing thread changes
	     it's tid to the tgid, and the previous (zombie) leader
	     vanishes, giving place to the "new" leader.  We could try
	     distinguishing the exit and exec cases, by waiting once
	     more, and seeing if something comes out, but it doesn't
	     sound useful.  The previous leader _does_ go away, and
	     we'll re-add the new one once we see the exec event
	     (which is just the same as what would happen if the
	     previous leader did exit voluntarily before some other
	     thread execs).  */

	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"CZL: Thread group leader %d vanished.\n",
				inf->pid);
	  exit_lwp (leader_lp);
	}
    }
}
3453
3454 static ptid_t
3455 linux_nat_wait_1 (struct target_ops *ops,
3456 ptid_t ptid, struct target_waitstatus *ourstatus,
3457 int target_options)
3458 {
3459 static sigset_t prev_mask;
3460 enum resume_kind last_resume_kind;
3461 struct lwp_info *lp;
3462 int status;
3463
3464 if (debug_linux_nat)
3465 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3466
3467 /* The first time we get here after starting a new inferior, we may
3468 not have added it to the LWP list yet - this is the earliest
3469 moment at which we know its PID. */
3470 if (ptid_is_pid (inferior_ptid))
3471 {
3472 /* Upgrade the main thread's ptid. */
3473 thread_change_ptid (inferior_ptid,
3474 BUILD_LWP (GET_PID (inferior_ptid),
3475 GET_PID (inferior_ptid)));
3476
3477 lp = add_lwp (inferior_ptid);
3478 lp->resumed = 1;
3479 }
3480
3481 /* Make sure SIGCHLD is blocked. */
3482 block_child_signals (&prev_mask);
3483
3484 retry:
3485 lp = NULL;
3486 status = 0;
3487
3488 /* First check if there is a LWP with a wait status pending. */
3489 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3490 {
3491 /* Any LWP in the PTID group that's been resumed will do. */
3492 lp = iterate_over_lwps (ptid, status_callback, NULL);
3493 if (lp)
3494 {
3495 if (debug_linux_nat && lp->status)
3496 fprintf_unfiltered (gdb_stdlog,
3497 "LLW: Using pending wait status %s for %s.\n",
3498 status_to_str (lp->status),
3499 target_pid_to_str (lp->ptid));
3500 }
3501 }
3502 else if (is_lwp (ptid))
3503 {
3504 if (debug_linux_nat)
3505 fprintf_unfiltered (gdb_stdlog,
3506 "LLW: Waiting for specific LWP %s.\n",
3507 target_pid_to_str (ptid));
3508
3509 /* We have a specific LWP to check. */
3510 lp = find_lwp_pid (ptid);
3511 gdb_assert (lp);
3512
3513 if (debug_linux_nat && lp->status)
3514 fprintf_unfiltered (gdb_stdlog,
3515 "LLW: Using pending wait status %s for %s.\n",
3516 status_to_str (lp->status),
3517 target_pid_to_str (lp->ptid));
3518
3519 /* We check for lp->waitstatus in addition to lp->status,
3520 because we can have pending process exits recorded in
3521 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3522 an additional lp->status_p flag. */
3523 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3524 lp = NULL;
3525 }
3526
3527 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
3528 {
3529 /* A pending SIGSTOP may interfere with the normal stream of
3530 events. In a typical case where interference is a problem,
3531 we have a SIGSTOP signal pending for LWP A while
3532 single-stepping it, encounter an event in LWP B, and take the
3533 pending SIGSTOP while trying to stop LWP A. After processing
3534 the event in LWP B, LWP A is continued, and we'll never see
3535 the SIGTRAP associated with the last time we were
3536 single-stepping LWP A. */
3537
3538 /* Resume the thread. It should halt immediately returning the
3539 pending SIGSTOP. */
3540 registers_changed ();
3541 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3542 lp->step, TARGET_SIGNAL_0);
3543 if (debug_linux_nat)
3544 fprintf_unfiltered (gdb_stdlog,
3545 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3546 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3547 target_pid_to_str (lp->ptid));
3548 lp->stopped = 0;
3549 gdb_assert (lp->resumed);
3550
3551 /* Catch the pending SIGSTOP. */
3552 status = lp->status;
3553 lp->status = 0;
3554
3555 stop_wait_callback (lp, NULL);
3556
3557 /* If the lp->status field isn't empty, we caught another signal
3558 while flushing the SIGSTOP. Return it back to the event
3559 queue of the LWP, as we already have an event to handle. */
3560 if (lp->status)
3561 {
3562 if (debug_linux_nat)
3563 fprintf_unfiltered (gdb_stdlog,
3564 "LLW: kill %s, %s\n",
3565 target_pid_to_str (lp->ptid),
3566 status_to_str (lp->status));
3567 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3568 }
3569
3570 lp->status = status;
3571 }
3572
3573 if (!target_can_async_p ())
3574 {
3575 /* Causes SIGINT to be passed on to the attached process. */
3576 set_sigint_trap ();
3577 }
3578
3579 /* But if we don't find a pending event, we'll have to wait. */
3580
3581 while (lp == NULL)
3582 {
3583 pid_t lwpid;
3584
3585 /* Always use -1 and WNOHANG, due to couple of a kernel/ptrace
3586 quirks:
3587
3588 - If the thread group leader exits while other threads in the
3589 thread group still exist, waitpid(TGID, ...) hangs. That
3590 waitpid won't return an exit status until the other threads
3591 in the group are reapped.
3592
3593 - When a non-leader thread execs, that thread just vanishes
3594 without reporting an exit (so we'd hang if we waited for it
3595 explicitly in that case). The exec event is reported to
3596 the TGID pid. */
3597
3598 errno = 0;
3599 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3600 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3601 lwpid = my_waitpid (-1, &status, WNOHANG);
3602
3603 if (debug_linux_nat)
3604 fprintf_unfiltered (gdb_stdlog,
3605 "LNW: waitpid(-1, ...) returned %d, %s\n",
3606 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3607
3608 if (lwpid > 0)
3609 {
3610 /* If this is true, then we paused LWPs momentarily, and may
3611 now have pending events to handle. */
3612 int new_pending;
3613
3614 if (debug_linux_nat)
3615 {
3616 fprintf_unfiltered (gdb_stdlog,
3617 "LLW: waitpid %ld received %s\n",
3618 (long) lwpid, status_to_str (status));
3619 }
3620
3621 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3622
3623 /* STATUS is now no longer valid, use LP->STATUS instead. */
3624 status = 0;
3625
3626 if (lp && !ptid_match (lp->ptid, ptid))
3627 {
3628 gdb_assert (lp->resumed);
3629
3630 if (debug_linux_nat)
3631 fprintf (stderr,
3632 "LWP %ld got an event %06x, leaving pending.\n",
3633 ptid_get_lwp (lp->ptid), lp->status);
3634
3635 if (WIFSTOPPED (lp->status))
3636 {
3637 if (WSTOPSIG (lp->status) != SIGSTOP)
3638 {
3639 /* Cancel breakpoint hits. The breakpoint may
3640 be removed before we fetch events from this
3641 process to report to the core. It is best
3642 not to assume the moribund breakpoints
3643 heuristic always handles these cases --- it
3644 could be too many events go through to the
3645 core before this one is handled. All-stop
3646 always cancels breakpoint hits in all
3647 threads. */
3648 if (non_stop
3649 && linux_nat_lp_status_is_event (lp)
3650 && cancel_breakpoint (lp))
3651 {
3652 /* Throw away the SIGTRAP. */
3653 lp->status = 0;
3654
3655 if (debug_linux_nat)
3656 fprintf (stderr,
3657 "LLW: LWP %ld hit a breakpoint while"
3658 " waiting for another process;"
3659 " cancelled it\n",
3660 ptid_get_lwp (lp->ptid));
3661 }
3662 lp->stopped = 1;
3663 }
3664 else
3665 {
3666 lp->stopped = 1;
3667 lp->signalled = 0;
3668 }
3669 }
3670 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3671 {
3672 if (debug_linux_nat)
3673 fprintf (stderr,
3674 "Process %ld exited while stopping LWPs\n",
3675 ptid_get_lwp (lp->ptid));
3676
3677 /* This was the last lwp in the process. Since
3678 events are serialized to GDB core, and we can't
3679 report this one right now, but GDB core and the
3680 other target layers will want to be notified
3681 about the exit code/signal, leave the status
3682 pending for the next time we're able to report
3683 it. */
3684
3685 /* Prevent trying to stop this thread again. We'll
3686 never try to resume it because it has a pending
3687 status. */
3688 lp->stopped = 1;
3689
3690 /* Dead LWP's aren't expected to reported a pending
3691 sigstop. */
3692 lp->signalled = 0;
3693
3694 /* Store the pending event in the waitstatus as
3695 well, because W_EXITCODE(0,0) == 0. */
3696 store_waitstatus (&lp->waitstatus, lp->status);
3697 }
3698
3699 /* Keep looking. */
3700 lp = NULL;
3701 }
3702
3703 if (new_pending)
3704 {
3705 /* Some LWP now has a pending event. Go all the way
3706 back to check it. */
3707 goto retry;
3708 }
3709
3710 if (lp)
3711 {
3712 /* We got an event to report to the core. */
3713 break;
3714 }
3715
3716 /* Retry until nothing comes out of waitpid. A single
3717 SIGCHLD can indicate more than one child stopped. */
3718 continue;
3719 }
3720
3721 /* Check for zombie thread group leaders. Those can't be reaped
3722 until all other threads in the thread group are. */
3723 check_zombie_leaders ();
3724
3725 /* If there are no resumed children left, bail. We'd be stuck
3726 forever in the sigsuspend call below otherwise. */
3727 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3728 {
3729 if (debug_linux_nat)
3730 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3731
3732 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3733
3734 if (!target_can_async_p ())
3735 clear_sigint_trap ();
3736
3737 restore_child_signals_mask (&prev_mask);
3738 return minus_one_ptid;
3739 }
3740
3741 /* No interesting event to report to the core. */
3742
3743 if (target_options & TARGET_WNOHANG)
3744 {
3745 if (debug_linux_nat)
3746 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3747
3748 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3749 restore_child_signals_mask (&prev_mask);
3750 return minus_one_ptid;
3751 }
3752
3753 /* We shouldn't end up here unless we want to try again. */
3754 gdb_assert (lp == NULL);
3755
3756 /* Block until we get an event reported with SIGCHLD. */
3757 sigsuspend (&suspend_mask);
3758 }
3759
3760 if (!target_can_async_p ())
3761 clear_sigint_trap ();
3762
3763 gdb_assert (lp);
3764
3765 status = lp->status;
3766 lp->status = 0;
3767
3768 /* Don't report signals that GDB isn't interested in, such as
3769 signals that are neither printed nor stopped upon. Stopping all
3770 threads can be a bit time-consuming so if we want decent
3771 performance with heavily multi-threaded programs, especially when
3772 they're using a high frequency timer, we'd better avoid it if we
3773 can. */
3774
3775 if (WIFSTOPPED (status))
3776 {
3777 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
3778
3779 /* When using hardware single-step, we need to report every signal.
3780 Otherwise, signals in pass_mask may be short-circuited. */
3781 if (!lp->step
3782 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3783 {
3784 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
3785 here? It is not clear we should. GDB may not expect
3786 other threads to run. On the other hand, not resuming
3787 newly attached threads may cause an unwanted delay in
3788 getting them running. */
3789 registers_changed ();
3790 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3791 lp->step, signo);
3792 if (debug_linux_nat)
3793 fprintf_unfiltered (gdb_stdlog,
3794 "LLW: %s %s, %s (preempt 'handle')\n",
3795 lp->step ?
3796 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3797 target_pid_to_str (lp->ptid),
3798 (signo != TARGET_SIGNAL_0
3799 ? strsignal (target_signal_to_host (signo))
3800 : "0"));
3801 lp->stopped = 0;
3802 goto retry;
3803 }
3804
3805 if (!non_stop)
3806 {
3807 /* Only do the below in all-stop, as we currently use SIGINT
3808 to implement target_stop (see linux_nat_stop) in
3809 non-stop. */
3810 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3811 {
3812 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3813 forwarded to the entire process group, that is, all LWPs
3814 will receive it - unless they're using CLONE_THREAD to
3815 share signals. Since we only want to report it once, we
3816 mark it as ignored for all LWPs except this one. */
3817 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3818 set_ignore_sigint, NULL);
3819 lp->ignore_sigint = 0;
3820 }
3821 else
3822 maybe_clear_ignore_sigint (lp);
3823 }
3824 }
3825
3826 /* This LWP is stopped now. */
3827 lp->stopped = 1;
3828
3829 if (debug_linux_nat)
3830 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3831 status_to_str (status), target_pid_to_str (lp->ptid));
3832
3833 if (!non_stop)
3834 {
3835 /* Now stop all other LWP's ... */
3836 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3837
3838 /* ... and wait until all of them have reported back that
3839 they're no longer running. */
3840 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3841
3842 /* If we're not waiting for a specific LWP, choose an event LWP
3843 from among those that have had events. Giving equal priority
3844 to all LWPs that have had events helps prevent
3845 starvation. */
3846 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3847 select_event_lwp (ptid, &lp, &status);
3848
3849 /* Now that we've selected our final event LWP, cancel any
3850 breakpoints in other LWPs that have hit a GDB breakpoint.
3851 See the comment in cancel_breakpoints_callback to find out
3852 why. */
3853 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3854
3855 /* We'll need this to determine whether to report a SIGSTOP as
3856 TARGET_WAITKIND_0. Need to take a copy because
3857 resume_clear_callback clears it. */
3858 last_resume_kind = lp->last_resume_kind;
3859
3860 /* In all-stop, from the core's perspective, all LWPs are now
3861 stopped until a new resume action is sent over. */
3862 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3863 }
3864 else
3865 {
3866 /* See above. */
3867 last_resume_kind = lp->last_resume_kind;
3868 resume_clear_callback (lp, NULL);
3869 }
3870
3871 if (linux_nat_status_is_event (status))
3872 {
3873 if (debug_linux_nat)
3874 fprintf_unfiltered (gdb_stdlog,
3875 "LLW: trap ptid is %s.\n",
3876 target_pid_to_str (lp->ptid));
3877 }
3878
3879 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3880 {
3881 *ourstatus = lp->waitstatus;
3882 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3883 }
3884 else
3885 store_waitstatus (ourstatus, status);
3886
3887 if (debug_linux_nat)
3888 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3889
3890 restore_child_signals_mask (&prev_mask);
3891
3892 if (last_resume_kind == resume_stop
3893 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3894 && WSTOPSIG (status) == SIGSTOP)
3895 {
3896 /* A thread that has been requested to stop by GDB with
3897 target_stop, and it stopped cleanly, so report as SIG0. The
3898 use of SIGSTOP is an implementation detail. */
3899 ourstatus->value.sig = TARGET_SIGNAL_0;
3900 }
3901
3902 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3903 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3904 lp->core = -1;
3905 else
3906 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3907
3908 return lp->ptid;
3909 }
3910
/* Resume LWPs that are currently stopped without any pending status
   to report, but are resumed from the core's perspective.  Iterator
   callback; DATA is a pointer to the ptid we're about to wait on, so
   that LWPs we aren't waiting for can be left alone when resuming
   them would only make them trap again immediately.  Always returns
   0 so that iteration continues.  */

static int
resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
{
  ptid_t *wait_ptid_p = data;

  /* Only act on LWPs that are stopped, wanted running by the core,
     and have nothing pending to report.  */
  if (lp->stopped
      && lp->resumed
      && lp->status == 0
      && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);
      struct gdbarch *gdbarch = get_regcache_arch (regcache);
      CORE_ADDR pc = regcache_read_pc (regcache);

      gdb_assert (is_executing (lp->ptid));

      /* Don't bother if there's a breakpoint at PC that we'd hit
	 immediately, and we're not waiting for this LWP.  */
      if (!ptid_match (lp->ptid, *wait_ptid_p))
	{
	  if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
	    return 0;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
			    target_pid_to_str (lp->ptid),
			    paddress (gdbarch, pc),
			    lp->step);

      registers_changed ();
      linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      /* The LWP is running again; clear the stale stop state.  */
      lp->stopped = 0;
      memset (&lp->siginfo, 0, sizeof (lp->siginfo));
      lp->stopped_by_watchpoint = 0;
    }

  return 0;
}
3955
3956 static ptid_t
3957 linux_nat_wait (struct target_ops *ops,
3958 ptid_t ptid, struct target_waitstatus *ourstatus,
3959 int target_options)
3960 {
3961 ptid_t event_ptid;
3962
3963 if (debug_linux_nat)
3964 fprintf_unfiltered (gdb_stdlog,
3965 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
3966
3967 /* Flush the async file first. */
3968 if (target_can_async_p ())
3969 async_file_flush ();
3970
3971 /* Resume LWPs that are currently stopped without any pending status
3972 to report, but are resumed from the core's perspective. LWPs get
3973 in this state if we find them stopping at a time we're not
3974 interested in reporting the event (target_wait on a
3975 specific_process, for example, see linux_nat_wait_1), and
3976 meanwhile the event became uninteresting. Don't bother resuming
3977 LWPs we're not going to wait for if they'd stop immediately. */
3978 if (non_stop)
3979 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3980
3981 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
3982
3983 /* If we requested any event, and something came out, assume there
3984 may be more. If we requested a specific lwp or process, also
3985 assume there may be more. */
3986 if (target_can_async_p ()
3987 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3988 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
3989 || !ptid_equal (ptid, minus_one_ptid)))
3990 async_file_mark ();
3991
3992 /* Get ready for the next event. */
3993 if (target_can_async_p ())
3994 target_async (inferior_event_handler, 0);
3995
3996 return event_ptid;
3997 }
3998
/* Iterator callback for iterate_over_lwps: forcibly kill LWP.
   DATA is unused.  Always returns 0 so iteration continues.  */

static int
kill_callback (struct lwp_info *lp, void *data)
{
  /* PTRACE_KILL may resume the inferior.  Send SIGKILL first.  */

  errno = 0;
  kill (GET_LWP (lp->ptid), SIGKILL);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
			target_pid_to_str (lp->ptid),
			errno ? safe_strerror (errno) : "OK");

  /* Some kernels ignore even SIGKILL for processes under ptrace.  */

  errno = 0;
  ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"KC: PTRACE_KILL %s, 0, 0 (%s)\n",
			target_pid_to_str (lp->ptid),
			errno ? safe_strerror (errno) : "OK");

  return 0;
}
4024
/* Iterator callback for iterate_over_lwps: reap LP after it has been
   killed, retrying the kill until the LWP is really gone.  DATA is
   unused.  Always returns 0 so iteration continues.  */

static int
kill_wait_callback (struct lwp_info *lp, void *data)
{
  pid_t pid;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  /* For cloned processes we must check both with __WCLONE and
     without, since the exit status of a cloned process isn't reported
     with __WCLONE.  */
  if (lp->cloned)
    {
      do
	{
	  pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
	  if (pid != (pid_t) -1)
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "KWC: wait %s received unknown.\n",
				    target_pid_to_str (lp->ptid));
	      /* The Linux kernel sometimes fails to kill a thread
		 completely after PTRACE_KILL; that goes from the stop
		 point in do_fork out to the one in
		 get_signal_to_deliver and waits again.  So kill it
		 again.  */
	      kill_callback (lp, NULL);
	    }
	}
      while (pid == GET_LWP (lp->ptid));

      /* Loop exits when the LWP can no longer be waited on.  */
      gdb_assert (pid == -1 && errno == ECHILD);
    }

  /* Same as above, but without __WCLONE.  */
  do
    {
      pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
      if (pid != (pid_t) -1)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"KWC: wait %s received unk.\n",
				target_pid_to_str (lp->ptid));
	  /* See the call to kill_callback above.  */
	  kill_callback (lp, NULL);
	}
    }
  while (pid == GET_LWP (lp->ptid));

  gdb_assert (pid == -1 && errno == ECHILD);
  return 0;
}
4079
/* Target kill implementation: terminate the inferior (and any forks
   of it we're still holding), then mourn it.  */

static void
linux_nat_kill (struct target_ops *ops)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  int status;

  /* If we're stopped while forking and we haven't followed yet,
     kill the other task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      /* Kill the un-followed child and reap it so it doesn't linger
	 as a zombie.  */
      ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
      wait (&status);
    }

  if (forks_exist_p ())
    linux_fork_killall ();
  else
    {
      ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));

      /* Stop all threads before killing them, since ptrace requires
	 that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (ptid, stop_callback, NULL);
      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (ptid, stop_wait_callback, NULL);

      /* Kill all LWP's ...  */
      iterate_over_lwps (ptid, kill_callback, NULL);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (ptid, kill_wait_callback, NULL);
    }

  target_mourn_inferior ();
}
4122
4123 static void
4124 linux_nat_mourn_inferior (struct target_ops *ops)
4125 {
4126 purge_lwp_list (ptid_get_pid (inferior_ptid));
4127
4128 if (! forks_exist_p ())
4129 /* Normal case, no other forks available. */
4130 linux_ops->to_mourn_inferior (ops);
4131 else
4132 /* Multi-fork case. The current inferior_ptid has exited, but
4133 there are other viable forks to debug. Delete the exiting
4134 one and context-switch to the first available. */
4135 linux_fork_mourn_inferior ();
4136 }
4137
4138 /* Convert a native/host siginfo object, into/from the siginfo in the
4139 layout of the inferiors' architecture. */
4140
4141 static void
4142 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4143 {
4144 int done = 0;
4145
4146 if (linux_nat_siginfo_fixup != NULL)
4147 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4148
4149 /* If there was no callback, or the callback didn't do anything,
4150 then just do a straight memcpy. */
4151 if (!done)
4152 {
4153 if (direction == 1)
4154 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4155 else
4156 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4157 }
4158 }
4159
/* Implement TARGET_OBJECT_SIGNAL_INFO transfers: read or write (part
   of) the current LWP's siginfo via PTRACE_GETSIGINFO /
   PTRACE_SETSIGINFO.  Returns the number of bytes transferred, or -1
   on error.  */

static LONGEST
linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  int pid;
  struct siginfo siginfo;
  gdb_byte inf_siginfo[sizeof (struct siginfo)];

  gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
  gdb_assert (readbuf || writebuf);

  pid = GET_LWP (inferior_ptid);
  if (pid == 0)
    pid = GET_PID (inferior_ptid);

  if (offset > sizeof (siginfo))
    return -1;

  /* Fetch the current siginfo even for a write: a partial write may
     only modify bytes in [OFFSET, OFFSET + LEN), so we
     read-modify-write the whole structure.  */
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
  if (errno != 0)
    return -1;

  /* When GDB is built as a 64-bit application, ptrace writes into
     SIGINFO an object with 64-bit layout.  Since debugging a 32-bit
     inferior with a 64-bit GDB should look the same as debugging it
     with a 32-bit GDB, we need to convert it.  GDB core always sees
     the converted layout, so any read/write will have to be done
     post-conversion.  */
  siginfo_fixup (&siginfo, inf_siginfo, 0);

  /* Clamp the transfer to the end of the structure.  */
  if (offset + len > sizeof (siginfo))
    len = sizeof (siginfo) - offset;

  if (readbuf != NULL)
    memcpy (readbuf, inf_siginfo + offset, len);
  else
    {
      memcpy (inf_siginfo + offset, writebuf, len);

      /* Convert back to ptrace layout before flushing it out.  */
      siginfo_fixup (&siginfo, inf_siginfo, 1);

      errno = 0;
      ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
      if (errno != 0)
	return -1;
    }

  return len;
}
4212
4213 static LONGEST
4214 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4215 const char *annex, gdb_byte *readbuf,
4216 const gdb_byte *writebuf,
4217 ULONGEST offset, LONGEST len)
4218 {
4219 struct cleanup *old_chain;
4220 LONGEST xfer;
4221
4222 if (object == TARGET_OBJECT_SIGNAL_INFO)
4223 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4224 offset, len);
4225
4226 /* The target is connected but no live inferior is selected. Pass
4227 this request down to a lower stratum (e.g., the executable
4228 file). */
4229 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4230 return 0;
4231
4232 old_chain = save_inferior_ptid ();
4233
4234 if (is_lwp (inferior_ptid))
4235 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4236
4237 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4238 offset, len);
4239
4240 do_cleanups (old_chain);
4241 return xfer;
4242 }
4243
4244 static int
4245 linux_thread_alive (ptid_t ptid)
4246 {
4247 int err, tmp_errno;
4248
4249 gdb_assert (is_lwp (ptid));
4250
4251 /* Send signal 0 instead of anything ptrace, because ptracing a
4252 running thread errors out claiming that the thread doesn't
4253 exist. */
4254 err = kill_lwp (GET_LWP (ptid), 0);
4255 tmp_errno = errno;
4256 if (debug_linux_nat)
4257 fprintf_unfiltered (gdb_stdlog,
4258 "LLTA: KILL(SIG0) %s (%s)\n",
4259 target_pid_to_str (ptid),
4260 err ? safe_strerror (tmp_errno) : "OK");
4261
4262 if (err != 0)
4263 return 0;
4264
4265 return 1;
4266 }
4267
/* Target hook wrapper: report whether the thread PTID is still
   alive.  OPS is unused; the real work is in linux_thread_alive.  */

static int
linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  return linux_thread_alive (ptid);
}
4273
4274 static char *
4275 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4276 {
4277 static char buf[64];
4278
4279 if (is_lwp (ptid)
4280 && (GET_PID (ptid) != GET_LWP (ptid)
4281 || num_lwps (GET_PID (ptid)) > 1))
4282 {
4283 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4284 return buf;
4285 }
4286
4287 return normal_pid_to_str (ptid);
4288 }
4289
4290 static char *
4291 linux_nat_thread_name (struct thread_info *thr)
4292 {
4293 int pid = ptid_get_pid (thr->ptid);
4294 long lwp = ptid_get_lwp (thr->ptid);
4295 #define FORMAT "/proc/%d/task/%ld/comm"
4296 char buf[sizeof (FORMAT) + 30];
4297 FILE *comm_file;
4298 char *result = NULL;
4299
4300 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4301 comm_file = fopen (buf, "r");
4302 if (comm_file)
4303 {
4304 /* Not exported by the kernel, so we define it here. */
4305 #define COMM_LEN 16
4306 static char line[COMM_LEN + 1];
4307
4308 if (fgets (line, sizeof (line), comm_file))
4309 {
4310 char *nl = strchr (line, '\n');
4311
4312 if (nl)
4313 *nl = '\0';
4314 if (*line != '\0')
4315 result = line;
4316 }
4317
4318 fclose (comm_file);
4319 }
4320
4321 #undef COMM_LEN
4322 #undef FORMAT
4323
4324 return result;
4325 }
4326
4327 /* Accepts an integer PID; Returns a string representing a file that
4328 can be opened to get the symbols for the child process. */
4329
4330 static char *
4331 linux_child_pid_to_exec_file (int pid)
4332 {
4333 char *name1, *name2;
4334
4335 name1 = xmalloc (MAXPATHLEN);
4336 name2 = xmalloc (MAXPATHLEN);
4337 make_cleanup (xfree, name1);
4338 make_cleanup (xfree, name2);
4339 memset (name2, 0, MAXPATHLEN);
4340
4341 sprintf (name1, "/proc/%d/exe", pid);
4342 if (readlink (name1, name2, MAXPATHLEN) > 0)
4343 return name2;
4344 else
4345 return name1;
4346 }
4347
4348 /* Service function for corefiles and info proc. */
4349
4350 static int
4351 read_mapping (FILE *mapfile,
4352 long long *addr,
4353 long long *endaddr,
4354 char *permissions,
4355 long long *offset,
4356 char *device, long long *inode, char *filename)
4357 {
4358 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4359 addr, endaddr, permissions, offset, device, inode);
4360
4361 filename[0] = '\0';
4362 if (ret > 0 && ret != EOF)
4363 {
4364 /* Eat everything up to EOL for the filename. This will prevent
4365 weird filenames (such as one with embedded whitespace) from
4366 confusing this code. It also makes this code more robust in
4367 respect to annotations the kernel may add after the filename.
4368
4369 Note the filename is used for informational purposes
4370 only. */
4371 ret += fscanf (mapfile, "%[^\n]\n", filename);
4372 }
4373
4374 return (ret != 0 && ret != EOF);
4375 }
4376
/* Fills the "to_find_memory_regions" target vector.  Lists the memory
   regions in the inferior for a corefile.  Invokes FUNC once per
   mapping read from /proc/PID/maps, passing OBFD through as the
   callback's closure.  Errors out if the maps file cannot be
   opened.  */

static int
linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
{
  int pid = PIDGET (inferior_ptid);
  char mapsfilename[MAXPATHLEN];
  FILE *mapsfile;
  long long addr, endaddr, size, offset, inode;
  char permissions[8], device[8], filename[MAXPATHLEN];
  int read, write, exec;
  struct cleanup *cleanup;

  /* Compose the filename for the /proc memory map, and open it.  */
  sprintf (mapsfilename, "/proc/%d/maps", pid);
  if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
    error (_("Could not open %s."), mapsfilename);
  cleanup = make_cleanup_fclose (mapsfile);

  if (info_verbose)
    fprintf_filtered (gdb_stdout,
		      "Reading memory regions from %s\n", mapsfilename);

  /* Now iterate until end-of-file.  */
  while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
		       &offset, &device[0], &inode, &filename[0]))
    {
      size = endaddr - addr;

      /* Get the segment's permissions.  */
      read = (strchr (permissions, 'r') != 0);
      write = (strchr (permissions, 'w') != 0);
      exec = (strchr (permissions, 'x') != 0);

      if (info_verbose)
	{
	  fprintf_filtered (gdb_stdout,
			    "Save segment, %s bytes at %s (%c%c%c)",
			    plongest (size), paddress (target_gdbarch, addr),
			    read ? 'r' : ' ',
			    write ? 'w' : ' ', exec ? 'x' : ' ');
	  if (filename[0])
	    fprintf_filtered (gdb_stdout, " for %s", filename);
	  fprintf_filtered (gdb_stdout, "\n");
	}

      /* Invoke the callback function to create the corefile
	 segment.  */
      func (addr, size, read, write, exec, obfd);
    }
  do_cleanups (cleanup);
  return 0;
}
4431
4432 static int
4433 find_signalled_thread (struct thread_info *info, void *data)
4434 {
4435 if (info->suspend.stop_signal != TARGET_SIGNAL_0
4436 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4437 return 1;
4438
4439 return 0;
4440 }
4441
4442 static enum target_signal
4443 find_stop_signal (void)
4444 {
4445 struct thread_info *info =
4446 iterate_over_threads (find_signalled_thread, NULL);
4447
4448 if (info)
4449 return info->suspend.stop_signal;
4450 else
4451 return TARGET_SIGNAL_0;
4452 }
4453
/* Records the thread's register state for the corefile note
   section.  Collects the registers of the LWP given by PTID into
   core-file note records appended to NOTE_DATA (running size in
   *NOTE_SIZE); STOP_SIGNAL is recorded in the prstatus note.
   Returns the (possibly reallocated) note buffer.  */

static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
			       char *note_data, int *note_size,
			       enum target_signal stop_signal)
{
  unsigned long lwp = ptid_get_lwp (ptid);
  struct gdbarch *gdbarch = target_gdbarch;
  struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
  const struct regset *regset;
  int core_regset_p;
  struct cleanup *old_chain;
  struct core_regset_section *sect_list;
  char *gdb_regset;

  /* Fetch registers with INFERIOR_PTID temporarily switched to the
     thread being dumped.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = ptid;
  target_fetch_registers (regcache, -1);
  do_cleanups (old_chain);

  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
  sect_list = gdbarch_core_regset_sections (gdbarch);

  /* The loop below uses the new struct core_regset_section, which stores
     the supported section names and sizes for the core file.  Note that
     note PRSTATUS needs to be treated specially.  But the other notes are
     structurally the same, so they can benefit from the new struct.  */
  if (core_regset_p && sect_list != NULL)
    while (sect_list->sect_name != NULL)
      {
	regset = gdbarch_regset_from_core_section (gdbarch,
						   sect_list->sect_name,
						   sect_list->size);
	gdb_assert (regset && regset->collect_regset);
	gdb_regset = xmalloc (sect_list->size);
	regset->collect_regset (regset, regcache, -1,
				gdb_regset, sect_list->size);

	if (strcmp (sect_list->sect_name, ".reg") == 0)
	  note_data = (char *) elfcore_write_prstatus
	    (obfd, note_data, note_size,
	     lwp, target_signal_to_host (stop_signal),
	     gdb_regset);
	else
	  note_data = (char *) elfcore_write_register_note
	    (obfd, note_data, note_size,
	     sect_list->sect_name, gdb_regset,
	     sect_list->size);
	xfree (gdb_regset);
	sect_list++;
      }

  /* For architectures that does not have the struct core_regset_section
     implemented, we use the old method.  When all the architectures have
     the new support, the code below should be deleted.  */
  else
    {
      gdb_gregset_t gregs;
      gdb_fpregset_t fpregs;

      /* General registers: prefer the arch regset; fall back to
	 fill_gregset.  */
      if (core_regset_p
	  && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
							 sizeof (gregs)))
	  != NULL && regset->collect_regset != NULL)
	regset->collect_regset (regset, regcache, -1,
				&gregs, sizeof (gregs));
      else
	fill_gregset (regcache, &gregs, -1);

      note_data = (char *) elfcore_write_prstatus
	(obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
	 &gregs);

      /* Floating-point registers, same fallback scheme.  */
      if (core_regset_p
	  && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
							 sizeof (fpregs)))
	  != NULL && regset->collect_regset != NULL)
	regset->collect_regset (regset, regcache, -1,
				&fpregs, sizeof (fpregs));
      else
	fill_fpregset (regcache, &fpregs, -1);

      note_data = (char *) elfcore_write_prfpreg (obfd,
						  note_data,
						  note_size,
						  &fpregs, sizeof (fpregs));
    }

  return note_data;
}
4546
/* Bookkeeping passed through iterate_over_lwps while collecting the
   per-thread register notes of a core file.  */

struct linux_nat_corefile_thread_data
{
  bfd *obfd;			/* Output core-file BFD.  */
  char *note_data;		/* Accumulated note buffer.  */
  int *note_size;		/* Running size of NOTE_DATA in bytes.  */
  int num_notes;		/* Number of thread notes written so far.  */
  enum target_signal stop_signal;  /* Signal recorded in each prstatus.  */
};
4555
4556 /* Called by gdbthread.c once per thread. Records the thread's
4557 register state for the corefile note section. */
4558
4559 static int
4560 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4561 {
4562 struct linux_nat_corefile_thread_data *args = data;
4563
4564 args->note_data = linux_nat_do_thread_registers (args->obfd,
4565 ti->ptid,
4566 args->note_data,
4567 args->note_size,
4568 args->stop_signal);
4569 args->num_notes++;
4570
4571 return 0;
4572 }
4573
4574 /* Enumerate spufs IDs for process PID. */
4575
4576 static void
4577 iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4578 {
4579 char path[128];
4580 DIR *dir;
4581 struct dirent *entry;
4582
4583 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4584 dir = opendir (path);
4585 if (!dir)
4586 return;
4587
4588 rewinddir (dir);
4589 while ((entry = readdir (dir)) != NULL)
4590 {
4591 struct stat st;
4592 struct statfs stfs;
4593 int fd;
4594
4595 fd = atoi (entry->d_name);
4596 if (!fd)
4597 continue;
4598
4599 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4600 if (stat (path, &st) != 0)
4601 continue;
4602 if (!S_ISDIR (st.st_mode))
4603 continue;
4604
4605 if (statfs (path, &stfs) != 0)
4606 continue;
4607 if (stfs.f_type != SPUFS_MAGIC)
4608 continue;
4609
4610 callback (data, fd);
4611 }
4612
4613 closedir (dir);
4614 }
4615
/* Generate corefile notes for SPU contexts.  */

/* Closure for linux_spu_corefile_callback: accumulates SPU context
   state into a growing core-file note buffer.  */

struct linux_spu_corefile_data
{
  bfd *obfd;		/* Output core-file BFD.  */
  char *note_data;	/* Accumulated note buffer.  */
  int *note_size;	/* Running size of NOTE_DATA in bytes.  */
};
4624
/* Callback for iterate_over_spus: dump the state of one SPU context
   (descriptor FD) into the core file, emitting one "SPU/<fd>/<file>"
   note per spufs file that can be read.  DATA is a
   struct linux_spu_corefile_data closure.  */

static void
linux_spu_corefile_callback (void *data, int fd)
{
  struct linux_spu_corefile_data *args = data;
  int i;

  /* The spufs files that together describe a context's state.  */
  static const char *spu_files[] =
    {
      "object-id",
      "mem",
      "regs",
      "fpcr",
      "lslr",
      "decr",
      "decr_status",
      "signal1",
      "signal1_type",
      "signal2",
      "signal2_type",
      "event_mask",
      "event_status",
      "mbox_info",
      "ibox_info",
      "wbox_info",
      "dma_info",
      "proxydma_info",
    };

  for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
    {
      char annex[32], note_name[32];
      gdb_byte *spu_data;
      LONGEST spu_len;

      xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
      spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
				   annex, &spu_data);
      /* Unreadable files are silently skipped; a core file simply
	 lacks the corresponding note.  */
      if (spu_len > 0)
	{
	  xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
	  args->note_data = elfcore_write_note (args->obfd, args->note_data,
						args->note_size, note_name,
						NT_SPU, spu_data, spu_len);
	  xfree (spu_data);
	}
    }
}
4672
4673 static char *
4674 linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4675 {
4676 struct linux_spu_corefile_data args;
4677
4678 args.obfd = obfd;
4679 args.note_data = note_data;
4680 args.note_size = note_size;
4681
4682 iterate_over_spus (PIDGET (inferior_ptid),
4683 linux_spu_corefile_callback, &args);
4684
4685 return args.note_data;
4686 }
4687
/* Fills the "to_make_corefile_note" target vector.  Builds the note
   section for a corefile, and returns it in a malloc buffer (freed
   via the cleanup chain).  Emits, in order: the prpsinfo note, one
   register note set per thread, the auxv note, and any SPU context
   notes.  */

static char *
linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
{
  struct linux_nat_corefile_thread_data thread_args;
  /* The variable size must be >= sizeof (prpsinfo_t.pr_fname).  */
  char fname[16] = { '\0' };
  /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs).  */
  char psargs[80] = { '\0' };
  char *note_data = NULL;
  ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
  gdb_byte *auxv;
  int auxv_len;

  if (get_exec_file (0))
    {
      /* strncpy may leave FNAME/PSARGS unterminated when the source
	 fills the buffer; per the comment below, the prpsinfo writer
	 copes with that.  */
      strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
      strncpy (psargs, get_exec_file (0), sizeof (psargs));
      if (get_inferior_args ())
	{
	  char *string_end;
	  char *psargs_end = psargs + sizeof (psargs);

	  /* linux_elfcore_write_prpsinfo () handles zero unterminated
	     strings fine.  */
	  string_end = memchr (psargs, 0, sizeof (psargs));
	  if (string_end != NULL)
	    {
	      *string_end++ = ' ';
	      strncpy (string_end, get_inferior_args (),
		       psargs_end - string_end);
	    }
	}
      note_data = (char *) elfcore_write_prpsinfo (obfd,
						   note_data,
						   note_size, fname, psargs);
    }

  /* Dump information for threads.  */
  thread_args.obfd = obfd;
  thread_args.note_data = note_data;
  thread_args.note_size = note_size;
  thread_args.num_notes = 0;
  thread_args.stop_signal = find_stop_signal ();
  iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
  gdb_assert (thread_args.num_notes != 0);
  note_data = thread_args.note_data;

  auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
				NULL, &auxv);
  if (auxv_len > 0)
    {
      note_data = elfcore_write_note (obfd, note_data, note_size,
				      "CORE", NT_AUXV, auxv, auxv_len);
      xfree (auxv);
    }

  note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);

  make_cleanup (xfree, note_data);
  return note_data;
}
4752
4753 /* Implement the "info proc" command. */
4754
/* Selector for linux_nat_info_proc_cmd_1: which pieces of
   /proc/<pid> information the `info proc' family should display.  */

enum info_proc_what
  {
    /* Display the default cmdline, cwd and exe outputs.  */
    IP_MINIMAL,

    /* Display `info proc mappings'.  */
    IP_MAPPINGS,

    /* Display `info proc status'.  */
    IP_STATUS,

    /* Display `info proc stat'.  */
    IP_STAT,

    /* Display `info proc cmdline'.  */
    IP_CMDLINE,

    /* Display `info proc exe'.  */
    IP_EXE,

    /* Display `info proc cwd'.  */
    IP_CWD,

    /* Display all of the above.  */
    IP_ALL
  };
4781
/* Worker for the `info proc' family of commands.  ARGS may carry an
   optional leading PID (defaulting to the current inferior) and must
   otherwise be empty; WHAT selects which /proc files to display.  */

static void
linux_nat_info_proc_cmd_1 (char *args, enum info_proc_what what, int from_tty)
{
  /* A long is used for pid instead of an int to avoid a loss of precision
     compiler warning from the output of strtoul.  */
  long pid = PIDGET (inferior_ptid);
  FILE *procfile;
  char buffer[MAXPATHLEN];
  char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
  int cmdline_f = (what == IP_MINIMAL || what == IP_CMDLINE || what == IP_ALL);
  int cwd_f = (what == IP_MINIMAL || what == IP_CWD || what == IP_ALL);
  int exe_f = (what == IP_MINIMAL || what == IP_EXE || what == IP_ALL);
  int mappings_f = (what == IP_MAPPINGS || what == IP_ALL);
  int status_f = (what == IP_STATUS || what == IP_ALL);
  int stat_f = (what == IP_STAT || what == IP_ALL);
  struct stat dummy;

  /* Consume an optional leading PID argument.  */
  if (args && isdigit (args[0]))
    pid = strtoul (args, &args, 10);

  args = skip_spaces (args);
  if (args && args[0])
    error (_("Too many parameters: %s"), args);

  if (pid == 0)
    error (_("No current process: you must name one."));

  sprintf (fname1, "/proc/%ld", pid);
  if (stat (fname1, &dummy) != 0)
    error (_("No /proc directory: '%s'"), fname1);

  printf_filtered (_("process %ld\n"), pid);
  if (cmdline_f)
    {
      sprintf (fname1, "/proc/%ld/cmdline", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  struct cleanup *cleanup = make_cleanup_fclose (procfile);

	  /* NOTE(review): cmdline is NUL-separated, so only the
	     first argument is shown here.  */
	  if (fgets (buffer, sizeof (buffer), procfile))
	    printf_filtered ("cmdline = '%s'\n", buffer);
	  else
	    warning (_("unable to read '%s'"), fname1);
	  do_cleanups (cleanup);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (cwd_f)
    {
      sprintf (fname1, "/proc/%ld/cwd", pid);
      /* Zero-fill so the readlink result is NUL-terminated as long
	 as the link is shorter than the buffer.  */
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
	printf_filtered ("cwd = '%s'\n", fname2);
      else
	warning (_("unable to read link '%s'"), fname1);
    }
  if (exe_f)
    {
      sprintf (fname1, "/proc/%ld/exe", pid);
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
	printf_filtered ("exe = '%s'\n", fname2);
      else
	warning (_("unable to read link '%s'"), fname1);
    }
  if (mappings_f)
    {
      sprintf (fname1, "/proc/%ld/maps", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  long long addr, endaddr, size, offset, inode;
	  char permissions[8], device[8], filename[MAXPATHLEN];
	  struct cleanup *cleanup;

	  cleanup = make_cleanup_fclose (procfile);
	  printf_filtered (_("Mapped address spaces:\n\n"));
	  /* Column widths depend on the target's address size.  */
	  if (gdbarch_addr_bit (target_gdbarch) == 32)
	    {
	      printf_filtered ("\t%10s %10s %10s %10s %7s\n",
			   "Start Addr",
			   "  End Addr",
			   "      Size", "    Offset", "objfile");
	    }
	  else
	    {
	      printf_filtered ("  %18s %18s %10s %10s %7s\n",
			   "Start Addr",
			   "  End Addr",
			   "      Size", "    Offset", "objfile");
	    }

	  while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
			       &offset, &device[0], &inode, &filename[0]))
	    {
	      size = endaddr - addr;

	      /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
		 calls here (and possibly above) should be abstracted
		 out into their own functions?  Andrew suggests using
		 a generic local_address_string instead to print out
		 the addresses; that makes sense to me, too.  */

	      if (gdbarch_addr_bit (target_gdbarch) == 32)
		{
		  printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
			       (unsigned long) addr,	/* FIXME: pr_addr */
			       (unsigned long) endaddr,
			       (int) size,
			       (unsigned int) offset,
			       filename[0] ? filename : "");
		}
	      else
		{
		  printf_filtered ("  %#18lx %#18lx %#10x %#10x %7s\n",
			       (unsigned long) addr,	/* FIXME: pr_addr */
			       (unsigned long) endaddr,
			       (int) size,
			       (unsigned int) offset,
			       filename[0] ? filename : "");
		}
	    }

	  do_cleanups (cleanup);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (status_f)
    {
      sprintf (fname1, "/proc/%ld/status", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  struct cleanup *cleanup = make_cleanup_fclose (procfile);

	  /* The status file is human-readable; echo it verbatim.  */
	  while (fgets (buffer, sizeof (buffer), procfile) != NULL)
	    puts_filtered (buffer);
	  do_cleanups (cleanup);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (stat_f)
    {
      sprintf (fname1, "/proc/%ld/stat", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  int itmp;
	  char ctmp;
	  long ltmp;
	  struct cleanup *cleanup = make_cleanup_fclose (procfile);

	  /* NOTE(review): several fields below are scanned with %lu
	     into a (signed) long — works where long and unsigned
	     long have the same representation, but is formally a
	     format/type mismatch; confirm before porting.  */
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Process: %d\n"), itmp);
	  if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
	    printf_filtered (_("Exec file: %s\n"), buffer);
	  if (fscanf (procfile, "%c ", &ctmp) > 0)
	    printf_filtered (_("State: %c\n"), ctmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Parent process: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Process group: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Session id: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("TTY: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("TTY owner process group: %d\n"), itmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Flags: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Minor faults (no memory page): %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Minor faults, children: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Major faults (memory page faults): %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Major faults, children: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("utime: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("stime: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("utime, children: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("stime, children: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("jiffies remaining in current "
			       "time slice: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("'nice' value: %ld\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("jiffies until next timeout: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("start time (jiffies since "
			       "system boot): %ld\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Virtual memory size: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Resident set size: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("End of text: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
#if 0	/* Don't know how architecture-dependent the rest is...
	   Anyway the signal bitmap info is available from "status".  */
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
	    printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
	    printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
	    printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
#endif
	  do_cleanups (cleanup);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
}
5023
/* Implement `info proc' when given without any further parameters:
   show the default summary (cmdline, cwd and exe).  */

static void
linux_nat_info_proc_cmd (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_MINIMAL, from_tty);
}
5031
/* Implement `info proc mappings': list the process's memory map.  */

static void
linux_nat_info_proc_cmd_mappings (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_MAPPINGS, from_tty);
}
5039
/* Implement `info proc stat': decode /proc/<pid>/stat.  */

static void
linux_nat_info_proc_cmd_stat (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_STAT, from_tty);
}
5047
/* Implement `info proc status': echo /proc/<pid>/status.  */

static void
linux_nat_info_proc_cmd_status (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_STATUS, from_tty);
}
5055
/* Implement `info proc cwd': show the process's working directory.  */

static void
linux_nat_info_proc_cmd_cwd (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_CWD, from_tty);
}
5063
/* Implement `info proc cmdline': show the process's command line.  */

static void
linux_nat_info_proc_cmd_cmdline (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_CMDLINE, from_tty);
}
5071
/* Implement `info proc exe': show the process's executable path.  */

static void
linux_nat_info_proc_cmd_exe (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_EXE, from_tty);
}
5079
/* Implement `info proc all': display every category of output.  */

static void
linux_nat_info_proc_cmd_all (char *args, int from_tty)
{
  linux_nat_info_proc_cmd_1 (args, IP_ALL, from_tty);
}
5087
/* Implement the to_xfer_partial interface for memory reads using the /proc
   filesystem.  Because we can use a single read() call for /proc, this
   can be much more efficient than banging away at PTRACE_PEEKTEXT,
   but it doesn't support writes.  Returns the number of bytes read,
   or 0 to let the caller fall back to another method.  */

static LONGEST
linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
			 const char *annex, gdb_byte *readbuf,
			 const gdb_byte *writebuf,
			 ULONGEST offset, LONGEST len)
{
  LONGEST ret;
  int fd;
  char filename[64];

  if (object != TARGET_OBJECT_MEMORY || !readbuf)
    return 0;

  /* Don't bother for one word.  */
  if (len < 3 * sizeof (long))
    return 0;

  /* We could keep this file open and cache it - possibly one per
     thread.  That requires some juggling, but is even faster.  */
  sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return 0;

  /* If pread64 is available, use it.  It's faster if the kernel
     supports it (only one syscall), and it's 64-bit safe even on
     32-bit platforms (for instance, SPARC debugging a SPARC64
     application).  */
#ifdef HAVE_PREAD64
  if (pread64 (fd, readbuf, len, offset) != len)
#else
  if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
#endif
    /* Short read or error: report "nothing transferred" so the
       caller can retry via ptrace.  */
    ret = 0;
  else
    ret = len;

  close (fd);
  return ret;
}
5133
5134
/* Enumerate spufs IDs for process PID.  Each SPU context descriptor
   found under /proc/PID/fd is stored as a 4-byte integer in target
   byte order into BUF, honoring the OFFSET/LEN window of the
   enclosing partial transfer.  Returns the number of bytes written
   into BUF, or -1 if /proc is unavailable.  */

static LONGEST
spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
  LONGEST pos = 0;
  LONGEST written = 0;
  char path[128];
  DIR *dir;
  struct dirent *entry;

  xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
  dir = opendir (path);
  if (!dir)
    return -1;

  rewinddir (dir);
  while ((entry = readdir (dir)) != NULL)
    {
      struct stat st;
      struct statfs stfs;
      int fd;

      /* Skip "." and ".." (and fd 0, indistinguishable via atoi).  */
      fd = atoi (entry->d_name);
      if (!fd)
	continue;

      /* An SPU context descriptor is a directory on a spufs
	 filesystem; skip everything else.  */
      xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
      if (stat (path, &st) != 0)
	continue;
      if (!S_ISDIR (st.st_mode))
	continue;

      if (statfs (path, &stfs) != 0)
	continue;
      if (stfs.f_type != SPUFS_MAGIC)
	continue;

      /* Only emit the ids that fall inside the requested window.  */
      if (pos >= offset && pos + 4 <= offset + len)
	{
	  store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
	  written += 4;
	}
      pos += 4;
    }

  closedir (dir);
  return written;
}
5184
/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
   object type, using the /proc file system.  With a NULL ANNEX, read
   requests enumerate the available SPU context IDs; otherwise ANNEX
   names a file under /proc/PID/fd to read or write directly.
   Returns bytes transferred, 0 on a failed seek, or -1 on error.  */
static LONGEST
linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
		     const char *annex, gdb_byte *readbuf,
		     const gdb_byte *writebuf,
		     ULONGEST offset, LONGEST len)
{
  char buf[128];
  int fd = 0;
  int ret = -1;
  int pid = PIDGET (inferior_ptid);

  /* No annex: the only supported operation is enumerating SPU IDs,
     which is a read.  */
  if (!annex)
    {
      if (!readbuf)
	return -1;
      else
	return spu_enumerate_spu_ids (pid, readbuf, offset, len);
    }

  xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
  fd = open (buf, writebuf? O_WRONLY : O_RDONLY);
  /* NOTE(review): this treats fd 0 as a failure; open can in
     principle return 0 if stdin has been closed — confirm this is
     never the case in a running GDB.  */
  if (fd <= 0)
    return -1;

  if (offset != 0
      && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
    {
      close (fd);
      return 0;
    }

  if (writebuf)
    ret = write (fd, writebuf, (size_t) len);
  else if (readbuf)
    ret = read (fd, readbuf, (size_t) len);

  close (fd);
  return ret;
}
5226
5227
5228 /* Parse LINE as a signal set and add its set bits to SIGS. */
5229
5230 static void
5231 add_line_to_sigset (const char *line, sigset_t *sigs)
5232 {
5233 int len = strlen (line) - 1;
5234 const char *p;
5235 int signum;
5236
5237 if (line[len] != '\n')
5238 error (_("Could not parse signal set: %s"), line);
5239
5240 p = line;
5241 signum = len * 4;
5242 while (len-- > 0)
5243 {
5244 int digit;
5245
5246 if (*p >= '0' && *p <= '9')
5247 digit = *p - '0';
5248 else if (*p >= 'a' && *p <= 'f')
5249 digit = *p - 'a' + 10;
5250 else
5251 error (_("Could not parse signal set: %s"), line);
5252
5253 signum -= 4;
5254
5255 if (digit & 1)
5256 sigaddset (sigs, signum + 1);
5257 if (digit & 2)
5258 sigaddset (sigs, signum + 2);
5259 if (digit & 4)
5260 sigaddset (sigs, signum + 3);
5261 if (digit & 8)
5262 sigaddset (sigs, signum + 4);
5263
5264 p++;
5265 }
5266 }
5267
/* Find process PID's pending signals from /proc/pid/status and set
   PENDING, BLOCKED and IGNORED to match.  Each output set is first
   emptied; errors opening the status file are reported via error().  */

void
linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
{
  FILE *procfile;
  char buffer[MAXPATHLEN], fname[MAXPATHLEN];
  struct cleanup *cleanup;

  sigemptyset (pending);
  sigemptyset (blocked);
  sigemptyset (ignored);
  sprintf (fname, "/proc/%d/status", pid);
  procfile = fopen (fname, "r");
  if (procfile == NULL)
    error (_("Could not open %s"), fname);
  /* Ensure the file is closed even if add_line_to_sigset throws.  */
  cleanup = make_cleanup_fclose (procfile);

  while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
    {
      /* Normal queued signals are on the SigPnd line in the status
	 file.  However, 2.6 kernels also have a "shared" pending
	 queue for delivering signals to a thread group, so check for
	 a ShdPnd line also.

	 Unfortunately some Red Hat kernels include the shared pending
	 queue but not the ShdPnd status field.  */

      if (strncmp (buffer, "SigPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, pending);
      else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, blocked);
      else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
	add_line_to_sigset (buffer + 8, ignored);
    }

  do_cleanups (cleanup);
}
5310
/* Implement to_xfer_partial for TARGET_OBJECT_OSDATA by delegating
   to the common /proc-based OS data reader.  Write requests are not
   supported (WRITEBUF is ignored).  */

static LONGEST
linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
		       const char *annex, gdb_byte *readbuf,
		       const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  gdb_assert (object == TARGET_OBJECT_OSDATA);

  return linux_common_xfer_osdata (annex, readbuf, offset, len);
}
5320
/* to_xfer_partial hook installed on the generic Linux target:
   dispatch AUXV, OSDATA and SPU objects to their specialized
   handlers, then try the fast /proc memory path, and finally fall
   back to the inherited (ptrace-based) implementation.  */

static LONGEST
linux_xfer_partial (struct target_ops *ops, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  LONGEST xfer;

  if (object == TARGET_OBJECT_AUXV)
    return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
			     offset, len);

  if (object == TARGET_OBJECT_OSDATA)
    return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
				  offset, len);

  if (object == TARGET_OBJECT_SPU)
    return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
				offset, len);

  /* GDB calculates all the addresses in the possibly larger width of
     the address.  Address width needs to be masked before its final
     use - either by linux_proc_xfer_partial or
     inf_ptrace_xfer_partial.

     Compare ADDR_BIT first to avoid a compiler warning on shift
     overflow.  */

  if (object == TARGET_OBJECT_MEMORY)
    {
      int addr_bit = gdbarch_addr_bit (target_gdbarch);

      if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
	offset &= ((ULONGEST) 1 << addr_bit) - 1;
    }

  /* A zero return means "not handled here"; anything else (including
     a partial transfer) is final.  */
  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
				  offset, len);
  if (xfer != 0)
    return xfer;

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
			     offset, len);
}
5362
/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  Installs the Linux-specific catchpoint,
   fork-following, core-file and xfer methods on T, saving T's
   original to_xfer_partial so linux_xfer_partial can chain to it.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
  t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_find_memory_regions = linux_nat_find_memory_regions;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Remember the inherited method so linux_xfer_partial can fall
     back to it for objects it does not handle itself.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;
}
5386
/* Return a fresh ptrace-based target with the generic GNU/Linux
   methods installed.  The caller owns the returned target_ops.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t;

  t = inf_ptrace_target ();
  linux_target_install_ops (t);

  return t;
}
5397
/* Like linux_target, but for traditional (USER-area based) register
   access; REGISTER_U_OFFSET maps a register number to its offset in
   the USER area.  */

struct target_ops *
linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
{
  struct target_ops *t;

  t = inf_ptrace_trad_target (register_u_offset);
  linux_target_install_ops (t);

  return t;
}
5408
/* target_is_async_p implementation.  */

static int
linux_nat_is_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
}
5419
/* target_can_async_p implementation.  Deliberately identical to
   linux_nat_is_async_p: capability and current state coincide.  */

static int
linux_nat_can_async_p (void)
{
  /* NOTE: palves 2008-03-21: We're only async when the user requests
     it explicitly with the "set target-async" command.
     Someday, linux will always be async.  */
  return target_async_permitted;
}
5430
/* to_supports_non_stop implementation: non-stop mode is always
   available on the native Linux target.  */

static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
5436
/* True if we want to support multi-process.  To be removed when GDB
   supports multi-exec.  Kept as a plain global so it can be flipped
   from a debugger while developing.  */

int linux_multi_process = 1;

/* to_supports_multi_process implementation.  */

static int
linux_nat_supports_multi_process (void)
{
  return linux_multi_process;
}
5447
/* to_supports_disable_randomization implementation: address-space
   randomization can only be disabled when the personality(2) call
   was available at configure time.  */

static int
linux_nat_supports_disable_randomization (void)
{
#ifdef HAVE_PERSONALITY
  return 1;
#else
  return 0;
#endif
}
5457
/* Nonzero while GDB (not the inferior) owns the terminal in async
   mode; guards the idempotence of the two functions below.  */
static int async_terminal_is_ours = 1;

/* target_terminal_inferior implementation.  Hands the terminal to
   the inferior; in async mode, additionally stops GDB from reading
   stdin and arms the SIGINT forwarding trap.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_inferior ();
      return;
    }

  terminal_inferior ();

  /* Calls to target_terminal_*() are meant to be idempotent.  */
  if (!async_terminal_is_ours)
    return;

  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
5482
/* target_terminal_ours implementation.  Reclaims the terminal for
   GDB; in async mode, also restores stdin handling and drops the
   SIGINT trap.  Idempotent, mirroring linux_nat_terminal_inferior.  */

static void
linux_nat_terminal_ours (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_ours ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  terminal_ours ();

  if (async_terminal_is_ours)
    return;

  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}
5507
/* Callback (and its opaque context) registered by linux_nat_async;
   invoked from handle_target_event when the event pipe fires.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;

/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
   so we notice when any child changes state, and notify the
   event-loop; it allows us to use sigsuspend in linux_nat_wait_1
   above to wait for the arrival of a SIGCHLD.  */

static void
sigchld_handler (int signo)
{
  /* Runs in signal context: preserve errno and use only
     async-signal-safe calls (ui_file_write_async_safe,
     async_file_mark).  */
  int old_errno = errno;

  if (debug_linux_nat)
    ui_file_write_async_safe (gdb_stdlog,
			      "sigchld\n", sizeof ("sigchld\n") - 1);

  /* Only poke the event loop if the async pipe actually exists,
     i.e. async mode is enabled.  */
  if (signo == SIGCHLD
      && linux_nat_event_pipe[0] != -1)
    async_file_mark (); /* Let the event loop know that there are
			   events to handle.  */

  errno = old_errno;
}
5533
/* Callback registered with the target events file descriptor.
   Forwards the wakeup to the client callback registered via
   linux_nat_async.  */

static void
handle_target_event (int error, gdb_client_data client_data)
{
  (*async_client_callback) (INF_REG_EVENT, async_client_context);
}
5541
/* Create/destroy the target events pipe.  Returns previous state
   (nonzero if the pipe already existed).  ENABLE nonzero creates the
   pipe, zero tears it down; a no-op if already in the requested
   state.  */

static int
linux_async_pipe (int enable)
{
  /* linux_nat_event_pipe[0] == -1 encodes "pipe not created".  */
  int previous = (linux_nat_event_pipe[0] != -1);

  if (previous != enable)
    {
      sigset_t prev_mask;

      /* Block SIGCHLD while mutating the pipe so sigchld_handler
	 never sees a half-created/half-destroyed pipe.  */
      block_child_signals (&prev_mask);

      if (enable)
	{
	  if (pipe (linux_nat_event_pipe) == -1)
	    internal_error (__FILE__, __LINE__,
			    "creating event pipe failed.");

	  /* Non-blocking so async_file_mark in a signal handler can
	     never stall, and draining never blocks the event loop.  */
	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
	}
      else
	{
	  close (linux_nat_event_pipe[0]);
	  close (linux_nat_event_pipe[1]);
	  linux_nat_event_pipe[0] = -1;
	  linux_nat_event_pipe[1] = -1;
	}

      restore_child_signals_mask (&prev_mask);
    }

  return previous;
}
5577
5578 /* target_async implementation. */
5579
5580 static void
5581 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5582 void *context), void *context)
5583 {
5584 if (callback != NULL)
5585 {
5586 async_client_callback = callback;
5587 async_client_context = context;
5588 if (!linux_async_pipe (1))
5589 {
5590 add_file_handler (linux_nat_event_pipe[0],
5591 handle_target_event, NULL);
5592 /* There may be pending events to handle. Tell the event loop
5593 to poll them. */
5594 async_file_mark ();
5595 }
5596 }
5597 else
5598 {
5599 async_client_callback = callback;
5600 async_client_context = context;
5601 delete_file_handler (linux_nat_event_pipe[0]);
5602 linux_async_pipe (0);
5603 }
5604 return;
5605 }
5606
/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
   event came out.  iterate_over_lwps callback; DATA is unused.
   Always returns 0 so iteration continues over every LWP.  */

static int
linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
{
  if (!lwp->stopped)
    {
      ptid_t ptid = lwp->ptid;

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNSL: running -> suspending %s\n",
			    target_pid_to_str (lwp->ptid));

      /* If a stop was already requested on GDB's behalf, don't send
	 a second stop signal.  */
      if (lwp->last_resume_kind == resume_stop)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"linux-nat: already stopping LWP %ld at "
				"GDB's request\n",
				ptid_get_lwp (lwp->ptid));
	  return 0;
	}

      stop_callback (lwp, NULL);
      lwp->last_resume_kind = resume_stop;
    }
  else
    {
      /* Already known to be stopped; do nothing.  */

      if (debug_linux_nat)
	{
	  if (find_thread_ptid (lwp->ptid)->stop_requested)
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/stop_requested %s\n",
				target_pid_to_str (lwp->ptid));
	  else
	    fprintf_unfiltered (gdb_stdlog,
				"LNSL: already stopped/no "
				"stop_requested yet %s\n",
				target_pid_to_str (lwp->ptid));
	}
    }
  return 0;
}
5655
/* to_stop implementation.  In non-stop mode, stop every LWP matching
   PTID individually; in all-stop mode, delegate to the underlying
   single-threaded target's method.  */

static void
linux_nat_stop (ptid_t ptid)
{
  if (non_stop)
    iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
  else
    linux_ops->to_stop (ptid);
}
5664
/* to_close implementation: leave async mode (detaching from the
   event loop) before chaining to the underlying target's close
   method, if it has one.  */

static void
linux_nat_close (int quitting)
{
  /* Unregister from the event loop.  */
  if (target_is_async_p ())
    target_async (NULL, 0);

  if (linux_ops->to_close)
    linux_ops->to_close (quitting);
}
5675
5676 /* When requests are passed down from the linux-nat layer to the
5677 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5678 used. The address space pointer is stored in the inferior object,
5679 but the common code that is passed such ptid can't tell whether
5680 lwpid is a "main" process id or not (it assumes so). We reverse
5681 look up the "main" process id from the lwp here. */
5682
5683 struct address_space *
5684 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5685 {
5686 struct lwp_info *lwp;
5687 struct inferior *inf;
5688 int pid;
5689
5690 pid = GET_LWP (ptid);
5691 if (GET_LWP (ptid) == 0)
5692 {
5693 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5694 tgid. */
5695 lwp = find_lwp_pid (ptid);
5696 pid = GET_PID (lwp->ptid);
5697 }
5698 else
5699 {
5700 /* A (pid,lwpid,0) ptid. */
5701 pid = GET_PID (ptid);
5702 }
5703
5704 inf = find_inferior_pid (pid);
5705 gdb_assert (inf != NULL);
5706 return inf->aspace;
5707 }
5708
5709 int
5710 linux_nat_core_of_thread_1 (ptid_t ptid)
5711 {
5712 struct cleanup *back_to;
5713 char *filename;
5714 FILE *f;
5715 char *content = NULL;
5716 char *p;
5717 char *ts = 0;
5718 int content_read = 0;
5719 int i;
5720 int core;
5721
5722 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5723 GET_PID (ptid), GET_LWP (ptid));
5724 back_to = make_cleanup (xfree, filename);
5725
5726 f = fopen (filename, "r");
5727 if (!f)
5728 {
5729 do_cleanups (back_to);
5730 return -1;
5731 }
5732
5733 make_cleanup_fclose (f);
5734
5735 for (;;)
5736 {
5737 int n;
5738
5739 content = xrealloc (content, content_read + 1024);
5740 n = fread (content + content_read, 1, 1024, f);
5741 content_read += n;
5742 if (n < 1024)
5743 {
5744 content[content_read] = '\0';
5745 break;
5746 }
5747 }
5748
5749 make_cleanup (xfree, content);
5750
5751 p = strchr (content, '(');
5752
5753 /* Skip ")". */
5754 if (p != NULL)
5755 p = strchr (p, ')');
5756 if (p != NULL)
5757 p++;
5758
5759 /* If the first field after program name has index 0, then core number is
5760 the field with index 36. There's no constant for that anywhere. */
5761 if (p != NULL)
5762 p = strtok_r (p, " ", &ts);
5763 for (i = 0; p != NULL && i != 36; ++i)
5764 p = strtok_r (NULL, " ", &ts);
5765
5766 if (p == NULL || sscanf (p, "%d", &core) == 0)
5767 core = -1;
5768
5769 do_cleanups (back_to);
5770
5771 return core;
5772 }
5773
/* Return the cached value of the processor core for thread PTID, or
   -1 when the LWP is unknown.  The cache is refreshed elsewhere; no
   /proc access happens here.  */

int
linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
{
  struct lwp_info *info = find_lwp_pid (ptid);

  if (info)
    return info->core;
  return -1;
}
5785
/* Register T as the GNU/Linux native target, layering the
   multi-threaded linux-nat methods over the single-threaded methods
   T arrived with (which are preserved in linux_ops_saved).  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_pass_signals = linux_nat_pass_signals;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_thread_name = linux_nat_thread_name;
  t->to_has_thread_control = tc_schedlock;
  t->to_thread_address_space = linux_nat_thread_address_space;
  t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
  t->to_stopped_data_address = linux_nat_stopped_data_address;

  /* Async and terminal handling.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;
  t->to_close = linux_nat_close;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  t->to_supports_multi_process = linux_nat_supports_multi_process;

  t->to_supports_disable_randomization
    = linux_nat_supports_disable_randomization;

  t->to_core_of_thread = linux_nat_core_of_thread;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);
}
5840
/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}
5850
/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  SIGINFO_FIXUP receives the native siginfo, a buffer in
   the inferior's layout, and a direction flag.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (struct siginfo *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer.  As with linux_nat_set_new_thread, a single
     registered instance is assumed, so T is not consulted.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}
5863
/* Return the saved siginfo associated with PTID.  The LWP must be
   known (asserted); the pointer refers into the lwp_info record, so
   its lifetime is tied to that LWP.  */
struct siginfo *
linux_nat_get_siginfo (ptid_t ptid)
{
  struct lwp_info *lp = find_lwp_pid (ptid);

  gdb_assert (lp != NULL);

  return &lp->siginfo;
}
5874
/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

/* Module initializer: registers the "info proc" command family and
   the "set/show debug lin-lwp" maintenance knob, then installs the
   SIGCHLD handler and the signal masks that the sigsuspend-based
   wait loop relies on.  */
void
_initialize_linux_nat (void)
{
  static struct cmd_list_element *info_proc_cmdlist;

  add_prefix_cmd ("proc", class_info, linux_nat_info_proc_cmd,
		  _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default."),
		  &info_proc_cmdlist, "info proc ",
		  1/*allow-unknown*/, &infolist);

  add_cmd ("mappings", class_info, linux_nat_info_proc_cmd_mappings, _("\
List of mapped memory regions."),
	   &info_proc_cmdlist);

  add_cmd ("stat", class_info, linux_nat_info_proc_cmd_stat, _("\
List process info from /proc/PID/stat."),
	   &info_proc_cmdlist);

  add_cmd ("status", class_info, linux_nat_info_proc_cmd_status, _("\
List process info from /proc/PID/status."),
	   &info_proc_cmdlist);

  add_cmd ("cwd", class_info, linux_nat_info_proc_cmd_cwd, _("\
List current working directory of the process."),
	   &info_proc_cmdlist);

  add_cmd ("cmdline", class_info, linux_nat_info_proc_cmd_cmdline, _("\
List command line arguments of the process."),
	   &info_proc_cmdlist);

  add_cmd ("exe", class_info, linux_nat_info_proc_cmd_exe, _("\
List absolute filename for executable of the process."),
	   &info_proc_cmdlist);

  add_cmd ("all", class_info, linux_nat_info_proc_cmd_all, _("\
List all available /proc info."),
	   &info_proc_cmdlist);

  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
			    &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat,
			    &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}
5944 \f
5945
5946 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5947 the GNU/Linux Threads library and therefore doesn't really belong
5948 here. */
5949
/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  NOTE(review): the target bytes are copied raw into a
   host int with no byte-order or size conversion — this assumes host
   and target agree on int layout; confirm for cross debugging.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
			  sizeof (signo)) != 0)
    return 0;

  return signo;
}
5970
/* Return the set of signals used by the threads library in *SET.
   Also installs the SIGCHLD handler for the "cancel" signal and
   adjusts the blocked/suspend masks so that signal is handled like
   SIGCHLD.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* Ask the inferior's threads library which signal numbers it
     uses, via its internal variables.  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}