1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001-2012 Free Software Foundation, Inc.
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
19
20 #include "defs.h"
21 #include "inferior.h"
22 #include "target.h"
23 #include "gdb_string.h"
24 #include "gdb_wait.h"
25 #include "gdb_assert.h"
26 #ifdef HAVE_TKILL_SYSCALL
27 #include <unistd.h>
28 #include <sys/syscall.h>
29 #endif
30 #include <sys/ptrace.h>
31 #include "linux-nat.h"
32 #include "linux-ptrace.h"
33 #include "linux-procfs.h"
34 #include "linux-fork.h"
35 #include "gdbthread.h"
36 #include "gdbcmd.h"
37 #include "regcache.h"
38 #include "regset.h"
39 #include "inf-ptrace.h"
40 #include "auxv.h"
41 #include <sys/param.h> /* for MAXPATHLEN */
42 #include <sys/procfs.h> /* for elf_gregset etc. */
43 #include "elf-bfd.h" /* for elfcore_write_* */
44 #include "gregset.h" /* for gregset */
45 #include "gdbcore.h" /* for get_exec_file */
46 #include <ctype.h> /* for isdigit */
47 #include "gdbthread.h" /* for struct thread_info etc. */
48 #include "gdb_stat.h" /* for struct stat */
49 #include <fcntl.h> /* for O_RDONLY */
50 #include "inf-loop.h"
51 #include "event-loop.h"
52 #include "event-top.h"
53 #include <pwd.h>
54 #include <sys/types.h>
55 #include "gdb_dirent.h"
56 #include "xml-support.h"
57 #include "terminal.h"
58 #include <sys/vfs.h>
59 #include "solib.h"
60 #include "linux-osdata.h"
61
62 #ifndef SPUFS_MAGIC
63 #define SPUFS_MAGIC 0x23c9b64e
64 #endif
65
66 #ifdef HAVE_PERSONALITY
67 # include <sys/personality.h>
68 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
69 # define ADDR_NO_RANDOMIZE 0x0040000
70 # endif
71 #endif /* HAVE_PERSONALITY */
72
73 /* This comment documents the high-level logic of this file.
74
75 Waiting for events in sync mode
76 ===============================
77
78 When waiting for an event in a specific thread, we just use waitpid, passing
79 the specific pid, and not passing WNOHANG.
80
81 When waiting for an event in all threads, waitpid is not quite good. Prior to
82 version 2.4, Linux could wait for events either in the main thread or in
83 secondary threads, but not both (2.4 added the __WALL flag).  So, if we use blocking waitpid, we might
84 miss an event. The solution is to use non-blocking waitpid, together with
85 sigsuspend. First, we use non-blocking waitpid to get an event in the main
86 process, if any.  Second, we use non-blocking waitpid with the __WCLONE
87 flag to check for events in cloned processes. If nothing is found, we use
88 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
89 happened to a child process -- and SIGCHLD will be delivered both for events
90 in main debugged process and in cloned processes. As soon as we know there's
91 an event, we get back to calling non-blocking waitpid with and without
92 __WCLONE.
93
94 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
95 so that we don't miss a signal.  If SIGCHLD arrives in between, while it's
96 blocked, the signal becomes pending and sigsuspend immediately
97 notices it and returns.
98
99 Waiting for events in async mode
100 ================================
101
102 In async mode, GDB should always be ready to handle both user input
103 and target events, so neither blocking waitpid nor sigsuspend is a
104 viable option.  Instead, we should asynchronously notify the GDB main
105 event loop whenever there's an unprocessed event from the target. We
106 detect asynchronous target events by handling SIGCHLD signals. To
107 notify the event loop about target events, the self-pipe trick is used
108 --- a pipe is registered as a waitable event source in the event loop,
109 the event loop select/poll's on the read end of this pipe (as well as on
110 other event sources, e.g., stdin), and the SIGCHLD handler writes a
111 byte to this pipe. This is more portable than relying on
112 pselect/ppoll, since on kernels that lack those syscalls, libc
113 emulates them with select/poll+sigprocmask, and that is racy
114 (a.k.a. plain broken).
115
116 Obviously, if we fail to notify the event loop when there's a target
117 event, it's bad.  OTOH, if we notify the event loop when there's no
118 event from the target, linux_nat_wait will detect that there's no real
119 event to report, and return an event of type TARGET_WAITKIND_IGNORE.
120 This is mostly harmless, but it will waste time and is better avoided.
121
122 The main design point is that every time GDB is outside linux-nat.c,
123 we have a SIGCHLD handler installed that is called when something
124 happens to the target and notifies the GDB event loop. Whenever GDB
125 core decides to handle the event, and calls into linux-nat.c, we
126 process things as in sync mode, except that we never block in
127 sigsuspend.
128
129 While processing an event, we may end up momentarily blocked in
130 waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
131 return quickly. E.g., in all-stop mode, before reporting to the core
132 that an LWP hit a breakpoint, all LWPs are stopped by sending them
133 SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
134 Note that this is different from blocking indefinitely waiting for the
135 next event --- here, we're already handling an event.
136
137 Use of signals
138 ==============
139
140 We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
141 signal is not especially significant; we just need a signal to be delivered,
142 so that we can intercept it. SIGSTOP's advantage is that it can not be
143 blocked. A disadvantage is that it is not a real-time signal, so it can only
144 be queued once; we do not keep track of other sources of SIGSTOP.
145
146 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
147 use them, because they have special behavior when the signal is generated,
148 not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
149 kills the entire thread group.
150
151 A delivered SIGSTOP would stop the entire thread group, not just the thread we
152 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
153 cancel it (by PTRACE_CONT without passing SIGSTOP).
154
155 We could use a real-time signal instead. This would solve those problems; we
156 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
157 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
158 generates it, and there are races with trying to find a signal that is not
159 blocked. */
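
/* Illustrative sketch of the sync-mode wait pattern described above.
   This is not part of the build: the helper name and the SUSPEND_MASK
   parameter are hypothetical, and SIGCHLD is assumed to be blocked by
   the caller, with SUSPEND_MASK being the caller's mask minus
   SIGCHLD.  */
#if 0
static int
example_wait_for_any_child (int *statusp, const sigset_t *suspend_mask)
{
  for (;;)
    {
      /* Poll the main process first, then cloned LWPs, without
	 blocking.  */
      int pid = waitpid (-1, statusp, WNOHANG);
      if (pid <= 0)
	pid = waitpid (-1, statusp, __WCLONE | WNOHANG);
      if (pid > 0)
	return pid;

      /* Nothing yet; atomically unblock SIGCHLD and sleep.  If a
	 SIGCHLD arrived while it was blocked it is already pending,
	 so sigsuspend returns immediately and we poll again.  */
      sigsuspend (suspend_mask);
    }
}
#endif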
160
161 #ifndef O_LARGEFILE
162 #define O_LARGEFILE 0
163 #endif
164
165 /* Unlike other extended result codes, WSTOPSIG (status) on
166 PTRACE_O_TRACESYSGOOD syscall events returns SIGTRAP with bit 7
167 set, rather than plain SIGTRAP. */
168 #define SYSCALL_SIGTRAP (SIGTRAP | 0x80)
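
/* For example, with PTRACE_O_TRACESYSGOOD in effect, a syscall
   entry/exit stop is told apart from an ordinary SIGTRAP stop like
   this (status_to_str below does exactly that):

     if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
       ... handle syscall stop ...  */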
169
170 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
171 the use of the multi-threaded target. */
172 static struct target_ops *linux_ops;
173 static struct target_ops linux_ops_saved;
174
175 /* The method to call, if any, when a new thread is attached. */
176 static void (*linux_nat_new_thread) (struct lwp_info *);
177
178 /* Hook to call prior to resuming a thread. */
179 static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
180
181 /* The method to call, if any, when the siginfo object needs to be
182 converted between the layout returned by ptrace, and the layout in
183 the architecture of the inferior. */
184 static int (*linux_nat_siginfo_fixup) (struct siginfo *,
185 gdb_byte *,
186 int);
187
188 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
189 Called by our to_xfer_partial. */
190 static LONGEST (*super_xfer_partial) (struct target_ops *,
191 enum target_object,
192 const char *, gdb_byte *,
193 const gdb_byte *,
194 ULONGEST, LONGEST);
195
196 static int debug_linux_nat;
197 static void
198 show_debug_linux_nat (struct ui_file *file, int from_tty,
199 struct cmd_list_element *c, const char *value)
200 {
201 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
202 value);
203 }
204
205 struct simple_pid_list
206 {
207 int pid;
208 int status;
209 struct simple_pid_list *next;
210 };
211 struct simple_pid_list *stopped_pids;
212
213 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
214 can not be used, 1 if it can. */
215
216 static int linux_supports_tracefork_flag = -1;
217
218 /* This variable is a tri-state flag: -1 for unknown, 0 if
219 PTRACE_O_TRACESYSGOOD can not be used, 1 if it can. */
220
221 static int linux_supports_tracesysgood_flag = -1;
222
223 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
224 PTRACE_O_TRACEVFORKDONE. */
225
226 static int linux_supports_tracevforkdone_flag = -1;
227
228 /* Stores the currently used ptrace() options. */
229 static int current_ptrace_options = 0;
230
231 /* Async mode support. */
232
233 /* The read/write ends of the pipe registered as waitable file in the
234 event loop. */
235 static int linux_nat_event_pipe[2] = { -1, -1 };
236
237 /* Flush the event pipe. */
238
239 static void
240 async_file_flush (void)
241 {
242 int ret;
243 char buf;
244
245 do
246 {
247 ret = read (linux_nat_event_pipe[0], &buf, 1);
248 }
249 while (ret >= 0 || (ret == -1 && errno == EINTR));
250 }
251
252 /* Put something (anything, doesn't matter what, or how much) in event
253 pipe, so that the select/poll in the event-loop realizes we have
254 something to process. */
255
256 static void
257 async_file_mark (void)
258 {
259 int ret;
260
261 /* It doesn't really matter what the pipe contains, as long we end
262 up with something in it. Might as well flush the previous
263 left-overs. */
264 async_file_flush ();
265
266 do
267 {
268 ret = write (linux_nat_event_pipe[1], "+", 1);
269 }
270 while (ret == -1 && errno == EINTR);
271
272 /* Ignore EAGAIN. If the pipe is full, the event loop will already
273 be awakened anyway. */
274 }
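
/* Together, async_file_flush and async_file_mark implement the
   self-pipe trick described at the top of the file.  A SIGCHLD
   handler along these lines (illustrative only; the real handler is
   installed through sigchld_action, declared below) is all that is
   needed to wake up the event loop:

     static void
     example_sigchld_handler (int signo)
     {
       if (signo == SIGCHLD && linux_nat_event_pipe[0] != -1)
	 async_file_mark ();	// Wake up the event loop.
     }

   The read end, linux_nat_event_pipe[0], is registered with the
   event loop, whose select/poll reports it readable until
   async_file_flush drains it.  */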
275
276 static void linux_nat_async (void (*callback)
277 (enum inferior_event_type event_type,
278 void *context),
279 void *context);
280 static int kill_lwp (int lwpid, int signo);
281
282 static int stop_callback (struct lwp_info *lp, void *data);
283
284 static void block_child_signals (sigset_t *prev_mask);
285 static void restore_child_signals_mask (sigset_t *prev_mask);
286
287 struct lwp_info;
288 static struct lwp_info *add_lwp (ptid_t ptid);
289 static void purge_lwp_list (int pid);
290 static struct lwp_info *find_lwp_pid (ptid_t ptid);
291
292 \f
293 /* Trivial list manipulation functions to keep track of a list of
294 new stopped processes. */
295 static void
296 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
297 {
298 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
299
300 new_pid->pid = pid;
301 new_pid->status = status;
302 new_pid->next = *listp;
303 *listp = new_pid;
304 }
305
306 static int
307 in_pid_list_p (struct simple_pid_list *list, int pid)
308 {
309 struct simple_pid_list *p;
310
311 for (p = list; p != NULL; p = p->next)
312 if (p->pid == pid)
313 return 1;
314 return 0;
315 }
316
317 static int
318 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
319 {
320 struct simple_pid_list **p;
321
322 for (p = listp; *p != NULL; p = &(*p)->next)
323 if ((*p)->pid == pid)
324 {
325 struct simple_pid_list *next = (*p)->next;
326
327 *statusp = (*p)->status;
328 xfree (*p);
329 *p = next;
330 return 1;
331 }
332 return 0;
333 }
334
335 \f
336 /* A helper function for linux_test_for_tracefork, called after fork (). */
337
338 static void
339 linux_tracefork_child (void)
340 {
341 ptrace (PTRACE_TRACEME, 0, 0, 0);
342 kill (getpid (), SIGSTOP);
343 fork ();
344 _exit (0);
345 }
346
347 /* Wrapper function for waitpid which handles EINTR. */
348
349 static int
350 my_waitpid (int pid, int *statusp, int flags)
351 {
352 int ret;
353
354 do
355 {
356 ret = waitpid (pid, statusp, flags);
357 }
358 while (ret == -1 && errno == EINTR);
359
360 return ret;
361 }
362
363 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
364
365 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
366 we know that the feature is not available. This may change the tracing
367 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
368
369 However, if it succeeds, we don't know for sure that the feature is
370 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
371 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
372 fork tracing, and let it fork. If the process exits, we assume that we
373 can't use TRACEFORK; if we get the fork notification, and we can extract
374 the new child's PID, then we assume that we can. */
375
376 static void
377 linux_test_for_tracefork (int original_pid)
378 {
379 int child_pid, ret, status;
380 long second_pid;
381 sigset_t prev_mask;
382
383 /* We don't want those ptrace calls to be interrupted. */
384 block_child_signals (&prev_mask);
385
386 linux_supports_tracefork_flag = 0;
387 linux_supports_tracevforkdone_flag = 0;
388
389 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
390 if (ret != 0)
391 {
392 restore_child_signals_mask (&prev_mask);
393 return;
394 }
395
396 child_pid = fork ();
397 if (child_pid == -1)
398 perror_with_name (("fork"));
399
400 if (child_pid == 0)
401 linux_tracefork_child ();
402
403 ret = my_waitpid (child_pid, &status, 0);
404 if (ret == -1)
405 perror_with_name (("waitpid"));
406 else if (ret != child_pid)
407 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
408 if (! WIFSTOPPED (status))
409 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."),
410 status);
411
412 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
413 if (ret != 0)
414 {
415 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
416 if (ret != 0)
417 {
418 warning (_("linux_test_for_tracefork: failed to kill child"));
419 restore_child_signals_mask (&prev_mask);
420 return;
421 }
422
423 ret = my_waitpid (child_pid, &status, 0);
424 if (ret != child_pid)
425 warning (_("linux_test_for_tracefork: failed "
426 "to wait for killed child"));
427 else if (!WIFSIGNALED (status))
428 warning (_("linux_test_for_tracefork: unexpected "
429 "wait status 0x%x from killed child"), status);
430
431 restore_child_signals_mask (&prev_mask);
432 return;
433 }
434
435 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
436 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
437 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
438 linux_supports_tracevforkdone_flag = (ret == 0);
439
440 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
441 if (ret != 0)
442 warning (_("linux_test_for_tracefork: failed to resume child"));
443
444 ret = my_waitpid (child_pid, &status, 0);
445
446 if (ret == child_pid && WIFSTOPPED (status)
447 && status >> 16 == PTRACE_EVENT_FORK)
448 {
449 second_pid = 0;
450 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
451 if (ret == 0 && second_pid != 0)
452 {
453 int second_status;
454
455 linux_supports_tracefork_flag = 1;
456 my_waitpid (second_pid, &second_status, 0);
457 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
458 if (ret != 0)
459 warning (_("linux_test_for_tracefork: "
460 "failed to kill second child"));
461 my_waitpid (second_pid, &status, 0);
462 }
463 }
464 else
465 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
466 "(%d, status 0x%x)"), ret, status);
467
468 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
469 if (ret != 0)
470 warning (_("linux_test_for_tracefork: failed to kill child"));
471 my_waitpid (child_pid, &status, 0);
472
473 restore_child_signals_mask (&prev_mask);
474 }
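
/* As the test above illustrates, an extended ptrace event arrives as
   a SIGTRAP stop with the event code in the high bits of the wait
   status, and PTRACE_GETEVENTMSG then fetches the event's payload,
   here the new child's PID:

     if (WIFSTOPPED (status) && status >> 16 == PTRACE_EVENT_FORK)
       ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);  */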
475
476 /* Determine if PTRACE_O_TRACESYSGOOD can be used to follow syscalls.
477
478 We try to enable syscall tracing on ORIGINAL_PID. If this fails,
479 we know that the feature is not available. This may change the tracing
480 options for ORIGINAL_PID, but we'll be setting them shortly anyway. */
481
482 static void
483 linux_test_for_tracesysgood (int original_pid)
484 {
485 int ret;
486 sigset_t prev_mask;
487
488 /* We don't want those ptrace calls to be interrupted. */
489 block_child_signals (&prev_mask);
490
491 linux_supports_tracesysgood_flag = 0;
492
493 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACESYSGOOD);
494 if (ret != 0)
495 goto out;
496
497 linux_supports_tracesysgood_flag = 1;
498 out:
499 restore_child_signals_mask (&prev_mask);
500 }
501
502 /* Determine whether the PTRACE_O_TRACESYSGOOD option is available.
503 This function also sets linux_supports_tracesysgood_flag. */
504
505 static int
506 linux_supports_tracesysgood (int pid)
507 {
508 if (linux_supports_tracesysgood_flag == -1)
509 linux_test_for_tracesysgood (pid);
510 return linux_supports_tracesysgood_flag;
511 }
512
513 /* Return non-zero iff we have tracefork functionality available.
514 This function also sets linux_supports_tracefork_flag. */
515
516 static int
517 linux_supports_tracefork (int pid)
518 {
519 if (linux_supports_tracefork_flag == -1)
520 linux_test_for_tracefork (pid);
521 return linux_supports_tracefork_flag;
522 }
523
524 static int
525 linux_supports_tracevforkdone (int pid)
526 {
527 if (linux_supports_tracefork_flag == -1)
528 linux_test_for_tracefork (pid);
529 return linux_supports_tracevforkdone_flag;
530 }
531
532 static void
533 linux_enable_tracesysgood (ptid_t ptid)
534 {
535 int pid = ptid_get_lwp (ptid);
536
537 if (pid == 0)
538 pid = ptid_get_pid (ptid);
539
540 if (linux_supports_tracesysgood (pid) == 0)
541 return;
542
543 current_ptrace_options |= PTRACE_O_TRACESYSGOOD;
544
545 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
546 }
547
548 \f
549 void
550 linux_enable_event_reporting (ptid_t ptid)
551 {
552 int pid = ptid_get_lwp (ptid);
553
554 if (pid == 0)
555 pid = ptid_get_pid (ptid);
556
557 if (! linux_supports_tracefork (pid))
558 return;
559
560 current_ptrace_options |= PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
561 | PTRACE_O_TRACEEXEC | PTRACE_O_TRACECLONE;
562
563 if (linux_supports_tracevforkdone (pid))
564 current_ptrace_options |= PTRACE_O_TRACEVFORKDONE;
565
566 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
567 read-only process state. */
568
569 ptrace (PTRACE_SETOPTIONS, pid, 0, current_ptrace_options);
570 }
571
572 static void
573 linux_child_post_attach (int pid)
574 {
575 linux_enable_event_reporting (pid_to_ptid (pid));
576 linux_enable_tracesysgood (pid_to_ptid (pid));
577 }
578
579 static void
580 linux_child_post_startup_inferior (ptid_t ptid)
581 {
582 linux_enable_event_reporting (ptid);
583 linux_enable_tracesysgood (ptid);
584 }
585
586 static int
587 linux_child_follow_fork (struct target_ops *ops, int follow_child)
588 {
589 sigset_t prev_mask;
590 int has_vforked;
591 int parent_pid, child_pid;
592
593 block_child_signals (&prev_mask);
594
595 has_vforked = (inferior_thread ()->pending_follow.kind
596 == TARGET_WAITKIND_VFORKED);
597 parent_pid = ptid_get_lwp (inferior_ptid);
598 if (parent_pid == 0)
599 parent_pid = ptid_get_pid (inferior_ptid);
600 child_pid = PIDGET (inferior_thread ()->pending_follow.value.related_pid);
601
602 if (!detach_fork)
603 linux_enable_event_reporting (pid_to_ptid (child_pid));
604
605 if (has_vforked
606 && !non_stop /* Non-stop always resumes both branches. */
607 && (!target_is_async_p () || sync_execution)
608 && !(follow_child || detach_fork || sched_multi))
609 {
610 /* The parent stays blocked inside the vfork syscall until the
611 child execs or exits. If we don't let the child run, then
612 the parent stays blocked. If we're telling the parent to run
613 in the foreground, the user will not be able to ctrl-c to get
614 back the terminal, effectively hanging the debug session. */
615 fprintf_filtered (gdb_stderr, _("\
616 Can not resume the parent process over vfork in the foreground while\n\
617 holding the child stopped. Try \"set detach-on-fork\" or \
618 \"set schedule-multiple\".\n"));
619 /* FIXME output string > 80 columns. */
620 return 1;
621 }
622
623 if (! follow_child)
624 {
625 struct lwp_info *child_lp = NULL;
626
627 /* We're already attached to the parent, by default. */
628
629 /* Detach new forked process? */
630 if (detach_fork)
631 {
632 /* Before detaching from the child, remove all breakpoints
633 from it. If we forked, then this has already been taken
634 care of by infrun.c. If we vforked however, any
635 breakpoint inserted in the parent is visible in the
636 child, even those added while stopped in a vfork
637 catchpoint. This will remove the breakpoints from the
638 parent also, but they'll be reinserted below. */
639 if (has_vforked)
640 {
641 /* keep breakpoints list in sync. */
642 remove_breakpoints_pid (GET_PID (inferior_ptid));
643 }
644
645 if (info_verbose || debug_linux_nat)
646 {
647 target_terminal_ours ();
648 fprintf_filtered (gdb_stdlog,
649 "Detaching after fork from "
650 "child process %d.\n",
651 child_pid);
652 }
653
654 ptrace (PTRACE_DETACH, child_pid, 0, 0);
655 }
656 else
657 {
658 struct inferior *parent_inf, *child_inf;
659 struct cleanup *old_chain;
660
661 /* Add process to GDB's tables. */
662 child_inf = add_inferior (child_pid);
663
664 parent_inf = current_inferior ();
665 child_inf->attach_flag = parent_inf->attach_flag;
666 copy_terminal_info (child_inf, parent_inf);
667
668 old_chain = save_inferior_ptid ();
669 save_current_program_space ();
670
671 inferior_ptid = ptid_build (child_pid, child_pid, 0);
672 add_thread (inferior_ptid);
673 child_lp = add_lwp (inferior_ptid);
674 child_lp->stopped = 1;
675 child_lp->last_resume_kind = resume_stop;
676
677 /* If this is a vfork child, then the address-space is
678 shared with the parent. */
679 if (has_vforked)
680 {
681 child_inf->pspace = parent_inf->pspace;
682 child_inf->aspace = parent_inf->aspace;
683
684 /* The parent will be frozen until the child is done
685 with the shared region. Keep track of the
686 parent. */
687 child_inf->vfork_parent = parent_inf;
688 child_inf->pending_detach = 0;
689 parent_inf->vfork_child = child_inf;
690 parent_inf->pending_detach = 0;
691 }
692 else
693 {
694 child_inf->aspace = new_address_space ();
695 child_inf->pspace = add_program_space (child_inf->aspace);
696 child_inf->removable = 1;
697 set_current_program_space (child_inf->pspace);
698 clone_program_space (child_inf->pspace, parent_inf->pspace);
699
700 /* Let the shared library layer (solib-svr4) learn about
701 this new process, relocate the cloned exec, pull in
702 shared libraries, and install the solib event
703 breakpoint. If a "cloned-VM" event was propagated
704 better throughout the core, this wouldn't be
705 required. */
706 solib_create_inferior_hook (0);
707 }
708
709 /* Let the thread_db layer learn about this new process. */
710 check_for_thread_db ();
711
712 do_cleanups (old_chain);
713 }
714
715 if (has_vforked)
716 {
717 struct lwp_info *parent_lp;
718 struct inferior *parent_inf;
719
720 parent_inf = current_inferior ();
721
722 /* If we detached from the child, then we have to be careful
723 to not insert breakpoints in the parent until the child
724 is done with the shared memory region. However, if we're
725 staying attached to the child, then we can and should
726 insert breakpoints, so that we can debug it. A
727 subsequent child exec or exit is enough to know when
728 the child stops using the parent's address space. */
729 parent_inf->waiting_for_vfork_done = detach_fork;
730 parent_inf->pspace->breakpoints_not_allowed = detach_fork;
731
732 parent_lp = find_lwp_pid (pid_to_ptid (parent_pid));
733 gdb_assert (linux_supports_tracefork_flag >= 0);
734
735 if (linux_supports_tracevforkdone (0))
736 {
737 if (debug_linux_nat)
738 fprintf_unfiltered (gdb_stdlog,
739 "LCFF: waiting for VFORK_DONE on %d\n",
740 parent_pid);
741 parent_lp->stopped = 1;
742
743 /* We'll handle the VFORK_DONE event like any other
744 event, in target_wait. */
745 }
746 else
747 {
748 /* We can't insert breakpoints until the child has
749 finished with the shared memory region. We need to
750 wait until that happens. Ideal would be to just
751 call:
752 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
753 - waitpid (parent_pid, &status, __WALL);
754 However, most architectures can't handle a syscall
755 being traced on the way out if it wasn't traced on
756 the way in.
757
758 We might also think to loop, continuing the child
759 until it exits or gets a SIGTRAP. One problem is
760 that the child might call ptrace with PTRACE_TRACEME.
761
762 There's no simple and reliable way to figure out when
763 the vforked child will be done with its copy of the
764 shared memory. We could step it out of the syscall,
765 two instructions, let it go, and then single-step the
766 parent once. When we have hardware single-step, this
767 would work; with software single-step it could still
768 be made to work but we'd have to be able to insert
769 single-step breakpoints in the child, and we'd have
770 to insert -just- the single-step breakpoint in the
771 parent. Very awkward.
772
773 In the end, the best we can do is to make sure it
774 runs for a little while. Hopefully it will be out of
775 range of any breakpoints we reinsert. Usually this
776 is only the single-step breakpoint at vfork's return
777 point. */
778
779 if (debug_linux_nat)
780 fprintf_unfiltered (gdb_stdlog,
781 "LCFF: no VFORK_DONE "
782 "support, sleeping a bit\n");
783
784 usleep (10000);
785
786 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
787 and leave it pending. The next linux_nat_resume call
788 will notice a pending event, and bypasses actually
789 resuming the inferior. */
790 parent_lp->status = 0;
791 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
792 parent_lp->stopped = 1;
793
794 /* If we're in async mode, need to tell the event loop
795 there's something here to process. */
796 if (target_can_async_p ())
797 async_file_mark ();
798 }
799 }
800 }
801 else
802 {
803 struct inferior *parent_inf, *child_inf;
804 struct lwp_info *child_lp;
805 struct program_space *parent_pspace;
806
807 if (info_verbose || debug_linux_nat)
808 {
809 target_terminal_ours ();
810 if (has_vforked)
811 fprintf_filtered (gdb_stdlog,
812 _("Attaching after process %d "
813 "vfork to child process %d.\n"),
814 parent_pid, child_pid);
815 else
816 fprintf_filtered (gdb_stdlog,
817 _("Attaching after process %d "
818 "fork to child process %d.\n"),
819 parent_pid, child_pid);
820 }
821
822 /* Add the new inferior first, so that the target_detach below
823 doesn't unpush the target. */
824
825 child_inf = add_inferior (child_pid);
826
827 parent_inf = current_inferior ();
828 child_inf->attach_flag = parent_inf->attach_flag;
829 copy_terminal_info (child_inf, parent_inf);
830
831 parent_pspace = parent_inf->pspace;
832
833 /* If we're vforking, we want to hold on to the parent until the
834 child exits or execs. At child exec or exit time we can
835 remove the old breakpoints from the parent and detach or
836 resume debugging it. Otherwise, detach the parent now; we'll
837 want to reuse its program/address spaces, but we can't set
838 them to the child before removing breakpoints from the
839 parent, otherwise, the breakpoints module could decide to
840 remove breakpoints from the wrong process (since they'd be
841 assigned to the same address space). */
842
843 if (has_vforked)
844 {
845 gdb_assert (child_inf->vfork_parent == NULL);
846 gdb_assert (parent_inf->vfork_child == NULL);
847 child_inf->vfork_parent = parent_inf;
848 child_inf->pending_detach = 0;
849 parent_inf->vfork_child = child_inf;
850 parent_inf->pending_detach = detach_fork;
851 parent_inf->waiting_for_vfork_done = 0;
852 }
853 else if (detach_fork)
854 target_detach (NULL, 0);
855
856 /* Note that the detach above makes PARENT_INF dangling. */
857
858 /* Add the child thread to the appropriate lists, and switch to
859 this new thread, before cloning the program space, and
860 informing the solib layer about this new process. */
861
862 inferior_ptid = ptid_build (child_pid, child_pid, 0);
863 add_thread (inferior_ptid);
864 child_lp = add_lwp (inferior_ptid);
865 child_lp->stopped = 1;
866 child_lp->last_resume_kind = resume_stop;
867
868 /* If this is a vfork child, then the address-space is shared
869 with the parent. If we detached from the parent, then we can
870 reuse the parent's program/address spaces. */
871 if (has_vforked || detach_fork)
872 {
873 child_inf->pspace = parent_pspace;
874 child_inf->aspace = child_inf->pspace->aspace;
875 }
876 else
877 {
878 child_inf->aspace = new_address_space ();
879 child_inf->pspace = add_program_space (child_inf->aspace);
880 child_inf->removable = 1;
881 set_current_program_space (child_inf->pspace);
882 clone_program_space (child_inf->pspace, parent_pspace);
883
884 /* Let the shared library layer (solib-svr4) learn about
885 this new process, relocate the cloned exec, pull in
886 shared libraries, and install the solib event breakpoint.
887 If a "cloned-VM" event was propagated better throughout
888 the core, this wouldn't be required. */
889 solib_create_inferior_hook (0);
890 }
891
892 /* Let the thread_db layer learn about this new process. */
893 check_for_thread_db ();
894 }
895
896 restore_child_signals_mask (&prev_mask);
897 return 0;
898 }
899
900 \f
901 static int
902 linux_child_insert_fork_catchpoint (int pid)
903 {
904 return !linux_supports_tracefork (pid);
905 }
906
907 static int
908 linux_child_remove_fork_catchpoint (int pid)
909 {
910 return 0;
911 }
912
913 static int
914 linux_child_insert_vfork_catchpoint (int pid)
915 {
916 return !linux_supports_tracefork (pid);
917 }
918
919 static int
920 linux_child_remove_vfork_catchpoint (int pid)
921 {
922 return 0;
923 }
924
925 static int
926 linux_child_insert_exec_catchpoint (int pid)
927 {
928 return !linux_supports_tracefork (pid);
929 }
930
931 static int
932 linux_child_remove_exec_catchpoint (int pid)
933 {
934 return 0;
935 }
936
937 static int
938 linux_child_set_syscall_catchpoint (int pid, int needed, int any_count,
939 int table_size, int *table)
940 {
941 if (!linux_supports_tracesysgood (pid))
942 return 1;
943
944 /* On GNU/Linux, we ignore the arguments. It means that we only
945 enable the syscall catchpoints, but do not disable them.
946
947 Also, we do not use the `table' information because we do not
948 filter system calls here. We let GDB do the logic for us. */
949 return 0;
950 }
951
952 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
953 are processes sharing the same VM space. A multi-threaded process
954 is basically a group of such processes. However, such a grouping
955 is almost entirely a user-space issue; the kernel doesn't enforce
956 such a grouping at all (this might change in the future). In
957 general, we'll rely on the threads library (i.e. the GNU/Linux
958 Threads library) to provide such a grouping.
959
960 It is perfectly possible to write a multi-threaded application
961 without the assistance of a threads library, by using the clone
962 system call directly. This module should be able to give some
963 rudimentary support for debugging such applications if developers
964 specify the CLONE_PTRACE flag in the clone system call, and are
965 using the Linux kernel 2.4 or above.
966
967 Note that there are some peculiarities in GNU/Linux that affect
968 this code:
969
970 - In general one should specify the __WCLONE flag to waitpid in
971 order to make it report events for any of the cloned processes
972 (and leave it out for the initial process). However, if a cloned
973 process has exited the exit status is only reported if the
974 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
975 we cannot use it since GDB must work on older systems too.
976
977 - When a traced, cloned process exits and is waited for by the
978 debugger, the kernel reassigns it to the original parent and
979 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
980 library doesn't notice this, which leads to the "zombie problem":
981 When debugged, a multi-threaded process that spawns a lot of
982 threads will run out of processes, even if the threads exit,
983 because the "zombies" stay around. */
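
/* A practical consequence of the __WCLONE asymmetry described above
   is a retry idiom used in several places in this file (see e.g.
   lin_lwp_attach_lwp): wait without __WCLONE first, and retry with it
   if that fails with ECHILD:

     pid = my_waitpid (lwpid, &status, WNOHANG);
     if (pid == -1 && errno == ECHILD)
       pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);  */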
984
985 /* List of known LWPs. */
986 struct lwp_info *lwp_list;
987 \f
988
989 /* Original signal mask. */
990 static sigset_t normal_mask;
991
992 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
993 _initialize_linux_nat. */
994 static sigset_t suspend_mask;
995
996 /* Signals to block to make sigsuspend work. */
997 static sigset_t blocked_mask;
998
999 /* SIGCHLD action. */
1000 struct sigaction sigchld_action;
1001
1002 /* Block child signals (SIGCHLD and linux threads signals), and store
1003 the previous mask in PREV_MASK. */
1004
1005 static void
1006 block_child_signals (sigset_t *prev_mask)
1007 {
1008 /* Make sure SIGCHLD is blocked. */
1009 if (!sigismember (&blocked_mask, SIGCHLD))
1010 sigaddset (&blocked_mask, SIGCHLD);
1011
1012 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
1013 }
1014
1015 /* Restore child signals mask, previously returned by
1016 block_child_signals. */
1017
1018 static void
1019 restore_child_signals_mask (sigset_t *prev_mask)
1020 {
1021 sigprocmask (SIG_SETMASK, prev_mask, NULL);
1022 }
1023
1024 /* Mask of signals to pass directly to the inferior. */
1025 static sigset_t pass_mask;
1026
1027 /* Update signals to pass to the inferior. */
1028 static void
1029 linux_nat_pass_signals (int numsigs, unsigned char *pass_signals)
1030 {
1031 int signo;
1032
1033 sigemptyset (&pass_mask);
1034
1035 for (signo = 1; signo < NSIG; signo++)
1036 {
1037 int target_signo = target_signal_from_host (signo);
1038 if (target_signo < numsigs && pass_signals[target_signo])
1039 sigaddset (&pass_mask, signo);
1040 }
1041 }
1042
1043 \f
1044
1045 /* Prototypes for local functions. */
1046 static int stop_wait_callback (struct lwp_info *lp, void *data);
1047 static int linux_thread_alive (ptid_t ptid);
1048 static char *linux_child_pid_to_exec_file (int pid);
1049
1050 \f
1051 /* Convert wait status STATUS to a string. Used for printing debug
1052 messages only. */
1053
1054 static char *
1055 status_to_str (int status)
1056 {
1057 static char buf[64];
1058
1059 if (WIFSTOPPED (status))
1060 {
1061 if (WSTOPSIG (status) == SYSCALL_SIGTRAP)
1062 snprintf (buf, sizeof (buf), "%s (stopped at syscall)",
1063 strsignal (SIGTRAP));
1064 else
1065 snprintf (buf, sizeof (buf), "%s (stopped)",
1066 strsignal (WSTOPSIG (status)));
1067 }
1068 else if (WIFSIGNALED (status))
1069 snprintf (buf, sizeof (buf), "%s (terminated)",
1070 strsignal (WTERMSIG (status)));
1071 else
1072 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1073
1074 return buf;
1075 }
1076
1077 /* Destroy and free LP. */
1078
1079 static void
1080 lwp_free (struct lwp_info *lp)
1081 {
1082 xfree (lp->arch_private);
1083 xfree (lp);
1084 }
1085
1086 /* Remove all LWPs belonging to PID from the lwp list. */
1087
1088 static void
1089 purge_lwp_list (int pid)
1090 {
1091 struct lwp_info *lp, *lpprev, *lpnext;
1092
1093 lpprev = NULL;
1094
1095 for (lp = lwp_list; lp; lp = lpnext)
1096 {
1097 lpnext = lp->next;
1098
1099 if (ptid_get_pid (lp->ptid) == pid)
1100 {
1101 if (lp == lwp_list)
1102 lwp_list = lp->next;
1103 else
1104 lpprev->next = lp->next;
1105
1106 lwp_free (lp);
1107 }
1108 else
1109 lpprev = lp;
1110 }
1111 }
1112
1113 /* Return the number of known LWPs in the tgid given by PID. */
1114
1115 static int
1116 num_lwps (int pid)
1117 {
1118 int count = 0;
1119 struct lwp_info *lp;
1120
1121 for (lp = lwp_list; lp; lp = lp->next)
1122 if (ptid_get_pid (lp->ptid) == pid)
1123 count++;
1124
1125 return count;
1126 }
1127
1128 /* Add the LWP specified by PID to the list. Return a pointer to the
1129 structure describing the new LWP. The LWP should already be stopped
1130 (with an exception for the very first LWP). */
1131
1132 static struct lwp_info *
1133 add_lwp (ptid_t ptid)
1134 {
1135 struct lwp_info *lp;
1136
1137 gdb_assert (is_lwp (ptid));
1138
1139 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1140
1141 memset (lp, 0, sizeof (struct lwp_info));
1142
1143 lp->last_resume_kind = resume_continue;
1144 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1145
1146 lp->ptid = ptid;
1147 lp->core = -1;
1148
1149 lp->next = lwp_list;
1150 lwp_list = lp;
1151
1152 /* Let the arch specific bits know about this new thread. Current
1153 clients of this callback take the opportunity to install
1154 watchpoints in the new thread. Don't do this for the first
1155 thread though. If we're spawning a child ("run"), the thread
1156 executes the shell wrapper first, and we shouldn't touch it until
1157 it execs the program we want to debug. For "attach", it'd be
1158 okay to call the callback, but it's not necessary, because
1159 watchpoints can't yet have been inserted into the inferior. */
1160 if (num_lwps (GET_PID (ptid)) > 1 && linux_nat_new_thread != NULL)
1161 linux_nat_new_thread (lp);
1162
1163 return lp;
1164 }
1165
1166 /* Remove the LWP specified by PID from the list. */
1167
1168 static void
1169 delete_lwp (ptid_t ptid)
1170 {
1171 struct lwp_info *lp, *lpprev;
1172
1173 lpprev = NULL;
1174
1175 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1176 if (ptid_equal (lp->ptid, ptid))
1177 break;
1178
1179 if (!lp)
1180 return;
1181
1182 if (lpprev)
1183 lpprev->next = lp->next;
1184 else
1185 lwp_list = lp->next;
1186
1187 lwp_free (lp);
1188 }
1189
1190 /* Return a pointer to the structure describing the LWP corresponding
1191 to PID. If no corresponding LWP could be found, return NULL. */
1192
1193 static struct lwp_info *
1194 find_lwp_pid (ptid_t ptid)
1195 {
1196 struct lwp_info *lp;
1197 int lwp;
1198
1199 if (is_lwp (ptid))
1200 lwp = GET_LWP (ptid);
1201 else
1202 lwp = GET_PID (ptid);
1203
1204 for (lp = lwp_list; lp; lp = lp->next)
1205 if (lwp == GET_LWP (lp->ptid))
1206 return lp;
1207
1208 return NULL;
1209 }
1210
1211 /* Call CALLBACK with its second argument set to DATA for every LWP in
1212 the list. If CALLBACK returns 1 for a particular LWP, return a
1213 pointer to the structure describing that LWP immediately.
1214 Otherwise return NULL. */
1215
1216 struct lwp_info *
1217 iterate_over_lwps (ptid_t filter,
1218 int (*callback) (struct lwp_info *, void *),
1219 void *data)
1220 {
1221 struct lwp_info *lp, *lpnext;
1222
1223 for (lp = lwp_list; lp; lp = lpnext)
1224 {
1225 lpnext = lp->next;
1226
1227 if (ptid_match (lp->ptid, filter))
1228 {
1229 if ((*callback) (lp, data))
1230 return lp;
1231 }
1232 }
1233
1234 return NULL;
1235 }
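
/* For example, the detach path below uses this iterator to first
   stop every LWP of process PID and then wait for each stop to be
   reported (see linux_nat_detach):

     iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
     iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);  */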
1236
1237 /* Update our internal state when changing from one checkpoint to
1238 another indicated by NEW_PTID. We can only switch single-threaded
1239 applications, so we only create one new LWP, and the previous list
1240 is discarded. */
1241
1242 void
1243 linux_nat_switch_fork (ptid_t new_ptid)
1244 {
1245 struct lwp_info *lp;
1246
1247 purge_lwp_list (GET_PID (inferior_ptid));
1248
1249 lp = add_lwp (new_ptid);
1250 lp->stopped = 1;
1251
1252 /* This changes the thread's ptid while preserving the gdb thread
1253 num. Also changes the inferior pid, while preserving the
1254 inferior num. */
1255 thread_change_ptid (inferior_ptid, new_ptid);
1256
1257 /* We've just told GDB core that the thread changed target id, but,
1258 in fact, it really is a different thread, with different register
1259 contents. */
1260 registers_changed ();
1261 }
1262
1263 /* Handle the exit of a single thread LP. */
1264
1265 static void
1266 exit_lwp (struct lwp_info *lp)
1267 {
1268 struct thread_info *th = find_thread_ptid (lp->ptid);
1269
1270 if (th)
1271 {
1272 if (print_thread_events)
1273 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1274
1275 delete_thread (lp->ptid);
1276 }
1277
1278 delete_lwp (lp->ptid);
1279 }
1280
1281 /* Detect `T (stopped)' in `/proc/PID/status'.
1282 Other states including `T (tracing stop)' are reported as false. */
1283
1284 static int
1285 pid_is_stopped (pid_t pid)
1286 {
1287 FILE *status_file;
1288 char buf[100];
1289 int retval = 0;
1290
1291 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1292 status_file = fopen (buf, "r");
1293 if (status_file != NULL)
1294 {
1295 int have_state = 0;
1296
1297 while (fgets (buf, sizeof (buf), status_file))
1298 {
1299 if (strncmp (buf, "State:", 6) == 0)
1300 {
1301 have_state = 1;
1302 break;
1303 }
1304 }
1305 if (have_state && strstr (buf, "T (stopped)") != NULL)
1306 retval = 1;
1307 fclose (status_file);
1308 }
1309 return retval;
1310 }
1311
1312 /* Wait for the LWP specified by LP, which we have just attached to.
1313 Returns a wait status for that LWP, to cache. */
1314
1315 static int
1316 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1317 int *signalled)
1318 {
1319 pid_t new_pid, pid = GET_LWP (ptid);
1320 int status;
1321
1322 if (pid_is_stopped (pid))
1323 {
1324 if (debug_linux_nat)
1325 fprintf_unfiltered (gdb_stdlog,
1326 "LNPAW: Attaching to a stopped process\n");
1327
1328 /* The process is definitely stopped. It is in a job control
1329 stop, unless the kernel predates the TASK_STOPPED /
1330 TASK_TRACED distinction, in which case it might be in a
1331 ptrace stop. Make sure it is in a ptrace stop; from there we
1332 can kill it, signal it, et cetera.
1333
1334 First make sure there is a pending SIGSTOP. Since we are
1335 already attached, the process can not transition from stopped
1336 to running without a PTRACE_CONT; so we know this signal will
1337 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1338 probably already in the queue (unless this kernel is old
1339 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1340 is not an RT signal, it can only be queued once. */
1341 kill_lwp (pid, SIGSTOP);
1342
1343 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1344 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1345 ptrace (PTRACE_CONT, pid, 0, 0);
1346 }
1347
1348 /* Make sure the initial process is stopped. The user-level threads
1349 layer might want to poke around in the inferior, and that won't
1350 work if things haven't stabilized yet. */
1351 new_pid = my_waitpid (pid, &status, 0);
1352 if (new_pid == -1 && errno == ECHILD)
1353 {
1354 if (first)
1355 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1356
1357 /* Try again with __WCLONE to check cloned processes. */
1358 new_pid = my_waitpid (pid, &status, __WCLONE);
1359 *cloned = 1;
1360 }
1361
1362 gdb_assert (pid == new_pid);
1363
1364 if (!WIFSTOPPED (status))
1365 {
1366 /* The pid we tried to attach has apparently just exited. */
1367 if (debug_linux_nat)
1368 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1369 pid, status_to_str (status));
1370 return status;
1371 }
1372
1373 if (WSTOPSIG (status) != SIGSTOP)
1374 {
1375 *signalled = 1;
1376 if (debug_linux_nat)
1377 fprintf_unfiltered (gdb_stdlog,
1378 "LNPAW: Received %s after attaching\n",
1379 status_to_str (status));
1380 }
1381
1382 return status;
1383 }
1384
1385 /* Attach to the LWP specified by PID. Return 0 if successful, -1 if
1386 the new LWP could not be attached, or 1 if we're already auto
1387 attached to this thread, but haven't processed the
1388 PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
1389 its existence, without considering it an error. */
1390
1391 int
1392 lin_lwp_attach_lwp (ptid_t ptid)
1393 {
1394 struct lwp_info *lp;
1395 sigset_t prev_mask;
1396 int lwpid;
1397
1398 gdb_assert (is_lwp (ptid));
1399
1400 block_child_signals (&prev_mask);
1401
1402 lp = find_lwp_pid (ptid);
1403 lwpid = GET_LWP (ptid);
1404
1405 /* We assume that we're already attached to any LWP that has an id
1406 equal to the overall process id, and to any LWP that is already
1407 in our list of LWPs. If we're not seeing exit events from threads
1408 and we've had PID wraparound since we last tried to stop all threads,
1409 this assumption might be wrong; fortunately, this is very unlikely
1410 to happen. */
1411 if (lwpid != GET_PID (ptid) && lp == NULL)
1412 {
1413 int status, cloned = 0, signalled = 0;
1414
1415 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1416 {
1417 if (linux_supports_tracefork_flag)
1418 {
1419 /* If we haven't stopped all threads when we get here,
1420 we may have seen a thread listed in thread_db's list,
1421 but not processed the PTRACE_EVENT_CLONE yet. If
1422 that's the case, ignore this new thread, and let
1423 normal event handling discover it later. */
1424 if (in_pid_list_p (stopped_pids, lwpid))
1425 {
1426 /* We've already seen this thread stop, but we
1427 haven't seen the PTRACE_EVENT_CLONE extended
1428 event yet. */
1429 restore_child_signals_mask (&prev_mask);
1430 return 0;
1431 }
1432 else
1433 {
1434 int new_pid;
1435 int status;
1436
1437 /* See if we've got a stop for this new child
1438 pending. If so, we're already attached. */
1439 new_pid = my_waitpid (lwpid, &status, WNOHANG);
1440 if (new_pid == -1 && errno == ECHILD)
1441 new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
1442 if (new_pid != -1)
1443 {
1444 if (WIFSTOPPED (status))
1445 add_to_pid_list (&stopped_pids, lwpid, status);
1446
1447 restore_child_signals_mask (&prev_mask);
1448 return 1;
1449 }
1450 }
1451 }
1452
1453 /* If we fail to attach to the thread, issue a warning,
1454 but continue. One way this can happen is if thread
1455 creation is interrupted; as of Linux kernel 2.6.19, a
1456 bug may place threads in the thread list and then fail
1457 to create them. */
1458 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1459 safe_strerror (errno));
1460 restore_child_signals_mask (&prev_mask);
1461 return -1;
1462 }
1463
1464 if (debug_linux_nat)
1465 fprintf_unfiltered (gdb_stdlog,
1466 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1467 target_pid_to_str (ptid));
1468
1469 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1470 if (!WIFSTOPPED (status))
1471 {
1472 restore_child_signals_mask (&prev_mask);
1473 return 1;
1474 }
1475
1476 lp = add_lwp (ptid);
1477 lp->stopped = 1;
1478 lp->cloned = cloned;
1479 lp->signalled = signalled;
1480 if (WSTOPSIG (status) != SIGSTOP)
1481 {
1482 lp->resumed = 1;
1483 lp->status = status;
1484 }
1485
1486 target_post_attach (GET_LWP (lp->ptid));
1487
1488 if (debug_linux_nat)
1489 {
1490 fprintf_unfiltered (gdb_stdlog,
1491 "LLAL: waitpid %s received %s\n",
1492 target_pid_to_str (ptid),
1493 status_to_str (status));
1494 }
1495 }
1496 else
1497 {
1498 /* We assume that the LWP representing the original process is
1499 already stopped. Mark it as stopped in the data structure
1500 that the GNU/linux ptrace layer uses to keep track of
1501 threads. Note that this won't have already been done since
1502 the main thread will have, we assume, been stopped by an
1503 attach from a different layer. */
1504 if (lp == NULL)
1505 lp = add_lwp (ptid);
1506 lp->stopped = 1;
1507 }
1508
1509 lp->last_resume_kind = resume_stop;
1510 restore_child_signals_mask (&prev_mask);
1511 return 0;
1512 }
1513
1514 static void
1515 linux_nat_create_inferior (struct target_ops *ops,
1516 char *exec_file, char *allargs, char **env,
1517 int from_tty)
1518 {
1519 #ifdef HAVE_PERSONALITY
1520 int personality_orig = 0, personality_set = 0;
1521 #endif /* HAVE_PERSONALITY */
1522
1523 /* The fork_child mechanism is synchronous and calls target_wait, so
1524 we have to mask the async mode. */
1525
1526 #ifdef HAVE_PERSONALITY
1527 if (disable_randomization)
1528 {
1529 errno = 0;
1530 personality_orig = personality (0xffffffff);
1531 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1532 {
1533 personality_set = 1;
1534 personality (personality_orig | ADDR_NO_RANDOMIZE);
1535 }
1536 if (errno != 0 || (personality_set
1537 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1538 warning (_("Error disabling address space randomization: %s"),
1539 safe_strerror (errno));
1540 }
1541 #endif /* HAVE_PERSONALITY */
1542
1543 /* Make sure we report all signals during startup. */
1544 linux_nat_pass_signals (0, NULL);
1545
1546 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1547
1548 #ifdef HAVE_PERSONALITY
1549 if (personality_set)
1550 {
1551 errno = 0;
1552 personality (personality_orig);
1553 if (errno != 0)
1554 warning (_("Error restoring address space randomization: %s"),
1555 safe_strerror (errno));
1556 }
1557 #endif /* HAVE_PERSONALITY */
1558 }
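
/* The personality(2) idiom above relies on personality (0xffffffff)
   being a pure query (the out-of-range persona changes nothing), so
   disabling address space randomization for the child reduces to:

     int orig = personality (0xffffffff);
     personality (orig | ADDR_NO_RANDOMIZE);
     ... create the inferior ...
     personality (orig);  */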
1559
1560 static void
1561 linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
1562 {
1563 struct lwp_info *lp;
1564 int status;
1565 ptid_t ptid;
1566
1567 /* Make sure we report all signals during attach. */
1568 linux_nat_pass_signals (0, NULL);
1569
1570 linux_ops->to_attach (ops, args, from_tty);
1571
1572 /* The ptrace base target adds the main thread with (pid,0,0)
1573 format. Decorate it with lwp info. */
1574 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1575 thread_change_ptid (inferior_ptid, ptid);
1576
1577 /* Add the initial process as the first LWP to the list. */
1578 lp = add_lwp (ptid);
1579
1580 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1581 &lp->signalled);
1582 if (!WIFSTOPPED (status))
1583 {
1584 if (WIFEXITED (status))
1585 {
1586 int exit_code = WEXITSTATUS (status);
1587
1588 target_terminal_ours ();
1589 target_mourn_inferior ();
1590 if (exit_code == 0)
1591 error (_("Unable to attach: program exited normally."));
1592 else
1593 error (_("Unable to attach: program exited with code %d."),
1594 exit_code);
1595 }
1596 else if (WIFSIGNALED (status))
1597 {
1598 enum target_signal signo;
1599
1600 target_terminal_ours ();
1601 target_mourn_inferior ();
1602
1603 signo = target_signal_from_host (WTERMSIG (status));
1604 error (_("Unable to attach: program terminated with signal "
1605 "%s, %s."),
1606 target_signal_to_name (signo),
1607 target_signal_to_string (signo));
1608 }
1609
1610 internal_error (__FILE__, __LINE__,
1611 _("unexpected status %d for PID %ld"),
1612 status, (long) GET_LWP (ptid));
1613 }
1614
1615 lp->stopped = 1;
1616
1617 /* Save the wait status to report later. */
1618 lp->resumed = 1;
1619 if (debug_linux_nat)
1620 fprintf_unfiltered (gdb_stdlog,
1621 "LNA: waitpid %ld, saving status %s\n",
1622 (long) GET_PID (lp->ptid), status_to_str (status));
1623
1624 lp->status = status;
1625
1626 if (target_can_async_p ())
1627 target_async (inferior_event_handler, 0);
1628 }
1629
1630 /* Get pending status of LP. */
1631 static int
1632 get_pending_status (struct lwp_info *lp, int *status)
1633 {
1634 enum target_signal signo = TARGET_SIGNAL_0;
1635
1636 /* If we paused threads momentarily, we may have stored pending
1637 events in lp->status or lp->waitstatus (see stop_wait_callback),
1638 and GDB core hasn't seen any signal for those threads.
1639 Otherwise, the last signal reported to the core is found in the
1640 thread object's stop_signal.
1641
1642 There's a corner case that isn't handled here at present. Only
1643 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1644 stop_signal make sense as a real signal to pass to the inferior.
1645 Some catchpoint related events, like
1646 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
1647 to TARGET_SIGNAL_SIGTRAP when the catchpoint triggers. But,
1648 those traps are debug API (ptrace in our case) related and
1649 induced; the inferior wouldn't see them if it wasn't being
1650 traced. Hence, we should never pass them to the inferior, even
1651 when set to pass state. Since this corner case isn't handled by
1652 infrun.c when proceeding with a signal, for consistency, neither
1653 do we handle it here (or elsewhere in the file we check for
1654 signal pass state). Normally SIGTRAP isn't set to pass state, so
1655 this is really a corner case. */
1656
1657 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1658 signo = TARGET_SIGNAL_0; /* a pending ptrace event, not a real signal. */
1659 else if (lp->status)
1660 signo = target_signal_from_host (WSTOPSIG (lp->status));
1661 else if (non_stop && !is_executing (lp->ptid))
1662 {
1663 struct thread_info *tp = find_thread_ptid (lp->ptid);
1664
1665 signo = tp->suspend.stop_signal;
1666 }
1667 else if (!non_stop)
1668 {
1669 struct target_waitstatus last;
1670 ptid_t last_ptid;
1671
1672 get_last_target_status (&last_ptid, &last);
1673
1674 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1675 {
1676 struct thread_info *tp = find_thread_ptid (lp->ptid);
1677
1678 signo = tp->suspend.stop_signal;
1679 }
1680 }
1681
1682 *status = 0;
1683
1684 if (signo == TARGET_SIGNAL_0)
1685 {
1686 if (debug_linux_nat)
1687 fprintf_unfiltered (gdb_stdlog,
1688 "GPT: lwp %s has no pending signal\n",
1689 target_pid_to_str (lp->ptid));
1690 }
1691 else if (!signal_pass_state (signo))
1692 {
1693 if (debug_linux_nat)
1694 fprintf_unfiltered (gdb_stdlog,
1695 "GPT: lwp %s had signal %s, "
1696 "but it is in no pass state\n",
1697 target_pid_to_str (lp->ptid),
1698 target_signal_to_string (signo));
1699 }
1700 else
1701 {
1702 *status = W_STOPCODE (target_signal_to_host (signo));
1703
1704 if (debug_linux_nat)
1705 fprintf_unfiltered (gdb_stdlog,
1706 "GPT: lwp %s has pending signal %s\n",
1707 target_pid_to_str (lp->ptid),
1708 target_signal_to_string (signo));
1709 }
1710
1711 return 0;
1712 }
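
/* Note on the W_STOPCODE above: it synthesizes a wait status that
   reads back exactly like one returned by waitpid for a stopped
   child, e.g.:

     int status = W_STOPCODE (SIGINT);
     gdb_assert (WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT);  */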
1713
1714 static int
1715 detach_callback (struct lwp_info *lp, void *data)
1716 {
1717 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1718
1719 if (debug_linux_nat && lp->status)
1720 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1721 strsignal (WSTOPSIG (lp->status)),
1722 target_pid_to_str (lp->ptid));
1723
1724 /* If there is a pending SIGSTOP, get rid of it. */
1725 if (lp->signalled)
1726 {
1727 if (debug_linux_nat)
1728 fprintf_unfiltered (gdb_stdlog,
1729 "DC: Sending SIGCONT to %s\n",
1730 target_pid_to_str (lp->ptid));
1731
1732 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1733 lp->signalled = 0;
1734 }
1735
1736 /* We don't actually detach from the LWP that has an id equal to the
1737 overall process id just yet. */
1738 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1739 {
1740 int status = 0;
1741
1742 /* Pass on any pending signal for this LWP. */
1743 get_pending_status (lp, &status);
1744
1745 if (linux_nat_prepare_to_resume != NULL)
1746 linux_nat_prepare_to_resume (lp);
1747 errno = 0;
1748 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1749 WSTOPSIG (status)) < 0)
1750 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1751 safe_strerror (errno));
1752
1753 if (debug_linux_nat)
1754 fprintf_unfiltered (gdb_stdlog,
1755 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1756 target_pid_to_str (lp->ptid),
1757 strsignal (WSTOPSIG (status)));
1758
1759 delete_lwp (lp->ptid);
1760 }
1761
1762 return 0;
1763 }
1764
1765 static void
1766 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1767 {
1768 int pid;
1769 int status;
1770 struct lwp_info *main_lwp;
1771
1772 pid = GET_PID (inferior_ptid);
1773
1774 if (target_can_async_p ())
1775 linux_nat_async (NULL, 0);
1776
1777 /* Stop all threads before detaching. ptrace requires that the
1778 thread is stopped to successfully detach. */
1779 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
1780 /* ... and wait until all of them have reported back that
1781 they're no longer running. */
1782 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
1783
1784 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
1785
1786 /* Only the initial process should be left right now. */
1787 gdb_assert (num_lwps (GET_PID (inferior_ptid)) == 1);
1788
1789 main_lwp = find_lwp_pid (pid_to_ptid (pid));
1790
1791 /* Pass on any pending signal for the last LWP. */
1792 if ((args == NULL || *args == '\0')
1793 && get_pending_status (main_lwp, &status) != -1
1794 && WIFSTOPPED (status))
1795 {
1796 /* Put the signal number in ARGS so that inf_ptrace_detach will
1797 pass it along with PTRACE_DETACH. */
1798 args = alloca (8);
1799 sprintf (args, "%d", (int) WSTOPSIG (status));
1800 if (debug_linux_nat)
1801 fprintf_unfiltered (gdb_stdlog,
1802 "LND: Sending signal %s to %s\n",
1803 args,
1804 target_pid_to_str (main_lwp->ptid));
1805 }
1806
1807 if (linux_nat_prepare_to_resume != NULL)
1808 linux_nat_prepare_to_resume (main_lwp);
1809 delete_lwp (main_lwp->ptid);
1810
1811 if (forks_exist_p ())
1812 {
1813 /* Multi-fork case. The current inferior_ptid is being detached
1814 from, but there are other viable forks to debug. Detach from
1815 the current fork, and context-switch to the first
1816 available. */
1817 linux_fork_detach (args, from_tty);
1818
1819 if (non_stop && target_can_async_p ())
1820 target_async (inferior_event_handler, 0);
1821 }
1822 else
1823 linux_ops->to_detach (ops, args, from_tty);
1824 }
1825
1826 /* Resume LP. */
1827
1828 static void
1829 resume_lwp (struct lwp_info *lp, int step)
1830 {
1831 if (lp->stopped)
1832 {
1833 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
1834
1835 if (inf->vfork_child != NULL)
1836 {
1837 if (debug_linux_nat)
1838 fprintf_unfiltered (gdb_stdlog,
1839 "RC: Not resuming %s (vfork parent)\n",
1840 target_pid_to_str (lp->ptid));
1841 }
1842 else if (lp->status == 0
1843 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
1844 {
1845 if (debug_linux_nat)
1846 fprintf_unfiltered (gdb_stdlog,
1847 "RC: PTRACE_CONT %s, 0, 0 (resuming sibling)\n",
1848 target_pid_to_str (lp->ptid));
1849
1850 if (linux_nat_prepare_to_resume != NULL)
1851 linux_nat_prepare_to_resume (lp);
1852 linux_ops->to_resume (linux_ops,
1853 pid_to_ptid (GET_LWP (lp->ptid)),
1854 step, TARGET_SIGNAL_0);
1855 lp->stopped = 0;
1856 lp->step = step;
1857 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1858 lp->stopped_by_watchpoint = 0;
1859 }
1860 else
1861 {
1862 if (debug_linux_nat)
1863 fprintf_unfiltered (gdb_stdlog,
1864 "RC: Not resuming sibling %s (has pending)\n",
1865 target_pid_to_str (lp->ptid));
1866 }
1867 }
1868 else
1869 {
1870 if (debug_linux_nat)
1871 fprintf_unfiltered (gdb_stdlog,
1872 "RC: Not resuming sibling %s (not stopped)\n",
1873 target_pid_to_str (lp->ptid));
1874 }
1875 }
1876
1877 static int
1878 resume_callback (struct lwp_info *lp, void *data)
1879 {
1880 resume_lwp (lp, 0);
1881 return 0;
1882 }
1883
1884 static int
1885 resume_clear_callback (struct lwp_info *lp, void *data)
1886 {
1887 lp->resumed = 0;
1888 lp->last_resume_kind = resume_stop;
1889 return 0;
1890 }
1891
1892 static int
1893 resume_set_callback (struct lwp_info *lp, void *data)
1894 {
1895 lp->resumed = 1;
1896 lp->last_resume_kind = resume_continue;
1897 return 0;
1898 }
1899
1900 static void
1901 linux_nat_resume (struct target_ops *ops,
1902 ptid_t ptid, int step, enum target_signal signo)
1903 {
1904 sigset_t prev_mask;
1905 struct lwp_info *lp;
1906 int resume_many;
1907
1908 if (debug_linux_nat)
1909 fprintf_unfiltered (gdb_stdlog,
1910 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1911 step ? "step" : "resume",
1912 target_pid_to_str (ptid),
1913 (signo != TARGET_SIGNAL_0
1914 ? strsignal (target_signal_to_host (signo)) : "0"),
1915 target_pid_to_str (inferior_ptid));
1916
1917 block_child_signals (&prev_mask);
1918
1919 /* A specific PTID means `resume only this LWP'. */
1920 resume_many = (ptid_equal (minus_one_ptid, ptid)
1921 || ptid_is_pid (ptid));
1922
1923 /* Mark the lwps we're resuming as resumed. */
1924 iterate_over_lwps (ptid, resume_set_callback, NULL);
1925
1926 /* See if it's the current inferior that should be handled
1927 specially. */
1928 if (resume_many)
1929 lp = find_lwp_pid (inferior_ptid);
1930 else
1931 lp = find_lwp_pid (ptid);
1932 gdb_assert (lp != NULL);
1933
1934 /* Remember if we're stepping. */
1935 lp->step = step;
1936 lp->last_resume_kind = step ? resume_step : resume_continue;
1937
1938 /* If we have a pending wait status for this thread, there is no
1939 point in resuming the process. But first make sure that
1940 linux_nat_wait won't preemptively handle the event - we
1941 should never take this short-circuit if we are going to
1942 leave LP running, since we have skipped resuming all the
1943 other threads. This bit of code needs to be synchronized
1944 with linux_nat_wait. */
1945
1946 if (lp->status && WIFSTOPPED (lp->status))
1947 {
1948 if (!lp->step
1949 && WSTOPSIG (lp->status)
1950 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
1951 {
1952 if (debug_linux_nat)
1953 fprintf_unfiltered (gdb_stdlog,
1954 "LLR: Not short circuiting for ignored "
1955 "status 0x%x\n", lp->status);
1956
1957 /* FIXME: What should we do if we are supposed to continue
1958 this thread with a signal? */
1959 gdb_assert (signo == TARGET_SIGNAL_0);
1960 signo = target_signal_from_host (WSTOPSIG (lp->status));
1961 lp->status = 0;
1962 }
1963 }
1964
1965 if (lp->status || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
1966 {
1967 /* FIXME: What should we do if we are supposed to continue
1968 this thread with a signal? */
1969 gdb_assert (signo == TARGET_SIGNAL_0);
1970
1971 if (debug_linux_nat)
1972 fprintf_unfiltered (gdb_stdlog,
1973 "LLR: Short circuiting for status 0x%x\n",
1974 lp->status);
1975
1976 restore_child_signals_mask (&prev_mask);
1977 if (target_can_async_p ())
1978 {
1979 target_async (inferior_event_handler, 0);
1980 /* Tell the event loop we have something to process. */
1981 async_file_mark ();
1982 }
1983 return;
1984 }
1985
1986 /* Mark LWP as not stopped to prevent it from being continued by
1987 resume_callback. */
1988 lp->stopped = 0;
1989
1990 if (resume_many)
1991 iterate_over_lwps (ptid, resume_callback, NULL);
1992
1993 /* Convert to something the lower layer understands. */
1994 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1995
1996 if (linux_nat_prepare_to_resume != NULL)
1997 linux_nat_prepare_to_resume (lp);
1998 linux_ops->to_resume (linux_ops, ptid, step, signo);
1999 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2000 lp->stopped_by_watchpoint = 0;
2001
2002 if (debug_linux_nat)
2003 fprintf_unfiltered (gdb_stdlog,
2004 "LLR: %s %s, %s (resume event thread)\n",
2005 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2006 target_pid_to_str (ptid),
2007 (signo != TARGET_SIGNAL_0
2008 ? strsignal (target_signal_to_host (signo)) : "0"));
2009
2010 restore_child_signals_mask (&prev_mask);
2011 if (target_can_async_p ())
2012 target_async (inferior_event_handler, 0);
2013 }
2014
2015 /* Send a signal to an LWP. */
2016
2017 static int
2018 kill_lwp (int lwpid, int signo)
2019 {
2020 /* Use tkill, if possible, in case we are using nptl threads. If tkill
2021 fails, then we are not using nptl threads and we should be using kill. */
2022
2023 #ifdef HAVE_TKILL_SYSCALL
2024 {
2025 static int tkill_failed;
2026
2027 if (!tkill_failed)
2028 {
2029 int ret;
2030
2031 errno = 0;
2032 ret = syscall (__NR_tkill, lwpid, signo);
2033 if (errno != ENOSYS)
2034 return ret;
2035 tkill_failed = 1;
2036 }
2037 }
2038 #endif
2039
2040 return kill (lwpid, signo);
2041 }
2042
2043 /* Handle a GNU/Linux syscall trap wait response. If we see a syscall
2044 event, check if the core is interested in it: if not, ignore the
2045 event, and keep waiting; otherwise, we need to toggle the LWP's
2046 syscall entry/exit status, since the ptrace event itself doesn't
2047 indicate it, and report the trap to higher layers. */
2048
2049 static int
2050 linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
2051 {
2052 struct target_waitstatus *ourstatus = &lp->waitstatus;
2053 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
2054 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
2055
2056 if (stopping)
2057 {
2058 /* If we're stopping threads, there's a SIGSTOP pending, which
2059 makes it so that the LWP reports an immediate syscall return,
2060 followed by the SIGSTOP. Skip seeing that "return" using
2061 PTRACE_CONT directly, and let stop_wait_callback collect the
2062 SIGSTOP. Later, when the thread is resumed, a new syscall
2063 entry event is reported. If we didn't do this (and returned 0), we'd
2064 leave a syscall entry pending, and our caller, by using
2065 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
2066 itself. Later, when the user re-resumes this LWP, we'd see
2067 another syscall entry event and we'd mistake it for a return.
2068
2069 If stop_wait_callback didn't force the SIGSTOP out of the LWP
2070 (leaving immediately with LWP->signalled set, without issuing
2071 a PTRACE_CONT), it would still be problematic to leave this
2072 syscall enter pending, as later when the thread is resumed,
2073 it would then see the same syscall exit mentioned above,
2074 followed by the delayed SIGSTOP, while the syscall didn't
2075 actually get to execute. It seems it would be even more
2076 confusing to the user. */
2077
2078 if (debug_linux_nat)
2079 fprintf_unfiltered (gdb_stdlog,
2080 "LHST: ignoring syscall %d "
2081 "for LWP %ld (stopping threads), "
2082 "resuming with PTRACE_CONT for SIGSTOP\n",
2083 syscall_number,
2084 GET_LWP (lp->ptid));
2085
2086 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2087 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2088 return 1;
2089 }
2090
2091 if (catch_syscall_enabled ())
2092 {
2093 /* Always update the entry/return state, even if this particular
2094 syscall isn't interesting to the core now. In async mode,
2095 the user could install a new catchpoint for this syscall
2096 between syscall enter/return, and we'll need to know to
2097 report a syscall return if that happens. */
2098 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2099 ? TARGET_WAITKIND_SYSCALL_RETURN
2100 : TARGET_WAITKIND_SYSCALL_ENTRY);
2101
2102 if (catching_syscall_number (syscall_number))
2103 {
2104 /* Alright, an event to report. */
2105 ourstatus->kind = lp->syscall_state;
2106 ourstatus->value.syscall_number = syscall_number;
2107
2108 if (debug_linux_nat)
2109 fprintf_unfiltered (gdb_stdlog,
2110 "LHST: stopping for %s of syscall %d"
2111 " for LWP %ld\n",
2112 lp->syscall_state
2113 == TARGET_WAITKIND_SYSCALL_ENTRY
2114 ? "entry" : "return",
2115 syscall_number,
2116 GET_LWP (lp->ptid));
2117 return 0;
2118 }
2119
2120 if (debug_linux_nat)
2121 fprintf_unfiltered (gdb_stdlog,
2122 "LHST: ignoring %s of syscall %d "
2123 "for LWP %ld\n",
2124 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
2125 ? "entry" : "return",
2126 syscall_number,
2127 GET_LWP (lp->ptid));
2128 }
2129 else
2130 {
2131 /* If we had been syscall tracing, and hence used PT_SYSCALL
2132 before on this LWP, it could happen that the user removes all
2133 syscall catchpoints before we get to process this event.
2134 There are two noteworthy issues here:
2135
2136 - When stopped at a syscall entry event, resuming with
2137 PT_STEP still resumes executing the syscall and reports a
2138 syscall return.
2139
2140 - Only PT_SYSCALL catches syscall enters. If we last
2141 single-stepped this thread, then this event can't be a
2142 syscall enter; the only remaining possibility is that
2143 this is a syscall exit.
2144
2145 The points above mean that the next resume, be it PT_STEP or
2146 PT_CONTINUE, cannot trigger a syscall trace event. */
2147 if (debug_linux_nat)
2148 fprintf_unfiltered (gdb_stdlog,
2149 "LHST: caught syscall event "
2150 "with no syscall catchpoints."
2151 " %d for LWP %ld, ignoring\n",
2152 syscall_number,
2153 GET_LWP (lp->ptid));
2154 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2155 }
2156
2157 /* The core isn't interested in this event. For efficiency, avoid
2158 stopping all threads only to have the core resume them all again.
2159 Since we're not stopping threads, if we're still syscall tracing
2160 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
2161 subsequent syscall. Simply resume using the inf-ptrace layer,
2162 which knows when to use PT_SYSCALL or PT_CONTINUE. */
2163
2164 /* Note that gdbarch_get_syscall_number may access registers, hence
2165 fill a regcache. */
2166 registers_changed ();
2167 if (linux_nat_prepare_to_resume != NULL)
2168 linux_nat_prepare_to_resume (lp);
2169 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2170 lp->step, TARGET_SIGNAL_0);
2171 return 1;
2172 }
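
/* Illustrative sketch (not GDB code) of the raw protocol handled
   above.  With PTRACE_O_TRACESYSGOOD set, each PTRACE_SYSCALL stop
   reports WSTOPSIG == (SIGTRAP | 0x80), and entry and exit stops
   simply alternate - the status itself does not say which is which,
   hence the toggling of lp->syscall_state.  PID is a placeholder for
   an already-traced child sitting in a ptrace stop.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
trace_syscalls (pid_t pid)
{
  int status, is_entry = 0;

  ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD);
  for (;;)
    {
      ptrace (PTRACE_SYSCALL, pid, 0, 0);  /* Run to next syscall stop.  */
      if (waitpid (pid, &status, 0) < 0)
	break;
      if (!WIFSTOPPED (status) || WSTOPSIG (status) != (SIGTRAP | 0x80))
	break;				   /* Not a syscall stop.  */
      is_entry = !is_entry;		   /* First stop is an entry.  */
    }
}
#endif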
2173
2174 /* Handle a GNU/Linux extended wait response. If we see a clone
2175 event, we need to add the new LWP to our list (and not report the
2176 trap to higher layers). This function returns non-zero if the
2177 event should be ignored and we should wait again. If STOPPING is
2178 true, the new LWP remains stopped, otherwise it is continued. */
2179
2180 static int
2181 linux_handle_extended_wait (struct lwp_info *lp, int status,
2182 int stopping)
2183 {
2184 int pid = GET_LWP (lp->ptid);
2185 struct target_waitstatus *ourstatus = &lp->waitstatus;
2186 int event = status >> 16;
2187
2188 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
2189 || event == PTRACE_EVENT_CLONE)
2190 {
2191 unsigned long new_pid;
2192 int ret;
2193
2194 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
2195
2196 /* If we haven't already seen the new PID stop, wait for it now. */
2197 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
2198 {
2199 /* The new child has a pending SIGSTOP. We can't affect it until it
2200 hits the SIGSTOP, but we're already attached. */
2201 ret = my_waitpid (new_pid, &status,
2202 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
2203 if (ret == -1)
2204 perror_with_name (_("waiting for new child"));
2205 else if (ret != new_pid)
2206 internal_error (__FILE__, __LINE__,
2207 _("wait returned unexpected PID %d"), ret);
2208 else if (!WIFSTOPPED (status))
2209 internal_error (__FILE__, __LINE__,
2210 _("wait returned unexpected status 0x%x"), status);
2211 }
2212
2213 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
2214
2215 if (event == PTRACE_EVENT_FORK
2216 && linux_fork_checkpointing_p (GET_PID (lp->ptid)))
2217 {
2218 /* Handle checkpointing by linux-fork.c here as a special
2219 case. We don't want the follow-fork-mode or 'catch fork'
2220 to interfere with this. */
2221
2222 /* This won't actually modify the breakpoint list, but will
2223 physically remove the breakpoints from the child. */
2224 detach_breakpoints (new_pid);
2225
2226 /* Retain child fork in ptrace (stopped) state. */
2227 if (!find_fork_pid (new_pid))
2228 add_fork (new_pid);
2229
2230 /* Report as spurious, so that infrun doesn't want to follow
2231 this fork. We're actually doing an infcall in
2232 linux-fork.c. */
2233 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2234 linux_enable_event_reporting (pid_to_ptid (new_pid));
2235
2236 /* Report the stop to the core. */
2237 return 0;
2238 }
2239
2240 if (event == PTRACE_EVENT_FORK)
2241 ourstatus->kind = TARGET_WAITKIND_FORKED;
2242 else if (event == PTRACE_EVENT_VFORK)
2243 ourstatus->kind = TARGET_WAITKIND_VFORKED;
2244 else
2245 {
2246 struct lwp_info *new_lp;
2247
2248 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2249
2250 if (debug_linux_nat)
2251 fprintf_unfiltered (gdb_stdlog,
2252 "LHEW: Got clone event "
2253 "from LWP %d, new child is LWP %ld\n",
2254 pid, new_pid);
2255
2256 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (lp->ptid)));
2257 new_lp->cloned = 1;
2258 new_lp->stopped = 1;
2259
2260 if (WSTOPSIG (status) != SIGSTOP)
2261 {
2262 /* This can happen if someone starts sending signals with
2263 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
2264 thread before it gets a chance to run.
2265 This is an unlikely case, and harder to handle for
2266 fork / vfork than for clone, so we do not try - but
2267 we handle it for clone events here. We'll send
2268 the other signal on to the thread below. */
2269
2270 new_lp->signalled = 1;
2271 }
2272 else
2273 {
2274 struct thread_info *tp;
2275
2276 /* When we stop for an event in some other thread, and
2277 pull the thread list just as this thread has cloned,
2278 we'll have seen the new thread in the thread_db list
2279 before handling the CLONE event (glibc's
2280 pthread_create adds the new thread to the thread list
2281 before clone'ing, and has the kernel fill in the
2282 thread's tid on the clone call with
2283 CLONE_PARENT_SETTID). If that happened, and the core
2284 had requested the new thread to stop, we'll have
2285 killed it with SIGSTOP. But since SIGSTOP is not an
2286 RT signal, it can only be queued once. We need to be
2287 careful to not resume the LWP if we wanted it to
2288 stop. In that case, we'll leave the SIGSTOP pending.
2289 It will later be reported as TARGET_SIGNAL_0. */
2290 tp = find_thread_ptid (new_lp->ptid);
2291 if (tp != NULL && tp->stop_requested)
2292 new_lp->last_resume_kind = resume_stop;
2293 else
2294 status = 0;
2295 }
2296
2297 if (non_stop)
2298 {
2299 /* Add the new thread to GDB's lists as soon as possible
2300 so that:
2301
2302 1) the frontend doesn't have to wait for a stop to
2303 display them, and,
2304
2305 2) we tag it with the correct running state. */
2306
2307 /* If the thread_db layer is active, let it know about
2308 this new thread, and add it to GDB's list. */
2309 if (!thread_db_attach_lwp (new_lp->ptid))
2310 {
2311 /* We're not using thread_db. Add it to GDB's
2312 list. */
2313 target_post_attach (GET_LWP (new_lp->ptid));
2314 add_thread (new_lp->ptid);
2315 }
2316
2317 if (!stopping)
2318 {
2319 set_running (new_lp->ptid, 1);
2320 set_executing (new_lp->ptid, 1);
2321 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2322 resume_stop. */
2323 new_lp->last_resume_kind = resume_continue;
2324 }
2325 }
2326
2327 if (status != 0)
2328 {
2329 /* We created NEW_LP so it cannot yet contain STATUS. */
2330 gdb_assert (new_lp->status == 0);
2331
2332 /* Save the wait status to report later. */
2333 if (debug_linux_nat)
2334 fprintf_unfiltered (gdb_stdlog,
2335 "LHEW: waitpid of new LWP %ld, "
2336 "saving status %s\n",
2337 (long) GET_LWP (new_lp->ptid),
2338 status_to_str (status));
2339 new_lp->status = status;
2340 }
2341
2342 /* Note the need to use the low target ops to resume, to
2343 handle resuming with PT_SYSCALL if we have syscall
2344 catchpoints. */
2345 if (!stopping)
2346 {
2347 new_lp->resumed = 1;
2348
2349 if (status == 0)
2350 {
2351 gdb_assert (new_lp->last_resume_kind == resume_continue);
2352 if (debug_linux_nat)
2353 fprintf_unfiltered (gdb_stdlog,
2354 "LHEW: resuming new LWP %ld\n",
2355 GET_LWP (new_lp->ptid));
2356 if (linux_nat_prepare_to_resume != NULL)
2357 linux_nat_prepare_to_resume (new_lp);
2358 linux_ops->to_resume (linux_ops, pid_to_ptid (new_pid),
2359 0, TARGET_SIGNAL_0);
2360 new_lp->stopped = 0;
2361 }
2362 }
2363
2364 if (debug_linux_nat)
2365 fprintf_unfiltered (gdb_stdlog,
2366 "LHEW: resuming parent LWP %d\n", pid);
2367 if (linux_nat_prepare_to_resume != NULL)
2368 linux_nat_prepare_to_resume (lp);
2369 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
2370 0, TARGET_SIGNAL_0);
2371
2372 return 1;
2373 }
2374
2375 return 0;
2376 }
2377
2378 if (event == PTRACE_EVENT_EXEC)
2379 {
2380 if (debug_linux_nat)
2381 fprintf_unfiltered (gdb_stdlog,
2382 "LHEW: Got exec event from LWP %ld\n",
2383 GET_LWP (lp->ptid));
2384
2385 ourstatus->kind = TARGET_WAITKIND_EXECD;
2386 ourstatus->value.execd_pathname
2387 = xstrdup (linux_child_pid_to_exec_file (pid));
2388
2389 return 0;
2390 }
2391
2392 if (event == PTRACE_EVENT_VFORK_DONE)
2393 {
2394 if (current_inferior ()->waiting_for_vfork_done)
2395 {
2396 if (debug_linux_nat)
2397 fprintf_unfiltered (gdb_stdlog,
2398 "LHEW: Got expected PTRACE_EVENT_"
2399 "VFORK_DONE from LWP %ld: stopping\n",
2400 GET_LWP (lp->ptid));
2401
2402 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2403 return 0;
2404 }
2405
2406 if (debug_linux_nat)
2407 fprintf_unfiltered (gdb_stdlog,
2408 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
2409 "from LWP %ld: resuming\n",
2410 GET_LWP (lp->ptid));
2411 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2412 return 1;
2413 }
2414
2415 internal_error (__FILE__, __LINE__,
2416 _("unknown ptrace event %d"), event);
2417 }
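
/* Hedged sketch (not GDB code) of the extended-event plumbing decoded
   above, with raw ptrace: the event code rides in bits 16..23 of the
   wait status, and PTRACE_GETEVENTMSG yields the new child's pid for
   fork/vfork/clone stops.  PID is a placeholder; older toolchains may
   need <linux/ptrace.h> for the PTRACE_O_/PTRACE_EVENT_ constants.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
decode_extended_stop (pid_t pid)
{
  int status;
  unsigned long new_pid;

  ptrace (PTRACE_SETOPTIONS, pid, 0,
	  (PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
	   | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC));
  ptrace (PTRACE_CONT, pid, 0, 0);
  if (waitpid (pid, &status, 0) < 0)
    return;

  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && (status >> 16) != 0)
    {
      int event = status >> 16;

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
	  || event == PTRACE_EVENT_CLONE)
	ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
    }
}
#endif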
2418
2419 /* Return non-zero if LWP is a zombie. */
2420
2421 static int
2422 linux_lwp_is_zombie (long lwp)
2423 {
2424 char buffer[MAXPATHLEN];
2425 FILE *procfile;
2426 int retval;
2427 int have_state;
2428
2429 xsnprintf (buffer, sizeof (buffer), "/proc/%ld/status", lwp);
2430 procfile = fopen (buffer, "r");
2431 if (procfile == NULL)
2432 {
2433 warning (_("unable to open /proc file '%s'"), buffer);
2434 return 0;
2435 }
2436
2437 have_state = 0;
2438 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
2439 if (strncmp (buffer, "State:", 6) == 0)
2440 {
2441 have_state = 1;
2442 break;
2443 }
2444 retval = (have_state
2445 && strcmp (buffer, "State:\tZ (zombie)\n") == 0);
2446 fclose (procfile);
2447 return retval;
2448 }
2449
2450 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2451 exited. */
2452
2453 static int
2454 wait_lwp (struct lwp_info *lp)
2455 {
2456 pid_t pid;
2457 int status = 0;
2458 int thread_dead = 0;
2459 sigset_t prev_mask;
2460
2461 gdb_assert (!lp->stopped);
2462 gdb_assert (lp->status == 0);
2463
2464 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2465 block_child_signals (&prev_mask);
2466
2467 for (;;)
2468 {
2469 /* If my_waitpid returns 0, the __WCLONE vs. non-__WCLONE choice was
2470 right but no event is pending yet, so we should just call sigsuspend. */
2471
2472 pid = my_waitpid (GET_LWP (lp->ptid), &status, WNOHANG);
2473 if (pid == -1 && errno == ECHILD)
2474 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE | WNOHANG);
2475 if (pid == -1 && errno == ECHILD)
2476 {
2477 /* The thread has previously exited. We need to delete it
2478 now because, for some vendor 2.4 kernels with NPTL
2479 support backported, there won't be an exit event unless
2480 it is the main thread. 2.6 kernels will report an exit
2481 event for each thread that exits, as expected. */
2482 thread_dead = 1;
2483 if (debug_linux_nat)
2484 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2485 target_pid_to_str (lp->ptid));
2486 }
2487 if (pid != 0)
2488 break;
2489
2490 /* Bugs 10970, 12702.
2491 The thread group leader may have exited, in which case we'll lock up in
2492 waitpid if there are other threads, even if they are all zombies too.
2493 Basically, we're not supposed to use waitpid this way.
2494 __WCLONE is not applicable for the leader so we can't use that.
2495 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2496 process; it gets ESRCH both for the zombie and for running processes.
2497
2498 As a workaround, check if we're waiting for the thread group leader and
2499 if it's a zombie, and avoid calling waitpid if it is.
2500
2501 This is racy: what if the tgl becomes a zombie right after we check?
2502 Therefore always use WNOHANG with sigsuspend - it is equivalent to a
2503 blocking waitpid, but the linux_lwp_is_zombie check stays safe. */
2504
2505 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid)
2506 && linux_lwp_is_zombie (GET_LWP (lp->ptid)))
2507 {
2508 thread_dead = 1;
2509 if (debug_linux_nat)
2510 fprintf_unfiltered (gdb_stdlog,
2511 "WL: Thread group leader %s vanished.\n",
2512 target_pid_to_str (lp->ptid));
2513 break;
2514 }
2515
2516 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD
2517 handlers run even though our caller intentionally blocked them with
2518 block_child_signals. That only matters for the loop in
2519 linux_nat_wait_1: if we are called from there, my_waitpid gets called
2520 again before that loop reaches sigsuspend, so we can safely let the
2521 handlers execute here. */
2522
2523 sigsuspend (&suspend_mask);
2524 }
2525
2526 restore_child_signals_mask (&prev_mask);
2527
2528 if (!thread_dead)
2529 {
2530 gdb_assert (pid == GET_LWP (lp->ptid));
2531
2532 if (debug_linux_nat)
2533 {
2534 fprintf_unfiltered (gdb_stdlog,
2535 "WL: waitpid %s received %s\n",
2536 target_pid_to_str (lp->ptid),
2537 status_to_str (status));
2538 }
2539
2540 /* Check if the thread has exited. */
2541 if (WIFEXITED (status) || WIFSIGNALED (status))
2542 {
2543 thread_dead = 1;
2544 if (debug_linux_nat)
2545 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2546 target_pid_to_str (lp->ptid));
2547 }
2548 }
2549
2550 if (thread_dead)
2551 {
2552 exit_lwp (lp);
2553 return 0;
2554 }
2555
2556 gdb_assert (WIFSTOPPED (status));
2557
2558 /* Handle GNU/Linux's syscall SIGTRAPs. */
2559 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2560 {
2561 /* No longer need the sysgood bit. The ptrace event ends up
2562 recorded in lp->waitstatus if we care for it. We can carry
2563 on handling the event like a regular SIGTRAP from here
2564 on. */
2565 status = W_STOPCODE (SIGTRAP);
2566 if (linux_handle_syscall_trap (lp, 1))
2567 return wait_lwp (lp);
2568 }
2569
2570 /* Handle GNU/Linux's extended waitstatus for trace events. */
2571 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2572 {
2573 if (debug_linux_nat)
2574 fprintf_unfiltered (gdb_stdlog,
2575 "WL: Handling extended status 0x%06x\n",
2576 status);
2577 if (linux_handle_extended_wait (lp, status, 1))
2578 return wait_lwp (lp);
2579 }
2580
2581 return status;
2582 }
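
/* Minimal sketch (not GDB code) of the race-free wait pattern used in
   wait_lwp above: block SIGCHLD, poll with WNOHANG, then sleep in
   sigsuspend with a mask that unblocks SIGCHLD.  Because sigsuspend
   swaps the mask and waits atomically, a SIGCHLD arriving between the
   waitpid and the sigsuspend is not lost.  Assumes a SIGCHLD handler
   is installed (as GDB does), so sigsuspend actually returns.  */
#if 0
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
wait_without_losing_sigchld (pid_t pid, int *status)
{
  sigset_t chld, prev, suspend;

  sigemptyset (&chld);
  sigaddset (&chld, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld, &prev);  /* Close the race window.  */

  suspend = prev;
  sigdelset (&suspend, SIGCHLD);	  /* Deliverable while asleep.  */

  while (waitpid (pid, status, WNOHANG) == 0)
    sigsuspend (&suspend);		  /* Unblock SIGCHLD and wait.  */

  sigprocmask (SIG_SETMASK, &prev, NULL);
}
#endif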
2583
2584 /* Save the most recent siginfo for LP. This is currently only called
2585 for SIGTRAP; some ports use the si_addr field for
2586 target_stopped_data_address. In the future, it may also be used to
2587 restore the siginfo of requeued signals. */
2588
2589 static void
2590 save_siginfo (struct lwp_info *lp)
2591 {
2592 errno = 0;
2593 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2594 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2595
2596 if (errno != 0)
2597 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2598 }
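
/* Hedged sketch (not GDB code): one way ports use the siginfo saved
   above.  For a SIGTRAP raised by a hardware watchpoint, the kernel
   fills si_addr with the trapping data address on some architectures,
   which is what target_stopped_data_address ultimately reports.  */
#if 0
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
watch_address_from_siginfo (pid_t lwpid, void **addr)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, lwpid, (void *) 0, &si) < 0)
    return 0;
  *addr = si.si_addr;	/* Meaningful only for suitable si_code values.  */
  return 1;
}
#endif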
2599
2600 /* Send a SIGSTOP to LP. */
2601
2602 static int
2603 stop_callback (struct lwp_info *lp, void *data)
2604 {
2605 if (!lp->stopped && !lp->signalled)
2606 {
2607 int ret;
2608
2609 if (debug_linux_nat)
2610 {
2611 fprintf_unfiltered (gdb_stdlog,
2612 "SC: kill %s **<SIGSTOP>**\n",
2613 target_pid_to_str (lp->ptid));
2614 }
2615 errno = 0;
2616 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2617 if (debug_linux_nat)
2618 {
2619 fprintf_unfiltered (gdb_stdlog,
2620 "SC: lwp kill %d %s\n",
2621 ret,
2622 errno ? safe_strerror (errno) : "ERRNO-OK");
2623 }
2624
2625 lp->signalled = 1;
2626 gdb_assert (lp->status == 0);
2627 }
2628
2629 return 0;
2630 }
2631
2632 /* Request a stop on LWP. */
2633
2634 void
2635 linux_stop_lwp (struct lwp_info *lwp)
2636 {
2637 stop_callback (lwp, NULL);
2638 }
2639
2640 /* Return non-zero if LWP PID has a pending SIGINT. */
2641
2642 static int
2643 linux_nat_has_pending_sigint (int pid)
2644 {
2645 sigset_t pending, blocked, ignored;
2646
2647 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2648
2649 if (sigismember (&pending, SIGINT)
2650 && !sigismember (&ignored, SIGINT))
2651 return 1;
2652
2653 return 0;
2654 }
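
/* Hedged sketch (not the real linux_proc_pending_signals) of the idea
   behind it: the SigPnd and ShdPnd lines of /proc/PID/status carry
   hexadecimal signal masks, where bit N-1 set means signal N is
   pending.  Error handling and >64-bit masks are simplified here.  */
#if 0
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
read_pending_signals (int pid, sigset_t *pending)
{
  char path[64], line[256];
  FILE *f;

  sigemptyset (pending);
  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0
	|| strncmp (line, "ShdPnd:", 7) == 0)
      {
	unsigned long long mask = strtoull (line + 7, NULL, 16);
	int sig;

	for (sig = 1; sig < 64; sig++)
	  if (mask & (1ULL << (sig - 1)))
	    sigaddset (pending, sig);
      }
  fclose (f);
}
#endif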
2655
2656 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2657
2658 static int
2659 set_ignore_sigint (struct lwp_info *lp, void *data)
2660 {
2661 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2662 flag to consume the next one. */
2663 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2664 && WSTOPSIG (lp->status) == SIGINT)
2665 lp->status = 0;
2666 else
2667 lp->ignore_sigint = 1;
2668
2669 return 0;
2670 }
2671
2672 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2673 This function is called after we know the LWP has stopped; if the LWP
2674 stopped before the expected SIGINT was delivered, then it will never have
2675 arrived. Also, if the signal was delivered to a shared queue and consumed
2676 by a different thread, it will never be delivered to this LWP. */
2677
2678 static void
2679 maybe_clear_ignore_sigint (struct lwp_info *lp)
2680 {
2681 if (!lp->ignore_sigint)
2682 return;
2683
2684 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2685 {
2686 if (debug_linux_nat)
2687 fprintf_unfiltered (gdb_stdlog,
2688 "MCIS: Clearing bogus flag for %s\n",
2689 target_pid_to_str (lp->ptid));
2690 lp->ignore_sigint = 0;
2691 }
2692 }
2693
2694 /* Fetch the possible triggered data watchpoint info and store it in
2695 LP.
2696
2697 On some archs, like x86, that use debug registers to set
2698 watchpoints, the way to know which watched address trapped is to
2699 check the register that is used to select which address to watch.
2700 Problem is, between setting the watchpoint
2701 and reading back which data address trapped, the user may change
2702 the set of watchpoints, and, as a consequence, GDB changes the
2703 debug registers in the inferior. To avoid reading back a stale
2704 stopped-data-address when that happens, we cache in LP the fact
2705 that a watchpoint trapped, and the corresponding data address, as
2706 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2707 registers meanwhile, we have the cached data we can rely on. */
2708
2709 static void
2710 save_sigtrap (struct lwp_info *lp)
2711 {
2712 struct cleanup *old_chain;
2713
2714 if (linux_ops->to_stopped_by_watchpoint == NULL)
2715 {
2716 lp->stopped_by_watchpoint = 0;
2717 return;
2718 }
2719
2720 old_chain = save_inferior_ptid ();
2721 inferior_ptid = lp->ptid;
2722
2723 lp->stopped_by_watchpoint = linux_ops->to_stopped_by_watchpoint ();
2724
2725 if (lp->stopped_by_watchpoint)
2726 {
2727 if (linux_ops->to_stopped_data_address != NULL)
2728 lp->stopped_data_address_p =
2729 linux_ops->to_stopped_data_address (&current_target,
2730 &lp->stopped_data_address);
2731 else
2732 lp->stopped_data_address_p = 0;
2733 }
2734
2735 do_cleanups (old_chain);
2736 }
2737
2738 /* See save_sigtrap. */
2739
2740 static int
2741 linux_nat_stopped_by_watchpoint (void)
2742 {
2743 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2744
2745 gdb_assert (lp != NULL);
2746
2747 return lp->stopped_by_watchpoint;
2748 }
2749
2750 static int
2751 linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2752 {
2753 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2754
2755 gdb_assert (lp != NULL);
2756
2757 *addr_p = lp->stopped_data_address;
2758
2759 return lp->stopped_data_address_p;
2760 }
2761
2762 /* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2763
2764 static int
2765 sigtrap_is_event (int status)
2766 {
2767 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2768 }
2769
2770 /* SIGTRAP-like events recognizer. */
2771
2772 static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
2773
2774 /* Check for SIGTRAP-like events in LP. */
2775
2776 static int
2777 linux_nat_lp_status_is_event (struct lwp_info *lp)
2778 {
2779 /* We check for lp->waitstatus in addition to lp->status, because we can
2780 have pending process exits recorded in lp->status
2781 and W_EXITCODE(0,0) == 0. We should probably have an additional
2782 lp->status_p flag. */
2783
2784 return (lp->waitstatus.kind == TARGET_WAITKIND_IGNORE
2785 && linux_nat_status_is_event (lp->status));
2786 }
2787
2788 /* Set an alternative recognizer for SIGTRAP-like events. If a
2789 breakpoint is inserted at the stop PC (breakpoint_inserted_here_p),
2790 then gdbarch_decr_pc_after_break will be applied. */
2791
2792 void
2793 linux_nat_set_status_is_event (struct target_ops *t,
2794 int (*status_is_event) (int status))
2795 {
2796 linux_nat_status_is_event = status_is_event;
2797 }
2798
2799 /* Wait until LP is stopped. */
2800
2801 static int
2802 stop_wait_callback (struct lwp_info *lp, void *data)
2803 {
2804 struct inferior *inf = find_inferior_pid (GET_PID (lp->ptid));
2805
2806 /* If this is a vfork parent, bail out; it is not going to report
2807 any SIGSTOP until the vfork is done. */
2808 if (inf->vfork_child != NULL)
2809 return 0;
2810
2811 if (!lp->stopped)
2812 {
2813 int status;
2814
2815 status = wait_lwp (lp);
2816 if (status == 0)
2817 return 0;
2818
2819 if (lp->ignore_sigint && WIFSTOPPED (status)
2820 && WSTOPSIG (status) == SIGINT)
2821 {
2822 lp->ignore_sigint = 0;
2823
2824 errno = 0;
2825 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2826 if (debug_linux_nat)
2827 fprintf_unfiltered (gdb_stdlog,
2828 "PTRACE_CONT %s, 0, 0 (%s) "
2829 "(discarding SIGINT)\n",
2830 target_pid_to_str (lp->ptid),
2831 errno ? safe_strerror (errno) : "OK");
2832
2833 return stop_wait_callback (lp, NULL);
2834 }
2835
2836 maybe_clear_ignore_sigint (lp);
2837
2838 if (WSTOPSIG (status) != SIGSTOP)
2839 {
2840 if (linux_nat_status_is_event (status))
2841 {
2842 /* If a LWP other than the LWP that we're reporting an
2843 event for has hit a GDB breakpoint (as opposed to
2844 some random trap signal), then just arrange for it to
2845 hit it again later. We don't keep the SIGTRAP status
2846 and don't forward the SIGTRAP signal to the LWP. We
2847 will handle the current event, eventually we will
2848 resume all LWPs, and this one will get its breakpoint
2849 trap again.
2850
2851 If we do not do this, then we run the risk that the
2852 user will delete or disable the breakpoint, but the
2853 thread will have already tripped on it. */
2854
2855 /* Save the trap's siginfo in case we need it later. */
2856 save_siginfo (lp);
2857
2858 save_sigtrap (lp);
2859
2860 /* Now resume this LWP and get the SIGSTOP event. */
2861 errno = 0;
2862 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2863 if (debug_linux_nat)
2864 {
2865 fprintf_unfiltered (gdb_stdlog,
2866 "PTRACE_CONT %s, 0, 0 (%s)\n",
2867 target_pid_to_str (lp->ptid),
2868 errno ? safe_strerror (errno) : "OK");
2869
2870 fprintf_unfiltered (gdb_stdlog,
2871 "SWC: Candidate SIGTRAP event in %s\n",
2872 target_pid_to_str (lp->ptid));
2873 }
2874 /* Hold this event/waitstatus while we check to see if
2875 there are any more (we still want to get that SIGSTOP). */
2876 stop_wait_callback (lp, NULL);
2877
2878 /* Hold the SIGTRAP for handling by linux_nat_wait. If
2879 there's another event, throw it back into the
2880 queue. */
2881 if (lp->status)
2882 {
2883 if (debug_linux_nat)
2884 fprintf_unfiltered (gdb_stdlog,
2885 "SWC: kill %s, %s\n",
2886 target_pid_to_str (lp->ptid),
2887 status_to_str (lp->status));
2888 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2889 }
2890
2891 /* Save the sigtrap event. */
2892 lp->status = status;
2893 return 0;
2894 }
2895 else
2896 {
2897 /* The thread was stopped with a signal other than
2898 SIGSTOP, and didn't accidentally trip a breakpoint. */
2899
2900 if (debug_linux_nat)
2901 {
2902 fprintf_unfiltered (gdb_stdlog,
2903 "SWC: Pending event %s in %s\n",
2904 status_to_str ((int) status),
2905 target_pid_to_str (lp->ptid));
2906 }
2907 /* Now resume this LWP and get the SIGSTOP event. */
2908 errno = 0;
2909 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2910 if (debug_linux_nat)
2911 fprintf_unfiltered (gdb_stdlog,
2912 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2913 target_pid_to_str (lp->ptid),
2914 errno ? safe_strerror (errno) : "OK");
2915
2916 /* Hold this event/waitstatus while we check to see if
2917 there are any more (we still want to get that SIGSTOP). */
2918 stop_wait_callback (lp, NULL);
2919
2920 /* If the lp->status field is still empty, use it to
2921 hold this event. If not, then this event must be
2922 returned to the event queue of the LWP. */
2923 if (lp->status)
2924 {
2925 if (debug_linux_nat)
2926 {
2927 fprintf_unfiltered (gdb_stdlog,
2928 "SWC: kill %s, %s\n",
2929 target_pid_to_str (lp->ptid),
2930 status_to_str ((int) status));
2931 }
2932 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2933 }
2934 else
2935 lp->status = status;
2936 return 0;
2937 }
2938 }
2939 else
2940 {
2941 /* We caught the SIGSTOP that we intended to catch, so
2942 there's no SIGSTOP pending. */
2943 lp->stopped = 1;
2944 lp->signalled = 0;
2945 }
2946 }
2947
2948 return 0;
2949 }
2950
2951 /* Return non-zero if LP has a wait status pending. */
2952
2953 static int
2954 status_callback (struct lwp_info *lp, void *data)
2955 {
2956 /* Only report a pending wait status if the LWP is marked as
2957 resumed. */
2958 if (!lp->resumed)
2959 return 0;
2960
2961 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2962 {
2963 /* A ptrace event, like PTRACE_FORK|VFORK|EXEC, syscall event,
2964 or a pending process exit. Note that `W_EXITCODE(0,0) ==
2965 0', so a clean process exit can not be stored pending in
2966 lp->status, it is indistinguishable from
2967 no-pending-status. */
2968 return 1;
2969 }
2970
2971 if (lp->status != 0)
2972 return 1;
2973
2974 return 0;
2975 }
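
/* Tiny illustration (not GDB code) of the ambiguity the comment above
   describes: a clean exit encodes to the same value as "no status".
   The fallback define matches the classic (ret << 8) | sig layout.  */
#if 0
#include <assert.h>
#include <sys/wait.h>

#ifndef W_EXITCODE
# define W_EXITCODE(ret, sig) ((ret) << 8 | (sig))
#endif

int
main (void)
{
  assert (W_EXITCODE (0, 0) == 0);  /* Clean exit == no-pending-status.  */
  assert (W_EXITCODE (1, 0) != 0);  /* Other exit codes are unambiguous.  */
  return 0;
}
#endif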
2976
2977 /* Return non-zero if LP isn't stopped. */
2978
2979 static int
2980 running_callback (struct lwp_info *lp, void *data)
2981 {
2982 return (!lp->stopped
2983 || ((lp->status != 0
2984 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
2985 && lp->resumed));
2986 }
2987
2988 /* Count the LWPs that have had events. */
2989
2990 static int
2991 count_events_callback (struct lwp_info *lp, void *data)
2992 {
2993 int *count = data;
2994
2995 gdb_assert (count != NULL);
2996
2997 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2998 if (lp->resumed && linux_nat_lp_status_is_event (lp))
2999 (*count)++;
3000
3001 return 0;
3002 }
3003
3004 /* Select the LWP (if any) that is currently being single-stepped. */
3005
3006 static int
3007 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
3008 {
3009 if (lp->last_resume_kind == resume_step
3010 && lp->status != 0)
3011 return 1;
3012 else
3013 return 0;
3014 }
3015
3016 /* Select the Nth LWP that has had a SIGTRAP event. */
3017
3018 static int
3019 select_event_lwp_callback (struct lwp_info *lp, void *data)
3020 {
3021 int *selector = data;
3022
3023 gdb_assert (selector != NULL);
3024
3025 /* Select only resumed LWPs that have a SIGTRAP event pending. */
3026 if (lp->resumed && linux_nat_lp_status_is_event (lp))
3027 if ((*selector)-- == 0)
3028 return 1;
3029
3030 return 0;
3031 }
3032
3033 static int
3034 cancel_breakpoint (struct lwp_info *lp)
3035 {
3036 /* Arrange for a breakpoint to be hit again later. We don't keep
3037 the SIGTRAP status and don't forward the SIGTRAP signal to the
3038 LWP. We will handle the current event, eventually we will resume
3039 this LWP, and this breakpoint will trap again.
3040
3041 If we do not do this, then we run the risk that the user will
3042 delete or disable the breakpoint, but the LWP will have already
3043 tripped on it. */
3044
3045 struct regcache *regcache = get_thread_regcache (lp->ptid);
3046 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3047 CORE_ADDR pc;
3048
3049 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
3050 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3051 {
3052 if (debug_linux_nat)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "CB: Push back breakpoint for %s\n",
3055 target_pid_to_str (lp->ptid));
3056
3057 /* Back up the PC if necessary. */
3058 if (gdbarch_decr_pc_after_break (gdbarch))
3059 regcache_write_pc (regcache, pc);
3060
3061 return 1;
3062 }
3063 return 0;
3064 }
3065
3066 static int
3067 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
3068 {
3069 struct lwp_info *event_lp = data;
3070
3071 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
3072 if (lp == event_lp)
3073 return 0;
3074
3075 /* If a LWP other than the LWP that we're reporting an event for has
3076 hit a GDB breakpoint (as opposed to some random trap signal),
3077 then just arrange for it to hit it again later. We don't keep
3078 the SIGTRAP status and don't forward the SIGTRAP signal to the
3079 LWP. We will handle the current event, eventually we will resume
3080 all LWPs, and this one will get its breakpoint trap again.
3081
3082 If we do not do this, then we run the risk that the user will
3083 delete or disable the breakpoint, but the LWP will have already
3084 tripped on it. */
3085
3086 if (linux_nat_lp_status_is_event (lp)
3087 && cancel_breakpoint (lp))
3088 /* Throw away the SIGTRAP. */
3089 lp->status = 0;
3090
3091 return 0;
3092 }
3093
3094 /* Select one LWP out of those that have events pending. */
3095
3096 static void
3097 select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
3098 {
3099 int num_events = 0;
3100 int random_selector;
3101 struct lwp_info *event_lp;
3102
3103 /* Record the wait status for the original LWP. */
3104 (*orig_lp)->status = *status;
3105
3106 /* Give preference to any LWP that is being single-stepped. */
3107 event_lp = iterate_over_lwps (filter,
3108 select_singlestep_lwp_callback, NULL);
3109 if (event_lp != NULL)
3110 {
3111 if (debug_linux_nat)
3112 fprintf_unfiltered (gdb_stdlog,
3113 "SEL: Select single-step %s\n",
3114 target_pid_to_str (event_lp->ptid));
3115 }
3116 else
3117 {
3118 /* No single-stepping LWP. Select one at random, out of those
3119 which have had SIGTRAP events. */
3120
3121 /* First see how many SIGTRAP events we have. */
3122 iterate_over_lwps (filter, count_events_callback, &num_events);
3123
3124 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
3125 random_selector = (int)
3126 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
3127
3128 if (debug_linux_nat && num_events > 1)
3129 fprintf_unfiltered (gdb_stdlog,
3130 "SEL: Found %d SIGTRAP events, selecting #%d\n",
3131 num_events, random_selector);
3132
3133 event_lp = iterate_over_lwps (filter,
3134 select_event_lwp_callback,
3135 &random_selector);
3136 }
3137
3138 if (event_lp != NULL)
3139 {
3140 /* Switch the event LWP. */
3141 *orig_lp = event_lp;
3142 *status = event_lp->status;
3143 }
3144
3145 /* Flush the wait status for the event LWP. */
3146 (*orig_lp)->status = 0;
3147 }
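
/* Tiny sketch (not GDB code) of the selector arithmetic used above:
   scaling rand () by num_events / (RAND_MAX + 1.0) yields an integer
   uniformly distributed over [0, num_events), avoiding the modulo
   bias that rand () % num_events would introduce.  */
#if 0
#include <assert.h>
#include <stdlib.h>

static int
pick_uniform (int num_events)
{
  int selector = (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));

  assert (selector >= 0 && selector < num_events);
  return selector;
}
#endif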
3148
3149 /* Return non-zero if LP has been resumed. */
3150
3151 static int
3152 resumed_callback (struct lwp_info *lp, void *data)
3153 {
3154 return lp->resumed;
3155 }
3156
3157 /* Stop an active thread, verify it still exists, then resume it. If
3158 the thread ends up with a pending status, then it is not resumed,
3159 and *DATA (really a pointer to int) is set. */
3160
3161 static int
3162 stop_and_resume_callback (struct lwp_info *lp, void *data)
3163 {
3164 int *new_pending_p = data;
3165
3166 if (!lp->stopped)
3167 {
3168 ptid_t ptid = lp->ptid;
3169
3170 stop_callback (lp, NULL);
3171 stop_wait_callback (lp, NULL);
3172
3173 /* Resume if the lwp still exists, and the core wanted it
3174 running. */
3175 lp = find_lwp_pid (ptid);
3176 if (lp != NULL)
3177 {
3178 if (lp->last_resume_kind == resume_stop
3179 && lp->status == 0)
3180 {
3181 /* The core wanted the LWP to stop. Even if it stopped
3182 cleanly (with SIGSTOP), leave the event pending. */
3183 if (debug_linux_nat)
3184 fprintf_unfiltered (gdb_stdlog,
3185 "SARC: core wanted LWP %ld stopped "
3186 "(leaving SIGSTOP pending)\n",
3187 GET_LWP (lp->ptid));
3188 lp->status = W_STOPCODE (SIGSTOP);
3189 }
3190
3191 if (lp->status == 0)
3192 {
3193 if (debug_linux_nat)
3194 fprintf_unfiltered (gdb_stdlog,
3195 "SARC: re-resuming LWP %ld\n",
3196 GET_LWP (lp->ptid));
3197 resume_lwp (lp, lp->step);
3198 }
3199 else
3200 {
3201 if (debug_linux_nat)
3202 fprintf_unfiltered (gdb_stdlog,
3203 "SARC: not re-resuming LWP %ld "
3204 "(has pending)\n",
3205 GET_LWP (lp->ptid));
3206 if (new_pending_p)
3207 *new_pending_p = 1;
3208 }
3209 }
3210 }
3211 return 0;
3212 }
3213
3214 /* Check if we should go on and pass this event to common code.
3215 Return the affected lwp if so, or NULL otherwise. If we stop
3216 all lwps temporarily, we may end up with new pending events in some
3217 other lwp. In that case set *NEW_PENDING_P to true. */
3218
3219 static struct lwp_info *
3220 linux_nat_filter_event (int lwpid, int status, int *new_pending_p)
3221 {
3222 struct lwp_info *lp;
3223
3224 *new_pending_p = 0;
3225
3226 lp = find_lwp_pid (pid_to_ptid (lwpid));
3227
3228 /* Check for stop events reported by a process we didn't already
3229 know about - anything not already in our LWP list.
3230
3231 If we're expecting to receive stopped processes after
3232 fork, vfork, and clone events, then we'll just add the
3233 new one to our list and go back to waiting for the event
3234 to be reported - the stopped process might be returned
3235 from waitpid before or after the event is.
3236
3237 But note the case of a non-leader thread exec'ing after the
3238 leader has exited and gone from our lists. The non-leader
3239 thread changes its tid to the tgid. */
3240
3241 if (WIFSTOPPED (status) && lp == NULL
3242 && (WSTOPSIG (status) == SIGTRAP && status >> 16 == PTRACE_EVENT_EXEC))
3243 {
3244 /* A multi-thread exec after we had seen the leader exiting. */
3245 if (debug_linux_nat)
3246 fprintf_unfiltered (gdb_stdlog,
3247 "LLW: Re-adding thread group leader LWP %d.\n",
3248 lwpid);
3249
3250 lp = add_lwp (BUILD_LWP (lwpid, lwpid));
3251 lp->stopped = 1;
3252 lp->resumed = 1;
3253 add_thread (lp->ptid);
3254 }
3255
3256 if (WIFSTOPPED (status) && !lp)
3257 {
3258 add_to_pid_list (&stopped_pids, lwpid, status);
3259 return NULL;
3260 }
3261
3262 /* Make sure we don't report an event for the exit of an LWP not in
3263 our list, i.e. not part of the current process. This can happen
3264 if we detach from a program we originally forked and then it
3265 exits. */
3266 if (!WIFSTOPPED (status) && !lp)
3267 return NULL;
3268
3269 /* Handle GNU/Linux's syscall SIGTRAPs. */
3270 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3271 {
3272 /* No longer need the sysgood bit. The ptrace event ends up
3273 recorded in lp->waitstatus if we care for it. We can carry
3274 on handling the event like a regular SIGTRAP from here
3275 on. */
3276 status = W_STOPCODE (SIGTRAP);
3277 if (linux_handle_syscall_trap (lp, 0))
3278 return NULL;
3279 }
3280
3281 /* Handle GNU/Linux's extended waitstatus for trace events. */
3282 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
3283 {
3284 if (debug_linux_nat)
3285 fprintf_unfiltered (gdb_stdlog,
3286 "LLW: Handling extended status 0x%06x\n",
3287 status);
3288 if (linux_handle_extended_wait (lp, status, 0))
3289 return NULL;
3290 }
3291
3292 if (linux_nat_status_is_event (status))
3293 {
3294 /* Save the trap's siginfo in case we need it later. */
3295 save_siginfo (lp);
3296
3297 save_sigtrap (lp);
3298 }
3299
3300 /* Check if the thread has exited. */
3301 if ((WIFEXITED (status) || WIFSIGNALED (status))
3302 && num_lwps (GET_PID (lp->ptid)) > 1)
3303 {
3304 /* If this is the main thread, we must stop all threads and verify
3305 if they are still alive. This is because in the nptl thread model
3306 on Linux 2.4, there is no signal issued for exiting LWPs
3307 other than the main thread. We only get the main thread exit
3308 signal once all child threads have already exited. If we
3309 stop all the threads and use the stop_wait_callback to check
3310 if they have exited we can determine whether this signal
3311 should be ignored or whether it means the end of the debugged
3312 application, regardless of which threading model is being
3313 used. */
3314 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
3315 {
3316 lp->stopped = 1;
3317 iterate_over_lwps (pid_to_ptid (GET_PID (lp->ptid)),
3318 stop_and_resume_callback, new_pending_p);
3319 }
3320
3321 if (debug_linux_nat)
3322 fprintf_unfiltered (gdb_stdlog,
3323 "LLW: %s exited.\n",
3324 target_pid_to_str (lp->ptid));
3325
3326 if (num_lwps (GET_PID (lp->ptid)) > 1)
3327 {
3328 /* If there is at least one more LWP, then the exit signal
3329 was not the end of the debugged application and should be
3330 ignored. */
3331 exit_lwp (lp);
3332 return NULL;
3333 }
3334 }
3335
3336 /* Check if the current LWP has previously exited. In the nptl
3337 thread model, LWPs other than the main thread do not issue
3338 signals when they exit so we must check whenever the thread has
3339 stopped. A similar check is made in stop_wait_callback(). */
3340 if (num_lwps (GET_PID (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
3341 {
3342 ptid_t ptid = pid_to_ptid (GET_PID (lp->ptid));
3343
3344 if (debug_linux_nat)
3345 fprintf_unfiltered (gdb_stdlog,
3346 "LLW: %s exited.\n",
3347 target_pid_to_str (lp->ptid));
3348
3349 exit_lwp (lp);
3350
3351 /* Make sure there is at least one thread running. */
3352 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
3353
3354 /* Discard the event. */
3355 return NULL;
3356 }
3357
3358 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3359 an attempt to stop an LWP. */
3360 if (lp->signalled
3361 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3362 {
3363 if (debug_linux_nat)
3364 fprintf_unfiltered (gdb_stdlog,
3365 "LLW: Delayed SIGSTOP caught for %s.\n",
3366 target_pid_to_str (lp->ptid));
3367
3368 lp->signalled = 0;
3369
3370 if (lp->last_resume_kind != resume_stop)
3371 {
3372 /* This is a delayed SIGSTOP. */
3373
3374 registers_changed ();
3375
3376 if (linux_nat_prepare_to_resume != NULL)
3377 linux_nat_prepare_to_resume (lp);
3378 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3379 lp->step, TARGET_SIGNAL_0);
3380 if (debug_linux_nat)
3381 fprintf_unfiltered (gdb_stdlog,
3382 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3383 lp->step ?
3384 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3385 target_pid_to_str (lp->ptid));
3386
3387 lp->stopped = 0;
3388 gdb_assert (lp->resumed);
3389
3390 /* Discard the event. */
3391 return NULL;
3392 }
3393 }
3394
3395 /* Make sure we don't report a SIGINT that we have already displayed
3396 for another thread. */
3397 if (lp->ignore_sigint
3398 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3399 {
3400 if (debug_linux_nat)
3401 fprintf_unfiltered (gdb_stdlog,
3402 "LLW: Delayed SIGINT caught for %s.\n",
3403 target_pid_to_str (lp->ptid));
3404
3405 /* This is a delayed SIGINT. */
3406 lp->ignore_sigint = 0;
3407
3408 registers_changed ();
3409 if (linux_nat_prepare_to_resume != NULL)
3410 linux_nat_prepare_to_resume (lp);
3411 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3412 lp->step, TARGET_SIGNAL_0);
3413 if (debug_linux_nat)
3414 fprintf_unfiltered (gdb_stdlog,
3415 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3416 lp->step ?
3417 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3418 target_pid_to_str (lp->ptid));
3419
3420 lp->stopped = 0;
3421 gdb_assert (lp->resumed);
3422
3423 /* Discard the event. */
3424 return NULL;
3425 }
3426
3427 /* An interesting event. */
3428 gdb_assert (lp);
3429 lp->status = status;
3430 return lp;
3431 }
3432
3433 /* Detect zombie thread group leaders, and "exit" them. We can't reap
3434 their exits until all other threads in the group have exited. */
3435
3436 static void
3437 check_zombie_leaders (void)
3438 {
3439 struct inferior *inf;
3440
3441 ALL_INFERIORS (inf)
3442 {
3443 struct lwp_info *leader_lp;
3444
3445 if (inf->pid == 0)
3446 continue;
3447
3448 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3449 if (leader_lp != NULL
3450 /* Check if there are other threads in the group, as we may
3451 have raced with the inferior simply exiting. */
3452 && num_lwps (inf->pid) > 1
3453 && linux_lwp_is_zombie (inf->pid))
3454 {
3455 if (debug_linux_nat)
3456 fprintf_unfiltered (gdb_stdlog,
3457 "CZL: Thread group leader %d zombie "
3458 "(it exited, or another thread execd).\n",
3459 inf->pid);
3460
3461 /* A leader zombie can mean one of two things:
3462
3463 - It exited, and there's an exit status pending,
3464 or only the leader exited (not the whole
3465 program). In the latter case, we can't waitpid the
3466 leader's exit status until all other threads are gone.
3467
3468 - There are 3 or more threads in the group, and a thread
3469 other than the leader exec'd. On an exec, the Linux
3470 kernel destroys all other threads (except the execing
3471 one) in the thread group, and resets the execing thread's
3472 tid to the tgid. No exit notification is sent for the
3473 execing thread -- from the ptracer's perspective, it
3474 appears as though the execing thread just vanishes.
3475 Until we reap all other threads except the leader and the
3476 execing thread, the leader will be zombie, and the
3477 execing thread will be in `D (disc sleep)'. As soon as
3478 all other threads are reaped, the execing thread changes
3479 its tid to the tgid, and the previous (zombie) leader
3480 vanishes, giving place to the "new" leader. We could try
3481 distinguishing the exit and exec cases, by waiting once
3482 more, and seeing if something comes out, but it doesn't
3483 sound useful. The previous leader _does_ go away, and
3484 we'll re-add the new one once we see the exec event
3485 (which is just the same as what would happen if the
3486 previous leader did exit voluntarily before some other
3487 thread execs). */
3488
3489 if (debug_linux_nat)
3490 fprintf_unfiltered (gdb_stdlog,
3491 "CZL: Thread group leader %d vanished.\n",
3492 inf->pid);
3493 exit_lwp (leader_lp);
3494 }
3495 }
3496 }
3497
3498 static ptid_t
3499 linux_nat_wait_1 (struct target_ops *ops,
3500 ptid_t ptid, struct target_waitstatus *ourstatus,
3501 int target_options)
3502 {
3503 static sigset_t prev_mask;
3504 enum resume_kind last_resume_kind;
3505 struct lwp_info *lp;
3506 int status;
3507
3508 if (debug_linux_nat)
3509 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3510
3511 /* The first time we get here after starting a new inferior, we may
3512 not have added it to the LWP list yet - this is the earliest
3513 moment at which we know its PID. */
3514 if (ptid_is_pid (inferior_ptid))
3515 {
3516 /* Upgrade the main thread's ptid. */
3517 thread_change_ptid (inferior_ptid,
3518 BUILD_LWP (GET_PID (inferior_ptid),
3519 GET_PID (inferior_ptid)));
3520
3521 lp = add_lwp (inferior_ptid);
3522 lp->resumed = 1;
3523 }
3524
3525 /* Make sure SIGCHLD is blocked. */
3526 block_child_signals (&prev_mask);
3527
3528 retry:
3529 lp = NULL;
3530 status = 0;
3531
3532 /* First check if there is a LWP with a wait status pending. */
3533 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3534 {
3535 /* Any LWP in the PTID group that's been resumed will do. */
3536 lp = iterate_over_lwps (ptid, status_callback, NULL);
3537 if (lp)
3538 {
3539 if (debug_linux_nat && lp->status)
3540 fprintf_unfiltered (gdb_stdlog,
3541 "LLW: Using pending wait status %s for %s.\n",
3542 status_to_str (lp->status),
3543 target_pid_to_str (lp->ptid));
3544 }
3545 }
3546 else if (is_lwp (ptid))
3547 {
3548 if (debug_linux_nat)
3549 fprintf_unfiltered (gdb_stdlog,
3550 "LLW: Waiting for specific LWP %s.\n",
3551 target_pid_to_str (ptid));
3552
3553 /* We have a specific LWP to check. */
3554 lp = find_lwp_pid (ptid);
3555 gdb_assert (lp);
3556
3557 if (debug_linux_nat && lp->status)
3558 fprintf_unfiltered (gdb_stdlog,
3559 "LLW: Using pending wait status %s for %s.\n",
3560 status_to_str (lp->status),
3561 target_pid_to_str (lp->ptid));
3562
3563 /* We check for lp->waitstatus in addition to lp->status,
3564 because we can have pending process exits recorded in
3565 lp->status and W_EXITCODE(0,0) == 0. We should probably have
3566 an additional lp->status_p flag. */
3567 if (lp->status == 0 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3568 lp = NULL;
3569 }
3570
3571 if (lp && lp->signalled && lp->last_resume_kind != resume_stop)
3572 {
3573 /* A pending SIGSTOP may interfere with the normal stream of
3574 events. In a typical case where interference is a problem,
3575 we have a SIGSTOP signal pending for LWP A while
3576 single-stepping it, encounter an event in LWP B, and take the
3577 pending SIGSTOP while trying to stop LWP A. After processing
3578 the event in LWP B, LWP A is continued, and we'll never see
3579 the SIGTRAP associated with the last time we were
3580 single-stepping LWP A. */
3581
3582 /* Resume the thread. It should halt immediately returning the
3583 pending SIGSTOP. */
3584 registers_changed ();
3585 if (linux_nat_prepare_to_resume != NULL)
3586 linux_nat_prepare_to_resume (lp);
3587 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3588 lp->step, TARGET_SIGNAL_0);
3589 if (debug_linux_nat)
3590 fprintf_unfiltered (gdb_stdlog,
3591 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
3592 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3593 target_pid_to_str (lp->ptid));
3594 lp->stopped = 0;
3595 gdb_assert (lp->resumed);
3596
3597 /* Catch the pending SIGSTOP. */
3598 status = lp->status;
3599 lp->status = 0;
3600
3601 stop_wait_callback (lp, NULL);
3602
3603 /* If the lp->status field isn't empty, we caught another signal
3604 while flushing the SIGSTOP. Return it back to the event
3605 queue of the LWP, as we already have an event to handle. */
3606 if (lp->status)
3607 {
3608 if (debug_linux_nat)
3609 fprintf_unfiltered (gdb_stdlog,
3610 "LLW: kill %s, %s\n",
3611 target_pid_to_str (lp->ptid),
3612 status_to_str (lp->status));
3613 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
3614 }
3615
3616 lp->status = status;
3617 }
3618
3619 if (!target_can_async_p ())
3620 {
3621 /* Causes SIGINT to be passed on to the attached process. */
3622 set_sigint_trap ();
3623 }
3624
3625 /* But if we don't find a pending event, we'll have to wait. */
3626
3627 while (lp == NULL)
3628 {
3629 pid_t lwpid;
3630
3631 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3632 quirks:
3633
3634 - If the thread group leader exits while other threads in the
3635 thread group still exist, waitpid(TGID, ...) hangs. That
3636 waitpid won't return an exit status until the other threads
3637 in the group are reaped.
3638
3639 - When a non-leader thread execs, that thread just vanishes
3640 without reporting an exit (so we'd hang if we waited for it
3641 explicitly in that case). The exec event is reported to
3642 the TGID pid. */
3643
3644 errno = 0;
3645 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3646 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3647 lwpid = my_waitpid (-1, &status, WNOHANG);
3648
3649 if (debug_linux_nat)
3650 fprintf_unfiltered (gdb_stdlog,
3651 "LNW: waitpid(-1, ...) returned %d, %s\n",
3652 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
3653
3654 if (lwpid > 0)
3655 {
3656 /* If this is true, then we paused LWPs momentarily, and may
3657 now have pending events to handle. */
3658 int new_pending;
3659
3660 if (debug_linux_nat)
3661 {
3662 fprintf_unfiltered (gdb_stdlog,
3663 "LLW: waitpid %ld received %s\n",
3664 (long) lwpid, status_to_str (status));
3665 }
3666
3667 lp = linux_nat_filter_event (lwpid, status, &new_pending);
3668
3669 /* STATUS is now no longer valid, use LP->STATUS instead. */
3670 status = 0;
3671
3672 if (lp && !ptid_match (lp->ptid, ptid))
3673 {
3674 gdb_assert (lp->resumed);
3675
3676 if (debug_linux_nat)
3677 fprintf (stderr,
3678 "LWP %ld got an event %06x, leaving pending.\n",
3679 ptid_get_lwp (lp->ptid), lp->status);
3680
3681 if (WIFSTOPPED (lp->status))
3682 {
3683 if (WSTOPSIG (lp->status) != SIGSTOP)
3684 {
3685 /* Cancel breakpoint hits. The breakpoint may
3686 be removed before we fetch events from this
3687 process to report to the core. It is best
3688 not to assume the moribund breakpoints
3689 heuristic always handles these cases --- it
3690 could be that too many events go through to the
3691 core before this one is handled. All-stop
3692 always cancels breakpoint hits in all
3693 threads. */
3694 if (non_stop
3695 && linux_nat_lp_status_is_event (lp)
3696 && cancel_breakpoint (lp))
3697 {
3698 /* Throw away the SIGTRAP. */
3699 lp->status = 0;
3700
3701 if (debug_linux_nat)
3702 fprintf (stderr,
3703 "LLW: LWP %ld hit a breakpoint while"
3704 " waiting for another process;"
3705 " cancelled it\n",
3706 ptid_get_lwp (lp->ptid));
3707 }
3708 lp->stopped = 1;
3709 }
3710 else
3711 {
3712 lp->stopped = 1;
3713 lp->signalled = 0;
3714 }
3715 }
3716 else if (WIFEXITED (lp->status) || WIFSIGNALED (lp->status))
3717 {
3718 if (debug_linux_nat)
3719 fprintf (stderr,
3720 "Process %ld exited while stopping LWPs\n",
3721 ptid_get_lwp (lp->ptid));
3722
3723 /* This was the last lwp in the process. Since
3724 events are serialized to GDB core, and we can't
3725 report this one right now, but GDB core and the
3726 other target layers will want to be notified
3727 about the exit code/signal, leave the status
3728 pending for the next time we're able to report
3729 it. */
3730
3731 /* Prevent trying to stop this thread again. We'll
3732 never try to resume it because it has a pending
3733 status. */
3734 lp->stopped = 1;
3735
3736 /* Dead LWPs aren't expected to report a pending
3737 SIGSTOP. */
3738 lp->signalled = 0;
3739
3740 /* Store the pending event in the waitstatus as
3741 well, because W_EXITCODE(0,0) == 0. */
3742 store_waitstatus (&lp->waitstatus, lp->status);
3743 }
3744
3745 /* Keep looking. */
3746 lp = NULL;
3747 }
3748
3749 if (new_pending)
3750 {
3751 /* Some LWP now has a pending event. Go all the way
3752 back to check it. */
3753 goto retry;
3754 }
3755
3756 if (lp)
3757 {
3758 /* We got an event to report to the core. */
3759 break;
3760 }
3761
3762 /* Retry until nothing comes out of waitpid. A single
3763 SIGCHLD can indicate more than one child stopped. */
3764 continue;
3765 }
3766
3767 /* Check for zombie thread group leaders. Those can't be reaped
3768 until all other threads in the thread group are. */
3769 check_zombie_leaders ();
3770
3771 /* If there are no resumed children left, bail. We'd be stuck
3772 forever in the sigsuspend call below otherwise. */
3773 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3774 {
3775 if (debug_linux_nat)
3776 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
3777
3778 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
3779
3780 if (!target_can_async_p ())
3781 clear_sigint_trap ();
3782
3783 restore_child_signals_mask (&prev_mask);
3784 return minus_one_ptid;
3785 }
3786
3787 /* No interesting event to report to the core. */
3788
3789 if (target_options & TARGET_WNOHANG)
3790 {
3791 if (debug_linux_nat)
3792 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3793
3794 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3795 restore_child_signals_mask (&prev_mask);
3796 return minus_one_ptid;
3797 }
3798
3799 /* We shouldn't end up here unless we want to try again. */
3800 gdb_assert (lp == NULL);
3801
3802 /* Block until we get an event reported with SIGCHLD. */
3803 sigsuspend (&suspend_mask);
3804 }
3805
3806 if (!target_can_async_p ())
3807 clear_sigint_trap ();
3808
3809 gdb_assert (lp);
3810
3811 status = lp->status;
3812 lp->status = 0;
3813
3814 /* Don't report signals that GDB isn't interested in, such as
3815 signals that are neither printed nor stopped upon. Stopping all
3816 threads can be a bit time-consuming, so if we want decent
3817 performance with heavily multi-threaded programs, especially when
3818 they're using a high frequency timer, we'd better avoid it if we
3819 can. */
3820
3821 if (WIFSTOPPED (status))
3822 {
3823 enum target_signal signo = target_signal_from_host (WSTOPSIG (status));
3824
3825 /* When using hardware single-step, we need to report every signal.
3826 Otherwise, signals in pass_mask may be short-circuited. */
3827 if (!lp->step
3828 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status)))
3829 {
3830 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3831 here? It is not clear we should. GDB may not expect
3832 other threads to run. On the other hand, not resuming
3833 newly attached threads may cause an unwanted delay in
3834 getting them running. */
3835 registers_changed ();
3836 if (linux_nat_prepare_to_resume != NULL)
3837 linux_nat_prepare_to_resume (lp);
3838 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3839 lp->step, signo);
3840 if (debug_linux_nat)
3841 fprintf_unfiltered (gdb_stdlog,
3842 "LLW: %s %s, %s (preempt 'handle')\n",
3843 lp->step ?
3844 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3845 target_pid_to_str (lp->ptid),
3846 (signo != TARGET_SIGNAL_0
3847 ? strsignal (target_signal_to_host (signo))
3848 : "0"));
3849 lp->stopped = 0;
3850 goto retry;
3851 }
3852
3853 if (!non_stop)
3854 {
3855 /* Only do the below in all-stop, as we currently use SIGINT
3856 to implement target_stop (see linux_nat_stop) in
3857 non-stop. */
3858 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3859 {
3860 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3861 forwarded to the entire process group, that is, all LWPs
3862 will receive it - unless they're using CLONE_THREAD to
3863 share signals. Since we only want to report it once, we
3864 mark it as ignored for all LWPs except this one. */
3865 iterate_over_lwps (pid_to_ptid (ptid_get_pid (ptid)),
3866 set_ignore_sigint, NULL);
3867 lp->ignore_sigint = 0;
3868 }
3869 else
3870 maybe_clear_ignore_sigint (lp);
3871 }
3872 }
3873
3874 /* This LWP is stopped now. */
3875 lp->stopped = 1;
3876
3877 if (debug_linux_nat)
3878 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3879 status_to_str (status), target_pid_to_str (lp->ptid));
3880
3881 if (!non_stop)
3882 {
3883 /* Now stop all other LWP's ... */
3884 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
3885
3886 /* ... and wait until all of them have reported back that
3887 they're no longer running. */
3888 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
3889
3890 /* If we're not waiting for a specific LWP, choose an event LWP
3891 from among those that have had events. Giving equal priority
3892 to all LWPs that have had events helps prevent
3893 starvation. */
3894 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3895 select_event_lwp (ptid, &lp, &status);
3896
3897 /* Now that we've selected our final event LWP, cancel any
3898 breakpoints in other LWPs that have hit a GDB breakpoint.
3899 See the comment in cancel_breakpoints_callback to find out
3900 why. */
3901 iterate_over_lwps (minus_one_ptid, cancel_breakpoints_callback, lp);
3902
3903 /* We'll need this to determine whether to report a SIGSTOP as
3904 TARGET_SIGNAL_0. Need to take a copy because
3905 resume_clear_callback clears it. */
3906 last_resume_kind = lp->last_resume_kind;
3907
3908 /* In all-stop, from the core's perspective, all LWPs are now
3909 stopped until a new resume action is sent over. */
3910 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3911 }
3912 else
3913 {
3914 /* See above. */
3915 last_resume_kind = lp->last_resume_kind;
3916 resume_clear_callback (lp, NULL);
3917 }
3918
3919 if (linux_nat_status_is_event (status))
3920 {
3921 if (debug_linux_nat)
3922 fprintf_unfiltered (gdb_stdlog,
3923 "LLW: trap ptid is %s.\n",
3924 target_pid_to_str (lp->ptid));
3925 }
3926
3927 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3928 {
3929 *ourstatus = lp->waitstatus;
3930 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3931 }
3932 else
3933 store_waitstatus (ourstatus, status);
3934
3935 if (debug_linux_nat)
3936 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3937
3938 restore_child_signals_mask (&prev_mask);
3939
3940 if (last_resume_kind == resume_stop
3941 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3942 && WSTOPSIG (status) == SIGSTOP)
3943 {
3944 /* This thread was requested to stop by GDB with target_stop,
3945 and it stopped cleanly, so report it as SIG0. The
3946 use of SIGSTOP is an implementation detail. */
3947 ourstatus->value.sig = TARGET_SIGNAL_0;
3948 }
3949
3950 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3951 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3952 lp->core = -1;
3953 else
3954 lp->core = linux_nat_core_of_thread_1 (lp->ptid);
3955
3956 return lp->ptid;
3957 }
3958
3959 /* Resume LWPs that are currently stopped without any pending status
3960 to report, but are resumed from the core's perspective. */
3961
3962 static int
3963 resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3964 {
3965 ptid_t *wait_ptid_p = data;
3966
3967 if (lp->stopped
3968 && lp->resumed
3969 && lp->status == 0
3970 && lp->waitstatus.kind == TARGET_WAITKIND_IGNORE)
3971 {
3972 struct regcache *regcache = get_thread_regcache (lp->ptid);
3973 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3974 CORE_ADDR pc = regcache_read_pc (regcache);
3975
3976 gdb_assert (is_executing (lp->ptid));
3977
3978 /* Don't bother if there's a breakpoint at PC that we'd hit
3979 immediately, and we're not waiting for this LWP. */
3980 if (!ptid_match (lp->ptid, *wait_ptid_p))
3981 {
3982 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3983 return 0;
3984 }
3985
3986 if (debug_linux_nat)
3987 fprintf_unfiltered (gdb_stdlog,
3988 "RSRL: resuming stopped-resumed LWP %s at %s: step=%d\n",
3989 target_pid_to_str (lp->ptid),
3990 paddress (gdbarch, pc),
3991 lp->step);
3992
3993 registers_changed ();
3994 if (linux_nat_prepare_to_resume != NULL)
3995 linux_nat_prepare_to_resume (lp);
3996 linux_ops->to_resume (linux_ops, pid_to_ptid (GET_LWP (lp->ptid)),
3997 lp->step, TARGET_SIGNAL_0);
3998 lp->stopped = 0;
3999 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
4000 lp->stopped_by_watchpoint = 0;
4001 }
4002
4003 return 0;
4004 }
4005
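/* target_wait implementation.  */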
4006 static ptid_t
4007 linux_nat_wait (struct target_ops *ops,
4008 ptid_t ptid, struct target_waitstatus *ourstatus,
4009 int target_options)
4010 {
4011 ptid_t event_ptid;
4012
4013 if (debug_linux_nat)
4014 fprintf_unfiltered (gdb_stdlog,
4015 "linux_nat_wait: [%s]\n", target_pid_to_str (ptid));
4016
4017 /* Flush the async file first. */
4018 if (target_can_async_p ())
4019 async_file_flush ();
4020
4021 /* Resume LWPs that are currently stopped without any pending status
4022 to report, but are resumed from the core's perspective. LWPs get
4023 in this state if we find them stopping at a time we're not
4024 interested in reporting the event (target_wait on a
4025 specific_process, for example, see linux_nat_wait_1), and
4026 meanwhile the event became uninteresting. Don't bother resuming
4027 LWPs we're not going to wait for if they'd stop immediately. */
4028 if (non_stop)
4029 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
4030
4031 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
4032
4033 /* If we requested any event, and something came out, assume there
4034 may be more. If we requested a specific lwp or process, also
4035 assume there may be more. */
4036 if (target_can_async_p ()
4037 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
4038 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
4039 || !ptid_equal (ptid, minus_one_ptid)))
4040 async_file_mark ();
4041
4042 /* Get ready for the next event. */
4043 if (target_can_async_p ())
4044 target_async (inferior_event_handler, 0);
4045
4046 return event_ptid;
4047 }
4048
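/* Kill the LWP given by LP, first with SIGKILL and then with
   PTRACE_KILL.  Used as an iterate_over_lwps callback; DATA is
   unused.  */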
4049 static int
4050 kill_callback (struct lwp_info *lp, void *data)
4051 {
4052 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
4053
4054 errno = 0;
4055 kill (GET_LWP (lp->ptid), SIGKILL);
4056 if (debug_linux_nat)
4057 fprintf_unfiltered (gdb_stdlog,
4058 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
4059 target_pid_to_str (lp->ptid),
4060 errno ? safe_strerror (errno) : "OK");
4061
4062 /* Some kernels ignore even SIGKILL for processes under ptrace. */
4063
4064 errno = 0;
4065 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
4066 if (debug_linux_nat)
4067 fprintf_unfiltered (gdb_stdlog,
4068 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
4069 target_pid_to_str (lp->ptid),
4070 errno ? safe_strerror (errno) : "OK");
4071
4072 return 0;
4073 }
4074
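/* Wait until the LWP given by LP has fully exited.  Used as an
   iterate_over_lwps callback; DATA is unused.  */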
4075 static int
4076 kill_wait_callback (struct lwp_info *lp, void *data)
4077 {
4078 pid_t pid;
4079
4080 /* We must make sure that there are no pending events (delayed
4081 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
4082 program doesn't interfere with any following debugging session. */
4083
4084 /* For cloned processes we must check both with __WCLONE and
4085 without, since the exit status of a cloned process isn't reported
4086 with __WCLONE. */
4087 if (lp->cloned)
4088 {
4089 do
4090 {
4091 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
4092 if (pid != (pid_t) -1)
4093 {
4094 if (debug_linux_nat)
4095 fprintf_unfiltered (gdb_stdlog,
4096 "KWC: wait %s received unknown.\n",
4097 target_pid_to_str (lp->ptid));
4098 /* The Linux kernel sometimes fails to kill a thread
4099 completely after PTRACE_KILL; that goes from the stop
4100 point in do_fork out to the one in
4101 get_signal_to_deliver and waits again. So kill it
4102 again. */
4103 kill_callback (lp, NULL);
4104 }
4105 }
4106 while (pid == GET_LWP (lp->ptid));
4107
4108 gdb_assert (pid == -1 && errno == ECHILD);
4109 }
4110
4111 do
4112 {
4113 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
4114 if (pid != (pid_t) -1)
4115 {
4116 if (debug_linux_nat)
4117 fprintf_unfiltered (gdb_stdlog,
4118 "KWC: wait %s received unk.\n",
4119 target_pid_to_str (lp->ptid));
4120 /* See the call to kill_callback above. */
4121 kill_callback (lp, NULL);
4122 }
4123 }
4124 while (pid == GET_LWP (lp->ptid));
4125
4126 gdb_assert (pid == -1 && errno == ECHILD);
4127 return 0;
4128 }
4129
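/* target_kill implementation.  */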
4130 static void
4131 linux_nat_kill (struct target_ops *ops)
4132 {
4133 struct target_waitstatus last;
4134 ptid_t last_ptid;
4135 int status;
4136
4137 /* If we're stopped while forking and we haven't followed yet,
4138 kill the other task. We need to do this first because the
4139 parent will be sleeping if this is a vfork. */
4140
4141 get_last_target_status (&last_ptid, &last);
4142
4143 if (last.kind == TARGET_WAITKIND_FORKED
4144 || last.kind == TARGET_WAITKIND_VFORKED)
4145 {
4146 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
4147 wait (&status);
4148 }
4149
4150 if (forks_exist_p ())
4151 linux_fork_killall ();
4152 else
4153 {
4154 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
4155
4156 /* Stop all threads before killing them, since ptrace requires
4157 that the thread is stopped to successfully PTRACE_KILL. */
4158 iterate_over_lwps (ptid, stop_callback, NULL);
4159 /* ... and wait until all of them have reported back that
4160 they're no longer running. */
4161 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4162
4163 /* Kill all LWP's ... */
4164 iterate_over_lwps (ptid, kill_callback, NULL);
4165
4166 /* ... and wait until we've flushed all events. */
4167 iterate_over_lwps (ptid, kill_wait_callback, NULL);
4168 }
4169
4170 target_mourn_inferior ();
4171 }
4172
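/* target_mourn_inferior implementation.  */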
4173 static void
4174 linux_nat_mourn_inferior (struct target_ops *ops)
4175 {
4176 purge_lwp_list (ptid_get_pid (inferior_ptid));
4177
4178 if (! forks_exist_p ())
4179 /* Normal case, no other forks available. */
4180 linux_ops->to_mourn_inferior (ops);
4181 else
4182 /* Multi-fork case. The current inferior_ptid has exited, but
4183 there are other viable forks to debug. Delete the exiting
4184 one and context-switch to the first available. */
4185 linux_fork_mourn_inferior ();
4186 }
4187
4188 /* Convert a native/host siginfo object into/from the siginfo in the
4189 layout of the inferior's architecture (DIRECTION 0: native to inferior; 1: the reverse). */
4190
4191 static void
4192 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
4193 {
4194 int done = 0;
4195
4196 if (linux_nat_siginfo_fixup != NULL)
4197 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
4198
4199 /* If there was no callback, or the callback didn't do anything,
4200 then just do a straight memcpy. */
4201 if (!done)
4202 {
4203 if (direction == 1)
4204 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
4205 else
4206 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
4207 }
4208 }
4209
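/* Read or write the siginfo of the current LWP (the
   TARGET_OBJECT_SIGNAL_INFO object), using PTRACE_GETSIGINFO and
   PTRACE_SETSIGINFO.  */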
4210 static LONGEST
4211 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
4212 const char *annex, gdb_byte *readbuf,
4213 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4214 {
4215 int pid;
4216 struct siginfo siginfo;
4217 gdb_byte inf_siginfo[sizeof (struct siginfo)];
4218
4219 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
4220 gdb_assert (readbuf || writebuf);
4221
4222 pid = GET_LWP (inferior_ptid);
4223 if (pid == 0)
4224 pid = GET_PID (inferior_ptid);
4225
4226 if (offset > sizeof (siginfo))
4227 return -1;
4228
4229 errno = 0;
4230 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4231 if (errno != 0)
4232 return -1;
4233
4234 /* When GDB is built as a 64-bit application, ptrace writes into
4235 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
4236 inferior with a 64-bit GDB should look the same as debugging it
4237 with a 32-bit GDB, we need to convert it. GDB core always sees
4238 the converted layout, so any read/write will have to be done
4239 post-conversion. */
4240 siginfo_fixup (&siginfo, inf_siginfo, 0);
4241
4242 if (offset + len > sizeof (siginfo))
4243 len = sizeof (siginfo) - offset;
4244
4245 if (readbuf != NULL)
4246 memcpy (readbuf, inf_siginfo + offset, len);
4247 else
4248 {
4249 memcpy (inf_siginfo + offset, writebuf, len);
4250
4251 /* Convert back to ptrace layout before flushing it out. */
4252 siginfo_fixup (&siginfo, inf_siginfo, 1);
4253
4254 errno = 0;
4255 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
4256 if (errno != 0)
4257 return -1;
4258 }
4259
4260 return len;
4261 }
4262
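/* to_xfer_partial implementation for the linux-nat target.  Handles
   TARGET_OBJECT_SIGNAL_INFO itself and forwards everything else to
   the layer below, with inferior_ptid temporarily reduced to a plain
   LWP-id ptid.  */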
4263 static LONGEST
4264 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
4265 const char *annex, gdb_byte *readbuf,
4266 const gdb_byte *writebuf,
4267 ULONGEST offset, LONGEST len)
4268 {
4269 struct cleanup *old_chain;
4270 LONGEST xfer;
4271
4272 if (object == TARGET_OBJECT_SIGNAL_INFO)
4273 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
4274 offset, len);
4275
4276 /* The target is connected but no live inferior is selected. Pass
4277 this request down to a lower stratum (e.g., the executable
4278 file). */
4279 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
4280 return 0;
4281
4282 old_chain = save_inferior_ptid ();
4283
4284 if (is_lwp (inferior_ptid))
4285 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
4286
4287 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
4288 offset, len);
4289
4290 do_cleanups (old_chain);
4291 return xfer;
4292 }
4293
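/* Return nonzero if the LWP given by PTID is still alive.  Probing
   with signal 0 doesn't actually deliver anything; it only checks
   for the thread's existence.  */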
4294 static int
4295 linux_thread_alive (ptid_t ptid)
4296 {
4297 int err, tmp_errno;
4298
4299 gdb_assert (is_lwp (ptid));
4300
4301 /* Send signal 0 instead of anything ptrace, because ptracing a
4302 running thread errors out claiming that the thread doesn't
4303 exist. */
4304 err = kill_lwp (GET_LWP (ptid), 0);
4305 tmp_errno = errno;
4306 if (debug_linux_nat)
4307 fprintf_unfiltered (gdb_stdlog,
4308 "LLTA: KILL(SIG0) %s (%s)\n",
4309 target_pid_to_str (ptid),
4310 err ? safe_strerror (tmp_errno) : "OK");
4311
4312 if (err != 0)
4313 return 0;
4314
4315 return 1;
4316 }
4317
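/* target_thread_alive implementation.  */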
4318 static int
4319 linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4320 {
4321 return linux_thread_alive (ptid);
4322 }
4323
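/* target_pid_to_str implementation.  */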
4324 static char *
4325 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
4326 {
4327 static char buf[64];
4328
4329 if (is_lwp (ptid)
4330 && (GET_PID (ptid) != GET_LWP (ptid)
4331 || num_lwps (GET_PID (ptid)) > 1))
4332 {
4333 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
4334 return buf;
4335 }
4336
4337 return normal_pid_to_str (ptid);
4338 }
4339
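/* target_thread_name implementation.  Reads the thread's name from
   /proc/PID/task/LWP/comm.  */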
4340 static char *
4341 linux_nat_thread_name (struct thread_info *thr)
4342 {
4343 int pid = ptid_get_pid (thr->ptid);
4344 long lwp = ptid_get_lwp (thr->ptid);
4345 #define FORMAT "/proc/%d/task/%ld/comm"
4346 char buf[sizeof (FORMAT) + 30];
4347 FILE *comm_file;
4348 char *result = NULL;
4349
4350 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
4351 comm_file = fopen (buf, "r");
4352 if (comm_file)
4353 {
4354 /* Not exported by the kernel, so we define it here. */
4355 #define COMM_LEN 16
4356 static char line[COMM_LEN + 1];
4357
4358 if (fgets (line, sizeof (line), comm_file))
4359 {
4360 char *nl = strchr (line, '\n');
4361
4362 if (nl)
4363 *nl = '\0';
4364 if (*line != '\0')
4365 result = line;
4366 }
4367
4368 fclose (comm_file);
4369 }
4370
4371 #undef COMM_LEN
4372 #undef FORMAT
4373
4374 return result;
4375 }
4376
4377 /* Accepts an integer PID; returns a string representing a file that
4378 can be opened to get the symbols for the child process. */
4379
4380 static char *
4381 linux_child_pid_to_exec_file (int pid)
4382 {
4383 char *name1, *name2;
4384
4385 name1 = xmalloc (MAXPATHLEN);
4386 name2 = xmalloc (MAXPATHLEN);
4387 make_cleanup (xfree, name1);
4388 make_cleanup (xfree, name2);
4389 memset (name2, 0, MAXPATHLEN);
4390
4391 sprintf (name1, "/proc/%d/exe", pid);
4392 if (readlink (name1, name2, MAXPATHLEN) > 0)
4393 return name2;
4394 else
4395 return name1;
4396 }
4397
4398 /* Service function for corefiles and info proc. */
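/* An illustrative /proc/PID/maps line, showing the fields that
   read_mapping below parses (address range, permissions, offset,
   device, inode, and an optional filename):

   00400000-0040b000 r-xp 00000000 08:01 1234567    /bin/cat  */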
4399
4400 static int
4401 read_mapping (FILE *mapfile,
4402 long long *addr,
4403 long long *endaddr,
4404 char *permissions,
4405 long long *offset,
4406 char *device, long long *inode, char *filename)
4407 {
4408 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
4409 addr, endaddr, permissions, offset, device, inode);
4410
4411 filename[0] = '\0';
4412 if (ret > 0 && ret != EOF)
4413 {
4414 /* Eat everything up to EOL for the filename. This will prevent
4415 weird filenames (such as one with embedded whitespace) from
4416 confusing this code. It also makes this code more robust in
4417 respect to annotations the kernel may add after the filename.
4418
4419 Note the filename is used for informational purposes
4420 only. */
4421 ret += fscanf (mapfile, "%[^\n]\n", filename);
4422 }
4423
4424 return (ret != 0 && ret != EOF);
4425 }
4426
4427 /* Fills the "to_find_memory_regions" target vector. Lists the memory
4428 regions in the inferior for a corefile. */
4429
4430 static int
4431 linux_nat_find_memory_regions (find_memory_region_ftype func, void *obfd)
4432 {
4433 int pid = PIDGET (inferior_ptid);
4434 char mapsfilename[MAXPATHLEN];
4435 FILE *mapsfile;
4436 long long addr, endaddr, size, offset, inode;
4437 char permissions[8], device[8], filename[MAXPATHLEN];
4438 int read, write, exec;
4439 struct cleanup *cleanup;
4440
4441 /* Compose the filename for the /proc memory map, and open it. */
4442 sprintf (mapsfilename, "/proc/%d/maps", pid);
4443 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
4444 error (_("Could not open %s."), mapsfilename);
4445 cleanup = make_cleanup_fclose (mapsfile);
4446
4447 if (info_verbose)
4448 fprintf_filtered (gdb_stdout,
4449 "Reading memory regions from %s\n", mapsfilename);
4450
4451 /* Now iterate until end-of-file. */
4452 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
4453 &offset, &device[0], &inode, &filename[0]))
4454 {
4455 size = endaddr - addr;
4456
4457 /* Get the segment's permissions. */
4458 read = (strchr (permissions, 'r') != 0);
4459 write = (strchr (permissions, 'w') != 0);
4460 exec = (strchr (permissions, 'x') != 0);
4461
4462 if (info_verbose)
4463 {
4464 fprintf_filtered (gdb_stdout,
4465 "Save segment, %s bytes at %s (%c%c%c)",
4466 plongest (size), paddress (target_gdbarch, addr),
4467 read ? 'r' : ' ',
4468 write ? 'w' : ' ', exec ? 'x' : ' ');
4469 if (filename[0])
4470 fprintf_filtered (gdb_stdout, " for %s", filename);
4471 fprintf_filtered (gdb_stdout, "\n");
4472 }
4473
4474 /* Invoke the callback function to create the corefile
4475 segment. */
4476 func (addr, size, read, write, exec, obfd);
4477 }
4478 do_cleanups (cleanup);
4479 return 0;
4480 }
4481
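/* Callback for iterate_over_threads.  Return nonzero if INFO is a
   thread of the current inferior that last stopped for a signal.  */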
4482 static int
4483 find_signalled_thread (struct thread_info *info, void *data)
4484 {
4485 if (info->suspend.stop_signal != TARGET_SIGNAL_0
4486 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
4487 return 1;
4488
4489 return 0;
4490 }
4491
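/* Pick a stop signal to record in the corefile notes: the stop
   signal of any signalled thread of the current inferior, or
   TARGET_SIGNAL_0 if there is none.  */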
4492 static enum target_signal
4493 find_stop_signal (void)
4494 {
4495 struct thread_info *info =
4496 iterate_over_threads (find_signalled_thread, NULL);
4497
4498 if (info)
4499 return info->suspend.stop_signal;
4500 else
4501 return TARGET_SIGNAL_0;
4502 }
4503
4504 /* Records the thread's register state for the corefile note
4505 section. */
4506
4507 static char *
4508 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
4509 char *note_data, int *note_size,
4510 enum target_signal stop_signal)
4511 {
4512 unsigned long lwp = ptid_get_lwp (ptid);
4513 struct gdbarch *gdbarch = target_gdbarch;
4514 struct regcache *regcache = get_thread_arch_regcache (ptid, gdbarch);
4515 const struct regset *regset;
4516 int core_regset_p;
4517 struct cleanup *old_chain;
4518 struct core_regset_section *sect_list;
4519 char *gdb_regset;
4520
4521 old_chain = save_inferior_ptid ();
4522 inferior_ptid = ptid;
4523 target_fetch_registers (regcache, -1);
4524 do_cleanups (old_chain);
4525
4526 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
4527 sect_list = gdbarch_core_regset_sections (gdbarch);
4528
4529 /* The loop below uses the new struct core_regset_section, which stores
4530 the supported section names and sizes for the core file. Note that
4531 the PRSTATUS note needs to be treated specially. But the other notes are
4532 structurally the same, so they can benefit from the new struct. */
4533 if (core_regset_p && sect_list != NULL)
4534 while (sect_list->sect_name != NULL)
4535 {
4536 regset = gdbarch_regset_from_core_section (gdbarch,
4537 sect_list->sect_name,
4538 sect_list->size);
4539 gdb_assert (regset && regset->collect_regset);
4540 gdb_regset = xmalloc (sect_list->size);
4541 regset->collect_regset (regset, regcache, -1,
4542 gdb_regset, sect_list->size);
4543
4544 if (strcmp (sect_list->sect_name, ".reg") == 0)
4545 note_data = (char *) elfcore_write_prstatus
4546 (obfd, note_data, note_size,
4547 lwp, target_signal_to_host (stop_signal),
4548 gdb_regset);
4549 else
4550 note_data = (char *) elfcore_write_register_note
4551 (obfd, note_data, note_size,
4552 sect_list->sect_name, gdb_regset,
4553 sect_list->size);
4554 xfree (gdb_regset);
4555 sect_list++;
4556 }
4557
4558 /* For architectures that do not have the struct core_regset_section
4559 implemented, we use the old method. When all the architectures have
4560 the new support, the code below should be deleted. */
4561 else
4562 {
4563 gdb_gregset_t gregs;
4564 gdb_fpregset_t fpregs;
4565
4566 if (core_regset_p
4567 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
4568 sizeof (gregs)))
4569 != NULL && regset->collect_regset != NULL)
4570 regset->collect_regset (regset, regcache, -1,
4571 &gregs, sizeof (gregs));
4572 else
4573 fill_gregset (regcache, &gregs, -1);
4574
4575 note_data = (char *) elfcore_write_prstatus
4576 (obfd, note_data, note_size, lwp, target_signal_to_host (stop_signal),
4577 &gregs);
4578
4579 if (core_regset_p
4580 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
4581 sizeof (fpregs)))
4582 != NULL && regset->collect_regset != NULL)
4583 regset->collect_regset (regset, regcache, -1,
4584 &fpregs, sizeof (fpregs));
4585 else
4586 fill_fpregset (regcache, &fpregs, -1);
4587
4588 note_data = (char *) elfcore_write_prfpreg (obfd,
4589 note_data,
4590 note_size,
4591 &fpregs, sizeof (fpregs));
4592 }
4593
4594 return note_data;
4595 }
4596
4597 struct linux_nat_corefile_thread_data
4598 {
4599 bfd *obfd;
4600 char *note_data;
4601 int *note_size;
4602 int num_notes;
4603 enum target_signal stop_signal;
4604 };
4605
4606 /* Called by gdbthread.c once per thread. Records the thread's
4607 register state for the corefile note section. */
4608
4609 static int
4610 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
4611 {
4612 struct linux_nat_corefile_thread_data *args = data;
4613
4614 args->note_data = linux_nat_do_thread_registers (args->obfd,
4615 ti->ptid,
4616 args->note_data,
4617 args->note_size,
4618 args->stop_signal);
4619 args->num_notes++;
4620
4621 return 0;
4622 }
4623
4624 /* Enumerate spufs IDs for process PID. */
4625
4626 static void
4627 iterate_over_spus (int pid, void (*callback) (void *, int), void *data)
4628 {
4629 char path[128];
4630 DIR *dir;
4631 struct dirent *entry;
4632
4633 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4634 dir = opendir (path);
4635 if (!dir)
4636 return;
4637
4638 rewinddir (dir);
4639 while ((entry = readdir (dir)) != NULL)
4640 {
4641 struct stat st;
4642 struct statfs stfs;
4643 int fd;
4644
4645 fd = atoi (entry->d_name);
4646 if (!fd)
4647 continue;
4648
4649 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4650 if (stat (path, &st) != 0)
4651 continue;
4652 if (!S_ISDIR (st.st_mode))
4653 continue;
4654
4655 if (statfs (path, &stfs) != 0)
4656 continue;
4657 if (stfs.f_type != SPUFS_MAGIC)
4658 continue;
4659
4660 callback (data, fd);
4661 }
4662
4663 closedir (dir);
4664 }
4665
4666 /* Generate corefile notes for SPU contexts. */
4667
4668 struct linux_spu_corefile_data
4669 {
4670 bfd *obfd;
4671 char *note_data;
4672 int *note_size;
4673 };
4674
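/* Callback for iterate_over_spus.  Writes one corefile note per
   spufs file of the SPU context behind file descriptor FD.  */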
4675 static void
4676 linux_spu_corefile_callback (void *data, int fd)
4677 {
4678 struct linux_spu_corefile_data *args = data;
4679 int i;
4680
4681 static const char *spu_files[] =
4682 {
4683 "object-id",
4684 "mem",
4685 "regs",
4686 "fpcr",
4687 "lslr",
4688 "decr",
4689 "decr_status",
4690 "signal1",
4691 "signal1_type",
4692 "signal2",
4693 "signal2_type",
4694 "event_mask",
4695 "event_status",
4696 "mbox_info",
4697 "ibox_info",
4698 "wbox_info",
4699 "dma_info",
4700 "proxydma_info",
4701 };
4702
4703 for (i = 0; i < sizeof (spu_files) / sizeof (spu_files[0]); i++)
4704 {
4705 char annex[32], note_name[32];
4706 gdb_byte *spu_data;
4707 LONGEST spu_len;
4708
4709 xsnprintf (annex, sizeof annex, "%d/%s", fd, spu_files[i]);
4710 spu_len = target_read_alloc (&current_target, TARGET_OBJECT_SPU,
4711 annex, &spu_data);
4712 if (spu_len > 0)
4713 {
4714 xsnprintf (note_name, sizeof note_name, "SPU/%s", annex);
4715 args->note_data = elfcore_write_note (args->obfd, args->note_data,
4716 args->note_size, note_name,
4717 NT_SPU, spu_data, spu_len);
4718 xfree (spu_data);
4719 }
4720 }
4721 }
4722
4723 static char *
4724 linux_spu_make_corefile_notes (bfd *obfd, char *note_data, int *note_size)
4725 {
4726 struct linux_spu_corefile_data args;
4727
4728 args.obfd = obfd;
4729 args.note_data = note_data;
4730 args.note_size = note_size;
4731
4732 iterate_over_spus (PIDGET (inferior_ptid),
4733 linux_spu_corefile_callback, &args);
4734
4735 return args.note_data;
4736 }
4737
4738 /* Fills the "to_make_corefile_note" target vector. Builds the note
4739 section for a corefile, and returns it in a malloc buffer. */
4740
4741 static char *
4742 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
4743 {
4744 struct linux_nat_corefile_thread_data thread_args;
4745 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
4746 char fname[16] = { '\0' };
4747 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
4748 char psargs[80] = { '\0' };
4749 char *note_data = NULL;
4750 ptid_t filter = pid_to_ptid (ptid_get_pid (inferior_ptid));
4751 gdb_byte *auxv;
4752 int auxv_len;
4753
4754 if (get_exec_file (0))
4755 {
4756 strncpy (fname, lbasename (get_exec_file (0)), sizeof (fname));
4757 strncpy (psargs, get_exec_file (0), sizeof (psargs));
4758 if (get_inferior_args ())
4759 {
4760 char *string_end;
4761 char *psargs_end = psargs + sizeof (psargs);
4762
4763 /* linux_elfcore_write_prpsinfo () copes fine with strings that are
4764 not zero-terminated. */
4765 string_end = memchr (psargs, 0, sizeof (psargs));
4766 if (string_end != NULL)
4767 {
4768 *string_end++ = ' ';
4769 strncpy (string_end, get_inferior_args (),
4770 psargs_end - string_end);
4771 }
4772 }
4773 note_data = (char *) elfcore_write_prpsinfo (obfd,
4774 note_data,
4775 note_size, fname, psargs);
4776 }
4777
4778 /* Dump information for threads. */
4779 thread_args.obfd = obfd;
4780 thread_args.note_data = note_data;
4781 thread_args.note_size = note_size;
4782 thread_args.num_notes = 0;
4783 thread_args.stop_signal = find_stop_signal ();
4784 iterate_over_lwps (filter, linux_nat_corefile_thread_callback, &thread_args);
4785 gdb_assert (thread_args.num_notes != 0);
4786 note_data = thread_args.note_data;
4787
4788 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
4789 NULL, &auxv);
4790 if (auxv_len > 0)
4791 {
4792 note_data = elfcore_write_note (obfd, note_data, note_size,
4793 "CORE", NT_AUXV, auxv, auxv_len);
4794 xfree (auxv);
4795 }
4796
4797 note_data = linux_spu_make_corefile_notes (obfd, note_data, note_size);
4798
4799 make_cleanup (xfree, note_data);
4800 return note_data;
4801 }
4802
4803 /* Implement the to_xfer_partial interface for memory reads using the /proc
4804 filesystem. Because we can use a single read() call for /proc, this
4805 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4806 but it doesn't support writes. */
4807
4808 static LONGEST
4809 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4810 const char *annex, gdb_byte *readbuf,
4811 const gdb_byte *writebuf,
4812 ULONGEST offset, LONGEST len)
4813 {
4814 LONGEST ret;
4815 int fd;
4816 char filename[64];
4817
4818 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4819 return 0;
4820
4821 /* Don't bother for one word. */
4822 if (len < 3 * sizeof (long))
4823 return 0;
4824
4825 /* We could keep this file open and cache it - possibly one per
4826 thread. That requires some juggling, but is even faster. */
4827 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4828 fd = open (filename, O_RDONLY | O_LARGEFILE);
4829 if (fd == -1)
4830 return 0;
4831
4832 /* If pread64 is available, use it. It's faster if the kernel
4833 supports it (only one syscall), and it's 64-bit safe even on
4834 32-bit platforms (for instance, SPARC debugging a SPARC64
4835 application). */
4836 #ifdef HAVE_PREAD64
4837 if (pread64 (fd, readbuf, len, offset) != len)
4838 #else
4839 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4840 #endif
4841 ret = 0;
4842 else
4843 ret = len;
4844
4845 close (fd);
4846 return ret;
4847 }
4848
4849
4850 /* Enumerate spufs IDs for process PID. */
4851 static LONGEST
4852 spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, LONGEST len)
4853 {
4854 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch);
4855 LONGEST pos = 0;
4856 LONGEST written = 0;
4857 char path[128];
4858 DIR *dir;
4859 struct dirent *entry;
4860
4861 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4862 dir = opendir (path);
4863 if (!dir)
4864 return -1;
4865
4866 rewinddir (dir);
4867 while ((entry = readdir (dir)) != NULL)
4868 {
4869 struct stat st;
4870 struct statfs stfs;
4871 int fd;
4872
4873 fd = atoi (entry->d_name);
4874 if (!fd)
4875 continue;
4876
4877 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4878 if (stat (path, &st) != 0)
4879 continue;
4880 if (!S_ISDIR (st.st_mode))
4881 continue;
4882
4883 if (statfs (path, &stfs) != 0)
4884 continue;
4885 if (stfs.f_type != SPUFS_MAGIC)
4886 continue;
4887
4888 if (pos >= offset && pos + 4 <= offset + len)
4889 {
4890 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4891 written += 4;
4892 }
4893 pos += 4;
4894 }
4895
4896 closedir (dir);
4897 return written;
4898 }
4899
4900 /* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4901 object type, using the /proc file system. */
4902 static LONGEST
4903 linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4904 const char *annex, gdb_byte *readbuf,
4905 const gdb_byte *writebuf,
4906 ULONGEST offset, LONGEST len)
4907 {
4908 char buf[128];
4909 int fd = 0;
4910 int ret = -1;
4911 int pid = PIDGET (inferior_ptid);
4912
4913 if (!annex)
4914 {
4915 if (!readbuf)
4916 return -1;
4917 else
4918 return spu_enumerate_spu_ids (pid, readbuf, offset, len);
4919 }
4920
4921 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
4922 fd = open (buf, writebuf ? O_WRONLY : O_RDONLY);
4923 if (fd <= 0)
4924 return -1;
4925
4926 if (offset != 0
4927 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4928 {
4929 close (fd);
4930 return 0;
4931 }
4932
4933 if (writebuf)
4934 ret = write (fd, writebuf, (size_t) len);
4935 else if (readbuf)
4936 ret = read (fd, readbuf, (size_t) len);
4937
4938 close (fd);
4939 return ret;
4940 }
4941
4942
4943 /* Parse LINE as a signal set and add its set bits to SIGS. */
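/* As a worked example (signal numbers are target-dependent; SIGUSR1
   is 10 on most Linux targets): the status line
   "SigPnd:\t0000000000000200\n" has its only nonzero hex digit, '2',
   in the position covering signals 9-12, and the bit with value 2
   within that digit selects signal 10, so only SIGUSR1 is added to
   SIGS.  */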
4944
4945 static void
4946 add_line_to_sigset (const char *line, sigset_t *sigs)
4947 {
4948 int len = strlen (line) - 1;
4949 const char *p;
4950 int signum;
4951
4952 if (line[len] != '\n')
4953 error (_("Could not parse signal set: %s"), line);
4954
4955 p = line;
4956 signum = len * 4;
4957 while (len-- > 0)
4958 {
4959 int digit;
4960
4961 if (*p >= '0' && *p <= '9')
4962 digit = *p - '0';
4963 else if (*p >= 'a' && *p <= 'f')
4964 digit = *p - 'a' + 10;
4965 else
4966 error (_("Could not parse signal set: %s"), line);
4967
4968 signum -= 4;
4969
4970 if (digit & 1)
4971 sigaddset (sigs, signum + 1);
4972 if (digit & 2)
4973 sigaddset (sigs, signum + 2);
4974 if (digit & 4)
4975 sigaddset (sigs, signum + 3);
4976 if (digit & 8)
4977 sigaddset (sigs, signum + 4);
4978
4979 p++;
4980 }
4981 }
4982
4983 /* Find process PID's pending signals from /proc/pid/status and set
4984 SIGS to match. */
4985
4986 void
4987 linux_proc_pending_signals (int pid, sigset_t *pending,
4988 sigset_t *blocked, sigset_t *ignored)
4989 {
4990 FILE *procfile;
4991 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4992 struct cleanup *cleanup;
4993
4994 sigemptyset (pending);
4995 sigemptyset (blocked);
4996 sigemptyset (ignored);
4997 sprintf (fname, "/proc/%d/status", pid);
4998 procfile = fopen (fname, "r");
4999 if (procfile == NULL)
5000 error (_("Could not open %s"), fname);
5001 cleanup = make_cleanup_fclose (procfile);
5002
5003 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
5004 {
5005 /* Normal queued signals are on the SigPnd line in the status
5006 file. However, 2.6 kernels also have a "shared" pending
5007 queue for delivering signals to a thread group, so check for
5008 a ShdPnd line also.
5009
5010 Unfortunately some Red Hat kernels include the shared pending
5011 queue but not the ShdPnd status field. */
5012
5013 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
5014 add_line_to_sigset (buffer + 8, pending);
5015 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
5016 add_line_to_sigset (buffer + 8, pending);
5017 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
5018 add_line_to_sigset (buffer + 8, blocked);
5019 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
5020 add_line_to_sigset (buffer + 8, ignored);
5021 }
5022
5023 do_cleanups (cleanup);
5024 }
5025
5026 static LONGEST
5027 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
5028 const char *annex, gdb_byte *readbuf,
5029 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5030 {
5031 gdb_assert (object == TARGET_OBJECT_OSDATA);
5032
5033 return linux_common_xfer_osdata (annex, readbuf, offset, len);
5034 }
5035
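/* to_xfer_partial method installed by linux_target_install_ops.
   Dispatches AUXV, OSDATA and SPU requests, then tries the fast
   /proc/PID/mem path for memory before falling back to the method
   inherited from the layer below.  */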
5036 static LONGEST
5037 linux_xfer_partial (struct target_ops *ops, enum target_object object,
5038 const char *annex, gdb_byte *readbuf,
5039 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
5040 {
5041 LONGEST xfer;
5042
5043 if (object == TARGET_OBJECT_AUXV)
5044 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
5045 offset, len);
5046
5047 if (object == TARGET_OBJECT_OSDATA)
5048 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
5049 offset, len);
5050
5051 if (object == TARGET_OBJECT_SPU)
5052 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
5053 offset, len);
5054
5055 /* GDB calculates all the addresses in a possibly larger width than the
5056 target's. The address needs to be masked before its final use - either by
5057 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
5058
5059 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
5060
5061 if (object == TARGET_OBJECT_MEMORY)
5062 {
5063 int addr_bit = gdbarch_addr_bit (target_gdbarch);
5064
5065 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
5066 offset &= ((ULONGEST) 1 << addr_bit) - 1;
5067 }
5068
5069 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
5070 offset, len);
5071 if (xfer != 0)
5072 return xfer;
5073
5074 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
5075 offset, len);
5076 }
5077
5078 /* Create a prototype generic GNU/Linux target. The client can override
5079 it with local methods. */
5080
5081 static void
5082 linux_target_install_ops (struct target_ops *t)
5083 {
5084 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
5085 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
5086 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
5087 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
5088 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
5089 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
5090 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
5091 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
5092 t->to_post_startup_inferior = linux_child_post_startup_inferior;
5093 t->to_post_attach = linux_child_post_attach;
5094 t->to_follow_fork = linux_child_follow_fork;
5095 t->to_find_memory_regions = linux_nat_find_memory_regions;
5096 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
5097
5098 super_xfer_partial = t->to_xfer_partial;
5099 t->to_xfer_partial = linux_xfer_partial;
5100 }
5101
5102 struct target_ops *
5103 linux_target (void)
5104 {
5105 struct target_ops *t;
5106
5107 t = inf_ptrace_target ();
5108 linux_target_install_ops (t);
5109
5110 return t;
5111 }
5112
5113 struct target_ops *
5114 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
5115 {
5116 struct target_ops *t;
5117
5118 t = inf_ptrace_trad_target (register_u_offset);
5119 linux_target_install_ops (t);
5120
5121 return t;
5122 }
5123
5124 /* target_is_async_p implementation. */
5125
5126 static int
5127 linux_nat_is_async_p (void)
5128 {
5129 /* NOTE: palves 2008-03-21: We're only async when the user requests
5130 it explicitly with the "set target-async" command.
5131 Someday, linux will always be async. */
5132 return target_async_permitted;
5133 }
5134
5135 /* target_can_async_p implementation. */
5136
5137 static int
5138 linux_nat_can_async_p (void)
5139 {
5140 /* NOTE: palves 2008-03-21: We're only async when the user requests
5141 it explicitly with the "set target-async" command.
5142 Someday, linux will always be async. */
5143 return target_async_permitted;
5144 }
5145
5146 static int
5147 linux_nat_supports_non_stop (void)
5148 {
5149 return 1;
5150 }
5151
5152 /* True if we want to support multi-process. To be removed when GDB
5153 supports multi-exec. */
5154
5155 int linux_multi_process = 1;
5156
5157 static int
5158 linux_nat_supports_multi_process (void)
5159 {
5160 return linux_multi_process;
5161 }
5162
5163 static int
5164 linux_nat_supports_disable_randomization (void)
5165 {
5166 #ifdef HAVE_PERSONALITY
5167 return 1;
5168 #else
5169 return 0;
5170 #endif
5171 }
5172
5173 static int async_terminal_is_ours = 1;
5174
5175 /* target_terminal_inferior implementation. */
5176
5177 static void
5178 linux_nat_terminal_inferior (void)
5179 {
5180 if (!target_is_async_p ())
5181 {
5182 /* Async mode is disabled. */
5183 terminal_inferior ();
5184 return;
5185 }
5186
5187 terminal_inferior ();
5188
5189 /* Calls to target_terminal_*() are meant to be idempotent. */
5190 if (!async_terminal_is_ours)
5191 return;
5192
5193 delete_file_handler (input_fd);
5194 async_terminal_is_ours = 0;
5195 set_sigint_trap ();
5196 }
5197
5198 /* target_terminal_ours implementation. */
5199
5200 static void
5201 linux_nat_terminal_ours (void)
5202 {
5203 if (!target_is_async_p ())
5204 {
5205 /* Async mode is disabled. */
5206 terminal_ours ();
5207 return;
5208 }
5209
5210 /* GDB should never give the terminal to the inferior if the
5211 inferior is running in the background (run&, continue&, etc.),
5212 but claiming it sure should. */
5213 terminal_ours ();
5214
5215 if (async_terminal_is_ours)
5216 return;
5217
5218 clear_sigint_trap ();
5219 add_file_handler (input_fd, stdin_event_handler, 0);
5220 async_terminal_is_ours = 1;
5221 }
5222
5223 static void (*async_client_callback) (enum inferior_event_type event_type,
5224 void *context);
5225 static void *async_client_context;
5226
5227 /* SIGCHLD handler that serves two purposes: in non-stop/async mode,
5228 it notices when any child changes state and notifies the
5229 event loop; and it allows us to use sigsuspend in linux_nat_wait_1
5230 above to wait for the arrival of a SIGCHLD. */
5231
5232 static void
5233 sigchld_handler (int signo)
5234 {
5235 int old_errno = errno;
5236
5237 if (debug_linux_nat)
5238 ui_file_write_async_safe (gdb_stdlog,
5239 "sigchld\n", sizeof ("sigchld\n") - 1);
5240
5241 if (signo == SIGCHLD
5242 && linux_nat_event_pipe[0] != -1)
5243 async_file_mark (); /* Let the event loop know that there are
5244 events to handle. */
5245
5246 errno = old_errno;
5247 }
5248
5249 /* Callback registered with the target events file descriptor. */
5250
5251 static void
5252 handle_target_event (int error, gdb_client_data client_data)
5253 {
5254 (*async_client_callback) (INF_REG_EVENT, async_client_context);
5255 }
5256
5257 /* Create/destroy the target events pipe. Returns previous state. */
5258
5259 static int
5260 linux_async_pipe (int enable)
5261 {
5262 int previous = (linux_nat_event_pipe[0] != -1);
5263
5264 if (previous != enable)
5265 {
5266 sigset_t prev_mask;
5267
5268 block_child_signals (&prev_mask);
5269
5270 if (enable)
5271 {
5272 if (pipe (linux_nat_event_pipe) == -1)
5273 internal_error (__FILE__, __LINE__,
5274 "creating event pipe failed.");
5275
5276 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
5277 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
5278 }
5279 else
5280 {
5281 close (linux_nat_event_pipe[0]);
5282 close (linux_nat_event_pipe[1]);
5283 linux_nat_event_pipe[0] = -1;
5284 linux_nat_event_pipe[1] = -1;
5285 }
5286
5287 restore_child_signals_mask (&prev_mask);
5288 }
5289
5290 return previous;
5291 }
5292
5293 /* target_async implementation. */
5294
5295 static void
5296 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
5297 void *context), void *context)
5298 {
5299 if (callback != NULL)
5300 {
5301 async_client_callback = callback;
5302 async_client_context = context;
5303 if (!linux_async_pipe (1))
5304 {
5305 add_file_handler (linux_nat_event_pipe[0],
5306 handle_target_event, NULL);
5307 /* There may be pending events to handle. Tell the event loop
5308 to poll them. */
5309 async_file_mark ();
5310 }
5311 }
5312 else
5313 {
5314 async_client_callback = callback;
5315 async_client_context = context;
5316 delete_file_handler (linux_nat_event_pipe[0]);
5317 linux_async_pipe (0);
5318 }
5319 return;
5320 }
5321
5322 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
5323 event came out. */
5324
5325 static int
5326 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
5327 {
5328 if (!lwp->stopped)
5329 {
5330 ptid_t ptid = lwp->ptid;
5331
5332 if (debug_linux_nat)
5333 fprintf_unfiltered (gdb_stdlog,
5334 "LNSL: running -> suspending %s\n",
5335 target_pid_to_str (lwp->ptid));
5336
5338 if (lwp->last_resume_kind == resume_stop)
5339 {
5340 if (debug_linux_nat)
5341 fprintf_unfiltered (gdb_stdlog,
5342 "linux-nat: already stopping LWP %ld at "
5343 "GDB's request\n",
5344 ptid_get_lwp (lwp->ptid));
5345 return 0;
5346 }
5347
5348 stop_callback (lwp, NULL);
5349 lwp->last_resume_kind = resume_stop;
5350 }
5351 else
5352 {
5353 /* Already known to be stopped; do nothing. */
5354
5355 if (debug_linux_nat)
5356 {
5357 if (find_thread_ptid (lwp->ptid)->stop_requested)
5358 fprintf_unfiltered (gdb_stdlog,
5359 "LNSL: already stopped/stop_requested %s\n",
5360 target_pid_to_str (lwp->ptid));
5361 else
5362 fprintf_unfiltered (gdb_stdlog,
5363 "LNSL: already stopped/no "
5364 "stop_requested yet %s\n",
5365 target_pid_to_str (lwp->ptid));
5366 }
5367 }
5368 return 0;
5369 }
5370
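/* target_stop implementation.  */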
5371 static void
5372 linux_nat_stop (ptid_t ptid)
5373 {
5374 if (non_stop)
5375 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
5376 else
5377 linux_ops->to_stop (ptid);
5378 }
5379
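/* target_close implementation.  */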
5380 static void
5381 linux_nat_close (int quitting)
5382 {
5383 /* Unregister from the event loop. */
5384 if (linux_nat_is_async_p ())
5385 linux_nat_async (NULL, 0);
5386
5387 if (linux_ops->to_close)
5388 linux_ops->to_close (quitting);
5389 }
5390
5391 /* When requests are passed down from the linux-nat layer to the
5392 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
5393 used. The address space pointer is stored in the inferior object,
5394 but the common code that is passed such ptid can't tell whether
5395 lwpid is a "main" process id or not (it assumes so). We reverse
5396 look up the "main" process id from the lwp here. */
5397
5398 struct address_space *
5399 linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
5400 {
5401 struct lwp_info *lwp;
5402 struct inferior *inf;
5403 int pid;
5404
5405 pid = GET_LWP (ptid);
5406 if (GET_LWP (ptid) == 0)
5407 {
5408 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
5409 tgid. */
5410 lwp = find_lwp_pid (ptid);
5411 pid = GET_PID (lwp->ptid);
5412 }
5413 else
5414 {
5415 /* A (pid,lwpid,0) ptid. */
5416 pid = GET_PID (ptid);
5417 }
5418
5419 inf = find_inferior_pid (pid);
5420 gdb_assert (inf != NULL);
5421 return inf->aspace;
5422 }
5423
5424 int
5425 linux_nat_core_of_thread_1 (ptid_t ptid)
5426 {
5427 struct cleanup *back_to;
5428 char *filename;
5429 FILE *f;
5430 char *content = NULL;
5431 char *p;
5432 char *ts = 0;
5433 int content_read = 0;
5434 int i;
5435 int core;
5436
5437 filename = xstrprintf ("/proc/%d/task/%ld/stat",
5438 GET_PID (ptid), GET_LWP (ptid));
5439 back_to = make_cleanup (xfree, filename);
5440
5441 f = fopen (filename, "r");
5442 if (!f)
5443 {
5444 do_cleanups (back_to);
5445 return -1;
5446 }
5447
5448 make_cleanup_fclose (f);
5449
5450 for (;;)
5451 {
5452 int n;
5453
5454 content = xrealloc (content, content_read + 1024);
5455 n = fread (content + content_read, 1, 1024, f);
5456 content_read += n;
5457 if (n < 1024)
5458 {
5459 content[content_read] = '\0';
5460 break;
5461 }
5462 }
5463
5464 make_cleanup (xfree, content);
5465
5466 p = strchr (content, '(');
5467
5468 /* Skip ")". */
5469 if (p != NULL)
5470 p = strchr (p, ')');
5471 if (p != NULL)
5472 p++;
5473
5474 /* If the first field after program name has index 0, then core number is
5475 the field with index 36. There's no constant for that anywhere. */
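/* (In proc(5) terms this is the "processor" field, the 39th field
   of /proc/PID/task/LWP/stat when the pid is counted as the
   first.)  */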
5476 if (p != NULL)
5477 p = strtok_r (p, " ", &ts);
5478 for (i = 0; p != NULL && i != 36; ++i)
5479 p = strtok_r (NULL, " ", &ts);
5480
5481 if (p == NULL || sscanf (p, "%d", &core) == 0)
5482 core = -1;
5483
5484 do_cleanups (back_to);
5485
5486 return core;
5487 }
5488
5489 /* Return the cached value of the processor core for thread PTID. */
5490
5491 int
5492 linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
5493 {
5494 struct lwp_info *info = find_lwp_pid (ptid);
5495
5496 if (info)
5497 return info->core;
5498 return -1;
5499 }
5500
5501 void
5502 linux_nat_add_target (struct target_ops *t)
5503 {
5504 /* Save the provided single-threaded target. We save this in a separate
5505 variable because another target we've inherited from (e.g. inf-ptrace)
5506 may have saved a pointer to T; we want to use it for the final
5507 process stratum target. */
5508 linux_ops_saved = *t;
5509 linux_ops = &linux_ops_saved;
5510
5511 /* Override some methods for multithreading. */
5512 t->to_create_inferior = linux_nat_create_inferior;
5513 t->to_attach = linux_nat_attach;
5514 t->to_detach = linux_nat_detach;
5515 t->to_resume = linux_nat_resume;
5516 t->to_wait = linux_nat_wait;
5517 t->to_pass_signals = linux_nat_pass_signals;
5518 t->to_xfer_partial = linux_nat_xfer_partial;
5519 t->to_kill = linux_nat_kill;
5520 t->to_mourn_inferior = linux_nat_mourn_inferior;
5521 t->to_thread_alive = linux_nat_thread_alive;
5522 t->to_pid_to_str = linux_nat_pid_to_str;
5523 t->to_thread_name = linux_nat_thread_name;
5524 t->to_has_thread_control = tc_schedlock;
5525 t->to_thread_address_space = linux_nat_thread_address_space;
5526 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
5527 t->to_stopped_data_address = linux_nat_stopped_data_address;
5528
5529 t->to_can_async_p = linux_nat_can_async_p;
5530 t->to_is_async_p = linux_nat_is_async_p;
5531 t->to_supports_non_stop = linux_nat_supports_non_stop;
5532 t->to_async = linux_nat_async;
5533 t->to_terminal_inferior = linux_nat_terminal_inferior;
5534 t->to_terminal_ours = linux_nat_terminal_ours;
5535 t->to_close = linux_nat_close;
5536
5537 /* Methods for non-stop support. */
5538 t->to_stop = linux_nat_stop;
5539
5540 t->to_supports_multi_process = linux_nat_supports_multi_process;
5541
5542 t->to_supports_disable_randomization
5543 = linux_nat_supports_disable_randomization;
5544
5545 t->to_core_of_thread = linux_nat_core_of_thread;
5546
5547 /* We don't change the stratum; this target will sit at
5548 process_stratum and thread_db will sit at thread_stratum. This
5549 is a little strange, since this is a multi-threaded-capable
5550 target, but we want to be on the stack below thread_db, and we
5551 also want to be used for single-threaded processes. */
5552
5553 add_target (t);
5554 }
5555
5556 /* Register a method to call whenever a new thread is attached. */
5557 void
5558 linux_nat_set_new_thread (struct target_ops *t,
5559 void (*new_thread) (struct lwp_info *))
5560 {
5561 /* Save the pointer. We only support a single registered instance
5562 of the GNU/Linux native target, so we do not need to map this to
5563 T. */
5564 linux_nat_new_thread = new_thread;
5565 }

/* Register a method that converts a siginfo object between the layout
   that ptrace returns, and the layout in the architecture of the
   inferior.  */
void
linux_nat_set_siginfo_fixup (struct target_ops *t,
			     int (*siginfo_fixup) (struct siginfo *,
						   gdb_byte *,
						   int))
{
  /* Save the pointer.  */
  linux_nat_siginfo_fixup = siginfo_fixup;
}

/* Register a method to call prior to resuming a thread.  */

void
linux_nat_set_prepare_to_resume (struct target_ops *t,
				 void (*prepare_to_resume) (struct lwp_info *))
{
  /* Save the pointer.  */
  linux_nat_prepare_to_resume = prepare_to_resume;
}

/* Return the saved siginfo associated with PTID.  */
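/* Architecture code can use this, for instance, to recover the
   faulting data address of a watchpoint hit from the signal's
   si_addr field.  */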
struct siginfo *
linux_nat_get_siginfo (ptid_t ptid)
{
  struct lwp_info *lp = find_lwp_pid (ptid);

  gdb_assert (lp != NULL);

  return &lp->siginfo;
}

/* Provide a prototype to silence -Wmissing-prototypes.  */
extern initialize_file_ftype _initialize_linux_nat;

void
_initialize_linux_nat (void)
{
  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
			    &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat,
			    &setdebuglist, &showdebuglist);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* Install a SIGCHLD handler.  */
  sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sigchld_action.sa_mask);
  sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  sigemptyset (&blocked_mask);
}
\f

/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
   the GNU/Linux Threads library and therefore doesn't really belong
   here.  */

/* Read variable NAME in the target and return its value if found.
   Otherwise return zero.  It is assumed that the type of the variable
   is `int'.  */

static int
get_signo (const char *name)
{
  struct minimal_symbol *ms;
  int signo;

  ms = lookup_minimal_symbol (name, NULL, NULL);
  if (ms == NULL)
    return 0;

  if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
			  sizeof (signo)) != 0)
    return 0;

  return signo;
}

/* Return the set of signals used by the threads library in *SET.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some
     legacy cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT
     signals, but does not provide any way for the debugger to query
     the signal numbers -- fortunately they don't change.  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;
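
  /* On Linux, the kernel's lowest real-time signal (__SIGRTMIN) is 32,
     so the fallbacks above typically resolve to signals 32 and 33;
     glibc hides those from applications by making the user-visible
     SIGRTMIN start above the signals its threading library reserves
     for itself.  */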

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}