* gnu-nat.c (gnu_attach): Add process to inferiors table.
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49 #include "inf-loop.h"
50 #include "event-loop.h"
51 #include "event-top.h"
52
53 #ifdef HAVE_PERSONALITY
54 # include <sys/personality.h>
55 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
56 # define ADDR_NO_RANDOMIZE 0x0040000
57 # endif
58 #endif /* HAVE_PERSONALITY */
59
60 /* This comment documents high-level logic of this file.
61
62 Waiting for events in sync mode
63 ===============================
64
65 When waiting for an event in a specific thread, we just use waitpid, passing
66 the specific pid, and not passing WNOHANG.
67
68 When waiting for an event in all threads, waitpid is not quite good. Prior to
69 version 2.4, Linux can either wait for event in main thread, or in secondary
70 threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
71 miss an event. The solution is to use non-blocking waitpid, together with
72 sigsuspend. First, we use non-blocking waitpid to get an event in the main
73 process, if any. Second, we use non-blocking waitpid with the __WCLONE
74 flag to check for events in cloned processes. If nothing is found, we use
75 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
76 happened to a child process -- and SIGCHLD will be delivered both for events
77 in main debugged process and in cloned processes. As soon as we know there's
78 an event, we get back to calling nonblocking waitpid with and without __WCLONE.
79
80 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
81 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
82 blocked, the signal becomes pending and sigsuspend immediately
83 notices it and returns.
84
85 Waiting for events in async mode
86 ================================
87
88 In async mode, GDB should always be ready to handle both user input and target
89 events, so neither blocking waitpid nor sigsuspend are viable
90 options. Instead, we should notify the GDB main event loop whenever there's
91 unprocessed event from the target. The only way to notify this event loop is
92 to make it wait on input from a pipe, and write something to the pipe whenever
93 there's event. Obviously, if we fail to notify the event loop if there's
94 target event, it's bad. If we notify the event loop when there's no event
95 from target, linux-nat.c will detect that there's no event, actually, and
96 report event of type TARGET_WAITKIND_IGNORE, but it will waste time and
97 is better avoided.
98
99 The main design point is that every time GDB is outside linux-nat.c, we have a
100 SIGCHLD handler installed that is called when something happens to the target
101 and notifies the GDB event loop. Also, the event is extracted from the target
102 using waitpid and stored for future use. Whenever GDB core decides to handle
103 the event, and calls into linux-nat.c, we disable SIGCHLD and process things
104 as in sync mode, except that before waitpid call we check if there are any
105 previously read events.
106
107 It could happen that during event processing, we'll try to get more events
108 than there are events in the local queue, which will result in a waitpid call.
109 Those waitpid calls, while blocking, are guaranteed to always have
110 something for waitpid to return. E.g., stopping a thread with SIGSTOP, and
111 waiting for the lwp to stop.
112
113 The event loop is notified about new events using a pipe. SIGCHLD handler does
114 waitpid and writes the results in to a pipe. GDB event loop has the other end
115 of the pipe among the sources. When event loop starts to process the event
116 and calls a function in linux-nat.c, all events from the pipe are transferred
117 into a local queue and SIGCHLD is blocked. Further processing goes as in sync
118 mode. Before we return from linux_nat_wait, we transfer all unprocessed events
119 from local queue back to the pipe, so that when we get back to event loop,
120 event loop will notice there's something more to do.
121
122 SIGCHLD is blocked when we're inside target_wait, so that should we actually
123 want to wait for some more events, SIGCHLD handler does not steal them from
124 us. Technically, it would be possible to add new events to the local queue but
125 it's about the same amount of work as blocking SIGCHLD.
126
127 This moving of events from pipe into local queue and back into pipe when we
128 enter/leave linux-nat.c is somewhat ugly. Unfortunately, GDB event loop is
129 home-grown and incapable of waiting on any queue.
130
131 Use of signals
132 ==============
133
134 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
135 signal is not entirely significant; we just need for a signal to be delivered,
136 so that we can intercept it. SIGSTOP's advantage is that it can not be
137 blocked. A disadvantage is that it is not a real-time signal, so it can only
138 be queued once; we do not keep track of other sources of SIGSTOP.
139
140 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
141 use them, because they have special behavior when the signal is generated -
142 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
143 kills the entire thread group.
144
145 A delivered SIGSTOP would stop the entire thread group, not just the thread we
146 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
147 cancel it (by PTRACE_CONT without passing SIGSTOP).
148
149 We could use a real-time signal instead. This would solve those problems; we
150 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
151 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
152 generates it, and there are races with trying to find a signal that is not
153 blocked. */
154
155 #ifndef O_LARGEFILE
156 #define O_LARGEFILE 0
157 #endif
158
159 /* If the system headers did not provide the constants, hard-code the normal
160 values. */
161 #ifndef PTRACE_EVENT_FORK
162
163 #define PTRACE_SETOPTIONS 0x4200
164 #define PTRACE_GETEVENTMSG 0x4201
165
166 /* options set using PTRACE_SETOPTIONS */
167 #define PTRACE_O_TRACESYSGOOD 0x00000001
168 #define PTRACE_O_TRACEFORK 0x00000002
169 #define PTRACE_O_TRACEVFORK 0x00000004
170 #define PTRACE_O_TRACECLONE 0x00000008
171 #define PTRACE_O_TRACEEXEC 0x00000010
172 #define PTRACE_O_TRACEVFORKDONE 0x00000020
173 #define PTRACE_O_TRACEEXIT 0x00000040
174
175 /* Wait extended result codes for the above trace options. */
176 #define PTRACE_EVENT_FORK 1
177 #define PTRACE_EVENT_VFORK 2
178 #define PTRACE_EVENT_CLONE 3
179 #define PTRACE_EVENT_EXEC 4
180 #define PTRACE_EVENT_VFORK_DONE 5
181 #define PTRACE_EVENT_EXIT 6
182
183 #endif /* PTRACE_EVENT_FORK */
184
185 /* We can't always assume that this flag is available, but all systems
186 with the ptrace event handlers also have __WALL, so it's safe to use
187 here. */
188 #ifndef __WALL
189 #define __WALL 0x40000000 /* Wait for any child. */
190 #endif
191
192 #ifndef PTRACE_GETSIGINFO
193 #define PTRACE_GETSIGINFO 0x4202
194 #endif
195
196 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
197 the use of the multi-threaded target. */
198 static struct target_ops *linux_ops;
199 static struct target_ops linux_ops_saved;
200
201 /* The method to call, if any, when a new thread is attached. */
202 static void (*linux_nat_new_thread) (ptid_t);
203
204 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
205 Called by our to_xfer_partial. */
206 static LONGEST (*super_xfer_partial) (struct target_ops *,
207 enum target_object,
208 const char *, gdb_byte *,
209 const gdb_byte *,
210 ULONGEST, LONGEST);
211
212 static int debug_linux_nat;
/* "show" callback for the "debug lin-lwp" setting: report the current
   value of the lwp-module debug flag to FILE.  VALUE is the
   already-formatted setting string supplied by the command machinery.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
220
221 static int debug_linux_nat_async = 0;
/* "show" callback for the "debug lin-lwp-async" setting: report the
   current value of the async lwp-module debug flag to FILE.  */
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}
229
230 static int disable_randomization = 1;
231
/* "show" callback for the "disable-randomization" setting.  When the
   host supports personality(2) (HAVE_PERSONALITY), report the current
   setting; otherwise state that the feature is unsupported here.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file, _("\
Disabling randomization of debuggee's virtual address space is %s.\n"),
		    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}
246
/* "set" callback for the "disable-randomization" setting.  The actual
   flag is stored by the command machinery; this hook only rejects the
   command outright on hosts without personality(2) support, where the
   setting could never take effect.  */
static void
set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform."));
#endif /* !HAVE_PERSONALITY */
}
256
257 static int linux_parent_pid;
258
/* A singly-linked list node recording a process that stopped before
   GDB was ready to report the event; see linux_record_stopped_pid.  */
struct simple_pid_list
{
  int pid;			/* Process/lwp id.  */
  int status;			/* Wait status as returned by waitpid.  */
  struct simple_pid_list *next;	/* Next recorded stop, or NULL.  */
};
265 struct simple_pid_list *stopped_pids;
266
267 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
268 can not be used, 1 if it can. */
269
270 static int linux_supports_tracefork_flag = -1;
271
272 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
273 PTRACE_O_TRACEVFORKDONE. */
274
275 static int linux_supports_tracevforkdone_flag = -1;
276
277 /* Async mode support */
278
279 /* Zero if the async mode, although enabled, is masked, which means
280 linux_nat_wait should behave as if async mode was off. */
281 static int linux_nat_async_mask_value = 1;
282
283 /* The read/write ends of the pipe registered as waitable file in the
284 event loop. */
285 static int linux_nat_event_pipe[2] = { -1, -1 };
286
287 /* Number of queued events in the pipe. */
288 static volatile int linux_nat_num_queued_events;
289
290 /* The possible SIGCHLD handling states. */
291
292 enum sigchld_state
293 {
294 /* SIGCHLD disabled, with action set to sigchld_handler, for the
295 sigsuspend in linux_nat_wait. */
296 sigchld_sync,
297 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
298 sigchld_async,
299 /* Set SIGCHLD to default action. Used while creating an
300 inferior. */
301 sigchld_default
302 };
303
304 /* The current SIGCHLD handling state. */
305 static enum sigchld_state linux_nat_async_events_state;
306
307 static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
308 static void pipe_to_local_event_queue (void);
309 static void local_event_queue_to_pipe (void);
310 static void linux_nat_event_pipe_push (int pid, int status, int options);
311 static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
312 static void linux_nat_set_async_mode (int on);
313 static void linux_nat_async (void (*callback)
314 (enum inferior_event_type event_type, void *context),
315 void *context);
316 static int linux_nat_async_mask (int mask);
317 static int kill_lwp (int lwpid, int signo);
318
319 static int send_sigint_callback (struct lwp_info *lp, void *data);
320 static int stop_callback (struct lwp_info *lp, void *data);
321
/* Captures the result of a successful waitpid call, along with the
   options used in that call.  Queued (FIFO) in waitpid_queue by the
   async SIGCHLD handler and consumed by queued_waitpid.  */
struct waitpid_result
{
  int pid;			/* Pid returned by waitpid.  */
  int status;			/* Its wait status.  */
  int options;			/* Options passed to that waitpid call
				   (used to tell clone events apart).  */
  struct waitpid_result *next;	/* Next queued event, or NULL.  */
};
331
332 /* A singly-linked list of the results of the waitpid calls performed
333 in the async SIGCHLD handler. */
334 static struct waitpid_result *waitpid_queue = NULL;
335
336 static int
337 queued_waitpid (int pid, int *status, int flags)
338 {
339 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
340
341 if (debug_linux_nat_async)
342 fprintf_unfiltered (gdb_stdlog,
343 "\
344 QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
345 linux_nat_async_events_state,
346 linux_nat_num_queued_events);
347
348 if (flags & __WALL)
349 {
350 for (; msg; prev = msg, msg = msg->next)
351 if (pid == -1 || pid == msg->pid)
352 break;
353 }
354 else if (flags & __WCLONE)
355 {
356 for (; msg; prev = msg, msg = msg->next)
357 if (msg->options & __WCLONE
358 && (pid == -1 || pid == msg->pid))
359 break;
360 }
361 else
362 {
363 for (; msg; prev = msg, msg = msg->next)
364 if ((msg->options & __WCLONE) == 0
365 && (pid == -1 || pid == msg->pid))
366 break;
367 }
368
369 if (msg)
370 {
371 int pid;
372
373 if (prev)
374 prev->next = msg->next;
375 else
376 waitpid_queue = msg->next;
377
378 msg->next = NULL;
379 if (status)
380 *status = msg->status;
381 pid = msg->pid;
382
383 if (debug_linux_nat_async)
384 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
385 pid, msg->status);
386 xfree (msg);
387
388 return pid;
389 }
390
391 if (debug_linux_nat_async)
392 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
393
394 if (status)
395 *status = 0;
396 return -1;
397 }
398
399 static void
400 push_waitpid (int pid, int status, int options)
401 {
402 struct waitpid_result *event, *new_event;
403
404 new_event = xmalloc (sizeof (*new_event));
405 new_event->pid = pid;
406 new_event->status = status;
407 new_event->options = options;
408 new_event->next = NULL;
409
410 if (waitpid_queue)
411 {
412 for (event = waitpid_queue;
413 event && event->next;
414 event = event->next)
415 ;
416
417 event->next = new_event;
418 }
419 else
420 waitpid_queue = new_event;
421 }
422
423 /* Drain all queued events of PID. If PID is -1, the effect is of
424 draining all events. */
425 static void
426 drain_queued_events (int pid)
427 {
428 while (queued_waitpid (pid, NULL, __WALL) != -1)
429 ;
430 }
431
432 \f
433 /* Trivial list manipulation functions to keep track of a list of
434 new stopped processes. */
435 static void
436 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
437 {
438 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
439 new_pid->pid = pid;
440 new_pid->status = status;
441 new_pid->next = *listp;
442 *listp = new_pid;
443 }
444
445 static int
446 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
447 {
448 struct simple_pid_list **p;
449
450 for (p = listp; *p != NULL; p = &(*p)->next)
451 if ((*p)->pid == pid)
452 {
453 struct simple_pid_list *next = (*p)->next;
454 *status = (*p)->status;
455 xfree (*p);
456 *p = next;
457 return 1;
458 }
459 return 0;
460 }
461
/* Record that PID stopped with wait status STATUS, so the stop can be
   reported when GDB later looks for it (see stopped_pids).  */
static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}
467
468 \f
469 /* A helper function for linux_test_for_tracefork, called after fork (). */
470
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: request tracing by the parent, stop so the parent
   can set ptrace options, fork once so the parent can observe (or fail
   to observe) a PTRACE_EVENT_FORK, then exit.  The unused local "ret"
   from the original has been removed.  */

static void
linux_tracefork_child (void)
{
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
481
482 /* Wrapper function for waitpid which handles EINTR, and checks for
483 locally queued events. */
484
485 static int
486 my_waitpid (int pid, int *status, int flags)
487 {
488 int ret;
489
490 /* There should be no concurrent calls to waitpid. */
491 gdb_assert (linux_nat_async_events_state == sigchld_sync);
492
493 ret = queued_waitpid (pid, status, flags);
494 if (ret != -1)
495 return ret;
496
497 do
498 {
499 ret = waitpid (pid, status, flags);
500 }
501 while (ret == -1 && errno == EINTR);
502
503 return ret;
504 }
505
506 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
507
508 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
509 we know that the feature is not available. This may change the tracing
510 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
511
512 However, if it succeeds, we don't know for sure that the feature is
513 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
514 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
515 fork tracing, and let it fork. If the process exits, we assume that we
516 can't use TRACEFORK; if we get the fork notification, and we can extract
517 the new child's PID, then we assume that we can. */
518
/* Probe the running kernel for PTRACE_O_TRACEFORK support, setting
   linux_supports_tracefork_flag and linux_supports_tracevforkdone_flag
   as described in the comment above.  ORIGINAL_PID must be a traced,
   stopped process on which PTRACE_SETOPTIONS may be tried.  SIGCHLD
   handling is forced to synchronous mode for the duration.  */
static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  enum sigchld_state async_events_original_state;

  async_events_original_state = linux_nat_async_events (sigchld_sync);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* If SETOPTIONS itself fails, the feature is certainly absent.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    /* NOTE(review): this early return leaves SIGCHLD handling in
       sigchld_sync rather than restoring the saved state, unlike all
       later exit paths -- verify whether that is intentional.  */
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  /* Wait for the child's self-inflicted SIGSTOP.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  /* An old kernel may accept unknown options silently; the real test
     is whether the fork below produces an event.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  linux_nat_async_events (async_events_original_state);
	  return;
	}

      /* Reap the killed child before giving up.  */
      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
		 "killed child"), status);

      linux_nat_async_events (async_events_original_state);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run to its fork call.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* A stop with PTRACE_EVENT_FORK in the high bits means fork tracing
     works -- provided we can also read the grandchild's pid.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* The grandchild is traced too; reap its stop, kill it, and
	     reap the kill.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	     "(%d, status 0x%x)"), ret, status);

  /* Clean up the probe child and restore SIGCHLD handling.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  linux_nat_async_events (async_events_original_state);
}
611
612 /* Return non-zero iff we have tracefork functionality available.
613 This function also sets linux_supports_tracefork_flag. */
614
615 static int
616 linux_supports_tracefork (int pid)
617 {
618 if (linux_supports_tracefork_flag == -1)
619 linux_test_for_tracefork (pid);
620 return linux_supports_tracefork_flag;
621 }
622
623 static int
624 linux_supports_tracevforkdone (int pid)
625 {
626 if (linux_supports_tracefork_flag == -1)
627 linux_test_for_tracefork (pid);
628 return linux_supports_tracevforkdone_flag;
629 }
630
631 \f
632 void
633 linux_enable_event_reporting (ptid_t ptid)
634 {
635 int pid = ptid_get_lwp (ptid);
636 int options;
637
638 if (pid == 0)
639 pid = ptid_get_pid (ptid);
640
641 if (! linux_supports_tracefork (pid))
642 return;
643
644 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
645 | PTRACE_O_TRACECLONE;
646 if (linux_supports_tracevforkdone (pid))
647 options |= PTRACE_O_TRACEVFORKDONE;
648
649 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
650 read-only process state. */
651
652 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
653 }
654
/* Target hook run after attaching to PID: enable ptrace event
   reporting on it and see whether libthread_db should take over.  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}
661
/* Target hook run after starting a new inferior (PTID): enable ptrace
   event reporting on it and see whether libthread_db should take
   over.  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}
668
669 static int
670 linux_child_follow_fork (struct target_ops *ops, int follow_child)
671 {
672 ptid_t last_ptid;
673 struct target_waitstatus last_status;
674 int has_vforked;
675 int parent_pid, child_pid;
676
677 if (target_can_async_p ())
678 target_async (NULL, 0);
679
680 get_last_target_status (&last_ptid, &last_status);
681 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
682 parent_pid = ptid_get_lwp (last_ptid);
683 if (parent_pid == 0)
684 parent_pid = ptid_get_pid (last_ptid);
685 child_pid = PIDGET (last_status.value.related_pid);
686
687 if (! follow_child)
688 {
689 /* We're already attached to the parent, by default. */
690
691 /* Before detaching from the child, remove all breakpoints from
692 it. (This won't actually modify the breakpoint list, but will
693 physically remove the breakpoints from the child.) */
694 /* If we vforked this will remove the breakpoints from the parent
695 also, but they'll be reinserted below. */
696 detach_breakpoints (child_pid);
697
698 /* Detach new forked process? */
699 if (detach_fork)
700 {
701 if (info_verbose || debug_linux_nat)
702 {
703 target_terminal_ours ();
704 fprintf_filtered (gdb_stdlog,
705 "Detaching after fork from child process %d.\n",
706 child_pid);
707 }
708
709 ptrace (PTRACE_DETACH, child_pid, 0, 0);
710 }
711 else
712 {
713 struct fork_info *fp;
714
715 /* Add process to GDB's tables. */
716 add_inferior (child_pid);
717
718 /* Retain child fork in ptrace (stopped) state. */
719 fp = find_fork_pid (child_pid);
720 if (!fp)
721 fp = add_fork (child_pid);
722 fork_save_infrun_state (fp, 0);
723 }
724
725 if (has_vforked)
726 {
727 gdb_assert (linux_supports_tracefork_flag >= 0);
728 if (linux_supports_tracevforkdone (0))
729 {
730 int status;
731
732 ptrace (PTRACE_CONT, parent_pid, 0, 0);
733 my_waitpid (parent_pid, &status, __WALL);
734 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
735 warning (_("Unexpected waitpid result %06x when waiting for "
736 "vfork-done"), status);
737 }
738 else
739 {
740 /* We can't insert breakpoints until the child has
741 finished with the shared memory region. We need to
742 wait until that happens. Ideal would be to just
743 call:
744 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
745 - waitpid (parent_pid, &status, __WALL);
746 However, most architectures can't handle a syscall
747 being traced on the way out if it wasn't traced on
748 the way in.
749
750 We might also think to loop, continuing the child
751 until it exits or gets a SIGTRAP. One problem is
752 that the child might call ptrace with PTRACE_TRACEME.
753
754 There's no simple and reliable way to figure out when
755 the vforked child will be done with its copy of the
756 shared memory. We could step it out of the syscall,
757 two instructions, let it go, and then single-step the
758 parent once. When we have hardware single-step, this
759 would work; with software single-step it could still
760 be made to work but we'd have to be able to insert
761 single-step breakpoints in the child, and we'd have
762 to insert -just- the single-step breakpoint in the
763 parent. Very awkward.
764
765 In the end, the best we can do is to make sure it
766 runs for a little while. Hopefully it will be out of
767 range of any breakpoints we reinsert. Usually this
768 is only the single-step breakpoint at vfork's return
769 point. */
770
771 usleep (10000);
772 }
773
774 /* Since we vforked, breakpoints were removed in the parent
775 too. Put them back. */
776 reattach_breakpoints (parent_pid);
777 }
778 }
779 else
780 {
781 struct thread_info *last_tp = find_thread_pid (last_ptid);
782 struct thread_info *tp;
783 char child_pid_spelling[40];
784
785 /* Copy user stepping state to the new inferior thread. */
786 struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
787 CORE_ADDR step_range_start = last_tp->step_range_start;
788 CORE_ADDR step_range_end = last_tp->step_range_end;
789 struct frame_id step_frame_id = last_tp->step_frame_id;
790
791 /* Otherwise, deleting the parent would get rid of this
792 breakpoint. */
793 last_tp->step_resume_breakpoint = NULL;
794
795 /* Needed to keep the breakpoint lists in sync. */
796 if (! has_vforked)
797 detach_breakpoints (child_pid);
798
799 /* Before detaching from the parent, remove all breakpoints from it. */
800 remove_breakpoints ();
801
802 if (info_verbose || debug_linux_nat)
803 {
804 target_terminal_ours ();
805 fprintf_filtered (gdb_stdlog,
806 "Attaching after fork to child process %d.\n",
807 child_pid);
808 }
809
810 /* If we're vforking, we may want to hold on to the parent until
811 the child exits or execs. At exec time we can remove the old
812 breakpoints from the parent and detach it; at exit time we
813 could do the same (or even, sneakily, resume debugging it - the
814 child's exec has failed, or something similar).
815
816 This doesn't clean up "properly", because we can't call
817 target_detach, but that's OK; if the current target is "child",
818 then it doesn't need any further cleanups, and lin_lwp will
819 generally not encounter vfork (vfork is defined to fork
820 in libpthread.so).
821
822 The holding part is very easy if we have VFORKDONE events;
823 but keeping track of both processes is beyond GDB at the
824 moment. So we don't expose the parent to the rest of GDB.
825 Instead we quietly hold onto it until such time as we can
826 safely resume it. */
827
828 if (has_vforked)
829 {
830 linux_parent_pid = parent_pid;
831 detach_inferior (parent_pid);
832 }
833 else if (!detach_fork)
834 {
835 struct fork_info *fp;
836 /* Retain parent fork in ptrace (stopped) state. */
837 fp = find_fork_pid (parent_pid);
838 if (!fp)
839 fp = add_fork (parent_pid);
840 fork_save_infrun_state (fp, 0);
841 }
842 else
843 target_detach (NULL, 0);
844
845 inferior_ptid = ptid_build (child_pid, child_pid, 0);
846 add_inferior (child_pid);
847
848 /* Reinstall ourselves, since we might have been removed in
849 target_detach (which does other necessary cleanup). */
850
851 push_target (ops);
852 linux_nat_switch_fork (inferior_ptid);
853 check_for_thread_db ();
854
855 tp = inferior_thread ();
856 tp->step_resume_breakpoint = step_resume_breakpoint;
857 tp->step_range_start = step_range_start;
858 tp->step_range_end = step_range_end;
859 tp->step_frame_id = step_frame_id;
860
861 /* Reset breakpoints in the child as appropriate. */
862 follow_inferior_reset_breakpoints ();
863 }
864
865 if (target_can_async_p ())
866 target_async (inferior_event_handler, 0);
867
868 return 0;
869 }
870
871 \f
/* Target hook for "catch fork": usable only when the kernel supports
   PTRACE_O_TRACEFORK; otherwise raise an error for PID.  */
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (linux_supports_tracefork (pid))
    return;

  error (_("Your system does not support fork catchpoints."));
}
878
/* Target hook for "catch vfork": usable only when the kernel supports
   PTRACE_O_TRACEFORK; otherwise raise an error for PID.  */
static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (linux_supports_tracefork (pid))
    return;

  error (_("Your system does not support vfork catchpoints."));
}
885
/* Target hook for "catch exec": usable only when the kernel supports
   PTRACE_O_TRACEFORK; otherwise raise an error for PID.  */
static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (linux_supports_tracefork (pid))
    return;

  error (_("Your system does not support exec catchpoints."));
}
892
893 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
894 are processes sharing the same VM space. A multi-threaded process
895 is basically a group of such processes. However, such a grouping
896 is almost entirely a user-space issue; the kernel doesn't enforce
897 such a grouping at all (this might change in the future). In
898 general, we'll rely on the threads library (i.e. the GNU/Linux
899 Threads library) to provide such a grouping.
900
901 It is perfectly well possible to write a multi-threaded application
902 without the assistance of a threads library, by using the clone
903 system call directly. This module should be able to give some
904 rudimentary support for debugging such applications if developers
905 specify the CLONE_PTRACE flag in the clone system call, and are
906 using the Linux kernel 2.4 or above.
907
908 Note that there are some peculiarities in GNU/Linux that affect
909 this code:
910
911 - In general one should specify the __WCLONE flag to waitpid in
912 order to make it report events for any of the cloned processes
913 (and leave it out for the initial process). However, if a cloned
914 process has exited the exit status is only reported if the
915 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
916 we cannot use it since GDB must work on older systems too.
917
918 - When a traced, cloned process exits and is waited for by the
919 debugger, the kernel reassigns it to the original parent and
920 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
921 library doesn't notice this, which leads to the "zombie problem":
922 When debugged a multi-threaded process that spawns a lot of
923 threads will run out of processes, even if the threads exit,
924 because the "zombies" stay around. */
925
926 /* List of known LWPs. */
927 struct lwp_info *lwp_list;
928
929 /* Number of LWPs in the list. */
930 static int num_lwps;
931 \f
932
933 /* Original signal mask. */
934 static sigset_t normal_mask;
935
936 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
937 _initialize_linux_nat. */
938 static sigset_t suspend_mask;
939
940 /* SIGCHLD action for synchronous mode. */
941 struct sigaction sync_sigchld_action;
942
943 /* SIGCHLD action for asynchronous mode. */
944 static struct sigaction async_sigchld_action;
945
946 /* SIGCHLD default action, to pass to new inferiors. */
947 static struct sigaction sigchld_default_action;
948 \f
949
950 /* Prototypes for local functions. */
951 static int stop_wait_callback (struct lwp_info *lp, void *data);
952 static int linux_nat_thread_alive (ptid_t ptid);
953 static char *linux_child_pid_to_exec_file (int pid);
954 static int cancel_breakpoint (struct lwp_info *lp);
955
956 \f
957 /* Convert wait status STATUS to a string. Used for printing debug
958 messages only. */
959
960 static char *
961 status_to_str (int status)
962 {
963 static char buf[64];
964
965 if (WIFSTOPPED (status))
966 snprintf (buf, sizeof (buf), "%s (stopped)",
967 strsignal (WSTOPSIG (status)));
968 else if (WIFSIGNALED (status))
969 snprintf (buf, sizeof (buf), "%s (terminated)",
970 strsignal (WSTOPSIG (status)));
971 else
972 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
973
974 return buf;
975 }
976
977 /* Initialize the list of LWPs. Note that this module, contrary to
978 what GDB's generic threads layer does for its thread list,
979 re-initializes the LWP lists whenever we mourn or detach (which
980 doesn't involve mourning) the inferior. */
981
982 static void
983 init_lwp_list (void)
984 {
985 struct lwp_info *lp, *lpnext;
986
987 for (lp = lwp_list; lp; lp = lpnext)
988 {
989 lpnext = lp->next;
990 xfree (lp);
991 }
992
993 lwp_list = NULL;
994 num_lwps = 0;
995 }
996
997 /* Add the LWP specified by PID to the list. Return a pointer to the
998 structure describing the new LWP. The LWP should already be stopped
999 (with an exception for the very first LWP). */
1000
1001 static struct lwp_info *
1002 add_lwp (ptid_t ptid)
1003 {
1004 struct lwp_info *lp;
1005
1006 gdb_assert (is_lwp (ptid));
1007
1008 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1009
1010 memset (lp, 0, sizeof (struct lwp_info));
1011
1012 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1013
1014 lp->ptid = ptid;
1015
1016 lp->next = lwp_list;
1017 lwp_list = lp;
1018 ++num_lwps;
1019
1020 if (num_lwps > 1 && linux_nat_new_thread != NULL)
1021 linux_nat_new_thread (ptid);
1022
1023 return lp;
1024 }
1025
1026 /* Remove the LWP specified by PID from the list. */
1027
1028 static void
1029 delete_lwp (ptid_t ptid)
1030 {
1031 struct lwp_info *lp, *lpprev;
1032
1033 lpprev = NULL;
1034
1035 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1036 if (ptid_equal (lp->ptid, ptid))
1037 break;
1038
1039 if (!lp)
1040 return;
1041
1042 num_lwps--;
1043
1044 if (lpprev)
1045 lpprev->next = lp->next;
1046 else
1047 lwp_list = lp->next;
1048
1049 xfree (lp);
1050 }
1051
1052 /* Return a pointer to the structure describing the LWP corresponding
1053 to PID. If no corresponding LWP could be found, return NULL. */
1054
1055 static struct lwp_info *
1056 find_lwp_pid (ptid_t ptid)
1057 {
1058 struct lwp_info *lp;
1059 int lwp;
1060
1061 if (is_lwp (ptid))
1062 lwp = GET_LWP (ptid);
1063 else
1064 lwp = GET_PID (ptid);
1065
1066 for (lp = lwp_list; lp; lp = lp->next)
1067 if (lwp == GET_LWP (lp->ptid))
1068 return lp;
1069
1070 return NULL;
1071 }
1072
1073 /* Call CALLBACK with its second argument set to DATA for every LWP in
1074 the list. If CALLBACK returns 1 for a particular LWP, return a
1075 pointer to the structure describing that LWP immediately.
1076 Otherwise return NULL. */
1077
1078 struct lwp_info *
1079 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
1080 {
1081 struct lwp_info *lp, *lpnext;
1082
1083 for (lp = lwp_list; lp; lp = lpnext)
1084 {
1085 lpnext = lp->next;
1086 if ((*callback) (lp, data))
1087 return lp;
1088 }
1089
1090 return NULL;
1091 }
1092
1093 /* Update our internal state when changing from one fork (checkpoint,
1094 et cetera) to another indicated by NEW_PTID. We can only switch
1095 single-threaded applications, so we only create one new LWP, and
1096 the previous list is discarded. */
1097
1098 void
1099 linux_nat_switch_fork (ptid_t new_ptid)
1100 {
1101 struct lwp_info *lp;
1102
1103 init_lwp_list ();
1104 lp = add_lwp (new_ptid);
1105 lp->stopped = 1;
1106
1107 init_thread_list ();
1108 add_thread_silent (new_ptid);
1109 }
1110
1111 /* Handle the exit of a single thread LP. */
1112
1113 static void
1114 exit_lwp (struct lwp_info *lp)
1115 {
1116 struct thread_info *th = find_thread_pid (lp->ptid);
1117
1118 if (th)
1119 {
1120 if (print_thread_events)
1121 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1122
1123 delete_thread (lp->ptid);
1124 }
1125
1126 delete_lwp (lp->ptid);
1127 }
1128
/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false,
   as is any failure to read the status file.  */

static int
pid_is_stopped (pid_t pid)
{
  char line[100];
  FILE *proc_file;
  int stopped = 0;

  snprintf (line, sizeof (line), "/proc/%d/status", (int) pid);
  proc_file = fopen (line, "r");
  if (proc_file == NULL)
    return 0;

  /* Scan for the "State:" line; only its contents matter.  */
  while (fgets (line, sizeof (line), proc_file) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	if (strstr (line, "T (stopped)") != NULL)
	  stopped = 1;
	break;
      }

  fclose (proc_file);
  return stopped;
}
1159
/* Wait for the LWP specified by PTID, which we have just attached to.
   Returns a wait status for that LWP, to cache, leaving the LWP in a
   ptrace stop.  FIRST is non-zero for the first (main) LWP of the
   process; it only controls a diagnostic below.  On return *CLONED is
   set if the LWP had to be waited for with __WCLONE, and *SIGNALLED
   is set if the initial stop was caused by a signal other than the
   expected SIGSTOP.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = GET_LWP (ptid);
  int status;

  if (pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid && WIFSTOPPED (status));

  if (WSTOPSIG (status) != SIGSTOP)
    {
      /* The LWP stopped with some other signal first; let the caller
	 know so it can report it instead of swallowing it.  */
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}
1223
/* Attach to the LWP specified by PID.  Return 0 if successful or -1
   if the new LWP could not be attached.  SIGCHLD events are forced
   into synchronous mode for the duration, and the previous mode is
   restored before returning.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  enum sigchld_state async_events_original_state;

  gdb_assert (is_lwp (ptid));

  /* Block async SIGCHLD handling while we attach and wait.  */
  async_events_original_state = linux_nat_async_events (sigchld_sync);

  lp = find_lwp_pid (ptid);

  /* We assume that we're already attached to any LWP that has an id
     equal to the overall process id, and to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
	{
	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->cloned = cloned;
      lp->signalled = signalled;
      /* If the initial stop was not the expected SIGSTOP, cache the
	 status so the event can be reported to the user later.  */
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (GET_LWP (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }
  else
    {
      /* We assume that the LWP representing the original process is
         already stopped.  Mark it as stopped in the data structure
         that the GNU/linux ptrace layer uses to keep track of
         threads.  Note that this won't have already been done since
         the main thread will have, we assume, been stopped by an
         attach from a different layer.  */
      if (lp == NULL)
	lp = add_lwp (ptid);
      lp->stopped = 1;
    }

  /* Restore whatever SIGCHLD mode was in force on entry.  */
  linux_nat_async_events (async_events_original_state);
  return 0;
}
1303
/* target_ops to_create_inferior implementation: start EXEC_FILE with
   arguments ALLARGS and environment ENV, temporarily masking async
   mode (or restoring the original signal mask when async is
   unavailable), and optionally disabling address space randomization
   for the child.  */

static void
linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
			   int from_tty)
{
  int saved_async = 0;
#ifdef HAVE_PERSONALITY
  int personality_orig = 0, personality_set = 0;
#endif /* HAVE_PERSONALITY */

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  if (target_can_async_p ())
    /* Mask async mode.  Creating a child requires a loop calling
       wait_for_inferior currently.  */
    saved_async = linux_nat_async_mask (0);
  else
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  /* Set SIGCHLD to the default action, until after execing the child,
     since the inferior inherits the superior's signal mask.  It will
     be blocked again in linux_nat_wait, which is only reached after
     the inferior execing.  */
  linux_nat_async_events (sigchld_default);

#ifdef HAVE_PERSONALITY
  if (disable_randomization)
    {
      /* Query the current personality, turn on ADDR_NO_RANDOMIZE, and
	 verify the change took effect.  */
      errno = 0;
      personality_orig = personality (0xffffffff);
      if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
	{
	  personality_set = 1;
	  personality (personality_orig | ADDR_NO_RANDOMIZE);
	}
      if (errno != 0 || (personality_set
			 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
	warning (_("Error disabling address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);

#ifdef HAVE_PERSONALITY
  if (personality_set)
    {
      /* Restore GDB's own personality; the child inherited the
	 modified one across the fork above.  */
      errno = 0;
      personality (personality_orig);
      if (errno != 0)
	warning (_("Error restoring address space randomization: %s"),
		 safe_strerror (errno));
    }
#endif /* HAVE_PERSONALITY */

  if (saved_async)
    linux_nat_async_mask (saved_async);
}
1368
/* target_ops to_attach implementation: attach to the process named in
   ARGS via the lower layer, then wait for its initial stop, record
   the main LWP in our list, and save the wait status for later
   reporting (cached on the LWP in sync mode, pushed on the event pipe
   in async mode).  */

static void
linux_nat_attach (char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* FIXME: We should probably accept a list of process id's, and
     attach all of them.  */
  linux_ops->to_attach (args, from_tty);

  if (!target_can_async_p ())
    {
      /* Restore the original signal mask.  */
      sigprocmask (SIG_SETMASK, &normal_mask, NULL);
      /* Make sure we don't block SIGCHLD during a sigsuspend.  */
      suspend_mask = normal_mask;
      sigdelset (&suspend_mask, SIGCHLD);
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) GET_PID (lp->ptid), status_to_str (status));

  if (!target_can_async_p ())
    lp->status = status;
  else
    {
      /* We already waited for this LWP, so put the wait result on the
	 pipe.  The event loop will wake up and gets us to handling
	 this event.  */
      linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
				 lp->cloned ? __WCLONE : 0);
      /* Register in the event loop.  */
      target_async (inferior_event_handler, 0);
    }
}
1421
1422 /* Get pending status of LP. */
1423 static int
1424 get_pending_status (struct lwp_info *lp, int *status)
1425 {
1426 struct target_waitstatus last;
1427 ptid_t last_ptid;
1428
1429 get_last_target_status (&last_ptid, &last);
1430
1431 /* If this lwp is the ptid that GDB is processing an event from, the
1432 signal will be in stop_signal. Otherwise, in all-stop + sync
1433 mode, we may cache pending events in lp->status while trying to
1434 stop all threads (see stop_wait_callback). In async mode, the
1435 events are always cached in waitpid_queue. */
1436
1437 *status = 0;
1438
1439 if (non_stop)
1440 {
1441 enum target_signal signo = TARGET_SIGNAL_0;
1442
1443 if (is_executing (lp->ptid))
1444 {
1445 /* If the core thought this lwp was executing --- e.g., the
1446 executing property hasn't been updated yet, but the
1447 thread has been stopped with a stop_callback /
1448 stop_wait_callback sequence (see linux_nat_detach for
1449 example) --- we can only have pending events in the local
1450 queue. */
1451 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1452 {
1453 if (WIFSTOPPED (status))
1454 signo = target_signal_from_host (WSTOPSIG (status));
1455
1456 /* If not stopped, then the lwp is gone, no use in
1457 resending a signal. */
1458 }
1459 }
1460 else
1461 {
1462 /* If the core knows the thread is not executing, then we
1463 have the last signal recorded in
1464 thread_info->stop_signal. */
1465
1466 struct thread_info *tp = find_thread_pid (lp->ptid);
1467 signo = tp->stop_signal;
1468 }
1469
1470 if (signo != TARGET_SIGNAL_0
1471 && !signal_pass_state (signo))
1472 {
1473 if (debug_linux_nat)
1474 fprintf_unfiltered (gdb_stdlog, "\
1475 GPT: lwp %s had signal %s, but it is in no pass state\n",
1476 target_pid_to_str (lp->ptid),
1477 target_signal_to_string (signo));
1478 }
1479 else
1480 {
1481 if (signo != TARGET_SIGNAL_0)
1482 *status = W_STOPCODE (target_signal_to_host (signo));
1483
1484 if (debug_linux_nat)
1485 fprintf_unfiltered (gdb_stdlog,
1486 "GPT: lwp %s as pending signal %s\n",
1487 target_pid_to_str (lp->ptid),
1488 target_signal_to_string (signo));
1489 }
1490 }
1491 else
1492 {
1493 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1494 {
1495 struct thread_info *tp = find_thread_pid (lp->ptid);
1496 if (tp->stop_signal != TARGET_SIGNAL_0
1497 && signal_pass_state (tp->stop_signal))
1498 *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
1499 }
1500 else if (target_can_async_p ())
1501 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1502 else
1503 *status = lp->status;
1504 }
1505
1506 return 0;
1507 }
1508
1509 static int
1510 detach_callback (struct lwp_info *lp, void *data)
1511 {
1512 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1513
1514 if (debug_linux_nat && lp->status)
1515 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1516 strsignal (WSTOPSIG (lp->status)),
1517 target_pid_to_str (lp->ptid));
1518
1519 /* If there is a pending SIGSTOP, get rid of it. */
1520 if (lp->signalled)
1521 {
1522 if (debug_linux_nat)
1523 fprintf_unfiltered (gdb_stdlog,
1524 "DC: Sending SIGCONT to %s\n",
1525 target_pid_to_str (lp->ptid));
1526
1527 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1528 lp->signalled = 0;
1529 }
1530
1531 /* We don't actually detach from the LWP that has an id equal to the
1532 overall process id just yet. */
1533 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1534 {
1535 int status = 0;
1536
1537 /* Pass on any pending signal for this LWP. */
1538 get_pending_status (lp, &status);
1539
1540 errno = 0;
1541 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1542 WSTOPSIG (status)) < 0)
1543 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1544 safe_strerror (errno));
1545
1546 if (debug_linux_nat)
1547 fprintf_unfiltered (gdb_stdlog,
1548 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1549 target_pid_to_str (lp->ptid),
1550 strsignal (WSTOPSIG (lp->status)));
1551
1552 delete_lwp (lp->ptid);
1553 }
1554
1555 return 0;
1556 }
1557
1558 static void
1559 linux_nat_detach (char *args, int from_tty)
1560 {
1561 int pid;
1562 int status;
1563 enum target_signal sig;
1564
1565 if (target_can_async_p ())
1566 linux_nat_async (NULL, 0);
1567
1568 /* Stop all threads before detaching. ptrace requires that the
1569 thread is stopped to sucessfully detach. */
1570 iterate_over_lwps (stop_callback, NULL);
1571 /* ... and wait until all of them have reported back that
1572 they're no longer running. */
1573 iterate_over_lwps (stop_wait_callback, NULL);
1574
1575 iterate_over_lwps (detach_callback, NULL);
1576
1577 /* Only the initial process should be left right now. */
1578 gdb_assert (num_lwps == 1);
1579
1580 /* Pass on any pending signal for the last LWP. */
1581 if ((args == NULL || *args == '\0')
1582 && get_pending_status (lwp_list, &status) != -1
1583 && WIFSTOPPED (status))
1584 {
1585 /* Put the signal number in ARGS so that inf_ptrace_detach will
1586 pass it along with PTRACE_DETACH. */
1587 args = alloca (8);
1588 sprintf (args, "%d", (int) WSTOPSIG (status));
1589 fprintf_unfiltered (gdb_stdlog,
1590 "LND: Sending signal %s to %s\n",
1591 args,
1592 target_pid_to_str (lwp_list->ptid));
1593 }
1594
1595 /* Destroy LWP info; it's no longer valid. */
1596 init_lwp_list ();
1597
1598 pid = GET_PID (inferior_ptid);
1599 inferior_ptid = pid_to_ptid (pid);
1600 linux_ops->to_detach (args, from_tty);
1601
1602 if (target_can_async_p ())
1603 drain_queued_events (pid);
1604 }
1605
1606 /* Resume LP. */
1607
1608 static int
1609 resume_callback (struct lwp_info *lp, void *data)
1610 {
1611 if (lp->stopped && lp->status == 0)
1612 {
1613 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1614 0, TARGET_SIGNAL_0);
1615 if (debug_linux_nat)
1616 fprintf_unfiltered (gdb_stdlog,
1617 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1618 target_pid_to_str (lp->ptid));
1619 lp->stopped = 0;
1620 lp->step = 0;
1621 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1622 }
1623 else if (lp->stopped && debug_linux_nat)
1624 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
1625 target_pid_to_str (lp->ptid));
1626 else if (debug_linux_nat)
1627 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
1628 target_pid_to_str (lp->ptid));
1629
1630 return 0;
1631 }
1632
/* Callback for iterate_over_lwps: mark LP as not resumed.  Always
   returns 0 so the iteration visits every LWP.  */

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  return 0;
}
1639
/* Callback for iterate_over_lwps: mark LP as resumed.  Always returns
   0 so the iteration visits every LWP.  */

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  return 0;
}
1646
/* target_ops to_resume implementation: resume (or single-step, if
   STEP) the LWP identified by PTID, optionally delivering signal
   SIGNO.  A PTID with pid -1 means resume the current inferior and,
   in all-stop mode, all its sibling LWPs.  If the LWP has a cached
   wait status that must be reported, the resume is short-circuited.  */

static void
linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
{
  struct lwp_info *lp;
  int resume_all;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0",
			target_pid_to_str (inferior_ptid));

  if (target_can_async_p ())
    /* Block events while we're here.  */
    linux_nat_async_events (sigchld_sync);

  /* A specific PTID means `step only this process id'.  */
  resume_all = (PIDGET (ptid) == -1);

  if (non_stop && resume_all)
    internal_error (__FILE__, __LINE__,
		    "can't resume all in non-stop mode");

  if (!non_stop)
    {
      if (resume_all)
	iterate_over_lwps (resume_set_callback, NULL);
      else
	iterate_over_lwps (resume_clear_callback, NULL);
    }

  /* If PID is -1, it's the current inferior that should be
     handled specially.  */
  if (PIDGET (ptid) == -1)
    ptid = inferior_ptid;

  lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Convert to something the lower layer understands.  */
  ptid = pid_to_ptid (GET_LWP (lp->ptid));

  /* Remember if we're stepping.  */
  lp->step = step;

  /* Mark this LWP as resumed.  */
  lp->resumed = 1;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  /* In async mode, we never have pending wait status.  */
  if (target_can_async_p () && lp->status)
    internal_error (__FILE__, __LINE__, "Pending status in async mode");

  if (lp->status && WIFSTOPPED (lp->status))
    {
      int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));

      /* If the cached stop signal is one the user neither stops at,
	 prints, nor blocks, consume it here and deliver it to the
	 inferior rather than short-circuiting.  */
      if (signal_stop_state (saved_signo) == 0
	  && signal_print_state (saved_signo) == 0
	  && signal_pass_state (saved_signo) == 1)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == TARGET_SIGNAL_0);
	  signo = saved_signo;
	  lp->status = 0;
	}
    }

  if (lp->status)
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == TARGET_SIGNAL_0);

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLR: Short circuiting for status 0x%x\n",
			    lp->status);

      return;
    }

  /* Mark LWP as not stopped to prevent it from being continued by
     resume_callback.  */
  lp->stopped = 0;

  if (resume_all)
    iterate_over_lwps (resume_callback, NULL);

  linux_ops->to_resume (ptid, step, signo);
  /* The saved siginfo belongs to the stop we're leaving behind.  */
  memset (&lp->siginfo, 0, sizeof (lp->siginfo));

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: %s %s, %s (resume event thread)\n",
			step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			target_pid_to_str (ptid),
			signo ? strsignal (signo) : "0");

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}
1764
/* Deliver signal SIGNO to the LWP with id LWPID.  */

/* Set once the tkill syscall has been seen to fail with ENOSYS, so we
   fall back to kill from then on.  */
static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

  /* Use tkill, if possible, in case we are using nptl threads.  If tkill
     fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int result = syscall (__NR_tkill, lwpid, signo);

      if (errno != ENOSYS)
	return result;

      /* The kernel lacks tkill; remember that and don't retry.  */
      tkill_failed = 1;
      errno = 0;
    }
#endif

  return kill (lwpid, signo);
}
1790
1791 /* Handle a GNU/Linux extended wait response. If we see a clone
1792 event, we need to add the new LWP to our list (and not report the
1793 trap to higher layers). This function returns non-zero if the
1794 event should be ignored and we should wait again. If STOPPING is
1795 true, the new LWP remains stopped, otherwise it is continued. */
1796
1797 static int
1798 linux_handle_extended_wait (struct lwp_info *lp, int status,
1799 int stopping)
1800 {
1801 int pid = GET_LWP (lp->ptid);
1802 struct target_waitstatus *ourstatus = &lp->waitstatus;
1803 struct lwp_info *new_lp = NULL;
1804 int event = status >> 16;
1805
1806 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1807 || event == PTRACE_EVENT_CLONE)
1808 {
1809 unsigned long new_pid;
1810 int ret;
1811
1812 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1813
1814 /* If we haven't already seen the new PID stop, wait for it now. */
1815 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1816 {
1817 /* The new child has a pending SIGSTOP. We can't affect it until it
1818 hits the SIGSTOP, but we're already attached. */
1819 ret = my_waitpid (new_pid, &status,
1820 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1821 if (ret == -1)
1822 perror_with_name (_("waiting for new child"));
1823 else if (ret != new_pid)
1824 internal_error (__FILE__, __LINE__,
1825 _("wait returned unexpected PID %d"), ret);
1826 else if (!WIFSTOPPED (status))
1827 internal_error (__FILE__, __LINE__,
1828 _("wait returned unexpected status 0x%x"), status);
1829 }
1830
1831 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1832
1833 if (event == PTRACE_EVENT_FORK)
1834 ourstatus->kind = TARGET_WAITKIND_FORKED;
1835 else if (event == PTRACE_EVENT_VFORK)
1836 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1837 else
1838 {
1839 struct cleanup *old_chain;
1840
1841 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1842 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1843 new_lp->cloned = 1;
1844 new_lp->stopped = 1;
1845
1846 if (WSTOPSIG (status) != SIGSTOP)
1847 {
1848 /* This can happen if someone starts sending signals to
1849 the new thread before it gets a chance to run, which
1850 have a lower number than SIGSTOP (e.g. SIGUSR1).
1851 This is an unlikely case, and harder to handle for
1852 fork / vfork than for clone, so we do not try - but
1853 we handle it for clone events here. We'll send
1854 the other signal on to the thread below. */
1855
1856 new_lp->signalled = 1;
1857 }
1858 else
1859 status = 0;
1860
1861 if (non_stop)
1862 {
1863 /* Add the new thread to GDB's lists as soon as possible
1864 so that:
1865
1866 1) the frontend doesn't have to wait for a stop to
1867 display them, and,
1868
1869 2) we tag it with the correct running state. */
1870
1871 /* If the thread_db layer is active, let it know about
1872 this new thread, and add it to GDB's list. */
1873 if (!thread_db_attach_lwp (new_lp->ptid))
1874 {
1875 /* We're not using thread_db. Add it to GDB's
1876 list. */
1877 target_post_attach (GET_LWP (new_lp->ptid));
1878 add_thread (new_lp->ptid);
1879 }
1880
1881 if (!stopping)
1882 {
1883 set_running (new_lp->ptid, 1);
1884 set_executing (new_lp->ptid, 1);
1885 }
1886 }
1887
1888 if (!stopping)
1889 {
1890 new_lp->stopped = 0;
1891 new_lp->resumed = 1;
1892 ptrace (PTRACE_CONT, new_pid, 0,
1893 status ? WSTOPSIG (status) : 0);
1894 }
1895
1896 if (debug_linux_nat)
1897 fprintf_unfiltered (gdb_stdlog,
1898 "LHEW: Got clone event from LWP %ld, resuming\n",
1899 GET_LWP (lp->ptid));
1900 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1901
1902 return 1;
1903 }
1904
1905 return 0;
1906 }
1907
1908 if (event == PTRACE_EVENT_EXEC)
1909 {
1910 ourstatus->kind = TARGET_WAITKIND_EXECD;
1911 ourstatus->value.execd_pathname
1912 = xstrdup (linux_child_pid_to_exec_file (pid));
1913
1914 if (linux_parent_pid)
1915 {
1916 detach_breakpoints (linux_parent_pid);
1917 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1918
1919 linux_parent_pid = 0;
1920 }
1921
1922 /* At this point, all inserted breakpoints are gone. Doing this
1923 as soon as we detect an exec prevents the badness of deleting
1924 a breakpoint writing the current "shadow contents" to lift
1925 the bp. That shadow is NOT valid after an exec.
1926
1927 Note that we have to do this after the detach_breakpoints
1928 call above, otherwise breakpoints wouldn't be lifted from the
1929 parent on a vfork, because detach_breakpoints would think
1930 that breakpoints are not inserted. */
1931 mark_breakpoints_out ();
1932 return 0;
1933 }
1934
1935 internal_error (__FILE__, __LINE__,
1936 _("unknown ptrace event %d"), event);
1937 }
1938
1939 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1940 exited. */
1941
1942 static int
1943 wait_lwp (struct lwp_info *lp)
1944 {
1945 pid_t pid;
1946 int status;
1947 int thread_dead = 0;
1948
1949 gdb_assert (!lp->stopped);
1950 gdb_assert (lp->status == 0);
1951
1952 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
1953 if (pid == -1 && errno == ECHILD)
1954 {
1955 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
1956 if (pid == -1 && errno == ECHILD)
1957 {
1958 /* The thread has previously exited. We need to delete it
1959 now because, for some vendor 2.4 kernels with NPTL
1960 support backported, there won't be an exit event unless
1961 it is the main thread. 2.6 kernels will report an exit
1962 event for each thread that exits, as expected. */
1963 thread_dead = 1;
1964 if (debug_linux_nat)
1965 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1966 target_pid_to_str (lp->ptid));
1967 }
1968 }
1969
1970 if (!thread_dead)
1971 {
1972 gdb_assert (pid == GET_LWP (lp->ptid));
1973
1974 if (debug_linux_nat)
1975 {
1976 fprintf_unfiltered (gdb_stdlog,
1977 "WL: waitpid %s received %s\n",
1978 target_pid_to_str (lp->ptid),
1979 status_to_str (status));
1980 }
1981 }
1982
1983 /* Check if the thread has exited. */
1984 if (WIFEXITED (status) || WIFSIGNALED (status))
1985 {
1986 thread_dead = 1;
1987 if (debug_linux_nat)
1988 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1989 target_pid_to_str (lp->ptid));
1990 }
1991
1992 if (thread_dead)
1993 {
1994 exit_lwp (lp);
1995 return 0;
1996 }
1997
1998 gdb_assert (WIFSTOPPED (status));
1999
2000 /* Handle GNU/Linux's extended waitstatus for trace events. */
2001 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2002 {
2003 if (debug_linux_nat)
2004 fprintf_unfiltered (gdb_stdlog,
2005 "WL: Handling extended status 0x%06x\n",
2006 status);
2007 if (linux_handle_extended_wait (lp, status, 1))
2008 return wait_lwp (lp);
2009 }
2010
2011 return status;
2012 }
2013
/* Save the most recent siginfo for LP.  This is currently only called
   for SIGTRAP; some ports use the si_addr field for
   target_stopped_data_address.  In the future, it may also be used to
   restore the siginfo of requeued signals.  */

static void
save_siginfo (struct lwp_info *lp)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
	  (PTRACE_TYPE_ARG3) 0, &lp->siginfo);

  if (errno != 0)
    /* If the kernel can't provide the siginfo, don't leave stale data
       from a previous stop behind.  */
    memset (&lp->siginfo, 0, sizeof (lp->siginfo));
}
2029
2030 /* Send a SIGSTOP to LP. */
2031
2032 static int
2033 stop_callback (struct lwp_info *lp, void *data)
2034 {
2035 if (!lp->stopped && !lp->signalled)
2036 {
2037 int ret;
2038
2039 if (debug_linux_nat)
2040 {
2041 fprintf_unfiltered (gdb_stdlog,
2042 "SC: kill %s **<SIGSTOP>**\n",
2043 target_pid_to_str (lp->ptid));
2044 }
2045 errno = 0;
2046 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2047 if (debug_linux_nat)
2048 {
2049 fprintf_unfiltered (gdb_stdlog,
2050 "SC: lwp kill %d %s\n",
2051 ret,
2052 errno ? safe_strerror (errno) : "ERRNO-OK");
2053 }
2054
2055 lp->signalled = 1;
2056 gdb_assert (lp->status == 0);
2057 }
2058
2059 return 0;
2060 }
2061
2062 /* Return non-zero if LWP PID has a pending SIGINT. */
2063
2064 static int
2065 linux_nat_has_pending_sigint (int pid)
2066 {
2067 sigset_t pending, blocked, ignored;
2068 int i;
2069
2070 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2071
2072 if (sigismember (&pending, SIGINT)
2073 && !sigismember (&ignored, SIGINT))
2074 return 1;
2075
2076 return 0;
2077 }
2078
2079 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2080
2081 static int
2082 set_ignore_sigint (struct lwp_info *lp, void *data)
2083 {
2084 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2085 flag to consume the next one. */
2086 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2087 && WSTOPSIG (lp->status) == SIGINT)
2088 lp->status = 0;
2089 else
2090 lp->ignore_sigint = 1;
2091
2092 return 0;
2093 }
2094
2095 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2096 This function is called after we know the LWP has stopped; if the LWP
2097 stopped before the expected SIGINT was delivered, then it will never have
2098 arrived. Also, if the signal was delivered to a shared queue and consumed
2099 by a different thread, it will never be delivered to this LWP. */
2100
2101 static void
2102 maybe_clear_ignore_sigint (struct lwp_info *lp)
2103 {
2104 if (!lp->ignore_sigint)
2105 return;
2106
2107 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2108 {
2109 if (debug_linux_nat)
2110 fprintf_unfiltered (gdb_stdlog,
2111 "MCIS: Clearing bogus flag for %s\n",
2112 target_pid_to_str (lp->ptid));
2113 lp->ignore_sigint = 0;
2114 }
2115 }
2116
2117 /* Wait until LP is stopped. */
2118
static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      if (status == 0)
	/* The LWP is gone; wait_lwp already cleaned it up.  */
	return 0;

      /* The LWP stopped with a SIGINT that we were told to swallow
	 (it was already displayed for another thread).  Discard it
	 and recurse to keep waiting for the expected SIGSTOP.  */
      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, NULL);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  if (WSTOPSIG (status) == SIGTRAP)
	    {
	      /* If a LWP other than the LWP that we're reporting an
		 event for has hit a GDB breakpoint (as opposed to
		 some random trap signal), then just arrange for it to
		 hit it again later.  We don't keep the SIGTRAP status
		 and don't forward the SIGTRAP signal to the LWP.  We
		 will handle the current event, eventually we will
		 resume all LWPs, and this one will get its breakpoint
		 trap again.

		 If we do not do this, then we run the risk that the
		 user will delete or disable the breakpoint, but the
		 thread will have already tripped on it.  */

	      /* Save the trap's siginfo in case we need it later.  */
	      save_siginfo (lp);

	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "PTRACE_CONT %s, 0, 0 (%s)\n",
				      target_pid_to_str (lp->ptid),
				      errno ? safe_strerror (errno) : "OK");

		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Candidate SIGTRAP event in %s\n",
				      target_pid_to_str (lp->ptid));
		}
	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP).  */
	      stop_wait_callback (lp, NULL);

	      if (target_can_async_p ())
		{
		  /* Don't leave a pending wait status in async mode.
		     Retrigger the breakpoint.  */
		  if (!cancel_breakpoint (lp))
		    {
		      /* There was no gdb breakpoint set at pc.  Put
			 the event back in the queue.  */
		      if (debug_linux_nat)
			fprintf_unfiltered (gdb_stdlog,
					    "SWC: kill %s, %s\n",
					    target_pid_to_str (lp->ptid),
					    status_to_str ((int) status));
		      kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
		    }
		}
	      else
		{
		  /* Hold the SIGTRAP for handling by
		     linux_nat_wait.  */
		  /* If there's another event, throw it back into the
		     queue.  */
		  if (lp->status)
		    {
		      if (debug_linux_nat)
			fprintf_unfiltered (gdb_stdlog,
					    "SWC: kill %s, %s\n",
					    target_pid_to_str (lp->ptid),
					    status_to_str ((int) status));
		      /* NOTE: the signal re-queued here is taken from
			 the previously saved LP->STATUS, not from the
			 STATUS just received.  */
		      kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
		    }
		  /* Save the sigtrap event.  */
		  lp->status = status;
		}
	      return 0;
	    }
	  else
	    {
	      /* The thread was stopped with a signal other than
		 SIGSTOP, and didn't accidentally trip a breakpoint.  */

	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Pending event %s in %s\n",
				      status_to_str ((int) status),
				      target_pid_to_str (lp->ptid));
		}
	      /* Now resume this LWP and get the SIGSTOP event.  */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
				    target_pid_to_str (lp->ptid),
				    errno ? safe_strerror (errno) : "OK");

	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP).  */
	      stop_wait_callback (lp, NULL);

	      /* If the lp->status field is still empty, use it to
		 hold this event.  If not, then this event must be
		 returned to the event queue of the LWP.  */
	      if (lp->status || target_can_async_p ())
		{
		  if (debug_linux_nat)
		    {
		      fprintf_unfiltered (gdb_stdlog,
					  "SWC: kill %s, %s\n",
					  target_pid_to_str (lp->ptid),
					  status_to_str ((int) status));
		    }
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
		}
	      else
		lp->status = status;
	      return 0;
	    }
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */
	  lp->stopped = 1;
	  lp->signalled = 0;
	}
    }

  return 0;
}
2277
2278 /* Return non-zero if LP has a wait status pending. */
2279
2280 static int
2281 status_callback (struct lwp_info *lp, void *data)
2282 {
2283 /* Only report a pending wait status if we pretend that this has
2284 indeed been resumed. */
2285 return (lp->status != 0 && lp->resumed);
2286 }
2287
2288 /* Return non-zero if LP isn't stopped. */
2289
2290 static int
2291 running_callback (struct lwp_info *lp, void *data)
2292 {
2293 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2294 }
2295
2296 /* Count the LWP's that have had events. */
2297
2298 static int
2299 count_events_callback (struct lwp_info *lp, void *data)
2300 {
2301 int *count = data;
2302
2303 gdb_assert (count != NULL);
2304
2305 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2306 if (lp->status != 0 && lp->resumed
2307 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2308 (*count)++;
2309
2310 return 0;
2311 }
2312
2313 /* Select the LWP (if any) that is currently being single-stepped. */
2314
2315 static int
2316 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2317 {
2318 if (lp->step && lp->status != 0)
2319 return 1;
2320 else
2321 return 0;
2322 }
2323
2324 /* Select the Nth LWP that has had a SIGTRAP event. */
2325
2326 static int
2327 select_event_lwp_callback (struct lwp_info *lp, void *data)
2328 {
2329 int *selector = data;
2330
2331 gdb_assert (selector != NULL);
2332
2333 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2334 if (lp->status != 0 && lp->resumed
2335 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2336 if ((*selector)-- == 0)
2337 return 1;
2338
2339 return 0;
2340 }
2341
2342 static int
2343 cancel_breakpoint (struct lwp_info *lp)
2344 {
2345 /* Arrange for a breakpoint to be hit again later. We don't keep
2346 the SIGTRAP status and don't forward the SIGTRAP signal to the
2347 LWP. We will handle the current event, eventually we will resume
2348 this LWP, and this breakpoint will trap again.
2349
2350 If we do not do this, then we run the risk that the user will
2351 delete or disable the breakpoint, but the LWP will have already
2352 tripped on it. */
2353
2354 struct regcache *regcache = get_thread_regcache (lp->ptid);
2355 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2356 CORE_ADDR pc;
2357
2358 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2359 if (breakpoint_inserted_here_p (pc))
2360 {
2361 if (debug_linux_nat)
2362 fprintf_unfiltered (gdb_stdlog,
2363 "CB: Push back breakpoint for %s\n",
2364 target_pid_to_str (lp->ptid));
2365
2366 /* Back up the PC if necessary. */
2367 if (gdbarch_decr_pc_after_break (gdbarch))
2368 regcache_write_pc (regcache, pc);
2369
2370 return 1;
2371 }
2372 return 0;
2373 }
2374
2375 static int
2376 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2377 {
2378 struct lwp_info *event_lp = data;
2379
2380 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2381 if (lp == event_lp)
2382 return 0;
2383
2384 /* If a LWP other than the LWP that we're reporting an event for has
2385 hit a GDB breakpoint (as opposed to some random trap signal),
2386 then just arrange for it to hit it again later. We don't keep
2387 the SIGTRAP status and don't forward the SIGTRAP signal to the
2388 LWP. We will handle the current event, eventually we will resume
2389 all LWPs, and this one will get its breakpoint trap again.
2390
2391 If we do not do this, then we run the risk that the user will
2392 delete or disable the breakpoint, but the LWP will have already
2393 tripped on it. */
2394
2395 if (lp->status != 0
2396 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
2397 && cancel_breakpoint (lp))
2398 /* Throw away the SIGTRAP. */
2399 lp->status = 0;
2400
2401 return 0;
2402 }
2403
2404 /* Select one LWP out of those that have events pending. */
2405
2406 static void
2407 select_event_lwp (struct lwp_info **orig_lp, int *status)
2408 {
2409 int num_events = 0;
2410 int random_selector;
2411 struct lwp_info *event_lp;
2412
2413 /* Record the wait status for the original LWP. */
2414 (*orig_lp)->status = *status;
2415
2416 /* Give preference to any LWP that is being single-stepped. */
2417 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2418 if (event_lp != NULL)
2419 {
2420 if (debug_linux_nat)
2421 fprintf_unfiltered (gdb_stdlog,
2422 "SEL: Select single-step %s\n",
2423 target_pid_to_str (event_lp->ptid));
2424 }
2425 else
2426 {
2427 /* No single-stepping LWP. Select one at random, out of those
2428 which have had SIGTRAP events. */
2429
2430 /* First see how many SIGTRAP events we have. */
2431 iterate_over_lwps (count_events_callback, &num_events);
2432
2433 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2434 random_selector = (int)
2435 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2436
2437 if (debug_linux_nat && num_events > 1)
2438 fprintf_unfiltered (gdb_stdlog,
2439 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2440 num_events, random_selector);
2441
2442 event_lp = iterate_over_lwps (select_event_lwp_callback,
2443 &random_selector);
2444 }
2445
2446 if (event_lp != NULL)
2447 {
2448 /* Switch the event LWP. */
2449 *orig_lp = event_lp;
2450 *status = event_lp->status;
2451 }
2452
2453 /* Flush the wait status for the event LWP. */
2454 (*orig_lp)->status = 0;
2455 }
2456
2457 /* Return non-zero if LP has been resumed. */
2458
2459 static int
2460 resumed_callback (struct lwp_info *lp, void *data)
2461 {
2462 return lp->resumed;
2463 }
2464
2465 /* Stop an active thread, verify it still exists, then resume it. */
2466
2467 static int
2468 stop_and_resume_callback (struct lwp_info *lp, void *data)
2469 {
2470 struct lwp_info *ptr;
2471
2472 if (!lp->stopped && !lp->signalled)
2473 {
2474 stop_callback (lp, NULL);
2475 stop_wait_callback (lp, NULL);
2476 /* Resume if the lwp still exists. */
2477 for (ptr = lwp_list; ptr; ptr = ptr->next)
2478 if (lp == ptr)
2479 {
2480 resume_callback (lp, NULL);
2481 resume_set_callback (lp, NULL);
2482 }
2483 }
2484 return 0;
2485 }
2486
2487 /* Check if we should go on and pass this event to common code.
2488 Return the affected lwp if we are, or NULL otherwise. */
static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int options)
{
  struct lwp_info *lp;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after fork,
     vfork, and clone events, then we'll just add the new one to our
     list and go back to waiting for the event to be reported - the
     stopped process might be returned from waitpid before or after
     the event is.  */
  if (WIFSTOPPED (status) && !lp)
    {
      linux_record_stopped_pid (lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we originally forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* NOTE drow/2003-06-17: This code seems to be meant for debugging
     CLONE_PTRACE processes which do not use the thread library -
     otherwise we wouldn't find the new LWP this way.  That doesn't
     currently work, and the following code is currently unreachable
     due to the two blocks above.  If it's fixed some day, this code
     should be broken out into a function so that we can also pick up
     LWPs from the new interface.  */
  if (!lp)
    {
      lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
      if (options & __WCLONE)
	lp->cloned = 1;

      gdb_assert (WIFSTOPPED (status)
		  && WSTOPSIG (status) == SIGSTOP);
      lp->signalled = 1;

      if (!in_thread_list (inferior_ptid))
	{
	  inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
				     GET_PID (inferior_ptid));
	  add_thread (inferior_ptid);
	}

      add_thread (lp->ptid);
    }

  /* Save the trap's siginfo in case we need it later.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    save_siginfo (lp);

  /* Handle GNU/Linux's extended waitstatus for trace events.  The
     high bits above the low 16 carry the PTRACE_EVENT_* code.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
    {
      /* If this is the main thread, we must stop all threads and
	 verify if they are still alive.  This is because in the nptl
	 thread model, there is no signal issued for exiting LWPs
	 other than the main thread.  We only get the main thread exit
	 signal once all child threads have already exited.  If we
	 stop all the threads and use the stop_wait_callback to check
	 if they have exited we can determine whether this signal
	 should be ignored or whether it means the end of the debugged
	 application, regardless of which threading model is being
	 used.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
	{
	  lp->stopped = 1;
	  iterate_over_lwps (stop_and_resume_callback, NULL);
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored.  */
      if (num_lwps > 0)
	return NULL;
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGSTOP.  Consume it and re-resume the LWP
	 so the user never sees the artificial stop.  */
      lp->signalled = 0;

      registers_changed ();

      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGINT caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGINT.  Consume it and re-resume the
	 LWP.  */
      lp->ignore_sigint = 0;

      registers_changed ();
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGINT)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  return lp;
}
2677
2678 /* Get the events stored in the pipe into the local queue, so they are
2679 accessible to queued_waitpid. We need to do this, since it is not
2680 always the case that the event at the head of the pipe is the event
2681 we want. */
2682
2683 static void
2684 pipe_to_local_event_queue (void)
2685 {
2686 if (debug_linux_nat_async)
2687 fprintf_unfiltered (gdb_stdlog,
2688 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2689 linux_nat_num_queued_events);
2690 while (linux_nat_num_queued_events)
2691 {
2692 int lwpid, status, options;
2693 lwpid = linux_nat_event_pipe_pop (&status, &options);
2694 gdb_assert (lwpid > 0);
2695 push_waitpid (lwpid, status, options);
2696 }
2697 }
2698
2699 /* Get the unprocessed events stored in the local queue back into the
2700 pipe, so the event loop realizes there's something else to
2701 process. */
2702
2703 static void
2704 local_event_queue_to_pipe (void)
2705 {
2706 struct waitpid_result *w = waitpid_queue;
2707 while (w)
2708 {
2709 struct waitpid_result *next = w->next;
2710 linux_nat_event_pipe_push (w->pid,
2711 w->status,
2712 w->options);
2713 xfree (w);
2714 w = next;
2715 }
2716 waitpid_queue = NULL;
2717
2718 if (debug_linux_nat_async)
2719 fprintf_unfiltered (gdb_stdlog,
2720 "LEQTP: linux_nat_num_queued_events(%d)\n",
2721 linux_nat_num_queued_events);
2722 }
2723
/* Wait for an event on PTID (or any LWP when PID is -1), filter and
   select it, stop the other LWPs in all-stop mode, and return the
   ptid of the event LWP with *OURSTATUS filled in.  */

static ptid_t
linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
{
  struct lwp_info *lp = NULL;
  int options = 0;
  int status = 0;
  pid_t pid = PIDGET (ptid);

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (num_lwps == 0)
    {
      gdb_assert (!is_lwp (inferior_ptid));

      /* Upgrade the main thread's ptid.  */
      thread_change_ptid (inferior_ptid,
			  BUILD_LWP (GET_PID (inferior_ptid),
				     GET_PID (inferior_ptid)));

      lp = add_lwp (inferior_ptid);
      lp->resumed = 1;
    }

  /* Block events while we're here.  */
  linux_nat_async_events (sigchld_sync);

retry:

  /* Make sure there is at least one LWP that has been resumed.  */
  gdb_assert (iterate_over_lwps (resumed_callback, NULL));

  /* First check if there is a LWP with a wait status pending.  */
  if (pid == -1)
    {
      /* Any LWP that's been resumed will do.  */
      lp = iterate_over_lwps (status_callback, NULL);
      if (lp)
	{
	  if (target_can_async_p ())
	    internal_error (__FILE__, __LINE__,
			    "Found an LWP with a pending status in async mode.");

	  status = lp->status;
	  lp->status = 0;

	  if (debug_linux_nat && status)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: Using pending wait status %s for %s.\n",
				status_to_str (status),
				target_pid_to_str (lp->ptid));
	}

      /* But if we don't find one, we'll have to wait, and check both
	 cloned and uncloned processes.  We start with the cloned
	 processes.  */
      options = __WCLONE | WNOHANG;
    }
  else if (is_lwp (ptid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Waiting for specific LWP %s.\n",
			    target_pid_to_str (ptid));

      /* We have a specific LWP to check.  */
      lp = find_lwp_pid (ptid);
      gdb_assert (lp);
      status = lp->status;
      lp->status = 0;

      if (debug_linux_nat && status)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Using pending wait status %s for %s.\n",
			    status_to_str (status),
			    target_pid_to_str (lp->ptid));

      /* If we have to wait, take into account whether PID is a cloned
	 process or not.  And we have to convert it to something that
	 the layer beneath us can understand.  */
      options = lp->cloned ? __WCLONE : 0;
      pid = GET_LWP (ptid);
    }

  if (status && lp->signalled)
    {
      /* A pending SIGSTOP may interfere with the normal stream of
	 events.  In a typical case where interference is a problem,
	 we have a SIGSTOP signal pending for LWP A while
	 single-stepping it, encounter an event in LWP B, and take the
	 pending SIGSTOP while trying to stop LWP A.  After processing
	 the event in LWP B, LWP A is continued, and we'll never see
	 the SIGTRAP associated with the last time we were
	 single-stepping LWP A.  */

      /* Resume the thread.  It should halt immediately returning the
	 pending SIGSTOP.  */
      registers_changed ();
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
			    lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* This should catch the pending SIGSTOP.  */
      stop_wait_callback (lp, NULL);
    }

  if (!target_can_async_p ())
    {
      /* Causes SIGINT to be passed on to the attached process.  */
      set_sigint_trap ();
      set_sigio_trap ();
    }

  while (status == 0)
    {
      pid_t lwpid;

      if (target_can_async_p ())
	/* In async mode, don't ever block.  Only look at the locally
	   queued events.  */
	lwpid = queued_waitpid (pid, &status, options);
      else
	lwpid = my_waitpid (pid, &status, options);

      if (lwpid > 0)
	{
	  gdb_assert (pid == -1 || lwpid == pid);

	  if (debug_linux_nat)
	    {
	      fprintf_unfiltered (gdb_stdlog,
				  "LLW: waitpid %ld received %s\n",
				  (long) lwpid, status_to_str (status));
	    }

	  lp = linux_nat_filter_event (lwpid, status, options);
	  if (!lp)
	    {
	      /* A discarded event.  */
	      status = 0;
	      continue;
	    }

	  break;
	}

      if (pid == -1)
	{
	  /* Alternate between checking cloned and uncloned processes.  */
	  options ^= __WCLONE;

	  /* And every time we have checked both:
	     In async mode, return to event loop;
	     In sync mode, suspend waiting for a SIGCHLD signal.  */
	  if (options & __WCLONE)
	    {
	      if (target_can_async_p ())
		{
		  /* No interesting event.  */
		  ourstatus->kind = TARGET_WAITKIND_IGNORE;

		  /* Get ready for the next event.  */
		  target_async (inferior_event_handler, 0);

		  if (debug_linux_nat_async)
		    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

		  return minus_one_ptid;
		}

	      sigsuspend (&suspend_mask);
	    }
	}

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (status == 0);
    }

  if (!target_can_async_p ())
    {
      clear_sigio_trap ();
      clear_sigint_trap ();
    }

  gdb_assert (lp);

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */

  if (WIFSTOPPED (status))
    {
      int signo = target_signal_from_host (WSTOPSIG (status));

      /* If we get a signal while single-stepping, we may need special
	 care, e.g. to skip the signal handler.  Defer to common code.  */
      if (!lp->step
	  && signal_stop_state (signo) == 0
	  && signal_print_state (signo) == 0
	  && signal_pass_state (signo) == 1)
	{
	  /* FIXME: kettenis/2001-06-06: Should we resume all threads
	     here?  It is not clear we should.  GDB may not expect
	     other threads to run.  On the other hand, not resuming
	     newly attached threads may cause an unwanted delay in
	     getting them running.  */
	  registers_changed ();
	  linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
				lp->step, signo);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLW: %s %s, %s (preempt 'handle')\n",
				lp->step ?
				"PTRACE_SINGLESTEP" : "PTRACE_CONT",
				target_pid_to_str (lp->ptid),
				signo ? strsignal (signo) : "0");
	  lp->stopped = 0;
	  status = 0;
	  /* Go back and wait for the next event.  */
	  goto retry;
	}

      if (!non_stop)
	{
	  /* Only do the below in all-stop, as we currently use SIGINT
	     to implement target_stop (see linux_nat_stop) in
	     non-stop.  */
	  if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
	    {
	      /* If ^C/BREAK is typed at the tty/console, SIGINT gets
		 forwarded to the entire process group, that is, all LWPs
		 will receive it - unless they're using CLONE_THREAD to
		 share signals.  Since we only want to report it once, we
		 mark it as ignored for all LWPs except this one.  */
	      iterate_over_lwps (set_ignore_sigint, NULL);
	      lp->ignore_sigint = 0;
	    }
	  else
	    maybe_clear_ignore_sigint (lp);
	}
    }

  /* This LWP is stopped now.  */
  lp->stopped = 1;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
			status_to_str (status), target_pid_to_str (lp->ptid));

  if (!non_stop)
    {
      /* Now stop all other LWP's ...  */
      iterate_over_lwps (stop_callback, NULL);

      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (stop_wait_callback, NULL);

      /* If we're not waiting for a specific LWP, choose an event LWP
	 from among those that have had events.  Giving equal priority
	 to all LWPs that have had events helps prevent
	 starvation.  */
      if (pid == -1)
	select_event_lwp (&lp, &status);
    }

  /* Now that we've selected our final event LWP, cancel any
     breakpoints in other LWPs that have hit a GDB breakpoint.  See
     the comment in cancel_breakpoints_callback to find out why.  */
  iterate_over_lwps (cancel_breakpoints_callback, lp);

  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: trap ptid is %s.\n",
			    target_pid_to_str (lp->ptid));
    }

  /* A waitstatus recorded earlier (e.g. by an extended event such as
     fork/vfork/exec) takes precedence over the raw status.  */
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  /* Get ready for the next event.  */
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");

  return lp->ptid;
}
3031
static int
kill_callback (struct lwp_info *lp, void *data)
{
  /* Ask the kernel to kill this LWP; errno is cleared first so the
     debug trace reports the outcome of this particular call.  */
  errno = 0;
  ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"KC:  PTRACE_KILL %s, 0, 0 (%s)\n",
			target_pid_to_str (lp->ptid),
			errno ? safe_strerror (errno) : "OK");

  return 0;
}
3045
static int
kill_wait_callback (struct lwp_info *lp, void *data)
{
  pid_t pid;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  /* For cloned processes we must check both with __WCLONE and
     without, since the exit status of a cloned process isn't reported
     with __WCLONE.  */
  if (lp->cloned)
    {
      do
	{
	  pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
	  if (pid != (pid_t) -1)
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "KWC: wait %s received unknown.\n",
				    target_pid_to_str (lp->ptid));
	      /* The Linux kernel sometimes fails to kill a thread
		 completely after PTRACE_KILL; that goes from the stop
		 point in do_fork out to the one in
		 get_signal_to_deliver and waits again.  So kill it
		 again.  */
	      kill_callback (lp, NULL);
	    }
	}
      while (pid == GET_LWP (lp->ptid));

      /* The loop above exits only once waitpid fails; the only
	 acceptable failure is "no such child" (fully reaped).  */
      gdb_assert (pid == -1 && errno == ECHILD);
    }

  /* Drain events reported without __WCLONE the same way.  */
  do
    {
      pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
      if (pid != (pid_t) -1)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"KWC: wait %s received unk.\n",
				target_pid_to_str (lp->ptid));
	  /* See the call to kill_callback above.  */
	  kill_callback (lp, NULL);
	}
    }
  while (pid == GET_LWP (lp->ptid));

  gdb_assert (pid == -1 && errno == ECHILD);
  return 0;
}
3100
static void
linux_nat_kill (void)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  int status;

  /* Stop listening for events; we are about to tear everything
     down.  */
  if (target_can_async_p ())
    target_async (NULL, 0);

  /* If we're stopped while forking and we haven't followed yet,
     kill the other task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
      /* Reap the killed child so it does not linger as a zombie.  */
      wait (&status);
    }

  if (forks_exist_p ())
    {
      /* Multiple forks are being debugged; kill them all and drop
	 any of their queued events.  */
      linux_fork_killall ();
      drain_queued_events (-1);
    }
  else
    {
      /* Stop all threads before killing them, since ptrace requires
	 that the thread is stopped to successfully PTRACE_KILL.  */
      iterate_over_lwps (stop_callback, NULL);
      /* ... and wait until all of them have reported back that
	 they're no longer running.  */
      iterate_over_lwps (stop_wait_callback, NULL);

      /* Kill all LWP's ...  */
      iterate_over_lwps (kill_callback, NULL);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (kill_wait_callback, NULL);
    }

  target_mourn_inferior ();
}
3147
3148 static void
3149 linux_nat_mourn_inferior (void)
3150 {
3151 /* Destroy LWP info; it's no longer valid. */
3152 init_lwp_list ();
3153
3154 if (! forks_exist_p ())
3155 {
3156 /* Normal case, no other forks available. */
3157 if (target_can_async_p ())
3158 linux_nat_async (NULL, 0);
3159 linux_ops->to_mourn_inferior ();
3160 }
3161 else
3162 /* Multi-fork case. The current inferior_ptid has exited, but
3163 there are other viable forks to debug. Delete the exiting
3164 one and context-switch to the first available. */
3165 linux_fork_mourn_inferior ();
3166 }
3167
3168 static LONGEST
3169 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3170 const char *annex, gdb_byte *readbuf,
3171 const gdb_byte *writebuf,
3172 ULONGEST offset, LONGEST len)
3173 {
3174 struct cleanup *old_chain = save_inferior_ptid ();
3175 LONGEST xfer;
3176
3177 if (is_lwp (inferior_ptid))
3178 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3179
3180 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3181 offset, len);
3182
3183 do_cleanups (old_chain);
3184 return xfer;
3185 }
3186
3187 static int
3188 linux_nat_thread_alive (ptid_t ptid)
3189 {
3190 int err;
3191
3192 gdb_assert (is_lwp (ptid));
3193
3194 /* Send signal 0 instead of anything ptrace, because ptracing a
3195 running thread errors out claiming that the thread doesn't
3196 exist. */
3197 err = kill_lwp (GET_LWP (ptid), 0);
3198
3199 if (debug_linux_nat)
3200 fprintf_unfiltered (gdb_stdlog,
3201 "LLTA: KILL(SIG0) %s (%s)\n",
3202 target_pid_to_str (ptid),
3203 err ? safe_strerror (err) : "OK");
3204
3205 if (err != 0)
3206 return 0;
3207
3208 return 1;
3209 }
3210
3211 static char *
3212 linux_nat_pid_to_str (ptid_t ptid)
3213 {
3214 static char buf[64];
3215
3216 if (is_lwp (ptid)
3217 && ((lwp_list && lwp_list->next)
3218 || GET_PID (ptid) != GET_LWP (ptid)))
3219 {
3220 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3221 return buf;
3222 }
3223
3224 return normal_pid_to_str (ptid);
3225 }
3226
/* SIGCHLD handler for synchronous mode.  Deliberately empty: it
   exists only so that sigsuspend in linux_nat_wait returns when a
   SIGCHLD arrives.  It must stay async-signal-safe, so it does no
   work.  */

static void
sigchld_handler (int signo)
{
  /* In async mode SIGCHLD is handled by async_sigchld_handler
     instead; reaching this handler then means the handlers were
     mis-installed.  */
  if (target_async_permitted
      && linux_nat_async_events_state != sigchld_sync
      && signo == SIGCHLD)
    /* It is *always* a bug to hit this.  */
    internal_error (__FILE__, __LINE__,
		    "sigchld_handler called when async events are enabled");

  /* Do nothing.  The only reason for this handler is that it allows
     us to use sigsuspend in linux_nat_wait above to wait for the
     arrival of a SIGCHLD.  */
}
3241
3242 /* Accepts an integer PID; Returns a string representing a file that
3243 can be opened to get the symbols for the child process. */
3244
3245 static char *
3246 linux_child_pid_to_exec_file (int pid)
3247 {
3248 char *name1, *name2;
3249
3250 name1 = xmalloc (MAXPATHLEN);
3251 name2 = xmalloc (MAXPATHLEN);
3252 make_cleanup (xfree, name1);
3253 make_cleanup (xfree, name2);
3254 memset (name2, 0, MAXPATHLEN);
3255
3256 sprintf (name1, "/proc/%d/exe", pid);
3257 if (readlink (name1, name2, MAXPATHLEN) > 0)
3258 return name2;
3259 else
3260 return name1;
3261 }
3262
3263 /* Service function for corefiles and info proc. */
3264
3265 static int
3266 read_mapping (FILE *mapfile,
3267 long long *addr,
3268 long long *endaddr,
3269 char *permissions,
3270 long long *offset,
3271 char *device, long long *inode, char *filename)
3272 {
3273 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3274 addr, endaddr, permissions, offset, device, inode);
3275
3276 filename[0] = '\0';
3277 if (ret > 0 && ret != EOF)
3278 {
3279 /* Eat everything up to EOL for the filename. This will prevent
3280 weird filenames (such as one with embedded whitespace) from
3281 confusing this code. It also makes this code more robust in
3282 respect to annotations the kernel may add after the filename.
3283
3284 Note the filename is used for informational purposes
3285 only. */
3286 ret += fscanf (mapfile, "%[^\n]\n", filename);
3287 }
3288
3289 return (ret != 0 && ret != EOF);
3290 }
3291
/* Fills the "to_find_memory_regions" target vector.  Lists the memory
   regions in the inferior for a corefile.  FUNC is invoked once per
   mapping with its address, size, r/w/x permissions and OBFD.  */

static int
linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
					    unsigned long,
					    int, int, int, void *), void *obfd)
{
  long long pid = PIDGET (inferior_ptid);
  char mapsfilename[MAXPATHLEN];
  FILE *mapsfile;
  long long addr, endaddr, size, offset, inode;
  char permissions[8], device[8], filename[MAXPATHLEN];
  int read, write, exec;
  int ret;

  /* Compose the filename for the /proc memory map, and open it.  */
  sprintf (mapsfilename, "/proc/%lld/maps", pid);
  if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
    error (_("Could not open %s."), mapsfilename);

  if (info_verbose)
    fprintf_filtered (gdb_stdout,
		      "Reading memory regions from %s\n", mapsfilename);

  /* Now iterate until end-of-file.  */
  while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
		       &offset, &device[0], &inode, &filename[0]))
    {
      size = endaddr - addr;

      /* Get the segment's permissions.  */
      read = (strchr (permissions, 'r') != 0);
      write = (strchr (permissions, 'w') != 0);
      exec = (strchr (permissions, 'x') != 0);

      if (info_verbose)
	{
	  fprintf_filtered (gdb_stdout,
			    "Save segment, %lld bytes at 0x%s (%c%c%c)",
			    size, paddr_nz (addr),
			    read ? 'r' : ' ',
			    write ? 'w' : ' ', exec ? 'x' : ' ');
	  if (filename[0])
	    fprintf_filtered (gdb_stdout, " for %s", filename);
	  fprintf_filtered (gdb_stdout, "\n");
	}

      /* Invoke the callback function to create the corefile
	 segment.  */
      func (addr, size, read, write, exec, obfd);
    }
  fclose (mapsfile);
  return 0;
}
3347
3348 static int
3349 find_signalled_thread (struct thread_info *info, void *data)
3350 {
3351 if (info->stop_signal != TARGET_SIGNAL_0
3352 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
3353 return 1;
3354
3355 return 0;
3356 }
3357
3358 static enum target_signal
3359 find_stop_signal (void)
3360 {
3361 struct thread_info *info =
3362 iterate_over_threads (find_signalled_thread, NULL);
3363
3364 if (info)
3365 return info->stop_signal;
3366 else
3367 return TARGET_SIGNAL_0;
3368 }
3369
/* Records the thread's register state for the corefile note
   section.  Appends a PRSTATUS note (general registers) and either
   per-section register notes (new core_regset_section scheme) or a
   PRFPREG note (old scheme) to NOTE_DATA, and returns the grown
   buffer.  */

static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
			       char *note_data, int *note_size,
			       enum target_signal stop_signal)
{
  gdb_gregset_t gregs;
  gdb_fpregset_t fpregs;
  unsigned long lwp = ptid_get_lwp (ptid);
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  const struct regset *regset;
  int core_regset_p;
  struct cleanup *old_chain;
  struct core_regset_section *sect_list;
  char *gdb_regset;

  /* Fetch the registers with inferior_ptid temporarily switched to
     the thread being dumped.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = ptid;
  target_fetch_registers (regcache, -1);
  do_cleanups (old_chain);

  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
  sect_list = gdbarch_core_regset_sections (gdbarch);

  /* Collect general registers, preferring the gdbarch regset method
     when available, and write the PRSTATUS note.  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
						     sizeof (gregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
			    &gregs, sizeof (gregs));
  else
    fill_gregset (regcache, &gregs, -1);

  note_data = (char *) elfcore_write_prstatus (obfd,
					       note_data,
					       note_size,
					       lwp,
					       stop_signal, &gregs);

  /* The loop below uses the new struct core_regset_section, which stores
     the supported section names and sizes for the core file.  Note that
     note PRSTATUS needs to be treated specially.  But the other notes are
     structurally the same, so they can benefit from the new struct.  */
  if (core_regset_p && sect_list != NULL)
    while (sect_list->sect_name != NULL)
      {
	/* .reg was already handled above.  */
	if (strcmp (sect_list->sect_name, ".reg") == 0)
	  {
	    sect_list++;
	    continue;
	  }
	regset = gdbarch_regset_from_core_section (gdbarch,
						   sect_list->sect_name,
						   sect_list->size);
	gdb_assert (regset && regset->collect_regset);
	gdb_regset = xmalloc (sect_list->size);
	regset->collect_regset (regset, regcache, -1,
				gdb_regset, sect_list->size);
	note_data = (char *) elfcore_write_register_note (obfd,
							  note_data,
							  note_size,
							  sect_list->sect_name,
							  gdb_regset,
							  sect_list->size);
	xfree (gdb_regset);
	sect_list++;
      }

  /* For architectures that does not have the struct core_regset_section
     implemented, we use the old method.  When all the architectures have
     the new support, the code below should be deleted.  */
  else
    {
      if (core_regset_p
	  && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
							 sizeof (fpregs))) != NULL
	  && regset->collect_regset != NULL)
	regset->collect_regset (regset, regcache, -1,
				&fpregs, sizeof (fpregs));
      else
	fill_fpregset (regcache, &fpregs, -1);

      note_data = (char *) elfcore_write_prfpreg (obfd,
						  note_data,
						  note_size,
						  &fpregs, sizeof (fpregs));
    }

  return note_data;
}
3464
/* Accumulator threaded through linux_nat_corefile_thread_callback
   while building the corefile note section.  */

struct linux_nat_corefile_thread_data
{
  bfd *obfd;			/* BFD the notes are written to.  */
  char *note_data;		/* Growing buffer of note data.  */
  int *note_size;		/* Running size of NOTE_DATA.  */
  int num_notes;		/* How many thread notes were written.  */
  enum target_signal stop_signal;	/* Signal recorded in each PRSTATUS note.  */
};
3473
/* Callback for iterate_over_lwps; invoked once per LWP.  Records the
   thread's register state for the corefile note section.  */
3476
3477 static int
3478 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3479 {
3480 struct linux_nat_corefile_thread_data *args = data;
3481
3482 args->note_data = linux_nat_do_thread_registers (args->obfd,
3483 ti->ptid,
3484 args->note_data,
3485 args->note_size,
3486 args->stop_signal);
3487 args->num_notes++;
3488
3489 return 0;
3490 }
3491
3492 /* Fills the "to_make_corefile_note" target vector. Builds the note
3493 section for a corefile, and returns it in a malloc buffer. */
3494
3495 static char *
3496 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3497 {
3498 struct linux_nat_corefile_thread_data thread_args;
3499 struct cleanup *old_chain;
3500 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3501 char fname[16] = { '\0' };
3502 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3503 char psargs[80] = { '\0' };
3504 char *note_data = NULL;
3505 ptid_t current_ptid = inferior_ptid;
3506 gdb_byte *auxv;
3507 int auxv_len;
3508
3509 if (get_exec_file (0))
3510 {
3511 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3512 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3513 if (get_inferior_args ())
3514 {
3515 char *string_end;
3516 char *psargs_end = psargs + sizeof (psargs);
3517
3518 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3519 strings fine. */
3520 string_end = memchr (psargs, 0, sizeof (psargs));
3521 if (string_end != NULL)
3522 {
3523 *string_end++ = ' ';
3524 strncpy (string_end, get_inferior_args (),
3525 psargs_end - string_end);
3526 }
3527 }
3528 note_data = (char *) elfcore_write_prpsinfo (obfd,
3529 note_data,
3530 note_size, fname, psargs);
3531 }
3532
3533 /* Dump information for threads. */
3534 thread_args.obfd = obfd;
3535 thread_args.note_data = note_data;
3536 thread_args.note_size = note_size;
3537 thread_args.num_notes = 0;
3538 thread_args.stop_signal = find_stop_signal ();
3539 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3540 gdb_assert (thread_args.num_notes != 0);
3541 note_data = thread_args.note_data;
3542
3543 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3544 NULL, &auxv);
3545 if (auxv_len > 0)
3546 {
3547 note_data = elfcore_write_note (obfd, note_data, note_size,
3548 "CORE", NT_AUXV, auxv, auxv_len);
3549 xfree (auxv);
3550 }
3551
3552 make_cleanup (xfree, note_data);
3553 return note_data;
3554 }
3555
/* Implement the "info proc" command.  ARGS may contain a PID and/or
   the keywords mappings/status/stat/cmd/exe/cwd/all selecting which
   /proc files to report; with no PID the current inferior is used.  */

static void
linux_nat_info_proc_cmd (char *args, int from_tty)
{
  long long pid = PIDGET (inferior_ptid);
  FILE *procfile;
  char **argv = NULL;
  char buffer[MAXPATHLEN];
  char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
  /* cmdline/cwd/exe are shown by default; the rest only on request.  */
  int cmdline_f = 1;
  int cwd_f = 1;
  int exe_f = 1;
  int mappings_f = 0;
  int environ_f = 0;
  int status_f = 0;
  int stat_f = 0;
  int all = 0;
  struct stat dummy;

  if (args)
    {
      /* Break up 'args' into an argv array.  */
      if ((argv = buildargv (args)) == NULL)
	nomem (0);
      else
	make_cleanup_freeargv (argv);
    }
  /* Walk the argument words, accepting a decimal PID and option
     keywords (some matched by unambiguous prefix, some exactly).  */
  while (argv != NULL && *argv != NULL)
    {
      if (isdigit (argv[0][0]))
	{
	  pid = strtoul (argv[0], NULL, 10);
	}
      else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
	{
	  mappings_f = 1;
	}
      else if (strcmp (argv[0], "status") == 0)
	{
	  status_f = 1;
	}
      else if (strcmp (argv[0], "stat") == 0)
	{
	  stat_f = 1;
	}
      else if (strcmp (argv[0], "cmd") == 0)
	{
	  cmdline_f = 1;
	}
      else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
	{
	  exe_f = 1;
	}
      else if (strcmp (argv[0], "cwd") == 0)
	{
	  cwd_f = 1;
	}
      else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
	{
	  all = 1;
	}
      else
	{
	  /* [...] (future options here) */
	}
      argv++;
    }
  if (pid == 0)
    error (_("No current process: you must name one."));

  /* Verify /proc is mounted and the process directory exists.  */
  sprintf (fname1, "/proc/%lld", pid);
  if (stat (fname1, &dummy) != 0)
    error (_("No /proc directory: '%s'"), fname1);

  printf_filtered (_("process %lld\n"), pid);
  if (cmdline_f || all)
    {
      sprintf (fname1, "/proc/%lld/cmdline", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  fgets (buffer, sizeof (buffer), procfile);
	  printf_filtered ("cmdline = '%s'\n", buffer);
	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (cwd_f || all)
    {
      sprintf (fname1, "/proc/%lld/cwd", pid);
      /* The memset guarantees NUL termination, since readlink does
	 not terminate and fills at most sizeof (fname2) bytes.  */
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
	printf_filtered ("cwd = '%s'\n", fname2);
      else
	warning (_("unable to read link '%s'"), fname1);
    }
  if (exe_f || all)
    {
      sprintf (fname1, "/proc/%lld/exe", pid);
      memset (fname2, 0, sizeof (fname2));
      if (readlink (fname1, fname2, sizeof (fname2)) > 0)
	printf_filtered ("exe = '%s'\n", fname2);
      else
	warning (_("unable to read link '%s'"), fname1);
    }
  if (mappings_f || all)
    {
      sprintf (fname1, "/proc/%lld/maps", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  long long addr, endaddr, size, offset, inode;
	  char permissions[8], device[8], filename[MAXPATHLEN];

	  printf_filtered (_("Mapped address spaces:\n\n"));
	  if (gdbarch_addr_bit (current_gdbarch) == 32)
	    {
	      printf_filtered ("\t%10s %10s %10s %10s %7s\n",
			   "Start Addr",
			   "  End Addr",
			   "      Size", "    Offset", "objfile");
	    }
	  else
	    {
	      printf_filtered ("  %18s %18s %10s %10s %7s\n",
			   "Start Addr",
			   "  End Addr",
			   "      Size", "    Offset", "objfile");
	    }

	  while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
			       &offset, &device[0], &inode, &filename[0]))
	    {
	      size = endaddr - addr;

	      /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
		 calls here (and possibly above) should be abstracted
		 out into their own functions?  Andrew suggests using
		 a generic local_address_string instead to print out
		 the addresses; that makes sense to me, too.  */

	      if (gdbarch_addr_bit (current_gdbarch) == 32)
		{
		  printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
			       (unsigned long) addr,	/* FIXME: pr_addr */
			       (unsigned long) endaddr,
			       (int) size,
			       (unsigned int) offset,
			       filename[0] ? filename : "");
		}
	      else
		{
		  printf_filtered ("  %#18lx %#18lx %#10x %#10x %7s\n",
			       (unsigned long) addr,	/* FIXME: pr_addr */
			       (unsigned long) endaddr,
			       (int) size,
			       (unsigned int) offset,
			       filename[0] ? filename : "");
		}
	    }

	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (status_f || all)
    {
      sprintf (fname1, "/proc/%lld/status", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  while (fgets (buffer, sizeof (buffer), procfile) != NULL)
	    puts_filtered (buffer);
	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
  if (stat_f || all)
    {
      sprintf (fname1, "/proc/%lld/stat", pid);
      if ((procfile = fopen (fname1, "r")) != NULL)
	{
	  int itmp;
	  char ctmp;
	  long ltmp;

	  /* Decode the fixed leading fields of /proc/PID/stat in
	     order, printing each one that scans successfully.  */
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Process: %d\n"), itmp);
	  if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
	    printf_filtered (_("Exec file: %s\n"), buffer);
	  if (fscanf (procfile, "%c ", &ctmp) > 0)
	    printf_filtered (_("State: %c\n"), ctmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Parent process: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Process group: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("Session id: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("TTY: %d\n"), itmp);
	  if (fscanf (procfile, "%d ", &itmp) > 0)
	    printf_filtered (_("TTY owner process group: %d\n"), itmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Flags: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Minor faults (no memory page): %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Minor faults, children: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Major faults (memory page faults): %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Major faults, children: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("utime: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("stime: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("utime, children: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("stime, children: %ld\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
			     ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("'nice' value: %ld\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("jiffies until next timeout: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("start time (jiffies since system boot): %ld\n"),
			     ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Virtual memory size: %lu\n"),
			     (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("End of text: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)
	    printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
#if 0				/* Don't know how architecture-dependent the rest is...
				   Anyway the signal bitmap info is available from "status".  */
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
	    printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
	    printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%ld ", &ltmp) > 0)
	    printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
	  if (fscanf (procfile, "%lu ", &ltmp) > 0)	/* FIXME arch?  */
	    printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
#endif
	  fclose (procfile);
	}
      else
	warning (_("unable to open /proc file '%s'"), fname1);
    }
}
3831
/* Implement the to_xfer_partial interface for memory reads using the /proc
   filesystem.  Because we can use a single read() call for /proc, this
   can be much more efficient than banging away at PTRACE_PEEKTEXT,
   but it doesn't support writes.  */

static LONGEST
linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
			 const char *annex, gdb_byte *readbuf,
			 const gdb_byte *writebuf,
			 ULONGEST offset, LONGEST len)
{
  LONGEST ret;
  int fd;
  char filename[64];

  /* Only memory reads are handled here; anything else falls through
     to the caller's fallback path (return 0 = nothing transferred).  */
  if (object != TARGET_OBJECT_MEMORY || !readbuf)
    return 0;

  /* Don't bother for small transfers (fewer than three words):
     ptrace is cheap enough there.  */
  if (len < 3 * sizeof (long))
    return 0;

  /* We could keep this file open and cache it - possibly one per
     thread.  That requires some juggling, but is even faster.  */
  sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return 0;

  /* If pread64 is available, use it.  It's faster if the kernel
     supports it (only one syscall), and it's 64-bit safe even on
     32-bit platforms (for instance, SPARC debugging a SPARC64
     application).  */
#ifdef HAVE_PREAD64
  if (pread64 (fd, readbuf, len, offset) != len)
#else
  if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
#endif
    /* Partial or failed reads are reported as "nothing transferred";
       the caller then falls back to the ptrace method.  */
    ret = 0;
  else
    ret = len;

  close (fd);
  return ret;
}
3877
3878 /* Parse LINE as a signal set and add its set bits to SIGS. */
3879
3880 static void
3881 add_line_to_sigset (const char *line, sigset_t *sigs)
3882 {
3883 int len = strlen (line) - 1;
3884 const char *p;
3885 int signum;
3886
3887 if (line[len] != '\n')
3888 error (_("Could not parse signal set: %s"), line);
3889
3890 p = line;
3891 signum = len * 4;
3892 while (len-- > 0)
3893 {
3894 int digit;
3895
3896 if (*p >= '0' && *p <= '9')
3897 digit = *p - '0';
3898 else if (*p >= 'a' && *p <= 'f')
3899 digit = *p - 'a' + 10;
3900 else
3901 error (_("Could not parse signal set: %s"), line);
3902
3903 signum -= 4;
3904
3905 if (digit & 1)
3906 sigaddset (sigs, signum + 1);
3907 if (digit & 2)
3908 sigaddset (sigs, signum + 2);
3909 if (digit & 4)
3910 sigaddset (sigs, signum + 3);
3911 if (digit & 8)
3912 sigaddset (sigs, signum + 4);
3913
3914 p++;
3915 }
3916 }
3917
3918 /* Find process PID's pending signals from /proc/pid/status and set
3919 SIGS to match. */
3920
3921 void
3922 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3923 {
3924 FILE *procfile;
3925 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3926 int signum;
3927
3928 sigemptyset (pending);
3929 sigemptyset (blocked);
3930 sigemptyset (ignored);
3931 sprintf (fname, "/proc/%d/status", pid);
3932 procfile = fopen (fname, "r");
3933 if (procfile == NULL)
3934 error (_("Could not open %s"), fname);
3935
3936 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3937 {
3938 /* Normal queued signals are on the SigPnd line in the status
3939 file. However, 2.6 kernels also have a "shared" pending
3940 queue for delivering signals to a thread group, so check for
3941 a ShdPnd line also.
3942
3943 Unfortunately some Red Hat kernels include the shared pending
3944 queue but not the ShdPnd status field. */
3945
3946 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3947 add_line_to_sigset (buffer + 8, pending);
3948 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3949 add_line_to_sigset (buffer + 8, pending);
3950 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3951 add_line_to_sigset (buffer + 8, blocked);
3952 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3953 add_line_to_sigset (buffer + 8, ignored);
3954 }
3955
3956 fclose (procfile);
3957 }
3958
3959 static LONGEST
3960 linux_xfer_partial (struct target_ops *ops, enum target_object object,
3961 const char *annex, gdb_byte *readbuf,
3962 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3963 {
3964 LONGEST xfer;
3965
3966 if (object == TARGET_OBJECT_AUXV)
3967 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3968 offset, len);
3969
3970 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3971 offset, len);
3972 if (xfer != 0)
3973 return xfer;
3974
3975 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3976 offset, len);
3977 }
3978
3979 /* Create a prototype generic GNU/Linux target. The client can override
3980 it with local methods. */
3981
3982 static void
3983 linux_target_install_ops (struct target_ops *t)
3984 {
3985 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3986 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3987 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3988 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
3989 t->to_post_startup_inferior = linux_child_post_startup_inferior;
3990 t->to_post_attach = linux_child_post_attach;
3991 t->to_follow_fork = linux_child_follow_fork;
3992 t->to_find_memory_regions = linux_nat_find_memory_regions;
3993 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3994
3995 super_xfer_partial = t->to_xfer_partial;
3996 t->to_xfer_partial = linux_xfer_partial;
3997 }
3998
/* Build the generic GNU/Linux target on top of the plain ptrace
   target.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t = inf_ptrace_target ();

  linux_target_install_ops (t);
  return t;
}
4009
4010 struct target_ops *
4011 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4012 {
4013 struct target_ops *t;
4014
4015 t = inf_ptrace_trad_target (register_u_offset);
4016 linux_target_install_ops (t);
4017
4018 return t;
4019 }
4020
4021 /* target_is_async_p implementation. */
4022
4023 static int
4024 linux_nat_is_async_p (void)
4025 {
4026 /* NOTE: palves 2008-03-21: We're only async when the user requests
4027 it explicitly with the "maintenance set target-async" command.
4028 Someday, linux will always be async. */
4029 if (!target_async_permitted)
4030 return 0;
4031
4032 return 1;
4033 }
4034
4035 /* target_can_async_p implementation. */
4036
4037 static int
4038 linux_nat_can_async_p (void)
4039 {
4040 /* NOTE: palves 2008-03-21: We're only async when the user requests
4041 it explicitly with the "maintenance set target-async" command.
4042 Someday, linux will always be async. */
4043 if (!target_async_permitted)
4044 return 0;
4045
4046 /* See target.h/target_async_mask. */
4047 return linux_nat_async_mask_value;
4048 }
4049
/* to_supports_non_stop implementation: this target supports non-stop
   mode.  */
static int
linux_nat_supports_non_stop (void)
{
  return 1;
}
4055
4056 /* target_async_mask implementation. */
4057
4058 static int
4059 linux_nat_async_mask (int mask)
4060 {
4061 int current_state;
4062 current_state = linux_nat_async_mask_value;
4063
4064 if (current_state != mask)
4065 {
4066 if (mask == 0)
4067 {
4068 linux_nat_async (NULL, 0);
4069 linux_nat_async_mask_value = mask;
4070 }
4071 else
4072 {
4073 linux_nat_async_mask_value = mask;
4074 linux_nat_async (inferior_event_handler, 0);
4075 }
4076 }
4077
4078 return current_state;
4079 }
4080
4081 /* Pop an event from the event pipe. */
4082
4083 static int
4084 linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
4085 {
4086 struct waitpid_result event = {0};
4087 int ret;
4088
4089 do
4090 {
4091 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4092 }
4093 while (ret == -1 && errno == EINTR);
4094
4095 gdb_assert (ret == sizeof (event));
4096
4097 *ptr_status = event.status;
4098 *ptr_options = event.options;
4099
4100 linux_nat_num_queued_events--;
4101
4102 return event.pid;
4103 }
4104
4105 /* Push an event into the event pipe. */
4106
4107 static void
4108 linux_nat_event_pipe_push (int pid, int status, int options)
4109 {
4110 int ret;
4111 struct waitpid_result event = {0};
4112 event.pid = pid;
4113 event.status = status;
4114 event.options = options;
4115
4116 do
4117 {
4118 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
4119 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
4120 } while (ret == -1 && errno == EINTR);
4121
4122 linux_nat_num_queued_events++;
4123 }
4124
/* Drain every child status currently reported by waitpid and push
   each one into the event pipe.  Must only be called while fully
   async (sigchld_async); anything else indicates mis-sequenced
   handler installation.  */

static void
get_pending_events (void)
{
  int status, options, pid;

  if (!target_async_permitted
      || linux_nat_async_events_state != sigchld_async)
    internal_error (__FILE__, __LINE__,
		    "get_pending_events called with async masked");

  while (1)
    {
      status = 0;
      /* Check cloned children (__WCLONE) first, retrying on EINTR.  */
      options = __WCLONE | WNOHANG;

      do
	{
	  pid = waitpid (-1, &status, options);
	}
      while (pid == -1 && errno == EINTR);

      if (pid <= 0)
	{
	  /* No cloned child reported an event; try ordinary
	     children.  */
	  options = WNOHANG;
	  do
	    {
	      pid = waitpid (-1, &status, options);
	    }
	  while (pid == -1 && errno == EINTR);
	}

      if (pid <= 0)
	/* No more children reporting events.  */
	break;

      if (debug_linux_nat_async)
	fprintf_unfiltered (gdb_stdlog, "\
get_pending_events: pid(%d), status(%x), options (%x)\n",
			    pid, status, options);

      /* Record the event, together with the options that produced
	 it, for later consumption by the event loop.  */
      linux_nat_event_pipe_push (pid, status, options);
    }

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "\
get_pending_events: linux_nat_num_queued_events(%d)\n",
			linux_nat_num_queued_events);
}
4173
/* SIGCHLD handler for async mode.  Drains all pending child statuses
   into the event pipe via get_pending_events.  NOTE(review):
   fprintf_unfiltered and the queue bookkeeping in get_pending_events
   are not async-signal-safe in general; this presumably relies on
   the surrounding sigprocmask discipline in this file — confirm.  */

static void
async_sigchld_handler (int signo)
{
  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");

  get_pending_events ();
}
4184
/* Set SIGCHLD handling state to STATE. Returns previous state.

   The three states are: sigchld_sync (events collected synchronously
   via waitpid, SIGCHLD blocked), sigchld_async (events collected by
   the SIGCHLD handler into the event pipe), and sigchld_default (the
   handler saved at startup).  Transitions move any already-collected
   events between the local queue and the pipe so none are lost and
   ordering stays FIFO.  */

static enum sigchld_state
linux_nat_async_events (enum sigchld_state state)
{
  enum sigchld_state current_state = linux_nat_async_events_state;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
			"LNAE: state(%d): linux_nat_async_events_state(%d), "
			"linux_nat_num_queued_events(%d)\n",
			state, linux_nat_async_events_state,
			linux_nat_num_queued_events);

  if (current_state != state)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Always block before changing state, so the handler cannot
	 run while the handler/queue configuration is inconsistent.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      /* Set new state. */
      linux_nat_async_events_state = state;

      switch (state)
	{
	case sigchld_sync:
	  {
	    /* Block target events.  SIGCHLD is deliberately left
	       blocked in this mode — no matching SIG_UNBLOCK; events
	       are fetched synchronously instead.  */
	    sigprocmask (SIG_BLOCK, &mask, NULL);
	    sigaction (SIGCHLD, &sync_sigchld_action, NULL);
	    /* Get events out of queue, and make them available to
	       queued_waitpid / my_waitpid. */
	    pipe_to_local_event_queue ();
	  }
	  break;
	case sigchld_async:
	  {
	    /* Unblock target events for async mode. */

	    sigprocmask (SIG_BLOCK, &mask, NULL);

	    /* Put events we already waited on, in the pipe first, so
	       events are FIFO. */
	    local_event_queue_to_pipe ();
	    /* While in masked async, we may have not collected all
	       the pending events. Get them out now. */
	    get_pending_events ();

	    /* Let'em come.  Install the async handler before
	       unblocking, so no SIGCHLD is delivered to the old
	       handler.  */
	    sigaction (SIGCHLD, &async_sigchld_action, NULL);
	    sigprocmask (SIG_UNBLOCK, &mask, NULL);
	  }
	  break;
	case sigchld_default:
	  {
	    /* SIGCHLD default mode. */
	    sigaction (SIGCHLD, &sigchld_default_action, NULL);

	    /* Get events out of queue, and make them available to
	       queued_waitpid / my_waitpid. */
	    pipe_to_local_event_queue ();

	    /* Unblock SIGCHLD. */
	    sigprocmask (SIG_UNBLOCK, &mask, NULL);
	  }
	  break;
	}
    }

  return current_state;
}
4259
4260 static int async_terminal_is_ours = 1;
4261
/* target_terminal_inferior implementation.  Hands the terminal over
   to the inferior and, in async mode, stops GDB from reading stdin
   and reroutes SIGINT while the inferior owns the terminal.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled. */
      terminal_inferior ();
      return;
    }

  /* GDB should never give the terminal to the inferior, if the
     inferior is running in the background (run&, continue&, etc.).
     This check can be removed when the common code is fixed. */
  if (!sync_execution)
    return;

  terminal_inferior ();

  /* Already handed over; the handlers below were already adjusted.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop listening for stdin events while the inferior owns the
     terminal; set_sigint_trap presumably redirects SIGINT to the
     inferior — confirm against its definition.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
4289
/* target_terminal_ours implementation.  Reclaims the terminal for
   GDB and, in async mode, undoes the SIGINT/stdin changes made by
   linux_nat_terminal_inferior.  */

void
linux_nat_terminal_ours (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled. */
      terminal_ours ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should. */
  terminal_ours ();

  if (!sync_execution)
    return;

  /* Nothing to restore if the terminal was never handed over.  */
  if (async_terminal_is_ours)
    return;

  /* Undo linux_nat_terminal_inferior: stop trapping SIGINT and
     resume listening for stdin events.  */
  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}
4317
/* Client callback registered via linux_nat_async; invoked (with
   ASYNC_CLIENT_CONTEXT) when the event pipe becomes readable.  NULL
   while async event reporting is off.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;
4321
/* Event-loop callback for the read end of the event pipe: forward
   the notification to the registered client callback.  ERROR and
   CLIENT_DATA are unused.  */

static void
linux_nat_async_file_handler (int error, gdb_client_data client_data)
{
  async_client_callback (INF_REG_EVENT, async_client_context);
}
4327
4328 /* target_async implementation. */
4329
4330 static void
4331 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4332 void *context), void *context)
4333 {
4334 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
4335 internal_error (__FILE__, __LINE__,
4336 "Calling target_async when async is masked");
4337
4338 if (callback != NULL)
4339 {
4340 async_client_callback = callback;
4341 async_client_context = context;
4342 add_file_handler (linux_nat_event_pipe[0],
4343 linux_nat_async_file_handler, NULL);
4344
4345 linux_nat_async_events (sigchld_async);
4346 }
4347 else
4348 {
4349 async_client_callback = callback;
4350 async_client_context = context;
4351
4352 linux_nat_async_events (sigchld_sync);
4353 delete_file_handler (linux_nat_event_pipe[0]);
4354 }
4355 return;
4356 }
4357
4358 static int
4359 send_sigint_callback (struct lwp_info *lp, void *data)
4360 {
4361 /* Use is_running instead of !lp->stopped, because the lwp may be
4362 stopped due to an internal event, and we want to interrupt it in
4363 that case too. What we want is to check if the thread is stopped
4364 from the point of view of the user. */
4365 if (is_running (lp->ptid))
4366 kill_lwp (GET_LWP (lp->ptid), SIGINT);
4367 return 0;
4368 }
4369
4370 static void
4371 linux_nat_stop (ptid_t ptid)
4372 {
4373 if (non_stop)
4374 {
4375 if (ptid_equal (ptid, minus_one_ptid))
4376 iterate_over_lwps (send_sigint_callback, &ptid);
4377 else
4378 {
4379 struct lwp_info *lp = find_lwp_pid (ptid);
4380 send_sigint_callback (lp, NULL);
4381 }
4382 }
4383 else
4384 linux_ops->to_stop (ptid);
4385 }
4386
/* Install the GNU/Linux native target T, overriding the methods that
   need multithread- and async-aware implementations from this file.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target. We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target. */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading. */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_has_thread_control = tc_schedlock;

  /* Async-mode methods. */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_supports_non_stop = linux_nat_supports_non_stop;
  t->to_async = linux_nat_async;
  t->to_async_mask = linux_nat_async_mask;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;

  /* Methods for non-stop support. */
  t->to_stop = linux_nat_stop;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum. This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes. */

  add_target (t);

  /* TODO: Eliminate this and have libthread_db use
     find_target_beneath. */
  thread_db_init (t);
}
4433
/* Register a method to call whenever a new thread is attached.
   NOTE(review): presumably invoked from this file's LWP bookkeeping
   each time an LWP is added — confirm at the linux_nat_new_thread
   call sites.  */
void
linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
{
  /* Save the pointer. We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T. */
  linux_nat_new_thread = new_thread;
}
4443
4444 /* Return the saved siginfo associated with PTID. */
4445 struct siginfo *
4446 linux_nat_get_siginfo (ptid_t ptid)
4447 {
4448 struct lwp_info *lp = find_lwp_pid (ptid);
4449
4450 gdb_assert (lp != NULL);
4451
4452 return &lp->siginfo;
4453 }
4454
4455 /* Enable/Disable async mode. */
4456
4457 static void
4458 linux_nat_setup_async (void)
4459 {
4460 if (pipe (linux_nat_event_pipe) == -1)
4461 internal_error (__FILE__, __LINE__,
4462 "creating event pipe failed.");
4463 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4464 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4465 }
4466
/* Module initializer: register commands and debug variables, and set
   up the default (synchronous) SIGCHLD handling and the signal masks
   used throughout this file.  The ordering of the sigprocmask and
   sigaction calls below is deliberate.  */

void
_initialize_linux_nat (void)
{
  sigset_t mask;

  add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\
Specify any of the following keywords for detailed info:\n\
  mappings -- list of mapped memory regions.\n\
  stat     -- list a bunch of random process info.\n\
  status   -- list a different bunch of random process info.\n\
  all      -- list all available /proc info."));

  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
			    &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat,
			    &setdebuglist, &showdebuglist);

  add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
			    &debug_linux_nat_async, _("\
Set debugging of GNU/Linux async lwp module."), _("\
Show debugging of GNU/Linux async lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat_async,
			    &setdebuglist, &showdebuglist);

  /* Get the default SIGCHLD action. Used while forking an inferior
     (see linux_nat_create_inferior/linux_nat_async_events). */
  sigaction (SIGCHLD, NULL, &sigchld_default_action);

  /* Block SIGCHLD by default. Doing this early prevents it getting
     unblocked if an exception is thrown due to an error while the
     inferior is starting (sigsetjmp/siglongjmp). */
  sigemptyset (&mask);
  sigaddset (&mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &mask, NULL);

  /* Save this mask as the default.  Note it is saved with SIGCHLD
     already blocked.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* The synchronous SIGCHLD handler. */
  sync_sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sync_sigchld_action.sa_mask);
  sync_sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default. */
  sigaction (SIGCHLD, &sync_sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend. */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  /* SIGCHLD handler for async mode.  Installed later by
     linux_nat_async_events, not here.  */
  async_sigchld_action.sa_handler = async_sigchld_handler;
  sigemptyset (&async_sigchld_action.sa_mask);
  async_sigchld_action.sa_flags = SA_RESTART;

  linux_nat_setup_async ();

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);
}
4543 \f
4544
4545 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4546 the GNU/Linux Threads library and therefore doesn't really belong
4547 here. */
4548
4549 /* Read variable NAME in the target and return its value if found.
4550 Otherwise return zero. It is assumed that the type of the variable
4551 is `int'. */
4552
4553 static int
4554 get_signo (const char *name)
4555 {
4556 struct minimal_symbol *ms;
4557 int signo;
4558
4559 ms = lookup_minimal_symbol (name, NULL, NULL);
4560 if (ms == NULL)
4561 return 0;
4562
4563 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4564 sizeof (signo)) != 0)
4565 return 0;
4566
4567 return signo;
4568 }
4569
/* Return the set of signals used by the threads library in *SET.
   Also installs our SIGCHLD-style handler for the "cancel" signal
   and adjusts the process signal mask accordingly.  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;
  sigset_t blocked_mask;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  /* Try to read the signal numbers directly from LinuxThreads'
     internal variables in the inferior; zero means not found.  */
  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change! */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD. */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ... */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  suspend_mask is the file-wide
     mask used by sigsuspend-based waits.  */
  sigdelset (&suspend_mask, cancel);
}
This page took 0.123212 seconds and 5 git commands to generate.