1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49 #include "inf-loop.h"
50 #include "event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include "gdb_dirent.h"
55 #include "xml-support.h"
56
57 #ifdef HAVE_PERSONALITY
58 # include <sys/personality.h>
59 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
60 # define ADDR_NO_RANDOMIZE 0x0040000
61 # endif
62 #endif /* HAVE_PERSONALITY */
63
64 /* This comment documents the high-level logic of this file.
65
66 Waiting for events in sync mode
67 ===============================
68
69 When waiting for an event in a specific thread, we just use waitpid, passing
70 the specific pid, and not passing WNOHANG.
71
72 When waiting for an event in all threads, waitpid is not quite adequate. Prior
73 to version 2.4, Linux could wait either for events in the main thread or in
74 secondary threads, but not both (2.4 added the __WALL flag). So, if we use
75 blocking waitpid, we might miss an event. The solution is to use non-blocking
76 waitpid, together with sigsuspend. First, we use non-blocking waitpid to get
77 an event in the main process, if any. Second, we use non-blocking waitpid with
78 the __WCLONE flag to check for events in cloned processes. If nothing is
79 found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means
80 something happened to a child process -- and SIGCHLD will be delivered both for
81 events in the main debugged process and in cloned processes. As soon as we
82 know there's an event, we call non-blocking waitpid again, with and without __WCLONE.
83
84 Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
85 so that we don't miss a signal. If SIGCHLD arrives in between, when it's
86 blocked, the signal becomes pending and sigsuspend immediately
87 notices it and returns.
88
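   To make the scheme above concrete, the resulting polling loop looks
   roughly like this (a simplified sketch only, not the exact code in
   this file; error handling is omitted, SIGCHLD is assumed blocked, and
   suspend_mask is the process mask with SIGCHLD unblocked):

     for (;;)
       {
         pid = waitpid (-1, &status, WNOHANG);
         if (pid <= 0)
           pid = waitpid (-1, &status, WNOHANG | __WCLONE);
         if (pid > 0)
           break;
         sigsuspend (&suspend_mask);
       }
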
89 Waiting for events in async mode
90 ================================
91
92 In async mode, GDB should always be ready to handle both user input and target
93 events, so neither blocking waitpid nor sigsuspend is a viable
94 option. Instead, we should notify the GDB main event loop whenever there's an
95 unprocessed event from the target. The only way to notify this event loop is
96 to make it wait on input from a pipe, and write something to the pipe whenever
97 there's an event. Obviously, failing to notify the event loop when there is a
98 target event is bad. If we notify the event loop when there's no event from
99 the target, linux-nat.c will detect that there's actually no event and report
100 an event of type TARGET_WAITKIND_IGNORE, but this wastes time and is better
101 avoided.
102
103 The main design point is that every time GDB is outside linux-nat.c, we have a
104 SIGCHLD handler installed that is called when something happens to the target
105 and notifies the GDB event loop. Also, the event is extracted from the target
106 using waitpid and stored for future use. Whenever the GDB core decides to
107 handle the event and calls into linux-nat.c, we disable SIGCHLD and process
108 things as in sync mode, except that before the waitpid call we check if
109 there are any previously read events.
110
111 It could happen that during event processing, we'll try to get more events
112 than there are in the local queue, which will result in a waitpid call.
113 Those waitpid calls, while blocking, are guaranteed to always have
114 something for waitpid to return. E.g., stopping a thread with SIGSTOP, and
115 waiting for the lwp to stop.
116
117 The event loop is notified about new events using a pipe. The SIGCHLD handler
118 calls waitpid and writes the results into the pipe. The GDB event loop has the
119 other end of the pipe among its sources. When the event loop starts to process
120 an event and calls a function in linux-nat.c, all events from the pipe are
121 transferred into a local queue and SIGCHLD is blocked. Further processing goes
122 as in sync mode. Before we return from linux_nat_wait, we transfer all
123 unprocessed events from the local queue back to the pipe, so that when we get
124 back to the event loop, it will notice there's something more to do.
125
126 SIGCHLD is blocked when we're inside target_wait, so that should we actually
127 want to wait for some more events, the SIGCHLD handler does not steal them
128 from us. Technically, it would be possible to add new events to the local
129 queue, but it's about the same amount of work as blocking SIGCHLD.
130
131 This moving of events from the pipe into a local queue and back into the pipe
132 when we enter/leave linux-nat.c is somewhat ugly. Unfortunately, the GDB
133 event loop is home-grown and incapable of waiting on any queue.
134
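   As a sketch of that interplay (simplified; the real handler also
   records which waitpid options matched each event, see push_waitpid):

     async SIGCHLD handler:
       while ((pid = waitpid (-1, &status, WNOHANG | __WALL)) > 0)
         queue (pid, status) and write a byte to the event pipe;

     event loop:
       waits on the read end of the pipe among its other sources; when
       it becomes readable, GDB calls into linux-nat.c, which moves the
       queued events into the local queue and blocks SIGCHLD.
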
135 Use of signals
136 ==============
137
138 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
139 signal is not entirely significant; we just need a signal to be delivered, so
140 that we can intercept it. SIGSTOP's advantage is that it cannot be
141 blocked. A disadvantage is that it is not a real-time signal, so it can only
142 be queued once; we do not keep track of other sources of SIGSTOP.
143
144 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
145 use them, because they have special behavior when the signal is generated -
146 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
147 kills the entire thread group.
148
149 A delivered SIGSTOP would stop the entire thread group, not just the thread we
150 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
151 cancel it (by PTRACE_CONT without passing SIGSTOP).
152
153 We could use a real-time signal instead. This would solve those problems; we
154 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
155 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
156 generates it, and there are races with trying to find a signal that is not
157 blocked. */
158
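/* To illustrate the stop/cancel dance described in the comment above,
   in outline (a simplified sketch; the real stop_callback and
   stop_wait_callback below add error handling and bookkeeping):

     kill_lwp (lwpid, SIGSTOP);               send the stop signal
     my_waitpid (lwpid, &status, __WALL);     intercept the SIGSTOP
     ptrace (PTRACE_CONT, lwpid, 0, 0);       resume the thread

   Passing zero as the signal argument to PTRACE_CONT is what cancels
   the pending SIGSTOP instead of delivering it to the thread.  */
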
159 #ifndef O_LARGEFILE
160 #define O_LARGEFILE 0
161 #endif
162
163 /* If the system headers did not provide the constants, hard-code the normal
164 values. */
165 #ifndef PTRACE_EVENT_FORK
166
167 #define PTRACE_SETOPTIONS 0x4200
168 #define PTRACE_GETEVENTMSG 0x4201
169
170 /* options set using PTRACE_SETOPTIONS */
171 #define PTRACE_O_TRACESYSGOOD 0x00000001
172 #define PTRACE_O_TRACEFORK 0x00000002
173 #define PTRACE_O_TRACEVFORK 0x00000004
174 #define PTRACE_O_TRACECLONE 0x00000008
175 #define PTRACE_O_TRACEEXEC 0x00000010
176 #define PTRACE_O_TRACEVFORKDONE 0x00000020
177 #define PTRACE_O_TRACEEXIT 0x00000040
178
179 /* Wait extended result codes for the above trace options. */
180 #define PTRACE_EVENT_FORK 1
181 #define PTRACE_EVENT_VFORK 2
182 #define PTRACE_EVENT_CLONE 3
183 #define PTRACE_EVENT_EXEC 4
184 #define PTRACE_EVENT_VFORK_DONE 5
185 #define PTRACE_EVENT_EXIT 6
186
187 #endif /* PTRACE_EVENT_FORK */
188
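/* For example, once PTRACE_O_TRACEFORK is set, a fork in the inferior
   is reported as a SIGTRAP stop whose extended event code occupies
   bits 16..23 of the wait status, and the new child's pid can then be
   fetched with PTRACE_GETEVENTMSG (an illustrative sketch; new_pid is
   a hypothetical variable):

     if (WIFSTOPPED (status) && (status >> 16) == PTRACE_EVENT_FORK)
       ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

   linux_test_for_tracefork and linux_handle_extended_wait below rely
   on this pattern.  */
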
189 /* We can't always assume that this flag is available, but all systems
190 with the ptrace event handlers also have __WALL, so it's safe to use
191 here. */
192 #ifndef __WALL
193 #define __WALL 0x40000000 /* Wait for any child. */
194 #endif
195
196 #ifndef PTRACE_GETSIGINFO
197 #define PTRACE_GETSIGINFO 0x4202
198 #endif
199
200 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
201 the use of the multi-threaded target. */
202 static struct target_ops *linux_ops;
203 static struct target_ops linux_ops_saved;
204
205 /* The method to call, if any, when a new thread is attached. */
206 static void (*linux_nat_new_thread) (ptid_t);
207
208 /* The method to call, if any, when the siginfo object needs to be
209 converted between the layout returned by ptrace, and the layout in
210 the architecture of the inferior. */
211 static int (*linux_nat_siginfo_fixup) (struct siginfo *,
212 gdb_byte *,
213 int);
214
215 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
216 Called by our to_xfer_partial. */
217 static LONGEST (*super_xfer_partial) (struct target_ops *,
218 enum target_object,
219 const char *, gdb_byte *,
220 const gdb_byte *,
221 ULONGEST, LONGEST);
222
223 static int debug_linux_nat;
224 static void
225 show_debug_linux_nat (struct ui_file *file, int from_tty,
226 struct cmd_list_element *c, const char *value)
227 {
228 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
229 value);
230 }
231
232 static int debug_linux_nat_async = 0;
233 static void
234 show_debug_linux_nat_async (struct ui_file *file, int from_tty,
235 struct cmd_list_element *c, const char *value)
236 {
237 fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
238 value);
239 }
240
241 static int disable_randomization = 1;
242
243 static void
244 show_disable_randomization (struct ui_file *file, int from_tty,
245 struct cmd_list_element *c, const char *value)
246 {
247 #ifdef HAVE_PERSONALITY
248 fprintf_filtered (file, _("\
249 Disabling randomization of debuggee's virtual address space is %s.\n"),
250 value);
251 #else /* !HAVE_PERSONALITY */
252 fputs_filtered (_("\
253 Disabling randomization of debuggee's virtual address space is unsupported on\n\
254 this platform.\n"), file);
255 #endif /* !HAVE_PERSONALITY */
256 }
257
258 static void
259 set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
260 {
261 #ifndef HAVE_PERSONALITY
262 error (_("\
263 Disabling randomization of debuggee's virtual address space is unsupported on\n\
264 this platform."));
265 #endif /* !HAVE_PERSONALITY */
266 }
267
268 static int linux_parent_pid;
269
270 struct simple_pid_list
271 {
272 int pid;
273 int status;
274 struct simple_pid_list *next;
275 };
276 struct simple_pid_list *stopped_pids;
277
278 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
279 cannot be used, 1 if it can. */
280
281 static int linux_supports_tracefork_flag = -1;
282
283 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
284 PTRACE_O_TRACEVFORKDONE. */
285
286 static int linux_supports_tracevforkdone_flag = -1;
287
288 /* Async mode support */
289
290 /* Zero if the async mode, although enabled, is masked, which means
291 linux_nat_wait should behave as if async mode was off. */
292 static int linux_nat_async_mask_value = 1;
293
294 /* The read/write ends of the pipe registered as waitable file in the
295 event loop. */
296 static int linux_nat_event_pipe[2] = { -1, -1 };
297
298 /* Number of queued events in the pipe. */
299 static volatile int linux_nat_num_queued_events;
300
301 /* The possible SIGCHLD handling states. */
302
303 enum sigchld_state
304 {
305 /* SIGCHLD disabled, with action set to sigchld_handler, for the
306 sigsuspend in linux_nat_wait. */
307 sigchld_sync,
308 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
309 sigchld_async,
310 /* Set SIGCHLD to default action. Used while creating an
311 inferior. */
312 sigchld_default
313 };
314
315 /* The current SIGCHLD handling state. */
316 static enum sigchld_state linux_nat_async_events_state;
317
318 static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
319 static void pipe_to_local_event_queue (void);
320 static void local_event_queue_to_pipe (void);
321 static void linux_nat_event_pipe_push (int pid, int status, int options);
322 static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
323 static void linux_nat_set_async_mode (int on);
324 static void linux_nat_async (void (*callback)
325 (enum inferior_event_type event_type, void *context),
326 void *context);
327 static int linux_nat_async_mask (int mask);
328 static int kill_lwp (int lwpid, int signo);
329
330 static int stop_callback (struct lwp_info *lp, void *data);
331
332 /* Captures the result of a successful waitpid call, along with the
333 options used in that call. */
334 struct waitpid_result
335 {
336 int pid;
337 int status;
338 int options;
339 struct waitpid_result *next;
340 };
341
342 /* A singly-linked list of the results of the waitpid calls performed
343 in the async SIGCHLD handler. */
344 static struct waitpid_result *waitpid_queue = NULL;
345
346 /* Similar to `waitpid', but check the local event queue instead of
347 querying the kernel queue. If PEEK, don't remove the event found
348 from the queue. */
349
350 static int
351 queued_waitpid_1 (int pid, int *status, int flags, int peek)
352 {
353 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
354
355 if (debug_linux_nat_async)
356 fprintf_unfiltered (gdb_stdlog,
357 "\
358 QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
359 linux_nat_async_events_state,
360 linux_nat_num_queued_events);
361
362 if (flags & __WALL)
363 {
364 for (; msg; prev = msg, msg = msg->next)
365 if (pid == -1 || pid == msg->pid)
366 break;
367 }
368 else if (flags & __WCLONE)
369 {
370 for (; msg; prev = msg, msg = msg->next)
371 if (msg->options & __WCLONE
372 && (pid == -1 || pid == msg->pid))
373 break;
374 }
375 else
376 {
377 for (; msg; prev = msg, msg = msg->next)
378 if ((msg->options & __WCLONE) == 0
379 && (pid == -1 || pid == msg->pid))
380 break;
381 }
382
383 if (msg)
384 {
385 int pid;
386
387 if (status)
388 *status = msg->status;
389 pid = msg->pid;
390
391 if (debug_linux_nat_async)
392 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
393 pid, msg->status);
394
395 if (!peek)
396 {
397 if (prev)
398 prev->next = msg->next;
399 else
400 waitpid_queue = msg->next;
401
402 msg->next = NULL;
403 xfree (msg);
404 }
405
406 return pid;
407 }
408
409 if (debug_linux_nat_async)
410 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
411
412 if (status)
413 *status = 0;
414 return -1;
415 }
416
417 /* Similar to `waitpid', but check the local event queue. */
418
419 static int
420 queued_waitpid (int pid, int *status, int flags)
421 {
422 return queued_waitpid_1 (pid, status, flags, 0);
423 }
424
425 static void
426 push_waitpid (int pid, int status, int options)
427 {
428 struct waitpid_result *event, *new_event;
429
430 new_event = xmalloc (sizeof (*new_event));
431 new_event->pid = pid;
432 new_event->status = status;
433 new_event->options = options;
434 new_event->next = NULL;
435
436 if (waitpid_queue)
437 {
438 for (event = waitpid_queue;
439 event && event->next;
440 event = event->next)
441 ;
442
443 event->next = new_event;
444 }
445 else
446 waitpid_queue = new_event;
447 }
448
449 /* Drain all queued events of PID. If PID is -1, the effect is to
450 drain all events. */
451 static void
452 drain_queued_events (int pid)
453 {
454 while (queued_waitpid (pid, NULL, __WALL) != -1)
455 ;
456 }
457
458 \f
459 /* Trivial list manipulation functions to keep track of a list of
460 new stopped processes. */
461 static void
462 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
463 {
464 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
465 new_pid->pid = pid;
466 new_pid->status = status;
467 new_pid->next = *listp;
468 *listp = new_pid;
469 }
470
471 static int
472 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
473 {
474 struct simple_pid_list **p;
475
476 for (p = listp; *p != NULL; p = &(*p)->next)
477 if ((*p)->pid == pid)
478 {
479 struct simple_pid_list *next = (*p)->next;
480 *status = (*p)->status;
481 xfree (*p);
482 *p = next;
483 return 1;
484 }
485 return 0;
486 }
487
488 static void
489 linux_record_stopped_pid (int pid, int status)
490 {
491 add_to_pid_list (&stopped_pids, pid, status);
492 }
493
494 \f
495 /* A helper function for linux_test_for_tracefork, called after fork (). */
496
497 static void
498 linux_tracefork_child (void)
499 {
502 ptrace (PTRACE_TRACEME, 0, 0, 0);
503 kill (getpid (), SIGSTOP);
504 fork ();
505 _exit (0);
506 }
507
508 /* Wrapper function for waitpid which handles EINTR, and checks for
509 locally queued events. */
510
511 static int
512 my_waitpid (int pid, int *status, int flags)
513 {
514 int ret;
515
516 /* There should be no concurrent calls to waitpid. */
517 gdb_assert (linux_nat_async_events_state == sigchld_sync);
518
519 ret = queued_waitpid (pid, status, flags);
520 if (ret != -1)
521 return ret;
522
523 do
524 {
525 ret = waitpid (pid, status, flags);
526 }
527 while (ret == -1 && errno == EINTR);
528
529 return ret;
530 }
531
532 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
533
534 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
535 we know that the feature is not available. This may change the tracing
536 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
537
538 However, if it succeeds, we don't know for sure that the feature is
539 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
540 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
541 fork tracing, and let it fork. If the process exits, we assume that we
542 can't use TRACEFORK; if we get the fork notification, and we can extract
543 the new child's PID, then we assume that we can. */
544
545 static void
546 linux_test_for_tracefork (int original_pid)
547 {
548 int child_pid, ret, status;
549 long second_pid;
550 enum sigchld_state async_events_original_state;
551
552 async_events_original_state = linux_nat_async_events (sigchld_sync);
553
554 linux_supports_tracefork_flag = 0;
555 linux_supports_tracevforkdone_flag = 0;
556
557 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
558 if (ret != 0)
559 return;
560
561 child_pid = fork ();
562 if (child_pid == -1)
563 perror_with_name (("fork"));
564
565 if (child_pid == 0)
566 linux_tracefork_child ();
567
568 ret = my_waitpid (child_pid, &status, 0);
569 if (ret == -1)
570 perror_with_name (("waitpid"));
571 else if (ret != child_pid)
572 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
573 if (! WIFSTOPPED (status))
574 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
575
576 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
577 if (ret != 0)
578 {
579 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
580 if (ret != 0)
581 {
582 warning (_("linux_test_for_tracefork: failed to kill child"));
583 linux_nat_async_events (async_events_original_state);
584 return;
585 }
586
587 ret = my_waitpid (child_pid, &status, 0);
588 if (ret != child_pid)
589 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
590 else if (!WIFSIGNALED (status))
591 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
592 "killed child"), status);
593
594 linux_nat_async_events (async_events_original_state);
595 return;
596 }
597
598 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
599 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
600 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
601 linux_supports_tracevforkdone_flag = (ret == 0);
602
603 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
604 if (ret != 0)
605 warning (_("linux_test_for_tracefork: failed to resume child"));
606
607 ret = my_waitpid (child_pid, &status, 0);
608
609 if (ret == child_pid && WIFSTOPPED (status)
610 && status >> 16 == PTRACE_EVENT_FORK)
611 {
612 second_pid = 0;
613 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
614 if (ret == 0 && second_pid != 0)
615 {
616 int second_status;
617
618 linux_supports_tracefork_flag = 1;
619 my_waitpid (second_pid, &second_status, 0);
620 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
621 if (ret != 0)
622 warning (_("linux_test_for_tracefork: failed to kill second child"));
623 my_waitpid (second_pid, &status, 0);
624 }
625 }
626 else
627 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
628 "(%d, status 0x%x)"), ret, status);
629
630 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
631 if (ret != 0)
632 warning (_("linux_test_for_tracefork: failed to kill child"));
633 my_waitpid (child_pid, &status, 0);
634
635 linux_nat_async_events (async_events_original_state);
636 }
637
638 /* Return non-zero iff we have tracefork functionality available.
639 This function also sets linux_supports_tracefork_flag. */
640
641 static int
642 linux_supports_tracefork (int pid)
643 {
644 if (linux_supports_tracefork_flag == -1)
645 linux_test_for_tracefork (pid);
646 return linux_supports_tracefork_flag;
647 }
648
649 static int
650 linux_supports_tracevforkdone (int pid)
651 {
652 if (linux_supports_tracefork_flag == -1)
653 linux_test_for_tracefork (pid);
654 return linux_supports_tracevforkdone_flag;
655 }
656
657 \f
658 void
659 linux_enable_event_reporting (ptid_t ptid)
660 {
661 int pid = ptid_get_lwp (ptid);
662 int options;
663
664 if (pid == 0)
665 pid = ptid_get_pid (ptid);
666
667 if (! linux_supports_tracefork (pid))
668 return;
669
670 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
671 | PTRACE_O_TRACECLONE;
672 if (linux_supports_tracevforkdone (pid))
673 options |= PTRACE_O_TRACEVFORKDONE;
674
675 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
676 read-only process state. */
677
678 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
679 }
680
681 static void
682 linux_child_post_attach (int pid)
683 {
684 linux_enable_event_reporting (pid_to_ptid (pid));
685 check_for_thread_db ();
686 }
687
688 static void
689 linux_child_post_startup_inferior (ptid_t ptid)
690 {
691 linux_enable_event_reporting (ptid);
692 check_for_thread_db ();
693 }
694
695 static int
696 linux_child_follow_fork (struct target_ops *ops, int follow_child)
697 {
698 ptid_t last_ptid;
699 struct target_waitstatus last_status;
700 int has_vforked;
701 int parent_pid, child_pid;
702
703 if (target_can_async_p ())
704 target_async (NULL, 0);
705
706 get_last_target_status (&last_ptid, &last_status);
707 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
708 parent_pid = ptid_get_lwp (last_ptid);
709 if (parent_pid == 0)
710 parent_pid = ptid_get_pid (last_ptid);
711 child_pid = PIDGET (last_status.value.related_pid);
712
713 if (! follow_child)
714 {
715 /* We're already attached to the parent, by default. */
716
717 /* Before detaching from the child, remove all breakpoints from
718 it. (This won't actually modify the breakpoint list, but will
719 physically remove the breakpoints from the child.) */
720 /* If we vforked this will remove the breakpoints from the parent
721 also, but they'll be reinserted below. */
722 detach_breakpoints (child_pid);
723
724 /* Detach new forked process? */
725 if (detach_fork)
726 {
727 if (info_verbose || debug_linux_nat)
728 {
729 target_terminal_ours ();
730 fprintf_filtered (gdb_stdlog,
731 "Detaching after fork from child process %d.\n",
732 child_pid);
733 }
734
735 ptrace (PTRACE_DETACH, child_pid, 0, 0);
736 }
737 else
738 {
739 struct fork_info *fp;
740 struct inferior *parent_inf, *child_inf;
741
742 /* Add process to GDB's tables. */
743 child_inf = add_inferior (child_pid);
744
745 parent_inf = find_inferior_pid (GET_PID (last_ptid));
746 child_inf->attach_flag = parent_inf->attach_flag;
747
748 /* Retain child fork in ptrace (stopped) state. */
749 fp = find_fork_pid (child_pid);
750 if (!fp)
751 fp = add_fork (child_pid);
752 fork_save_infrun_state (fp, 0);
753 }
754
755 if (has_vforked)
756 {
757 gdb_assert (linux_supports_tracefork_flag >= 0);
758 if (linux_supports_tracevforkdone (0))
759 {
760 int status;
761
762 ptrace (PTRACE_CONT, parent_pid, 0, 0);
763 my_waitpid (parent_pid, &status, __WALL);
764 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
765 warning (_("Unexpected waitpid result %06x when waiting for "
766 "vfork-done"), status);
767 }
768 else
769 {
770 /* We can't insert breakpoints until the child has
771 finished with the shared memory region. We need to
772 wait until that happens. Ideal would be to just
773 call:
774 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
775 - waitpid (parent_pid, &status, __WALL);
776 However, most architectures can't handle a syscall
777 being traced on the way out if it wasn't traced on
778 the way in.
779
780 We might also think to loop, continuing the child
781 until it exits or gets a SIGTRAP. One problem is
782 that the child might call ptrace with PTRACE_TRACEME.
783
784 There's no simple and reliable way to figure out when
785 the vforked child will be done with its copy of the
786 shared memory. We could step it out of the syscall,
787 two instructions, let it go, and then single-step the
788 parent once. When we have hardware single-step, this
789 would work; with software single-step it could still
790 be made to work but we'd have to be able to insert
791 single-step breakpoints in the child, and we'd have
792 to insert -just- the single-step breakpoint in the
793 parent. Very awkward.
794
795 In the end, the best we can do is to make sure it
796 runs for a little while. Hopefully it will be out of
797 range of any breakpoints we reinsert. Usually this
798 is only the single-step breakpoint at vfork's return
799 point. */
800
801 usleep (10000);
802 }
803
804 /* Since we vforked, breakpoints were removed in the parent
805 too. Put them back. */
806 reattach_breakpoints (parent_pid);
807 }
808 }
809 else
810 {
811 struct thread_info *last_tp = find_thread_pid (last_ptid);
812 struct thread_info *tp;
813 char child_pid_spelling[40];
814 struct inferior *parent_inf, *child_inf;
815
816 /* Copy user stepping state to the new inferior thread. */
817 struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
818 CORE_ADDR step_range_start = last_tp->step_range_start;
819 CORE_ADDR step_range_end = last_tp->step_range_end;
820 struct frame_id step_frame_id = last_tp->step_frame_id;
821
822 /* Otherwise, deleting the parent would get rid of this
823 breakpoint. */
824 last_tp->step_resume_breakpoint = NULL;
825
826 /* Needed to keep the breakpoint lists in sync. */
827 if (! has_vforked)
828 detach_breakpoints (child_pid);
829
830 /* Before detaching from the parent, remove all breakpoints from it. */
831 remove_breakpoints ();
832
833 if (info_verbose || debug_linux_nat)
834 {
835 target_terminal_ours ();
836 fprintf_filtered (gdb_stdlog,
837 "Attaching after fork to child process %d.\n",
838 child_pid);
839 }
840
841 /* Add the new inferior first, so that the target_detach below
842 doesn't unpush the target. */
843
844 child_inf = add_inferior (child_pid);
845
846 parent_inf = find_inferior_pid (GET_PID (last_ptid));
847 child_inf->attach_flag = parent_inf->attach_flag;
848
849 /* If we're vforking, we may want to hold on to the parent until
850 the child exits or execs. At exec time we can remove the old
851 breakpoints from the parent and detach it; at exit time we
852 could do the same (or even, sneakily, resume debugging it - the
853 child's exec has failed, or something similar).
854
855 This doesn't clean up "properly", because we can't call
856 target_detach, but that's OK; if the current target is "child",
857 then it doesn't need any further cleanups, and lin_lwp will
858 generally not encounter vfork (vfork is defined to fork
859 in libpthread.so).
860
861 The holding part is very easy if we have VFORKDONE events;
862 but keeping track of both processes is beyond GDB at the
863 moment. So we don't expose the parent to the rest of GDB.
864 Instead we quietly hold onto it until such time as we can
865 safely resume it. */
866
867 if (has_vforked)
868 {
869 linux_parent_pid = parent_pid;
870 detach_inferior (parent_pid);
871 }
872 else if (!detach_fork)
873 {
874 struct fork_info *fp;
875 /* Retain parent fork in ptrace (stopped) state. */
876 fp = find_fork_pid (parent_pid);
877 if (!fp)
878 fp = add_fork (parent_pid);
879 fork_save_infrun_state (fp, 0);
880
881 /* Also add an entry for the child fork. */
882 fp = find_fork_pid (child_pid);
883 if (!fp)
884 fp = add_fork (child_pid);
885 fork_save_infrun_state (fp, 0);
886 }
887 else
888 target_detach (NULL, 0);
889
890 inferior_ptid = ptid_build (child_pid, child_pid, 0);
891
892 linux_nat_switch_fork (inferior_ptid);
893 check_for_thread_db ();
894
895 tp = inferior_thread ();
896 tp->step_resume_breakpoint = step_resume_breakpoint;
897 tp->step_range_start = step_range_start;
898 tp->step_range_end = step_range_end;
899 tp->step_frame_id = step_frame_id;
900
901 /* Reset breakpoints in the child as appropriate. */
902 follow_inferior_reset_breakpoints ();
903 }
904
905 if (target_can_async_p ())
906 target_async (inferior_event_handler, 0);
907
908 return 0;
909 }
910
911 \f
912 static void
913 linux_child_insert_fork_catchpoint (int pid)
914 {
915 if (! linux_supports_tracefork (pid))
916 error (_("Your system does not support fork catchpoints."));
917 }
918
919 static void
920 linux_child_insert_vfork_catchpoint (int pid)
921 {
922 if (!linux_supports_tracefork (pid))
923 error (_("Your system does not support vfork catchpoints."));
924 }
925
926 static void
927 linux_child_insert_exec_catchpoint (int pid)
928 {
929 if (!linux_supports_tracefork (pid))
930 error (_("Your system does not support exec catchpoints."));
931 }
932
933 /* On GNU/Linux there are no real LWP's. The closest thing to LWP's
934 are processes sharing the same VM space. A multi-threaded process
935 is basically a group of such processes. However, such a grouping
936 is almost entirely a user-space issue; the kernel doesn't enforce
937 such a grouping at all (this might change in the future). In
938 general, we'll rely on the threads library (i.e. the GNU/Linux
939 Threads library) to provide such a grouping.
940
941 It is perfectly possible to write a multi-threaded application
942 without the assistance of a threads library, by using the clone
943 system call directly. This module should be able to give some
944 rudimentary support for debugging such applications if developers
945 specify the CLONE_PTRACE flag in the clone system call, and are
946 using the Linux kernel 2.4 or above.
947
948 Note that there are some peculiarities in GNU/Linux that affect
949 this code:
950
951 - In general one should specify the __WCLONE flag to waitpid in
952 order to make it report events for any of the cloned processes
953 (and leave it out for the initial process). However, if a cloned
954 process has exited the exit status is only reported if the
955 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
956 we cannot use it since GDB must work on older systems too.
957
958 - When a traced, cloned process exits and is waited for by the
959 debugger, the kernel reassigns it to the original parent and
960 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
961 library doesn't notice this, which leads to the "zombie problem":
962 When debugged, a multi-threaded process that spawns a lot of
963 threads will run out of processes, even if the threads exit,
964 because the "zombies" stay around. */
965
966 /* List of known LWPs. */
967 struct lwp_info *lwp_list;
968
969 /* Number of LWPs in the list. */
970 static int num_lwps;
971 \f
972
973 /* Original signal mask. */
974 static sigset_t normal_mask;
975
976 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
977 _initialize_linux_nat. */
978 static sigset_t suspend_mask;
979
980 /* SIGCHLD action for synchronous mode. */
981 struct sigaction sync_sigchld_action;
982
983 /* SIGCHLD action for asynchronous mode. */
984 static struct sigaction async_sigchld_action;
985
986 /* SIGCHLD default action, to pass to new inferiors. */
987 static struct sigaction sigchld_default_action;
988 \f
989
990 /* Prototypes for local functions. */
991 static int stop_wait_callback (struct lwp_info *lp, void *data);
992 static int linux_nat_thread_alive (ptid_t ptid);
993 static char *linux_child_pid_to_exec_file (int pid);
994 static int cancel_breakpoint (struct lwp_info *lp);
995
996 \f
997 /* Convert wait status STATUS to a string. Used for printing debug
998 messages only. */
999
1000 static char *
1001 status_to_str (int status)
1002 {
1003 static char buf[64];
1004
1005 if (WIFSTOPPED (status))
1006 snprintf (buf, sizeof (buf), "%s (stopped)",
1007 strsignal (WSTOPSIG (status)));
1008 else if (WIFSIGNALED (status))
1009 snprintf (buf, sizeof (buf), "%s (terminated)",
1010 strsignal (WTERMSIG (status)));
1011 else
1012 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1013
1014 return buf;
1015 }
1016
1017 /* Initialize the list of LWPs. Note that this module, contrary to
1018 what GDB's generic threads layer does for its thread list,
1019 re-initializes the LWP lists whenever we mourn or detach (which
1020 doesn't involve mourning) the inferior. */
1021
1022 static void
1023 init_lwp_list (void)
1024 {
1025 struct lwp_info *lp, *lpnext;
1026
1027 for (lp = lwp_list; lp; lp = lpnext)
1028 {
1029 lpnext = lp->next;
1030 xfree (lp);
1031 }
1032
1033 lwp_list = NULL;
1034 num_lwps = 0;
1035 }
1036
1037 /* Add the LWP specified by PID to the list. Return a pointer to the
1038 structure describing the new LWP. The LWP should already be stopped
1039 (with an exception for the very first LWP). */
1040
1041 static struct lwp_info *
1042 add_lwp (ptid_t ptid)
1043 {
1044 struct lwp_info *lp;
1045
1046 gdb_assert (is_lwp (ptid));
1047
1048 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1049
1050 memset (lp, 0, sizeof (struct lwp_info));
1051
1052 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1053
1054 lp->ptid = ptid;
1055
1056 lp->next = lwp_list;
1057 lwp_list = lp;
1058 ++num_lwps;
1059
1060 if (num_lwps > 1 && linux_nat_new_thread != NULL)
1061 linux_nat_new_thread (ptid);
1062
1063 return lp;
1064 }
1065
1066 /* Remove the LWP specified by PID from the list. */
1067
1068 static void
1069 delete_lwp (ptid_t ptid)
1070 {
1071 struct lwp_info *lp, *lpprev;
1072
1073 lpprev = NULL;
1074
1075 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1076 if (ptid_equal (lp->ptid, ptid))
1077 break;
1078
1079 if (!lp)
1080 return;
1081
1082 num_lwps--;
1083
1084 if (lpprev)
1085 lpprev->next = lp->next;
1086 else
1087 lwp_list = lp->next;
1088
1089 xfree (lp);
1090 }
1091
1092 /* Return a pointer to the structure describing the LWP corresponding
1093 to PID. If no corresponding LWP could be found, return NULL. */
1094
1095 static struct lwp_info *
1096 find_lwp_pid (ptid_t ptid)
1097 {
1098 struct lwp_info *lp;
1099 int lwp;
1100
1101 if (is_lwp (ptid))
1102 lwp = GET_LWP (ptid);
1103 else
1104 lwp = GET_PID (ptid);
1105
1106 for (lp = lwp_list; lp; lp = lp->next)
1107 if (lwp == GET_LWP (lp->ptid))
1108 return lp;
1109
1110 return NULL;
1111 }
1112
1113 /* Call CALLBACK with its second argument set to DATA for every LWP in
1114 the list. If CALLBACK returns 1 for a particular LWP, return a
1115 pointer to the structure describing that LWP immediately.
1116 Otherwise return NULL. */
1117
1118 struct lwp_info *
1119 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
1120 {
1121 struct lwp_info *lp, *lpnext;
1122
1123 for (lp = lwp_list; lp; lp = lpnext)
1124 {
1125 lpnext = lp->next;
1126 if ((*callback) (lp, data))
1127 return lp;
1128 }
1129
1130 return NULL;
1131 }
1132
1133 /* Update our internal state when changing from one fork (checkpoint,
1134 et cetera) to another indicated by NEW_PTID. We can only switch
1135 single-threaded applications, so we only create one new LWP, and
1136 the previous list is discarded. */
1137
1138 void
1139 linux_nat_switch_fork (ptid_t new_ptid)
1140 {
1141 struct lwp_info *lp;
1142
1143 init_lwp_list ();
1144 lp = add_lwp (new_ptid);
1145 lp->stopped = 1;
1146
1147 init_thread_list ();
1148 add_thread_silent (new_ptid);
1149 }
1150
1151 /* Handle the exit of a single thread LP. */
1152
1153 static void
1154 exit_lwp (struct lwp_info *lp)
1155 {
1156 struct thread_info *th = find_thread_pid (lp->ptid);
1157
1158 if (th)
1159 {
1160 if (print_thread_events)
1161 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1162
1163 delete_thread (lp->ptid);
1164 }
1165
1166 delete_lwp (lp->ptid);
1167 }
1168
1169 /* Detect `T (stopped)' in `/proc/PID/status'.
1170 Other states including `T (tracing stop)' are reported as false. */
1171
1172 static int
1173 pid_is_stopped (pid_t pid)
1174 {
1175 FILE *status_file;
1176 char buf[100];
1177 int retval = 0;
1178
1179 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1180 status_file = fopen (buf, "r");
1181 if (status_file != NULL)
1182 {
1183 int have_state = 0;
1184
1185 while (fgets (buf, sizeof (buf), status_file))
1186 {
1187 if (strncmp (buf, "State:", 6) == 0)
1188 {
1189 have_state = 1;
1190 break;
1191 }
1192 }
1193 if (have_state && strstr (buf, "T (stopped)") != NULL)
1194 retval = 1;
1195 fclose (status_file);
1196 }
1197 return retval;
1198 }
1199
1200 /* Wait for the LWP specified by LP, which we have just attached to.
1201 Returns a wait status for that LWP, to cache. */
1202
1203 static int
1204 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1205 int *signalled)
1206 {
1207 pid_t new_pid, pid = GET_LWP (ptid);
1208 int status;
1209
1210 if (pid_is_stopped (pid))
1211 {
1212 if (debug_linux_nat)
1213 fprintf_unfiltered (gdb_stdlog,
1214 "LNPAW: Attaching to a stopped process\n");
1215
1216 /* The process is definitely stopped. It is in a job control
1217 stop, unless the kernel predates the TASK_STOPPED /
1218 TASK_TRACED distinction, in which case it might be in a
1219 ptrace stop. Make sure it is in a ptrace stop; from there we
1220 can kill it, signal it, et cetera.
1221
1222 First make sure there is a pending SIGSTOP. Since we are
1223 already attached, the process can not transition from stopped
1224 to running without a PTRACE_CONT; so we know this signal will
1225 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1226 probably already in the queue (unless this kernel is old
1227 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1228 is not an RT signal, it can only be queued once. */
1229 kill_lwp (pid, SIGSTOP);
1230
1231 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1232 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1233 ptrace (PTRACE_CONT, pid, 0, 0);
1234 }
1235
1236 /* Make sure the initial process is stopped. The user-level threads
1237 layer might want to poke around in the inferior, and that won't
1238 work if things haven't stabilized yet. */
1239 new_pid = my_waitpid (pid, &status, 0);
1240 if (new_pid == -1 && errno == ECHILD)
1241 {
1242 if (first)
1243 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1244
1245 /* Try again with __WCLONE to check cloned processes. */
1246 new_pid = my_waitpid (pid, &status, __WCLONE);
1247 *cloned = 1;
1248 }
1249
1250 gdb_assert (pid == new_pid && WIFSTOPPED (status));
1251
1252 if (WSTOPSIG (status) != SIGSTOP)
1253 {
1254 *signalled = 1;
1255 if (debug_linux_nat)
1256 fprintf_unfiltered (gdb_stdlog,
1257 "LNPAW: Received %s after attaching\n",
1258 status_to_str (status));
1259 }
1260
1261 return status;
1262 }
1263
1264 /* Attach to the LWP specified by PID. Return 0 if successful or -1
1265 if the new LWP could not be attached. */
1266
1267 int
1268 lin_lwp_attach_lwp (ptid_t ptid)
1269 {
1270 struct lwp_info *lp;
1271 enum sigchld_state async_events_original_state;
1272
1273 gdb_assert (is_lwp (ptid));
1274
1275 async_events_original_state = linux_nat_async_events (sigchld_sync);
1276
1277 lp = find_lwp_pid (ptid);
1278
1279 /* We assume that we're already attached to any LWP that has an id
1280 equal to the overall process id, and to any LWP that is already
1281 in our list of LWPs. If we're not seeing exit events from threads
1282 and we've had PID wraparound since we last tried to stop all threads,
1283 this assumption might be wrong; fortunately, this is very unlikely
1284 to happen. */
1285 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
1286 {
1287 int status, cloned = 0, signalled = 0;
1288
1289 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
1290 {
1291 /* If we fail to attach to the thread, issue a warning,
1292 but continue. One way this can happen is if thread
1293 creation is interrupted; as of Linux kernel 2.6.19, a
1294 bug may place threads in the thread list and then fail
1295 to create them. */
1296 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1297 safe_strerror (errno));
1298 return -1;
1299 }
1300
1301 if (debug_linux_nat)
1302 fprintf_unfiltered (gdb_stdlog,
1303 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1304 target_pid_to_str (ptid));
1305
1306 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1307 lp = add_lwp (ptid);
1308 lp->stopped = 1;
1309 lp->cloned = cloned;
1310 lp->signalled = signalled;
1311 if (WSTOPSIG (status) != SIGSTOP)
1312 {
1313 lp->resumed = 1;
1314 lp->status = status;
1315 }
1316
1317 target_post_attach (GET_LWP (lp->ptid));
1318
1319 if (debug_linux_nat)
1320 {
1321 fprintf_unfiltered (gdb_stdlog,
1322 "LLAL: waitpid %s received %s\n",
1323 target_pid_to_str (ptid),
1324 status_to_str (status));
1325 }
1326 }
1327 else
1328 {
1329 /* We assume that the LWP representing the original process is
1330 already stopped. Mark it as stopped in the data structure
1331 that the GNU/linux ptrace layer uses to keep track of
1332 threads. Note that this won't have already been done since
1333 the main thread will have, we assume, been stopped by an
1334 attach from a different layer. */
1335 if (lp == NULL)
1336 lp = add_lwp (ptid);
1337 lp->stopped = 1;
1338 }
1339
1340 linux_nat_async_events (async_events_original_state);
1341 return 0;
1342 }
1343
1344 static void
1345 linux_nat_create_inferior (struct target_ops *ops,
1346 char *exec_file, char *allargs, char **env,
1347 int from_tty)
1348 {
1349 int saved_async = 0;
1350 #ifdef HAVE_PERSONALITY
1351 int personality_orig = 0, personality_set = 0;
1352 #endif /* HAVE_PERSONALITY */
1353
1354 /* The fork_child mechanism is synchronous and calls target_wait, so
1355 we have to mask the async mode. */
1356
1357 if (target_can_async_p ())
1358 /* Mask async mode. Creating a child requires a loop calling
1359 wait_for_inferior currently. */
1360 saved_async = linux_nat_async_mask (0);
1361 else
1362 {
1363 /* Restore the original signal mask. */
1364 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1365 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1366 suspend_mask = normal_mask;
1367 sigdelset (&suspend_mask, SIGCHLD);
1368 }
1369
1370 /* Set SIGCHLD to the default action, until after execing the child,
1371 since the inferior inherits the superior's signal mask. It will
1372 be blocked again in linux_nat_wait, which is only reached after
1373 the inferior execing. */
1374 linux_nat_async_events (sigchld_default);
1375
1376 #ifdef HAVE_PERSONALITY
1377 if (disable_randomization)
1378 {
1379 errno = 0;
1380 personality_orig = personality (0xffffffff);
1381 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1382 {
1383 personality_set = 1;
1384 personality (personality_orig | ADDR_NO_RANDOMIZE);
1385 }
1386 if (errno != 0 || (personality_set
1387 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1388 warning (_("Error disabling address space randomization: %s"),
1389 safe_strerror (errno));
1390 }
1391 #endif /* HAVE_PERSONALITY */
1392
1393 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1394
1395 #ifdef HAVE_PERSONALITY
1396 if (personality_set)
1397 {
1398 errno = 0;
1399 personality (personality_orig);
1400 if (errno != 0)
1401 warning (_("Error restoring address space randomization: %s"),
1402 safe_strerror (errno));
1403 }
1404 #endif /* HAVE_PERSONALITY */
1405
1406 if (saved_async)
1407 linux_nat_async_mask (saved_async);
1408 }
1409
1410 static void
1411 linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
1412 {
1413 struct lwp_info *lp;
1414 int status;
1415 ptid_t ptid;
1416
1417 /* FIXME: We should probably accept a list of process id's, and
1418 attach all of them. */
1419 linux_ops->to_attach (ops, args, from_tty);
1420
1421 if (!target_can_async_p ())
1422 {
1423 /* Restore the original signal mask. */
1424 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1425 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1426 suspend_mask = normal_mask;
1427 sigdelset (&suspend_mask, SIGCHLD);
1428 }
1429
1430 /* The ptrace base target adds the main thread with (pid,0,0)
1431 format. Decorate it with lwp info. */
1432 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1433 thread_change_ptid (inferior_ptid, ptid);
1434
1435 /* Add the initial process as the first LWP to the list. */
1436 lp = add_lwp (ptid);
1437
1438 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1439 &lp->signalled);
1440 lp->stopped = 1;
1441
1442 /* Save the wait status to report later. */
1443 lp->resumed = 1;
1444 if (debug_linux_nat)
1445 fprintf_unfiltered (gdb_stdlog,
1446 "LNA: waitpid %ld, saving status %s\n",
1447 (long) GET_PID (lp->ptid), status_to_str (status));
1448
1449 if (!target_can_async_p ())
1450 lp->status = status;
1451 else
1452 {
1453 /* We already waited for this LWP, so put the wait result on the
1454 pipe. The event loop will wake up and get us to handle
1455 this event. */
1456 linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
1457 lp->cloned ? __WCLONE : 0);
1458 /* Register in the event loop. */
1459 target_async (inferior_event_handler, 0);
1460 }
1461 }
1462
1463 /* Get pending status of LP. */
1464 static int
1465 get_pending_status (struct lwp_info *lp, int *status)
1466 {
1467 struct target_waitstatus last;
1468 ptid_t last_ptid;
1469
1470 get_last_target_status (&last_ptid, &last);
1471
1472 /* If this lwp is the ptid that GDB is processing an event from, the
1473 signal will be in stop_signal. Otherwise, in all-stop + sync
1474 mode, we may cache pending events in lp->status while trying to
1475 stop all threads (see stop_wait_callback). In async mode, the
1476 events are always cached in waitpid_queue. */
1477
1478 *status = 0;
1479
1480 if (non_stop)
1481 {
1482 enum target_signal signo = TARGET_SIGNAL_0;
1483
1484 if (is_executing (lp->ptid))
1485 {
1486 /* If the core thought this lwp was executing --- e.g., the
1487 executing property hasn't been updated yet, but the
1488 thread has been stopped with a stop_callback /
1489 stop_wait_callback sequence (see linux_nat_detach for
1490 example) --- we can only have pending events in the local
1491 queue. */
1492 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1493 {
1494 if (WIFSTOPPED (*status))
1495 signo = target_signal_from_host (WSTOPSIG (*status));
1496
1497 /* If not stopped, then the lwp is gone, no use in
1498 resending a signal. */
1499 }
1500 }
1501 else
1502 {
1503 /* If the core knows the thread is not executing, then we
1504 have the last signal recorded in
1505 thread_info->stop_signal. */
1506
1507 struct thread_info *tp = find_thread_pid (lp->ptid);
1508 signo = tp->stop_signal;
1509 }
1510
1511 if (signo != TARGET_SIGNAL_0
1512 && !signal_pass_state (signo))
1513 {
1514 if (debug_linux_nat)
1515 fprintf_unfiltered (gdb_stdlog, "\
1516 GPT: lwp %s had signal %s, but it is in no pass state\n",
1517 target_pid_to_str (lp->ptid),
1518 target_signal_to_string (signo));
1519 }
1520 else
1521 {
1522 if (signo != TARGET_SIGNAL_0)
1523 *status = W_STOPCODE (target_signal_to_host (signo));
1524
1525 if (debug_linux_nat)
1526 fprintf_unfiltered (gdb_stdlog,
1527 "GPT: lwp %s as pending signal %s\n",
1528 target_pid_to_str (lp->ptid),
1529 target_signal_to_string (signo));
1530 }
1531 }
1532 else
1533 {
1534 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1535 {
1536 struct thread_info *tp = find_thread_pid (lp->ptid);
1537 if (tp->stop_signal != TARGET_SIGNAL_0
1538 && signal_pass_state (tp->stop_signal))
1539 *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
1540 }
1541 else if (target_can_async_p ())
1542 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1543 else
1544 *status = lp->status;
1545 }
1546
1547 return 0;
1548 }
1549
1550 static int
1551 detach_callback (struct lwp_info *lp, void *data)
1552 {
1553 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1554
1555 if (debug_linux_nat && lp->status)
1556 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1557 strsignal (WSTOPSIG (lp->status)),
1558 target_pid_to_str (lp->ptid));
1559
1560 /* If there is a pending SIGSTOP, get rid of it. */
1561 if (lp->signalled)
1562 {
1563 if (debug_linux_nat)
1564 fprintf_unfiltered (gdb_stdlog,
1565 "DC: Sending SIGCONT to %s\n",
1566 target_pid_to_str (lp->ptid));
1567
1568 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1569 lp->signalled = 0;
1570 }
1571
1572 /* We don't actually detach from the LWP that has an id equal to the
1573 overall process id just yet. */
1574 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1575 {
1576 int status = 0;
1577
1578 /* Pass on any pending signal for this LWP. */
1579 get_pending_status (lp, &status);
1580
1581 errno = 0;
1582 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1583 WSTOPSIG (status)) < 0)
1584 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1585 safe_strerror (errno));
1586
1587 if (debug_linux_nat)
1588 fprintf_unfiltered (gdb_stdlog,
1589 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1590 target_pid_to_str (lp->ptid),
1591 strsignal (WSTOPSIG (lp->status)));
1592
1593 delete_lwp (lp->ptid);
1594 }
1595
1596 return 0;
1597 }
1598
1599 static void
1600 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1601 {
1602 int pid;
1603 int status;
1604 enum target_signal sig;
1605
1606 if (target_can_async_p ())
1607 linux_nat_async (NULL, 0);
1608
1609 /* Stop all threads before detaching. ptrace requires that the
1610 thread is stopped to successfully detach. */
1611 iterate_over_lwps (stop_callback, NULL);
1612 /* ... and wait until all of them have reported back that
1613 they're no longer running. */
1614 iterate_over_lwps (stop_wait_callback, NULL);
1615
1616 iterate_over_lwps (detach_callback, NULL);
1617
1618 /* Only the initial process should be left right now. */
1619 gdb_assert (num_lwps == 1);
1620
1621 /* Pass on any pending signal for the last LWP. */
1622 if ((args == NULL || *args == '\0')
1623 && get_pending_status (lwp_list, &status) != -1
1624 && WIFSTOPPED (status))
1625 {
1626 /* Put the signal number in ARGS so that inf_ptrace_detach will
1627 pass it along with PTRACE_DETACH. */
1628 args = alloca (8);
1629 sprintf (args, "%d", (int) WSTOPSIG (status));
1630 fprintf_unfiltered (gdb_stdlog,
1631 "LND: Sending signal %s to %s\n",
1632 args,
1633 target_pid_to_str (lwp_list->ptid));
1634 }
1635
1636 /* Destroy LWP info; it's no longer valid. */
1637 init_lwp_list ();
1638
1639 pid = ptid_get_pid (inferior_ptid);
1640
1641 if (target_can_async_p ())
1642 drain_queued_events (pid);
1643
1644 if (forks_exist_p ())
1645 {
1646 /* Multi-fork case. The current inferior_ptid is being detached
1647 from, but there are other viable forks to debug. Detach from
1648 the current fork, and context-switch to the first
1649 available. */
1650 linux_fork_detach (args, from_tty);
1651
1652 if (non_stop && target_can_async_p ())
1653 target_async (inferior_event_handler, 0);
1654 }
1655 else
1656 linux_ops->to_detach (ops, args, from_tty);
1657 }
1658
1659 /* Resume LP. */
1660
1661 static int
1662 resume_callback (struct lwp_info *lp, void *data)
1663 {
1664 if (lp->stopped && lp->status == 0)
1665 {
1666 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1667 0, TARGET_SIGNAL_0);
1668 if (debug_linux_nat)
1669 fprintf_unfiltered (gdb_stdlog,
1670 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1671 target_pid_to_str (lp->ptid));
1672 lp->stopped = 0;
1673 lp->step = 0;
1674 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1675 }
1676 else if (lp->stopped && debug_linux_nat)
1677 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
1678 target_pid_to_str (lp->ptid));
1679 else if (debug_linux_nat)
1680 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
1681 target_pid_to_str (lp->ptid));
1682
1683 return 0;
1684 }
1685
1686 static int
1687 resume_clear_callback (struct lwp_info *lp, void *data)
1688 {
1689 lp->resumed = 0;
1690 return 0;
1691 }
1692
1693 static int
1694 resume_set_callback (struct lwp_info *lp, void *data)
1695 {
1696 lp->resumed = 1;
1697 return 0;
1698 }
1699
1700 static void
1701 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1702 {
1703 struct lwp_info *lp;
1704 int resume_all;
1705
1706 if (debug_linux_nat)
1707 fprintf_unfiltered (gdb_stdlog,
1708 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1709 step ? "step" : "resume",
1710 target_pid_to_str (ptid),
1711 signo ? strsignal (signo) : "0",
1712 target_pid_to_str (inferior_ptid));
1713
1714 if (target_can_async_p ())
1715 /* Block events while we're here. */
1716 linux_nat_async_events (sigchld_sync);
1717
1718 /* A specific PTID means `step only this process id'. */
1719 resume_all = (PIDGET (ptid) == -1);
1720
1721 if (non_stop && resume_all)
1722 internal_error (__FILE__, __LINE__,
1723 "can't resume all in non-stop mode");
1724
1725 if (!non_stop)
1726 {
1727 if (resume_all)
1728 iterate_over_lwps (resume_set_callback, NULL);
1729 else
1730 iterate_over_lwps (resume_clear_callback, NULL);
1731 }
1732
1733 /* If PID is -1, it's the current inferior that should be
1734 handled specially. */
1735 if (PIDGET (ptid) == -1)
1736 ptid = inferior_ptid;
1737
1738 lp = find_lwp_pid (ptid);
1739 gdb_assert (lp != NULL);
1740
1741 /* Convert to something the lower layer understands. */
1742 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1743
1744 /* Remember if we're stepping. */
1745 lp->step = step;
1746
1747 /* Mark this LWP as resumed. */
1748 lp->resumed = 1;
1749
1750 /* If we have a pending wait status for this thread, there is no
1751 point in resuming the process. But first make sure that
1752 linux_nat_wait won't preemptively handle the event - we
1753 should never take this short-circuit if we are going to
1754 leave LP running, since we have skipped resuming all the
1755 other threads. This bit of code needs to be synchronized
1756 with linux_nat_wait. */
1757
1758 /* In async mode, we never have pending wait status. */
1759 if (target_can_async_p () && lp->status)
1760 internal_error (__FILE__, __LINE__, "Pending status in async mode");
1761
1762 if (lp->status && WIFSTOPPED (lp->status))
1763 {
1764 int saved_signo;
1765 struct inferior *inf;
1766
1767 inf = find_inferior_pid (ptid_get_pid (ptid));
1768 gdb_assert (inf);
1769 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1770
1771 /* Defer to common code if we're gaining control of the
1772 inferior. */
1773 if (inf->stop_soon == NO_STOP_QUIETLY
1774 && signal_stop_state (saved_signo) == 0
1775 && signal_print_state (saved_signo) == 0
1776 && signal_pass_state (saved_signo) == 1)
1777 {
1778 if (debug_linux_nat)
1779 fprintf_unfiltered (gdb_stdlog,
1780 "LLR: Not short circuiting for ignored "
1781 "status 0x%x\n", lp->status);
1782
1783 /* FIXME: What should we do if we are supposed to continue
1784 this thread with a signal? */
1785 gdb_assert (signo == TARGET_SIGNAL_0);
1786 signo = saved_signo;
1787 lp->status = 0;
1788 }
1789 }
1790
1791 if (lp->status)
1792 {
1793 /* FIXME: What should we do if we are supposed to continue
1794 this thread with a signal? */
1795 gdb_assert (signo == TARGET_SIGNAL_0);
1796
1797 if (debug_linux_nat)
1798 fprintf_unfiltered (gdb_stdlog,
1799 "LLR: Short circuiting for status 0x%x\n",
1800 lp->status);
1801
1802 return;
1803 }
1804
1805 /* Mark LWP as not stopped to prevent it from being continued by
1806 resume_callback. */
1807 lp->stopped = 0;
1808
1809 if (resume_all)
1810 iterate_over_lwps (resume_callback, NULL);
1811
1812 linux_ops->to_resume (ptid, step, signo);
1813 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1814
1815 if (debug_linux_nat)
1816 fprintf_unfiltered (gdb_stdlog,
1817 "LLR: %s %s, %s (resume event thread)\n",
1818 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1819 target_pid_to_str (ptid),
1820 signo ? strsignal (signo) : "0");
1821
1822 if (target_can_async_p ())
1823 target_async (inferior_event_handler, 0);
1824 }
1825
1826 /* Issue kill to specified lwp. */
1827
1828 static int tkill_failed;
1829
1830 static int
1831 kill_lwp (int lwpid, int signo)
1832 {
1833 errno = 0;
1834
1835 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1836 fails, then we are not using nptl threads and we should be using kill. */
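/* (Editorial note, not original code.) With NPTL, kill () addresses
   the whole thread group, while the tkill syscall addresses a single
   thread; the preferred path below amounts to

       syscall (__NR_tkill, lwpid, signo);

   which delivers SIGNO to LWPID only. Under the older LinuxThreads,
   every thread is a separate process, so plain kill () suffices. */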
1837
1838 #ifdef HAVE_TKILL_SYSCALL
1839 if (!tkill_failed)
1840 {
1841 int ret = syscall (__NR_tkill, lwpid, signo);
1842 if (errno != ENOSYS)
1843 return ret;
1844 errno = 0;
1845 tkill_failed = 1;
1846 }
1847 #endif
1848
1849 return kill (lwpid, signo);
1850 }
1851
1852 /* Handle a GNU/Linux extended wait response. If we see a clone
1853 event, we need to add the new LWP to our list (and not report the
1854 trap to higher layers). This function returns non-zero if the
1855 event should be ignored and we should wait again. If STOPPING is
1856 true, the new LWP remains stopped, otherwise it is continued. */
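/* (Editorial sketch of the status encoding, for reference.) For these
   extended events the kernel reports a SIGTRAP stop with the
   PTRACE_EVENT_* code packed into bits 16 and up:

       status == (event << 16) | (SIGTRAP << 8) | 0x7f

   so WIFSTOPPED (status) and WSTOPSIG (status) == SIGTRAP both hold,
   and `status >> 16' below recovers the event code; W_STOPCODE, used
   elsewhere in this file, rebuilds the low 16 bits the same way. */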
1857
1858 static int
1859 linux_handle_extended_wait (struct lwp_info *lp, int status,
1860 int stopping)
1861 {
1862 int pid = GET_LWP (lp->ptid);
1863 struct target_waitstatus *ourstatus = &lp->waitstatus;
1864 struct lwp_info *new_lp = NULL;
1865 int event = status >> 16;
1866
1867 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1868 || event == PTRACE_EVENT_CLONE)
1869 {
1870 unsigned long new_pid;
1871 int ret;
1872
1873 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1874
1875 /* If we haven't already seen the new PID stop, wait for it now. */
1876 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1877 {
1878 /* The new child has a pending SIGSTOP. We can't affect it until it
1879 hits the SIGSTOP, but we're already attached. */
1880 ret = my_waitpid (new_pid, &status,
1881 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1882 if (ret == -1)
1883 perror_with_name (_("waiting for new child"));
1884 else if (ret != new_pid)
1885 internal_error (__FILE__, __LINE__,
1886 _("wait returned unexpected PID %d"), ret);
1887 else if (!WIFSTOPPED (status))
1888 internal_error (__FILE__, __LINE__,
1889 _("wait returned unexpected status 0x%x"), status);
1890 }
1891
1892 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1893
1894 if (event == PTRACE_EVENT_FORK)
1895 ourstatus->kind = TARGET_WAITKIND_FORKED;
1896 else if (event == PTRACE_EVENT_VFORK)
1897 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1898 else
1899 {
1900 struct cleanup *old_chain;
1901
1902 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1903 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1904 new_lp->cloned = 1;
1905 new_lp->stopped = 1;
1906
1907 if (WSTOPSIG (status) != SIGSTOP)
1908 {
1909 /* This can happen if someone starts sending the new
1910 thread signals with a lower number than SIGSTOP
1911 (e.g. SIGUSR1) before it gets a chance to run. This
1912 is an unlikely case, harder to handle for fork / vfork
1913 than for clone, so we do not try for those - but we
1914 do handle it for clone events here. We'll send the
1915 other signal on to the thread below. */
1916
1917 new_lp->signalled = 1;
1918 }
1919 else
1920 status = 0;
1921
1922 if (non_stop)
1923 {
1924 /* Add the new thread to GDB's lists as soon as possible
1925 so that:
1926
1927 1) the frontend doesn't have to wait for a stop to
1928 display them, and,
1929
1930 2) we tag it with the correct running state. */
1931
1932 /* If the thread_db layer is active, let it know about
1933 this new thread, and add it to GDB's list. */
1934 if (!thread_db_attach_lwp (new_lp->ptid))
1935 {
1936 /* We're not using thread_db. Add it to GDB's
1937 list. */
1938 target_post_attach (GET_LWP (new_lp->ptid));
1939 add_thread (new_lp->ptid);
1940 }
1941
1942 if (!stopping)
1943 {
1944 set_running (new_lp->ptid, 1);
1945 set_executing (new_lp->ptid, 1);
1946 }
1947 }
1948
1949 if (!stopping)
1950 {
1951 new_lp->stopped = 0;
1952 new_lp->resumed = 1;
1953 ptrace (PTRACE_CONT, new_pid, 0,
1954 status ? WSTOPSIG (status) : 0);
1955 }
1956
1957 if (debug_linux_nat)
1958 fprintf_unfiltered (gdb_stdlog,
1959 "LHEW: Got clone event from LWP %ld, resuming\n",
1960 GET_LWP (lp->ptid));
1961 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1962
1963 return 1;
1964 }
1965
1966 return 0;
1967 }
1968
1969 if (event == PTRACE_EVENT_EXEC)
1970 {
1971 ourstatus->kind = TARGET_WAITKIND_EXECD;
1972 ourstatus->value.execd_pathname
1973 = xstrdup (linux_child_pid_to_exec_file (pid));
1974
1975 if (linux_parent_pid)
1976 {
1977 detach_breakpoints (linux_parent_pid);
1978 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1979
1980 linux_parent_pid = 0;
1981 }
1982
1983 /* At this point, all inserted breakpoints are gone. Doing this
1984 as soon as we detect an exec prevents the badness of a breakpoint
1985 deletion writing the saved "shadow contents" back to memory in
1986 order to lift the bp. That shadow is NOT valid after an exec.
1987
1988 Note that we have to do this after the detach_breakpoints
1989 call above, otherwise breakpoints wouldn't be lifted from the
1990 parent on a vfork, because detach_breakpoints would think
1991 that breakpoints are not inserted. */
1992 mark_breakpoints_out ();
1993 return 0;
1994 }
1995
1996 internal_error (__FILE__, __LINE__,
1997 _("unknown ptrace event %d"), event);
1998 }
1999
2000 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2001 exited. */
2002
2003 static int
2004 wait_lwp (struct lwp_info *lp)
2005 {
2006 pid_t pid;
2007 int status;
2008 int thread_dead = 0;
2009
2010 gdb_assert (!lp->stopped);
2011 gdb_assert (lp->status == 0);
2012
2013 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
2014 if (pid == -1 && errno == ECHILD)
2015 {
2016 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
2017 if (pid == -1 && errno == ECHILD)
2018 {
2019 /* The thread has previously exited. We need to delete it
2020 now because, for some vendor 2.4 kernels with NPTL
2021 support backported, there won't be an exit event unless
2022 it is the main thread. 2.6 kernels will report an exit
2023 event for each thread that exits, as expected. */
2024 thread_dead = 1;
2025 if (debug_linux_nat)
2026 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2027 target_pid_to_str (lp->ptid));
2028 }
2029 }
2030
2031 if (!thread_dead)
2032 {
2033 gdb_assert (pid == GET_LWP (lp->ptid));
2034
2035 if (debug_linux_nat)
2036 {
2037 fprintf_unfiltered (gdb_stdlog,
2038 "WL: waitpid %s received %s\n",
2039 target_pid_to_str (lp->ptid),
2040 status_to_str (status));
2041 }
2042 }
2043
2044 /* Check if the thread has exited. */
2045 if (WIFEXITED (status) || WIFSIGNALED (status))
2046 {
2047 thread_dead = 1;
2048 if (debug_linux_nat)
2049 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2050 target_pid_to_str (lp->ptid));
2051 }
2052
2053 if (thread_dead)
2054 {
2055 exit_lwp (lp);
2056 return 0;
2057 }
2058
2059 gdb_assert (WIFSTOPPED (status));
2060
2061 /* Handle GNU/Linux's extended waitstatus for trace events. */
2062 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2063 {
2064 if (debug_linux_nat)
2065 fprintf_unfiltered (gdb_stdlog,
2066 "WL: Handling extended status 0x%06x\n",
2067 status);
2068 if (linux_handle_extended_wait (lp, status, 1))
2069 return wait_lwp (lp);
2070 }
2071
2072 return status;
2073 }
2074
2075 /* Save the most recent siginfo for LP. This is currently only called
2076 for SIGTRAP; some ports use the si_addr field for
2077 target_stopped_data_address. In the future, it may also be used to
2078 restore the siginfo of requeued signals. */
2079
2080 static void
2081 save_siginfo (struct lwp_info *lp)
2082 {
2083 errno = 0;
2084 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2085 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2086
2087 if (errno != 0)
2088 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2089 }
2090
2091 /* Send a SIGSTOP to LP. */
2092
2093 static int
2094 stop_callback (struct lwp_info *lp, void *data)
2095 {
2096 if (!lp->stopped && !lp->signalled)
2097 {
2098 int ret;
2099
2100 if (debug_linux_nat)
2101 {
2102 fprintf_unfiltered (gdb_stdlog,
2103 "SC: kill %s **<SIGSTOP>**\n",
2104 target_pid_to_str (lp->ptid));
2105 }
2106 errno = 0;
2107 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2108 if (debug_linux_nat)
2109 {
2110 fprintf_unfiltered (gdb_stdlog,
2111 "SC: lwp kill %d %s\n",
2112 ret,
2113 errno ? safe_strerror (errno) : "ERRNO-OK");
2114 }
2115
2116 lp->signalled = 1;
2117 gdb_assert (lp->status == 0);
2118 }
2119
2120 return 0;
2121 }
2122
2123 /* Return non-zero if LWP PID has a pending SIGINT. */
2124
2125 static int
2126 linux_nat_has_pending_sigint (int pid)
2127 {
2128 sigset_t pending, blocked, ignored;
2129 int i;
2130
2131 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
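  /* (Editorial note.) linux_proc_pending_signals derives these sets
     from the SigPnd/ShdPnd, SigBlk and SigIgn mask lines of
     /proc/PID/status. */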
2132
2133 if (sigismember (&pending, SIGINT)
2134 && !sigismember (&ignored, SIGINT))
2135 return 1;
2136
2137 return 0;
2138 }
2139
2140 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2141
2142 static int
2143 set_ignore_sigint (struct lwp_info *lp, void *data)
2144 {
2145 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2146 flag to consume the next one. */
2147 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2148 && WSTOPSIG (lp->status) == SIGINT)
2149 lp->status = 0;
2150 else
2151 lp->ignore_sigint = 1;
2152
2153 return 0;
2154 }
2155
2156 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2157 This function is called after we know the LWP has stopped; if the LWP
2158 stopped before the expected SIGINT was delivered, then it will never have
2159 arrived. Also, if the signal was delivered to a shared queue and consumed
2160 by a different thread, it will never be delivered to this LWP. */
2161
2162 static void
2163 maybe_clear_ignore_sigint (struct lwp_info *lp)
2164 {
2165 if (!lp->ignore_sigint)
2166 return;
2167
2168 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2169 {
2170 if (debug_linux_nat)
2171 fprintf_unfiltered (gdb_stdlog,
2172 "MCIS: Clearing bogus flag for %s\n",
2173 target_pid_to_str (lp->ptid));
2174 lp->ignore_sigint = 0;
2175 }
2176 }
2177
2178 /* Wait until LP is stopped. */
2179
2180 static int
2181 stop_wait_callback (struct lwp_info *lp, void *data)
2182 {
2183 if (!lp->stopped)
2184 {
2185 int status;
2186
2187 status = wait_lwp (lp);
2188 if (status == 0)
2189 return 0;
2190
2191 if (lp->ignore_sigint && WIFSTOPPED (status)
2192 && WSTOPSIG (status) == SIGINT)
2193 {
2194 lp->ignore_sigint = 0;
2195
2196 errno = 0;
2197 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2198 if (debug_linux_nat)
2199 fprintf_unfiltered (gdb_stdlog,
2200 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
2201 target_pid_to_str (lp->ptid),
2202 errno ? safe_strerror (errno) : "OK");
2203
2204 return stop_wait_callback (lp, NULL);
2205 }
2206
2207 maybe_clear_ignore_sigint (lp);
2208
2209 if (WSTOPSIG (status) != SIGSTOP)
2210 {
2211 if (WSTOPSIG (status) == SIGTRAP)
2212 {
2213 /* If a LWP other than the LWP that we're reporting an
2214 event for has hit a GDB breakpoint (as opposed to
2215 some random trap signal), then just arrange for it to
2216 hit it again later. We don't keep the SIGTRAP status
2217 and don't forward the SIGTRAP signal to the LWP. We
2218 will handle the current event, eventually we will
2219 resume all LWPs, and this one will get its breakpoint
2220 trap again.
2221
2222 If we do not do this, then we run the risk that the
2223 user will delete or disable the breakpoint, but the
2224 thread will have already tripped on it. */
2225
2226 /* Save the trap's siginfo in case we need it later. */
2227 save_siginfo (lp);
2228
2229 /* Now resume this LWP and get the SIGSTOP event. */
2230 errno = 0;
2231 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2232 if (debug_linux_nat)
2233 {
2234 fprintf_unfiltered (gdb_stdlog,
2235 "PTRACE_CONT %s, 0, 0 (%s)\n",
2236 target_pid_to_str (lp->ptid),
2237 errno ? safe_strerror (errno) : "OK");
2238
2239 fprintf_unfiltered (gdb_stdlog,
2240 "SWC: Candidate SIGTRAP event in %s\n",
2241 target_pid_to_str (lp->ptid));
2242 }
2243 /* Hold this event/waitstatus while we check to see if
2244 there are any more (we still want to get that SIGSTOP). */
2245 stop_wait_callback (lp, NULL);
2246
2247 if (target_can_async_p ())
2248 {
2249 /* Don't leave a pending wait status in async mode.
2250 Retrigger the breakpoint. */
2251 if (!cancel_breakpoint (lp))
2252 {
2253 /* There was no gdb breakpoint set at pc. Put
2254 the event back in the queue. */
2255 if (debug_linux_nat)
2256 fprintf_unfiltered (gdb_stdlog, "\
2257 SWC: leaving SIGTRAP in local queue of %s\n", target_pid_to_str (lp->ptid));
2258 push_waitpid (GET_LWP (lp->ptid),
2259 W_STOPCODE (SIGTRAP),
2260 lp->cloned ? __WCLONE : 0);
2261 }
2262 }
2263 else
2264 {
2265 /* Hold the SIGTRAP for handling by
2266 linux_nat_wait. */
2267 /* If there's another event, throw it back into the
2268 queue. */
2269 if (lp->status)
2270 {
2271 if (debug_linux_nat)
2272 fprintf_unfiltered (gdb_stdlog,
2273 "SWC: kill %s, %s\n",
2274 target_pid_to_str (lp->ptid),
2275 status_to_str ((int) status));
2276 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2277 }
2278 /* Save the sigtrap event. */
2279 lp->status = status;
2280 }
2281 return 0;
2282 }
2283 else
2284 {
2285 /* The thread was stopped with a signal other than
2286 SIGSTOP, and didn't accidentally trip a breakpoint. */
2287
2288 if (debug_linux_nat)
2289 {
2290 fprintf_unfiltered (gdb_stdlog,
2291 "SWC: Pending event %s in %s\n",
2292 status_to_str ((int) status),
2293 target_pid_to_str (lp->ptid));
2294 }
2295 /* Now resume this LWP and get the SIGSTOP event. */
2296 errno = 0;
2297 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2298 if (debug_linux_nat)
2299 fprintf_unfiltered (gdb_stdlog,
2300 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2301 target_pid_to_str (lp->ptid),
2302 errno ? safe_strerror (errno) : "OK");
2303
2304 /* Hold this event/waitstatus while we check to see if
2305 there are any more (we still want to get that SIGSTOP). */
2306 stop_wait_callback (lp, NULL);
2307
2308 /* If the lp->status field is still empty, use it to
2309 hold this event. If not, then this event must be
2310 returned to the event queue of the LWP. */
2311 if (lp->status || target_can_async_p ())
2312 {
2313 if (debug_linux_nat)
2314 {
2315 fprintf_unfiltered (gdb_stdlog,
2316 "SWC: kill %s, %s\n",
2317 target_pid_to_str (lp->ptid),
2318 status_to_str ((int) status));
2319 }
2320 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2321 }
2322 else
2323 lp->status = status;
2324 return 0;
2325 }
2326 }
2327 else
2328 {
2329 /* We caught the SIGSTOP that we intended to catch, so
2330 there's no SIGSTOP pending. */
2331 lp->stopped = 1;
2332 lp->signalled = 0;
2333 }
2334 }
2335
2336 return 0;
2337 }
2338
2339 /* Return non-zero if LP has a wait status pending. */
2340
2341 static int
2342 status_callback (struct lwp_info *lp, void *data)
2343 {
2344 /* Only report a pending wait status if this LWP has indeed been
2345 marked as resumed. */
2346 return (lp->status != 0 && lp->resumed);
2347 }
2348
2349 /* Return non-zero if LP isn't stopped, or still has an event pending. */
2350
2351 static int
2352 running_callback (struct lwp_info *lp, void *data)
2353 {
2354 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2355 }
2356
2357 /* Count the LWP's that have had events. */
2358
2359 static int
2360 count_events_callback (struct lwp_info *lp, void *data)
2361 {
2362 int *count = data;
2363
2364 gdb_assert (count != NULL);
2365
2366 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2367 if (lp->status != 0 && lp->resumed
2368 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2369 (*count)++;
2370
2371 return 0;
2372 }
2373
2374 /* Select the LWP (if any) that is currently being single-stepped. */
2375
2376 static int
2377 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2378 {
2379 if (lp->step && lp->status != 0)
2380 return 1;
2381 else
2382 return 0;
2383 }
2384
2385 /* Select the Nth LWP that has had a SIGTRAP event. */
2386
2387 static int
2388 select_event_lwp_callback (struct lwp_info *lp, void *data)
2389 {
2390 int *selector = data;
2391
2392 gdb_assert (selector != NULL);
2393
2394 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2395 if (lp->status != 0 && lp->resumed
2396 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2397 if ((*selector)-- == 0)
2398 return 1;
2399
2400 return 0;
2401 }
2402
2403 static int
2404 cancel_breakpoint (struct lwp_info *lp)
2405 {
2406 /* Arrange for a breakpoint to be hit again later. We don't keep
2407 the SIGTRAP status and don't forward the SIGTRAP signal to the
2408 LWP. We will handle the current event, eventually we will resume
2409 this LWP, and this breakpoint will trap again.
2410
2411 If we do not do this, then we run the risk that the user will
2412 delete or disable the breakpoint, but the LWP will have already
2413 tripped on it. */
2414
2415 struct regcache *regcache = get_thread_regcache (lp->ptid);
2416 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2417 CORE_ADDR pc;
2418
2419 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
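  /* (Editorial example.) On x86 the breakpoint instruction is the
     one-byte int3 and the trap leaves the PC just past it, so
     gdbarch_decr_pc_after_break returns 1 and PC - 1 is the address
     where the breakpoint was inserted; on architectures that report
     the breakpoint address itself, it returns 0 and PC is unchanged. */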
2420 if (breakpoint_inserted_here_p (pc))
2421 {
2422 if (debug_linux_nat)
2423 fprintf_unfiltered (gdb_stdlog,
2424 "CB: Push back breakpoint for %s\n",
2425 target_pid_to_str (lp->ptid));
2426
2427 /* Back up the PC if necessary. */
2428 if (gdbarch_decr_pc_after_break (gdbarch))
2429 regcache_write_pc (regcache, pc);
2430
2431 return 1;
2432 }
2433 return 0;
2434 }
2435
2436 static int
2437 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2438 {
2439 struct lwp_info *event_lp = data;
2440
2441 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2442 if (lp == event_lp)
2443 return 0;
2444
2445 /* If a LWP other than the LWP that we're reporting an event for has
2446 hit a GDB breakpoint (as opposed to some random trap signal),
2447 then just arrange for it to hit it again later. We don't keep
2448 the SIGTRAP status and don't forward the SIGTRAP signal to the
2449 LWP. We will handle the current event, eventually we will resume
2450 all LWPs, and this one will get its breakpoint trap again.
2451
2452 If we do not do this, then we run the risk that the user will
2453 delete or disable the breakpoint, but the LWP will have already
2454 tripped on it. */
2455
2456 if (lp->status != 0
2457 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
2458 && cancel_breakpoint (lp))
2459 /* Throw away the SIGTRAP. */
2460 lp->status = 0;
2461
2462 return 0;
2463 }
2464
2465 /* Select one LWP out of those that have events pending. */
2466
2467 static void
2468 select_event_lwp (struct lwp_info **orig_lp, int *status)
2469 {
2470 int num_events = 0;
2471 int random_selector;
2472 struct lwp_info *event_lp;
2473
2474 /* Record the wait status for the original LWP. */
2475 (*orig_lp)->status = *status;
2476
2477 /* Give preference to any LWP that is being single-stepped. */
2478 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2479 if (event_lp != NULL)
2480 {
2481 if (debug_linux_nat)
2482 fprintf_unfiltered (gdb_stdlog,
2483 "SEL: Select single-step %s\n",
2484 target_pid_to_str (event_lp->ptid));
2485 }
2486 else
2487 {
2488 /* No single-stepping LWP. Select one at random, out of those
2489 which have had SIGTRAP events. */
2490
2491 /* First see how many SIGTRAP events we have. */
2492 iterate_over_lwps (count_events_callback, &num_events);
2493
2494 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2495 random_selector = (int)
2496 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
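      /* (Editorial note.) The expression above maps rand () uniformly
	 onto the integers 0 .. num_events - 1; e.g. with num_events == 2,
	 the lower half of the rand () range selects event #0 and the
	 upper half selects event #1. */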
2497
2498 if (debug_linux_nat && num_events > 1)
2499 fprintf_unfiltered (gdb_stdlog,
2500 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2501 num_events, random_selector);
2502
2503 event_lp = iterate_over_lwps (select_event_lwp_callback,
2504 &random_selector);
2505 }
2506
2507 if (event_lp != NULL)
2508 {
2509 /* Switch the event LWP. */
2510 *orig_lp = event_lp;
2511 *status = event_lp->status;
2512 }
2513
2514 /* Flush the wait status for the event LWP. */
2515 (*orig_lp)->status = 0;
2516 }
2517
2518 /* Return non-zero if LP has been resumed. */
2519
2520 static int
2521 resumed_callback (struct lwp_info *lp, void *data)
2522 {
2523 return lp->resumed;
2524 }
2525
2526 /* Stop an active thread, verify it still exists, then resume it. */
2527
2528 static int
2529 stop_and_resume_callback (struct lwp_info *lp, void *data)
2530 {
2531 struct lwp_info *ptr;
2532
2533 if (!lp->stopped && !lp->signalled)
2534 {
2535 stop_callback (lp, NULL);
2536 stop_wait_callback (lp, NULL);
2537 /* Resume if the lwp still exists. */
2538 for (ptr = lwp_list; ptr; ptr = ptr->next)
2539 if (lp == ptr)
2540 {
2541 resume_callback (lp, NULL);
2542 resume_set_callback (lp, NULL);
2543 }
2544 }
2545 return 0;
2546 }
2547
2548 /* Check if we should go on and pass this event to common code.
2549 Return the affected lwp if we should, or NULL otherwise. */
2550 static struct lwp_info *
2551 linux_nat_filter_event (int lwpid, int status, int options)
2552 {
2553 struct lwp_info *lp;
2554
2555 lp = find_lwp_pid (pid_to_ptid (lwpid));
2556
2557 /* Check for stop events reported by a process we didn't already
2558 know about - anything not already in our LWP list.
2559
2560 If we're expecting to receive stopped processes after
2561 fork, vfork, and clone events, then we'll just add the
2562 new one to our list and go back to waiting for the event
2563 to be reported - the stopped process might be returned
2564 from waitpid before or after the event is. */
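      /* (Editorial note.) I.e. the initial SIGSTOP of a freshly cloned
	 child can be returned by waitpid before the parent's
	 PTRACE_EVENT_* trap arrives; linux_record_stopped_pid stashes it
	 here so that pull_pid_from_list in linux_handle_extended_wait
	 finds it when the event is finally processed. */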
2565 if (WIFSTOPPED (status) && !lp)
2566 {
2567 linux_record_stopped_pid (lwpid, status);
2568 return NULL;
2569 }
2570
2571 /* Make sure we don't report an event for the exit of an LWP not in
2572 our list, i.e. not part of the current process. This can happen
2573 if we detach from a program we originally forked and then it
2574 exits. */
2575 if (!WIFSTOPPED (status) && !lp)
2576 return NULL;
2577
2578 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2579 CLONE_PTRACE processes which do not use the thread library -
2580 otherwise we wouldn't find the new LWP this way. That doesn't
2581 currently work, and the following code is currently unreachable
2582 due to the two blocks above. If it's fixed some day, this code
2583 should be broken out into a function so that we can also pick up
2584 LWPs from the new interface. */
2585 if (!lp)
2586 {
2587 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2588 if (options & __WCLONE)
2589 lp->cloned = 1;
2590
2591 gdb_assert (WIFSTOPPED (status)
2592 && WSTOPSIG (status) == SIGSTOP);
2593 lp->signalled = 1;
2594
2595 if (!in_thread_list (inferior_ptid))
2596 {
2597 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2598 GET_PID (inferior_ptid));
2599 add_thread (inferior_ptid);
2600 }
2601
2602 add_thread (lp->ptid);
2603 }
2604
2605 /* Save the trap's siginfo in case we need it later. */
2606 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2607 save_siginfo (lp);
2608
2609 /* Handle GNU/Linux's extended waitstatus for trace events. */
2610 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2611 {
2612 if (debug_linux_nat)
2613 fprintf_unfiltered (gdb_stdlog,
2614 "LLW: Handling extended status 0x%06x\n",
2615 status);
2616 if (linux_handle_extended_wait (lp, status, 0))
2617 return NULL;
2618 }
2619
2620 /* Check if the thread has exited. */
2621 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2622 {
2623 /* If this is the main thread, we must stop all threads and
2624 verify if they are still alive. This is because in the nptl
2625 thread model, there is no signal issued for exiting LWPs
2626 other than the main thread. We only get the main thread exit
2627 signal once all child threads have already exited. If we
2628 stop all the threads and use the stop_wait_callback to check
2629 if they have exited we can determine whether this signal
2630 should be ignored or whether it means the end of the debugged
2631 application, regardless of which threading model is being
2632 used. */
2633 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2634 {
2635 lp->stopped = 1;
2636 iterate_over_lwps (stop_and_resume_callback, NULL);
2637 }
2638
2639 if (debug_linux_nat)
2640 fprintf_unfiltered (gdb_stdlog,
2641 "LLW: %s exited.\n",
2642 target_pid_to_str (lp->ptid));
2643
2644 exit_lwp (lp);
2645
2646 /* If there is at least one more LWP, then the exit signal was
2647 not the end of the debugged application and should be
2648 ignored. */
2649 if (num_lwps > 0)
2650 return NULL;
2651 }
2652
2653 /* Check if the current LWP has previously exited. In the nptl
2654 thread model, LWPs other than the main thread do not issue
2655 signals when they exit so we must check whenever the thread has
2656 stopped. A similar check is made in stop_wait_callback(). */
2657 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2658 {
2659 if (debug_linux_nat)
2660 fprintf_unfiltered (gdb_stdlog,
2661 "LLW: %s exited.\n",
2662 target_pid_to_str (lp->ptid));
2663
2664 exit_lwp (lp);
2665
2666 /* Make sure there is at least one thread running. */
2667 gdb_assert (iterate_over_lwps (running_callback, NULL));
2668
2669 /* Discard the event. */
2670 return NULL;
2671 }
2672
2673 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2674 an attempt to stop an LWP. */
2675 if (lp->signalled
2676 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2677 {
2678 if (debug_linux_nat)
2679 fprintf_unfiltered (gdb_stdlog,
2680 "LLW: Delayed SIGSTOP caught for %s.\n",
2681 target_pid_to_str (lp->ptid));
2682
2683 /* This is a delayed SIGSTOP. */
2684 lp->signalled = 0;
2685
2686 registers_changed ();
2687
2688 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2689 lp->step, TARGET_SIGNAL_0);
2690 if (debug_linux_nat)
2691 fprintf_unfiltered (gdb_stdlog,
2692 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2693 lp->step ?
2694 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2695 target_pid_to_str (lp->ptid));
2696
2697 lp->stopped = 0;
2698 gdb_assert (lp->resumed);
2699
2700 /* Discard the event. */
2701 return NULL;
2702 }
2703
2704 /* Make sure we don't report a SIGINT that we have already displayed
2705 for another thread. */
2706 if (lp->ignore_sigint
2707 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2708 {
2709 if (debug_linux_nat)
2710 fprintf_unfiltered (gdb_stdlog,
2711 "LLW: Delayed SIGINT caught for %s.\n",
2712 target_pid_to_str (lp->ptid));
2713
2714 /* This is a delayed SIGINT. */
2715 lp->ignore_sigint = 0;
2716
2717 registers_changed ();
2718 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2719 lp->step, TARGET_SIGNAL_0);
2720 if (debug_linux_nat)
2721 fprintf_unfiltered (gdb_stdlog,
2722 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2723 lp->step ?
2724 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2725 target_pid_to_str (lp->ptid));
2726
2727 lp->stopped = 0;
2728 gdb_assert (lp->resumed);
2729
2730 /* Discard the event. */
2731 return NULL;
2732 }
2733
2734 /* An interesting event. */
2735 gdb_assert (lp);
2736 return lp;
2737 }
2738
2739 /* Get the events stored in the pipe into the local queue, so they are
2740 accessible to queued_waitpid. We need to do this, since it is not
2741 always the case that the event at the head of the pipe is the event
2742 we want. */
2743
2744 static void
2745 pipe_to_local_event_queue (void)
2746 {
2747 if (debug_linux_nat_async)
2748 fprintf_unfiltered (gdb_stdlog,
2749 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2750 linux_nat_num_queued_events);
2751 while (linux_nat_num_queued_events)
2752 {
2753 int lwpid, status, options;
2754 lwpid = linux_nat_event_pipe_pop (&status, &options);
2755 gdb_assert (lwpid > 0);
2756 push_waitpid (lwpid, status, options);
2757 }
2758 }
2759
2760 /* Get the unprocessed events stored in the local queue back into the
2761 pipe, so the event loop realizes there's something else to
2762 process. */
2763
2764 static void
2765 local_event_queue_to_pipe (void)
2766 {
2767 struct waitpid_result *w = waitpid_queue;
2768 while (w)
2769 {
2770 struct waitpid_result *next = w->next;
2771 linux_nat_event_pipe_push (w->pid,
2772 w->status,
2773 w->options);
2774 xfree (w);
2775 w = next;
2776 }
2777 waitpid_queue = NULL;
2778
2779 if (debug_linux_nat_async)
2780 fprintf_unfiltered (gdb_stdlog,
2781 "LEQTP: linux_nat_num_queued_events(%d)\n",
2782 linux_nat_num_queued_events);
2783 }
2784
2785 static ptid_t
2786 linux_nat_wait (struct target_ops *ops,
2787 ptid_t ptid, struct target_waitstatus *ourstatus)
2788 {
2789 struct lwp_info *lp = NULL;
2790 int options = 0;
2791 int status = 0;
2792 pid_t pid = PIDGET (ptid);
2793
2794 if (debug_linux_nat_async)
2795 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2796
2797 /* The first time we get here after starting a new inferior, we may
2798 not have added it to the LWP list yet - this is the earliest
2799 moment at which we know its PID. */
2800 if (num_lwps == 0)
2801 {
2802 gdb_assert (!is_lwp (inferior_ptid));
2803
2804 /* Upgrade the main thread's ptid. */
2805 thread_change_ptid (inferior_ptid,
2806 BUILD_LWP (GET_PID (inferior_ptid),
2807 GET_PID (inferior_ptid)));
2808
2809 lp = add_lwp (inferior_ptid);
2810 lp->resumed = 1;
2811 }
2812
2813 /* Block events while we're here. */
2814 linux_nat_async_events (sigchld_sync);
2815
2816 retry:
2817
2818 /* Make sure there is at least one LWP that has been resumed. */
2819 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
2820
2821 /* First check if there is a LWP with a wait status pending. */
2822 if (pid == -1)
2823 {
2824 /* Any LWP that's been resumed will do. */
2825 lp = iterate_over_lwps (status_callback, NULL);
2826 if (lp)
2827 {
2828 if (target_can_async_p ())
2829 internal_error (__FILE__, __LINE__,
2830 "Found an LWP with a pending status in async mode.");
2831
2832 status = lp->status;
2833 lp->status = 0;
2834
2835 if (debug_linux_nat && status)
2836 fprintf_unfiltered (gdb_stdlog,
2837 "LLW: Using pending wait status %s for %s.\n",
2838 status_to_str (status),
2839 target_pid_to_str (lp->ptid));
2840 }
2841
2842 /* But if we don't find one, we'll have to wait, and check both
2843 cloned and uncloned processes. We start with the cloned
2844 processes. */
2845 options = __WCLONE | WNOHANG;
2846 }
2847 else if (is_lwp (ptid))
2848 {
2849 if (debug_linux_nat)
2850 fprintf_unfiltered (gdb_stdlog,
2851 "LLW: Waiting for specific LWP %s.\n",
2852 target_pid_to_str (ptid));
2853
2854 /* We have a specific LWP to check. */
2855 lp = find_lwp_pid (ptid);
2856 gdb_assert (lp);
2857 status = lp->status;
2858 lp->status = 0;
2859
2860 if (debug_linux_nat && status)
2861 fprintf_unfiltered (gdb_stdlog,
2862 "LLW: Using pending wait status %s for %s.\n",
2863 status_to_str (status),
2864 target_pid_to_str (lp->ptid));
2865
2866 /* If we have to wait, take into account whether PID is a cloned
2867 process or not. And we have to convert it to something that
2868 the layer beneath us can understand. */
2869 options = lp->cloned ? __WCLONE : 0;
2870 pid = GET_LWP (ptid);
2871 }
2872
2873 if (status && lp->signalled)
2874 {
2875 /* A pending SIGSTOP may interfere with the normal stream of
2876 events. In a typical case where interference is a problem,
2877 we have a SIGSTOP signal pending for LWP A while
2878 single-stepping it, encounter an event in LWP B, and take the
2879 pending SIGSTOP while trying to stop LWP A. After processing
2880 the event in LWP B, LWP A is continued, and we'll never see
2881 the SIGTRAP associated with the last time we were
2882 single-stepping LWP A. */
2883
2884 /* Resume the thread. It should halt immediately, returning the
2885 pending SIGSTOP. */
2886 registers_changed ();
2887 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2888 lp->step, TARGET_SIGNAL_0);
2889 if (debug_linux_nat)
2890 fprintf_unfiltered (gdb_stdlog,
2891 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2892 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2893 target_pid_to_str (lp->ptid));
2894 lp->stopped = 0;
2895 gdb_assert (lp->resumed);
2896
2897 /* This should catch the pending SIGSTOP. */
2898 stop_wait_callback (lp, NULL);
2899 }
2900
2901 if (!target_can_async_p ())
2902 {
2903 /* Causes SIGINT to be passed on to the attached process. */
2904 set_sigint_trap ();
2905 }
2906
2907 while (status == 0)
2908 {
2909 pid_t lwpid;
2910
2911 if (target_can_async_p ())
2912 /* In async mode, don't ever block. Only look at the locally
2913 queued events. */
2914 lwpid = queued_waitpid (pid, &status, options);
2915 else
2916 lwpid = my_waitpid (pid, &status, options);
2917
2918 if (lwpid > 0)
2919 {
2920 gdb_assert (pid == -1 || lwpid == pid);
2921
2922 if (debug_linux_nat)
2923 {
2924 fprintf_unfiltered (gdb_stdlog,
2925 "LLW: waitpid %ld received %s\n",
2926 (long) lwpid, status_to_str (status));
2927 }
2928
2929 lp = linux_nat_filter_event (lwpid, status, options);
2930 if (!lp)
2931 {
2932 /* A discarded event. */
2933 status = 0;
2934 continue;
2935 }
2936
2937 break;
2938 }
2939
2940 if (pid == -1)
2941 {
2942 /* Alternate between checking cloned and uncloned processes. */
2943 options ^= __WCLONE;
2944
2945 /* And every time we have checked both:
2946 In async mode, return to event loop;
2947 In sync mode, suspend waiting for a SIGCHLD signal. */
2948 if (options & __WCLONE)
2949 {
2950 if (target_can_async_p ())
2951 {
2952 /* No interesting event. */
2953 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2954
2955 /* Get ready for the next event. */
2956 target_async (inferior_event_handler, 0);
2957
2958 if (debug_linux_nat_async)
2959 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2960
2961 return minus_one_ptid;
2962 }
2963
2964 sigsuspend (&suspend_mask);
2965 }
2966 }
2967
2968 /* We shouldn't end up here unless we want to try again. */
2969 gdb_assert (status == 0);
2970 }
2971
2972 if (!target_can_async_p ())
2973 clear_sigint_trap ();
2974
2975 gdb_assert (lp);
2976
2977 /* Don't report signals that GDB isn't interested in, such as
2978 signals that are neither printed nor stopped upon. Stopping all
2979 threads can be a bit time-consuming so if we want decent
2980 performance with heavily multi-threaded programs, especially when
2981 they're using a high frequency timer, we'd better avoid it if we
2982 can. */
2983
2984 if (WIFSTOPPED (status))
2985 {
2986 int signo = target_signal_from_host (WSTOPSIG (status));
2987 struct inferior *inf;
2988
2989 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2990 gdb_assert (inf);
2991
2992 /* Defer to common code if we get a signal while
2993 single-stepping, since that may need special care, e.g. to
2994 skip the signal handler, or, if we're gaining control of the
2995 inferior. */
2996 if (!lp->step
2997 && inf->stop_soon == NO_STOP_QUIETLY
2998 && signal_stop_state (signo) == 0
2999 && signal_print_state (signo) == 0
3000 && signal_pass_state (signo) == 1)
3001 {
3002 /* FIXME: kettenis/2001-06-06: Should we resume all threads
3003 here? It is not clear we should. GDB may not expect
3004 other threads to run. On the other hand, not resuming
3005 newly attached threads may cause an unwanted delay in
3006 getting them running. */
3007 registers_changed ();
3008 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
3009 lp->step, signo);
3010 if (debug_linux_nat)
3011 fprintf_unfiltered (gdb_stdlog,
3012 "LLW: %s %s, %s (preempt 'handle')\n",
3013 lp->step ?
3014 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3015 target_pid_to_str (lp->ptid),
3016 signo ? strsignal (signo) : "0");
3017 lp->stopped = 0;
3018 status = 0;
3019 goto retry;
3020 }
3021
3022 if (!non_stop)
3023 {
3024 /* Only do the below in all-stop, as we currently use SIGINT
3025 to implement target_stop (see linux_nat_stop) in
3026 non-stop. */
3027 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3028 {
3029 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3030 forwarded to the entire process group, that is, all LWPs
3031 will receive it - unless they're using CLONE_THREAD to
3032 share signals. Since we only want to report it once, we
3033 mark it as ignored for all LWPs except this one. */
3034 iterate_over_lwps (set_ignore_sigint, NULL);
3035 lp->ignore_sigint = 0;
3036 }
3037 else
3038 maybe_clear_ignore_sigint (lp);
3039 }
3040 }
3041
3042 /* This LWP is stopped now. */
3043 lp->stopped = 1;
3044
3045 if (debug_linux_nat)
3046 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3047 status_to_str (status), target_pid_to_str (lp->ptid));
3048
3049 if (!non_stop)
3050 {
3051 /* Now stop all other LWP's ... */
3052 iterate_over_lwps (stop_callback, NULL);
3053
3054 /* ... and wait until all of them have reported back that
3055 they're no longer running. */
3056 iterate_over_lwps (stop_wait_callback, NULL);
3057
3058 /* If we're not waiting for a specific LWP, choose an event LWP
3059 from among those that have had events. Giving equal priority
3060 to all LWPs that have had events helps prevent
3061 starvation. */
3062 if (pid == -1)
3063 select_event_lwp (&lp, &status);
3064 }
3065
3066 /* Now that we've selected our final event LWP, cancel any
3067 breakpoints in other LWPs that have hit a GDB breakpoint. See
3068 the comment in cancel_breakpoints_callback to find out why. */
3069 iterate_over_lwps (cancel_breakpoints_callback, lp);
3070
3071 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3072 {
3073 if (debug_linux_nat)
3074 fprintf_unfiltered (gdb_stdlog,
3075 "LLW: trap ptid is %s.\n",
3076 target_pid_to_str (lp->ptid));
3077 }
3078
3079 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3080 {
3081 *ourstatus = lp->waitstatus;
3082 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3083 }
3084 else
3085 store_waitstatus (ourstatus, status);
3086
3087 /* Get ready for the next event. */
3088 if (target_can_async_p ())
3089 target_async (inferior_event_handler, 0);
3090
3091 if (debug_linux_nat_async)
3092 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3093
3094 return lp->ptid;
3095 }
3096
3097 static int
3098 kill_callback (struct lwp_info *lp, void *data)
3099 {
3100 errno = 0;
3101 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3102 if (debug_linux_nat)
3103 fprintf_unfiltered (gdb_stdlog,
3104 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3105 target_pid_to_str (lp->ptid),
3106 errno ? safe_strerror (errno) : "OK");
3107
3108 return 0;
3109 }
3110
3111 static int
3112 kill_wait_callback (struct lwp_info *lp, void *data)
3113 {
3114 pid_t pid;
3115
3116 /* We must make sure that there are no pending events (delayed
3117 SIGSTOPs, pending SIGTRAPs, etc.) left behind, so that the current
3118 program doesn't interfere with any following debugging session. */
3119
3120 /* For cloned processes we must check both with __WCLONE and
3121 without, since the exit status of a cloned process isn't reported
3122 with __WCLONE. */
3123 if (lp->cloned)
3124 {
3125 do
3126 {
3127 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
3128 if (pid != (pid_t) -1)
3129 {
3130 if (debug_linux_nat)
3131 fprintf_unfiltered (gdb_stdlog,
3132 "KWC: wait %s received unknown.\n",
3133 target_pid_to_str (lp->ptid));
3134 /* The Linux kernel sometimes fails to kill a thread
3135 completely after PTRACE_KILL; the thread goes from the
3136 stop point in do_fork out to the one in
3137 get_signal_to_deliver and waits again. So kill it
3138 again. */
3139 kill_callback (lp, NULL);
3140 }
3141 }
3142 while (pid == GET_LWP (lp->ptid));
3143
3144 gdb_assert (pid == -1 && errno == ECHILD);
3145 }
3146
3147 do
3148 {
3149 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
3150 if (pid != (pid_t) -1)
3151 {
3152 if (debug_linux_nat)
3153 fprintf_unfiltered (gdb_stdlog,
3154 "KWC: wait %s received unk.\n",
3155 target_pid_to_str (lp->ptid));
3156 /* See the call to kill_callback above. */
3157 kill_callback (lp, NULL);
3158 }
3159 }
3160 while (pid == GET_LWP (lp->ptid));
3161
3162 gdb_assert (pid == -1 && errno == ECHILD);
3163 return 0;
3164 }
3165
3166 static void
3167 linux_nat_kill (void)
3168 {
3169 struct target_waitstatus last;
3170 ptid_t last_ptid;
3171 int status;
3172
3173 if (target_can_async_p ())
3174 target_async (NULL, 0);
3175
3176 /* If we're stopped while forking and we haven't followed yet,
3177 kill the other task. We need to do this first because the
3178 parent will be sleeping if this is a vfork. */
3179
3180 get_last_target_status (&last_ptid, &last);
3181
3182 if (last.kind == TARGET_WAITKIND_FORKED
3183 || last.kind == TARGET_WAITKIND_VFORKED)
3184 {
3185 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
3186 wait (&status);
3187 }
3188
3189 if (forks_exist_p ())
3190 {
3191 linux_fork_killall ();
3192 drain_queued_events (-1);
3193 }
3194 else
3195 {
3196 /* Stop all threads before killing them, since ptrace requires
3197 that the thread is stopped to successfully PTRACE_KILL. */
3198 iterate_over_lwps (stop_callback, NULL);
3199 /* ... and wait until all of them have reported back that
3200 they're no longer running. */
3201 iterate_over_lwps (stop_wait_callback, NULL);
3202
3203 /* Kill all LWP's ... */
3204 iterate_over_lwps (kill_callback, NULL);
3205
3206 /* ... and wait until we've flushed all events. */
3207 iterate_over_lwps (kill_wait_callback, NULL);
3208 }
3209
3210 target_mourn_inferior ();
3211 }
3212
3213 static void
3214 linux_nat_mourn_inferior (struct target_ops *ops)
3215 {
3216 /* Destroy LWP info; it's no longer valid. */
3217 init_lwp_list ();
3218
3219 if (! forks_exist_p ())
3220 {
3221 /* Normal case, no other forks available. */
3222 if (target_can_async_p ())
3223 linux_nat_async (NULL, 0);
3224 linux_ops->to_mourn_inferior (ops);
3225 }
3226 else
3227 /* Multi-fork case. The current inferior_ptid has exited, but
3228 there are other viable forks to debug. Delete the exiting
3229 one and context-switch to the first available. */
3230 linux_fork_mourn_inferior ();
3231 }
3232
3233 /* Convert a native/host siginfo object into/from the siginfo in the
3234 layout of the inferior's architecture. */
3235
3236 static void
3237 siginfo_fixup (struct siginfo *siginfo, gdb_byte *inf_siginfo, int direction)
3238 {
3239 int done = 0;
3240
3241 if (linux_nat_siginfo_fixup != NULL)
3242 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3243
3244 /* If there was no callback, or the callback didn't do anything,
3245 then just do a straight memcpy. */
3246 if (!done)
3247 {
3248 if (direction == 1)
3249 memcpy (siginfo, inf_siginfo, sizeof (struct siginfo));
3250 else
3251 memcpy (inf_siginfo, siginfo, sizeof (struct siginfo));
3252 }
3253 }
3254
3255 static LONGEST
3256 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3257 const char *annex, gdb_byte *readbuf,
3258 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3259 {
3260 struct lwp_info *lp;
3261 LONGEST n;
3262 int pid;
3263 struct siginfo siginfo;
3264 gdb_byte inf_siginfo[sizeof (struct siginfo)];
3265
3266 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3267 gdb_assert (readbuf || writebuf);
3268
3269 pid = GET_LWP (inferior_ptid);
3270 if (pid == 0)
3271 pid = GET_PID (inferior_ptid);
3272
3273 if (offset > sizeof (siginfo))
3274 return -1;
3275
3276 errno = 0;
3277 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3278 if (errno != 0)
3279 return -1;
3280
3281 /* When GDB is built as a 64-bit application, ptrace writes into
3282 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3283 inferior with a 64-bit GDB should look the same as debugging it
3284 with a 32-bit GDB, we need to convert it. GDB core always sees
3285 the converted layout, so any read/write will have to be done
3286 post-conversion. */
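  /* (Editorial example.) E.g. with a 64-bit GDB debugging a 32-bit
     inferior, the host siginfo_t carries 64-bit pointer and long
     fields, and the arch-supplied fixup narrows fields such as
     si_addr to the inferior's 32-bit layout. */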
3287 siginfo_fixup (&siginfo, inf_siginfo, 0);
3288
3289 if (offset + len > sizeof (siginfo))
3290 len = sizeof (siginfo) - offset;
3291
3292 if (readbuf != NULL)
3293 memcpy (readbuf, inf_siginfo + offset, len);
3294 else
3295 {
3296 memcpy (inf_siginfo + offset, writebuf, len);
3297
3298 /* Convert back to ptrace layout before flushing it out. */
3299 siginfo_fixup (&siginfo, inf_siginfo, 1);
3300
3301 errno = 0;
3302 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3303 if (errno != 0)
3304 return -1;
3305 }
3306
3307 return len;
3308 }
3309
3310 static LONGEST
3311 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3312 const char *annex, gdb_byte *readbuf,
3313 const gdb_byte *writebuf,
3314 ULONGEST offset, LONGEST len)
3315 {
3316 struct cleanup *old_chain;
3317 LONGEST xfer;
3318
3319 if (object == TARGET_OBJECT_SIGNAL_INFO)
3320 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3321 offset, len);
3322
3323 old_chain = save_inferior_ptid ();
3324
3325 if (is_lwp (inferior_ptid))
3326 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3327
3328 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3329 offset, len);
3330
3331 do_cleanups (old_chain);
3332 return xfer;
3333 }
3334
3335 static int
3336 linux_nat_thread_alive (ptid_t ptid)
3337 {
3338 int err;
3339
3340 gdb_assert (is_lwp (ptid));
3341
3342 /* Send signal 0 instead of using ptrace, because ptracing a
3343 running thread errors out, claiming that the thread doesn't
3344 exist. */
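  /* (Editorial note.) Signal 0 is the standard existence probe: no
     signal is actually delivered, but the kernel still performs the
     lookup, so a nonzero result here (typically ESRCH) means the
     thread is gone. */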
3345 err = kill_lwp (GET_LWP (ptid), 0);
3346
3347 if (debug_linux_nat)
3348 fprintf_unfiltered (gdb_stdlog,
3349 "LLTA: KILL(SIG0) %s (%s)\n",
3350 target_pid_to_str (ptid),
3351 err ? safe_strerror (err) : "OK");
3352
3353 if (err != 0)
3354 return 0;
3355
3356 return 1;
3357 }
3358
3359 static char *
3360 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3361 {
3362 static char buf[64];
3363
3364 if (is_lwp (ptid)
3365 && ((lwp_list && lwp_list->next)
3366 || GET_PID (ptid) != GET_LWP (ptid)))
3367 {
3368 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3369 return buf;
3370 }
3371
3372 return normal_pid_to_str (ptid);
3373 }
3374
3375 static void
3376 sigchld_handler (int signo)
3377 {
3378 if (target_async_permitted
3379 && linux_nat_async_events_state != sigchld_sync
3380 && signo == SIGCHLD)
3381 /* It is *always* a bug to hit this. */
3382 internal_error (__FILE__, __LINE__,
3383 "sigchld_handler called when async events are enabled");
3384
3385 /* Do nothing. The only reason for this handler is that it allows
3386 us to use sigsuspend in linux_nat_wait above to wait for the
3387 arrival of a SIGCHLD. */
3388 }
3389
3390 /* Accepts an integer PID; returns a string representing a file that
3391 can be opened to get the symbols for the child process. */
3392
3393 static char *
3394 linux_child_pid_to_exec_file (int pid)
3395 {
3396 char *name1, *name2;
3397
3398 name1 = xmalloc (MAXPATHLEN);
3399 name2 = xmalloc (MAXPATHLEN);
3400 make_cleanup (xfree, name1);
3401 make_cleanup (xfree, name2);
3402 memset (name2, 0, MAXPATHLEN);
3403
3404 sprintf (name1, "/proc/%d/exe", pid);
3405 if (readlink (name1, name2, MAXPATHLEN) > 0)
3406 return name2;
3407 else
3408 return name1;
3409 }
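/* (Editorial example.) For pid 1234 this resolves the symlink
   "/proc/1234/exe"; if readlink fails, the literal "/proc/1234/exe"
   path is returned instead for the caller to try opening. */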
3410
3411 /* Service function for corefiles and info proc. */
3412
3413 static int
3414 read_mapping (FILE *mapfile,
3415 long long *addr,
3416 long long *endaddr,
3417 char *permissions,
3418 long long *offset,
3419 char *device, long long *inode, char *filename)
3420 {
3421 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3422 addr, endaddr, permissions, offset, device, inode);
3423
3424 filename[0] = '\0';
3425 if (ret > 0 && ret != EOF)
3426 {
3427 /* Eat everything up to EOL for the filename. This will prevent
3428 weird filenames (such as one with embedded whitespace) from
3429 confusing this code. It also makes this code more robust with
3430 respect to annotations the kernel may add after the filename.
3431
3432 Note the filename is used for informational purposes
3433 only. */
3434 ret += fscanf (mapfile, "%[^\n]\n", filename);
3435 }
3436
3437 return (ret != 0 && ret != EOF);
3438 }
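/* (Editorial example.) A typical /proc/PID/maps line parsed above:

       08048000-0804c000 r-xp 00000000 08:01 12345      /bin/cat

   i.e. addr-endaddr permissions offset device inode filename, with
   the filename (and any trailing kernel annotations) consumed up to
   the end of the line. */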
3439
3440 /* Fills the "to_find_memory_regions" target vector. Lists the memory
3441 regions in the inferior for a corefile. */
3442
3443 static int
3444 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3445 unsigned long,
3446 int, int, int, void *), void *obfd)
3447 {
3448 long long pid = PIDGET (inferior_ptid);
3449 char mapsfilename[MAXPATHLEN];
3450 FILE *mapsfile;
3451 long long addr, endaddr, size, offset, inode;
3452 char permissions[8], device[8], filename[MAXPATHLEN];
3453 int read, write, exec;
3454 int ret;
3455 struct cleanup *cleanup;
3456
3457 /* Compose the filename for the /proc memory map, and open it. */
3458 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3459 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
3460 error (_("Could not open %s."), mapsfilename);
3461 cleanup = make_cleanup_fclose (mapsfile);
3462
3463 if (info_verbose)
3464 fprintf_filtered (gdb_stdout,
3465 "Reading memory regions from %s\n", mapsfilename);
3466
3467 /* Now iterate until end-of-file. */
3468 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3469 &offset, &device[0], &inode, &filename[0]))
3470 {
3471 size = endaddr - addr;
3472
3473 /* Get the segment's permissions. */
3474 read = (strchr (permissions, 'r') != 0);
3475 write = (strchr (permissions, 'w') != 0);
3476 exec = (strchr (permissions, 'x') != 0);
3477
3478 if (info_verbose)
3479 {
3480 fprintf_filtered (gdb_stdout,
3481 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3482 size, paddr_nz (addr),
3483 read ? 'r' : ' ',
3484 write ? 'w' : ' ', exec ? 'x' : ' ');
3485 if (filename[0])
3486 fprintf_filtered (gdb_stdout, " for %s", filename);
3487 fprintf_filtered (gdb_stdout, "\n");
3488 }
3489
3490 /* Invoke the callback function to create the corefile
3491 segment. */
3492 func (addr, size, read, write, exec, obfd);
3493 }
3494 do_cleanups (cleanup);
3495 return 0;
3496 }
3497
3498 static int
3499 find_signalled_thread (struct thread_info *info, void *data)
3500 {
3501 if (info->stop_signal != TARGET_SIGNAL_0
3502 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
3503 return 1;
3504
3505 return 0;
3506 }
3507
3508 static enum target_signal
3509 find_stop_signal (void)
3510 {
3511 struct thread_info *info =
3512 iterate_over_threads (find_signalled_thread, NULL);
3513
3514 if (info)
3515 return info->stop_signal;
3516 else
3517 return TARGET_SIGNAL_0;
3518 }
3519
3520 /* Records the thread's register state for the corefile note
3521 section. */
3522
3523 static char *
3524 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
3525 char *note_data, int *note_size,
3526 enum target_signal stop_signal)
3527 {
3528 gdb_gregset_t gregs;
3529 gdb_fpregset_t fpregs;
3530 unsigned long lwp = ptid_get_lwp (ptid);
3531 struct regcache *regcache = get_thread_regcache (ptid);
3532 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3533 const struct regset *regset;
3534 int core_regset_p;
3535 struct cleanup *old_chain;
3536 struct core_regset_section *sect_list;
3537 char *gdb_regset;
3538
3539 old_chain = save_inferior_ptid ();
3540 inferior_ptid = ptid;
3541 target_fetch_registers (regcache, -1);
3542 do_cleanups (old_chain);
3543
3544 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
3545 sect_list = gdbarch_core_regset_sections (gdbarch);
3546
3547 if (core_regset_p
3548 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3549 sizeof (gregs))) != NULL
3550 && regset->collect_regset != NULL)
3551 regset->collect_regset (regset, regcache, -1,
3552 &gregs, sizeof (gregs));
3553 else
3554 fill_gregset (regcache, &gregs, -1);
3555
3556 note_data = (char *) elfcore_write_prstatus (obfd,
3557 note_data,
3558 note_size,
3559 lwp,
3560 stop_signal, &gregs);
3561
3562 /* The loop below uses the new struct core_regset_section, which stores
3563 the supported section names and sizes for the core file. Note that
3564 the PRSTATUS note needs to be treated specially. But the other notes are
3565 structurally the same, so they can benefit from the new struct. */
3566 if (core_regset_p && sect_list != NULL)
3567 while (sect_list->sect_name != NULL)
3568 {
3569 /* .reg was already handled above. */
3570 if (strcmp (sect_list->sect_name, ".reg") == 0)
3571 {
3572 sect_list++;
3573 continue;
3574 }
3575 regset = gdbarch_regset_from_core_section (gdbarch,
3576 sect_list->sect_name,
3577 sect_list->size);
3578 gdb_assert (regset && regset->collect_regset);
3579 gdb_regset = xmalloc (sect_list->size);
3580 regset->collect_regset (regset, regcache, -1,
3581 gdb_regset, sect_list->size);
3582 note_data = (char *) elfcore_write_register_note (obfd,
3583 note_data,
3584 note_size,
3585 sect_list->sect_name,
3586 gdb_regset,
3587 sect_list->size);
3588 xfree (gdb_regset);
3589 sect_list++;
3590 }
3591
3592 /* For architectures that do not have the struct core_regset_section
3593 implemented, we use the old method. When all the architectures have
3594 the new support, the code below should be deleted. */
3595 else
3596 {
3597 if (core_regset_p
3598 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3599 sizeof (fpregs))) != NULL
3600 && regset->collect_regset != NULL)
3601 regset->collect_regset (regset, regcache, -1,
3602 &fpregs, sizeof (fpregs));
3603 else
3604 fill_fpregset (regcache, &fpregs, -1);
3605
3606 note_data = (char *) elfcore_write_prfpreg (obfd,
3607 note_data,
3608 note_size,
3609 &fpregs, sizeof (fpregs));
3610 }
3611
3612 return note_data;
3613 }
3614
3615 struct linux_nat_corefile_thread_data
3616 {
3617 bfd *obfd;
3618 char *note_data;
3619 int *note_size;
3620 int num_notes;
3621 enum target_signal stop_signal;
3622 };
3623
3624 /* Called by gdbthread.c once per thread. Records the thread's
3625 register state for the corefile note section. */
3626
3627 static int
3628 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3629 {
3630 struct linux_nat_corefile_thread_data *args = data;
3631
3632 args->note_data = linux_nat_do_thread_registers (args->obfd,
3633 ti->ptid,
3634 args->note_data,
3635 args->note_size,
3636 args->stop_signal);
3637 args->num_notes++;
3638
3639 return 0;
3640 }
3641
3642 /* Fills the "to_make_corefile_note" target vector. Builds the note
3643 section for a corefile, and returns it in a malloc buffer. */
3644
3645 static char *
3646 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3647 {
3648 struct linux_nat_corefile_thread_data thread_args;
3649 struct cleanup *old_chain;
3650 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3651 char fname[16] = { '\0' };
3652 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3653 char psargs[80] = { '\0' };
3654 char *note_data = NULL;
3655 ptid_t current_ptid = inferior_ptid;
3656 gdb_byte *auxv;
3657 int auxv_len;
3658
3659 if (get_exec_file (0))
3660 {
3661 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3662 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3663 if (get_inferior_args ())
3664 {
3665 char *string_end;
3666 char *psargs_end = psargs + sizeof (psargs);
3667
3668 /* linux_elfcore_write_prpsinfo () handles non-zero-terminated
3669 strings fine. */
3670 string_end = memchr (psargs, 0, sizeof (psargs));
3671 if (string_end != NULL)
3672 {
3673 *string_end++ = ' ';
3674 strncpy (string_end, get_inferior_args (),
3675 psargs_end - string_end);
3676 }
3677 }
3678 note_data = (char *) elfcore_write_prpsinfo (obfd,
3679 note_data,
3680 note_size, fname, psargs);
3681 }
3682
3683 /* Dump information for threads. */
3684 thread_args.obfd = obfd;
3685 thread_args.note_data = note_data;
3686 thread_args.note_size = note_size;
3687 thread_args.num_notes = 0;
3688 thread_args.stop_signal = find_stop_signal ();
3689 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3690 gdb_assert (thread_args.num_notes != 0);
3691 note_data = thread_args.note_data;
3692
3693 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3694 NULL, &auxv);
3695 if (auxv_len > 0)
3696 {
3697 note_data = elfcore_write_note (obfd, note_data, note_size,
3698 "CORE", NT_AUXV, auxv, auxv_len);
3699 xfree (auxv);
3700 }
3701
3702 make_cleanup (xfree, note_data);
3703 return note_data;
3704 }
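
/* The resulting note buffer thus contains, in order: an NT_PRPSINFO
   note describing the process, one set of register notes per LWP
   (written by the callback above), and finally an NT_AUXV note with
   the auxiliary vector -- broadly the same layout the kernel uses
   for its own core dumps.  */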
3705
3706 /* Implement the "info proc" command. */
3707
3708 static void
3709 linux_nat_info_proc_cmd (char *args, int from_tty)
3710 {
3711 long long pid = PIDGET (inferior_ptid);
3712 FILE *procfile;
3713 char **argv = NULL;
3714 char buffer[MAXPATHLEN];
3715 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3716 int cmdline_f = 1;
3717 int cwd_f = 1;
3718 int exe_f = 1;
3719 int mappings_f = 0;
3720 int environ_f = 0;
3721 int status_f = 0;
3722 int stat_f = 0;
3723 int all = 0;
3724 struct stat dummy;
3725
3726 if (args)
3727 {
3728 /* Break up 'args' into an argv array. */
3729 argv = gdb_buildargv (args);
3730 make_cleanup_freeargv (argv);
3731 }
3732 while (argv != NULL && *argv != NULL)
3733 {
3734 if (isdigit (argv[0][0]))
3735 {
3736 pid = strtoul (argv[0], NULL, 10);
3737 }
3738 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3739 {
3740 mappings_f = 1;
3741 }
3742 else if (strcmp (argv[0], "status") == 0)
3743 {
3744 status_f = 1;
3745 }
3746 else if (strcmp (argv[0], "stat") == 0)
3747 {
3748 stat_f = 1;
3749 }
3750 else if (strcmp (argv[0], "cmd") == 0)
3751 {
3752 cmdline_f = 1;
3753 }
3754 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3755 {
3756 exe_f = 1;
3757 }
3758 else if (strcmp (argv[0], "cwd") == 0)
3759 {
3760 cwd_f = 1;
3761 }
3762 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3763 {
3764 all = 1;
3765 }
3766 else
3767 {
3768 /* [...] (future options here) */
3769 }
3770 argv++;
3771 }
3772 if (pid == 0)
3773 error (_("No current process: you must name one."));
3774
3775 sprintf (fname1, "/proc/%lld", pid);
3776 if (stat (fname1, &dummy) != 0)
3777 error (_("No /proc directory: '%s'"), fname1);
3778
3779 printf_filtered (_("process %lld\n"), pid);
3780 if (cmdline_f || all)
3781 {
3782 sprintf (fname1, "/proc/%lld/cmdline", pid);
3783 if ((procfile = fopen (fname1, "r")) != NULL)
3784 {
3785 struct cleanup *cleanup = make_cleanup_fclose (procfile);
3786 if (fgets (buffer, sizeof (buffer), procfile))
3787 printf_filtered ("cmdline = '%s'\n", buffer);
3788 else
3789 warning (_("unable to read '%s'"), fname1);
3790 do_cleanups (cleanup);
3791 }
3792 else
3793 warning (_("unable to open /proc file '%s'"), fname1);
3794 }
3795 if (cwd_f || all)
3796 {
3797 sprintf (fname1, "/proc/%lld/cwd", pid);
3798 memset (fname2, 0, sizeof (fname2));
3799 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3800 printf_filtered ("cwd = '%s'\n", fname2);
3801 else
3802 warning (_("unable to read link '%s'"), fname1);
3803 }
3804 if (exe_f || all)
3805 {
3806 sprintf (fname1, "/proc/%lld/exe", pid);
3807 memset (fname2, 0, sizeof (fname2));
3808 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3809 printf_filtered ("exe = '%s'\n", fname2);
3810 else
3811 warning (_("unable to read link '%s'"), fname1);
3812 }
3813 if (mappings_f || all)
3814 {
3815 sprintf (fname1, "/proc/%lld/maps", pid);
3816 if ((procfile = fopen (fname1, "r")) != NULL)
3817 {
3818 long long addr, endaddr, size, offset, inode;
3819 char permissions[8], device[8], filename[MAXPATHLEN];
3820 struct cleanup *cleanup;
3821
3822 cleanup = make_cleanup_fclose (procfile);
3823 printf_filtered (_("Mapped address spaces:\n\n"));
3824 if (gdbarch_addr_bit (current_gdbarch) == 32)
3825 {
3826 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3827 "Start Addr",
3828 " End Addr",
3829 " Size", " Offset", "objfile");
3830 }
3831 else
3832 {
3833 printf_filtered (" %18s %18s %10s %10s %7s\n",
3834 "Start Addr",
3835 " End Addr",
3836 " Size", " Offset", "objfile");
3837 }
3838
3839 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3840 &offset, &device[0], &inode, &filename[0]))
3841 {
3842 size = endaddr - addr;
3843
3844 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3845 calls here (and possibly above) should be abstracted
3846 out into their own functions? Andrew suggests using
3847 a generic local_address_string instead to print out
3848 the addresses; that makes sense to me, too. */
3849
3850 if (gdbarch_addr_bit (current_gdbarch) == 32)
3851 {
3852 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3853 (unsigned long) addr, /* FIXME: pr_addr */
3854 (unsigned long) endaddr,
3855 (int) size,
3856 (unsigned int) offset,
3857 filename[0] ? filename : "");
3858 }
3859 else
3860 {
3861 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3862 (unsigned long) addr, /* FIXME: pr_addr */
3863 (unsigned long) endaddr,
3864 (int) size,
3865 (unsigned int) offset,
3866 filename[0] ? filename : "");
3867 }
3868 }
3869
3870 do_cleanups (cleanup);
3871 }
3872 else
3873 warning (_("unable to open /proc file '%s'"), fname1);
3874 }
3875 if (status_f || all)
3876 {
3877 sprintf (fname1, "/proc/%lld/status", pid);
3878 if ((procfile = fopen (fname1, "r")) != NULL)
3879 {
3880 struct cleanup *cleanup = make_cleanup_fclose (procfile);
3881 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3882 puts_filtered (buffer);
3883 do_cleanups (cleanup);
3884 }
3885 else
3886 warning (_("unable to open /proc file '%s'"), fname1);
3887 }
3888 if (stat_f || all)
3889 {
3890 sprintf (fname1, "/proc/%lld/stat", pid);
3891 if ((procfile = fopen (fname1, "r")) != NULL)
3892 {
3893 int itmp;
3894 char ctmp;
3895 long ltmp;
3896 struct cleanup *cleanup = make_cleanup_fclose (procfile);
3897
3898 if (fscanf (procfile, "%d ", &itmp) > 0)
3899 printf_filtered (_("Process: %d\n"), itmp);
3900 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
3901 printf_filtered (_("Exec file: %s\n"), buffer);
3902 if (fscanf (procfile, "%c ", &ctmp) > 0)
3903 printf_filtered (_("State: %c\n"), ctmp);
3904 if (fscanf (procfile, "%d ", &itmp) > 0)
3905 printf_filtered (_("Parent process: %d\n"), itmp);
3906 if (fscanf (procfile, "%d ", &itmp) > 0)
3907 printf_filtered (_("Process group: %d\n"), itmp);
3908 if (fscanf (procfile, "%d ", &itmp) > 0)
3909 printf_filtered (_("Session id: %d\n"), itmp);
3910 if (fscanf (procfile, "%d ", &itmp) > 0)
3911 printf_filtered (_("TTY: %d\n"), itmp);
3912 if (fscanf (procfile, "%d ", &itmp) > 0)
3913 printf_filtered (_("TTY owner process group: %d\n"), itmp);
3914 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3915 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3916 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3917 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3918 (unsigned long) ltmp);
3919 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3920 printf_filtered (_("Minor faults, children: %lu\n"),
3921 (unsigned long) ltmp);
3922 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3923 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3924 (unsigned long) ltmp);
3925 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3926 printf_filtered (_("Major faults, children: %lu\n"),
3927 (unsigned long) ltmp);
3928 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3929 printf_filtered (_("utime: %ld\n"), ltmp);
3930 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3931 printf_filtered (_("stime: %ld\n"), ltmp);
3932 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3933 printf_filtered (_("utime, children: %ld\n"), ltmp);
3934 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3935 printf_filtered (_("stime, children: %ld\n"), ltmp);
3936 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3937 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3938 ltmp);
3939 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3940 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3941 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3942 printf_filtered (_("jiffies until next timeout: %lu\n"),
3943 (unsigned long) ltmp);
3944 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3945 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3946 (unsigned long) ltmp);
3947 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3948 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3949 ltmp);
3950 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3951 printf_filtered (_("Virtual memory size: %lu\n"),
3952 (unsigned long) ltmp);
3953 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3954 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3955 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3956 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3957 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3958 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3959 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3960 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3961 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3962 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3963 #if 0 /* Don't know how architecture-dependent the rest is...
3964 Anyway the signal bitmap info is available from "status". */
3965 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3966 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3967 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3968 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3969 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3970 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3971 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3972 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3973 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3974 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3975 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3976 printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
3977 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3978 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
3979 #endif
3980 do_cleanups (cleanup);
3981 }
3982 else
3983 warning (_("unable to open /proc file '%s'"), fname1);
3984 }
3985 }
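
/* Example session (illustrative; the values shown depend entirely on
   the inferior and kernel):

     (gdb) info proc
     process 12345
     cmdline = '/bin/ls'
     cwd = '/home/user'
     exe = '/bin/ls'
*/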
3986
3987 /* Implement the to_xfer_partial interface for memory reads using the /proc
3988 filesystem. Because we can use a single read() call for /proc, this
3989 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3990 but it doesn't support writes. */
3991
3992 static LONGEST
3993 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3994 const char *annex, gdb_byte *readbuf,
3995 const gdb_byte *writebuf,
3996 ULONGEST offset, LONGEST len)
3997 {
3998 LONGEST ret;
3999 int fd;
4000 char filename[64];
4001
4002 if (object != TARGET_OBJECT_MEMORY || !readbuf)
4003 return 0;
4004
4005 /* Don't bother for transfers of less than a few words. */
4006 if (len < 3 * sizeof (long))
4007 return 0;
4008
4009 /* We could keep this file open and cache it - possibly one per
4010 thread. That requires some juggling, but is even faster. */
4011 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
4012 fd = open (filename, O_RDONLY | O_LARGEFILE);
4013 if (fd == -1)
4014 return 0;
4015
4016 /* If pread64 is available, use it. It's faster if the kernel
4017 supports it (only one syscall), and it's 64-bit safe even on
4018 32-bit platforms (for instance, SPARC debugging a SPARC64
4019 application). */
4020 #ifdef HAVE_PREAD64
4021 if (pread64 (fd, readbuf, len, offset) != len)
4022 #else
4023 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
4024 #endif
4025 ret = 0;
4026 else
4027 ret = len;
4028
4029 close (fd);
4030 return ret;
4031 }
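
/* A minimal standalone sketch of the same /proc/<pid>/mem access
   performed above, outside GDB's target vector.  The function below
   is illustrative, not part of this file's interface; it uses plain
   pread for brevity where the code above prefers pread64 with
   O_LARGEFILE for 64-bit safety on 32-bit hosts.  The target process
   must already be ptrace-stopped, or the kernel refuses the read.  */

#if 0 /* Example only; not compiled.  */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
example_proc_mem_read (int pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t nread;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return 0;

  /* One syscall for the whole transfer, as above; all-or-nothing.  */
  nread = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return nread == (ssize_t) len ? nread : 0;
}
#endif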
4032
4033 /* Parse LINE as a signal set and add its set bits to SIGS. */
4034
4035 static void
4036 add_line_to_sigset (const char *line, sigset_t *sigs)
4037 {
4038 int len = strlen (line) - 1;
4039 const char *p;
4040 int signum;
4041
4042 if (line[len] != '\n')
4043 error (_("Could not parse signal set: %s"), line);
4044
4045 p = line;
4046 signum = len * 4;
4047 while (len-- > 0)
4048 {
4049 int digit;
4050
4051 if (*p >= '0' && *p <= '9')
4052 digit = *p - '0';
4053 else if (*p >= 'a' && *p <= 'f')
4054 digit = *p - 'a' + 10;
4055 else
4056 error (_("Could not parse signal set: %s"), line);
4057
4058 signum -= 4;
4059
4060 if (digit & 1)
4061 sigaddset (sigs, signum + 1);
4062 if (digit & 2)
4063 sigaddset (sigs, signum + 2);
4064 if (digit & 4)
4065 sigaddset (sigs, signum + 3);
4066 if (digit & 8)
4067 sigaddset (sigs, signum + 4);
4068
4069 p++;
4070 }
4071 }
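
/* For example, given the (illustrative) 16-nibble mask line

     "0000000000000002\n"

   the rightmost nibble -- processed last, when SIGNUM has counted
   down to 0 -- has value 2, i.e. bit 1 set, so the loop above adds
   signal 2 (SIGINT on Linux) to SIGS.  */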
4072
4073 /* Find process PID's pending, blocked and ignored signals from
4074 /proc/pid/status and set PENDING, BLOCKED and IGNORED to match. */
4075
4076 void
4077 linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
4078 {
4079 FILE *procfile;
4080 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4081 int signum;
4082 struct cleanup *cleanup;
4083
4084 sigemptyset (pending);
4085 sigemptyset (blocked);
4086 sigemptyset (ignored);
4087 sprintf (fname, "/proc/%d/status", pid);
4088 procfile = fopen (fname, "r");
4089 if (procfile == NULL)
4090 error (_("Could not open %s"), fname);
4091 cleanup = make_cleanup_fclose (procfile);
4092
4093 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4094 {
4095 /* Normal queued signals are on the SigPnd line in the status
4096 file. However, 2.6 kernels also have a "shared" pending
4097 queue for delivering signals to a thread group, so check for
4098 a ShdPnd line also.
4099
4100 Unfortunately some Red Hat kernels include the shared pending
4101 queue but not the ShdPnd status field. */
4102
4103 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4104 add_line_to_sigset (buffer + 8, pending);
4105 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4106 add_line_to_sigset (buffer + 8, pending);
4107 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4108 add_line_to_sigset (buffer + 8, blocked);
4109 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4110 add_line_to_sigset (buffer + 8, ignored);
4111 }
4112
4113 do_cleanups (cleanup);
4114 }
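
/* A minimal usage sketch, assuming a valid PID; the wrapper below is
   hypothetical and only shows the calling convention.  */

#if 0 /* Example only; not compiled.  */
static int
example_has_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  return sigismember (&pending, SIGINT);
}
#endif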
4115
4116 static LONGEST
4117 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4118 const char *annex, gdb_byte *readbuf,
4119 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4120 {
4121 /* We take a snapshot of the process list when the object starts
4122 to be read. */
4123 static const char *buf;
4124 static LONGEST len_avail = -1;
4125 static struct obstack obstack;
4126
4127 DIR *dirp;
4128
4129 gdb_assert (object == TARGET_OBJECT_OSDATA);
4130
4131 if (strcmp (annex, "processes") != 0)
4132 return 0;
4133
4134 gdb_assert (readbuf && !writebuf);
4135
4136 if (offset == 0)
4137 {
4138 if (len_avail != -1 && len_avail != 0)
4139 obstack_free (&obstack, NULL);
4140 len_avail = 0;
4141 buf = NULL;
4142 obstack_init (&obstack);
4143 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
4144
4145 dirp = opendir ("/proc");
4146 if (dirp)
4147 {
4148 struct dirent *dp;
4149 while ((dp = readdir (dirp)) != NULL)
4150 {
4151 struct stat statbuf;
4152 char procentry[sizeof ("/proc/4294967295")];
4153
4154 if (!isdigit (dp->d_name[0])
4155 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
4156 continue;
4157
4158 sprintf (procentry, "/proc/%s", dp->d_name);
4159 if (stat (procentry, &statbuf) == 0
4160 && S_ISDIR (statbuf.st_mode))
4161 {
4162 char *pathname;
4163 FILE *f;
4164 char cmd[MAXPATHLEN + 1];
4165 struct passwd *entry;
4166
4167 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
4168 entry = getpwuid (statbuf.st_uid);
4169
4170 if ((f = fopen (pathname, "r")) != NULL)
4171 {
4172 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
4173 if (len > 0)
4174 {
4175 int i;
4176 for (i = 0; i < len; i++)
4177 if (cmd[i] == '\0')
4178 cmd[i] = ' ';
4179 cmd[len] = '\0';
4180
4181 obstack_xml_printf (
4182 &obstack,
4183 "<item>"
4184 "<column name=\"pid\">%s</column>"
4185 "<column name=\"user\">%s</column>"
4186 "<column name=\"command\">%s</column>"
4187 "</item>",
4188 dp->d_name,
4189 entry ? entry->pw_name : "?",
4190 cmd);
4191 }
4192 fclose (f);
4193 }
4194
4195 xfree (pathname);
4196 }
4197 }
4198
4199 closedir (dirp);
4200 }
4201
4202 obstack_grow_str0 (&obstack, "</osdata>\n");
4203 buf = obstack_finish (&obstack);
4204 len_avail = strlen (buf);
4205 }
4206
4207 if (offset >= len_avail)
4208 {
4209 /* Done. Get rid of the obstack. */
4210 obstack_free (&obstack, NULL);
4211 buf = NULL;
4212 len_avail = 0;
4213 return 0;
4214 }
4215
4216 if (len > len_avail - offset)
4217 len = len_avail - offset;
4218 memcpy (readbuf, buf + offset, len);
4219
4220 return len;
4221 }
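
/* The buffer built above looks roughly like this (the PID, user and
   command are illustrative):

     <osdata type="processes">
     <item><column name="pid">1234</column>
           <column name="user">alice</column>
           <column name="command">/bin/ls -l</column></item>
     </osdata>

   (each <item> is actually emitted without internal line breaks).  */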
4222
4223 static LONGEST
4224 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4225 const char *annex, gdb_byte *readbuf,
4226 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4227 {
4228 LONGEST xfer;
4229
4230 if (object == TARGET_OBJECT_AUXV)
4231 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
4232 offset, len);
4233
4234 if (object == TARGET_OBJECT_OSDATA)
4235 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4236 offset, len);
4237
4238 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4239 offset, len);
4240 if (xfer != 0)
4241 return xfer;
4242
4243 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4244 offset, len);
4245 }
4246
4247 /* Create a prototype generic GNU/Linux target. The client can override
4248 it with local methods. */
4249
4250 static void
4251 linux_target_install_ops (struct target_ops *t)
4252 {
4253 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4254 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4255 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4256 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4257 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4258 t->to_post_attach = linux_child_post_attach;
4259 t->to_follow_fork = linux_child_follow_fork;
4260 t->to_find_memory_regions = linux_nat_find_memory_regions;
4261 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4262
4263 super_xfer_partial = t->to_xfer_partial;
4264 t->to_xfer_partial = linux_xfer_partial;
4265 }
4266
4267 struct target_ops *
4268 linux_target (void)
4269 {
4270 struct target_ops *t;
4271
4272 t = inf_ptrace_target ();
4273 linux_target_install_ops (t);
4274
4275 return t;
4276 }
4277
4278 struct target_ops *
4279 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4280 {
4281 struct target_ops *t;
4282
4283 t = inf_ptrace_trad_target (register_u_offset);
4284 linux_target_install_ops (t);
4285
4286 return t;
4287 }
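
/* A minimal sketch of how an architecture-specific native file
   typically consumes these entry points; the example_* names are
   illustrative, not functions in GDB.  */

#if 0 /* Example only; not compiled.  */
void
_initialize_example_linux_nat (void)
{
  struct target_ops *t;

  /* Start from the generic ptrace-based GNU/Linux target...  */
  t = linux_target ();

  /* ... fill in the architecture-specific register access...  */
  t->to_fetch_registers = example_fetch_registers;
  t->to_store_registers = example_store_registers;

  /* ... and register it, layering on the multi-threaded support.  */
  linux_nat_add_target (t);
}
#endif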
4288
4289 /* target_is_async_p implementation. */
4290
4291 static int
4292 linux_nat_is_async_p (void)
4293 {
4294 /* NOTE: palves 2008-03-21: We're only async when the user requests
4295 it explicitly with the "maintenance set target-async" command.
4296 Someday, linux will always be async. */
4297 if (!target_async_permitted)
4298 return 0;
4299
4300 return 1;
4301 }
4302
4303 /* target_can_async_p implementation. */
4304
4305 static int
4306 linux_nat_can_async_p (void)
4307 {
4308 /* NOTE: palves 2008-03-21: We're only async when the user requests
4309 it explicitly with the "maintenance set target-async" command.
4310 Someday, linux will always be async. */
4311 if (!target_async_permitted)
4312 return 0;
4313
4314 /* See target.h/target_async_mask. */
4315 return linux_nat_async_mask_value;
4316 }
4317
4318 static int
4319 linux_nat_supports_non_stop (void)
4320 {
4321 return 1;
4322 }
4323
4324 /* target_async_mask implementation. */
4325
4326 static int
4327 linux_nat_async_mask (int mask)
4328 {
4329 int current_state;
4330 current_state = linux_nat_async_mask_value;
4331
4332 if (current_state != mask)
4333 {
4334 if (mask == 0)
4335 {
4336 linux_nat_async (NULL, 0);
4337 linux_nat_async_mask_value = mask;
4338 }
4339 else
4340 {
4341 linux_nat_async_mask_value = mask;
4342 linux_nat_async (inferior_event_handler, 0);
4343 }
4344 }
4345
4346 return current_state;
4347 }
4348
4349 /* Pop an event from the event pipe. */
4350
4351 static int
4352 linux_nat_event_pipe_pop (int *ptr_status, int *ptr_options)
4353 {
4354 struct waitpid_result event = {0};
4355 int ret;
4356
4357 do
4358 {
4359 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4360 }
4361 while (ret == -1 && errno == EINTR);
4362
4363 gdb_assert (ret == sizeof (event));
4364
4365 *ptr_status = event.status;
4366 *ptr_options = event.options;
4367
4368 linux_nat_num_queued_events--;
4369
4370 return event.pid;
4371 }
4372
4373 /* Push an event into the event pipe. */
4374
4375 static void
4376 linux_nat_event_pipe_push (int pid, int status, int options)
4377 {
4378 int ret;
4379 struct waitpid_result event = {0};
4380 event.pid = pid;
4381 event.status = status;
4382 event.options = options;
4383
4384 do
4385 {
4386 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
4387 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
4388 } while (ret == -1 && errno == EINTR);
4389
4390 linux_nat_num_queued_events++;
4391 }
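
/* Note that sizeof (struct waitpid_result) is far below PIPE_BUF, so
   POSIX guarantees each event is written to the pipe atomically, even
   though the push side runs from the SIGCHLD handler while the main
   code may be reading the other end.  */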
4392
4393 static void
4394 get_pending_events (void)
4395 {
4396 int status, options, pid;
4397
4398 if (!target_async_permitted
4399 || linux_nat_async_events_state != sigchld_async)
4400 internal_error (__FILE__, __LINE__,
4401 "get_pending_events called with async masked");
4402
4403 while (1)
4404 {
4405 status = 0;
4406 options = __WCLONE | WNOHANG;
4407
4408 do
4409 {
4410 pid = waitpid (-1, &status, options);
4411 }
4412 while (pid == -1 && errno == EINTR);
4413
4414 if (pid <= 0)
4415 {
4416 options = WNOHANG;
4417 do
4418 {
4419 pid = waitpid (-1, &status, options);
4420 }
4421 while (pid == -1 && errno == EINTR);
4422 }
4423
4424 if (pid <= 0)
4425 /* No more children reporting events. */
4426 break;
4427
4428 if (debug_linux_nat_async)
4429 fprintf_unfiltered (gdb_stdlog, "\
4430 get_pending_events: pid(%d), status(%x), options (%x)\n",
4431 pid, status, options);
4432
4433 linux_nat_event_pipe_push (pid, status, options);
4434 }
4435
4436 if (debug_linux_nat_async)
4437 fprintf_unfiltered (gdb_stdlog, "\
4438 get_pending_events: linux_nat_num_queued_events(%d)\n",
4439 linux_nat_num_queued_events);
4440 }
4441
4442 /* SIGCHLD handler for async mode. */
4443
4444 static void
4445 async_sigchld_handler (int signo)
4446 {
4447 if (debug_linux_nat_async)
4448 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
4449
4450 get_pending_events ();
4451 }
4452
4453 /* Set SIGCHLD handling state to STATE. Returns previous state. */
4454
4455 static enum sigchld_state
4456 linux_nat_async_events (enum sigchld_state state)
4457 {
4458 enum sigchld_state current_state = linux_nat_async_events_state;
4459
4460 if (debug_linux_nat_async)
4461 fprintf_unfiltered (gdb_stdlog,
4462 "LNAE: state(%d): linux_nat_async_events_state(%d), "
4463 "linux_nat_num_queued_events(%d)\n",
4464 state, linux_nat_async_events_state,
4465 linux_nat_num_queued_events);
4466
4467 if (current_state != state)
4468 {
4469 sigset_t mask;
4470 sigemptyset (&mask);
4471 sigaddset (&mask, SIGCHLD);
4472
4473 /* Always block before changing state. */
4474 sigprocmask (SIG_BLOCK, &mask, NULL);
4475
4476 /* Set new state. */
4477 linux_nat_async_events_state = state;
4478
4479 switch (state)
4480 {
4481 case sigchld_sync:
4482 {
4483 /* Block target events. */
4484 sigprocmask (SIG_BLOCK, &mask, NULL);
4485 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4486 /* Get events out of queue, and make them available to
4487 queued_waitpid / my_waitpid. */
4488 pipe_to_local_event_queue ();
4489 }
4490 break;
4491 case sigchld_async:
4492 {
4493 /* Unblock target events for async mode. */
4494
4495 sigprocmask (SIG_BLOCK, &mask, NULL);
4496
4497 /* Put events we already waited on, in the pipe first, so
4498 events are FIFO. */
4499 local_event_queue_to_pipe ();
4500 /* While in masked async, we may not have collected all
4501 the pending events. Get them out now. */
4502 get_pending_events ();
4503
4504 /* Let'em come. */
4505 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4506 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4507 }
4508 break;
4509 case sigchld_default:
4510 {
4511 /* SIGCHLD default mode. */
4512 sigaction (SIGCHLD, &sigchld_default_action, NULL);
4513
4514 /* Get events out of queue, and make them available to
4515 queued_waitpid / my_waitpid. */
4516 pipe_to_local_event_queue ();
4517
4518 /* Unblock SIGCHLD. */
4519 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4520 }
4521 break;
4522 }
4523 }
4524
4525 return current_state;
4526 }
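
/* In summary, the three states handled above are:

   sigchld_sync    - SIGCHLD blocked, synchronous handler installed,
                     pipe drained into the local event queue for
                     queued_waitpid / my_waitpid.
   sigchld_async   - local queue flushed back into the pipe (keeping
                     events FIFO), async handler installed, SIGCHLD
                     unblocked.
   sigchld_default - default SIGCHLD disposition restored; used while
                     forking an inferior (see _initialize_linux_nat).  */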
4527
4528 static int async_terminal_is_ours = 1;
4529
4530 /* target_terminal_inferior implementation. */
4531
4532 static void
4533 linux_nat_terminal_inferior (void)
4534 {
4535 if (!target_is_async_p ())
4536 {
4537 /* Async mode is disabled. */
4538 terminal_inferior ();
4539 return;
4540 }
4541
4542 /* GDB should never give the terminal to the inferior, if the
4543 inferior is running in the background (run&, continue&, etc.).
4544 This check can be removed when the common code is fixed. */
4545 if (!sync_execution)
4546 return;
4547
4548 terminal_inferior ();
4549
4550 if (!async_terminal_is_ours)
4551 return;
4552
4553 delete_file_handler (input_fd);
4554 async_terminal_is_ours = 0;
4555 set_sigint_trap ();
4556 }
4557
4558 /* target_terminal_ours implementation. */
4559
4560 void
4561 linux_nat_terminal_ours (void)
4562 {
4563 if (!target_is_async_p ())
4564 {
4565 /* Async mode is disabled. */
4566 terminal_ours ();
4567 return;
4568 }
4569
4570 /* GDB should never give the terminal to the inferior if the
4571 inferior is running in the background (run&, continue&, etc.),
4572 but claiming it sure should. */
4573 terminal_ours ();
4574
4575 if (!sync_execution)
4576 return;
4577
4578 if (async_terminal_is_ours)
4579 return;
4580
4581 clear_sigint_trap ();
4582 add_file_handler (input_fd, stdin_event_handler, 0);
4583 async_terminal_is_ours = 1;
4584 }
4585
4586 static void (*async_client_callback) (enum inferior_event_type event_type,
4587 void *context);
4588 static void *async_client_context;
4589
4590 static void
4591 linux_nat_async_file_handler (int error, gdb_client_data client_data)
4592 {
4593 async_client_callback (INF_REG_EVENT, async_client_context);
4594 }
4595
4596 /* target_async implementation. */
4597
4598 static void
4599 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4600 void *context), void *context)
4601 {
4602 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
4603 internal_error (__FILE__, __LINE__,
4604 "Calling target_async when async is masked");
4605
4606 if (callback != NULL)
4607 {
4608 async_client_callback = callback;
4609 async_client_context = context;
4610 add_file_handler (linux_nat_event_pipe[0],
4611 linux_nat_async_file_handler, NULL);
4612
4613 linux_nat_async_events (sigchld_async);
4614 }
4615 else
4616 {
4617 async_client_callback = callback;
4618 async_client_context = context;
4619
4620 linux_nat_async_events (sigchld_sync);
4621 delete_file_handler (linux_nat_event_pipe[0]);
4622 }
4623 return;
4624 }
4625
4626 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
4627 event came out. */
4628
4629 static int
4630 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4631 {
4632 ptid_t ptid = * (ptid_t *) data;
4633
4634 if (ptid_equal (lwp->ptid, ptid)
4635 || ptid_equal (minus_one_ptid, ptid)
4636 || (ptid_is_pid (ptid)
4637 && ptid_get_pid (ptid) == ptid_get_pid (lwp->ptid)))
4638 {
4639 if (!lwp->stopped)
4640 {
4641 int pid, status;
4642
4643 if (debug_linux_nat)
4644 fprintf_unfiltered (gdb_stdlog,
4645 "LNSL: running -> suspending %s\n",
4646 target_pid_to_str (lwp->ptid));
4647
4648 /* Peek once, to check if we've already waited for this
4649 LWP. */
4650 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4651 lwp->cloned ? __WCLONE : 0, 1 /* peek */);
4652
4653 if (pid == -1)
4654 {
4655 ptid_t ptid = lwp->ptid;
4656
4657 stop_callback (lwp, NULL);
4658 stop_wait_callback (lwp, NULL);
4659
4660 /* If the lwp exits while we try to stop it, there's
4661 nothing else to do. */
4662 lwp = find_lwp_pid (ptid);
4663 if (lwp == NULL)
4664 return 0;
4665
4666 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4667 lwp->cloned ? __WCLONE : 0,
4668 1 /* peek */);
4669 }
4670
4671 /* If we didn't collect any signal other than SIGSTOP while
4672 stopping the LWP, push a SIGNAL_0 event. In either case,
4673 the event-loop will end up calling target_wait which will
4674 collect these. */
4675 if (pid == -1)
4676 push_waitpid (ptid_get_lwp (lwp->ptid), W_STOPCODE (0),
4677 lwp->cloned ? __WCLONE : 0);
4678 }
4679 else
4680 {
4681 /* Already known to be stopped; do nothing. */
4682
4683 if (debug_linux_nat)
4684 {
4685 if (find_thread_pid (lwp->ptid)->stop_requested)
4686 fprintf_unfiltered (gdb_stdlog, "\
4687 LNSL: already stopped/stop_requested %s\n",
4688 target_pid_to_str (lwp->ptid));
4689 else
4690 fprintf_unfiltered (gdb_stdlog, "\
4691 LNSL: already stopped/no stop_requested yet %s\n",
4692 target_pid_to_str (lwp->ptid));
4693 }
4694 }
4695 }
4696 return 0;
4697 }
4698
4699 static void
4700 linux_nat_stop (ptid_t ptid)
4701 {
4702 if (non_stop)
4703 {
4704 linux_nat_async_events (sigchld_sync);
4705 iterate_over_lwps (linux_nat_stop_lwp, &ptid);
4706 target_async (inferior_event_handler, 0);
4707 }
4708 else
4709 linux_ops->to_stop (ptid);
4710 }
4711
4712 void
4713 linux_nat_add_target (struct target_ops *t)
4714 {
4715 /* Save the provided single-threaded target. We save this in a separate
4716 variable because another target we've inherited from (e.g. inf-ptrace)
4717 may have saved a pointer to T; we want to use it for the final
4718 process stratum target. */
4719 linux_ops_saved = *t;
4720 linux_ops = &linux_ops_saved;
4721
4722 /* Override some methods for multithreading. */
4723 t->to_create_inferior = linux_nat_create_inferior;
4724 t->to_attach = linux_nat_attach;
4725 t->to_detach = linux_nat_detach;
4726 t->to_resume = linux_nat_resume;
4727 t->to_wait = linux_nat_wait;
4728 t->to_xfer_partial = linux_nat_xfer_partial;
4729 t->to_kill = linux_nat_kill;
4730 t->to_mourn_inferior = linux_nat_mourn_inferior;
4731 t->to_thread_alive = linux_nat_thread_alive;
4732 t->to_pid_to_str = linux_nat_pid_to_str;
4733 t->to_has_thread_control = tc_schedlock;
4734
4735 t->to_can_async_p = linux_nat_can_async_p;
4736 t->to_is_async_p = linux_nat_is_async_p;
4737 t->to_supports_non_stop = linux_nat_supports_non_stop;
4738 t->to_async = linux_nat_async;
4739 t->to_async_mask = linux_nat_async_mask;
4740 t->to_terminal_inferior = linux_nat_terminal_inferior;
4741 t->to_terminal_ours = linux_nat_terminal_ours;
4742
4743 /* Methods for non-stop support. */
4744 t->to_stop = linux_nat_stop;
4745
4746 /* We don't change the stratum; this target will sit at
4747 process_stratum and thread_db will sit at thread_stratum. This
4748 is a little strange, since this is a multi-threaded-capable
4749 target, but we want to be on the stack below thread_db, and we
4750 also want to be used for single-threaded processes. */
4751
4752 add_target (t);
4753 }
4754
4755 /* Register a method to call whenever a new thread is attached. */
4756 void
4757 linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4758 {
4759 /* Save the pointer. We only support a single registered instance
4760 of the GNU/Linux native target, so we do not need to map this to
4761 T. */
4762 linux_nat_new_thread = new_thread;
4763 }
4764
4765 /* Register a method that converts a siginfo object between the layout
4766 that ptrace returns, and the layout in the architecture of the
4767 inferior. */
4768 void
4769 linux_nat_set_siginfo_fixup (struct target_ops *t,
4770 int (*siginfo_fixup) (struct siginfo *,
4771 gdb_byte *,
4772 int))
4773 {
4774 /* Save the pointer. */
4775 linux_nat_siginfo_fixup = siginfo_fixup;
4776 }
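
/* A minimal sketch of a fixup suitable for registration here; the
   function below is a hypothetical no-op shown only for the calling
   convention.  A real fixup converts between the two layouts and
   returns non-zero when it performed a conversion.  */

#if 0 /* Example only; not compiled.  */
static int
example_siginfo_fixup (struct siginfo *native, gdb_byte *inf, int direction)
{
  /* Returning 0 tells the core that no conversion was needed,
     i.e. the native and inferior siginfo layouts already match.  */
  return 0;
}
#endif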
4777
4778 /* Return the saved siginfo associated with PTID. */
4779 struct siginfo *
4780 linux_nat_get_siginfo (ptid_t ptid)
4781 {
4782 struct lwp_info *lp = find_lwp_pid (ptid);
4783
4784 gdb_assert (lp != NULL);
4785
4786 return &lp->siginfo;
4787 }
4788
4789 /* Set up the event pipe used for async mode. */
4790
4791 static void
4792 linux_nat_setup_async (void)
4793 {
4794 if (pipe (linux_nat_event_pipe) == -1)
4795 internal_error (__FILE__, __LINE__,
4796 "creating event pipe failed.");
4797 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4798 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4799 }
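
/* This is the classic self-pipe pattern: the write end is filled from
   the SIGCHLD handler (via get_pending_events), while the read end is
   registered with GDB's event loop in linux_nat_async, so child
   events wake the event loop like any other file descriptor.  Both
   ends are non-blocking so the signal handler can never stall.  */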
4800
4801 void
4802 _initialize_linux_nat (void)
4803 {
4804 sigset_t mask;
4805
4806 add_info ("proc", linux_nat_info_proc_cmd, _("\
4807 Show /proc process information about any running process.\n\
4808 Specify any process id, or use the program being debugged by default.\n\
4809 Specify any of the following keywords for detailed info:\n\
4810 mappings -- list of mapped memory regions.\n\
4811 stat -- list a bunch of random process info.\n\
4812 status -- list a different bunch of random process info.\n\
4813 all -- list all available /proc info."));
4814
4815 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4816 &debug_linux_nat, _("\
4817 Set debugging of GNU/Linux lwp module."), _("\
4818 Show debugging of GNU/Linux lwp module."), _("\
4819 Enables printf debugging output."),
4820 NULL,
4821 show_debug_linux_nat,
4822 &setdebuglist, &showdebuglist);
4823
4824 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4825 &debug_linux_nat_async, _("\
4826 Set debugging of GNU/Linux async lwp module."), _("\
4827 Show debugging of GNU/Linux async lwp module."), _("\
4828 Enables printf debugging output."),
4829 NULL,
4830 show_debug_linux_nat_async,
4831 &setdebuglist, &showdebuglist);
4832
4833 /* Get the default SIGCHLD action. Used while forking an inferior
4834 (see linux_nat_create_inferior/linux_nat_async_events). */
4835 sigaction (SIGCHLD, NULL, &sigchld_default_action);
4836
4837 /* Block SIGCHLD by default. Doing this early prevents it getting
4838 unblocked if an exception is thrown due to an error while the
4839 inferior is starting (sigsetjmp/siglongjmp). */
4840 sigemptyset (&mask);
4841 sigaddset (&mask, SIGCHLD);
4842 sigprocmask (SIG_BLOCK, &mask, NULL);
4843
4844 /* Save this mask as the default. */
4845 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4846
4847 /* The synchronous SIGCHLD handler. */
4848 sync_sigchld_action.sa_handler = sigchld_handler;
4849 sigemptyset (&sync_sigchld_action.sa_mask);
4850 sync_sigchld_action.sa_flags = SA_RESTART;
4851
4852 /* Make it the default. */
4853 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4854
4855 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4856 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4857 sigdelset (&suspend_mask, SIGCHLD);
4858
4859 /* SIGCHLD handler for async mode. */
4860 async_sigchld_action.sa_handler = async_sigchld_handler;
4861 sigemptyset (&async_sigchld_action.sa_mask);
4862 async_sigchld_action.sa_flags = SA_RESTART;
4863
4864 linux_nat_setup_async ();
4865
4866 add_setshow_boolean_cmd ("disable-randomization", class_support,
4867 &disable_randomization, _("\
4868 Set disabling of debuggee's virtual address space randomization."), _("\
4869 Show disabling of debuggee's virtual address space randomization."), _("\
4870 When this mode is on (which is the default), randomization of the virtual\n\
4871 address space is disabled. Standalone programs run with the randomization\n\
4872 enabled by default on some platforms."),
4873 &set_disable_randomization,
4874 &show_disable_randomization,
4875 &setlist, &showlist);
4876 }
4877 \f
4878
4879 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4880 the GNU/Linux Threads library and therefore doesn't really belong
4881 here. */
4882
4883 /* Read variable NAME in the target and return its value if found.
4884 Otherwise return zero. It is assumed that the type of the variable
4885 is `int'. */
4886
4887 static int
4888 get_signo (const char *name)
4889 {
4890 struct minimal_symbol *ms;
4891 int signo;
4892
4893 ms = lookup_minimal_symbol (name, NULL, NULL);
4894 if (ms == NULL)
4895 return 0;
4896
4897 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4898 sizeof (signo)) != 0)
4899 return 0;
4900
4901 return signo;
4902 }
4903
4904 /* Return the set of signals used by the threads library in *SET. */
4905
4906 void
4907 lin_thread_get_thread_signals (sigset_t *set)
4908 {
4909 struct sigaction action;
4910 int restart, cancel;
4911 sigset_t blocked_mask;
4912
4913 sigemptyset (&blocked_mask);
4914 sigemptyset (set);
4915
4916 restart = get_signo ("__pthread_sig_restart");
4917 cancel = get_signo ("__pthread_sig_cancel");
4918
4919 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4920 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4921 not provide any way for the debugger to query the signal numbers -
4922 fortunately they don't change! */
4923
4924 if (restart == 0)
4925 restart = __SIGRTMIN;
4926
4927 if (cancel == 0)
4928 cancel = __SIGRTMIN + 1;
4929
4930 sigaddset (set, restart);
4931 sigaddset (set, cancel);
4932
4933 /* The GNU/Linux Threads library makes terminating threads send a
4934 special "cancel" signal instead of SIGCHLD. Make sure we catch
4935 those (to prevent them from terminating GDB itself, which is
4936 likely to be their default action) and treat them the same way as
4937 SIGCHLD. */
4938
4939 action.sa_handler = sigchld_handler;
4940 sigemptyset (&action.sa_mask);
4941 action.sa_flags = SA_RESTART;
4942 sigaction (cancel, &action, NULL);
4943
4944 /* We block the "cancel" signal throughout this code ... */
4945 sigaddset (&blocked_mask, cancel);
4946 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4947
4948 /* ... except during a sigsuspend. */
4949 sigdelset (&suspend_mask, cancel);
4950 }