1 /* GNU/Linux native-dependent code common to multiple platforms.
2
3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3 of the License, or
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
20
21 #include "defs.h"
22 #include "inferior.h"
23 #include "target.h"
24 #include "gdb_string.h"
25 #include "gdb_wait.h"
26 #include "gdb_assert.h"
27 #ifdef HAVE_TKILL_SYSCALL
28 #include <unistd.h>
29 #include <sys/syscall.h>
30 #endif
31 #include <sys/ptrace.h>
32 #include "linux-nat.h"
33 #include "linux-fork.h"
34 #include "gdbthread.h"
35 #include "gdbcmd.h"
36 #include "regcache.h"
37 #include "regset.h"
38 #include "inf-ptrace.h"
39 #include "auxv.h"
40 #include <sys/param.h> /* for MAXPATHLEN */
41 #include <sys/procfs.h> /* for elf_gregset etc. */
42 #include "elf-bfd.h" /* for elfcore_write_* */
43 #include "gregset.h" /* for gregset */
44 #include "gdbcore.h" /* for get_exec_file */
45 #include <ctype.h> /* for isdigit */
46 #include "gdbthread.h" /* for struct thread_info etc. */
47 #include "gdb_stat.h" /* for struct stat */
48 #include <fcntl.h> /* for O_RDONLY */
49 #include "inf-loop.h"
50 #include "event-loop.h"
51 #include "event-top.h"
52 #include <pwd.h>
53 #include <sys/types.h>
54 #include "gdb_dirent.h"
55 #include "xml-support.h"
56
57 #ifdef HAVE_PERSONALITY
58 # include <sys/personality.h>
59 # if !HAVE_DECL_ADDR_NO_RANDOMIZE
60 # define ADDR_NO_RANDOMIZE 0x0040000
61 # endif
62 #endif /* HAVE_PERSONALITY */
63
64 /* This comment documents the high-level logic of this file.
65
66 Waiting for events in sync mode
67 ===============================
68
69 When waiting for an event in a specific thread, we just use waitpid, passing
70 the specific pid, and not passing WNOHANG.
71
72 When waiting for an event in all threads, waitpid is not quite adequate. Prior
73 to version 2.4, Linux could wait either for an event in the main thread or in
74 secondary threads (2.4 has the __WALL flag), so if we use blocking waitpid, we
75 might miss an event. The solution is to use non-blocking waitpid, together with
76 sigsuspend. First, we use non-blocking waitpid to get an event in the main
77 process, if any. Second, we use non-blocking waitpid with the __WCLONE
78 flag to check for events in cloned processes. If nothing is found, we use
79 sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
80 happened to a child process -- and SIGCHLD will be delivered both for events
81 in the main debugged process and in cloned processes. As soon as we know there's
82 an event, we go back to calling non-blocking waitpid with and without __WCLONE.
83
84 Note that SIGCHLD should be blocked between the waitpid and sigsuspend
85 calls, so that we don't miss a signal. If SIGCHLD arrives in between,
86 while it's blocked, the signal becomes pending and sigsuspend immediately
87 notices it and returns.
88
89 Waiting for events in async mode
90 ================================
91
92 In async mode, GDB should always be ready to handle both user input and target
93 events, so neither blocking waitpid nor sigsuspend is a viable
94 option. Instead, we should notify the GDB main event loop whenever there's an
95 unprocessed event from the target. The only way to notify this event loop is
96 to make it wait on input from a pipe, and write something to the pipe whenever
97 there's an event. Obviously, failing to notify the event loop when there is a
98 target event is bad. If we notify the event loop when there's no event
99 from the target, linux-nat.c will detect that there's actually no event and
100 report an event of type TARGET_WAITKIND_IGNORE, but that wastes time and is
101 better avoided.
102
103 The main design point is that every time GDB is outside linux-nat.c, we have a
104 SIGCHLD handler installed that is called when something happens to the target
105 and notifies the GDB event loop. Also, the event is extracted from the target
106 using waitpid and stored for future use. Whenever the GDB core decides to handle
107 the event, and calls into linux-nat.c, we disable SIGCHLD and process things
108 as in sync mode, except that before the waitpid call we check whether there are
109 any previously read events.
110
111 It could happen that during event processing we'll try to get more events
112 than there are in the local queue, which will result in a waitpid call.
113 Those waitpid calls, while blocking, are guaranteed to always have
114 something for waitpid to return. E.g., stopping a thread with SIGSTOP, and
115 waiting for the lwp to stop.
116
117 The event loop is notified about new events using a pipe. The SIGCHLD handler
118 does waitpid and writes the results into the pipe. The GDB event loop has the
119 other end of the pipe among its sources. When the event loop starts to process
120 an event and calls a function in linux-nat.c, all events from the pipe are
121 transferred into a local queue and SIGCHLD is blocked. Further processing goes
122 as in sync mode. Before we return from linux_nat_wait, we transfer all
123 unprocessed events from the local queue back to the pipe, so that when we get
124 back to the event loop, it will notice there's something more to do.
125
126 SIGCHLD is blocked when we're inside target_wait, so that should we actually
127 want to wait for some more events, the SIGCHLD handler does not steal them from
128 us. Technically, it would be possible to add new events to the local queue, but
129 it's about the same amount of work as blocking SIGCHLD.
130
131 This moving of events from the pipe into a local queue and back into the pipe
132 when we enter/leave linux-nat.c is somewhat ugly. Unfortunately, the GDB event
133 loop is home-grown and incapable of waiting on any queue.
134
135 Use of signals
136 ==============
137
138 We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
139 signal is not entirely significant; we just need a signal to be delivered,
140 so that we can intercept it. SIGSTOP's advantage is that it cannot be
141 blocked. A disadvantage is that it is not a real-time signal, so it can only
142 be queued once; we do not keep track of other sources of SIGSTOP.
143
144 Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
145 use them, because they have special behavior when the signal is generated -
146 not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
147 kills the entire thread group.
148
149 A delivered SIGSTOP would stop the entire thread group, not just the thread we
150 tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
151 cancel it (by PTRACE_CONT without passing SIGSTOP).
152
153 We could use a real-time signal instead. This would solve those problems; we
154 could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
155 But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
156 generates it, and there are races with trying to find a signal that is not
157 blocked. */
158
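A minimal sketch (not part of the original file) of the sync-mode pattern described in the comment above: SIGCHLD is kept blocked, non-blocking waitpid is polled with and without __WCLONE, and sigsuspend atomically unblocks SIGCHLD while sleeping. The function name is hypothetical and error handling is omitted.

/* Illustrative sketch only: the blocked-SIGCHLD + sigsuspend wait loop.
   Assumes a SIGCHLD handler (even a no-op one) is installed, so that
   the signal interrupts sigsuspend.  */
static int
sketch_sync_wait (int *status)
{
  sigset_t blocked, suspend;
  int pid;

  /* Keep SIGCHLD blocked outside of sigsuspend.  */
  sigemptyset (&blocked);
  sigaddset (&blocked, SIGCHLD);
  sigprocmask (SIG_BLOCK, &blocked, NULL);

  for (;;)
    {
      /* Poll for an event in the main process...  */
      pid = waitpid (-1, status, WNOHANG);
      if (pid > 0)
        break;

      /* ... and in cloned (thread) processes.  */
      pid = waitpid (-1, status, WNOHANG | __WCLONE);
      if (pid > 0)
        break;

      /* Nothing yet.  Atomically unblock SIGCHLD and sleep; a SIGCHLD
         that became pending between the waitpid calls and here makes
         sigsuspend return immediately.  */
      sigprocmask (SIG_BLOCK, NULL, &suspend);
      sigdelset (&suspend, SIGCHLD);
      sigsuspend (&suspend);
    }

  return pid;
}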
159 #ifndef O_LARGEFILE
160 #define O_LARGEFILE 0
161 #endif
162
163 /* If the system headers did not provide the constants, hard-code the normal
164 values. */
165 #ifndef PTRACE_EVENT_FORK
166
167 #define PTRACE_SETOPTIONS 0x4200
168 #define PTRACE_GETEVENTMSG 0x4201
169
170 /* Options set using PTRACE_SETOPTIONS. */
171 #define PTRACE_O_TRACESYSGOOD 0x00000001
172 #define PTRACE_O_TRACEFORK 0x00000002
173 #define PTRACE_O_TRACEVFORK 0x00000004
174 #define PTRACE_O_TRACECLONE 0x00000008
175 #define PTRACE_O_TRACEEXEC 0x00000010
176 #define PTRACE_O_TRACEVFORKDONE 0x00000020
177 #define PTRACE_O_TRACEEXIT 0x00000040
178
179 /* Wait extended result codes for the above trace options. */
180 #define PTRACE_EVENT_FORK 1
181 #define PTRACE_EVENT_VFORK 2
182 #define PTRACE_EVENT_CLONE 3
183 #define PTRACE_EVENT_EXEC 4
184 #define PTRACE_EVENT_VFORK_DONE 5
185 #define PTRACE_EVENT_EXIT 6
186
187 #endif /* PTRACE_EVENT_FORK */
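These event codes arrive in the upper 16 bits of the waitpid status word, on top of a SIGTRAP stop. A hedged sketch of the decode step (the function name is illustrative and error handling is omitted):

/* Illustrative sketch only: enable fork tracing on PID, then decode an
   extended event from a waitpid status.  */
static int
sketch_saw_fork_event (int pid)
{
  int status;

  ptrace (PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACEFORK);
  ptrace (PTRACE_CONT, pid, 0, 0);
  waitpid (pid, &status, 0);

  /* An extended event is reported as a SIGTRAP stop with the
     PTRACE_EVENT_* code in bits 16..23 of the status.  */
  return (WIFSTOPPED (status)
          && WSTOPSIG (status) == SIGTRAP
          && (status >> 16) == PTRACE_EVENT_FORK);
}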
188
189 /* We can't always assume that this flag is available, but all systems
190 with the ptrace event handlers also have __WALL, so it's safe to use
191 here. */
192 #ifndef __WALL
193 #define __WALL 0x40000000 /* Wait for any child. */
194 #endif
195
196 #ifndef PTRACE_GETSIGINFO
197 #define PTRACE_GETSIGINFO 0x4202
198 #endif
199
200 /* The single-threaded native GNU/Linux target_ops. We save a pointer for
201 the use of the multi-threaded target. */
202 static struct target_ops *linux_ops;
203 static struct target_ops linux_ops_saved;
204
205 /* The method to call, if any, when a new thread is attached. */
206 static void (*linux_nat_new_thread) (ptid_t);
207
208 /* The saved to_xfer_partial method, inherited from inf-ptrace.c.
209 Called by our to_xfer_partial. */
210 static LONGEST (*super_xfer_partial) (struct target_ops *,
211 enum target_object,
212 const char *, gdb_byte *,
213 const gdb_byte *,
214 ULONGEST, LONGEST);
215
216 static int debug_linux_nat;
217 static void
218 show_debug_linux_nat (struct ui_file *file, int from_tty,
219 struct cmd_list_element *c, const char *value)
220 {
221 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
222 value);
223 }
224
225 static int debug_linux_nat_async = 0;
226 static void
227 show_debug_linux_nat_async (struct ui_file *file, int from_tty,
228 struct cmd_list_element *c, const char *value)
229 {
230 fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
231 value);
232 }
233
234 static int disable_randomization = 1;
235
236 static void
237 show_disable_randomization (struct ui_file *file, int from_tty,
238 struct cmd_list_element *c, const char *value)
239 {
240 #ifdef HAVE_PERSONALITY
241 fprintf_filtered (file, _("\
242 Disabling randomization of debuggee's virtual address space is %s.\n"),
243 value);
244 #else /* !HAVE_PERSONALITY */
245 fputs_filtered (_("\
246 Disabling randomization of debuggee's virtual address space is unsupported on\n\
247 this platform.\n"), file);
248 #endif /* !HAVE_PERSONALITY */
249 }
250
251 static void
252 set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
253 {
254 #ifndef HAVE_PERSONALITY
255 error (_("\
256 Disabling randomization of debuggee's virtual address space is unsupported on\n\
257 this platform."));
258 #endif /* !HAVE_PERSONALITY */
259 }
260
261 static int linux_parent_pid;
262
263 struct simple_pid_list
264 {
265 int pid;
266 int status;
267 struct simple_pid_list *next;
268 };
269 struct simple_pid_list *stopped_pids;
270
271 /* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
272 cannot be used, 1 if it can. */
273
274 static int linux_supports_tracefork_flag = -1;
275
276 /* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
277 PTRACE_O_TRACEVFORKDONE. */
278
279 static int linux_supports_tracevforkdone_flag = -1;
280
281 /* Async mode support */
282
283 /* Zero if the async mode, although enabled, is masked, which means
284 linux_nat_wait should behave as if async mode was off. */
285 static int linux_nat_async_mask_value = 1;
286
287 /* The read/write ends of the pipe registered as waitable file in the
288 event loop. */
289 static int linux_nat_event_pipe[2] = { -1, -1 };
290
291 /* Number of queued events in the pipe. */
292 static volatile int linux_nat_num_queued_events;
293
294 /* The possible SIGCHLD handling states. */
295
296 enum sigchld_state
297 {
298 /* SIGCHLD disabled, with action set to sigchld_handler, for the
299 sigsuspend in linux_nat_wait. */
300 sigchld_sync,
301 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
302 sigchld_async,
303 /* Set SIGCHLD to default action. Used while creating an
304 inferior. */
305 sigchld_default
306 };
307
308 /* The current SIGCHLD handling state. */
309 static enum sigchld_state linux_nat_async_events_state;
310
311 static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
312 static void pipe_to_local_event_queue (void);
313 static void local_event_queue_to_pipe (void);
314 static void linux_nat_event_pipe_push (int pid, int status, int options);
315 static int linux_nat_event_pipe_pop (int *ptr_status, int *ptr_options);
316 static void linux_nat_set_async_mode (int on);
317 static void linux_nat_async (void (*callback)
318 (enum inferior_event_type event_type, void *context),
319 void *context);
320 static int linux_nat_async_mask (int mask);
321 static int kill_lwp (int lwpid, int signo);
322
323 static int stop_callback (struct lwp_info *lp, void *data);
324
325 /* Captures the result of a successful waitpid call, along with the
326 options used in that call. */
327 struct waitpid_result
328 {
329 int pid;
330 int status;
331 int options;
332 struct waitpid_result *next;
333 };
334
335 /* A singly-linked list of the results of the waitpid calls performed
336 in the async SIGCHLD handler. */
337 static struct waitpid_result *waitpid_queue = NULL;
338
339 /* Similar to `waitpid', but check the local event queue instead of
340 querying the kernel queue. If PEEK, don't remove the event found
341 from the queue. */
342
343 static int
344 queued_waitpid_1 (int pid, int *status, int flags, int peek)
345 {
346 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
347
348 if (debug_linux_nat_async)
349 fprintf_unfiltered (gdb_stdlog,
350 "\
351 QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
352 linux_nat_async_events_state,
353 linux_nat_num_queued_events);
354
355 if (flags & __WALL)
356 {
357 for (; msg; prev = msg, msg = msg->next)
358 if (pid == -1 || pid == msg->pid)
359 break;
360 }
361 else if (flags & __WCLONE)
362 {
363 for (; msg; prev = msg, msg = msg->next)
364 if (msg->options & __WCLONE
365 && (pid == -1 || pid == msg->pid))
366 break;
367 }
368 else
369 {
370 for (; msg; prev = msg, msg = msg->next)
371 if ((msg->options & __WCLONE) == 0
372 && (pid == -1 || pid == msg->pid))
373 break;
374 }
375
376 if (msg)
377 {
378 int pid;
379
380 if (status)
381 *status = msg->status;
382 pid = msg->pid;
383
384 if (debug_linux_nat_async)
385 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
386 pid, msg->status);
387
388 if (!peek)
389 {
390 if (prev)
391 prev->next = msg->next;
392 else
393 waitpid_queue = msg->next;
394
395 msg->next = NULL;
396 xfree (msg);
397 }
398
399 return pid;
400 }
401
402 if (debug_linux_nat_async)
403 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
404
405 if (status)
406 *status = 0;
407 return -1;
408 }
409
410 /* Similar to `waitpid', but check the local event queue. */
411
412 static int
413 queued_waitpid (int pid, int *status, int flags)
414 {
415 return queued_waitpid_1 (pid, status, flags, 0);
416 }
417
418 static void
419 push_waitpid (int pid, int status, int options)
420 {
421 struct waitpid_result *event, *new_event;
422
423 new_event = xmalloc (sizeof (*new_event));
424 new_event->pid = pid;
425 new_event->status = status;
426 new_event->options = options;
427 new_event->next = NULL;
428
429 if (waitpid_queue)
430 {
431 for (event = waitpid_queue;
432 event && event->next;
433 event = event->next)
434 ;
435
436 event->next = new_event;
437 }
438 else
439 waitpid_queue = new_event;
440 }
441
442 /* Drain all queued events of PID. If PID is -1, the effect is to
443 drain all events. */
444 static void
445 drain_queued_events (int pid)
446 {
447 while (queued_waitpid (pid, NULL, __WALL) != -1)
448 ;
449 }
450
451 \f
452 /* Trivial list manipulation functions to keep track of a list of
453 new stopped processes. */
454 static void
455 add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
456 {
457 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
458 new_pid->pid = pid;
459 new_pid->status = status;
460 new_pid->next = *listp;
461 *listp = new_pid;
462 }
463
464 static int
465 pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
466 {
467 struct simple_pid_list **p;
468
469 for (p = listp; *p != NULL; p = &(*p)->next)
470 if ((*p)->pid == pid)
471 {
472 struct simple_pid_list *next = (*p)->next;
473 *status = (*p)->status;
474 xfree (*p);
475 *p = next;
476 return 1;
477 }
478 return 0;
479 }
480
481 static void
482 linux_record_stopped_pid (int pid, int status)
483 {
484 add_to_pid_list (&stopped_pids, pid, status);
485 }
486
487 \f
488 /* A helper function for linux_test_for_tracefork, called after fork (). */
489
490 static void
491 linux_tracefork_child (void)
492 {
495 ptrace (PTRACE_TRACEME, 0, 0, 0);
496 kill (getpid (), SIGSTOP);
497 fork ();
498 _exit (0);
499 }
500
501 /* Wrapper function for waitpid which handles EINTR, and checks for
502 locally queued events. */
503
504 static int
505 my_waitpid (int pid, int *status, int flags)
506 {
507 int ret;
508
509 /* There should be no concurrent calls to waitpid. */
510 gdb_assert (linux_nat_async_events_state == sigchld_sync);
511
512 ret = queued_waitpid (pid, status, flags);
513 if (ret != -1)
514 return ret;
515
516 do
517 {
518 ret = waitpid (pid, status, flags);
519 }
520 while (ret == -1 && errno == EINTR);
521
522 return ret;
523 }
524
525 /* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
526
527 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
528 we know that the feature is not available. This may change the tracing
529 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
530
531 However, if it succeeds, we don't know for sure that the feature is
532 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
533 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
534 fork tracing, and let it fork. If the process exits, we assume that we
535 can't use TRACEFORK; if we get the fork notification, and we can extract
536 the new child's PID, then we assume that we can. */
537
538 static void
539 linux_test_for_tracefork (int original_pid)
540 {
541 int child_pid, ret, status;
542 long second_pid;
543 enum sigchld_state async_events_original_state;
544
545 async_events_original_state = linux_nat_async_events (sigchld_sync);
546
547 linux_supports_tracefork_flag = 0;
548 linux_supports_tracevforkdone_flag = 0;
549
550 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
551 if (ret != 0)
552 return;
553
554 child_pid = fork ();
555 if (child_pid == -1)
556 perror_with_name (("fork"));
557
558 if (child_pid == 0)
559 linux_tracefork_child ();
560
561 ret = my_waitpid (child_pid, &status, 0);
562 if (ret == -1)
563 perror_with_name (("waitpid"));
564 else if (ret != child_pid)
565 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
566 if (! WIFSTOPPED (status))
567 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
568
569 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
570 if (ret != 0)
571 {
572 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
573 if (ret != 0)
574 {
575 warning (_("linux_test_for_tracefork: failed to kill child"));
576 linux_nat_async_events (async_events_original_state);
577 return;
578 }
579
580 ret = my_waitpid (child_pid, &status, 0);
581 if (ret != child_pid)
582 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
583 else if (!WIFSIGNALED (status))
584 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
585 "killed child"), status);
586
587 linux_nat_async_events (async_events_original_state);
588 return;
589 }
590
591 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
592 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
593 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
594 linux_supports_tracevforkdone_flag = (ret == 0);
595
596 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
597 if (ret != 0)
598 warning (_("linux_test_for_tracefork: failed to resume child"));
599
600 ret = my_waitpid (child_pid, &status, 0);
601
602 if (ret == child_pid && WIFSTOPPED (status)
603 && status >> 16 == PTRACE_EVENT_FORK)
604 {
605 second_pid = 0;
606 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
607 if (ret == 0 && second_pid != 0)
608 {
609 int second_status;
610
611 linux_supports_tracefork_flag = 1;
612 my_waitpid (second_pid, &second_status, 0);
613 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
614 if (ret != 0)
615 warning (_("linux_test_for_tracefork: failed to kill second child"));
616 my_waitpid (second_pid, &status, 0);
617 }
618 }
619 else
620 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
621 "(%d, status 0x%x)"), ret, status);
622
623 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
624 if (ret != 0)
625 warning (_("linux_test_for_tracefork: failed to kill child"));
626 my_waitpid (child_pid, &status, 0);
627
628 linux_nat_async_events (async_events_original_state);
629 }
630
631 /* Return non-zero iff we have tracefork functionality available.
632 This function also sets linux_supports_tracefork_flag. */
633
634 static int
635 linux_supports_tracefork (int pid)
636 {
637 if (linux_supports_tracefork_flag == -1)
638 linux_test_for_tracefork (pid);
639 return linux_supports_tracefork_flag;
640 }
641
642 static int
643 linux_supports_tracevforkdone (int pid)
644 {
645 if (linux_supports_tracefork_flag == -1)
646 linux_test_for_tracefork (pid);
647 return linux_supports_tracevforkdone_flag;
648 }
649
650 \f
651 void
652 linux_enable_event_reporting (ptid_t ptid)
653 {
654 int pid = ptid_get_lwp (ptid);
655 int options;
656
657 if (pid == 0)
658 pid = ptid_get_pid (ptid);
659
660 if (! linux_supports_tracefork (pid))
661 return;
662
663 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
664 | PTRACE_O_TRACECLONE;
665 if (linux_supports_tracevforkdone (pid))
666 options |= PTRACE_O_TRACEVFORKDONE;
667
668 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
669 read-only process state. */
670
671 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
672 }
673
674 static void
675 linux_child_post_attach (int pid)
676 {
677 linux_enable_event_reporting (pid_to_ptid (pid));
678 check_for_thread_db ();
679 }
680
681 static void
682 linux_child_post_startup_inferior (ptid_t ptid)
683 {
684 linux_enable_event_reporting (ptid);
685 check_for_thread_db ();
686 }
687
688 static int
689 linux_child_follow_fork (struct target_ops *ops, int follow_child)
690 {
691 ptid_t last_ptid;
692 struct target_waitstatus last_status;
693 int has_vforked;
694 int parent_pid, child_pid;
695
696 if (target_can_async_p ())
697 target_async (NULL, 0);
698
699 get_last_target_status (&last_ptid, &last_status);
700 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
701 parent_pid = ptid_get_lwp (last_ptid);
702 if (parent_pid == 0)
703 parent_pid = ptid_get_pid (last_ptid);
704 child_pid = PIDGET (last_status.value.related_pid);
705
706 if (! follow_child)
707 {
708 /* We're already attached to the parent, by default. */
709
710 /* Before detaching from the child, remove all breakpoints from
711 it. (This won't actually modify the breakpoint list, but will
712 physically remove the breakpoints from the child.) */
713 /* If we vforked this will remove the breakpoints from the parent
714 also, but they'll be reinserted below. */
715 detach_breakpoints (child_pid);
716
717 /* Detach new forked process? */
718 if (detach_fork)
719 {
720 if (info_verbose || debug_linux_nat)
721 {
722 target_terminal_ours ();
723 fprintf_filtered (gdb_stdlog,
724 "Detaching after fork from child process %d.\n",
725 child_pid);
726 }
727
728 ptrace (PTRACE_DETACH, child_pid, 0, 0);
729 }
730 else
731 {
732 struct fork_info *fp;
733 struct inferior *parent_inf, *child_inf;
734
735 /* Add process to GDB's tables. */
736 child_inf = add_inferior (child_pid);
737
738 parent_inf = find_inferior_pid (GET_PID (last_ptid));
739 child_inf->attach_flag = parent_inf->attach_flag;
740
741 /* Retain child fork in ptrace (stopped) state. */
742 fp = find_fork_pid (child_pid);
743 if (!fp)
744 fp = add_fork (child_pid);
745 fork_save_infrun_state (fp, 0);
746 }
747
748 if (has_vforked)
749 {
750 gdb_assert (linux_supports_tracefork_flag >= 0);
751 if (linux_supports_tracevforkdone (0))
752 {
753 int status;
754
755 ptrace (PTRACE_CONT, parent_pid, 0, 0);
756 my_waitpid (parent_pid, &status, __WALL);
757 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
758 warning (_("Unexpected waitpid result %06x when waiting for "
759 "vfork-done"), status);
760 }
761 else
762 {
763 /* We can't insert breakpoints until the child has
764 finished with the shared memory region. We need to
765 wait until that happens. Ideal would be to just
766 call:
767 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
768 - waitpid (parent_pid, &status, __WALL);
769 However, most architectures can't handle a syscall
770 being traced on the way out if it wasn't traced on
771 the way in.
772
773 We might also think to loop, continuing the child
774 until it exits or gets a SIGTRAP. One problem is
775 that the child might call ptrace with PTRACE_TRACEME.
776
777 There's no simple and reliable way to figure out when
778 the vforked child will be done with its copy of the
779 shared memory. We could step it out of the syscall,
780 two instructions, let it go, and then single-step the
781 parent once. When we have hardware single-step, this
782 would work; with software single-step it could still
783 be made to work but we'd have to be able to insert
784 single-step breakpoints in the child, and we'd have
785 to insert -just- the single-step breakpoint in the
786 parent. Very awkward.
787
788 In the end, the best we can do is to make sure it
789 runs for a little while. Hopefully it will be out of
790 range of any breakpoints we reinsert. Usually this
791 is only the single-step breakpoint at vfork's return
792 point. */
793
794 usleep (10000);
795 }
796
797 /* Since we vforked, breakpoints were removed in the parent
798 too. Put them back. */
799 reattach_breakpoints (parent_pid);
800 }
801 }
802 else
803 {
804 struct thread_info *last_tp = find_thread_pid (last_ptid);
805 struct thread_info *tp;
806 char child_pid_spelling[40];
807 struct inferior *parent_inf, *child_inf;
808
809 /* Copy user stepping state to the new inferior thread. */
810 struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
811 CORE_ADDR step_range_start = last_tp->step_range_start;
812 CORE_ADDR step_range_end = last_tp->step_range_end;
813 struct frame_id step_frame_id = last_tp->step_frame_id;
814
815 /* Otherwise, deleting the parent would get rid of this
816 breakpoint. */
817 last_tp->step_resume_breakpoint = NULL;
818
819 /* Needed to keep the breakpoint lists in sync. */
820 if (! has_vforked)
821 detach_breakpoints (child_pid);
822
823 /* Before detaching from the parent, remove all breakpoints from it. */
824 remove_breakpoints ();
825
826 if (info_verbose || debug_linux_nat)
827 {
828 target_terminal_ours ();
829 fprintf_filtered (gdb_stdlog,
830 "Attaching after fork to child process %d.\n",
831 child_pid);
832 }
833
834 /* Add the new inferior first, so that the target_detach below
835 doesn't unpush the target. */
836
837 child_inf = add_inferior (child_pid);
838
839 parent_inf = find_inferior_pid (GET_PID (last_ptid));
840 child_inf->attach_flag = parent_inf->attach_flag;
841
842 /* If we're vforking, we may want to hold on to the parent until
843 the child exits or execs. At exec time we can remove the old
844 breakpoints from the parent and detach it; at exit time we
845 could do the same (or even, sneakily, resume debugging it - the
846 child's exec has failed, or something similar).
847
848 This doesn't clean up "properly", because we can't call
849 target_detach, but that's OK; if the current target is "child",
850 then it doesn't need any further cleanups, and lin_lwp will
851 generally not encounter vfork (vfork is defined to fork
852 in libpthread.so).
853
854 The holding part is very easy if we have VFORKDONE events;
855 but keeping track of both processes is beyond GDB at the
856 moment. So we don't expose the parent to the rest of GDB.
857 Instead we quietly hold onto it until such time as we can
858 safely resume it. */
859
860 if (has_vforked)
861 {
862 linux_parent_pid = parent_pid;
863 detach_inferior (parent_pid);
864 }
865 else if (!detach_fork)
866 {
867 struct fork_info *fp;
868 /* Retain parent fork in ptrace (stopped) state. */
869 fp = find_fork_pid (parent_pid);
870 if (!fp)
871 fp = add_fork (parent_pid);
872 fork_save_infrun_state (fp, 0);
873
874 /* Also add an entry for the child fork. */
875 fp = find_fork_pid (child_pid);
876 if (!fp)
877 fp = add_fork (child_pid);
878 fork_save_infrun_state (fp, 0);
879 }
880 else
881 target_detach (NULL, 0);
882
883 inferior_ptid = ptid_build (child_pid, child_pid, 0);
884
885 linux_nat_switch_fork (inferior_ptid);
886 check_for_thread_db ();
887
888 tp = inferior_thread ();
889 tp->step_resume_breakpoint = step_resume_breakpoint;
890 tp->step_range_start = step_range_start;
891 tp->step_range_end = step_range_end;
892 tp->step_frame_id = step_frame_id;
893
894 /* Reset breakpoints in the child as appropriate. */
895 follow_inferior_reset_breakpoints ();
896 }
897
898 if (target_can_async_p ())
899 target_async (inferior_event_handler, 0);
900
901 return 0;
902 }
903
904 \f
905 static void
906 linux_child_insert_fork_catchpoint (int pid)
907 {
908 if (! linux_supports_tracefork (pid))
909 error (_("Your system does not support fork catchpoints."));
910 }
911
912 static void
913 linux_child_insert_vfork_catchpoint (int pid)
914 {
915 if (!linux_supports_tracefork (pid))
916 error (_("Your system does not support vfork catchpoints."));
917 }
918
919 static void
920 linux_child_insert_exec_catchpoint (int pid)
921 {
922 if (!linux_supports_tracefork (pid))
923 error (_("Your system does not support exec catchpoints."));
924 }
925
926 /* On GNU/Linux there are no real LWPs. The closest thing to LWPs
927 are processes sharing the same VM space. A multi-threaded process
928 is basically a group of such processes. However, such a grouping
929 is almost entirely a user-space issue; the kernel doesn't enforce
930 such a grouping at all (this might change in the future). In
931 general, we'll rely on the threads library (i.e. the GNU/Linux
932 Threads library) to provide such a grouping.
933
934 It is perfectly possible to write a multi-threaded application
935 without the assistance of a threads library, by using the clone
936 system call directly. This module should be able to give some
937 rudimentary support for debugging such applications if developers
938 specify the CLONE_PTRACE flag in the clone system call, and are
939 using the Linux kernel 2.4 or above.
940
941 Note that there are some peculiarities in GNU/Linux that affect
942 this code:
943
944 - In general one should specify the __WCLONE flag to waitpid in
945 order to make it report events for any of the cloned processes
946 (and leave it out for the initial process). However, if a cloned
947 process has exited the exit status is only reported if the
948 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
949 we cannot use it since GDB must work on older systems too.
950
951 - When a traced, cloned process exits and is waited for by the
952 debugger, the kernel reassigns it to the original parent and
953 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
954 library doesn't notice this, which leads to the "zombie problem":
955 when debugged, a multi-threaded process that spawns a lot of
956 threads will run out of processes, even if the threads exit,
957 because the "zombies" stay around. */
958
959 /* List of known LWPs. */
960 struct lwp_info *lwp_list;
961
962 /* Number of LWPs in the list. */
963 static int num_lwps;
964 \f
965
966 /* Original signal mask. */
967 static sigset_t normal_mask;
968
969 /* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
970 _initialize_linux_nat. */
971 static sigset_t suspend_mask;
972
973 /* SIGCHLD action for synchronous mode. */
974 struct sigaction sync_sigchld_action;
975
976 /* SIGCHLD action for asynchronous mode. */
977 static struct sigaction async_sigchld_action;
978
979 /* SIGCHLD default action, to pass to new inferiors. */
980 static struct sigaction sigchld_default_action;
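A hedged sketch of the self-pipe pattern the async handlers above rely on (the names are hypothetical, not this file's actual handlers): a SIGCHLD handler may only use async-signal-safe functions, so it reaps with waitpid and wakes the event loop with a one-byte write.

/* Illustrative sketch only: an async-signal-safe SIGCHLD handler
   notifying an event loop through a pipe.  Assumes sketch_pipe holds a
   pipe whose read end the event loop monitors, and a kernel with
   __WALL support; the real code also records PID and STATUS for later
   consumption, and a real handler would save and restore errno.  */
static int sketch_pipe[2];

static void
sketch_async_sigchld_handler (int signo)
{
  int pid, status;

  /* waitpid and write are both async-signal-safe; one byte per reaped
     event wakes up the event loop.  */
  while ((pid = waitpid (-1, &status, WNOHANG | __WALL)) > 0)
    write (sketch_pipe[1], "+", 1);
}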
981 \f
982
983 /* Prototypes for local functions. */
984 static int stop_wait_callback (struct lwp_info *lp, void *data);
985 static int linux_nat_thread_alive (ptid_t ptid);
986 static char *linux_child_pid_to_exec_file (int pid);
987 static int cancel_breakpoint (struct lwp_info *lp);
988
989 \f
990 /* Convert wait status STATUS to a string. Used for printing debug
991 messages only. */
992
993 static char *
994 status_to_str (int status)
995 {
996 static char buf[64];
997
998 if (WIFSTOPPED (status))
999 snprintf (buf, sizeof (buf), "%s (stopped)",
1000 strsignal (WSTOPSIG (status)));
1001 else if (WIFSIGNALED (status))
1002 snprintf (buf, sizeof (buf), "%s (terminated)",
1003 strsignal (WTERMSIG (status)));
1004 else
1005 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
1006
1007 return buf;
1008 }
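For illustration, a couple of sample results, assuming the usual glibc status encoding (W_STOPCODE as used elsewhere in this file):

/* Illustrative sketch only: sample status_to_str results.  */
static void
sketch_status_to_str_usage (void)
{
  /* Stop by SIGSTOP: prints something like "Stopped (signal) (stopped)".  */
  puts (status_to_str (W_STOPCODE (SIGSTOP)));

  /* Normal exit with code 1 (the exit status lives in bits 8..15):
     prints "1 (exited)".  */
  puts (status_to_str (1 << 8));
}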
1009
1010 /* Initialize the list of LWPs. Note that this module, contrary to
1011 what GDB's generic threads layer does for its thread list,
1012 re-initializes the LWP list whenever we mourn the inferior or
1013 detach from it (detaching does not involve mourning). */
1014
1015 static void
1016 init_lwp_list (void)
1017 {
1018 struct lwp_info *lp, *lpnext;
1019
1020 for (lp = lwp_list; lp; lp = lpnext)
1021 {
1022 lpnext = lp->next;
1023 xfree (lp);
1024 }
1025
1026 lwp_list = NULL;
1027 num_lwps = 0;
1028 }
1029
1030 /* Add the LWP specified by PID to the list. Return a pointer to the
1031 structure describing the new LWP. The LWP should already be stopped
1032 (with an exception for the very first LWP). */
1033
1034 static struct lwp_info *
1035 add_lwp (ptid_t ptid)
1036 {
1037 struct lwp_info *lp;
1038
1039 gdb_assert (is_lwp (ptid));
1040
1041 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1042
1043 memset (lp, 0, sizeof (struct lwp_info));
1044
1045 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1046
1047 lp->ptid = ptid;
1048
1049 lp->next = lwp_list;
1050 lwp_list = lp;
1051 ++num_lwps;
1052
1053 if (num_lwps > 1 && linux_nat_new_thread != NULL)
1054 linux_nat_new_thread (ptid);
1055
1056 return lp;
1057 }
1058
1059 /* Remove the LWP specified by PID from the list. */
1060
1061 static void
1062 delete_lwp (ptid_t ptid)
1063 {
1064 struct lwp_info *lp, *lpprev;
1065
1066 lpprev = NULL;
1067
1068 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1069 if (ptid_equal (lp->ptid, ptid))
1070 break;
1071
1072 if (!lp)
1073 return;
1074
1075 num_lwps--;
1076
1077 if (lpprev)
1078 lpprev->next = lp->next;
1079 else
1080 lwp_list = lp->next;
1081
1082 xfree (lp);
1083 }
1084
1085 /* Return a pointer to the structure describing the LWP corresponding
1086 to PID. If no corresponding LWP could be found, return NULL. */
1087
1088 static struct lwp_info *
1089 find_lwp_pid (ptid_t ptid)
1090 {
1091 struct lwp_info *lp;
1092 int lwp;
1093
1094 if (is_lwp (ptid))
1095 lwp = GET_LWP (ptid);
1096 else
1097 lwp = GET_PID (ptid);
1098
1099 for (lp = lwp_list; lp; lp = lp->next)
1100 if (lwp == GET_LWP (lp->ptid))
1101 return lp;
1102
1103 return NULL;
1104 }
1105
1106 /* Call CALLBACK with its second argument set to DATA for every LWP in
1107 the list. If CALLBACK returns 1 for a particular LWP, return a
1108 pointer to the structure describing that LWP immediately.
1109 Otherwise return NULL. */
1110
1111 struct lwp_info *
1112 iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
1113 {
1114 struct lwp_info *lp, *lpnext;
1115
1116 for (lp = lwp_list; lp; lp = lpnext)
1117 {
1118 lpnext = lp->next;
1119 if ((*callback) (lp, data))
1120 return lp;
1121 }
1122
1123 return NULL;
1124 }
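A hedged example of a callback for the iterator above (hypothetical, but shaped like the file's real callbacks such as stop_callback): returning zero visits every LWP; returning non-zero stops the walk and makes iterate_over_lwps return that LWP.

/* Illustrative sketch only: count the stopped LWPs in the list.  DATA
   points at the running count; returning 0 visits the whole list.  */
static int
sketch_count_stopped_callback (struct lwp_info *lp, void *data)
{
  int *count = data;

  if (lp->stopped)
    (*count)++;
  return 0;
}

/* Usage sketch:
     int n = 0;
     iterate_over_lwps (sketch_count_stopped_callback, &n);  */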
1125
1126 /* Update our internal state when changing from one fork (checkpoint,
1127 et cetera) to another indicated by NEW_PTID. We can only switch
1128 single-threaded applications, so we only create one new LWP, and
1129 the previous list is discarded. */
1130
1131 void
1132 linux_nat_switch_fork (ptid_t new_ptid)
1133 {
1134 struct lwp_info *lp;
1135
1136 init_lwp_list ();
1137 lp = add_lwp (new_ptid);
1138 lp->stopped = 1;
1139
1140 init_thread_list ();
1141 add_thread_silent (new_ptid);
1142 }
1143
1144 /* Handle the exit of a single thread LP. */
1145
1146 static void
1147 exit_lwp (struct lwp_info *lp)
1148 {
1149 struct thread_info *th = find_thread_pid (lp->ptid);
1150
1151 if (th)
1152 {
1153 if (print_thread_events)
1154 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1155
1156 delete_thread (lp->ptid);
1157 }
1158
1159 delete_lwp (lp->ptid);
1160 }
1161
1162 /* Detect `T (stopped)' in `/proc/PID/status'.
1163 Other states including `T (tracing stop)' are reported as false. */
1164
1165 static int
1166 pid_is_stopped (pid_t pid)
1167 {
1168 FILE *status_file;
1169 char buf[100];
1170 int retval = 0;
1171
1172 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1173 status_file = fopen (buf, "r");
1174 if (status_file != NULL)
1175 {
1176 int have_state = 0;
1177
1178 while (fgets (buf, sizeof (buf), status_file))
1179 {
1180 if (strncmp (buf, "State:", 6) == 0)
1181 {
1182 have_state = 1;
1183 break;
1184 }
1185 }
1186 if (have_state && strstr (buf, "T (stopped)") != NULL)
1187 retval = 1;
1188 fclose (status_file);
1189 }
1190 return retval;
1191 }
1192
1193 /* Wait for the LWP specified by LP, which we have just attached to.
1194 Returns a wait status for that LWP, to cache. */
1195
1196 static int
1197 linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1198 int *signalled)
1199 {
1200 pid_t new_pid, pid = GET_LWP (ptid);
1201 int status;
1202
1203 if (pid_is_stopped (pid))
1204 {
1205 if (debug_linux_nat)
1206 fprintf_unfiltered (gdb_stdlog,
1207 "LNPAW: Attaching to a stopped process\n");
1208
1209 /* The process is definitely stopped. It is in a job control
1210 stop, unless the kernel predates the TASK_STOPPED /
1211 TASK_TRACED distinction, in which case it might be in a
1212 ptrace stop. Make sure it is in a ptrace stop; from there we
1213 can kill it, signal it, et cetera.
1214
1215 First make sure there is a pending SIGSTOP. Since we are
1216 already attached, the process can not transition from stopped
1217 to running without a PTRACE_CONT; so we know this signal will
1218 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1219 probably already in the queue (unless this kernel is old
1220 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1221 is not an RT signal, it can only be queued once. */
1222 kill_lwp (pid, SIGSTOP);
1223
1224 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1225 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1226 ptrace (PTRACE_CONT, pid, 0, 0);
1227 }
1228
1229 /* Make sure the initial process is stopped. The user-level threads
1230 layer might want to poke around in the inferior, and that won't
1231 work if things haven't stabilized yet. */
1232 new_pid = my_waitpid (pid, &status, 0);
1233 if (new_pid == -1 && errno == ECHILD)
1234 {
1235 if (first)
1236 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1237
1238 /* Try again with __WCLONE to check cloned processes. */
1239 new_pid = my_waitpid (pid, &status, __WCLONE);
1240 *cloned = 1;
1241 }
1242
1243 gdb_assert (pid == new_pid && WIFSTOPPED (status));
1244
1245 if (WSTOPSIG (status) != SIGSTOP)
1246 {
1247 *signalled = 1;
1248 if (debug_linux_nat)
1249 fprintf_unfiltered (gdb_stdlog,
1250 "LNPAW: Received %s after attaching\n",
1251 status_to_str (status));
1252 }
1253
1254 return status;
1255 }
1256
1257 /* Attach to the LWP specified by PID. Return 0 if successful or -1
1258 if the new LWP could not be attached. */
1259
1260 int
1261 lin_lwp_attach_lwp (ptid_t ptid)
1262 {
1263 struct lwp_info *lp;
1264 enum sigchld_state async_events_original_state;
1265
1266 gdb_assert (is_lwp (ptid));
1267
1268 async_events_original_state = linux_nat_async_events (sigchld_sync);
1269
1270 lp = find_lwp_pid (ptid);
1271
1272 /* We assume that we're already attached to any LWP that has an id
1273 equal to the overall process id, and to any LWP that is already
1274 in our list of LWPs. If we're not seeing exit events from threads
1275 and we've had PID wraparound since we last tried to stop all threads,
1276 this assumption might be wrong; fortunately, this is very unlikely
1277 to happen. */
1278 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
1279 {
1280 int status, cloned = 0, signalled = 0;
1281
1282 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
1283 {
1284 /* If we fail to attach to the thread, issue a warning,
1285 but continue. One way this can happen is if thread
1286 creation is interrupted; as of Linux kernel 2.6.19, a
1287 bug may place threads in the thread list and then fail
1288 to create them. */
1289 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1290 safe_strerror (errno));
1291 return -1;
1292 }
1293
1294 if (debug_linux_nat)
1295 fprintf_unfiltered (gdb_stdlog,
1296 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1297 target_pid_to_str (ptid));
1298
1299 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1300 lp = add_lwp (ptid);
1301 lp->stopped = 1;
1302 lp->cloned = cloned;
1303 lp->signalled = signalled;
1304 if (WSTOPSIG (status) != SIGSTOP)
1305 {
1306 lp->resumed = 1;
1307 lp->status = status;
1308 }
1309
1310 target_post_attach (GET_LWP (lp->ptid));
1311
1312 if (debug_linux_nat)
1313 {
1314 fprintf_unfiltered (gdb_stdlog,
1315 "LLAL: waitpid %s received %s\n",
1316 target_pid_to_str (ptid),
1317 status_to_str (status));
1318 }
1319 }
1320 else
1321 {
1322 /* We assume that the LWP representing the original process is
1323 already stopped. Mark it as stopped in the data structure
1324 that the GNU/Linux ptrace layer uses to keep track of
1325 threads. Note that this won't have already been done since
1326 the main thread will have, we assume, been stopped by an
1327 attach from a different layer. */
1328 if (lp == NULL)
1329 lp = add_lwp (ptid);
1330 lp->stopped = 1;
1331 }
1332
1333 linux_nat_async_events (async_events_original_state);
1334 return 0;
1335 }
1336
1337 static void
1338 linux_nat_create_inferior (struct target_ops *ops,
1339 char *exec_file, char *allargs, char **env,
1340 int from_tty)
1341 {
1342 int saved_async = 0;
1343 #ifdef HAVE_PERSONALITY
1344 int personality_orig = 0, personality_set = 0;
1345 #endif /* HAVE_PERSONALITY */
1346
1347 /* The fork_child mechanism is synchronous and calls target_wait, so
1348 we have to mask the async mode. */
1349
1350 if (target_can_async_p ())
1351 /* Mask async mode. Creating a child currently requires a loop
1352 that calls wait_for_inferior. */
1353 saved_async = linux_nat_async_mask (0);
1354 else
1355 {
1356 /* Restore the original signal mask. */
1357 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1358 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1359 suspend_mask = normal_mask;
1360 sigdelset (&suspend_mask, SIGCHLD);
1361 }
1362
1363 /* Set SIGCHLD to the default action, until after execing the child,
1364 since the inferior inherits the superior's signal mask. It will
1365 be blocked again in linux_nat_wait, which is only reached after
1366 the inferior execing. */
1367 linux_nat_async_events (sigchld_default);
1368
1369 #ifdef HAVE_PERSONALITY
1370 if (disable_randomization)
1371 {
1372 errno = 0;
1373 personality_orig = personality (0xffffffff);
1374 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1375 {
1376 personality_set = 1;
1377 personality (personality_orig | ADDR_NO_RANDOMIZE);
1378 }
1379 if (errno != 0 || (personality_set
1380 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1381 warning (_("Error disabling address space randomization: %s"),
1382 safe_strerror (errno));
1383 }
1384 #endif /* HAVE_PERSONALITY */
1385
1386 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
1387
1388 #ifdef HAVE_PERSONALITY
1389 if (personality_set)
1390 {
1391 errno = 0;
1392 personality (personality_orig);
1393 if (errno != 0)
1394 warning (_("Error restoring address space randomization: %s"),
1395 safe_strerror (errno));
1396 }
1397 #endif /* HAVE_PERSONALITY */
1398
1399 if (saved_async)
1400 linux_nat_async_mask (saved_async);
1401 }
1402
1403 static void
1404 linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
1405 {
1406 struct lwp_info *lp;
1407 int status;
1408 ptid_t ptid;
1409
1410 /* FIXME: We should probably accept a list of process id's, and
1411 attach all of them. */
1412 linux_ops->to_attach (ops, args, from_tty);
1413
1414 if (!target_can_async_p ())
1415 {
1416 /* Restore the original signal mask. */
1417 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1418 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1419 suspend_mask = normal_mask;
1420 sigdelset (&suspend_mask, SIGCHLD);
1421 }
1422
1423 /* The ptrace base target adds the main thread with (pid,0,0)
1424 format. Decorate it with lwp info. */
1425 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1426 thread_change_ptid (inferior_ptid, ptid);
1427
1428 /* Add the initial process as the first LWP to the list. */
1429 lp = add_lwp (ptid);
1430
1431 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1432 &lp->signalled);
1433 lp->stopped = 1;
1434
1435 /* Save the wait status to report later. */
1436 lp->resumed = 1;
1437 if (debug_linux_nat)
1438 fprintf_unfiltered (gdb_stdlog,
1439 "LNA: waitpid %ld, saving status %s\n",
1440 (long) GET_PID (lp->ptid), status_to_str (status));
1441
1442 if (!target_can_async_p ())
1443 lp->status = status;
1444 else
1445 {
1446 /* We already waited for this LWP, so put the wait result on the
1447 pipe. The event loop will wake up and get to handling
1448 this event. */
1449 linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
1450 lp->cloned ? __WCLONE : 0);
1451 /* Register in the event loop. */
1452 target_async (inferior_event_handler, 0);
1453 }
1454 }
1455
1456 /* Get pending status of LP. */
1457 static int
1458 get_pending_status (struct lwp_info *lp, int *status)
1459 {
1460 struct target_waitstatus last;
1461 ptid_t last_ptid;
1462
1463 get_last_target_status (&last_ptid, &last);
1464
1465 /* If this lwp is the ptid that GDB is processing an event from, the
1466 signal will be in stop_signal. Otherwise, in all-stop + sync
1467 mode, we may cache pending events in lp->status while trying to
1468 stop all threads (see stop_wait_callback). In async mode, the
1469 events are always cached in waitpid_queue. */
1470
1471 *status = 0;
1472
1473 if (non_stop)
1474 {
1475 enum target_signal signo = TARGET_SIGNAL_0;
1476
1477 if (is_executing (lp->ptid))
1478 {
1479 /* If the core thought this lwp was executing --- e.g., the
1480 executing property hasn't been updated yet, but the
1481 thread has been stopped with a stop_callback /
1482 stop_wait_callback sequence (see linux_nat_detach for
1483 example) --- we can only have pending events in the local
1484 queue. */
1485 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1486 {
1487 if (WIFSTOPPED (*status))
1488 signo = target_signal_from_host (WSTOPSIG (*status));
1489
1490 /* If not stopped, then the lwp is gone, no use in
1491 resending a signal. */
1492 }
1493 }
1494 else
1495 {
1496 /* If the core knows the thread is not executing, then we
1497 have the last signal recorded in
1498 thread_info->stop_signal. */
1499
1500 struct thread_info *tp = find_thread_pid (lp->ptid);
1501 signo = tp->stop_signal;
1502 }
1503
1504 if (signo != TARGET_SIGNAL_0
1505 && !signal_pass_state (signo))
1506 {
1507 if (debug_linux_nat)
1508 fprintf_unfiltered (gdb_stdlog, "\
1509 GPT: lwp %s had signal %s, but it is in no pass state\n",
1510 target_pid_to_str (lp->ptid),
1511 target_signal_to_string (signo));
1512 }
1513 else
1514 {
1515 if (signo != TARGET_SIGNAL_0)
1516 *status = W_STOPCODE (target_signal_to_host (signo));
1517
1518 if (debug_linux_nat)
1519 fprintf_unfiltered (gdb_stdlog,
1520 "GPT: lwp %s as pending signal %s\n",
1521 target_pid_to_str (lp->ptid),
1522 target_signal_to_string (signo));
1523 }
1524 }
1525 else
1526 {
1527 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1528 {
1529 struct thread_info *tp = find_thread_pid (lp->ptid);
1530 if (tp->stop_signal != TARGET_SIGNAL_0
1531 && signal_pass_state (tp->stop_signal))
1532 *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
1533 }
1534 else if (target_can_async_p ())
1535 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1536 else
1537 *status = lp->status;
1538 }
1539
1540 return 0;
1541 }
1542
1543 static int
1544 detach_callback (struct lwp_info *lp, void *data)
1545 {
1546 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1547
1548 if (debug_linux_nat && lp->status)
1549 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1550 strsignal (WSTOPSIG (lp->status)),
1551 target_pid_to_str (lp->ptid));
1552
1553 /* If there is a pending SIGSTOP, get rid of it. */
1554 if (lp->signalled)
1555 {
1556 if (debug_linux_nat)
1557 fprintf_unfiltered (gdb_stdlog,
1558 "DC: Sending SIGCONT to %s\n",
1559 target_pid_to_str (lp->ptid));
1560
1561 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
1562 lp->signalled = 0;
1563 }
1564
1565 /* We don't actually detach from the LWP that has an id equal to the
1566 overall process id just yet. */
1567 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1568 {
1569 int status = 0;
1570
1571 /* Pass on any pending signal for this LWP. */
1572 get_pending_status (lp, &status);
1573
1574 errno = 0;
1575 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1576 WSTOPSIG (status)) < 0)
1577 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
1578 safe_strerror (errno));
1579
1580 if (debug_linux_nat)
1581 fprintf_unfiltered (gdb_stdlog,
1582 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1583 target_pid_to_str (lp->ptid),
1584 strsignal (WSTOPSIG (lp->status)));
1585
1586 delete_lwp (lp->ptid);
1587 }
1588
1589 return 0;
1590 }
1591
1592 static void
1593 linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
1594 {
1595 int pid;
1596 int status;
1597 enum target_signal sig;
1598
1599 if (target_can_async_p ())
1600 linux_nat_async (NULL, 0);
1601
1602 /* Stop all threads before detaching. ptrace requires that the
1603 thread is stopped to successfully detach. */
1604 iterate_over_lwps (stop_callback, NULL);
1605 /* ... and wait until all of them have reported back that
1606 they're no longer running. */
1607 iterate_over_lwps (stop_wait_callback, NULL);
1608
1609 iterate_over_lwps (detach_callback, NULL);
1610
1611 /* Only the initial process should be left right now. */
1612 gdb_assert (num_lwps == 1);
1613
1614 /* Pass on any pending signal for the last LWP. */
1615 if ((args == NULL || *args == '\0')
1616 && get_pending_status (lwp_list, &status) != -1
1617 && WIFSTOPPED (status))
1618 {
1619 /* Put the signal number in ARGS so that inf_ptrace_detach will
1620 pass it along with PTRACE_DETACH. */
1621 args = alloca (8);
1622 sprintf (args, "%d", (int) WSTOPSIG (status));
1623 if (debug_linux_nat)
1624 fprintf_unfiltered (gdb_stdlog, "LND: Sending signal %s to %s\n",
1625 args, target_pid_to_str (lwp_list->ptid));
1626 }
1627
1628
1629 /* Destroy LWP info; it's no longer valid. */
1630 init_lwp_list ();
1631
1632 pid = ptid_get_pid (inferior_ptid);
1633
1634 if (target_can_async_p ())
1635 drain_queued_events (pid);
1636
1637 if (forks_exist_p ())
1638 {
1639 /* Multi-fork case. The current inferior_ptid is being detached
1640 from, but there are other viable forks to debug. Detach from
1641 the current fork, and context-switch to the first
1642 available. */
1643 linux_fork_detach (args, from_tty);
1644
1645 if (non_stop && target_can_async_p ())
1646 target_async (inferior_event_handler, 0);
1647 }
1648 else
1649 linux_ops->to_detach (ops, args, from_tty);
1650 }
1651
1652 /* Resume LP. */
1653
1654 static int
1655 resume_callback (struct lwp_info *lp, void *data)
1656 {
1657 if (lp->stopped && lp->status == 0)
1658 {
1659 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1660 0, TARGET_SIGNAL_0);
1661 if (debug_linux_nat)
1662 fprintf_unfiltered (gdb_stdlog,
1663 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1664 target_pid_to_str (lp->ptid));
1665 lp->stopped = 0;
1666 lp->step = 0;
1667 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1668 }
1669 else if (lp->stopped && debug_linux_nat)
1670 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
1671 target_pid_to_str (lp->ptid));
1672 else if (debug_linux_nat)
1673 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
1674 target_pid_to_str (lp->ptid));
1675
1676 return 0;
1677 }
1678
1679 static int
1680 resume_clear_callback (struct lwp_info *lp, void *data)
1681 {
1682 lp->resumed = 0;
1683 return 0;
1684 }
1685
1686 static int
1687 resume_set_callback (struct lwp_info *lp, void *data)
1688 {
1689 lp->resumed = 1;
1690 return 0;
1691 }
1692
1693 static void
1694 linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1695 {
1696 struct lwp_info *lp;
1697 int resume_all;
1698
1699 if (debug_linux_nat)
1700 fprintf_unfiltered (gdb_stdlog,
1701 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1702 step ? "step" : "resume",
1703 target_pid_to_str (ptid),
1704 signo ? strsignal (signo) : "0",
1705 target_pid_to_str (inferior_ptid));
1706
1707 if (target_can_async_p ())
1708 /* Block events while we're here. */
1709 linux_nat_async_events (sigchld_sync);
1710
1711 /* A specific PTID means `step only this process id'. */
1712 resume_all = (PIDGET (ptid) == -1);
1713
1714 if (non_stop && resume_all)
1715 internal_error (__FILE__, __LINE__,
1716 "can't resume all in non-stop mode");
1717
1718 if (!non_stop)
1719 {
1720 if (resume_all)
1721 iterate_over_lwps (resume_set_callback, NULL);
1722 else
1723 iterate_over_lwps (resume_clear_callback, NULL);
1724 }
1725
1726 /* If PID is -1, it's the current inferior that should be
1727 handled specially. */
1728 if (PIDGET (ptid) == -1)
1729 ptid = inferior_ptid;
1730
1731 lp = find_lwp_pid (ptid);
1732 gdb_assert (lp != NULL);
1733
1734 /* Convert to something the lower layer understands. */
1735 ptid = pid_to_ptid (GET_LWP (lp->ptid));
1736
1737 /* Remember if we're stepping. */
1738 lp->step = step;
1739
1740 /* Mark this LWP as resumed. */
1741 lp->resumed = 1;
1742
1743 /* If we have a pending wait status for this thread, there is no
1744 point in resuming the process. But first make sure that
1745 linux_nat_wait won't preemptively handle the event - we
1746 should never take this short-circuit if we are going to
1747 leave LP running, since we have skipped resuming all the
1748 other threads. This bit of code needs to be synchronized
1749 with linux_nat_wait. */
1750
1751 /* In async mode, we never have pending wait status. */
1752 if (target_can_async_p () && lp->status)
1753 internal_error (__FILE__, __LINE__, "Pending status in async mode");
1754
1755 if (lp->status && WIFSTOPPED (lp->status))
1756 {
1757 int saved_signo;
1758 struct inferior *inf;
1759
1760 inf = find_inferior_pid (ptid_get_pid (ptid));
1761 gdb_assert (inf);
1762 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1763
1764 /* Defer to common code if we're gaining control of the
1765 inferior. */
1766 if (inf->stop_soon == NO_STOP_QUIETLY
1767 && signal_stop_state (saved_signo) == 0
1768 && signal_print_state (saved_signo) == 0
1769 && signal_pass_state (saved_signo) == 1)
1770 {
1771 if (debug_linux_nat)
1772 fprintf_unfiltered (gdb_stdlog,
1773 "LLR: Not short circuiting for ignored "
1774 "status 0x%x\n", lp->status);
1775
1776 /* FIXME: What should we do if we are supposed to continue
1777 this thread with a signal? */
1778 gdb_assert (signo == TARGET_SIGNAL_0);
1779 signo = saved_signo;
1780 lp->status = 0;
1781 }
1782 }
1783
1784 if (lp->status)
1785 {
1786 /* FIXME: What should we do if we are supposed to continue
1787 this thread with a signal? */
1788 gdb_assert (signo == TARGET_SIGNAL_0);
1789
1790 if (debug_linux_nat)
1791 fprintf_unfiltered (gdb_stdlog,
1792 "LLR: Short circuiting for status 0x%x\n",
1793 lp->status);
1794
1795 return;
1796 }
1797
1798 /* Mark LWP as not stopped to prevent it from being continued by
1799 resume_callback. */
1800 lp->stopped = 0;
1801
1802 if (resume_all)
1803 iterate_over_lwps (resume_callback, NULL);
1804
1805 linux_ops->to_resume (ptid, step, signo);
1806 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1807
1808 if (debug_linux_nat)
1809 fprintf_unfiltered (gdb_stdlog,
1810 "LLR: %s %s, %s (resume event thread)\n",
1811 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1812 target_pid_to_str (ptid),
1813 signo ? strsignal (signo) : "0");
1814
1815 if (target_can_async_p ())
1816 target_async (inferior_event_handler, 0);
1817 }
1818
1819 /* Issue kill to specified lwp. */
1820
1821 static int tkill_failed;
1822
1823 static int
1824 kill_lwp (int lwpid, int signo)
1825 {
1826 errno = 0;
1827
1828 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1829 fails with ENOSYS, the kernel does not support it, so we fall back to kill. */
1830
1831 #ifdef HAVE_TKILL_SYSCALL
1832 if (!tkill_failed)
1833 {
1834 int ret = syscall (__NR_tkill, lwpid, signo);
1835 if (errno != ENOSYS)
1836 return ret;
1837 errno = 0;
1838 tkill_failed = 1;
1839 }
1840 #endif
1841
1842 return kill (lwpid, signo);
1843 }
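
/* For illustration, a minimal sketch (hypothetical helper name) of the
   split that kill_lwp hides: tkill(2) signals one specific thread,
   while kill(2) addresses a whole process.  */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
stop_one_lwp_sketch (pid_t lwpid)
{
  errno = 0;
  /* Preferred on NPTL: deliver SIGSTOP to exactly this thread.  */
  if (syscall (__NR_tkill, lwpid, SIGSTOP) == 0 || errno != ENOSYS)
    return errno;
  /* Pre-tkill kernels (LinuxThreads): every thread has its own PID,
     so plain kill reaches the right task.  */
  return kill (lwpid, SIGSTOP) == 0 ? 0 : errno;
}
#endif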
1844
1845 /* Handle a GNU/Linux extended wait response. If we see a clone
1846 event, we need to add the new LWP to our list (and not report the
1847 trap to higher layers). This function returns non-zero if the
1848 event should be ignored and we should wait again. If STOPPING is
1849 true, the new LWP remains stopped, otherwise it is continued. */
1850
1851 static int
1852 linux_handle_extended_wait (struct lwp_info *lp, int status,
1853 int stopping)
1854 {
1855 int pid = GET_LWP (lp->ptid);
1856 struct target_waitstatus *ourstatus = &lp->waitstatus;
1857 struct lwp_info *new_lp = NULL;
1858 int event = status >> 16;
1859
1860 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1861 || event == PTRACE_EVENT_CLONE)
1862 {
1863 unsigned long new_pid;
1864 int ret;
1865
1866 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
1867
1868 /* If we haven't already seen the new PID stop, wait for it now. */
1869 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1870 {
1871 /* The new child has a pending SIGSTOP. We can't affect it until it
1872 hits the SIGSTOP, but we're already attached. */
1873 ret = my_waitpid (new_pid, &status,
1874 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1875 if (ret == -1)
1876 perror_with_name (_("waiting for new child"));
1877 else if (ret != new_pid)
1878 internal_error (__FILE__, __LINE__,
1879 _("wait returned unexpected PID %d"), ret);
1880 else if (!WIFSTOPPED (status))
1881 internal_error (__FILE__, __LINE__,
1882 _("wait returned unexpected status 0x%x"), status);
1883 }
1884
1885 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
1886
1887 if (event == PTRACE_EVENT_FORK)
1888 ourstatus->kind = TARGET_WAITKIND_FORKED;
1889 else if (event == PTRACE_EVENT_VFORK)
1890 ourstatus->kind = TARGET_WAITKIND_VFORKED;
1891 else
1892 {
1893 struct cleanup *old_chain;
1894
1895 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1896 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1897 new_lp->cloned = 1;
1898 new_lp->stopped = 1;
1899
1900 if (WSTOPSIG (status) != SIGSTOP)
1901 {
1902 /* This can happen if someone starts sending signals with a
1903 lower number than SIGSTOP (e.g. SIGUSR1) to the new thread
1904 before it gets a chance to run.
1905 This is an unlikely case, and harder to handle for
1906 fork / vfork than for clone, so we do not try - but
1907 we handle it for clone events here. We'll send
1908 the other signal on to the thread below. */
1909
1910 new_lp->signalled = 1;
1911 }
1912 else
1913 status = 0;
1914
1915 if (non_stop)
1916 {
1917 /* Add the new thread to GDB's lists as soon as possible
1918 so that:
1919
1920 1) the frontend doesn't have to wait for a stop to
1921 display them, and,
1922
1923 2) we tag it with the correct running state. */
1924
1925 /* If the thread_db layer is active, let it know about
1926 this new thread, and add it to GDB's list. */
1927 if (!thread_db_attach_lwp (new_lp->ptid))
1928 {
1929 /* We're not using thread_db. Add it to GDB's
1930 list. */
1931 target_post_attach (GET_LWP (new_lp->ptid));
1932 add_thread (new_lp->ptid);
1933 }
1934
1935 if (!stopping)
1936 {
1937 set_running (new_lp->ptid, 1);
1938 set_executing (new_lp->ptid, 1);
1939 }
1940 }
1941
1942 if (!stopping)
1943 {
1944 new_lp->stopped = 0;
1945 new_lp->resumed = 1;
1946 ptrace (PTRACE_CONT, new_pid, 0,
1947 status ? WSTOPSIG (status) : 0);
1948 }
1949
1950 if (debug_linux_nat)
1951 fprintf_unfiltered (gdb_stdlog,
1952 "LHEW: Got clone event from LWP %ld, resuming\n",
1953 GET_LWP (lp->ptid));
1954 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1955
1956 return 1;
1957 }
1958
1959 return 0;
1960 }
1961
1962 if (event == PTRACE_EVENT_EXEC)
1963 {
1964 ourstatus->kind = TARGET_WAITKIND_EXECD;
1965 ourstatus->value.execd_pathname
1966 = xstrdup (linux_child_pid_to_exec_file (pid));
1967
1968 if (linux_parent_pid)
1969 {
1970 detach_breakpoints (linux_parent_pid);
1971 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1972
1973 linux_parent_pid = 0;
1974 }
1975
1976 /* At this point, all inserted breakpoints are gone. Doing this
1977 as soon as we detect an exec prevents the badness of deleting
1978 a breakpoint writing the current "shadow contents" to lift
1979 the bp. That shadow is NOT valid after an exec.
1980
1981 Note that we have to do this after the detach_breakpoints
1982 call above, otherwise breakpoints wouldn't be lifted from the
1983 parent on a vfork, because detach_breakpoints would think
1984 that breakpoints are not inserted. */
1985 mark_breakpoints_out ();
1986 return 0;
1987 }
1988
1989 internal_error (__FILE__, __LINE__,
1990 _("unknown ptrace event %d"), event);
1991 }
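
/* For illustration, a sketch of how a ptrace tracer enables and
   decodes these extended events (PID and STATUS are hypothetical
   locals).  The event code travels in the high bits of the waitpid
   status, which is why the code above computes `status >> 16'.  */
#if 0
ptrace (PTRACE_SETOPTIONS, pid, 0,
        PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
        | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC);
waitpid (pid, &status, 0);
if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
    && (status >> 16) == PTRACE_EVENT_CLONE)
  {
    unsigned long new_lwp;

    /* The new thread's id arrives as the event message.  */
    ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_lwp);
  }
#endif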
1992
1993 /* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1994 exited. */
1995
1996 static int
1997 wait_lwp (struct lwp_info *lp)
1998 {
1999 pid_t pid;
2000 int status;
2001 int thread_dead = 0;
2002
2003 gdb_assert (!lp->stopped);
2004 gdb_assert (lp->status == 0);
2005
2006 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
2007 if (pid == -1 && errno == ECHILD)
2008 {
2009 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
2010 if (pid == -1 && errno == ECHILD)
2011 {
2012 /* The thread has previously exited. We need to delete it
2013 now because, for some vendor 2.4 kernels with NPTL
2014 support backported, there won't be an exit event unless
2015 it is the main thread. 2.6 kernels will report an exit
2016 event for each thread that exits, as expected. */
2017 thread_dead = 1;
2018 if (debug_linux_nat)
2019 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2020 target_pid_to_str (lp->ptid));
2021 }
2022 }
2023
2024 if (!thread_dead)
2025 {
2026 gdb_assert (pid == GET_LWP (lp->ptid));
2027
2028 if (debug_linux_nat)
2029 {
2030 fprintf_unfiltered (gdb_stdlog,
2031 "WL: waitpid %s received %s\n",
2032 target_pid_to_str (lp->ptid),
2033 status_to_str (status));
2034 }
2035 }
2036
2037 /* Check if the thread has exited. */
2038 if (!thread_dead && (WIFEXITED (status) || WIFSIGNALED (status)))
2039 {
2040 thread_dead = 1;
2041 if (debug_linux_nat)
2042 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2043 target_pid_to_str (lp->ptid));
2044 }
2045
2046 if (thread_dead)
2047 {
2048 exit_lwp (lp);
2049 return 0;
2050 }
2051
2052 gdb_assert (WIFSTOPPED (status));
2053
2054 /* Handle GNU/Linux's extended waitstatus for trace events. */
2055 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2056 {
2057 if (debug_linux_nat)
2058 fprintf_unfiltered (gdb_stdlog,
2059 "WL: Handling extended status 0x%06x\n",
2060 status);
2061 if (linux_handle_extended_wait (lp, status, 1))
2062 return wait_lwp (lp);
2063 }
2064
2065 return status;
2066 }
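
/* For illustration, the two-step waitpid probe that wait_lwp relies
   on when the kernel lacks __WALL (LWPID is hypothetical): plain
   waitpid only sees non-clone children, so clone LWPs must be
   re-polled with __WCLONE.  */
#if 0
pid_t ret = waitpid (lwpid, &status, 0);
if (ret == -1 && errno == ECHILD)
  /* Not a regular child; try the clone flavor.  */
  ret = waitpid (lwpid, &status, __WCLONE);
if (ret == -1 && errno == ECHILD)
  /* Gone entirely: the LWP exited and was already reaped.  */
  ;
#endif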
2067
2068 /* Save the most recent siginfo for LP. This is currently only called
2069 for SIGTRAP; some ports use the si_addr field for
2070 target_stopped_data_address. In the future, it may also be used to
2071 restore the siginfo of requeued signals. */
2072
2073 static void
2074 save_siginfo (struct lwp_info *lp)
2075 {
2076 errno = 0;
2077 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2078 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2079
2080 if (errno != 0)
2081 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2082 }
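
/* For illustration, how a port typically consumes the siginfo saved
   above in its target_stopped_data_address implementation (a sketch;
   WATCH_ADDR is a hypothetical local): for a hardware watchpoint
   trap, the kernel reports the faulting data address in si_addr.  */
#if 0
struct siginfo si = lp->siginfo;
CORE_ADDR watch_addr;

if (si.si_signo == SIGTRAP && si.si_addr != NULL)
  watch_addr = (CORE_ADDR) (unsigned long) si.si_addr;
#endif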
2083
2084 /* Send a SIGSTOP to LP. */
2085
2086 static int
2087 stop_callback (struct lwp_info *lp, void *data)
2088 {
2089 if (!lp->stopped && !lp->signalled)
2090 {
2091 int ret;
2092
2093 if (debug_linux_nat)
2094 {
2095 fprintf_unfiltered (gdb_stdlog,
2096 "SC: kill %s **<SIGSTOP>**\n",
2097 target_pid_to_str (lp->ptid));
2098 }
2099 errno = 0;
2100 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2101 if (debug_linux_nat)
2102 {
2103 fprintf_unfiltered (gdb_stdlog,
2104 "SC: lwp kill %d %s\n",
2105 ret,
2106 errno ? safe_strerror (errno) : "ERRNO-OK");
2107 }
2108
2109 lp->signalled = 1;
2110 gdb_assert (lp->status == 0);
2111 }
2112
2113 return 0;
2114 }
2115
2116 /* Return non-zero if LWP PID has a pending SIGINT. */
2117
2118 static int
2119 linux_nat_has_pending_sigint (int pid)
2120 {
2121 sigset_t pending, blocked, ignored;
2122 int i;
2123
2124 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2125
2126 if (sigismember (&pending, SIGINT)
2127 && !sigismember (&ignored, SIGINT))
2128 return 1;
2129
2130 return 0;
2131 }
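
/* For illustration, the /proc text that linux_proc_pending_signals
   digests: the masks in /proc/<pid>/status are hexadecimal sigset
   dumps, with signal N stored in bit N - 1.  A sketch of parsing one
   such line (the pid 1234 is made up):  */
#if 0
FILE *f = fopen ("/proc/1234/status", "r");
char line[128];

if (f != NULL)
  {
    while (fgets (line, sizeof (line), f) != NULL)
      if (strncmp (line, "SigPnd:", 7) == 0)
        {
          unsigned long long pending = strtoull (line + 7, NULL, 16);

          if (pending & (1ULL << (SIGINT - 1)))
            ; /* SIGINT is pending for this task.  */
        }
    fclose (f);
  }
#endif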
2132
2133 /* Set a flag in LP indicating that we should ignore its next SIGINT. */
2134
2135 static int
2136 set_ignore_sigint (struct lwp_info *lp, void *data)
2137 {
2138 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2139 flag to consume the next one. */
2140 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2141 && WSTOPSIG (lp->status) == SIGINT)
2142 lp->status = 0;
2143 else
2144 lp->ignore_sigint = 1;
2145
2146 return 0;
2147 }
2148
2149 /* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2150 This function is called after we know the LWP has stopped; if the LWP
2151 stopped before the expected SIGINT was delivered, then it will never have
2152 arrived. Also, if the signal was delivered to a shared queue and consumed
2153 by a different thread, it will never be delivered to this LWP. */
2154
2155 static void
2156 maybe_clear_ignore_sigint (struct lwp_info *lp)
2157 {
2158 if (!lp->ignore_sigint)
2159 return;
2160
2161 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2162 {
2163 if (debug_linux_nat)
2164 fprintf_unfiltered (gdb_stdlog,
2165 "MCIS: Clearing bogus flag for %s\n",
2166 target_pid_to_str (lp->ptid));
2167 lp->ignore_sigint = 0;
2168 }
2169 }
2170
2171 /* Wait until LP is stopped. */
2172
2173 static int
2174 stop_wait_callback (struct lwp_info *lp, void *data)
2175 {
2176 if (!lp->stopped)
2177 {
2178 int status;
2179
2180 status = wait_lwp (lp);
2181 if (status == 0)
2182 return 0;
2183
2184 if (lp->ignore_sigint && WIFSTOPPED (status)
2185 && WSTOPSIG (status) == SIGINT)
2186 {
2187 lp->ignore_sigint = 0;
2188
2189 errno = 0;
2190 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2191 if (debug_linux_nat)
2192 fprintf_unfiltered (gdb_stdlog,
2193 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
2194 target_pid_to_str (lp->ptid),
2195 errno ? safe_strerror (errno) : "OK");
2196
2197 return stop_wait_callback (lp, NULL);
2198 }
2199
2200 maybe_clear_ignore_sigint (lp);
2201
2202 if (WSTOPSIG (status) != SIGSTOP)
2203 {
2204 if (WSTOPSIG (status) == SIGTRAP)
2205 {
2206 /* If a LWP other than the LWP that we're reporting an
2207 event for has hit a GDB breakpoint (as opposed to
2208 some random trap signal), then just arrange for it to
2209 hit it again later. We don't keep the SIGTRAP status
2210 and don't forward the SIGTRAP signal to the LWP. We
2211 will handle the current event, eventually we will
2212 resume all LWPs, and this one will get its breakpoint
2213 trap again.
2214
2215 If we do not do this, then we run the risk that the
2216 user will delete or disable the breakpoint, but the
2217 thread will have already tripped on it. */
2218
2219 /* Save the trap's siginfo in case we need it later. */
2220 save_siginfo (lp);
2221
2222 /* Now resume this LWP and get the SIGSTOP event. */
2223 errno = 0;
2224 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2225 if (debug_linux_nat)
2226 {
2227 fprintf_unfiltered (gdb_stdlog,
2228 "PTRACE_CONT %s, 0, 0 (%s)\n",
2229 target_pid_to_str (lp->ptid),
2230 errno ? safe_strerror (errno) : "OK");
2231
2232 fprintf_unfiltered (gdb_stdlog,
2233 "SWC: Candidate SIGTRAP event in %s\n",
2234 target_pid_to_str (lp->ptid));
2235 }
2236 /* Hold this event/waitstatus while we check to see if
2237 there are any more (we still want to get that SIGSTOP). */
2238 stop_wait_callback (lp, NULL);
2239
2240 if (target_can_async_p ())
2241 {
2242 /* Don't leave a pending wait status in async mode.
2243 Retrigger the breakpoint. */
2244 if (!cancel_breakpoint (lp))
2245 {
2246 /* There was no gdb breakpoint set at pc. Put
2247 the event back in the queue. */
2248 if (debug_linux_nat)
2249 fprintf_unfiltered (gdb_stdlog, "SWC: leaving SIGTRAP in "
2250 "local queue of %s\n", target_pid_to_str (lp->ptid));
2251 push_waitpid (GET_LWP (lp->ptid),
2252 W_STOPCODE (SIGTRAP),
2253 lp->cloned ? __WCLONE : 0);
2254 }
2255 }
2256 else
2257 {
2258 /* Hold the SIGTRAP for handling by
2259 linux_nat_wait. */
2260 /* If there's another event, throw it back into the
2261 queue. */
2262 if (lp->status)
2263 {
2264 if (debug_linux_nat)
2265 fprintf_unfiltered (gdb_stdlog,
2266 "SWC: kill %s, %s\n",
2267 target_pid_to_str (lp->ptid),
2268 status_to_str ((int) status));
2269 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
2270 }
2271 /* Save the sigtrap event. */
2272 lp->status = status;
2273 }
2274 return 0;
2275 }
2276 else
2277 {
2278 /* The thread was stopped with a signal other than
2279 SIGSTOP, and didn't accidentally trip a breakpoint. */
2280
2281 if (debug_linux_nat)
2282 {
2283 fprintf_unfiltered (gdb_stdlog,
2284 "SWC: Pending event %s in %s\n",
2285 status_to_str ((int) status),
2286 target_pid_to_str (lp->ptid));
2287 }
2288 /* Now resume this LWP and get the SIGSTOP event. */
2289 errno = 0;
2290 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2291 if (debug_linux_nat)
2292 fprintf_unfiltered (gdb_stdlog,
2293 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2294 target_pid_to_str (lp->ptid),
2295 errno ? safe_strerror (errno) : "OK");
2296
2297 /* Hold this event/waitstatus while we check to see if
2298 there are any more (we still want to get that SIGSTOP). */
2299 stop_wait_callback (lp, NULL);
2300
2301 /* If the lp->status field is still empty, use it to
2302 hold this event. If not, then this event must be
2303 returned to the event queue of the LWP. */
2304 if (lp->status || target_can_async_p ())
2305 {
2306 if (debug_linux_nat)
2307 {
2308 fprintf_unfiltered (gdb_stdlog,
2309 "SWC: kill %s, %s\n",
2310 target_pid_to_str (lp->ptid),
2311 status_to_str ((int) status));
2312 }
2313 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2314 }
2315 else
2316 lp->status = status;
2317 return 0;
2318 }
2319 }
2320 else
2321 {
2322 /* We caught the SIGSTOP that we intended to catch, so
2323 there's no SIGSTOP pending. */
2324 lp->stopped = 1;
2325 lp->signalled = 0;
2326 }
2327 }
2328
2329 return 0;
2330 }
2331
2332 /* Return non-zero if LP has a wait status pending. */
2333
2334 static int
2335 status_callback (struct lwp_info *lp, void *data)
2336 {
2337 /* Only report a pending wait status for LWPs that we have
2338 resumed, i.e. that higher layers believe to be running. */
2339 return (lp->status != 0 && lp->resumed);
2340 }
2341
2342 /* Return non-zero if LP isn't stopped. */
2343
2344 static int
2345 running_callback (struct lwp_info *lp, void *data)
2346 {
2347 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2348 }
2349
2350 /* Count the LWP's that have had events. */
2351
2352 static int
2353 count_events_callback (struct lwp_info *lp, void *data)
2354 {
2355 int *count = data;
2356
2357 gdb_assert (count != NULL);
2358
2359 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2360 if (lp->status != 0 && lp->resumed
2361 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2362 (*count)++;
2363
2364 return 0;
2365 }
2366
2367 /* Select the LWP (if any) that is currently being single-stepped. */
2368
2369 static int
2370 select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2371 {
2372 if (lp->step && lp->status != 0)
2373 return 1;
2374 else
2375 return 0;
2376 }
2377
2378 /* Select the Nth LWP that has had a SIGTRAP event. */
2379
2380 static int
2381 select_event_lwp_callback (struct lwp_info *lp, void *data)
2382 {
2383 int *selector = data;
2384
2385 gdb_assert (selector != NULL);
2386
2387 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2388 if (lp->status != 0 && lp->resumed
2389 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2390 if ((*selector)-- == 0)
2391 return 1;
2392
2393 return 0;
2394 }
2395
2396 static int
2397 cancel_breakpoint (struct lwp_info *lp)
2398 {
2399 /* Arrange for a breakpoint to be hit again later. We don't keep
2400 the SIGTRAP status and don't forward the SIGTRAP signal to the
2401 LWP. We will handle the current event, eventually we will resume
2402 this LWP, and this breakpoint will trap again.
2403
2404 If we do not do this, then we run the risk that the user will
2405 delete or disable the breakpoint, but the LWP will have already
2406 tripped on it. */
2407
2408 struct regcache *regcache = get_thread_regcache (lp->ptid);
2409 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2410 CORE_ADDR pc;
2411
2412 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2413 if (breakpoint_inserted_here_p (pc))
2414 {
2415 if (debug_linux_nat)
2416 fprintf_unfiltered (gdb_stdlog,
2417 "CB: Push back breakpoint for %s\n",
2418 target_pid_to_str (lp->ptid));
2419
2420 /* Back up the PC if necessary. */
2421 if (gdbarch_decr_pc_after_break (gdbarch))
2422 regcache_write_pc (regcache, pc);
2423
2424 return 1;
2425 }
2426 return 0;
2427 }
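
/* For illustration, why the PC adjustment above exists at all: on
   x86 the breakpoint instruction (int3) traps with the PC already
   past itself, so gdbarch_decr_pc_after_break is 1 there, and 0 on
   most RISC targets.  A sketch with made-up addresses:  */
#if 0
/* Breakpoint planted at 0x8048000; int3 reports PC == 0x8048001.  */
CORE_ADDR pc = regcache_read_pc (regcache);   /* 0x8048001 */
pc -= gdbarch_decr_pc_after_break (gdbarch);  /* 0x8048000 */
if (breakpoint_inserted_here_p (pc))
  regcache_write_pc (regcache, pc);  /* re-hit the real insn later */
#endif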
2428
2429 static int
2430 cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2431 {
2432 struct lwp_info *event_lp = data;
2433
2434 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2435 if (lp == event_lp)
2436 return 0;
2437
2438 /* If a LWP other than the LWP that we're reporting an event for has
2439 hit a GDB breakpoint (as opposed to some random trap signal),
2440 then just arrange for it to hit it again later. We don't keep
2441 the SIGTRAP status and don't forward the SIGTRAP signal to the
2442 LWP. We will handle the current event, eventually we will resume
2443 all LWPs, and this one will get its breakpoint trap again.
2444
2445 If we do not do this, then we run the risk that the user will
2446 delete or disable the breakpoint, but the LWP will have already
2447 tripped on it. */
2448
2449 if (lp->status != 0
2450 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
2451 && cancel_breakpoint (lp))
2452 /* Throw away the SIGTRAP. */
2453 lp->status = 0;
2454
2455 return 0;
2456 }
2457
2458 /* Select one LWP out of those that have events pending. */
2459
2460 static void
2461 select_event_lwp (struct lwp_info **orig_lp, int *status)
2462 {
2463 int num_events = 0;
2464 int random_selector;
2465 struct lwp_info *event_lp;
2466
2467 /* Record the wait status for the original LWP. */
2468 (*orig_lp)->status = *status;
2469
2470 /* Give preference to any LWP that is being single-stepped. */
2471 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2472 if (event_lp != NULL)
2473 {
2474 if (debug_linux_nat)
2475 fprintf_unfiltered (gdb_stdlog,
2476 "SEL: Select single-step %s\n",
2477 target_pid_to_str (event_lp->ptid));
2478 }
2479 else
2480 {
2481 /* No single-stepping LWP. Select one at random, out of those
2482 which have had SIGTRAP events. */
2483
2484 /* First see how many SIGTRAP events we have. */
2485 iterate_over_lwps (count_events_callback, &num_events);
2486
2487 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2488 random_selector = (int)
2489 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2490
2491 if (debug_linux_nat && num_events > 1)
2492 fprintf_unfiltered (gdb_stdlog,
2493 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2494 num_events, random_selector);
2495
2496 event_lp = iterate_over_lwps (select_event_lwp_callback,
2497 &random_selector);
2498 }
2499
2500 if (event_lp != NULL)
2501 {
2502 /* Switch the event LWP. */
2503 *orig_lp = event_lp;
2504 *status = event_lp->status;
2505 }
2506
2507 /* Flush the wait status for the event LWP. */
2508 (*orig_lp)->status = 0;
2509 }
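
/* For illustration, the arithmetic behind the random pick above:
   rand () / (RAND_MAX + 1.0) is uniform in [0, 1), so scaling by
   NUM_EVENTS and truncating yields each index 0 .. NUM_EVENTS - 1
   with (nearly) equal probability.  E.g. for three pending events:  */
#if 0
int selector = (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0));
/* SELECTOR is 0, 1 or 2, each with probability ~1/3.  */
#endif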
2510
2511 /* Return non-zero if LP has been resumed. */
2512
2513 static int
2514 resumed_callback (struct lwp_info *lp, void *data)
2515 {
2516 return lp->resumed;
2517 }
2518
2519 /* Stop an active thread, verify it still exists, then resume it. */
2520
2521 static int
2522 stop_and_resume_callback (struct lwp_info *lp, void *data)
2523 {
2524 struct lwp_info *ptr;
2525
2526 if (!lp->stopped && !lp->signalled)
2527 {
2528 stop_callback (lp, NULL);
2529 stop_wait_callback (lp, NULL);
2530 /* Resume if the lwp still exists. */
2531 for (ptr = lwp_list; ptr; ptr = ptr->next)
2532 if (lp == ptr)
2533 {
2534 resume_callback (lp, NULL);
2535 resume_set_callback (lp, NULL);
2536 }
2537 }
2538 return 0;
2539 }
2540
2541 /* Check if we should go on and pass this event to common code.
2542 Return the affected lwp if we are, or NULL otherwise. */
2543 static struct lwp_info *
2544 linux_nat_filter_event (int lwpid, int status, int options)
2545 {
2546 struct lwp_info *lp;
2547
2548 lp = find_lwp_pid (pid_to_ptid (lwpid));
2549
2550 /* Check for stop events reported by a process we didn't already
2551 know about - anything not already in our LWP list.
2552
2553 If we're expecting to receive stopped processes after
2554 fork, vfork, and clone events, then we'll just add the
2555 new one to our list and go back to waiting for the event
2556 to be reported - the stopped process might be returned
2557 from waitpid before or after the event is. */
2558 if (WIFSTOPPED (status) && !lp)
2559 {
2560 linux_record_stopped_pid (lwpid, status);
2561 return NULL;
2562 }
2563
2564 /* Make sure we don't report an event for the exit of an LWP not in
2565 our list, i.e. not part of the current process. This can happen
2566 if we detach from a program we originally forked and then it
2567 exits. */
2568 if (!WIFSTOPPED (status) && !lp)
2569 return NULL;
2570
2571 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2572 CLONE_PTRACE processes which do not use the thread library -
2573 otherwise we wouldn't find the new LWP this way. That doesn't
2574 currently work, and the following code is currently unreachable
2575 due to the two blocks above. If it's fixed some day, this code
2576 should be broken out into a function so that we can also pick up
2577 LWPs from the new interface. */
2578 if (!lp)
2579 {
2580 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2581 if (options & __WCLONE)
2582 lp->cloned = 1;
2583
2584 gdb_assert (WIFSTOPPED (status)
2585 && WSTOPSIG (status) == SIGSTOP);
2586 lp->signalled = 1;
2587
2588 if (!in_thread_list (inferior_ptid))
2589 {
2590 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2591 GET_PID (inferior_ptid));
2592 add_thread (inferior_ptid);
2593 }
2594
2595 add_thread (lp->ptid);
2596 }
2597
2598 /* Save the trap's siginfo in case we need it later. */
2599 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2600 save_siginfo (lp);
2601
2602 /* Handle GNU/Linux's extended waitstatus for trace events. */
2603 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2604 {
2605 if (debug_linux_nat)
2606 fprintf_unfiltered (gdb_stdlog,
2607 "LLW: Handling extended status 0x%06x\n",
2608 status);
2609 if (linux_handle_extended_wait (lp, status, 0))
2610 return NULL;
2611 }
2612
2613 /* Check if the thread has exited. */
2614 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2615 {
2616 /* If this is the main thread, we must stop all threads and
2617 verify if they are still alive. This is because in the nptl
2618 thread model, there is no signal issued for exiting LWPs
2619 other than the main thread. We only get the main thread exit
2620 signal once all child threads have already exited. If we
2621 stop all the threads and use the stop_wait_callback to check
2622 if they have exited we can determine whether this signal
2623 should be ignored or whether it means the end of the debugged
2624 application, regardless of which threading model is being
2625 used. */
2626 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2627 {
2628 lp->stopped = 1;
2629 iterate_over_lwps (stop_and_resume_callback, NULL);
2630 }
2631
2632 if (debug_linux_nat)
2633 fprintf_unfiltered (gdb_stdlog,
2634 "LLW: %s exited.\n",
2635 target_pid_to_str (lp->ptid));
2636
2637 exit_lwp (lp);
2638
2639 /* If there is at least one more LWP, then the exit signal was
2640 not the end of the debugged application and should be
2641 ignored. */
2642 if (num_lwps > 0)
2643 return NULL;
2644 }
2645
2646 /* Check if the current LWP has previously exited. In the nptl
2647 thread model, LWPs other than the main thread do not issue
2648 signals when they exit so we must check whenever the thread has
2649 stopped. A similar check is made in stop_wait_callback(). */
2650 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2651 {
2652 if (debug_linux_nat)
2653 fprintf_unfiltered (gdb_stdlog,
2654 "LLW: %s exited.\n",
2655 target_pid_to_str (lp->ptid));
2656
2657 exit_lwp (lp);
2658
2659 /* Make sure there is at least one thread running. */
2660 gdb_assert (iterate_over_lwps (running_callback, NULL));
2661
2662 /* Discard the event. */
2663 return NULL;
2664 }
2665
2666 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2667 an attempt to stop an LWP. */
2668 if (lp->signalled
2669 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2670 {
2671 if (debug_linux_nat)
2672 fprintf_unfiltered (gdb_stdlog,
2673 "LLW: Delayed SIGSTOP caught for %s.\n",
2674 target_pid_to_str (lp->ptid));
2675
2676 /* This is a delayed SIGSTOP. */
2677 lp->signalled = 0;
2678
2679 registers_changed ();
2680
2681 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2682 lp->step, TARGET_SIGNAL_0);
2683 if (debug_linux_nat)
2684 fprintf_unfiltered (gdb_stdlog,
2685 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2686 lp->step ?
2687 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2688 target_pid_to_str (lp->ptid));
2689
2690 lp->stopped = 0;
2691 gdb_assert (lp->resumed);
2692
2693 /* Discard the event. */
2694 return NULL;
2695 }
2696
2697 /* Make sure we don't report a SIGINT that we have already displayed
2698 for another thread. */
2699 if (lp->ignore_sigint
2700 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2701 {
2702 if (debug_linux_nat)
2703 fprintf_unfiltered (gdb_stdlog,
2704 "LLW: Delayed SIGINT caught for %s.\n",
2705 target_pid_to_str (lp->ptid));
2706
2707 /* This is a delayed SIGINT. */
2708 lp->ignore_sigint = 0;
2709
2710 registers_changed ();
2711 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2712 lp->step, TARGET_SIGNAL_0);
2713 if (debug_linux_nat)
2714 fprintf_unfiltered (gdb_stdlog,
2715 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2716 lp->step ?
2717 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2718 target_pid_to_str (lp->ptid));
2719
2720 lp->stopped = 0;
2721 gdb_assert (lp->resumed);
2722
2723 /* Discard the event. */
2724 return NULL;
2725 }
2726
2727 /* An interesting event. */
2728 gdb_assert (lp);
2729 return lp;
2730 }
2731
2732 /* Get the events stored in the pipe into the local queue, so they are
2733 accessible to queued_waitpid. We need to do this, since it is not
2734 always the case that the event at the head of the pipe is the event
2735 we want. */
2736
2737 static void
2738 pipe_to_local_event_queue (void)
2739 {
2740 if (debug_linux_nat_async)
2741 fprintf_unfiltered (gdb_stdlog,
2742 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2743 linux_nat_num_queued_events);
2744 while (linux_nat_num_queued_events)
2745 {
2746 int lwpid, status, options;
2747 lwpid = linux_nat_event_pipe_pop (&status, &options);
2748 gdb_assert (lwpid > 0);
2749 push_waitpid (lwpid, status, options);
2750 }
2751 }
2752
2753 /* Get the unprocessed events stored in the local queue back into the
2754 pipe, so the event loop realizes there's something else to
2755 process. */
2756
2757 static void
2758 local_event_queue_to_pipe (void)
2759 {
2760 struct waitpid_result *w = waitpid_queue;
2761 while (w)
2762 {
2763 struct waitpid_result *next = w->next;
2764 linux_nat_event_pipe_push (w->pid,
2765 w->status,
2766 w->options);
2767 xfree (w);
2768 w = next;
2769 }
2770 waitpid_queue = NULL;
2771
2772 if (debug_linux_nat_async)
2773 fprintf_unfiltered (gdb_stdlog,
2774 "LEQTP: linux_nat_num_queued_events(%d)\n",
2775 linux_nat_num_queued_events);
2776 }
2777
2778 static ptid_t
2779 linux_nat_wait (struct target_ops *ops,
2780 ptid_t ptid, struct target_waitstatus *ourstatus)
2781 {
2782 struct lwp_info *lp = NULL;
2783 int options = 0;
2784 int status = 0;
2785 pid_t pid = PIDGET (ptid);
2786
2787 if (debug_linux_nat_async)
2788 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2789
2790 /* The first time we get here after starting a new inferior, we may
2791 not have added it to the LWP list yet - this is the earliest
2792 moment at which we know its PID. */
2793 if (num_lwps == 0)
2794 {
2795 gdb_assert (!is_lwp (inferior_ptid));
2796
2797 /* Upgrade the main thread's ptid. */
2798 thread_change_ptid (inferior_ptid,
2799 BUILD_LWP (GET_PID (inferior_ptid),
2800 GET_PID (inferior_ptid)));
2801
2802 lp = add_lwp (inferior_ptid);
2803 lp->resumed = 1;
2804 }
2805
2806 /* Block events while we're here. */
2807 linux_nat_async_events (sigchld_sync);
2808
2809 retry:
2810
2811 /* Make sure there is at least one LWP that has been resumed. */
2812 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
2813
2814 /* First check if there is a LWP with a wait status pending. */
2815 if (pid == -1)
2816 {
2817 /* Any LWP that's been resumed will do. */
2818 lp = iterate_over_lwps (status_callback, NULL);
2819 if (lp)
2820 {
2821 if (target_can_async_p ())
2822 internal_error (__FILE__, __LINE__,
2823 "Found an LWP with a pending status in async mode.");
2824
2825 status = lp->status;
2826 lp->status = 0;
2827
2828 if (debug_linux_nat && status)
2829 fprintf_unfiltered (gdb_stdlog,
2830 "LLW: Using pending wait status %s for %s.\n",
2831 status_to_str (status),
2832 target_pid_to_str (lp->ptid));
2833 }
2834
2835 /* But if we don't find one, we'll have to wait, and check both
2836 cloned and uncloned processes. We start with the cloned
2837 processes. */
2838 options = __WCLONE | WNOHANG;
2839 }
2840 else if (is_lwp (ptid))
2841 {
2842 if (debug_linux_nat)
2843 fprintf_unfiltered (gdb_stdlog,
2844 "LLW: Waiting for specific LWP %s.\n",
2845 target_pid_to_str (ptid));
2846
2847 /* We have a specific LWP to check. */
2848 lp = find_lwp_pid (ptid);
2849 gdb_assert (lp);
2850 status = lp->status;
2851 lp->status = 0;
2852
2853 if (debug_linux_nat && status)
2854 fprintf_unfiltered (gdb_stdlog,
2855 "LLW: Using pending wait status %s for %s.\n",
2856 status_to_str (status),
2857 target_pid_to_str (lp->ptid));
2858
2859 /* If we have to wait, take into account whether PID is a cloned
2860 process or not. And we have to convert it to something that
2861 the layer beneath us can understand. */
2862 options = lp->cloned ? __WCLONE : 0;
2863 pid = GET_LWP (ptid);
2864 }
2865
2866 if (status && lp->signalled)
2867 {
2868 /* A pending SIGSTOP may interfere with the normal stream of
2869 events. In a typical case where interference is a problem,
2870 we have a SIGSTOP signal pending for LWP A while
2871 single-stepping it, encounter an event in LWP B, and take the
2872 pending SIGSTOP while trying to stop LWP A. After processing
2873 the event in LWP B, LWP A is continued, and we'll never see
2874 the SIGTRAP associated with the last time we were
2875 single-stepping LWP A. */
2876
2877 /* Resume the thread. It should halt immediately returning the
2878 pending SIGSTOP. */
2879 registers_changed ();
2880 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2881 lp->step, TARGET_SIGNAL_0);
2882 if (debug_linux_nat)
2883 fprintf_unfiltered (gdb_stdlog,
2884 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2885 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2886 target_pid_to_str (lp->ptid));
2887 lp->stopped = 0;
2888 gdb_assert (lp->resumed);
2889
2890 /* This should catch the pending SIGSTOP. */
2891 stop_wait_callback (lp, NULL);
2892 }
2893
2894 if (!target_can_async_p ())
2895 {
2896 /* Causes SIGINT to be passed on to the attached process. */
2897 set_sigint_trap ();
2898 }
2899
2900 while (status == 0)
2901 {
2902 pid_t lwpid;
2903
2904 if (target_can_async_p ())
2905 /* In async mode, don't ever block. Only look at the locally
2906 queued events. */
2907 lwpid = queued_waitpid (pid, &status, options);
2908 else
2909 lwpid = my_waitpid (pid, &status, options);
2910
2911 if (lwpid > 0)
2912 {
2913 gdb_assert (pid == -1 || lwpid == pid);
2914
2915 if (debug_linux_nat)
2916 {
2917 fprintf_unfiltered (gdb_stdlog,
2918 "LLW: waitpid %ld received %s\n",
2919 (long) lwpid, status_to_str (status));
2920 }
2921
2922 lp = linux_nat_filter_event (lwpid, status, options);
2923 if (!lp)
2924 {
2925 /* A discarded event. */
2926 status = 0;
2927 continue;
2928 }
2929
2930 break;
2931 }
2932
2933 if (pid == -1)
2934 {
2935 /* Alternate between checking cloned and uncloned processes. */
2936 options ^= __WCLONE;
2937
2938 /* And every time we have checked both:
2939 In async mode, return to event loop;
2940 In sync mode, suspend waiting for a SIGCHLD signal. */
2941 if (options & __WCLONE)
2942 {
2943 if (target_can_async_p ())
2944 {
2945 /* No interesting event. */
2946 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2947
2948 /* Get ready for the next event. */
2949 target_async (inferior_event_handler, 0);
2950
2951 if (debug_linux_nat_async)
2952 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2953
2954 return minus_one_ptid;
2955 }
2956
2957 sigsuspend (&suspend_mask);
2958 }
2959 }
2960
2961 /* We shouldn't end up here unless we want to try again. */
2962 gdb_assert (status == 0);
2963 }
2964
2965 if (!target_can_async_p ())
2966 clear_sigint_trap ();
2967
2968 gdb_assert (lp);
2969
2970 /* Don't report signals that GDB isn't interested in, such as
2971 signals that are neither printed nor stopped upon. Stopping all
2972 threads can be a bit time-consuming so if we want decent
2973 performance with heavily multi-threaded programs, especially when
2974 they're using a high frequency timer, we'd better avoid it if we
2975 can. */
2976
2977 if (WIFSTOPPED (status))
2978 {
2979 int signo = target_signal_from_host (WSTOPSIG (status));
2980 struct inferior *inf;
2981
2982 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2983 gdb_assert (inf);
2984
2985 /* Defer to common code if we get a signal while
2986 single-stepping, since that may need special care, e.g. to
2987 skip the signal handler, or, if we're gaining control of the
2988 inferior. */
2989 if (!lp->step
2990 && inf->stop_soon == NO_STOP_QUIETLY
2991 && signal_stop_state (signo) == 0
2992 && signal_print_state (signo) == 0
2993 && signal_pass_state (signo) == 1)
2994 {
2995 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2996 here? It is not clear we should. GDB may not expect
2997 other threads to run. On the other hand, not resuming
2998 newly attached threads may cause an unwanted delay in
2999 getting them running. */
3000 registers_changed ();
3001 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
3002 lp->step, signo);
3003 if (debug_linux_nat)
3004 fprintf_unfiltered (gdb_stdlog,
3005 "LLW: %s %s, %s (preempt 'handle')\n",
3006 lp->step ?
3007 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3008 target_pid_to_str (lp->ptid),
3009 signo ? strsignal (signo) : "0");
3010 lp->stopped = 0;
3011 status = 0;
3012 goto retry;
3013 }
3014
3015 if (!non_stop)
3016 {
3017 /* Only do the below in all-stop, as we currently use SIGINT
3018 to implement target_stop (see linux_nat_stop) in
3019 non-stop. */
3020 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
3021 {
3022 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3023 forwarded to the entire process group, that is, all LWPs
3024 will receive it - unless they're using CLONE_THREAD to
3025 share signals. Since we only want to report it once, we
3026 mark it as ignored for all LWPs except this one. */
3027 iterate_over_lwps (set_ignore_sigint, NULL);
3028 lp->ignore_sigint = 0;
3029 }
3030 else
3031 maybe_clear_ignore_sigint (lp);
3032 }
3033 }
3034
3035 /* This LWP is stopped now. */
3036 lp->stopped = 1;
3037
3038 if (debug_linux_nat)
3039 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3040 status_to_str (status), target_pid_to_str (lp->ptid));
3041
3042 if (!non_stop)
3043 {
3044 /* Now stop all other LWP's ... */
3045 iterate_over_lwps (stop_callback, NULL);
3046
3047 /* ... and wait until all of them have reported back that
3048 they're no longer running. */
3049 iterate_over_lwps (stop_wait_callback, NULL);
3050
3051 /* If we're not waiting for a specific LWP, choose an event LWP
3052 from among those that have had events. Giving equal priority
3053 to all LWPs that have had events helps prevent
3054 starvation. */
3055 if (pid == -1)
3056 select_event_lwp (&lp, &status);
3057 }
3058
3059 /* Now that we've selected our final event LWP, cancel any
3060 breakpoints in other LWPs that have hit a GDB breakpoint. See
3061 the comment in cancel_breakpoints_callback to find out why. */
3062 iterate_over_lwps (cancel_breakpoints_callback, lp);
3063
3064 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3065 {
3066 if (debug_linux_nat)
3067 fprintf_unfiltered (gdb_stdlog,
3068 "LLW: trap ptid is %s.\n",
3069 target_pid_to_str (lp->ptid));
3070 }
3071
3072 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3073 {
3074 *ourstatus = lp->waitstatus;
3075 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3076 }
3077 else
3078 store_waitstatus (ourstatus, status);
3079
3080 /* Get ready for the next event. */
3081 if (target_can_async_p ())
3082 target_async (inferior_event_handler, 0);
3083
3084 if (debug_linux_nat_async)
3085 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3086
3087 return lp->ptid;
3088 }
3089
3090 static int
3091 kill_callback (struct lwp_info *lp, void *data)
3092 {
3093 errno = 0;
3094 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3095 if (debug_linux_nat)
3096 fprintf_unfiltered (gdb_stdlog,
3097 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3098 target_pid_to_str (lp->ptid),
3099 errno ? safe_strerror (errno) : "OK");
3100
3101 return 0;
3102 }
3103
3104 static int
3105 kill_wait_callback (struct lwp_info *lp, void *data)
3106 {
3107 pid_t pid;
3108
3109 /* We must make sure that there are no pending events (delayed
3110 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3111 program doesn't interfere with any following debugging session. */
3112
3113 /* For cloned processes we must check both with __WCLONE and
3114 without, since the exit status of a cloned process isn't reported
3115 with __WCLONE. */
3116 if (lp->cloned)
3117 {
3118 do
3119 {
3120 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
3121 if (pid != (pid_t) -1)
3122 {
3123 if (debug_linux_nat)
3124 fprintf_unfiltered (gdb_stdlog,
3125 "KWC: wait %s received unknown.\n",
3126 target_pid_to_str (lp->ptid));
3127 /* The Linux kernel sometimes fails to kill a thread
3128 completely after PTRACE_KILL; the thread goes from the
3129 stop point in do_fork out to the one in
3130 get_signal_to_deliver and waits again. So kill it
3131 again. */
3132 kill_callback (lp, NULL);
3133 }
3134 }
3135 while (pid == GET_LWP (lp->ptid));
3136
3137 gdb_assert (pid == -1 && errno == ECHILD);
3138 }
3139
3140 do
3141 {
3142 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
3143 if (pid != (pid_t) -1)
3144 {
3145 if (debug_linux_nat)
3146 fprintf_unfiltered (gdb_stdlog,
3147 "KWC: wait %s received unk.\n",
3148 target_pid_to_str (lp->ptid));
3149 /* See the call to kill_callback above. */
3150 kill_callback (lp, NULL);
3151 }
3152 }
3153 while (pid == GET_LWP (lp->ptid));
3154
3155 gdb_assert (pid == -1 && errno == ECHILD);
3156 return 0;
3157 }
3158
3159 static void
3160 linux_nat_kill (void)
3161 {
3162 struct target_waitstatus last;
3163 ptid_t last_ptid;
3164 int status;
3165
3166 if (target_can_async_p ())
3167 target_async (NULL, 0);
3168
3169 /* If we're stopped while forking and we haven't followed yet,
3170 kill the other task. We need to do this first because the
3171 parent will be sleeping if this is a vfork. */
3172
3173 get_last_target_status (&last_ptid, &last);
3174
3175 if (last.kind == TARGET_WAITKIND_FORKED
3176 || last.kind == TARGET_WAITKIND_VFORKED)
3177 {
3178 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
3179 wait (&status);
3180 }
3181
3182 if (forks_exist_p ())
3183 {
3184 linux_fork_killall ();
3185 drain_queued_events (-1);
3186 }
3187 else
3188 {
3189 /* Stop all threads before killing them, since ptrace requires
3190 that a thread be stopped for PTRACE_KILL to succeed. */
3191 iterate_over_lwps (stop_callback, NULL);
3192 /* ... and wait until all of them have reported back that
3193 they're no longer running. */
3194 iterate_over_lwps (stop_wait_callback, NULL);
3195
3196 /* Kill all LWP's ... */
3197 iterate_over_lwps (kill_callback, NULL);
3198
3199 /* ... and wait until we've flushed all events. */
3200 iterate_over_lwps (kill_wait_callback, NULL);
3201 }
3202
3203 target_mourn_inferior ();
3204 }
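
/* For illustration, the ordering constraint the code above honors
   (a sketch assuming a kernel with __WALL; LWPID is hypothetical):
   PTRACE_KILL only takes effect on a stopped tracee, so each LWP is
   stopped, killed, and then reaped.  */
#if 0
kill_lwp (lwpid, SIGSTOP);          /* stop it first */
waitpid (lwpid, &status, __WALL);   /* collect the stop */
ptrace (PTRACE_KILL, lwpid, 0, 0);  /* now the kill sticks */
waitpid (lwpid, &status, __WALL);   /* reap the exit status */
#endif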
3205
3206 static void
3207 linux_nat_mourn_inferior (struct target_ops *ops)
3208 {
3209 /* Destroy LWP info; it's no longer valid. */
3210 init_lwp_list ();
3211
3212 if (! forks_exist_p ())
3213 {
3214 /* Normal case, no other forks available. */
3215 if (target_can_async_p ())
3216 linux_nat_async (NULL, 0);
3217 linux_ops->to_mourn_inferior (ops);
3218 }
3219 else
3220 /* Multi-fork case. The current inferior_ptid has exited, but
3221 there are other viable forks to debug. Delete the exiting
3222 one and context-switch to the first available. */
3223 linux_fork_mourn_inferior ();
3224 }
3225
3226 static LONGEST
3227 linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3228 const char *annex, gdb_byte *readbuf,
3229 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3230 {
3231 struct lwp_info *lp;
3232 LONGEST n;
3233 int pid;
3234 struct siginfo siginfo;
3235
3236 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3237 gdb_assert (readbuf || writebuf);
3238
3239 pid = GET_LWP (inferior_ptid);
3240 if (pid == 0)
3241 pid = GET_PID (inferior_ptid);
3242
3243 if (offset > sizeof (siginfo))
3244 return -1;
3245
3246 errno = 0;
3247 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3248 if (errno != 0)
3249 return -1;
3250
3251 if (offset + len > sizeof (siginfo))
3252 len = sizeof (siginfo) - offset;
3253
3254 if (readbuf != NULL)
3255 memcpy (readbuf, (char *)&siginfo + offset, len);
3256 else
3257 {
3258 memcpy ((char *)&siginfo + offset, writebuf, len);
3259 errno = 0;
3260 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3261 if (errno != 0)
3262 return -1;
3263 }
3264
3265 return len;
3266 }
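
/* For illustration, the raw ptrace pair that the transfer above maps
   onto (PID is hypothetical); at the GDB prompt this surfaces as
   reading and assigning to the $_siginfo convenience variable.  */
#if 0
struct siginfo si;

ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &si);
si.si_signo = SIGUSR1;  /* alter the signal the tracee will see */
ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &si);
#endif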
3267
3268 static LONGEST
3269 linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3270 const char *annex, gdb_byte *readbuf,
3271 const gdb_byte *writebuf,
3272 ULONGEST offset, LONGEST len)
3273 {
3274 struct cleanup *old_chain;
3275 LONGEST xfer;
3276
3277 if (object == TARGET_OBJECT_SIGNAL_INFO)
3278 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
3279 offset, len);
3280
3281 old_chain = save_inferior_ptid ();
3282
3283 if (is_lwp (inferior_ptid))
3284 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3285
3286 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3287 offset, len);
3288
3289 do_cleanups (old_chain);
3290 return xfer;
3291 }
3292
3293 static int
3294 linux_nat_thread_alive (ptid_t ptid)
3295 {
3296 int err;
3297
3298 gdb_assert (is_lwp (ptid));
3299
3300 /* Send signal 0 instead of using ptrace, because ptracing a
3301 running thread errors out claiming that the thread doesn't
3302 exist. */
3303 err = kill_lwp (GET_LWP (ptid), 0);
3304
3305 if (debug_linux_nat)
3306 fprintf_unfiltered (gdb_stdlog,
3307 "LLTA: KILL(SIG0) %s (%s)\n",
3308 target_pid_to_str (ptid),
3309 err ? safe_strerror (err) : "OK");
3310
3311 if (err != 0)
3312 return 0;
3313
3314 return 1;
3315 }
3316
3317 static char *
3318 linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
3319 {
3320 static char buf[64];
3321
3322 if (is_lwp (ptid)
3323 && ((lwp_list && lwp_list->next)
3324 || GET_PID (ptid) != GET_LWP (ptid)))
3325 {
3326 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3327 return buf;
3328 }
3329
3330 return normal_pid_to_str (ptid);
3331 }
3332
3333 static void
3334 sigchld_handler (int signo)
3335 {
3336 if (target_async_permitted
3337 && linux_nat_async_events_state != sigchld_sync
3338 && signo == SIGCHLD)
3339 /* It is *always* a bug to hit this. */
3340 internal_error (__FILE__, __LINE__,
3341 "sigchld_handler called when async events are enabled");
3342
3343 /* Do nothing. The only reason for this handler is that it allows
3344 us to use sigsuspend in linux_nat_wait above to wait for the
3345 arrival of a SIGCHLD. */
3346 }
3347
3348 /* Accepts an integer PID; returns a string representing a file that
3349 can be opened to get the symbols for the child process. */
3350
3351 static char *
3352 linux_child_pid_to_exec_file (int pid)
3353 {
3354 char *name1, *name2;
3355
3356 name1 = xmalloc (MAXPATHLEN);
3357 name2 = xmalloc (MAXPATHLEN);
3358 make_cleanup (xfree, name1);
3359 make_cleanup (xfree, name2);
3360 memset (name2, 0, MAXPATHLEN);
3361
3362 sprintf (name1, "/proc/%d/exe", pid);
3363 if (readlink (name1, name2, MAXPATHLEN - 1) > 0)
3364 return name2;
3365 else
3366 return name1;
3367 }
3368
3369 /* Service function for corefiles and info proc. */
3370
3371 static int
3372 read_mapping (FILE *mapfile,
3373 long long *addr,
3374 long long *endaddr,
3375 char *permissions,
3376 long long *offset,
3377 char *device, long long *inode, char *filename)
3378 {
3379 int ret = fscanf (mapfile, "%llx-%llx %7s %llx %7s %llx",
3380 addr, endaddr, permissions, offset, device, inode);
3381
3382 filename[0] = '\0';
3383 if (ret > 0 && ret != EOF)
3384 {
3385 /* Eat everything up to EOL for the filename. This will prevent
3386 weird filenames (such as one with embedded whitespace) from
3387 confusing this code. It also makes this code more robust in
3388 respect to annotations the kernel may add after the filename.
3389
3390 Note the filename is used for informational purposes
3391 only. */
3392 ret += fscanf (mapfile, "%[^\n]\n", filename);
3393 }
3394
3395 return (ret != 0 && ret != EOF);
3396 }
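
/* For illustration, one (made up) line of /proc/<pid>/maps and how
   the fscanf pattern above slices it; the trailing %[^\n] read is
   what tolerates spaces inside the filename:

     08048000-0804c000 r-xp 00000000 03:02 271234   /bin/cat
     addr     endaddr  perm offset   dev   inode    filename  */
#if 0
long long addr, endaddr, offset, inode;
char perms[8], dev[8], file[MAXPATHLEN];

while (read_mapping (mapsfile, &addr, &endaddr, perms, &offset,
                     dev, &inode, file))
  ; /* Each iteration yields one mapping.  */
#endif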
3397
3398 /* Fills the "to_find_memory_regions" target vector. Lists the memory
3399 regions in the inferior for a corefile. */
3400
3401 static int
3402 linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3403 unsigned long,
3404 int, int, int, void *), void *obfd)
3405 {
3406 long long pid = PIDGET (inferior_ptid);
3407 char mapsfilename[MAXPATHLEN];
3408 FILE *mapsfile;
3409 long long addr, endaddr, size, offset, inode;
3410 char permissions[8], device[8], filename[MAXPATHLEN];
3411 int read, write, exec;
3412 int ret;
3413 struct cleanup *cleanup;
3414
3415 /* Compose the filename for the /proc memory map, and open it. */
3416 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3417 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
3418 error (_("Could not open %s."), mapsfilename);
3419 cleanup = make_cleanup_fclose (mapsfile);
3420
3421 if (info_verbose)
3422 fprintf_filtered (gdb_stdout,
3423 "Reading memory regions from %s\n", mapsfilename);
3424
3425 /* Now iterate until end-of-file. */
3426 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3427 &offset, &device[0], &inode, &filename[0]))
3428 {
3429 size = endaddr - addr;
3430
3431 /* Get the segment's permissions. */
3432 read = (strchr (permissions, 'r') != 0);
3433 write = (strchr (permissions, 'w') != 0);
3434 exec = (strchr (permissions, 'x') != 0);
3435
3436 if (info_verbose)
3437 {
3438 fprintf_filtered (gdb_stdout,
3439 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3440 size, paddr_nz (addr),
3441 read ? 'r' : ' ',
3442 write ? 'w' : ' ', exec ? 'x' : ' ');
3443 if (filename[0])
3444 fprintf_filtered (gdb_stdout, " for %s", filename);
3445 fprintf_filtered (gdb_stdout, "\n");
3446 }
3447
3448 /* Invoke the callback function to create the corefile
3449 segment. */
3450 func (addr, size, read, write, exec, obfd);
3451 }
3452 do_cleanups (cleanup);
3453 return 0;
3454 }
3455
3456 static int
3457 find_signalled_thread (struct thread_info *info, void *data)
3458 {
3459 if (info->stop_signal != TARGET_SIGNAL_0
3460 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
3461 return 1;
3462
3463 return 0;
3464 }
3465
3466 static enum target_signal
3467 find_stop_signal (void)
3468 {
3469 struct thread_info *info =
3470 iterate_over_threads (find_signalled_thread, NULL);
3471
3472 if (info)
3473 return info->stop_signal;
3474 else
3475 return TARGET_SIGNAL_0;
3476 }
3477
3478 /* Records the thread's register state for the corefile note
3479 section. */
3480
3481 static char *
3482 linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
3483 char *note_data, int *note_size,
3484 enum target_signal stop_signal)
3485 {
3486 gdb_gregset_t gregs;
3487 gdb_fpregset_t fpregs;
3488 unsigned long lwp = ptid_get_lwp (ptid);
3489 struct regcache *regcache = get_thread_regcache (ptid);
3490 struct gdbarch *gdbarch = get_regcache_arch (regcache);
3491 const struct regset *regset;
3492 int core_regset_p;
3493 struct cleanup *old_chain;
3494 struct core_regset_section *sect_list;
3495 char *gdb_regset;
3496
3497 old_chain = save_inferior_ptid ();
3498 inferior_ptid = ptid;
3499 target_fetch_registers (regcache, -1);
3500 do_cleanups (old_chain);
3501
3502 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
3503 sect_list = gdbarch_core_regset_sections (gdbarch);
3504
3505 if (core_regset_p
3506 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3507 sizeof (gregs))) != NULL
3508 && regset->collect_regset != NULL)
3509 regset->collect_regset (regset, regcache, -1,
3510 &gregs, sizeof (gregs));
3511 else
3512 fill_gregset (regcache, &gregs, -1);
3513
3514 note_data = (char *) elfcore_write_prstatus (obfd,
3515 note_data,
3516 note_size,
3517 lwp,
3518 stop_signal, &gregs);
3519
3520 /* The loop below uses the new struct core_regset_section, which stores
3521 the supported section names and sizes for the core file. The
3522 PRSTATUS note needs to be treated specially. But the other notes are
3523 structurally the same, so they can benefit from the new struct. */
3524 if (core_regset_p && sect_list != NULL)
3525 while (sect_list->sect_name != NULL)
3526 {
3527 /* .reg was already handled above. */
3528 if (strcmp (sect_list->sect_name, ".reg") == 0)
3529 {
3530 sect_list++;
3531 continue;
3532 }
3533 regset = gdbarch_regset_from_core_section (gdbarch,
3534 sect_list->sect_name,
3535 sect_list->size);
3536 gdb_assert (regset && regset->collect_regset);
3537 gdb_regset = xmalloc (sect_list->size);
3538 regset->collect_regset (regset, regcache, -1,
3539 gdb_regset, sect_list->size);
3540 note_data = (char *) elfcore_write_register_note (obfd,
3541 note_data,
3542 note_size,
3543 sect_list->sect_name,
3544 gdb_regset,
3545 sect_list->size);
3546 xfree (gdb_regset);
3547 sect_list++;
3548 }
3549
3550 /* For architectures that do not implement struct core_regset_section,
3551 we use the old method. When all architectures have the new
3552 support, the code below should be deleted. */
3553 else
3554 {
3555 if (core_regset_p
3556 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3557 sizeof (fpregs))) != NULL
3558 && regset->collect_regset != NULL)
3559 regset->collect_regset (regset, regcache, -1,
3560 &fpregs, sizeof (fpregs));
3561 else
3562 fill_fpregset (regcache, &fpregs, -1);
3563
3564 note_data = (char *) elfcore_write_prfpreg (obfd,
3565 note_data,
3566 note_size,
3567 &fpregs, sizeof (fpregs));
3568 }
3569
3570 return note_data;
3571 }
3572
3573 struct linux_nat_corefile_thread_data
3574 {
3575 bfd *obfd;
3576 char *note_data;
3577 int *note_size;
3578 int num_notes;
3579 enum target_signal stop_signal;
3580 };
3581
3582 /* Called by gdbthread.c once per thread. Records the thread's
3583 register state for the corefile note section. */
3584
3585 static int
3586 linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3587 {
3588 struct linux_nat_corefile_thread_data *args = data;
3589
3590 args->note_data = linux_nat_do_thread_registers (args->obfd,
3591 ti->ptid,
3592 args->note_data,
3593 args->note_size,
3594 args->stop_signal);
3595 args->num_notes++;
3596
3597 return 0;
3598 }
3599
3600 /* Fills the "to_make_corefile_note" target vector. Builds the note
3601 section for a corefile, and returns it in a malloc buffer. */
3602
3603 static char *
3604 linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3605 {
3606 struct linux_nat_corefile_thread_data thread_args;
3607 struct cleanup *old_chain;
3608 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
3609 char fname[16] = { '\0' };
3610 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
3611 char psargs[80] = { '\0' };
3612 char *note_data = NULL;
3613 ptid_t current_ptid = inferior_ptid;
3614 gdb_byte *auxv;
3615 int auxv_len;
3616
3617 if (get_exec_file (0))
3618 {
3619 strncpy (fname, strrchr (get_exec_file (0), '/') != NULL
? strrchr (get_exec_file (0), '/') + 1
: get_exec_file (0), sizeof (fname));
3620 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3621 if (get_inferior_args ())
3622 {
3623 char *string_end;
3624 char *psargs_end = psargs + sizeof (psargs);
3625
3626 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3627 strings fine. */
3628 string_end = memchr (psargs, 0, sizeof (psargs));
3629 if (string_end != NULL)
3630 {
3631 *string_end++ = ' ';
3632 strncpy (string_end, get_inferior_args (),
3633 psargs_end - string_end);
3634 }
3635 }
3636 note_data = (char *) elfcore_write_prpsinfo (obfd,
3637 note_data,
3638 note_size, fname, psargs);
3639 }
3640
3641 /* Dump information for threads. */
3642 thread_args.obfd = obfd;
3643 thread_args.note_data = note_data;
3644 thread_args.note_size = note_size;
3645 thread_args.num_notes = 0;
3646 thread_args.stop_signal = find_stop_signal ();
3647 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3648 gdb_assert (thread_args.num_notes != 0);
3649 note_data = thread_args.note_data;
3650
3651 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3652 NULL, &auxv);
3653 if (auxv_len > 0)
3654 {
3655 note_data = elfcore_write_note (obfd, note_data, note_size,
3656 "CORE", NT_AUXV, auxv, auxv_len);
3657 xfree (auxv);
3658 }
3659
3660 make_cleanup (xfree, note_data);
3661 return note_data;
3662 }
3663
3664 /* Implement the "info proc" command. */
3665
3666 static void
3667 linux_nat_info_proc_cmd (char *args, int from_tty)
3668 {
3669 long long pid = PIDGET (inferior_ptid);
3670 FILE *procfile;
3671 char **argv = NULL;
3672 char buffer[MAXPATHLEN];
3673 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3674 int cmdline_f = 1;
3675 int cwd_f = 1;
3676 int exe_f = 1;
3677 int mappings_f = 0;
3678 int environ_f = 0;
3679 int status_f = 0;
3680 int stat_f = 0;
3681 int all = 0;
3682 struct stat dummy;
3683
3684 if (args)
3685 {
3686 /* Break up 'args' into an argv array. */
3687 argv = gdb_buildargv (args);
3688 make_cleanup_freeargv (argv);
3689 }
3690 while (argv != NULL && *argv != NULL)
3691 {
3692 if (isdigit (argv[0][0]))
3693 {
3694 pid = strtoul (argv[0], NULL, 10);
3695 }
3696 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3697 {
3698 mappings_f = 1;
3699 }
3700 else if (strcmp (argv[0], "status") == 0)
3701 {
3702 status_f = 1;
3703 }
3704 else if (strcmp (argv[0], "stat") == 0)
3705 {
3706 stat_f = 1;
3707 }
3708 else if (strcmp (argv[0], "cmd") == 0)
3709 {
3710 cmdline_f = 1;
3711 }
3712 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3713 {
3714 exe_f = 1;
3715 }
3716 else if (strcmp (argv[0], "cwd") == 0)
3717 {
3718 cwd_f = 1;
3719 }
3720 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3721 {
3722 all = 1;
3723 }
3724 else
3725 {
3726 /* [...] (future options here) */
3727 }
3728 argv++;
3729 }
3730 if (pid == 0)
3731 error (_("No current process: you must name one."));
3732
3733 sprintf (fname1, "/proc/%lld", pid);
3734 if (stat (fname1, &dummy) != 0)
3735 error (_("No /proc directory: '%s'"), fname1);
3736
3737 printf_filtered (_("process %lld\n"), pid);
3738 if (cmdline_f || all)
3739 {
3740 sprintf (fname1, "/proc/%lld/cmdline", pid);
3741 if ((procfile = fopen (fname1, "r")) != NULL)
3742 {
3743 struct cleanup *cleanup = make_cleanup_fclose (procfile);
3744 if (fgets (buffer, sizeof (buffer), procfile))
3745 printf_filtered ("cmdline = '%s'\n", buffer);
3746 else
3747 warning (_("unable to read '%s'"), fname1);
3748 do_cleanups (cleanup);
3749 }
3750 else
3751 warning (_("unable to open /proc file '%s'"), fname1);
3752 }
3753 if (cwd_f || all)
3754 {
3755 sprintf (fname1, "/proc/%lld/cwd", pid);
3756 memset (fname2, 0, sizeof (fname2));
3757 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3758 printf_filtered ("cwd = '%s'\n", fname2);
3759 else
3760 warning (_("unable to read link '%s'"), fname1);
3761 }
3762 if (exe_f || all)
3763 {
3764 sprintf (fname1, "/proc/%lld/exe", pid);
3765 memset (fname2, 0, sizeof (fname2));
3766 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3767 printf_filtered ("exe = '%s'\n", fname2);
3768 else
3769 warning (_("unable to read link '%s'"), fname1);
3770 }
3771 if (mappings_f || all)
3772 {
3773 sprintf (fname1, "/proc/%lld/maps", pid);
3774 if ((procfile = fopen (fname1, "r")) != NULL)
3775 {
3776 long long addr, endaddr, size, offset, inode;
3777 char permissions[8], device[8], filename[MAXPATHLEN];
3778 struct cleanup *cleanup;
3779
3780 cleanup = make_cleanup_fclose (procfile);
3781 printf_filtered (_("Mapped address spaces:\n\n"));
3782 if (gdbarch_addr_bit (current_gdbarch) == 32)
3783 {
3784 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3785 "Start Addr",
3786 " End Addr",
3787 " Size", " Offset", "objfile");
3788 }
3789 else
3790 {
3791 printf_filtered (" %18s %18s %10s %10s %7s\n",
3792 "Start Addr",
3793 " End Addr",
3794 " Size", " Offset", "objfile");
3795 }
3796
3797 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3798 &offset, &device[0], &inode, &filename[0]))
3799 {
3800 size = endaddr - addr;
3801
3802 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3803 calls here (and possibly above) should be abstracted
3804 out into their own functions? Andrew suggests using
3805 a generic local_address_string instead to print out
3806 the addresses; that makes sense to me, too. */
3807
3808 if (gdbarch_addr_bit (current_gdbarch) == 32)
3809 {
3810 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3811 (unsigned long) addr, /* FIXME: pr_addr */
3812 (unsigned long) endaddr,
3813 (int) size,
3814 (unsigned int) offset,
3815 filename[0] ? filename : "");
3816 }
3817 else
3818 {
3819 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3820 (unsigned long) addr, /* FIXME: pr_addr */
3821 (unsigned long) endaddr,
3822 (int) size,
3823 (unsigned int) offset,
3824 filename[0] ? filename : "");
3825 }
3826 }
3827
3828 do_cleanups (cleanup);
3829 }
3830 else
3831 warning (_("unable to open /proc file '%s'"), fname1);
3832 }
3833 if (status_f || all)
3834 {
3835 sprintf (fname1, "/proc/%lld/status", pid);
3836 if ((procfile = fopen (fname1, "r")) != NULL)
3837 {
3838 struct cleanup *cleanup = make_cleanup_fclose (procfile);
3839 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3840 puts_filtered (buffer);
3841 do_cleanups (cleanup);
3842 }
3843 else
3844 warning (_("unable to open /proc file '%s'"), fname1);
3845 }
3846 if (stat_f || all)
3847 {
3848 sprintf (fname1, "/proc/%lld/stat", pid);
3849 if ((procfile = fopen (fname1, "r")) != NULL)
3850 {
3851 int itmp;
3852 char ctmp;
3853 long ltmp;
3854 struct cleanup *cleanup = make_cleanup_fclose (procfile);
3855
3856 if (fscanf (procfile, "%d ", &itmp) > 0)
3857 printf_filtered (_("Process: %d\n"), itmp);
3858 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
3859 printf_filtered (_("Exec file: %s\n"), buffer);
3860 if (fscanf (procfile, "%c ", &ctmp) > 0)
3861 printf_filtered (_("State: %c\n"), ctmp);
3862 if (fscanf (procfile, "%d ", &itmp) > 0)
3863 printf_filtered (_("Parent process: %d\n"), itmp);
3864 if (fscanf (procfile, "%d ", &itmp) > 0)
3865 printf_filtered (_("Process group: %d\n"), itmp);
3866 if (fscanf (procfile, "%d ", &itmp) > 0)
3867 printf_filtered (_("Session id: %d\n"), itmp);
3868 if (fscanf (procfile, "%d ", &itmp) > 0)
3869 printf_filtered (_("TTY: %d\n"), itmp);
3870 if (fscanf (procfile, "%d ", &itmp) > 0)
3871 printf_filtered (_("TTY owner process group: %d\n"), itmp);
3872 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3873 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3874 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3875 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3876 (unsigned long) ltmp);
3877 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3878 printf_filtered (_("Minor faults, children: %lu\n"),
3879 (unsigned long) ltmp);
3880 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3881 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3882 (unsigned long) ltmp);
3883 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3884 printf_filtered (_("Major faults, children: %lu\n"),
3885 (unsigned long) ltmp);
3886 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3887 printf_filtered (_("utime: %ld\n"), ltmp);
3888 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3889 printf_filtered (_("stime: %ld\n"), ltmp);
3890 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3891 printf_filtered (_("utime, children: %ld\n"), ltmp);
3892 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3893 printf_filtered (_("stime, children: %ld\n"), ltmp);
3894 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3895 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3896 ltmp);
3897 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3898 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3899 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3900 printf_filtered (_("jiffies until next timeout: %lu\n"),
3901 (unsigned long) ltmp);
3902 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3903 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3904 (unsigned long) ltmp);
3905 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3906 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3907 ltmp);
3908 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3909 printf_filtered (_("Virtual memory size: %lu\n"),
3910 (unsigned long) ltmp);
3911 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3912 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3913 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3914 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3915 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3916 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3917 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3918 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3919 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3920 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
3921 #if 0 /* Don't know how architecture-dependent the rest is...
3922 Anyway the signal bitmap info is available from "status". */
3923 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3924 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3925 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3926 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3927 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3928 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3929 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3930 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3931 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3932 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3933 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3934 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3935 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3936 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
3937 #endif
3938 do_cleanups (cleanup);
3939 }
3940 else
3941 warning (_("unable to open /proc file '%s'"), fname1);
3942 }
3943 }
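
/* A hypothetical session, for illustration only (process id and
   output invented):

     (gdb) info proc
     process 12345
     cmdline = '/bin/sleep'
     cwd = '/tmp'
     exe = '/bin/sleep'

     (gdb) info proc 12345 mappings
     process 12345
     Mapped address spaces:
     ...

   "mappings", "exe" and "all" are matched with strncmp above and so
   may be abbreviated; the remaining keywords must be spelled out.  */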
3944
3945 /* Implement the to_xfer_partial interface for memory reads using the /proc
3946 filesystem. Because we can use a single read() call for /proc, this
3947 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3948 but it doesn't support writes. */
3949
3950 static LONGEST
3951 linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3952 const char *annex, gdb_byte *readbuf,
3953 const gdb_byte *writebuf,
3954 ULONGEST offset, LONGEST len)
3955 {
3956 LONGEST ret;
3957 int fd;
3958 char filename[64];
3959
3960 if (object != TARGET_OBJECT_MEMORY || !readbuf)
3961 return 0;
3962
3963   /* Don't bother for small transfers; ptrace is cheaper for a word or two.  */
3964 if (len < 3 * sizeof (long))
3965 return 0;
3966
3967 /* We could keep this file open and cache it - possibly one per
3968 thread. That requires some juggling, but is even faster. */
3969 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3970 fd = open (filename, O_RDONLY | O_LARGEFILE);
3971 if (fd == -1)
3972 return 0;
3973
3974 /* If pread64 is available, use it. It's faster if the kernel
3975 supports it (only one syscall), and it's 64-bit safe even on
3976 32-bit platforms (for instance, SPARC debugging a SPARC64
3977 application). */
3978 #ifdef HAVE_PREAD64
3979 if (pread64 (fd, readbuf, len, offset) != len)
3980 #else
3981 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
3982 #endif
3983 ret = 0;
3984 else
3985 ret = len;
3986
3987 close (fd);
3988 return ret;
3989 }
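
#if 0
/* Illustrative sketch, not part of the original file: the same
   /proc/PID/mem technique as a standalone helper.  The helper name and
   error-handling policy are invented; note the tracee generally must
   be ptrace-stopped for the read to succeed.  */

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_inferior_mem (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  /* One open + one pread replaces len / sizeof (long) ptrace calls.  */
  snprintf (filename, sizeof (filename), "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  /* pread is offset-based, so no separate lseek is needed and the
     helper stays usable from several threads at once.  */
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}
#endif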
3990
3991 /* Parse LINE as a signal set and add its set bits to SIGS. */
3992
3993 static void
3994 add_line_to_sigset (const char *line, sigset_t *sigs)
3995 {
3996 int len = strlen (line) - 1;
3997 const char *p;
3998 int signum;
3999
4000   if (len < 0 || line[len] != '\n')
4001 error (_("Could not parse signal set: %s"), line);
4002
4003 p = line;
4004 signum = len * 4;
4005 while (len-- > 0)
4006 {
4007 int digit;
4008
4009 if (*p >= '0' && *p <= '9')
4010 digit = *p - '0';
4011 else if (*p >= 'a' && *p <= 'f')
4012 digit = *p - 'a' + 10;
4013 else
4014 error (_("Could not parse signal set: %s"), line);
4015
4016 signum -= 4;
4017
4018 if (digit & 1)
4019 sigaddset (sigs, signum + 1);
4020 if (digit & 2)
4021 sigaddset (sigs, signum + 2);
4022 if (digit & 4)
4023 sigaddset (sigs, signum + 3);
4024 if (digit & 8)
4025 sigaddset (sigs, signum + 4);
4026
4027 p++;
4028 }
4029 }
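
/* Worked example: for the status line "SigPnd:\t0000000000000200\n"
   the text passed in is "0000000000000200\n", so LEN is 16 and SIGNUM
   starts at 64.  The only non-zero digit, '2', is the 14th character;
   by the time it is processed SIGNUM has been decremented to 8, and
   bit 1 of the digit maps to signal 8 + 2 = 10, i.e. SIGUSR1 on most
   Linux targets.  */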
4030
4031 /* Find process PID's pending, blocked and ignored signals from
4032    /proc/pid/status, and fill in PENDING, BLOCKED and IGNORED to match.  */
4033
4034 void
4035 linux_proc_pending_signals (int pid, sigset_t *pending,
			    sigset_t *blocked, sigset_t *ignored)
4036 {
4037 FILE *procfile;
4038 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
4039 int signum;
4040 struct cleanup *cleanup;
4041
4042 sigemptyset (pending);
4043 sigemptyset (blocked);
4044 sigemptyset (ignored);
4045 sprintf (fname, "/proc/%d/status", pid);
4046 procfile = fopen (fname, "r");
4047 if (procfile == NULL)
4048 error (_("Could not open %s"), fname);
4049 cleanup = make_cleanup_fclose (procfile);
4050
4051 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
4052 {
4053 /* Normal queued signals are on the SigPnd line in the status
4054 file. However, 2.6 kernels also have a "shared" pending
4055 queue for delivering signals to a thread group, so check for
4056 a ShdPnd line also.
4057
4058 Unfortunately some Red Hat kernels include the shared pending
4059 queue but not the ShdPnd status field. */
4060
4061 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
4062 add_line_to_sigset (buffer + 8, pending);
4063 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
4064 add_line_to_sigset (buffer + 8, pending);
4065 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
4066 add_line_to_sigset (buffer + 8, blocked);
4067 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
4068 add_line_to_sigset (buffer + 8, ignored);
4069 }
4070
4071 do_cleanups (cleanup);
4072 }
4073
4074 static LONGEST
4075 linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4076 const char *annex, gdb_byte *readbuf,
4077 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4078 {
4079   /* We take a snapshot of the process list when the object starts
4080      to be read, i.e. when OFFSET is zero.  */
4081 static const char *buf;
4082 static LONGEST len_avail = -1;
4083 static struct obstack obstack;
4084
4085 DIR *dirp;
4086
4087 gdb_assert (object == TARGET_OBJECT_OSDATA);
4088
4089 if (strcmp (annex, "processes") != 0)
4090 return 0;
4091
4092 gdb_assert (readbuf && !writebuf);
4093
4094 if (offset == 0)
4095 {
4096 if (len_avail != -1 && len_avail != 0)
4097 obstack_free (&obstack, NULL);
4098 len_avail = 0;
4099 buf = NULL;
4100 obstack_init (&obstack);
4101 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
4102
4103 dirp = opendir ("/proc");
4104 if (dirp)
4105 {
4106 struct dirent *dp;
4107 while ((dp = readdir (dirp)) != NULL)
4108 {
4109 struct stat statbuf;
4110 char procentry[sizeof ("/proc/4294967295")];
4111
4112 if (!isdigit (dp->d_name[0])
4113 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
4114 continue;
4115
4116 sprintf (procentry, "/proc/%s", dp->d_name);
4117 if (stat (procentry, &statbuf) == 0
4118 && S_ISDIR (statbuf.st_mode))
4119 {
4120 char *pathname;
4121 FILE *f;
4122 char cmd[MAXPATHLEN + 1];
4123 struct passwd *entry;
4124
4125 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
4126 entry = getpwuid (statbuf.st_uid);
4127
4128 if ((f = fopen (pathname, "r")) != NULL)
4129 {
4130 		      size_t cmd_len = fread (cmd, 1, sizeof (cmd) - 1, f);
4131 		      if (cmd_len > 0)
4132 		        {
4133 		          size_t i;
4134 		          for (i = 0; i < cmd_len; i++)
4135 		            if (cmd[i] == '\0')
4136 		              cmd[i] = ' ';
4137 		          cmd[cmd_len] = '\0';
4138
4139 obstack_xml_printf (
4140 &obstack,
4141 "<item>"
4142 "<column name=\"pid\">%s</column>"
4143 "<column name=\"user\">%s</column>"
4144 "<column name=\"command\">%s</column>"
4145 "</item>",
4146 dp->d_name,
4147 entry ? entry->pw_name : "?",
4148 cmd);
4149 }
4150 fclose (f);
4151 }
4152
4153 xfree (pathname);
4154 }
4155 }
4156
4157 closedir (dirp);
4158 }
4159
4160 obstack_grow_str0 (&obstack, "</osdata>\n");
4161 buf = obstack_finish (&obstack);
4162 len_avail = strlen (buf);
4163 }
4164
4165 if (offset >= len_avail)
4166 {
4167 /* Done. Get rid of the obstack. */
4168 obstack_free (&obstack, NULL);
4169 buf = NULL;
4170 len_avail = 0;
4171 return 0;
4172 }
4173
4174 if (len > len_avail - offset)
4175 len = len_avail - offset;
4176 memcpy (readbuf, buf + offset, len);
4177
4178 return len;
4179 }
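
/* For illustration, with invented values the snapshot built above
   looks roughly like this (in the real buffer each <item> is a single
   unbroken run; newlines appear only after the opening tag and at the
   end):

     <osdata type="processes">
     <item><column name="pid">1234</column><column name="user">alice</column><column name="command">/bin/sleep 60 </column></item>
     </osdata>

   The NUL separators of /proc/PID/cmdline are replaced by spaces
   before printing, which is why a trailing space can appear in the
   command column.  */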
4180
4181 static LONGEST
4182 linux_xfer_partial (struct target_ops *ops, enum target_object object,
4183 const char *annex, gdb_byte *readbuf,
4184 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4185 {
4186 LONGEST xfer;
4187
4188 if (object == TARGET_OBJECT_AUXV)
4189 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
4190 offset, len);
4191
4192 if (object == TARGET_OBJECT_OSDATA)
4193 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4194 offset, len);
4195
4196 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4197 offset, len);
4198 if (xfer != 0)
4199 return xfer;
4200
4201 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4202 offset, len);
4203 }
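
/* Note the delegation order above: AUXV and OSDATA requests go
   straight to their dedicated handlers; memory reads first try the
   /proc/PID/mem fast path; anything that path declines (writes, short
   transfers, other objects) falls through to the inherited
   ptrace-based method saved in super_xfer_partial.  */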
4204
4205 /* Create a prototype generic GNU/Linux target. The client can override
4206 it with local methods. */
4207
4208 static void
4209 linux_target_install_ops (struct target_ops *t)
4210 {
4211 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4212 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4213 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4214 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
4215 t->to_post_startup_inferior = linux_child_post_startup_inferior;
4216 t->to_post_attach = linux_child_post_attach;
4217 t->to_follow_fork = linux_child_follow_fork;
4218 t->to_find_memory_regions = linux_nat_find_memory_regions;
4219 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4220
4221 super_xfer_partial = t->to_xfer_partial;
4222 t->to_xfer_partial = linux_xfer_partial;
4223 }
4224
4225 struct target_ops *
4226 linux_target (void)
4227 {
4228 struct target_ops *t;
4229
4230 t = inf_ptrace_target ();
4231 linux_target_install_ops (t);
4232
4233 return t;
4234 }
4235
4236 struct target_ops *
4237 linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
4238 {
4239 struct target_ops *t;
4240
4241 t = inf_ptrace_trad_target (register_u_offset);
4242 linux_target_install_ops (t);
4243
4244 return t;
4245 }
4246
4247 /* target_is_async_p implementation. */
4248
4249 static int
4250 linux_nat_is_async_p (void)
4251 {
4252 /* NOTE: palves 2008-03-21: We're only async when the user requests
4253 it explicitly with the "maintenance set target-async" command.
4254      Someday, Linux will always be async.  */
4255 if (!target_async_permitted)
4256 return 0;
4257
4258 return 1;
4259 }
4260
4261 /* target_can_async_p implementation. */
4262
4263 static int
4264 linux_nat_can_async_p (void)
4265 {
4266 /* NOTE: palves 2008-03-21: We're only async when the user requests
4267 it explicitly with the "maintenance set target-async" command.
4268      Someday, Linux will always be async.  */
4269 if (!target_async_permitted)
4270 return 0;
4271
4272 /* See target.h/target_async_mask. */
4273 return linux_nat_async_mask_value;
4274 }
4275
4276 static int
4277 linux_nat_supports_non_stop (void)
4278 {
4279 return 1;
4280 }
4281
4282 /* target_async_mask implementation. */
4283
4284 static int
4285 linux_nat_async_mask (int mask)
4286 {
4287 int current_state;
4288 current_state = linux_nat_async_mask_value;
4289
4290 if (current_state != mask)
4291 {
4292 if (mask == 0)
4293 {
4294 linux_nat_async (NULL, 0);
4295 linux_nat_async_mask_value = mask;
4296 }
4297 else
4298 {
4299 linux_nat_async_mask_value = mask;
4300 linux_nat_async (inferior_event_handler, 0);
4301 }
4302 }
4303
4304 return current_state;
4305 }
4306
4307 /* Pop an event from the event pipe. */
4308
4309 static int
4310 linux_nat_event_pipe_pop (int *ptr_status, int *ptr_options)
4311 {
4312 struct waitpid_result event = {0};
4313 int ret;
4314
4315 do
4316 {
4317 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4318 }
4319 while (ret == -1 && errno == EINTR);
4320
4321 gdb_assert (ret == sizeof (event));
4322
4323 *ptr_status = event.status;
4324 *ptr_options = event.options;
4325
4326 linux_nat_num_queued_events--;
4327
4328 return event.pid;
4329 }
4330
4331 /* Push an event into the event pipe. */
4332
4333 static void
4334 linux_nat_event_pipe_push (int pid, int status, int options)
4335 {
4336 int ret;
4337 struct waitpid_result event = {0};
4338 event.pid = pid;
4339 event.status = status;
4340 event.options = options;
4341
4342 do
4343 {
4344 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
4345 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
4346     }
  while (ret == -1 && errno == EINTR);
4347
4348 linux_nat_num_queued_events++;
4349 }
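
#if 0
/* Illustrative sketch, not part of the original file: the queue above
   works because every event is a fixed-size record, so a single read
   or write moves exactly one event.  A minimal standalone version of
   the same idea (names invented):  */

#include <assert.h>
#include <errno.h>
#include <unistd.h>

struct event_rec { int pid; int status; int options; };

static void
queue_push (int write_fd, const struct event_rec *ev)
{
  ssize_t ret;
  do
    ret = write (write_fd, ev, sizeof (*ev));
  while (ret == -1 && errno == EINTR);
  /* Writes of up to PIPE_BUF bytes are atomic, so there are no
     partial records to worry about.  */
  assert (ret == sizeof (*ev));
}

static int
queue_pop (int read_fd, struct event_rec *ev)
{
  ssize_t ret;
  do
    ret = read (read_fd, ev, sizeof (*ev));
  while (ret == -1 && errno == EINTR);
  return ret == (ssize_t) sizeof (*ev);
}
#endif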
4350
4351 static void
4352 get_pending_events (void)
4353 {
4354 int status, options, pid;
4355
4356 if (!target_async_permitted
4357 || linux_nat_async_events_state != sigchld_async)
4358 internal_error (__FILE__, __LINE__,
4359 "get_pending_events called with async masked");
4360
4361 while (1)
4362 {
4363 status = 0;
4364 options = __WCLONE | WNOHANG;
4365
4366 do
4367 {
4368 pid = waitpid (-1, &status, options);
4369 }
4370 while (pid == -1 && errno == EINTR);
4371
4372 if (pid <= 0)
4373 {
4374 options = WNOHANG;
4375 do
4376 {
4377 pid = waitpid (-1, &status, options);
4378 }
4379 while (pid == -1 && errno == EINTR);
4380 }
4381
4382 if (pid <= 0)
4383 /* No more children reporting events. */
4384 break;
4385
4386 if (debug_linux_nat_async)
4387 fprintf_unfiltered (gdb_stdlog, "\
4388 get_pending_events: pid(%d), status(%x), options (%x)\n",
4389 pid, status, options);
4390
4391 linux_nat_event_pipe_push (pid, status, options);
4392 }
4393
4394 if (debug_linux_nat_async)
4395 fprintf_unfiltered (gdb_stdlog, "\
4396 get_pending_events: linux_nat_num_queued_events(%d)\n",
4397 linux_nat_num_queued_events);
4398 }
4399
4400 /* SIGCHLD handler for async mode. */
4401
4402 static void
4403 async_sigchld_handler (int signo)
4404 {
4405 if (debug_linux_nat_async)
4406 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
4407
4408 get_pending_events ();
4409 }
4410
4411 /* Set SIGCHLD handling state to STATE. Returns previous state. */
4412
4413 static enum sigchld_state
4414 linux_nat_async_events (enum sigchld_state state)
4415 {
4416 enum sigchld_state current_state = linux_nat_async_events_state;
4417
4418 if (debug_linux_nat_async)
4419 fprintf_unfiltered (gdb_stdlog,
4420 "LNAE: state(%d): linux_nat_async_events_state(%d), "
4421 "linux_nat_num_queued_events(%d)\n",
4422 state, linux_nat_async_events_state,
4423 linux_nat_num_queued_events);
4424
4425 if (current_state != state)
4426 {
4427 sigset_t mask;
4428 sigemptyset (&mask);
4429 sigaddset (&mask, SIGCHLD);
4430
4431 /* Always block before changing state. */
4432 sigprocmask (SIG_BLOCK, &mask, NULL);
4433
4434 /* Set new state. */
4435 linux_nat_async_events_state = state;
4436
4437 switch (state)
4438 {
4439 case sigchld_sync:
4440 {
4441 /* Block target events. */
4442 sigprocmask (SIG_BLOCK, &mask, NULL);
4443 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4444 /* Get events out of queue, and make them available to
4445 queued_waitpid / my_waitpid. */
4446 pipe_to_local_event_queue ();
4447 }
4448 break;
4449 case sigchld_async:
4450 {
4451 	    /* Switch to async mode.  Note SIGCHLD stays blocked until
4452 	       the async handler is installed below.  */
4453 	    sigprocmask (SIG_BLOCK, &mask, NULL);
4454
4455 /* Put events we already waited on, in the pipe first, so
4456 events are FIFO. */
4457 local_event_queue_to_pipe ();
4458 /* While in masked async, we may have not collected all
4459 the pending events. Get them out now. */
4460 get_pending_events ();
4461
4462 	    /* Let them come.  */
4463 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4464 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4465 }
4466 break;
4467 case sigchld_default:
4468 {
4469 /* SIGCHLD default mode. */
4470 sigaction (SIGCHLD, &sigchld_default_action, NULL);
4471
4472 /* Get events out of queue, and make them available to
4473 queued_waitpid / my_waitpid. */
4474 pipe_to_local_event_queue ();
4475
4476 /* Unblock SIGCHLD. */
4477 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4478 }
4479 break;
4480 }
4481 }
4482
4483 return current_state;
4484 }
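
/* To summarize the three states handled above:

   sigchld_sync    -- SIGCHLD blocked and the synchronous handler
                      installed; queued pipe events are drained back
                      to the local queue for queued_waitpid.
   sigchld_async   -- local events are pushed into the pipe first (so
                      ordering stays FIFO), remaining children are
                      collected, then the async handler is installed
                      and SIGCHLD unblocked.
   sigchld_default -- the default disposition is restored and SIGCHLD
                      unblocked; used while forking an inferior.  */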
4485
4486 static int async_terminal_is_ours = 1;
4487
4488 /* target_terminal_inferior implementation. */
4489
4490 static void
4491 linux_nat_terminal_inferior (void)
4492 {
4493 if (!target_is_async_p ())
4494 {
4495 /* Async mode is disabled. */
4496 terminal_inferior ();
4497 return;
4498 }
4499
4500 /* GDB should never give the terminal to the inferior, if the
4501 inferior is running in the background (run&, continue&, etc.).
4502 This check can be removed when the common code is fixed. */
4503 if (!sync_execution)
4504 return;
4505
4506 terminal_inferior ();
4507
4508 if (!async_terminal_is_ours)
4509 return;
4510
4511 delete_file_handler (input_fd);
4512 async_terminal_is_ours = 0;
4513 set_sigint_trap ();
4514 }
4515
4516 /* target_terminal_ours implementation. */
4517
4518 void
4519 linux_nat_terminal_ours (void)
4520 {
4521 if (!target_is_async_p ())
4522 {
4523 /* Async mode is disabled. */
4524 terminal_ours ();
4525 return;
4526 }
4527
4528 /* GDB should never give the terminal to the inferior if the
4529 inferior is running in the background (run&, continue&, etc.),
4530      but claiming it back certainly should.  */
4531 terminal_ours ();
4532
4533 if (!sync_execution)
4534 return;
4535
4536 if (async_terminal_is_ours)
4537 return;
4538
4539 clear_sigint_trap ();
4540 add_file_handler (input_fd, stdin_event_handler, 0);
4541 async_terminal_is_ours = 1;
4542 }
4543
4544 static void (*async_client_callback) (enum inferior_event_type event_type,
4545 void *context);
4546 static void *async_client_context;
4547
4548 static void
4549 linux_nat_async_file_handler (int error, gdb_client_data client_data)
4550 {
4551 async_client_callback (INF_REG_EVENT, async_client_context);
4552 }
4553
4554 /* target_async implementation. */
4555
4556 static void
4557 linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4558 void *context), void *context)
4559 {
4560 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
4561 internal_error (__FILE__, __LINE__,
4562 "Calling target_async when async is masked");
4563
4564 if (callback != NULL)
4565 {
4566 async_client_callback = callback;
4567 async_client_context = context;
4568 add_file_handler (linux_nat_event_pipe[0],
4569 linux_nat_async_file_handler, NULL);
4570
4571 linux_nat_async_events (sigchld_async);
4572 }
4573 else
4574 {
4575 async_client_callback = callback;
4576 async_client_context = context;
4577
4578 linux_nat_async_events (sigchld_sync);
4579 delete_file_handler (linux_nat_event_pipe[0]);
4580 }
4581 return;
4582 }
4583
4584 /* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
4585 event came out. */
4586
4587 static int
4588 linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4589 {
4590 ptid_t ptid = * (ptid_t *) data;
4591
4592 if (ptid_equal (lwp->ptid, ptid)
4593 || ptid_equal (minus_one_ptid, ptid)
4594 || (ptid_is_pid (ptid)
4595 && ptid_get_pid (ptid) == ptid_get_pid (lwp->ptid)))
4596 {
4597 if (!lwp->stopped)
4598 {
4599 int pid, status;
4600
4601 if (debug_linux_nat)
4602 fprintf_unfiltered (gdb_stdlog,
4603 "LNSL: running -> suspending %s\n",
4604 target_pid_to_str (lwp->ptid));
4605
4606 /* Peek once, to check if we've already waited for this
4607 LWP. */
4608 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4609 lwp->cloned ? __WCLONE : 0, 1 /* peek */);
4610
4611 if (pid == -1)
4612 {
4613 ptid_t ptid = lwp->ptid;
4614
4615 stop_callback (lwp, NULL);
4616 stop_wait_callback (lwp, NULL);
4617
4618 /* If the lwp exits while we try to stop it, there's
4619 nothing else to do. */
4620 lwp = find_lwp_pid (ptid);
4621 if (lwp == NULL)
4622 return 0;
4623
4624 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4625 lwp->cloned ? __WCLONE : 0,
4626 1 /* peek */);
4627 }
4628
4629 /* If we didn't collect any signal other than SIGSTOP while
4630 stopping the LWP, push a SIGNAL_0 event. In either case,
4631 the event-loop will end up calling target_wait which will
4632 collect these. */
4633 if (pid == -1)
4634 push_waitpid (ptid_get_lwp (lwp->ptid), W_STOPCODE (0),
4635 lwp->cloned ? __WCLONE : 0);
4636 }
4637 else
4638 {
4639 /* Already known to be stopped; do nothing. */
4640
4641 if (debug_linux_nat)
4642 {
4643 if (find_thread_pid (lwp->ptid)->stop_requested)
4644 fprintf_unfiltered (gdb_stdlog, "\
4645 LNSL: already stopped/stop_requested %s\n",
4646 target_pid_to_str (lwp->ptid));
4647 else
4648 fprintf_unfiltered (gdb_stdlog, "\
4649 LNSL: already stopped/no stop_requested yet %s\n",
4650 target_pid_to_str (lwp->ptid));
4651 }
4652 }
4653 }
4654 return 0;
4655 }
4656
4657 static void
4658 linux_nat_stop (ptid_t ptid)
4659 {
4660 if (non_stop)
4661 {
4662 linux_nat_async_events (sigchld_sync);
4663 iterate_over_lwps (linux_nat_stop_lwp, &ptid);
4664 target_async (inferior_event_handler, 0);
4665 }
4666 else
4667 linux_ops->to_stop (ptid);
4668 }
4669
4670 void
4671 linux_nat_add_target (struct target_ops *t)
4672 {
4673 /* Save the provided single-threaded target. We save this in a separate
4674 variable because another target we've inherited from (e.g. inf-ptrace)
4675 may have saved a pointer to T; we want to use it for the final
4676 process stratum target. */
4677 linux_ops_saved = *t;
4678 linux_ops = &linux_ops_saved;
4679
4680 /* Override some methods for multithreading. */
4681 t->to_create_inferior = linux_nat_create_inferior;
4682 t->to_attach = linux_nat_attach;
4683 t->to_detach = linux_nat_detach;
4684 t->to_resume = linux_nat_resume;
4685 t->to_wait = linux_nat_wait;
4686 t->to_xfer_partial = linux_nat_xfer_partial;
4687 t->to_kill = linux_nat_kill;
4688 t->to_mourn_inferior = linux_nat_mourn_inferior;
4689 t->to_thread_alive = linux_nat_thread_alive;
4690 t->to_pid_to_str = linux_nat_pid_to_str;
4691 t->to_has_thread_control = tc_schedlock;
4692
4693 t->to_can_async_p = linux_nat_can_async_p;
4694 t->to_is_async_p = linux_nat_is_async_p;
4695 t->to_supports_non_stop = linux_nat_supports_non_stop;
4696 t->to_async = linux_nat_async;
4697 t->to_async_mask = linux_nat_async_mask;
4698 t->to_terminal_inferior = linux_nat_terminal_inferior;
4699 t->to_terminal_ours = linux_nat_terminal_ours;
4700
4701 /* Methods for non-stop support. */
4702 t->to_stop = linux_nat_stop;
4703
4704 /* We don't change the stratum; this target will sit at
4705    process_stratum and thread_db will sit at thread_stratum.  This
4706 is a little strange, since this is a multi-threaded-capable
4707 target, but we want to be on the stack below thread_db, and we
4708 also want to be used for single-threaded processes. */
4709
4710 add_target (t);
4711 }
4712
4713 /* Register a method to call whenever a new thread is attached. */
4714 void
4715 linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4716 {
4717 /* Save the pointer. We only support a single registered instance
4718 of the GNU/Linux native target, so we do not need to map this to
4719 T. */
4720 linux_nat_new_thread = new_thread;
4721 }
4722
4723 /* Return the saved siginfo associated with PTID. */
4724 struct siginfo *
4725 linux_nat_get_siginfo (ptid_t ptid)
4726 {
4727 struct lwp_info *lp = find_lwp_pid (ptid);
4728
4729 gdb_assert (lp != NULL);
4730
4731 return &lp->siginfo;
4732 }
4733
4734 /* Enable/Disable async mode. */
4735
4736 static void
4737 linux_nat_setup_async (void)
4738 {
4739 if (pipe (linux_nat_event_pipe) == -1)
4740 internal_error (__FILE__, __LINE__,
4741 "creating event pipe failed.");
4742 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4743 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4744 }
4745
4746 void
4747 _initialize_linux_nat (void)
4748 {
4749 sigset_t mask;
4750
4751 add_info ("proc", linux_nat_info_proc_cmd, _("\
4752 Show /proc process information about any running process.\n\
4753 Specify any process id, or use the program being debugged by default.\n\
4754 Specify any of the following keywords for detailed info:\n\
4755 mappings -- list of mapped memory regions.\n\
4756   stat -- list assorted process statistics (from /proc/PID/stat).\n\
4757   status -- list process state and signal information (from /proc/PID/status).\n\
4758 all -- list all available /proc info."));
4759
4760 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4761 &debug_linux_nat, _("\
4762 Set debugging of GNU/Linux lwp module."), _("\
4763 Show debugging of GNU/Linux lwp module."), _("\
4764 Enables printf debugging output."),
4765 NULL,
4766 show_debug_linux_nat,
4767 &setdebuglist, &showdebuglist);
4768
4769 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4770 &debug_linux_nat_async, _("\
4771 Set debugging of GNU/Linux async lwp module."), _("\
4772 Show debugging of GNU/Linux async lwp module."), _("\
4773 Enables printf debugging output."),
4774 NULL,
4775 show_debug_linux_nat_async,
4776 &setdebuglist, &showdebuglist);
4777
4778 /* Get the default SIGCHLD action. Used while forking an inferior
4779 (see linux_nat_create_inferior/linux_nat_async_events). */
4780 sigaction (SIGCHLD, NULL, &sigchld_default_action);
4781
4782 /* Block SIGCHLD by default. Doing this early prevents it getting
4783 unblocked if an exception is thrown due to an error while the
4784 inferior is starting (sigsetjmp/siglongjmp). */
4785 sigemptyset (&mask);
4786 sigaddset (&mask, SIGCHLD);
4787 sigprocmask (SIG_BLOCK, &mask, NULL);
4788
4789 /* Save this mask as the default. */
4790 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4791
4792 /* The synchronous SIGCHLD handler. */
4793 sync_sigchld_action.sa_handler = sigchld_handler;
4794 sigemptyset (&sync_sigchld_action.sa_mask);
4795 sync_sigchld_action.sa_flags = SA_RESTART;
4796
4797 /* Make it the default. */
4798 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4799
4800 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4801 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4802 sigdelset (&suspend_mask, SIGCHLD);
4803
4804 /* SIGCHLD handler for async mode. */
4805 async_sigchld_action.sa_handler = async_sigchld_handler;
4806 sigemptyset (&async_sigchld_action.sa_mask);
4807 async_sigchld_action.sa_flags = SA_RESTART;
4808
4809 linux_nat_setup_async ();
4810
4811 add_setshow_boolean_cmd ("disable-randomization", class_support,
4812 &disable_randomization, _("\
4813 Set disabling of debuggee's virtual address space randomization."), _("\
4814 Show disabling of debuggee's virtual address space randomization."), _("\
4815 When this mode is on (which is the default), randomization of the virtual\n\
4816 address space is disabled. Standalone programs run with the randomization\n\
4817 enabled by default on some platforms."),
4818 &set_disable_randomization,
4819 &show_disable_randomization,
4820 &setlist, &showlist);
4821 }
4822 \f
4823
4824 /* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4825 the GNU/Linux Threads library and therefore doesn't really belong
4826 here. */
4827
4828 /* Read variable NAME in the target and return its value if found.
4829 Otherwise return zero. It is assumed that the type of the variable
4830 is `int'. */
4831
4832 static int
4833 get_signo (const char *name)
4834 {
4835 struct minimal_symbol *ms;
4836 int signo;
4837
4838 ms = lookup_minimal_symbol (name, NULL, NULL);
4839 if (ms == NULL)
4840 return 0;
4841
4842 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
4843 sizeof (signo)) != 0)
4844 return 0;
4845
4846 return signo;
4847 }
4848
4849 /* Return the set of signals used by the threads library in *SET. */
4850
4851 void
4852 lin_thread_get_thread_signals (sigset_t *set)
4853 {
4854 struct sigaction action;
4855 int restart, cancel;
4856 sigset_t blocked_mask;
4857
4858 sigemptyset (&blocked_mask);
4859 sigemptyset (set);
4860
4861 restart = get_signo ("__pthread_sig_restart");
4862 cancel = get_signo ("__pthread_sig_cancel");
4863
4864 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4865 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4866 not provide any way for the debugger to query the signal numbers -
4867 fortunately they don't change! */
4868
4869 if (restart == 0)
4870 restart = __SIGRTMIN;
4871
4872 if (cancel == 0)
4873 cancel = __SIGRTMIN + 1;
4874
4875 sigaddset (set, restart);
4876 sigaddset (set, cancel);
4877
4878 /* The GNU/Linux Threads library makes terminating threads send a
4879 special "cancel" signal instead of SIGCHLD. Make sure we catch
4880 those (to prevent them from terminating GDB itself, which is
4881 likely to be their default action) and treat them the same way as
4882 SIGCHLD. */
4883
4884 action.sa_handler = sigchld_handler;
4885 sigemptyset (&action.sa_mask);
4886 action.sa_flags = SA_RESTART;
4887 sigaction (cancel, &action, NULL);
4888
4889 /* We block the "cancel" signal throughout this code ... */
4890 sigaddset (&blocked_mask, cancel);
4891 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4892
4893 /* ... except during a sigsuspend. */
4894 sigdelset (&suspend_mask, cancel);
4895 }