/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "target.h"
#include "gdb_string.h"
#include "gdb_wait.h"
#include "gdb_assert.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/param.h>		/* for MAXPATHLEN */
#include <sys/procfs.h>		/* for elf_gregset etc. */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include "gdbthread.h"		/* for struct thread_info etc. */
#include "gdb_stat.h"		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"

#ifdef HAVE_PERSONALITY
# include <sys/personality.h>
# if !HAVE_DECL_ADDR_NO_RANDOMIZE
#  define ADDR_NO_RANDOMIZE 0x0040000
# endif
#endif /* HAVE_PERSONALITY */

/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good
enough.  Prior to version 2.4, Linux can either wait for events in the
main thread, or in secondary threads, but not both in a single call.
(2.4 has the __WALL flag).  So, if we use a blocking waitpid, we might
miss an event.  The solution is to use non-blocking waitpid, together
with sigsuspend.  First, we use non-blocking waitpid to get an event in
the main process, if any.  Second, we use non-blocking waitpid with the
__WCLONE flag to check for events in cloned processes.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it
means something happened to a child process -- and SIGCHLD will be
delivered both for events in the main debugged process and in cloned
processes.  As soon as we know there's an event, we get back to calling
non-blocking waitpid with and without __WCLONE.

Note that SIGCHLD should be blocked between the waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input and
target events, so neither blocking waitpid nor sigsuspend are viable
options.  Instead, we should notify the GDB main event loop whenever
there's an unprocessed event from the target.  The only way to notify
this event loop is to make it wait on input from a pipe, and write
something to the pipe whenever there's an event.  Obviously, failing to
notify the event loop when there is a target event is bad.  Notifying
the event loop when there is no event from the target is harmless --
linux-nat.c will detect that there's no event and report an event of
type TARGET_WAITKIND_IGNORE -- but it wastes time and is better
avoided.

The main design point is that every time GDB is outside linux-nat.c, we
have a SIGCHLD handler installed that is called when something happens
to the target and notifies the GDB event loop.  Also, the event is
extracted from the target using waitpid and stored for future use.
Whenever the GDB core decides to handle the event, and calls into
linux-nat.c, we disable SIGCHLD and process things as in sync mode,
except that before the waitpid call we check if there are any
previously read events.

It could happen that during event processing, we'll try to get more
events than there are events in the local queue, which will result in a
waitpid call.  Those waitpid calls, while blocking, are guaranteed to
always have something for waitpid to return.  E.g., stopping a thread
with SIGSTOP, and waiting for the lwp to stop.

The event loop is notified about new events using a pipe.  The SIGCHLD
handler does waitpid and writes the results into a pipe.  The GDB event
loop has the other end of the pipe among its sources.  When the event
loop starts to process the event and calls a function in linux-nat.c,
all events from the pipe are transferred into a local queue and SIGCHLD
is blocked.  Further processing goes as in sync mode.  Before we return
from linux_nat_wait, we transfer all unprocessed events from the local
queue back to the pipe, so that when we get back to the event loop, the
event loop will notice there's something more to do.

SIGCHLD is blocked when we're inside target_wait, so that should we
actually want to wait for some more events, the SIGCHLD handler does
not steal them from us.  Technically, it would be possible to add new
events to the local queue instead, but it's about the same amount of
work as blocking SIGCHLD.

This moving of events from the pipe into a local queue and back into
the pipe when we enter/leave linux-nat.c is somewhat ugly.
Unfortunately, the GDB event loop is home-grown and incapable of
waiting on a queue.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to be
delivered, so that we can intercept it.  SIGSTOP's advantage is that it
can not be blocked.  A disadvantage is that it is not a real-time
signal, so it can only be queued once; we do not keep track of other
sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */
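
/* As a minimal sketch of the sync-mode scheme just described (for
   illustration only, not code used by this file; it assumes SIGCHLD is
   already blocked and that suspend_mask is the original signal mask
   with SIGCHLD removed, as set up later in this file):

     for (;;)
       {
         int status;
         pid_t pid;

         pid = waitpid (-1, &status, WNOHANG);
         if (pid <= 0)
           pid = waitpid (-1, &status, WNOHANG | __WCLONE);
         if (pid > 0)
           break;

         sigsuspend (&suspend_mask);
       }

   The first waitpid checks the main debugged process, the second
   checks cloned processes, and sigsuspend atomically unblocks SIGCHLD
   and sleeps; if a SIGCHLD became pending while it was blocked,
   sigsuspend returns immediately and we loop to retry the waitpid
   calls.  */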

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the
   normal values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
#define PTRACE_GETSIGINFO    0x4202
#endif

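/* For illustration, a rough sketch of how these extended event codes
   are consumed (child_pid here is just an illustrative name for some
   traced process with PTRACE_O_TRACEFORK set): the event is reported
   as a SIGTRAP stop with the event code in the high bits of the
   waitpid status, and PTRACE_GETEVENTMSG retrieves the associated
   message, e.g. the new child's pid after a fork:

     int status;
     long new_pid = 0;

     waitpid (child_pid, &status, 0);
     if (WIFSTOPPED (status) && (status >> 16) == PTRACE_EVENT_FORK)
       ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &new_pid);

   This is the same decoding used by linux_test_for_tracefork further
   below.  */
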
10d6c8cd
DJ
196/* The single-threaded native GNU/Linux target_ops. We save a pointer for
197 the use of the multi-threaded target. */
198static struct target_ops *linux_ops;
f973ed9c 199static struct target_ops linux_ops_saved;
10d6c8cd 200
9f0bdab8
DJ
201/* The method to call, if any, when a new thread is attached. */
202static void (*linux_nat_new_thread) (ptid_t);
203
ac264b3b
MS
204/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
205 Called by our to_xfer_partial. */
206static LONGEST (*super_xfer_partial) (struct target_ops *,
207 enum target_object,
208 const char *, gdb_byte *,
209 const gdb_byte *,
10d6c8cd
DJ
210 ULONGEST, LONGEST);
211
d6b0e80f 212static int debug_linux_nat;
920d2a44
AC
213static void
214show_debug_linux_nat (struct ui_file *file, int from_tty,
215 struct cmd_list_element *c, const char *value)
216{
217 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
218 value);
219}
d6b0e80f 220
b84876c2
PA
221static int debug_linux_nat_async = 0;
222static void
223show_debug_linux_nat_async (struct ui_file *file, int from_tty,
224 struct cmd_list_element *c, const char *value)
225{
226 fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
227 value);
228}
229
10568435
JK
230static int disable_randomization = 1;
231
232static void
233show_disable_randomization (struct ui_file *file, int from_tty,
234 struct cmd_list_element *c, const char *value)
235{
236#ifdef HAVE_PERSONALITY
237 fprintf_filtered (file, _("\
238Disabling randomization of debuggee's virtual address space is %s.\n"),
239 value);
240#else /* !HAVE_PERSONALITY */
241 fputs_filtered (_("\
242Disabling randomization of debuggee's virtual address space is unsupported on\n\
243this platform.\n"), file);
244#endif /* !HAVE_PERSONALITY */
245}
246
247static void
248set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
249{
250#ifndef HAVE_PERSONALITY
251 error (_("\
252Disabling randomization of debuggee's virtual address space is unsupported on\n\
253this platform."));
254#endif /* !HAVE_PERSONALITY */
255}
256
9016a515
DJ
257static int linux_parent_pid;
258
ae087d01
DJ
259struct simple_pid_list
260{
261 int pid;
3d799a95 262 int status;
ae087d01
DJ
263 struct simple_pid_list *next;
264};
265struct simple_pid_list *stopped_pids;
266
3993f6b1
DJ
267/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
268 can not be used, 1 if it can. */
269
270static int linux_supports_tracefork_flag = -1;
271
9016a515
DJ
272/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
273 PTRACE_O_TRACEVFORKDONE. */
274
275static int linux_supports_tracevforkdone_flag = -1;
276
b84876c2
PA
277/* Async mode support */
278
b84876c2
PA
279/* Zero if the async mode, although enabled, is masked, which means
280 linux_nat_wait should behave as if async mode was off. */
281static int linux_nat_async_mask_value = 1;
282
283/* The read/write ends of the pipe registered as waitable file in the
284 event loop. */
285static int linux_nat_event_pipe[2] = { -1, -1 };
286
287/* Number of queued events in the pipe. */
288static volatile int linux_nat_num_queued_events;
289
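
/* The pipe just declared is used in the usual self-pipe style; a
   minimal sketch of the notification side, for illustration only
   (notify_event_loop is an illustrative name; the real push/pop
   routines are declared below and defined later in this file):

     static void
     notify_event_loop (void)
     {
       char c = '+';

       write (linux_nat_event_pipe[1], &c, 1);
     }

   The async SIGCHLD handler calls waitpid, stores the result, and
   writes to the pipe; the event loop, which has the read end among its
   sources, wakes up and eventually calls back into linux-nat.c, where
   the queued results are consumed.  */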
84e46146 290/* The possible SIGCHLD handling states. */
b84876c2 291
84e46146
PA
292enum sigchld_state
293{
294 /* SIGCHLD disabled, with action set to sigchld_handler, for the
295 sigsuspend in linux_nat_wait. */
296 sigchld_sync,
297 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
298 sigchld_async,
299 /* Set SIGCHLD to default action. Used while creating an
300 inferior. */
301 sigchld_default
302};
303
304/* The current SIGCHLD handling state. */
305static enum sigchld_state linux_nat_async_events_state;
306
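
/* Switching among these states boils down to installing a different
   SIGCHLD disposition with sigaction; the switching itself is handled
   by linux_nat_async_events, declared just below.  As a rough sketch
   of the idea only (sync_handler and async_handler are illustrative
   names, not functions from this file):

     struct sigaction action;

     memset (&action, 0, sizeof (action));
     sigemptyset (&action.sa_mask);
     if (new_state == sigchld_default)
       action.sa_handler = SIG_DFL;
     else if (new_state == sigchld_async)
       action.sa_handler = async_handler;
     else
       action.sa_handler = sync_handler;
     sigaction (SIGCHLD, &action, NULL);

   where new_state is one of the enum sigchld_state values above.  */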
307static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
b84876c2
PA
308static void pipe_to_local_event_queue (void);
309static void local_event_queue_to_pipe (void);
310static void linux_nat_event_pipe_push (int pid, int status, int options);
311static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
312static void linux_nat_set_async_mode (int on);
313static void linux_nat_async (void (*callback)
314 (enum inferior_event_type event_type, void *context),
315 void *context);
316static int linux_nat_async_mask (int mask);
a0ef4274 317static int kill_lwp (int lwpid, int signo);
b84876c2 318
4c28f408
PA
319static int send_sigint_callback (struct lwp_info *lp, void *data);
320static int stop_callback (struct lwp_info *lp, void *data);
321
b84876c2
PA
322/* Captures the result of a successful waitpid call, along with the
323 options used in that call. */
324struct waitpid_result
325{
326 int pid;
327 int status;
328 int options;
329 struct waitpid_result *next;
330};
331
332/* A singly-linked list of the results of the waitpid calls performed
333 in the async SIGCHLD handler. */
334static struct waitpid_result *waitpid_queue = NULL;
335
336static int
337queued_waitpid (int pid, int *status, int flags)
338{
339 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
340
  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
			"\
QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
			linux_nat_async_events_state,
			linux_nat_num_queued_events);
347
348 if (flags & __WALL)
349 {
350 for (; msg; prev = msg, msg = msg->next)
351 if (pid == -1 || pid == msg->pid)
352 break;
353 }
354 else if (flags & __WCLONE)
355 {
356 for (; msg; prev = msg, msg = msg->next)
357 if (msg->options & __WCLONE
358 && (pid == -1 || pid == msg->pid))
359 break;
360 }
361 else
362 {
363 for (; msg; prev = msg, msg = msg->next)
364 if ((msg->options & __WCLONE) == 0
365 && (pid == -1 || pid == msg->pid))
366 break;
367 }
368
369 if (msg)
370 {
371 int pid;
372
373 if (prev)
374 prev->next = msg->next;
375 else
376 waitpid_queue = msg->next;
377
378 msg->next = NULL;
379 if (status)
380 *status = msg->status;
381 pid = msg->pid;
382
383 if (debug_linux_nat_async)
384 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
385 pid, msg->status);
386 xfree (msg);
387
388 return pid;
389 }
390
391 if (debug_linux_nat_async)
392 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
393
394 if (status)
395 *status = 0;
396 return -1;
397}
398
399static void
400push_waitpid (int pid, int status, int options)
401{
402 struct waitpid_result *event, *new_event;
403
404 new_event = xmalloc (sizeof (*new_event));
405 new_event->pid = pid;
406 new_event->status = status;
407 new_event->options = options;
408 new_event->next = NULL;
409
410 if (waitpid_queue)
411 {
412 for (event = waitpid_queue;
413 event && event->next;
414 event = event->next)
415 ;
416
417 event->next = new_event;
418 }
419 else
420 waitpid_queue = new_event;
421}
422
/* Drain all queued events of PID.  If PID is -1, the effect is to
   drain all events.  */
425static void
426drain_queued_events (int pid)
427{
428 while (queued_waitpid (pid, NULL, __WALL) != -1)
429 ;
430}
431
ae087d01
DJ
432\f
433/* Trivial list manipulation functions to keep track of a list of
434 new stopped processes. */
435static void
3d799a95 436add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
437{
438 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
439 new_pid->pid = pid;
3d799a95 440 new_pid->status = status;
ae087d01
DJ
441 new_pid->next = *listp;
442 *listp = new_pid;
443}
444
445static int
3d799a95 446pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
ae087d01
DJ
447{
448 struct simple_pid_list **p;
449
450 for (p = listp; *p != NULL; p = &(*p)->next)
451 if ((*p)->pid == pid)
452 {
453 struct simple_pid_list *next = (*p)->next;
3d799a95 454 *status = (*p)->status;
ae087d01
DJ
455 xfree (*p);
456 *p = next;
457 return 1;
458 }
459 return 0;
460}
461
3d799a95
DJ
462static void
463linux_record_stopped_pid (int pid, int status)
ae087d01 464{
3d799a95 465 add_to_pid_list (&stopped_pids, pid, status);
ae087d01
DJ
466}
467
3993f6b1
DJ
468\f
469/* A helper function for linux_test_for_tracefork, called after fork (). */
470
471static void
472linux_tracefork_child (void)
473{
474 int ret;
475
476 ptrace (PTRACE_TRACEME, 0, 0, 0);
477 kill (getpid (), SIGSTOP);
478 fork ();
48bb3cce 479 _exit (0);
3993f6b1
DJ
480}
481
b84876c2
PA
482/* Wrapper function for waitpid which handles EINTR, and checks for
483 locally queued events. */
b957e937
DJ
484
485static int
486my_waitpid (int pid, int *status, int flags)
487{
488 int ret;
b84876c2
PA
489
490 /* There should be no concurrent calls to waitpid. */
84e46146 491 gdb_assert (linux_nat_async_events_state == sigchld_sync);
b84876c2
PA
492
493 ret = queued_waitpid (pid, status, flags);
494 if (ret != -1)
495 return ret;
496
b957e937
DJ
497 do
498 {
499 ret = waitpid (pid, status, flags);
500 }
501 while (ret == -1 && errno == EINTR);
502
503 return ret;
504}
505
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the
   tracing options for ORIGINAL_PID, but we'll be setting them shortly
   anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.
   We create a child process, attach to it, use PTRACE_SETOPTIONS to
   enable fork tracing, and let it fork.  If the process exits, we
   assume that we can't use TRACEFORK; if we get the fork notification,
   and we can extract the new child's PID, then we assume that we
   can.  */
3993f6b1
DJ
518
519static void
b957e937 520linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
521{
522 int child_pid, ret, status;
523 long second_pid;
4c28f408
PA
524 enum sigchld_state async_events_original_state;
525
526 async_events_original_state = linux_nat_async_events (sigchld_sync);
3993f6b1 527
b957e937
DJ
528 linux_supports_tracefork_flag = 0;
529 linux_supports_tracevforkdone_flag = 0;
530
531 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
532 if (ret != 0)
533 return;
534
3993f6b1
DJ
535 child_pid = fork ();
536 if (child_pid == -1)
e2e0b3e5 537 perror_with_name (("fork"));
3993f6b1
DJ
538
539 if (child_pid == 0)
540 linux_tracefork_child ();
541
b957e937 542 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 543 if (ret == -1)
e2e0b3e5 544 perror_with_name (("waitpid"));
3993f6b1 545 else if (ret != child_pid)
8a3fe4f8 546 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 547 if (! WIFSTOPPED (status))
8a3fe4f8 548 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
3993f6b1 549
3993f6b1
DJ
550 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
551 if (ret != 0)
552 {
b957e937
DJ
553 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
554 if (ret != 0)
555 {
8a3fe4f8 556 warning (_("linux_test_for_tracefork: failed to kill child"));
4c28f408 557 linux_nat_async_events (async_events_original_state);
b957e937
DJ
558 return;
559 }
560
561 ret = my_waitpid (child_pid, &status, 0);
562 if (ret != child_pid)
8a3fe4f8 563 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
b957e937 564 else if (!WIFSIGNALED (status))
8a3fe4f8
AC
565 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
566 "killed child"), status);
b957e937 567
4c28f408 568 linux_nat_async_events (async_events_original_state);
3993f6b1
DJ
569 return;
570 }
571
9016a515
DJ
572 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
573 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
574 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
575 linux_supports_tracevforkdone_flag = (ret == 0);
576
b957e937
DJ
577 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
578 if (ret != 0)
8a3fe4f8 579 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
580
581 ret = my_waitpid (child_pid, &status, 0);
582
3993f6b1
DJ
583 if (ret == child_pid && WIFSTOPPED (status)
584 && status >> 16 == PTRACE_EVENT_FORK)
585 {
586 second_pid = 0;
587 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
588 if (ret == 0 && second_pid != 0)
589 {
590 int second_status;
591
592 linux_supports_tracefork_flag = 1;
b957e937
DJ
593 my_waitpid (second_pid, &second_status, 0);
594 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
595 if (ret != 0)
8a3fe4f8 596 warning (_("linux_test_for_tracefork: failed to kill second child"));
97725dc4 597 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
598 }
599 }
b957e937 600 else
8a3fe4f8
AC
601 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
602 "(%d, status 0x%x)"), ret, status);
3993f6b1 603
b957e937
DJ
604 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
605 if (ret != 0)
8a3fe4f8 606 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 607 my_waitpid (child_pid, &status, 0);
4c28f408
PA
608
609 linux_nat_async_events (async_events_original_state);
3993f6b1
DJ
610}
611
612/* Return non-zero iff we have tracefork functionality available.
613 This function also sets linux_supports_tracefork_flag. */
614
615static int
b957e937 616linux_supports_tracefork (int pid)
3993f6b1
DJ
617{
618 if (linux_supports_tracefork_flag == -1)
b957e937 619 linux_test_for_tracefork (pid);
3993f6b1
DJ
620 return linux_supports_tracefork_flag;
621}
622
9016a515 623static int
b957e937 624linux_supports_tracevforkdone (int pid)
9016a515
DJ
625{
626 if (linux_supports_tracefork_flag == -1)
b957e937 627 linux_test_for_tracefork (pid);
9016a515
DJ
628 return linux_supports_tracevforkdone_flag;
629}
630
3993f6b1 631\f
4de4c07c
DJ
632void
633linux_enable_event_reporting (ptid_t ptid)
634{
d3587048 635 int pid = ptid_get_lwp (ptid);
4de4c07c
DJ
636 int options;
637
d3587048
DJ
638 if (pid == 0)
639 pid = ptid_get_pid (ptid);
640
b957e937 641 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
642 return;
643
a2f23071
DJ
644 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
645 | PTRACE_O_TRACECLONE;
b957e937 646 if (linux_supports_tracevforkdone (pid))
9016a515
DJ
647 options |= PTRACE_O_TRACEVFORKDONE;
648
649 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
650 read-only process state. */
4de4c07c
DJ
651
652 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
653}
654
6d8fd2b7
UW
655static void
656linux_child_post_attach (int pid)
4de4c07c
DJ
657{
658 linux_enable_event_reporting (pid_to_ptid (pid));
0ec9a092 659 check_for_thread_db ();
4de4c07c
DJ
660}
661
10d6c8cd 662static void
4de4c07c
DJ
663linux_child_post_startup_inferior (ptid_t ptid)
664{
665 linux_enable_event_reporting (ptid);
0ec9a092 666 check_for_thread_db ();
4de4c07c
DJ
667}
668
6d8fd2b7
UW
669static int
670linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 671{
4de4c07c
DJ
672 ptid_t last_ptid;
673 struct target_waitstatus last_status;
9016a515 674 int has_vforked;
4de4c07c
DJ
675 int parent_pid, child_pid;
676
b84876c2
PA
677 if (target_can_async_p ())
678 target_async (NULL, 0);
679
4de4c07c 680 get_last_target_status (&last_ptid, &last_status);
9016a515 681 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
d3587048
DJ
682 parent_pid = ptid_get_lwp (last_ptid);
683 if (parent_pid == 0)
684 parent_pid = ptid_get_pid (last_ptid);
3a3e9ee3 685 child_pid = PIDGET (last_status.value.related_pid);
4de4c07c
DJ
686
687 if (! follow_child)
688 {
689 /* We're already attached to the parent, by default. */
690
691 /* Before detaching from the child, remove all breakpoints from
692 it. (This won't actually modify the breakpoint list, but will
693 physically remove the breakpoints from the child.) */
9016a515
DJ
694 /* If we vforked this will remove the breakpoints from the parent
695 also, but they'll be reinserted below. */
4de4c07c
DJ
696 detach_breakpoints (child_pid);
697
ac264b3b
MS
698 /* Detach new forked process? */
699 if (detach_fork)
f75c00e4 700 {
e85a822c 701 if (info_verbose || debug_linux_nat)
ac264b3b
MS
702 {
703 target_terminal_ours ();
704 fprintf_filtered (gdb_stdlog,
705 "Detaching after fork from child process %d.\n",
706 child_pid);
707 }
4de4c07c 708
ac264b3b
MS
709 ptrace (PTRACE_DETACH, child_pid, 0, 0);
710 }
711 else
712 {
713 struct fork_info *fp;
7f9f62ba
PA
714
715 /* Add process to GDB's tables. */
716 add_inferior (child_pid);
717
ac264b3b
MS
718 /* Retain child fork in ptrace (stopped) state. */
719 fp = find_fork_pid (child_pid);
720 if (!fp)
721 fp = add_fork (child_pid);
722 fork_save_infrun_state (fp, 0);
723 }
9016a515
DJ
724
725 if (has_vforked)
726 {
b957e937
DJ
727 gdb_assert (linux_supports_tracefork_flag >= 0);
728 if (linux_supports_tracevforkdone (0))
9016a515
DJ
729 {
730 int status;
731
732 ptrace (PTRACE_CONT, parent_pid, 0, 0);
58aecb61 733 my_waitpid (parent_pid, &status, __WALL);
c874c7fc 734 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
8a3fe4f8
AC
735 warning (_("Unexpected waitpid result %06x when waiting for "
736 "vfork-done"), status);
9016a515
DJ
737 }
738 else
739 {
740 /* We can't insert breakpoints until the child has
741 finished with the shared memory region. We need to
742 wait until that happens. Ideal would be to just
743 call:
744 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
745 - waitpid (parent_pid, &status, __WALL);
746 However, most architectures can't handle a syscall
747 being traced on the way out if it wasn't traced on
748 the way in.
749
750 We might also think to loop, continuing the child
751 until it exits or gets a SIGTRAP. One problem is
752 that the child might call ptrace with PTRACE_TRACEME.
753
754 There's no simple and reliable way to figure out when
755 the vforked child will be done with its copy of the
756 shared memory. We could step it out of the syscall,
757 two instructions, let it go, and then single-step the
758 parent once. When we have hardware single-step, this
759 would work; with software single-step it could still
760 be made to work but we'd have to be able to insert
761 single-step breakpoints in the child, and we'd have
762 to insert -just- the single-step breakpoint in the
763 parent. Very awkward.
764
765 In the end, the best we can do is to make sure it
766 runs for a little while. Hopefully it will be out of
767 range of any breakpoints we reinsert. Usually this
768 is only the single-step breakpoint at vfork's return
769 point. */
770
771 usleep (10000);
772 }
773
774 /* Since we vforked, breakpoints were removed in the parent
775 too. Put them back. */
776 reattach_breakpoints (parent_pid);
777 }
4de4c07c 778 }
3993f6b1 779 else
4de4c07c 780 {
4e1c45ea
PA
781 struct thread_info *last_tp = find_thread_pid (last_ptid);
782 struct thread_info *tp;
4de4c07c
DJ
783 char child_pid_spelling[40];
784
4e1c45ea
PA
785 /* Copy user stepping state to the new inferior thread. */
786 struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
787 CORE_ADDR step_range_start = last_tp->step_range_start;
788 CORE_ADDR step_range_end = last_tp->step_range_end;
789 struct frame_id step_frame_id = last_tp->step_frame_id;
790
791 /* Otherwise, deleting the parent would get rid of this
792 breakpoint. */
793 last_tp->step_resume_breakpoint = NULL;
794
4de4c07c 795 /* Needed to keep the breakpoint lists in sync. */
9016a515
DJ
796 if (! has_vforked)
797 detach_breakpoints (child_pid);
4de4c07c
DJ
798
799 /* Before detaching from the parent, remove all breakpoints from it. */
800 remove_breakpoints ();
801
e85a822c 802 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
803 {
804 target_terminal_ours ();
ac264b3b
MS
805 fprintf_filtered (gdb_stdlog,
806 "Attaching after fork to child process %d.\n",
807 child_pid);
f75c00e4 808 }
4de4c07c 809
9016a515
DJ
810 /* If we're vforking, we may want to hold on to the parent until
811 the child exits or execs. At exec time we can remove the old
812 breakpoints from the parent and detach it; at exit time we
813 could do the same (or even, sneakily, resume debugging it - the
814 child's exec has failed, or something similar).
815
816 This doesn't clean up "properly", because we can't call
817 target_detach, but that's OK; if the current target is "child",
818 then it doesn't need any further cleanups, and lin_lwp will
819 generally not encounter vfork (vfork is defined to fork
820 in libpthread.so).
821
822 The holding part is very easy if we have VFORKDONE events;
823 but keeping track of both processes is beyond GDB at the
824 moment. So we don't expose the parent to the rest of GDB.
825 Instead we quietly hold onto it until such time as we can
826 safely resume it. */
827
828 if (has_vforked)
7f9f62ba
PA
829 {
830 linux_parent_pid = parent_pid;
831 detach_inferior (parent_pid);
832 }
ac264b3b
MS
833 else if (!detach_fork)
834 {
835 struct fork_info *fp;
836 /* Retain parent fork in ptrace (stopped) state. */
837 fp = find_fork_pid (parent_pid);
838 if (!fp)
839 fp = add_fork (parent_pid);
840 fork_save_infrun_state (fp, 0);
841 }
9016a515 842 else
b84876c2 843 target_detach (NULL, 0);
4de4c07c 844
9f0bdab8 845 inferior_ptid = ptid_build (child_pid, child_pid, 0);
7f9f62ba 846 add_inferior (child_pid);
ee057212
DJ
847
848 /* Reinstall ourselves, since we might have been removed in
849 target_detach (which does other necessary cleanup). */
ac264b3b 850
ee057212 851 push_target (ops);
9f0bdab8 852 linux_nat_switch_fork (inferior_ptid);
ef29ce1a 853 check_for_thread_db ();
4de4c07c 854
4e1c45ea
PA
855 tp = inferior_thread ();
856 tp->step_resume_breakpoint = step_resume_breakpoint;
857 tp->step_range_start = step_range_start;
858 tp->step_range_end = step_range_end;
859 tp->step_frame_id = step_frame_id;
860
4de4c07c
DJ
861 /* Reset breakpoints in the child as appropriate. */
862 follow_inferior_reset_breakpoints ();
863 }
864
b84876c2
PA
865 if (target_can_async_p ())
866 target_async (inferior_event_handler, 0);
867
4de4c07c
DJ
868 return 0;
869}
870
4de4c07c 871\f
6d8fd2b7
UW
872static void
873linux_child_insert_fork_catchpoint (int pid)
4de4c07c 874{
b957e937 875 if (! linux_supports_tracefork (pid))
8a3fe4f8 876 error (_("Your system does not support fork catchpoints."));
3993f6b1
DJ
877}
878
6d8fd2b7
UW
879static void
880linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 881{
b957e937 882 if (!linux_supports_tracefork (pid))
8a3fe4f8 883 error (_("Your system does not support vfork catchpoints."));
3993f6b1
DJ
884}
885
6d8fd2b7
UW
886static void
887linux_child_insert_exec_catchpoint (int pid)
3993f6b1 888{
b957e937 889 if (!linux_supports_tracefork (pid))
8a3fe4f8 890 error (_("Your system does not support exec catchpoints."));
3993f6b1
DJ
891}
892
d6b0e80f
AC
893/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
894 are processes sharing the same VM space. A multi-threaded process
895 is basically a group of such processes. However, such a grouping
896 is almost entirely a user-space issue; the kernel doesn't enforce
897 such a grouping at all (this might change in the future). In
898 general, we'll rely on the threads library (i.e. the GNU/Linux
899 Threads library) to provide such a grouping.
900
901 It is perfectly well possible to write a multi-threaded application
902 without the assistance of a threads library, by using the clone
903 system call directly. This module should be able to give some
904 rudimentary support for debugging such applications if developers
905 specify the CLONE_PTRACE flag in the clone system call, and are
906 using the Linux kernel 2.4 or above.
907
908 Note that there are some peculiarities in GNU/Linux that affect
909 this code:
910
911 - In general one should specify the __WCLONE flag to waitpid in
912 order to make it report events for any of the cloned processes
913 (and leave it out for the initial process). However, if a cloned
914 process has exited the exit status is only reported if the
915 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
916 we cannot use it since GDB must work on older systems too.
917
918 - When a traced, cloned process exits and is waited for by the
919 debugger, the kernel reassigns it to the original parent and
920 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
921 library doesn't notice this, which leads to the "zombie problem":
922 When debugged a multi-threaded process that spawns a lot of
923 threads will run out of processes, even if the threads exit,
924 because the "zombies" stay around. */
925
926/* List of known LWPs. */
9f0bdab8 927struct lwp_info *lwp_list;
d6b0e80f
AC
928
929/* Number of LWPs in the list. */
930static int num_lwps;
d6b0e80f
AC
931\f
932
d6b0e80f
AC
933/* Original signal mask. */
934static sigset_t normal_mask;
935
936/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
937 _initialize_linux_nat. */
938static sigset_t suspend_mask;
939
b84876c2
PA
940/* SIGCHLD action for synchronous mode. */
941struct sigaction sync_sigchld_action;
942
943/* SIGCHLD action for asynchronous mode. */
944static struct sigaction async_sigchld_action;
84e46146
PA
945
946/* SIGCHLD default action, to pass to new inferiors. */
947static struct sigaction sigchld_default_action;
d6b0e80f
AC
948\f
949
950/* Prototypes for local functions. */
951static int stop_wait_callback (struct lwp_info *lp, void *data);
952static int linux_nat_thread_alive (ptid_t ptid);
6d8fd2b7 953static char *linux_child_pid_to_exec_file (int pid);
710151dd
PA
954static int cancel_breakpoint (struct lwp_info *lp);
955
d6b0e80f
AC
956\f
957/* Convert wait status STATUS to a string. Used for printing debug
958 messages only. */
959
960static char *
961status_to_str (int status)
962{
963 static char buf[64];
964
965 if (WIFSTOPPED (status))
966 snprintf (buf, sizeof (buf), "%s (stopped)",
967 strsignal (WSTOPSIG (status)));
968 else if (WIFSIGNALED (status))
969 snprintf (buf, sizeof (buf), "%s (terminated)",
970 strsignal (WSTOPSIG (status)));
971 else
972 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
973
974 return buf;
975}
976
977/* Initialize the list of LWPs. Note that this module, contrary to
978 what GDB's generic threads layer does for its thread list,
979 re-initializes the LWP lists whenever we mourn or detach (which
980 doesn't involve mourning) the inferior. */
981
982static void
983init_lwp_list (void)
984{
985 struct lwp_info *lp, *lpnext;
986
987 for (lp = lwp_list; lp; lp = lpnext)
988 {
989 lpnext = lp->next;
990 xfree (lp);
991 }
992
993 lwp_list = NULL;
994 num_lwps = 0;
d6b0e80f
AC
995}
996
f973ed9c 997/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
998 structure describing the new LWP. The LWP should already be stopped
999 (with an exception for the very first LWP). */
d6b0e80f
AC
1000
1001static struct lwp_info *
1002add_lwp (ptid_t ptid)
1003{
1004 struct lwp_info *lp;
1005
1006 gdb_assert (is_lwp (ptid));
1007
1008 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1009
1010 memset (lp, 0, sizeof (struct lwp_info));
1011
1012 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1013
1014 lp->ptid = ptid;
1015
1016 lp->next = lwp_list;
1017 lwp_list = lp;
f973ed9c 1018 ++num_lwps;
d6b0e80f 1019
9f0bdab8
DJ
1020 if (num_lwps > 1 && linux_nat_new_thread != NULL)
1021 linux_nat_new_thread (ptid);
1022
d6b0e80f
AC
1023 return lp;
1024}
1025
1026/* Remove the LWP specified by PID from the list. */
1027
1028static void
1029delete_lwp (ptid_t ptid)
1030{
1031 struct lwp_info *lp, *lpprev;
1032
1033 lpprev = NULL;
1034
1035 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1036 if (ptid_equal (lp->ptid, ptid))
1037 break;
1038
1039 if (!lp)
1040 return;
1041
d6b0e80f
AC
1042 num_lwps--;
1043
1044 if (lpprev)
1045 lpprev->next = lp->next;
1046 else
1047 lwp_list = lp->next;
1048
1049 xfree (lp);
1050}
1051
1052/* Return a pointer to the structure describing the LWP corresponding
1053 to PID. If no corresponding LWP could be found, return NULL. */
1054
1055static struct lwp_info *
1056find_lwp_pid (ptid_t ptid)
1057{
1058 struct lwp_info *lp;
1059 int lwp;
1060
1061 if (is_lwp (ptid))
1062 lwp = GET_LWP (ptid);
1063 else
1064 lwp = GET_PID (ptid);
1065
1066 for (lp = lwp_list; lp; lp = lp->next)
1067 if (lwp == GET_LWP (lp->ptid))
1068 return lp;
1069
1070 return NULL;
1071}
1072
1073/* Call CALLBACK with its second argument set to DATA for every LWP in
1074 the list. If CALLBACK returns 1 for a particular LWP, return a
1075 pointer to the structure describing that LWP immediately.
1076 Otherwise return NULL. */
1077
1078struct lwp_info *
1079iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
1080{
1081 struct lwp_info *lp, *lpnext;
1082
1083 for (lp = lwp_list; lp; lp = lpnext)
1084 {
1085 lpnext = lp->next;
1086 if ((*callback) (lp, data))
1087 return lp;
1088 }
1089
1090 return NULL;
1091}
1092
f973ed9c
DJ
1093/* Update our internal state when changing from one fork (checkpoint,
1094 et cetera) to another indicated by NEW_PTID. We can only switch
1095 single-threaded applications, so we only create one new LWP, and
1096 the previous list is discarded. */
1097
1098void
1099linux_nat_switch_fork (ptid_t new_ptid)
1100{
1101 struct lwp_info *lp;
1102
1103 init_lwp_list ();
1104 lp = add_lwp (new_ptid);
1105 lp->stopped = 1;
e26af52f 1106
4f8d22e3
PA
1107 init_thread_list ();
1108 add_thread_silent (new_ptid);
e26af52f
DJ
1109}
1110
e26af52f
DJ
1111/* Handle the exit of a single thread LP. */
1112
1113static void
1114exit_lwp (struct lwp_info *lp)
1115{
063bfe2e
VP
1116 struct thread_info *th = find_thread_pid (lp->ptid);
1117
1118 if (th)
e26af52f 1119 {
17faa917
DJ
1120 if (print_thread_events)
1121 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1122
4f8d22e3 1123 delete_thread (lp->ptid);
e26af52f
DJ
1124 }
1125
1126 delete_lwp (lp->ptid);
1127}
1128
a0ef4274
DJ
1129/* Detect `T (stopped)' in `/proc/PID/status'.
1130 Other states including `T (tracing stop)' are reported as false. */
1131
1132static int
1133pid_is_stopped (pid_t pid)
1134{
1135 FILE *status_file;
1136 char buf[100];
1137 int retval = 0;
1138
1139 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1140 status_file = fopen (buf, "r");
1141 if (status_file != NULL)
1142 {
1143 int have_state = 0;
1144
1145 while (fgets (buf, sizeof (buf), status_file))
1146 {
1147 if (strncmp (buf, "State:", 6) == 0)
1148 {
1149 have_state = 1;
1150 break;
1151 }
1152 }
1153 if (have_state && strstr (buf, "T (stopped)") != NULL)
1154 retval = 1;
1155 fclose (status_file);
1156 }
1157 return retval;
1158}
1159
1160/* Wait for the LWP specified by LP, which we have just attached to.
1161 Returns a wait status for that LWP, to cache. */
1162
1163static int
1164linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1165 int *signalled)
1166{
1167 pid_t new_pid, pid = GET_LWP (ptid);
1168 int status;
1169
1170 if (pid_is_stopped (pid))
1171 {
1172 if (debug_linux_nat)
1173 fprintf_unfiltered (gdb_stdlog,
1174 "LNPAW: Attaching to a stopped process\n");
1175
1176 /* The process is definitely stopped. It is in a job control
1177 stop, unless the kernel predates the TASK_STOPPED /
1178 TASK_TRACED distinction, in which case it might be in a
1179 ptrace stop. Make sure it is in a ptrace stop; from there we
1180 can kill it, signal it, et cetera.
1181
1182 First make sure there is a pending SIGSTOP. Since we are
1183 already attached, the process can not transition from stopped
1184 to running without a PTRACE_CONT; so we know this signal will
1185 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1186 probably already in the queue (unless this kernel is old
1187 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1188 is not an RT signal, it can only be queued once. */
1189 kill_lwp (pid, SIGSTOP);
1190
1191 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1192 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1193 ptrace (PTRACE_CONT, pid, 0, 0);
1194 }
1195
1196 /* Make sure the initial process is stopped. The user-level threads
1197 layer might want to poke around in the inferior, and that won't
1198 work if things haven't stabilized yet. */
1199 new_pid = my_waitpid (pid, &status, 0);
1200 if (new_pid == -1 && errno == ECHILD)
1201 {
1202 if (first)
1203 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1204
1205 /* Try again with __WCLONE to check cloned processes. */
1206 new_pid = my_waitpid (pid, &status, __WCLONE);
1207 *cloned = 1;
1208 }
1209
1210 gdb_assert (pid == new_pid && WIFSTOPPED (status));
1211
1212 if (WSTOPSIG (status) != SIGSTOP)
1213 {
1214 *signalled = 1;
1215 if (debug_linux_nat)
1216 fprintf_unfiltered (gdb_stdlog,
1217 "LNPAW: Received %s after attaching\n",
1218 status_to_str (status));
1219 }
1220
1221 return status;
1222}
1223
1224/* Attach to the LWP specified by PID. Return 0 if successful or -1
1225 if the new LWP could not be attached. */
d6b0e80f 1226
9ee57c33 1227int
93815fbf 1228lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1229{
9ee57c33 1230 struct lwp_info *lp;
84e46146 1231 enum sigchld_state async_events_original_state;
d6b0e80f
AC
1232
1233 gdb_assert (is_lwp (ptid));
1234
84e46146 1235 async_events_original_state = linux_nat_async_events (sigchld_sync);
d6b0e80f 1236
9ee57c33 1237 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1238
1239 /* We assume that we're already attached to any LWP that has an id
1240 equal to the overall process id, and to any LWP that is already
1241 in our list of LWPs. If we're not seeing exit events from threads
1242 and we've had PID wraparound since we last tried to stop all threads,
1243 this assumption might be wrong; fortunately, this is very unlikely
1244 to happen. */
9ee57c33 1245 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1246 {
a0ef4274 1247 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1248
1249 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1250 {
1251 /* If we fail to attach to the thread, issue a warning,
1252 but continue. One way this can happen is if thread
e9efe249 1253 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1254 bug may place threads in the thread list and then fail
1255 to create them. */
1256 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1257 safe_strerror (errno));
1258 return -1;
1259 }
1260
d6b0e80f
AC
1261 if (debug_linux_nat)
1262 fprintf_unfiltered (gdb_stdlog,
1263 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1264 target_pid_to_str (ptid));
1265
a0ef4274
DJ
1266 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1267 lp = add_lwp (ptid);
1268 lp->stopped = 1;
1269 lp->cloned = cloned;
1270 lp->signalled = signalled;
1271 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1272 {
a0ef4274
DJ
1273 lp->resumed = 1;
1274 lp->status = status;
d6b0e80f
AC
1275 }
1276
a0ef4274 1277 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1278
1279 if (debug_linux_nat)
1280 {
1281 fprintf_unfiltered (gdb_stdlog,
1282 "LLAL: waitpid %s received %s\n",
1283 target_pid_to_str (ptid),
1284 status_to_str (status));
1285 }
1286 }
1287 else
1288 {
1289 /* We assume that the LWP representing the original process is
1290 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1291 that the GNU/linux ptrace layer uses to keep track of
1292 threads. Note that this won't have already been done since
1293 the main thread will have, we assume, been stopped by an
1294 attach from a different layer. */
9ee57c33
DJ
1295 if (lp == NULL)
1296 lp = add_lwp (ptid);
d6b0e80f
AC
1297 lp->stopped = 1;
1298 }
9ee57c33 1299
84e46146 1300 linux_nat_async_events (async_events_original_state);
9ee57c33 1301 return 0;
d6b0e80f
AC
1302}
1303
b84876c2
PA
1304static void
1305linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
1306 int from_tty)
1307{
1308 int saved_async = 0;
10568435
JK
1309#ifdef HAVE_PERSONALITY
1310 int personality_orig = 0, personality_set = 0;
1311#endif /* HAVE_PERSONALITY */
b84876c2
PA
1312
1313 /* The fork_child mechanism is synchronous and calls target_wait, so
1314 we have to mask the async mode. */
1315
1316 if (target_can_async_p ())
84e46146
PA
1317 /* Mask async mode. Creating a child requires a loop calling
1318 wait_for_inferior currently. */
b84876c2
PA
1319 saved_async = linux_nat_async_mask (0);
1320 else
1321 {
1322 /* Restore the original signal mask. */
1323 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1324 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1325 suspend_mask = normal_mask;
1326 sigdelset (&suspend_mask, SIGCHLD);
1327 }
1328
84e46146
PA
1329 /* Set SIGCHLD to the default action, until after execing the child,
1330 since the inferior inherits the superior's signal mask. It will
1331 be blocked again in linux_nat_wait, which is only reached after
1332 the inferior execing. */
1333 linux_nat_async_events (sigchld_default);
1334
10568435
JK
1335#ifdef HAVE_PERSONALITY
1336 if (disable_randomization)
1337 {
1338 errno = 0;
1339 personality_orig = personality (0xffffffff);
1340 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1341 {
1342 personality_set = 1;
1343 personality (personality_orig | ADDR_NO_RANDOMIZE);
1344 }
1345 if (errno != 0 || (personality_set
1346 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1347 warning (_("Error disabling address space randomization: %s"),
1348 safe_strerror (errno));
1349 }
1350#endif /* HAVE_PERSONALITY */
1351
b84876c2
PA
1352 linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);
1353
10568435
JK
1354#ifdef HAVE_PERSONALITY
1355 if (personality_set)
1356 {
1357 errno = 0;
1358 personality (personality_orig);
1359 if (errno != 0)
1360 warning (_("Error restoring address space randomization: %s"),
1361 safe_strerror (errno));
1362 }
1363#endif /* HAVE_PERSONALITY */
1364
b84876c2
PA
1365 if (saved_async)
1366 linux_nat_async_mask (saved_async);
1367}
1368
d6b0e80f
AC
1369static void
1370linux_nat_attach (char *args, int from_tty)
1371{
1372 struct lwp_info *lp;
d6b0e80f 1373 int status;
af990527 1374 ptid_t ptid;
d6b0e80f
AC
1375
1376 /* FIXME: We should probably accept a list of process id's, and
1377 attach all of them. */
10d6c8cd 1378 linux_ops->to_attach (args, from_tty);
d6b0e80f 1379
b84876c2
PA
1380 if (!target_can_async_p ())
1381 {
1382 /* Restore the original signal mask. */
1383 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1384 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1385 suspend_mask = normal_mask;
1386 sigdelset (&suspend_mask, SIGCHLD);
1387 }
1388
af990527
PA
1389 /* The ptrace base target adds the main thread with (pid,0,0)
1390 format. Decorate it with lwp info. */
1391 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1392 thread_change_ptid (inferior_ptid, ptid);
1393
9f0bdab8 1394 /* Add the initial process as the first LWP to the list. */
af990527 1395 lp = add_lwp (ptid);
a0ef4274
DJ
1396
1397 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1398 &lp->signalled);
1399 lp->stopped = 1;
9f0bdab8 1400
a0ef4274 1401 /* Save the wait status to report later. */
d6b0e80f 1402 lp->resumed = 1;
a0ef4274
DJ
1403 if (debug_linux_nat)
1404 fprintf_unfiltered (gdb_stdlog,
1405 "LNA: waitpid %ld, saving status %s\n",
1406 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd
PA
1407
1408 if (!target_can_async_p ())
a0ef4274 1409 lp->status = status;
710151dd
PA
1410 else
1411 {
1412 /* We already waited for this LWP, so put the wait result on the
	 pipe.  The event loop will wake up and get us to handle
	 this event.  */
a0ef4274
DJ
1415 linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
1416 lp->cloned ? __WCLONE : 0);
b84876c2
PA
1417 /* Register in the event loop. */
1418 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1419 }
1420}
1421
a0ef4274
DJ
1422/* Get pending status of LP. */
1423static int
1424get_pending_status (struct lwp_info *lp, int *status)
1425{
1426 struct target_waitstatus last;
1427 ptid_t last_ptid;
1428
1429 get_last_target_status (&last_ptid, &last);
1430
1431 /* If this lwp is the ptid that GDB is processing an event from, the
1432 signal will be in stop_signal. Otherwise, in all-stop + sync
1433 mode, we may cache pending events in lp->status while trying to
1434 stop all threads (see stop_wait_callback). In async mode, the
1435 events are always cached in waitpid_queue. */
1436
1437 *status = 0;
4c28f408
PA
1438
1439 if (non_stop)
a0ef4274 1440 {
4c28f408
PA
1441 enum target_signal signo = TARGET_SIGNAL_0;
1442
1443 if (is_executing (lp->ptid))
1444 {
1445 /* If the core thought this lwp was executing --- e.g., the
1446 executing property hasn't been updated yet, but the
1447 thread has been stopped with a stop_callback /
1448 stop_wait_callback sequence (see linux_nat_detach for
1449 example) --- we can only have pending events in the local
1450 queue. */
1451 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1452 {
8b8655b3
TJB
1453 if (WIFSTOPPED (*status))
1454 signo = target_signal_from_host (WSTOPSIG (*status));
4c28f408
PA
1455
1456 /* If not stopped, then the lwp is gone, no use in
1457 resending a signal. */
1458 }
1459 }
1460 else
1461 {
1462 /* If the core knows the thread is not executing, then we
1463 have the last signal recorded in
2020b7ab 1464 thread_info->stop_signal. */
4c28f408 1465
2020b7ab
PA
1466 struct thread_info *tp = find_thread_pid (lp->ptid);
1467 signo = tp->stop_signal;
4c28f408
PA
1468 }
1469
1470 if (signo != TARGET_SIGNAL_0
1471 && !signal_pass_state (signo))
1472 {
1473 if (debug_linux_nat)
1474 fprintf_unfiltered (gdb_stdlog, "\
1475GPT: lwp %s had signal %s, but it is in no pass state\n",
1476 target_pid_to_str (lp->ptid),
1477 target_signal_to_string (signo));
1478 }
1479 else
1480 {
1481 if (signo != TARGET_SIGNAL_0)
1482 *status = W_STOPCODE (target_signal_to_host (signo));
1483
1484 if (debug_linux_nat)
1485 fprintf_unfiltered (gdb_stdlog,
			      "GPT: lwp %s has pending signal %s\n",
1487 target_pid_to_str (lp->ptid),
1488 target_signal_to_string (signo));
1489 }
a0ef4274 1490 }
a0ef4274 1491 else
4c28f408
PA
1492 {
1493 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1494 {
2020b7ab
PA
1495 struct thread_info *tp = find_thread_pid (lp->ptid);
1496 if (tp->stop_signal != TARGET_SIGNAL_0
1497 && signal_pass_state (tp->stop_signal))
1498 *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
4c28f408
PA
1499 }
1500 else if (target_can_async_p ())
1501 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1502 else
1503 *status = lp->status;
1504 }
a0ef4274
DJ
1505
1506 return 0;
1507}
1508
d6b0e80f
AC
1509static int
1510detach_callback (struct lwp_info *lp, void *data)
1511{
1512 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1513
1514 if (debug_linux_nat && lp->status)
1515 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1516 strsignal (WSTOPSIG (lp->status)),
1517 target_pid_to_str (lp->ptid));
1518
a0ef4274
DJ
1519 /* If there is a pending SIGSTOP, get rid of it. */
1520 if (lp->signalled)
d6b0e80f 1521 {
d6b0e80f
AC
1522 if (debug_linux_nat)
1523 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1524 "DC: Sending SIGCONT to %s\n",
1525 target_pid_to_str (lp->ptid));
d6b0e80f 1526
a0ef4274 1527 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1528 lp->signalled = 0;
d6b0e80f
AC
1529 }
1530
1531 /* We don't actually detach from the LWP that has an id equal to the
1532 overall process id just yet. */
1533 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1534 {
a0ef4274
DJ
1535 int status = 0;
1536
1537 /* Pass on any pending signal for this LWP. */
1538 get_pending_status (lp, &status);
1539
d6b0e80f
AC
1540 errno = 0;
1541 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1542 WSTOPSIG (status)) < 0)
8a3fe4f8 1543 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1544 safe_strerror (errno));
1545
1546 if (debug_linux_nat)
1547 fprintf_unfiltered (gdb_stdlog,
1548 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1549 target_pid_to_str (lp->ptid),
1550 strsignal (WSTOPSIG (lp->status)));
1551
1552 delete_lwp (lp->ptid);
1553 }
1554
1555 return 0;
1556}
1557
1558static void
1559linux_nat_detach (char *args, int from_tty)
1560{
b84876c2 1561 int pid;
a0ef4274
DJ
1562 int status;
1563 enum target_signal sig;
1564
b84876c2
PA
1565 if (target_can_async_p ())
1566 linux_nat_async (NULL, 0);
1567
  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
1570 iterate_over_lwps (stop_callback, NULL);
1571 /* ... and wait until all of them have reported back that
1572 they're no longer running. */
1573 iterate_over_lwps (stop_wait_callback, NULL);
1574
d6b0e80f
AC
1575 iterate_over_lwps (detach_callback, NULL);
1576
1577 /* Only the initial process should be left right now. */
1578 gdb_assert (num_lwps == 1);
1579
a0ef4274
DJ
1580 /* Pass on any pending signal for the last LWP. */
1581 if ((args == NULL || *args == '\0')
1582 && get_pending_status (lwp_list, &status) != -1
1583 && WIFSTOPPED (status))
1584 {
1585 /* Put the signal number in ARGS so that inf_ptrace_detach will
1586 pass it along with PTRACE_DETACH. */
1587 args = alloca (8);
1588 sprintf (args, "%d", (int) WSTOPSIG (status));
1589 fprintf_unfiltered (gdb_stdlog,
1590 "LND: Sending signal %s to %s\n",
1591 args,
1592 target_pid_to_str (lwp_list->ptid));
1593 }
1594
d6b0e80f
AC
1595 /* Destroy LWP info; it's no longer valid. */
1596 init_lwp_list ();
1597
b84876c2
PA
1598 pid = GET_PID (inferior_ptid);
1599 inferior_ptid = pid_to_ptid (pid);
10d6c8cd 1600 linux_ops->to_detach (args, from_tty);
b84876c2
PA
1601
1602 if (target_can_async_p ())
1603 drain_queued_events (pid);
d6b0e80f
AC
1604}
1605
1606/* Resume LP. */
1607
1608static int
1609resume_callback (struct lwp_info *lp, void *data)
1610{
1611 if (lp->stopped && lp->status == 0)
1612 {
10d6c8cd
DJ
1613 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1614 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1615 if (debug_linux_nat)
1616 fprintf_unfiltered (gdb_stdlog,
1617 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1618 target_pid_to_str (lp->ptid));
1619 lp->stopped = 0;
1620 lp->step = 0;
9f0bdab8 1621 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
d6b0e80f 1622 }
57380f4e
DJ
1623 else if (lp->stopped && debug_linux_nat)
1624 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
1625 target_pid_to_str (lp->ptid));
1626 else if (debug_linux_nat)
1627 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
1628 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1629
1630 return 0;
1631}
1632
1633static int
1634resume_clear_callback (struct lwp_info *lp, void *data)
1635{
1636 lp->resumed = 0;
1637 return 0;
1638}
1639
1640static int
1641resume_set_callback (struct lwp_info *lp, void *data)
1642{
1643 lp->resumed = 1;
1644 return 0;
1645}
1646
1647static void
1648linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1649{
1650 struct lwp_info *lp;
1651 int resume_all;
1652
76f50ad1
DJ
1653 if (debug_linux_nat)
1654 fprintf_unfiltered (gdb_stdlog,
1655 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1656 step ? "step" : "resume",
1657 target_pid_to_str (ptid),
1658 signo ? strsignal (signo) : "0",
1659 target_pid_to_str (inferior_ptid));
1660
b84876c2
PA
1661 if (target_can_async_p ())
1662 /* Block events while we're here. */
84e46146 1663 linux_nat_async_events (sigchld_sync);
b84876c2 1664
d6b0e80f
AC
1665 /* A specific PTID means `step only this process id'. */
1666 resume_all = (PIDGET (ptid) == -1);
1667
4c28f408
PA
1668 if (non_stop && resume_all)
1669 internal_error (__FILE__, __LINE__,
1670 "can't resume all in non-stop mode");
1671
1672 if (!non_stop)
1673 {
1674 if (resume_all)
1675 iterate_over_lwps (resume_set_callback, NULL);
1676 else
1677 iterate_over_lwps (resume_clear_callback, NULL);
1678 }
d6b0e80f
AC
1679
1680 /* If PID is -1, it's the current inferior that should be
1681 handled specially. */
1682 if (PIDGET (ptid) == -1)
1683 ptid = inferior_ptid;
1684
1685 lp = find_lwp_pid (ptid);
9f0bdab8 1686 gdb_assert (lp != NULL);
d6b0e80f 1687
4c28f408 1688 /* Convert to something the lower layer understands. */
9f0bdab8 1689 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1690
9f0bdab8
DJ
1691 /* Remember if we're stepping. */
1692 lp->step = step;
d6b0e80f 1693
9f0bdab8
DJ
1694 /* Mark this LWP as resumed. */
1695 lp->resumed = 1;
76f50ad1 1696
9f0bdab8
DJ
1697 /* If we have a pending wait status for this thread, there is no
1698 point in resuming the process. But first make sure that
1699 linux_nat_wait won't preemptively handle the event - we
1700 should never take this short-circuit if we are going to
1701 leave LP running, since we have skipped resuming all the
1702 other threads. This bit of code needs to be synchronized
1703 with linux_nat_wait. */
76f50ad1 1704
710151dd
PA
1705 /* In async mode, we never have pending wait status. */
1706 if (target_can_async_p () && lp->status)
1707 internal_error (__FILE__, __LINE__, "Pending status in async mode");
1708
9f0bdab8
DJ
1709 if (lp->status && WIFSTOPPED (lp->status))
1710 {
d6b48e9c
PA
1711 int saved_signo;
1712 struct inferior *inf;
76f50ad1 1713
d6b48e9c
PA
1714 inf = find_inferior_pid (ptid_get_pid (ptid));
1715 gdb_assert (inf);
1716 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1717
1718 /* Defer to common code if we're gaining control of the
1719 inferior. */
1720 if (inf->stop_soon == NO_STOP_QUIETLY
1721 && signal_stop_state (saved_signo) == 0
9f0bdab8
DJ
1722 && signal_print_state (saved_signo) == 0
1723 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1724 {
9f0bdab8
DJ
1725 if (debug_linux_nat)
1726 fprintf_unfiltered (gdb_stdlog,
1727 "LLR: Not short circuiting for ignored "
1728 "status 0x%x\n", lp->status);
1729
d6b0e80f
AC
1730 /* FIXME: What should we do if we are supposed to continue
1731 this thread with a signal? */
1732 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1733 signo = saved_signo;
1734 lp->status = 0;
1735 }
1736 }
76f50ad1 1737
9f0bdab8
DJ
1738 if (lp->status)
1739 {
1740 /* FIXME: What should we do if we are supposed to continue
1741 this thread with a signal? */
1742 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1743
9f0bdab8
DJ
1744 if (debug_linux_nat)
1745 fprintf_unfiltered (gdb_stdlog,
1746 "LLR: Short circuiting for status 0x%x\n",
1747 lp->status);
d6b0e80f 1748
9f0bdab8 1749 return;
d6b0e80f
AC
1750 }
1751
9f0bdab8
DJ
1752 /* Mark LWP as not stopped to prevent it from being continued by
1753 resume_callback. */
1754 lp->stopped = 0;
1755
d6b0e80f
AC
1756 if (resume_all)
1757 iterate_over_lwps (resume_callback, NULL);
1758
10d6c8cd 1759 linux_ops->to_resume (ptid, step, signo);
9f0bdab8
DJ
1760 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1761
d6b0e80f
AC
1762 if (debug_linux_nat)
1763 fprintf_unfiltered (gdb_stdlog,
1764 "LLR: %s %s, %s (resume event thread)\n",
1765 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1766 target_pid_to_str (ptid),
1767 signo ? strsignal (signo) : "0");
b84876c2
PA
1768
1769 if (target_can_async_p ())
8ea051c5 1770 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1771}
1772
1773/* Issue kill to specified lwp. */
1774
1775static int tkill_failed;
1776
1777static int
1778kill_lwp (int lwpid, int signo)
1779{
1780 errno = 0;
1781
1782/* Use tkill, if possible, in case we are using nptl threads. If tkill
1783 fails, then we are not using nptl threads and we should be using kill. */
1784
1785#ifdef HAVE_TKILL_SYSCALL
1786 if (!tkill_failed)
1787 {
1788 int ret = syscall (__NR_tkill, lwpid, signo);
1789 if (errno != ENOSYS)
1790 return ret;
1791 errno = 0;
1792 tkill_failed = 1;
1793 }
1794#endif
1795
1796 return kill (lwpid, signo);
1797}
1798
3d799a95
DJ
1799/* Handle a GNU/Linux extended wait response. If we see a clone
1800 event, we need to add the new LWP to our list (and not report the
1801 trap to higher layers). This function returns non-zero if the
1802 event should be ignored and we should wait again. If STOPPING is
1803 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1804
1805static int
3d799a95
DJ
1806linux_handle_extended_wait (struct lwp_info *lp, int status,
1807 int stopping)
d6b0e80f 1808{
3d799a95
DJ
1809 int pid = GET_LWP (lp->ptid);
1810 struct target_waitstatus *ourstatus = &lp->waitstatus;
1811 struct lwp_info *new_lp = NULL;
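 /* When PTRACE_O_TRACEFORK and friends are enabled, the kernel
    encodes the PTRACE_EVENT_* code in the upper bits of the SIGTRAP
    wait status, so shifting the status right by 16 recovers it. */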
1812 int event = status >> 16;
d6b0e80f 1813
3d799a95
DJ
1814 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1815 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1816 {
3d799a95
DJ
1817 unsigned long new_pid;
1818 int ret;
1819
1820 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1821
3d799a95
DJ
1822 /* If we haven't already seen the new PID stop, wait for it now. */
1823 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1824 {
1825 /* The new child has a pending SIGSTOP. We can't affect it until it
1826 hits the SIGSTOP, but we're already attached. */
1827 ret = my_waitpid (new_pid, &status,
1828 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1829 if (ret == -1)
1830 perror_with_name (_("waiting for new child"));
1831 else if (ret != new_pid)
1832 internal_error (__FILE__, __LINE__,
1833 _("wait returned unexpected PID %d"), ret);
1834 else if (!WIFSTOPPED (status))
1835 internal_error (__FILE__, __LINE__,
1836 _("wait returned unexpected status 0x%x"), status);
1837 }
1838
3a3e9ee3 1839 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95
DJ
1840
1841 if (event == PTRACE_EVENT_FORK)
1842 ourstatus->kind = TARGET_WAITKIND_FORKED;
1843 else if (event == PTRACE_EVENT_VFORK)
1844 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1845 else
3d799a95 1846 {
4c28f408
PA
1847 struct cleanup *old_chain;
1848
3d799a95
DJ
1849 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1850 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1851 new_lp->cloned = 1;
4c28f408 1852 new_lp->stopped = 1;
d6b0e80f 1853
3d799a95
DJ
1854 if (WSTOPSIG (status) != SIGSTOP)
1855 {
1856 /* This can happen if someone starts sending signals to
1857 the new thread before it gets a chance to run, which
1858 have a lower number than SIGSTOP (e.g. SIGUSR1).
1859 This is an unlikely case, and harder to handle for
1860 fork / vfork than for clone, so we do not try - but
1861 we handle it for clone events here. We'll send
1862 the other signal on to the thread below. */
1863
1864 new_lp->signalled = 1;
1865 }
1866 else
1867 status = 0;
d6b0e80f 1868
4c28f408 1869 if (non_stop)
3d799a95 1870 {
4c28f408
PA
1871 /* Add the new thread to GDB's lists as soon as possible
1872 so that:
1873
1874 1) the frontend doesn't have to wait for a stop to
1875 display them, and,
1876
1877 2) we tag it with the correct running state. */
1878
1879 /* If the thread_db layer is active, let it know about
1880 this new thread, and add it to GDB's list. */
1881 if (!thread_db_attach_lwp (new_lp->ptid))
1882 {
1883 /* We're not using thread_db. Add it to GDB's
1884 list. */
1885 target_post_attach (GET_LWP (new_lp->ptid));
1886 add_thread (new_lp->ptid);
1887 }
1888
1889 if (!stopping)
1890 {
1891 set_running (new_lp->ptid, 1);
1892 set_executing (new_lp->ptid, 1);
1893 }
1894 }
1895
1896 if (!stopping)
1897 {
1898 new_lp->stopped = 0;
3d799a95 1899 new_lp->resumed = 1;
4c28f408 1900 ptrace (PTRACE_CONT, new_pid, 0,
3d799a95
DJ
1901 status ? WSTOPSIG (status) : 0);
1902 }
d6b0e80f 1903
3d799a95
DJ
1904 if (debug_linux_nat)
1905 fprintf_unfiltered (gdb_stdlog,
1906 "LHEW: Got clone event from LWP %ld, resuming\n",
1907 GET_LWP (lp->ptid));
1908 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1909
1910 return 1;
1911 }
1912
1913 return 0;
d6b0e80f
AC
1914 }
1915
3d799a95
DJ
1916 if (event == PTRACE_EVENT_EXEC)
1917 {
1918 ourstatus->kind = TARGET_WAITKIND_EXECD;
1919 ourstatus->value.execd_pathname
6d8fd2b7 1920 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1921
1922 if (linux_parent_pid)
1923 {
1924 detach_breakpoints (linux_parent_pid);
1925 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1926
1927 linux_parent_pid = 0;
1928 }
1929
25b22b0a
PA
1930 /* At this point, all inserted breakpoints are gone. Doing this
1931 as soon as we detect an exec prevents the badness of deleting
1932 a breakpoint writing the current "shadow contents" to lift
1933 the bp. That shadow is NOT valid after an exec.
1934
1935 Note that we have to do this after the detach_breakpoints
1936 call above, otherwise breakpoints wouldn't be lifted from the
1937 parent on a vfork, because detach_breakpoints would think
1938 that breakpoints are not inserted. */
1939 mark_breakpoints_out ();
3d799a95
DJ
1940 return 0;
1941 }
1942
1943 internal_error (__FILE__, __LINE__,
1944 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1945}
1946
1947/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1948 exited. */
1949
1950static int
1951wait_lwp (struct lwp_info *lp)
1952{
1953 pid_t pid;
1954 int status;
1955 int thread_dead = 0;
1956
1957 gdb_assert (!lp->stopped);
1958 gdb_assert (lp->status == 0);
1959
58aecb61 1960 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1961 if (pid == -1 && errno == ECHILD)
1962 {
58aecb61 1963 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1964 if (pid == -1 && errno == ECHILD)
1965 {
1966 /* The thread has previously exited. We need to delete it
1967 now because, for some vendor 2.4 kernels with NPTL
1968 support backported, there won't be an exit event unless
1969 it is the main thread. 2.6 kernels will report an exit
1970 event for each thread that exits, as expected. */
1971 thread_dead = 1;
1972 if (debug_linux_nat)
1973 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1974 target_pid_to_str (lp->ptid));
1975 }
1976 }
1977
1978 if (!thread_dead)
1979 {
1980 gdb_assert (pid == GET_LWP (lp->ptid));
1981
1982 if (debug_linux_nat)
1983 {
1984 fprintf_unfiltered (gdb_stdlog,
1985 "WL: waitpid %s received %s\n",
1986 target_pid_to_str (lp->ptid),
1987 status_to_str (status));
1988 }
1989 }
1990
1991 /* Check if the thread has exited. */
1992 if (WIFEXITED (status) || WIFSIGNALED (status))
1993 {
1994 thread_dead = 1;
1995 if (debug_linux_nat)
1996 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1997 target_pid_to_str (lp->ptid));
1998 }
1999
2000 if (thread_dead)
2001 {
e26af52f 2002 exit_lwp (lp);
d6b0e80f
AC
2003 return 0;
2004 }
2005
2006 gdb_assert (WIFSTOPPED (status));
2007
2008 /* Handle GNU/Linux's extended waitstatus for trace events. */
2009 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2010 {
2011 if (debug_linux_nat)
2012 fprintf_unfiltered (gdb_stdlog,
2013 "WL: Handling extended status 0x%06x\n",
2014 status);
3d799a95 2015 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2016 return wait_lwp (lp);
2017 }
2018
2019 return status;
2020}
2021
9f0bdab8
DJ
2022/* Save the most recent siginfo for LP. This is currently only called
2023 for SIGTRAP; some ports use the si_addr field for
2024 target_stopped_data_address. In the future, it may also be used to
2025 restore the siginfo of requeued signals. */
2026
2027static void
2028save_siginfo (struct lwp_info *lp)
2029{
2030 errno = 0;
2031 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2032 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2033
2034 if (errno != 0)
2035 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2036}
2037
d6b0e80f
AC
2038/* Send a SIGSTOP to LP. */
2039
2040static int
2041stop_callback (struct lwp_info *lp, void *data)
2042{
2043 if (!lp->stopped && !lp->signalled)
2044 {
2045 int ret;
2046
2047 if (debug_linux_nat)
2048 {
2049 fprintf_unfiltered (gdb_stdlog,
2050 "SC: kill %s **<SIGSTOP>**\n",
2051 target_pid_to_str (lp->ptid));
2052 }
2053 errno = 0;
2054 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2055 if (debug_linux_nat)
2056 {
2057 fprintf_unfiltered (gdb_stdlog,
2058 "SC: lwp kill %d %s\n",
2059 ret,
2060 errno ? safe_strerror (errno) : "ERRNO-OK");
2061 }
2062
2063 lp->signalled = 1;
2064 gdb_assert (lp->status == 0);
2065 }
2066
2067 return 0;
2068}
2069
57380f4e 2070/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2071
2072static int
57380f4e
DJ
2073linux_nat_has_pending_sigint (int pid)
2074{
2075 sigset_t pending, blocked, ignored;
2076 int i;
2077
2078 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2079
2080 if (sigismember (&pending, SIGINT)
2081 && !sigismember (&ignored, SIGINT))
2082 return 1;
2083
2084 return 0;
2085}
2086
2087/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2088
2089static int
2090set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2091{
57380f4e
DJ
2092 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2093 flag to consume the next one. */
2094 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2095 && WSTOPSIG (lp->status) == SIGINT)
2096 lp->status = 0;
2097 else
2098 lp->ignore_sigint = 1;
2099
2100 return 0;
2101}
2102
2103/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2104 This function is called after we know the LWP has stopped; if the LWP
2105 stopped before the expected SIGINT was delivered, then it will never have
2106 arrived. Also, if the signal was delivered to a shared queue and consumed
2107 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2108
57380f4e
DJ
2109static void
2110maybe_clear_ignore_sigint (struct lwp_info *lp)
2111{
2112 if (!lp->ignore_sigint)
2113 return;
2114
2115 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2116 {
2117 if (debug_linux_nat)
2118 fprintf_unfiltered (gdb_stdlog,
2119 "MCIS: Clearing bogus flag for %s\n",
2120 target_pid_to_str (lp->ptid));
2121 lp->ignore_sigint = 0;
2122 }
2123}
2124
2125/* Wait until LP is stopped. */
2126
2127static int
2128stop_wait_callback (struct lwp_info *lp, void *data)
2129{
d6b0e80f
AC
2130 if (!lp->stopped)
2131 {
2132 int status;
2133
2134 status = wait_lwp (lp);
2135 if (status == 0)
2136 return 0;
2137
57380f4e
DJ
2138 if (lp->ignore_sigint && WIFSTOPPED (status)
2139 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2140 {
57380f4e 2141 lp->ignore_sigint = 0;
d6b0e80f
AC
2142
2143 errno = 0;
2144 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2145 if (debug_linux_nat)
2146 fprintf_unfiltered (gdb_stdlog,
57380f4e 2147 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
d6b0e80f
AC
2148 target_pid_to_str (lp->ptid),
2149 errno ? safe_strerror (errno) : "OK");
2150
57380f4e 2151 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2152 }
2153
57380f4e
DJ
2154 maybe_clear_ignore_sigint (lp);
2155
d6b0e80f
AC
2156 if (WSTOPSIG (status) != SIGSTOP)
2157 {
2158 if (WSTOPSIG (status) == SIGTRAP)
2159 {
2160 /* If a LWP other than the LWP that we're reporting an
2161 event for has hit a GDB breakpoint (as opposed to
2162 some random trap signal), then just arrange for it to
2163 hit it again later. We don't keep the SIGTRAP status
2164 and don't forward the SIGTRAP signal to the LWP. We
2165 will handle the current event, eventually we will
2166 resume all LWPs, and this one will get its breakpoint
2167 trap again.
2168
2169 If we do not do this, then we run the risk that the
2170 user will delete or disable the breakpoint, but the
2171 thread will have already tripped on it. */
2172
9f0bdab8
DJ
2173 /* Save the trap's siginfo in case we need it later. */
2174 save_siginfo (lp);
2175
d6b0e80f
AC
2176 /* Now resume this LWP and get the SIGSTOP event. */
2177 errno = 0;
2178 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2179 if (debug_linux_nat)
2180 {
2181 fprintf_unfiltered (gdb_stdlog,
2182 "PTRACE_CONT %s, 0, 0 (%s)\n",
2183 target_pid_to_str (lp->ptid),
2184 errno ? safe_strerror (errno) : "OK");
2185
2186 fprintf_unfiltered (gdb_stdlog,
2187 "SWC: Candidate SIGTRAP event in %s\n",
2188 target_pid_to_str (lp->ptid));
2189 }
710151dd
PA
2190 /* Hold this event/waitstatus while we check to see if
2191 there are any more (we still want to get that SIGSTOP). */
57380f4e 2192 stop_wait_callback (lp, NULL);
710151dd
PA
2193
2194 if (target_can_async_p ())
d6b0e80f 2195 {
710151dd
PA
2196 /* Don't leave a pending wait status in async mode.
2197 Retrigger the breakpoint. */
2198 if (!cancel_breakpoint (lp))
d6b0e80f 2199 {
710151dd
PA
2200 /* There was no gdb breakpoint set at pc. Put
2201 the event back in the queue. */
2202 if (debug_linux_nat)
2203 fprintf_unfiltered (gdb_stdlog,
2204 "SWC: kill %s, %s\n",
2205 target_pid_to_str (lp->ptid),
2206 status_to_str ((int) status));
2207 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2208 }
2209 }
2210 else
2211 {
2212 /* Hold the SIGTRAP for handling by
2213 linux_nat_wait. */
2214 /* If there's another event, throw it back into the
2215 queue. */
2216 if (lp->status)
2217 {
2218 if (debug_linux_nat)
2219 fprintf_unfiltered (gdb_stdlog,
2220 "SWC: kill %s, %s\n",
2221 target_pid_to_str (lp->ptid),
2222 status_to_str ((int) status));
2223 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2224 }
710151dd
PA
2225 /* Save the sigtrap event. */
2226 lp->status = status;
d6b0e80f 2227 }
d6b0e80f
AC
2228 return 0;
2229 }
2230 else
2231 {
2232 /* The thread was stopped with a signal other than
2233 SIGSTOP, and didn't accidentally trip a breakpoint. */
2234
2235 if (debug_linux_nat)
2236 {
2237 fprintf_unfiltered (gdb_stdlog,
2238 "SWC: Pending event %s in %s\n",
2239 status_to_str ((int) status),
2240 target_pid_to_str (lp->ptid));
2241 }
2242 /* Now resume this LWP and get the SIGSTOP event. */
2243 errno = 0;
2244 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2245 if (debug_linux_nat)
2246 fprintf_unfiltered (gdb_stdlog,
2247 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2248 target_pid_to_str (lp->ptid),
2249 errno ? safe_strerror (errno) : "OK");
2250
2251 /* Hold this event/waitstatus while we check to see if
2252 there are any more (we still want to get that SIGSTOP). */
57380f4e 2253 stop_wait_callback (lp, NULL);
710151dd
PA
2254
2255 /* If the lp->status field is still empty, use it to
2256 hold this event. If not, then this event must be
2257 returned to the event queue of the LWP. */
2258 if (lp->status || target_can_async_p ())
d6b0e80f
AC
2259 {
2260 if (debug_linux_nat)
2261 {
2262 fprintf_unfiltered (gdb_stdlog,
2263 "SWC: kill %s, %s\n",
2264 target_pid_to_str (lp->ptid),
2265 status_to_str ((int) status));
2266 }
2267 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2268 }
710151dd
PA
2269 else
2270 lp->status = status;
d6b0e80f
AC
2271 return 0;
2272 }
2273 }
2274 else
2275 {
2276 /* We caught the SIGSTOP that we intended to catch, so
2277 there's no SIGSTOP pending. */
2278 lp->stopped = 1;
2279 lp->signalled = 0;
2280 }
2281 }
2282
2283 return 0;
2284}
2285
d6b0e80f
AC
2286/* Return non-zero if LP has a wait status pending. */
2287
2288static int
2289status_callback (struct lwp_info *lp, void *data)
2290{
2291 /* Only report a pending wait status if we pretend that this has
2292 indeed been resumed. */
2293 return (lp->status != 0 && lp->resumed);
2294}
2295
2296/* Return non-zero if LP isn't stopped. */
2297
2298static int
2299running_callback (struct lwp_info *lp, void *data)
2300{
2301 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2302}
2303
2304/* Count the LWP's that have had events. */
2305
2306static int
2307count_events_callback (struct lwp_info *lp, void *data)
2308{
2309 int *count = data;
2310
2311 gdb_assert (count != NULL);
2312
e09490f1
DJ
2313 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2314 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2315 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2316 (*count)++;
2317
2318 return 0;
2319}
2320
2321/* Select the LWP (if any) that is currently being single-stepped. */
2322
2323static int
2324select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2325{
2326 if (lp->step && lp->status != 0)
2327 return 1;
2328 else
2329 return 0;
2330}
2331
2332/* Select the Nth LWP that has had a SIGTRAP event. */
2333
2334static int
2335select_event_lwp_callback (struct lwp_info *lp, void *data)
2336{
2337 int *selector = data;
2338
2339 gdb_assert (selector != NULL);
2340
e09490f1
DJ
2341 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2342 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2343 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2344 if ((*selector)-- == 0)
2345 return 1;
2346
2347 return 0;
2348}
2349
710151dd
PA
2350static int
2351cancel_breakpoint (struct lwp_info *lp)
2352{
2353 /* Arrange for a breakpoint to be hit again later. We don't keep
2354 the SIGTRAP status and don't forward the SIGTRAP signal to the
2355 LWP. We will handle the current event, eventually we will resume
2356 this LWP, and this breakpoint will trap again.
2357
2358 If we do not do this, then we run the risk that the user will
2359 delete or disable the breakpoint, but the LWP will have already
2360 tripped on it. */
2361
515630c5
UW
2362 struct regcache *regcache = get_thread_regcache (lp->ptid);
2363 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2364 CORE_ADDR pc;
2365
2366 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
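 /* gdbarch_decr_pc_after_break is the amount by which the PC ends up
    past the breakpoint address after the trap (e.g. 1 on x86, the
    length of the int3 instruction); subtracting it yields the
    address where the breakpoint would have been inserted. */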
2367 if (breakpoint_inserted_here_p (pc))
710151dd
PA
2368 {
2369 if (debug_linux_nat)
2370 fprintf_unfiltered (gdb_stdlog,
2371 "CB: Push back breakpoint for %s\n",
2372 target_pid_to_str (lp->ptid));
2373
2374 /* Back up the PC if necessary. */
515630c5
UW
2375 if (gdbarch_decr_pc_after_break (gdbarch))
2376 regcache_write_pc (regcache, pc);
2377
710151dd
PA
2378 return 1;
2379 }
2380 return 0;
2381}
2382
d6b0e80f
AC
2383static int
2384cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2385{
2386 struct lwp_info *event_lp = data;
2387
2388 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2389 if (lp == event_lp)
2390 return 0;
2391
2392 /* If a LWP other than the LWP that we're reporting an event for has
2393 hit a GDB breakpoint (as opposed to some random trap signal),
2394 then just arrange for it to hit it again later. We don't keep
2395 the SIGTRAP status and don't forward the SIGTRAP signal to the
2396 LWP. We will handle the current event, eventually we will resume
2397 all LWPs, and this one will get its breakpoint trap again.
2398
2399 If we do not do this, then we run the risk that the user will
2400 delete or disable the breakpoint, but the LWP will have already
2401 tripped on it. */
2402
2403 if (lp->status != 0
2404 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2405 && cancel_breakpoint (lp))
2406 /* Throw away the SIGTRAP. */
2407 lp->status = 0;
d6b0e80f
AC
2408
2409 return 0;
2410}
2411
2412/* Select one LWP out of those that have events pending. */
2413
2414static void
2415select_event_lwp (struct lwp_info **orig_lp, int *status)
2416{
2417 int num_events = 0;
2418 int random_selector;
2419 struct lwp_info *event_lp;
2420
ac264b3b 2421 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2422 (*orig_lp)->status = *status;
2423
2424 /* Give preference to any LWP that is being single-stepped. */
2425 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2426 if (event_lp != NULL)
2427 {
2428 if (debug_linux_nat)
2429 fprintf_unfiltered (gdb_stdlog,
2430 "SEL: Select single-step %s\n",
2431 target_pid_to_str (event_lp->ptid));
2432 }
2433 else
2434 {
2435 /* No single-stepping LWP. Select one at random, out of those
2436 which have had SIGTRAP events. */
2437
2438 /* First see how many SIGTRAP events we have. */
2439 iterate_over_lwps (count_events_callback, &num_events);
2440
2441 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2442 random_selector = (int)
2443 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
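 /* The expression above scales rand () from [0, RAND_MAX] down to an
    integer in [0, num_events), so every LWP with a pending SIGTRAP
    is equally likely to be chosen. */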
2444
2445 if (debug_linux_nat && num_events > 1)
2446 fprintf_unfiltered (gdb_stdlog,
2447 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2448 num_events, random_selector);
2449
2450 event_lp = iterate_over_lwps (select_event_lwp_callback,
2451 &random_selector);
2452 }
2453
2454 if (event_lp != NULL)
2455 {
2456 /* Switch the event LWP. */
2457 *orig_lp = event_lp;
2458 *status = event_lp->status;
2459 }
2460
2461 /* Flush the wait status for the event LWP. */
2462 (*orig_lp)->status = 0;
2463}
2464
2465/* Return non-zero if LP has been resumed. */
2466
2467static int
2468resumed_callback (struct lwp_info *lp, void *data)
2469{
2470 return lp->resumed;
2471}
2472
d6b0e80f
AC
2473/* Stop an active thread, verify it still exists, then resume it. */
2474
2475static int
2476stop_and_resume_callback (struct lwp_info *lp, void *data)
2477{
2478 struct lwp_info *ptr;
2479
2480 if (!lp->stopped && !lp->signalled)
2481 {
2482 stop_callback (lp, NULL);
2483 stop_wait_callback (lp, NULL);
2484 /* Resume if the lwp still exists. */
2485 for (ptr = lwp_list; ptr; ptr = ptr->next)
2486 if (lp == ptr)
2487 {
2488 resume_callback (lp, NULL);
2489 resume_set_callback (lp, NULL);
2490 }
2491 }
2492 return 0;
2493}
2494
02f3fc28 2495/* Check if we should go on and pass this event to common code.
fa2c6a57 2496 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
2497static struct lwp_info *
2498linux_nat_filter_event (int lwpid, int status, int options)
2499{
2500 struct lwp_info *lp;
2501
2502 lp = find_lwp_pid (pid_to_ptid (lwpid));
2503
2504 /* Check for stop events reported by a process we didn't already
2505 know about - anything not already in our LWP list.
2506
2507 If we're expecting to receive stopped processes after
2508 fork, vfork, and clone events, then we'll just add the
2509 new one to our list and go back to waiting for the event
2510 to be reported - the stopped process might be returned
2511 from waitpid before or after the event is. */
2512 if (WIFSTOPPED (status) && !lp)
2513 {
2514 linux_record_stopped_pid (lwpid, status);
2515 return NULL;
2516 }
2517
2518 /* Make sure we don't report an event for the exit of an LWP not in
2519 our list, i.e. not part of the current process. This can happen
 2521 if we detach from a program we originally forked and then it
2521 exits. */
2522 if (!WIFSTOPPED (status) && !lp)
2523 return NULL;
2524
2525 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2526 CLONE_PTRACE processes which do not use the thread library -
2527 otherwise we wouldn't find the new LWP this way. That doesn't
2528 currently work, and the following code is currently unreachable
2529 due to the two blocks above. If it's fixed some day, this code
2530 should be broken out into a function so that we can also pick up
2531 LWPs from the new interface. */
2532 if (!lp)
2533 {
2534 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2535 if (options & __WCLONE)
2536 lp->cloned = 1;
2537
2538 gdb_assert (WIFSTOPPED (status)
2539 && WSTOPSIG (status) == SIGSTOP);
2540 lp->signalled = 1;
2541
2542 if (!in_thread_list (inferior_ptid))
2543 {
2544 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2545 GET_PID (inferior_ptid));
2546 add_thread (inferior_ptid);
2547 }
2548
2549 add_thread (lp->ptid);
2550 }
2551
2552 /* Save the trap's siginfo in case we need it later. */
2553 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2554 save_siginfo (lp);
2555
2556 /* Handle GNU/Linux's extended waitstatus for trace events. */
2557 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2558 {
2559 if (debug_linux_nat)
2560 fprintf_unfiltered (gdb_stdlog,
2561 "LLW: Handling extended status 0x%06x\n",
2562 status);
2563 if (linux_handle_extended_wait (lp, status, 0))
2564 return NULL;
2565 }
2566
2567 /* Check if the thread has exited. */
2568 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2569 {
2570 /* If this is the main thread, we must stop all threads and
2571 verify if they are still alive. This is because in the nptl
2572 thread model, there is no signal issued for exiting LWPs
2573 other than the main thread. We only get the main thread exit
2574 signal once all child threads have already exited. If we
2575 stop all the threads and use the stop_wait_callback to check
2576 if they have exited we can determine whether this signal
2577 should be ignored or whether it means the end of the debugged
2578 application, regardless of which threading model is being
2579 used. */
2580 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2581 {
2582 lp->stopped = 1;
2583 iterate_over_lwps (stop_and_resume_callback, NULL);
2584 }
2585
2586 if (debug_linux_nat)
2587 fprintf_unfiltered (gdb_stdlog,
2588 "LLW: %s exited.\n",
2589 target_pid_to_str (lp->ptid));
2590
2591 exit_lwp (lp);
2592
2593 /* If there is at least one more LWP, then the exit signal was
2594 not the end of the debugged application and should be
2595 ignored. */
2596 if (num_lwps > 0)
4c28f408 2597 return NULL;
02f3fc28
PA
2598 }
2599
2600 /* Check if the current LWP has previously exited. In the nptl
2601 thread model, LWPs other than the main thread do not issue
2602 signals when they exit so we must check whenever the thread has
2603 stopped. A similar check is made in stop_wait_callback(). */
2604 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2605 {
2606 if (debug_linux_nat)
2607 fprintf_unfiltered (gdb_stdlog,
2608 "LLW: %s exited.\n",
2609 target_pid_to_str (lp->ptid));
2610
2611 exit_lwp (lp);
2612
2613 /* Make sure there is at least one thread running. */
2614 gdb_assert (iterate_over_lwps (running_callback, NULL));
2615
2616 /* Discard the event. */
2617 return NULL;
2618 }
2619
2620 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2621 an attempt to stop an LWP. */
2622 if (lp->signalled
2623 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2624 {
2625 if (debug_linux_nat)
2626 fprintf_unfiltered (gdb_stdlog,
2627 "LLW: Delayed SIGSTOP caught for %s.\n",
2628 target_pid_to_str (lp->ptid));
2629
2630 /* This is a delayed SIGSTOP. */
2631 lp->signalled = 0;
2632
2633 registers_changed ();
2634
2635 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2636 lp->step, TARGET_SIGNAL_0);
2637 if (debug_linux_nat)
2638 fprintf_unfiltered (gdb_stdlog,
2639 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2640 lp->step ?
2641 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2642 target_pid_to_str (lp->ptid));
2643
2644 lp->stopped = 0;
2645 gdb_assert (lp->resumed);
2646
2647 /* Discard the event. */
2648 return NULL;
2649 }
2650
57380f4e
DJ
2651 /* Make sure we don't report a SIGINT that we have already displayed
2652 for another thread. */
2653 if (lp->ignore_sigint
2654 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2655 {
2656 if (debug_linux_nat)
2657 fprintf_unfiltered (gdb_stdlog,
2658 "LLW: Delayed SIGINT caught for %s.\n",
2659 target_pid_to_str (lp->ptid));
2660
2661 /* This is a delayed SIGINT. */
2662 lp->ignore_sigint = 0;
2663
2664 registers_changed ();
2665 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2666 lp->step, TARGET_SIGNAL_0);
2667 if (debug_linux_nat)
2668 fprintf_unfiltered (gdb_stdlog,
2669 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2670 lp->step ?
2671 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2672 target_pid_to_str (lp->ptid));
2673
2674 lp->stopped = 0;
2675 gdb_assert (lp->resumed);
2676
2677 /* Discard the event. */
2678 return NULL;
2679 }
2680
02f3fc28
PA
2681 /* An interesting event. */
2682 gdb_assert (lp);
2683 return lp;
2684}
2685
b84876c2
PA
2686/* Get the events stored in the pipe into the local queue, so they are
2687 accessible to queued_waitpid. We need to do this, since it is not
2688 always the case that the event at the head of the pipe is the event
2689 we want. */
2690
2691static void
2692pipe_to_local_event_queue (void)
2693{
2694 if (debug_linux_nat_async)
2695 fprintf_unfiltered (gdb_stdlog,
2696 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2697 linux_nat_num_queued_events);
2698 while (linux_nat_num_queued_events)
2699 {
2700 int lwpid, status, options;
b84876c2 2701 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2702 gdb_assert (lwpid > 0);
2703 push_waitpid (lwpid, status, options);
2704 }
2705}
2706
2707/* Get the unprocessed events stored in the local queue back into the
2708 pipe, so the event loop realizes there's something else to
2709 process. */
2710
2711static void
2712local_event_queue_to_pipe (void)
2713{
2714 struct waitpid_result *w = waitpid_queue;
2715 while (w)
2716 {
2717 struct waitpid_result *next = w->next;
2718 linux_nat_event_pipe_push (w->pid,
2719 w->status,
2720 w->options);
2721 xfree (w);
2722 w = next;
2723 }
2724 waitpid_queue = NULL;
2725
2726 if (debug_linux_nat_async)
2727 fprintf_unfiltered (gdb_stdlog,
2728 "LEQTP: linux_nat_num_queued_events(%d)\n",
2729 linux_nat_num_queued_events);
2730}
2731
d6b0e80f
AC
2732static ptid_t
2733linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2734{
2735 struct lwp_info *lp = NULL;
2736 int options = 0;
2737 int status = 0;
2738 pid_t pid = PIDGET (ptid);
d6b0e80f 2739
b84876c2
PA
2740 if (debug_linux_nat_async)
2741 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2742
f973ed9c
DJ
2743 /* The first time we get here after starting a new inferior, we may
2744 not have added it to the LWP list yet - this is the earliest
2745 moment at which we know its PID. */
2746 if (num_lwps == 0)
2747 {
2748 gdb_assert (!is_lwp (inferior_ptid));
2749
27c9d204
PA
2750 /* Upgrade the main thread's ptid. */
2751 thread_change_ptid (inferior_ptid,
2752 BUILD_LWP (GET_PID (inferior_ptid),
2753 GET_PID (inferior_ptid)));
2754
f973ed9c
DJ
2755 lp = add_lwp (inferior_ptid);
2756 lp->resumed = 1;
2757 }
2758
84e46146
PA
2759 /* Block events while we're here. */
2760 linux_nat_async_events (sigchld_sync);
d6b0e80f
AC
2761
2762retry:
2763
f973ed9c
DJ
2764 /* Make sure there is at least one LWP that has been resumed. */
2765 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2766
2767 /* First check if there is a LWP with a wait status pending. */
2768 if (pid == -1)
2769 {
2770 /* Any LWP that's been resumed will do. */
2771 lp = iterate_over_lwps (status_callback, NULL);
2772 if (lp)
2773 {
710151dd
PA
2774 if (target_can_async_p ())
2775 internal_error (__FILE__, __LINE__,
2776 "Found an LWP with a pending status in async mode.");
2777
d6b0e80f
AC
2778 status = lp->status;
2779 lp->status = 0;
2780
2781 if (debug_linux_nat && status)
2782 fprintf_unfiltered (gdb_stdlog,
2783 "LLW: Using pending wait status %s for %s.\n",
2784 status_to_str (status),
2785 target_pid_to_str (lp->ptid));
2786 }
2787
b84876c2 2788 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2789 cloned and uncloned processes. We start with the cloned
2790 processes. */
2791 options = __WCLONE | WNOHANG;
2792 }
2793 else if (is_lwp (ptid))
2794 {
2795 if (debug_linux_nat)
2796 fprintf_unfiltered (gdb_stdlog,
2797 "LLW: Waiting for specific LWP %s.\n",
2798 target_pid_to_str (ptid));
2799
2800 /* We have a specific LWP to check. */
2801 lp = find_lwp_pid (ptid);
2802 gdb_assert (lp);
2803 status = lp->status;
2804 lp->status = 0;
2805
2806 if (debug_linux_nat && status)
2807 fprintf_unfiltered (gdb_stdlog,
2808 "LLW: Using pending wait status %s for %s.\n",
2809 status_to_str (status),
2810 target_pid_to_str (lp->ptid));
2811
2812 /* If we have to wait, take into account whether PID is a cloned
2813 process or not. And we have to convert it to something that
2814 the layer beneath us can understand. */
2815 options = lp->cloned ? __WCLONE : 0;
2816 pid = GET_LWP (ptid);
2817 }
2818
2819 if (status && lp->signalled)
2820 {
2821 /* A pending SIGSTOP may interfere with the normal stream of
2822 events. In a typical case where interference is a problem,
2823 we have a SIGSTOP signal pending for LWP A while
2824 single-stepping it, encounter an event in LWP B, and take the
2825 pending SIGSTOP while trying to stop LWP A. After processing
2826 the event in LWP B, LWP A is continued, and we'll never see
2827 the SIGTRAP associated with the last time we were
2828 single-stepping LWP A. */
2829
2830 /* Resume the thread. It should halt immediately returning the
2831 pending SIGSTOP. */
2832 registers_changed ();
10d6c8cd
DJ
2833 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2834 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2835 if (debug_linux_nat)
2836 fprintf_unfiltered (gdb_stdlog,
2837 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2838 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2839 target_pid_to_str (lp->ptid));
2840 lp->stopped = 0;
2841 gdb_assert (lp->resumed);
2842
2843 /* This should catch the pending SIGSTOP. */
2844 stop_wait_callback (lp, NULL);
2845 }
2846
b84876c2
PA
2847 if (!target_can_async_p ())
2848 {
2849 /* Causes SIGINT to be passed on to the attached process. */
2850 set_sigint_trap ();
2851 set_sigio_trap ();
2852 }
d6b0e80f
AC
2853
2854 while (status == 0)
2855 {
2856 pid_t lwpid;
2857
b84876c2
PA
2858 if (target_can_async_p ())
2859 /* In async mode, don't ever block. Only look at the locally
2860 queued events. */
2861 lwpid = queued_waitpid (pid, &status, options);
2862 else
2863 lwpid = my_waitpid (pid, &status, options);
2864
d6b0e80f
AC
2865 if (lwpid > 0)
2866 {
2867 gdb_assert (pid == -1 || lwpid == pid);
2868
2869 if (debug_linux_nat)
2870 {
2871 fprintf_unfiltered (gdb_stdlog,
2872 "LLW: waitpid %ld received %s\n",
2873 (long) lwpid, status_to_str (status));
2874 }
2875
02f3fc28 2876 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2877 if (!lp)
2878 {
02f3fc28 2879 /* A discarded event. */
d6b0e80f
AC
2880 status = 0;
2881 continue;
2882 }
2883
2884 break;
2885 }
2886
2887 if (pid == -1)
2888 {
2889 /* Alternate between checking cloned and uncloned processes. */
2890 options ^= __WCLONE;
2891
b84876c2
PA
2892 /* And every time we have checked both:
2893 In async mode, return to event loop;
2894 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2895 if (options & __WCLONE)
b84876c2
PA
2896 {
2897 if (target_can_async_p ())
2898 {
2899 /* No interesting event. */
2900 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2901
2902 /* Get ready for the next event. */
2903 target_async (inferior_event_handler, 0);
2904
2905 if (debug_linux_nat_async)
2906 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2907
2908 return minus_one_ptid;
2909 }
2910
2911 sigsuspend (&suspend_mask);
2912 }
d6b0e80f
AC
2913 }
2914
2915 /* We shouldn't end up here unless we want to try again. */
2916 gdb_assert (status == 0);
2917 }
2918
b84876c2
PA
2919 if (!target_can_async_p ())
2920 {
2921 clear_sigio_trap ();
2922 clear_sigint_trap ();
2923 }
d6b0e80f
AC
2924
2925 gdb_assert (lp);
2926
2927 /* Don't report signals that GDB isn't interested in, such as
2928 signals that are neither printed nor stopped upon. Stopping all
2929 threads can be a bit time-consuming so if we want decent
2930 performance with heavily multi-threaded programs, especially when
2931 they're using a high frequency timer, we'd better avoid it if we
2932 can. */
2933
2934 if (WIFSTOPPED (status))
2935 {
2936 int signo = target_signal_from_host (WSTOPSIG (status));
d6b48e9c
PA
2937 struct inferior *inf;
2938
2939 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2940 gdb_assert (inf);
d6b0e80f 2941
d6b48e9c
PA
2942 /* Defer to common code if we get a signal while
2943 single-stepping, since that may need special care, e.g. to
2944 skip the signal handler, or, if we're gaining control of the
2945 inferior. */
d539ed7e 2946 if (!lp->step
d6b48e9c 2947 && inf->stop_soon == NO_STOP_QUIETLY
d539ed7e 2948 && signal_stop_state (signo) == 0
d6b0e80f
AC
2949 && signal_print_state (signo) == 0
2950 && signal_pass_state (signo) == 1)
2951 {
 2952 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2953 here? It is not clear we should. GDB may not expect
2954 other threads to run. On the other hand, not resuming
2955 newly attached threads may cause an unwanted delay in
2956 getting them running. */
2957 registers_changed ();
10d6c8cd
DJ
2958 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2959 lp->step, signo);
d6b0e80f
AC
2960 if (debug_linux_nat)
2961 fprintf_unfiltered (gdb_stdlog,
2962 "LLW: %s %s, %s (preempt 'handle')\n",
2963 lp->step ?
2964 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2965 target_pid_to_str (lp->ptid),
2966 signo ? strsignal (signo) : "0");
2967 lp->stopped = 0;
2968 status = 0;
2969 goto retry;
2970 }
2971
1ad15515 2972 if (!non_stop)
d6b0e80f 2973 {
1ad15515
PA
2974 /* Only do the below in all-stop, as we currently use SIGINT
2975 to implement target_stop (see linux_nat_stop) in
2976 non-stop. */
2977 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2978 {
2979 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2980 forwarded to the entire process group, that is, all LWPs
2981 will receive it - unless they're using CLONE_THREAD to
2982 share signals. Since we only want to report it once, we
2983 mark it as ignored for all LWPs except this one. */
2984 iterate_over_lwps (set_ignore_sigint, NULL);
2985 lp->ignore_sigint = 0;
2986 }
2987 else
2988 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
2989 }
2990 }
2991
2992 /* This LWP is stopped now. */
2993 lp->stopped = 1;
2994
2995 if (debug_linux_nat)
2996 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
2997 status_to_str (status), target_pid_to_str (lp->ptid));
2998
4c28f408
PA
2999 if (!non_stop)
3000 {
3001 /* Now stop all other LWP's ... */
3002 iterate_over_lwps (stop_callback, NULL);
3003
3004 /* ... and wait until all of them have reported back that
3005 they're no longer running. */
57380f4e 3006 iterate_over_lwps (stop_wait_callback, NULL);
4c28f408
PA
3007
3008 /* If we're not waiting for a specific LWP, choose an event LWP
3009 from among those that have had events. Giving equal priority
3010 to all LWPs that have had events helps prevent
3011 starvation. */
3012 if (pid == -1)
3013 select_event_lwp (&lp, &status);
3014 }
d6b0e80f
AC
3015
3016 /* Now that we've selected our final event LWP, cancel any
3017 breakpoints in other LWPs that have hit a GDB breakpoint. See
3018 the comment in cancel_breakpoints_callback to find out why. */
3019 iterate_over_lwps (cancel_breakpoints_callback, lp);
3020
d6b0e80f
AC
3021 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3022 {
d6b0e80f
AC
3023 if (debug_linux_nat)
3024 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3025 "LLW: trap ptid is %s.\n",
3026 target_pid_to_str (lp->ptid));
d6b0e80f 3027 }
d6b0e80f
AC
3028
3029 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3030 {
3031 *ourstatus = lp->waitstatus;
3032 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3033 }
3034 else
3035 store_waitstatus (ourstatus, status);
3036
b84876c2
PA
3037 /* Get ready for the next event. */
3038 if (target_can_async_p ())
3039 target_async (inferior_event_handler, 0);
3040
3041 if (debug_linux_nat_async)
3042 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3043
f973ed9c 3044 return lp->ptid;
d6b0e80f
AC
3045}
3046
3047static int
3048kill_callback (struct lwp_info *lp, void *data)
3049{
3050 errno = 0;
3051 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3052 if (debug_linux_nat)
3053 fprintf_unfiltered (gdb_stdlog,
3054 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3055 target_pid_to_str (lp->ptid),
3056 errno ? safe_strerror (errno) : "OK");
3057
3058 return 0;
3059}
3060
3061static int
3062kill_wait_callback (struct lwp_info *lp, void *data)
3063{
3064 pid_t pid;
3065
3066 /* We must make sure that there are no pending events (delayed
 3067 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3068 program doesn't interfere with any following debugging session. */
3069
3070 /* For cloned processes we must check both with __WCLONE and
3071 without, since the exit status of a cloned process isn't reported
3072 with __WCLONE. */
3073 if (lp->cloned)
3074 {
3075 do
3076 {
58aecb61 3077 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3078 if (pid != (pid_t) -1)
d6b0e80f 3079 {
e85a822c
DJ
3080 if (debug_linux_nat)
3081 fprintf_unfiltered (gdb_stdlog,
3082 "KWC: wait %s received unknown.\n",
3083 target_pid_to_str (lp->ptid));
3084 /* The Linux kernel sometimes fails to kill a thread
3085 completely after PTRACE_KILL; that goes from the stop
3086 point in do_fork out to the one in
 3087 get_signal_to_deliver and waits again. So kill it
3088 again. */
3089 kill_callback (lp, NULL);
d6b0e80f
AC
3090 }
3091 }
3092 while (pid == GET_LWP (lp->ptid));
3093
3094 gdb_assert (pid == -1 && errno == ECHILD);
3095 }
3096
3097 do
3098 {
58aecb61 3099 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3100 if (pid != (pid_t) -1)
d6b0e80f 3101 {
e85a822c
DJ
3102 if (debug_linux_nat)
3103 fprintf_unfiltered (gdb_stdlog,
3104 "KWC: wait %s received unk.\n",
3105 target_pid_to_str (lp->ptid));
3106 /* See the call to kill_callback above. */
3107 kill_callback (lp, NULL);
d6b0e80f
AC
3108 }
3109 }
3110 while (pid == GET_LWP (lp->ptid));
3111
3112 gdb_assert (pid == -1 && errno == ECHILD);
3113 return 0;
3114}
3115
3116static void
3117linux_nat_kill (void)
3118{
f973ed9c
DJ
3119 struct target_waitstatus last;
3120 ptid_t last_ptid;
3121 int status;
d6b0e80f 3122
b84876c2
PA
3123 if (target_can_async_p ())
3124 target_async (NULL, 0);
3125
f973ed9c
DJ
3126 /* If we're stopped while forking and we haven't followed yet,
3127 kill the other task. We need to do this first because the
3128 parent will be sleeping if this is a vfork. */
d6b0e80f 3129
f973ed9c 3130 get_last_target_status (&last_ptid, &last);
d6b0e80f 3131
f973ed9c
DJ
3132 if (last.kind == TARGET_WAITKIND_FORKED
3133 || last.kind == TARGET_WAITKIND_VFORKED)
3134 {
3a3e9ee3 3135 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3136 wait (&status);
3137 }
3138
3139 if (forks_exist_p ())
b84876c2
PA
3140 {
3141 linux_fork_killall ();
3142 drain_queued_events (-1);
3143 }
f973ed9c
DJ
3144 else
3145 {
4c28f408
PA
3146 /* Stop all threads before killing them, since ptrace requires
 3147 that the thread is stopped to successfully PTRACE_KILL. */
3148 iterate_over_lwps (stop_callback, NULL);
3149 /* ... and wait until all of them have reported back that
3150 they're no longer running. */
3151 iterate_over_lwps (stop_wait_callback, NULL);
3152
f973ed9c
DJ
3153 /* Kill all LWP's ... */
3154 iterate_over_lwps (kill_callback, NULL);
3155
3156 /* ... and wait until we've flushed all events. */
3157 iterate_over_lwps (kill_wait_callback, NULL);
3158 }
3159
3160 target_mourn_inferior ();
d6b0e80f
AC
3161}
3162
3163static void
3164linux_nat_mourn_inferior (void)
3165{
d6b0e80f
AC
3166 /* Destroy LWP info; it's no longer valid. */
3167 init_lwp_list ();
3168
f973ed9c 3169 if (! forks_exist_p ())
b84876c2
PA
3170 {
3171 /* Normal case, no other forks available. */
3172 if (target_can_async_p ())
3173 linux_nat_async (NULL, 0);
3174 linux_ops->to_mourn_inferior ();
3175 }
f973ed9c
DJ
3176 else
3177 /* Multi-fork case. The current inferior_ptid has exited, but
3178 there are other viable forks to debug. Delete the exiting
3179 one and context-switch to the first available. */
3180 linux_fork_mourn_inferior ();
d6b0e80f
AC
3181}
3182
10d6c8cd
DJ
3183static LONGEST
3184linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3185 const char *annex, gdb_byte *readbuf,
3186 const gdb_byte *writebuf,
3187 ULONGEST offset, LONGEST len)
d6b0e80f
AC
3188{
3189 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 3190 LONGEST xfer;
d6b0e80f
AC
3191
3192 if (is_lwp (inferior_ptid))
3193 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3194
10d6c8cd
DJ
3195 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3196 offset, len);
d6b0e80f
AC
3197
3198 do_cleanups (old_chain);
3199 return xfer;
3200}
3201
3202static int
3203linux_nat_thread_alive (ptid_t ptid)
3204{
4c28f408
PA
3205 int err;
3206
d6b0e80f
AC
3207 gdb_assert (is_lwp (ptid));
3208
4c28f408
PA
3209 /* Send signal 0 instead of anything ptrace, because ptracing a
3210 running thread errors out claiming that the thread doesn't
3211 exist. */
3212 err = kill_lwp (GET_LWP (ptid), 0);
3213
d6b0e80f
AC
3214 if (debug_linux_nat)
3215 fprintf_unfiltered (gdb_stdlog,
4c28f408 3216 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3217 target_pid_to_str (ptid),
4c28f408 3218 err ? safe_strerror (err) : "OK");
9c0dd46b 3219
4c28f408 3220 if (err != 0)
d6b0e80f
AC
3221 return 0;
3222
3223 return 1;
3224}
3225
3226static char *
3227linux_nat_pid_to_str (ptid_t ptid)
3228{
3229 static char buf[64];
3230
a0ef4274
DJ
3231 if (is_lwp (ptid)
3232 && ((lwp_list && lwp_list->next)
3233 || GET_PID (ptid) != GET_LWP (ptid)))
d6b0e80f
AC
3234 {
3235 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3236 return buf;
3237 }
3238
3239 return normal_pid_to_str (ptid);
3240}
3241
d6b0e80f
AC
3242static void
3243sigchld_handler (int signo)
3244{
c6ebd6cf 3245 if (target_async_permitted
84e46146 3246 && linux_nat_async_events_state != sigchld_sync
b84876c2
PA
3247 && signo == SIGCHLD)
3248 /* It is *always* a bug to hit this. */
3249 internal_error (__FILE__, __LINE__,
3250 "sigchld_handler called when async events are enabled");
3251
d6b0e80f
AC
3252 /* Do nothing. The only reason for this handler is that it allows
3253 us to use sigsuspend in linux_nat_wait above to wait for the
3254 arrival of a SIGCHLD. */
3255}
3256
dba24537
AC
3257/* Accepts an integer PID; Returns a string representing a file that
3258 can be opened to get the symbols for the child process. */
3259
6d8fd2b7
UW
3260static char *
3261linux_child_pid_to_exec_file (int pid)
dba24537
AC
3262{
3263 char *name1, *name2;
3264
3265 name1 = xmalloc (MAXPATHLEN);
3266 name2 = xmalloc (MAXPATHLEN);
3267 make_cleanup (xfree, name1);
3268 make_cleanup (xfree, name2);
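 /* readlink does not NUL-terminate its result, so zero NAME2 first
    to guarantee the string returned below is terminated. */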
3269 memset (name2, 0, MAXPATHLEN);
3270
3271 sprintf (name1, "/proc/%d/exe", pid);
3272 if (readlink (name1, name2, MAXPATHLEN) > 0)
3273 return name2;
3274 else
3275 return name1;
3276}
3277
3278/* Service function for corefiles and info proc. */
3279
3280static int
3281read_mapping (FILE *mapfile,
3282 long long *addr,
3283 long long *endaddr,
3284 char *permissions,
3285 long long *offset,
3286 char *device, long long *inode, char *filename)
3287{
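 /* A line in /proc/PID/maps looks like, e.g.:
      08048000-0804c000 r-xp 00000000 08:01 289382 /bin/cat
    i.e. start-end addresses, permissions, offset, device, inode and
    an optional filename, which is what the fscanf below parses. */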
3288 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3289 addr, endaddr, permissions, offset, device, inode);
3290
2e14c2ea
MS
3291 filename[0] = '\0';
3292 if (ret > 0 && ret != EOF)
dba24537
AC
3293 {
3294 /* Eat everything up to EOL for the filename. This will prevent
3295 weird filenames (such as one with embedded whitespace) from
 3296 confusing this code. It also makes this code more robust with
3297 respect to annotations the kernel may add after the filename.
3298
3299 Note the filename is used for informational purposes
3300 only. */
3301 ret += fscanf (mapfile, "%[^\n]\n", filename);
3302 }
2e14c2ea 3303
dba24537
AC
3304 return (ret != 0 && ret != EOF);
3305}
3306
3307/* Fills the "to_find_memory_regions" target vector. Lists the memory
3308 regions in the inferior for a corefile. */
3309
3310static int
3311linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3312 unsigned long,
3313 int, int, int, void *), void *obfd)
3314{
3315 long long pid = PIDGET (inferior_ptid);
3316 char mapsfilename[MAXPATHLEN];
3317 FILE *mapsfile;
3318 long long addr, endaddr, size, offset, inode;
3319 char permissions[8], device[8], filename[MAXPATHLEN];
3320 int read, write, exec;
3321 int ret;
3322
3323 /* Compose the filename for the /proc memory map, and open it. */
3324 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3325 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3326 error (_("Could not open %s."), mapsfilename);
dba24537
AC
3327
3328 if (info_verbose)
3329 fprintf_filtered (gdb_stdout,
3330 "Reading memory regions from %s\n", mapsfilename);
3331
3332 /* Now iterate until end-of-file. */
3333 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3334 &offset, &device[0], &inode, &filename[0]))
3335 {
3336 size = endaddr - addr;
3337
3338 /* Get the segment's permissions. */
3339 read = (strchr (permissions, 'r') != 0);
3340 write = (strchr (permissions, 'w') != 0);
3341 exec = (strchr (permissions, 'x') != 0);
3342
3343 if (info_verbose)
3344 {
3345 fprintf_filtered (gdb_stdout,
3346 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3347 size, paddr_nz (addr),
3348 read ? 'r' : ' ',
3349 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3350 if (filename[0])
dba24537
AC
3351 fprintf_filtered (gdb_stdout, " for %s", filename);
3352 fprintf_filtered (gdb_stdout, "\n");
3353 }
3354
3355 /* Invoke the callback function to create the corefile
3356 segment. */
3357 func (addr, size, read, write, exec, obfd);
3358 }
3359 fclose (mapsfile);
3360 return 0;
3361}
3362
2020b7ab
PA
3363static int
3364find_signalled_thread (struct thread_info *info, void *data)
3365{
3366 if (info->stop_signal != TARGET_SIGNAL_0
3367 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
3368 return 1;
3369
3370 return 0;
3371}
3372
3373static enum target_signal
3374find_stop_signal (void)
3375{
3376 struct thread_info *info =
3377 iterate_over_threads (find_signalled_thread, NULL);
3378
3379 if (info)
3380 return info->stop_signal;
3381 else
3382 return TARGET_SIGNAL_0;
3383}
3384
dba24537
AC
3385/* Records the thread's register state for the corefile note
3386 section. */
3387
3388static char *
3389linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
3390 char *note_data, int *note_size,
3391 enum target_signal stop_signal)
dba24537
AC
3392{
3393 gdb_gregset_t gregs;
3394 gdb_fpregset_t fpregs;
dba24537 3395 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
3396 struct regcache *regcache = get_thread_regcache (ptid);
3397 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 3398 const struct regset *regset;
55e969c1 3399 int core_regset_p;
594f7785 3400 struct cleanup *old_chain;
17ea7499
CES
3401 struct core_regset_section *sect_list;
3402 char *gdb_regset;
594f7785
UW
3403
3404 old_chain = save_inferior_ptid ();
3405 inferior_ptid = ptid;
3406 target_fetch_registers (regcache, -1);
3407 do_cleanups (old_chain);
4f844a66
DM
3408
3409 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
3410 sect_list = gdbarch_core_regset_sections (gdbarch);
3411
55e969c1
DM
3412 if (core_regset_p
3413 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3414 sizeof (gregs))) != NULL
3415 && regset->collect_regset != NULL)
594f7785 3416 regset->collect_regset (regset, regcache, -1,
55e969c1 3417 &gregs, sizeof (gregs));
4f844a66 3418 else
594f7785 3419 fill_gregset (regcache, &gregs, -1);
4f844a66 3420
55e969c1
DM
3421 note_data = (char *) elfcore_write_prstatus (obfd,
3422 note_data,
3423 note_size,
3424 lwp,
3425 stop_signal, &gregs);
3426
17ea7499
CES
3427 /* The loop below uses the new struct core_regset_section, which stores
3428 the supported section names and sizes for the core file. Note that
3429 note PRSTATUS needs to be treated specially. But the other notes are
3430 structurally the same, so they can benefit from the new struct. */
3431 if (core_regset_p && sect_list != NULL)
3432 while (sect_list->sect_name != NULL)
3433 {
3434 /* .reg was already handled above. */
3435 if (strcmp (sect_list->sect_name, ".reg") == 0)
3436 {
3437 sect_list++;
3438 continue;
3439 }
3440 regset = gdbarch_regset_from_core_section (gdbarch,
3441 sect_list->sect_name,
3442 sect_list->size);
3443 gdb_assert (regset && regset->collect_regset);
3444 gdb_regset = xmalloc (sect_list->size);
3445 regset->collect_regset (regset, regcache, -1,
3446 gdb_regset, sect_list->size);
3447 note_data = (char *) elfcore_write_register_note (obfd,
3448 note_data,
3449 note_size,
3450 sect_list->sect_name,
3451 gdb_regset,
3452 sect_list->size);
3453 xfree (gdb_regset);
3454 sect_list++;
3455 }
dba24537 3456
17ea7499
CES
 3457 /* For architectures that do not have the struct core_regset_section
3458 implemented, we use the old method. When all the architectures have
3459 the new support, the code below should be deleted. */
4f844a66 3460 else
17ea7499
CES
3461 {
3462 if (core_regset_p
3463 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3464 sizeof (fpregs))) != NULL
3465 && regset->collect_regset != NULL)
3466 regset->collect_regset (regset, regcache, -1,
3467 &fpregs, sizeof (fpregs));
3468 else
3469 fill_fpregset (regcache, &fpregs, -1);
3470
3471 note_data = (char *) elfcore_write_prfpreg (obfd,
3472 note_data,
3473 note_size,
3474 &fpregs, sizeof (fpregs));
3475 }
4f844a66 3476
dba24537
AC
3477 return note_data;
3478}
3479
3480struct linux_nat_corefile_thread_data
3481{
3482 bfd *obfd;
3483 char *note_data;
3484 int *note_size;
3485 int num_notes;
2020b7ab 3486 enum target_signal stop_signal;
dba24537
AC
3487};
3488
 3489/* Callback for iterate_over_lwps, called once per LWP.  Records the
 3490   thread's register state for the corefile note section.  */
3491
3492static int
3493linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3494{
3495 struct linux_nat_corefile_thread_data *args = data;
dba24537 3496
dba24537
AC
3497 args->note_data = linux_nat_do_thread_registers (args->obfd,
3498 ti->ptid,
3499 args->note_data,
2020b7ab
PA
3500 args->note_size,
3501 args->stop_signal);
dba24537 3502 args->num_notes++;
56be3814 3503
dba24537
AC
3504 return 0;
3505}
3506
dba24537
AC
 3507/* Fills the "to_make_corefile_notes" slot in the target vector.  Builds the
 3508   note section for a corefile, and returns it in a malloc buffer.  */
3509
3510static char *
3511linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3512{
3513 struct linux_nat_corefile_thread_data thread_args;
3514 struct cleanup *old_chain;
d99148ef 3515 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3516 char fname[16] = { '\0' };
d99148ef 3517 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3518 char psargs[80] = { '\0' };
3519 char *note_data = NULL;
3520 ptid_t current_ptid = inferior_ptid;
c6826062 3521 gdb_byte *auxv;
dba24537
AC
3522 int auxv_len;
3523
3524 if (get_exec_file (0))
3525 {
3526 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3527 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3528 if (get_inferior_args ())
3529 {
d99148ef
JK
3530 char *string_end;
3531 char *psargs_end = psargs + sizeof (psargs);
3532
 3533	  /* linux_elfcore_write_prpsinfo () copes fine with strings that
 3534	     are not zero-terminated.  */
3535 string_end = memchr (psargs, 0, sizeof (psargs));
3536 if (string_end != NULL)
3537 {
3538 *string_end++ = ' ';
3539 strncpy (string_end, get_inferior_args (),
3540 psargs_end - string_end);
3541 }
dba24537
AC
3542 }
3543 note_data = (char *) elfcore_write_prpsinfo (obfd,
3544 note_data,
3545 note_size, fname, psargs);
3546 }
3547
3548 /* Dump information for threads. */
3549 thread_args.obfd = obfd;
3550 thread_args.note_data = note_data;
3551 thread_args.note_size = note_size;
3552 thread_args.num_notes = 0;
2020b7ab 3553 thread_args.stop_signal = find_stop_signal ();
dba24537 3554 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
3555 gdb_assert (thread_args.num_notes != 0);
3556 note_data = thread_args.note_data;
dba24537 3557
13547ab6
DJ
3558 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3559 NULL, &auxv);
dba24537
AC
3560 if (auxv_len > 0)
3561 {
3562 note_data = elfcore_write_note (obfd, note_data, note_size,
3563 "CORE", NT_AUXV, auxv, auxv_len);
3564 xfree (auxv);
3565 }
3566
3567 make_cleanup (xfree, note_data);
3568 return note_data;
3569}
3570
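/* The fname/psargs handling above squeezes the executable name and its
   arguments into the fixed-size prpsinfo buffers.  A hedged sketch of the
   same idea using snprintf follows; build_psinfo_strings and its inputs are
   hypothetical, and the 16/80 sizes mirror the buffers used above.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
build_psinfo_strings (const char *exec_path, const char *args,
		      char fname[16], char psargs[80])
{
  const char *base = strrchr (exec_path, '/');

  base = (base != NULL) ? base + 1 : exec_path;

  /* snprintf always terminates, silently truncating to fit the
     fixed-size prpsinfo fields.  */
  snprintf (fname, 16, "%s", base);
  snprintf (psargs, 80, "%s %s", exec_path, args != NULL ? args : "");
}
#endif
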
3571/* Implement the "info proc" command. */
3572
3573static void
3574linux_nat_info_proc_cmd (char *args, int from_tty)
3575{
3576 long long pid = PIDGET (inferior_ptid);
3577 FILE *procfile;
3578 char **argv = NULL;
3579 char buffer[MAXPATHLEN];
3580 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3581 int cmdline_f = 1;
3582 int cwd_f = 1;
3583 int exe_f = 1;
3584 int mappings_f = 0;
3585 int environ_f = 0;
3586 int status_f = 0;
3587 int stat_f = 0;
3588 int all = 0;
3589 struct stat dummy;
3590
3591 if (args)
3592 {
3593 /* Break up 'args' into an argv array. */
d1a41061
PP
3594 argv = gdb_buildargv (args);
3595 make_cleanup_freeargv (argv);
dba24537
AC
3596 }
3597 while (argv != NULL && *argv != NULL)
3598 {
3599 if (isdigit (argv[0][0]))
3600 {
3601 pid = strtoul (argv[0], NULL, 10);
3602 }
3603 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3604 {
3605 mappings_f = 1;
3606 }
3607 else if (strcmp (argv[0], "status") == 0)
3608 {
3609 status_f = 1;
3610 }
3611 else if (strcmp (argv[0], "stat") == 0)
3612 {
3613 stat_f = 1;
3614 }
3615 else if (strcmp (argv[0], "cmd") == 0)
3616 {
3617 cmdline_f = 1;
3618 }
3619 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3620 {
3621 exe_f = 1;
3622 }
3623 else if (strcmp (argv[0], "cwd") == 0)
3624 {
3625 cwd_f = 1;
3626 }
3627 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3628 {
3629 all = 1;
3630 }
3631 else
3632 {
3633 /* [...] (future options here) */
3634 }
3635 argv++;
3636 }
3637 if (pid == 0)
8a3fe4f8 3638 error (_("No current process: you must name one."));
dba24537
AC
3639
3640 sprintf (fname1, "/proc/%lld", pid);
3641 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3642 error (_("No /proc directory: '%s'"), fname1);
dba24537 3643
a3f17187 3644 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3645 if (cmdline_f || all)
3646 {
3647 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3648 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3649 {
3650 fgets (buffer, sizeof (buffer), procfile);
3651 printf_filtered ("cmdline = '%s'\n", buffer);
3652 fclose (procfile);
3653 }
3654 else
8a3fe4f8 3655 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3656 }
3657 if (cwd_f || all)
3658 {
3659 sprintf (fname1, "/proc/%lld/cwd", pid);
3660 memset (fname2, 0, sizeof (fname2));
3661 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3662 printf_filtered ("cwd = '%s'\n", fname2);
3663 else
8a3fe4f8 3664 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3665 }
3666 if (exe_f || all)
3667 {
3668 sprintf (fname1, "/proc/%lld/exe", pid);
3669 memset (fname2, 0, sizeof (fname2));
3670 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3671 printf_filtered ("exe = '%s'\n", fname2);
3672 else
8a3fe4f8 3673 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3674 }
3675 if (mappings_f || all)
3676 {
3677 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3678 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3679 {
3680 long long addr, endaddr, size, offset, inode;
3681 char permissions[8], device[8], filename[MAXPATHLEN];
3682
a3f17187 3683 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3684 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3685 {
3686 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3687 "Start Addr",
3688 " End Addr",
3689 " Size", " Offset", "objfile");
3690 }
3691 else
3692 {
3693 printf_filtered (" %18s %18s %10s %10s %7s\n",
3694 "Start Addr",
3695 " End Addr",
3696 " Size", " Offset", "objfile");
3697 }
3698
3699 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3700 &offset, &device[0], &inode, &filename[0]))
3701 {
3702 size = endaddr - addr;
3703
3704 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3705 calls here (and possibly above) should be abstracted
3706 out into their own functions? Andrew suggests using
3707 a generic local_address_string instead to print out
3708 the addresses; that makes sense to me, too. */
3709
17a912b6 3710 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3711 {
3712 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3713 (unsigned long) addr, /* FIXME: pr_addr */
3714 (unsigned long) endaddr,
3715 (int) size,
3716 (unsigned int) offset,
3717 filename[0] ? filename : "");
3718 }
3719 else
3720 {
3721 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3722 (unsigned long) addr, /* FIXME: pr_addr */
3723 (unsigned long) endaddr,
3724 (int) size,
3725 (unsigned int) offset,
3726 filename[0] ? filename : "");
3727 }
3728 }
3729
3730 fclose (procfile);
3731 }
3732 else
8a3fe4f8 3733 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3734 }
3735 if (status_f || all)
3736 {
3737 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3738 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3739 {
3740 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3741 puts_filtered (buffer);
3742 fclose (procfile);
3743 }
3744 else
8a3fe4f8 3745 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3746 }
3747 if (stat_f || all)
3748 {
3749 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3750 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3751 {
3752 int itmp;
3753 char ctmp;
a25694b4 3754 long ltmp;
dba24537
AC
3755
3756 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3757 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3758 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3759 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3760 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3761 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3762 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3763 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3764 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3765 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3766 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3767 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3768 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3769 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3770 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3771 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3772 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3773 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3774 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3775 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3776 (unsigned long) ltmp);
3777 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3778 printf_filtered (_("Minor faults, children: %lu\n"),
3779 (unsigned long) ltmp);
3780 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3781 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3782 (unsigned long) ltmp);
3783 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3784 printf_filtered (_("Major faults, children: %lu\n"),
3785 (unsigned long) ltmp);
3786 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3787 printf_filtered (_("utime: %ld\n"), ltmp);
3788 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3789 printf_filtered (_("stime: %ld\n"), ltmp);
3790 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3791 printf_filtered (_("utime, children: %ld\n"), ltmp);
3792 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3793 printf_filtered (_("stime, children: %ld\n"), ltmp);
3794 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3795 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3796 ltmp);
3797 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3798 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3799 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3800 printf_filtered (_("jiffies until next timeout: %lu\n"),
3801 (unsigned long) ltmp);
3802 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3803 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3804 (unsigned long) ltmp);
3805 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3806 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3807 ltmp);
3808 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3809 printf_filtered (_("Virtual memory size: %lu\n"),
3810 (unsigned long) ltmp);
3811 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3812 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3813 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3814 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3815 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3816 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3817 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3818 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3819 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3820 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3821#if 0 /* Don't know how architecture-dependent the rest is...
3822 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3823 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3824 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3825 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3826 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3827 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3828 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3829 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3830 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3831 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3832 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3833 if (fscanf (procfile, "%ld ", &ltmp) > 0)
 3834	    printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
3835 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3836 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537
AC
3837#endif
3838 fclose (procfile);
3839 }
3840 else
8a3fe4f8 3841 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3842 }
3843}
3844
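/* The "mappings" branch above walks /proc/PID/maps via the read_mapping
   helper defined elsewhere in this file.  Below is a standalone sketch of
   parsing the same file directly, assuming the usual
   "start-end perms offset dev inode path" line layout; dump_mappings is a
   hypothetical helper, not GDB code.  */
#if 0
#include <stdio.h>

static void
dump_mappings (int pid)
{
  char path[64], line[1024], perms[8];
  unsigned long long start, end;
  FILE *fp;

  snprintf (path, sizeof (path), "/proc/%d/maps", pid);
  fp = fopen (path, "r");
  if (fp == NULL)
    return;

  while (fgets (line, sizeof (line), fp) != NULL)
    if (sscanf (line, "%llx-%llx %7s", &start, &end, perms) == 3)
      printf ("%#llx-%#llx %s %llu bytes\n", start, end, perms, end - start);

  fclose (fp);
}
#endif
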
10d6c8cd
DJ
3845/* Implement the to_xfer_partial interface for memory reads using the /proc
3846 filesystem. Because we can use a single read() call for /proc, this
3847 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3848 but it doesn't support writes. */
3849
3850static LONGEST
3851linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3852 const char *annex, gdb_byte *readbuf,
3853 const gdb_byte *writebuf,
3854 ULONGEST offset, LONGEST len)
dba24537 3855{
10d6c8cd
DJ
3856 LONGEST ret;
3857 int fd;
dba24537
AC
3858 char filename[64];
3859
10d6c8cd 3860 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3861 return 0;
3862
 3863  /* Don't bother for small requests; the ptrace fallback is fine there.  */
3864 if (len < 3 * sizeof (long))
3865 return 0;
3866
3867 /* We could keep this file open and cache it - possibly one per
3868 thread. That requires some juggling, but is even faster. */
3869 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3870 fd = open (filename, O_RDONLY | O_LARGEFILE);
3871 if (fd == -1)
3872 return 0;
3873
3874 /* If pread64 is available, use it. It's faster if the kernel
3875 supports it (only one syscall), and it's 64-bit safe even on
3876 32-bit platforms (for instance, SPARC debugging a SPARC64
3877 application). */
3878#ifdef HAVE_PREAD64
10d6c8cd 3879 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3880#else
10d6c8cd 3881 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3882#endif
3883 ret = 0;
3884 else
3885 ret = len;
3886
3887 close (fd);
3888 return ret;
3889}
3890
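/* As the comment above explains, a single pread on /proc/PID/mem is much
   cheaper than word-by-word PTRACE_PEEKTEXT.  A minimal standalone sketch of
   that approach for a ptrace-stopped child follows; read_child_memory is a
   hypothetical helper and error handling is kept minimal.  */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_child_memory (pid_t pid, unsigned long long addr, void *buf, size_t len)
{
  char path[64];
  ssize_t n;
  int fd;

  snprintf (path, sizeof (path), "/proc/%d/mem", (int) pid);
  fd = open (path, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

  /* One 64-bit-safe read at the requested offset.  */
  n = pread64 (fd, buf, len, (off64_t) addr);
  close (fd);
  return n;
}
#endif
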
3891/* Parse LINE as a signal set and add its set bits to SIGS. */
3892
3893static void
3894add_line_to_sigset (const char *line, sigset_t *sigs)
3895{
3896 int len = strlen (line) - 1;
3897 const char *p;
3898 int signum;
3899
3900 if (line[len] != '\n')
8a3fe4f8 3901 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3902
3903 p = line;
3904 signum = len * 4;
3905 while (len-- > 0)
3906 {
3907 int digit;
3908
3909 if (*p >= '0' && *p <= '9')
3910 digit = *p - '0';
3911 else if (*p >= 'a' && *p <= 'f')
3912 digit = *p - 'a' + 10;
3913 else
8a3fe4f8 3914 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3915
3916 signum -= 4;
3917
3918 if (digit & 1)
3919 sigaddset (sigs, signum + 1);
3920 if (digit & 2)
3921 sigaddset (sigs, signum + 2);
3922 if (digit & 4)
3923 sigaddset (sigs, signum + 3);
3924 if (digit & 8)
3925 sigaddset (sigs, signum + 4);
3926
3927 p++;
3928 }
3929}
3930
 3931/* Find process PID's pending, blocked and ignored signals from
 3932   /proc/pid/status and set PENDING, BLOCKED and IGNORED to match.  */
3933
3934void
3935linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3936{
3937 FILE *procfile;
3938 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3939 int signum;
3940
3941 sigemptyset (pending);
3942 sigemptyset (blocked);
3943 sigemptyset (ignored);
3944 sprintf (fname, "/proc/%d/status", pid);
3945 procfile = fopen (fname, "r");
3946 if (procfile == NULL)
8a3fe4f8 3947 error (_("Could not open %s"), fname);
dba24537
AC
3948
3949 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3950 {
3951 /* Normal queued signals are on the SigPnd line in the status
3952 file. However, 2.6 kernels also have a "shared" pending
3953 queue for delivering signals to a thread group, so check for
3954 a ShdPnd line also.
3955
3956 Unfortunately some Red Hat kernels include the shared pending
3957 queue but not the ShdPnd status field. */
3958
3959 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3960 add_line_to_sigset (buffer + 8, pending);
3961 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3962 add_line_to_sigset (buffer + 8, pending);
3963 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3964 add_line_to_sigset (buffer + 8, blocked);
3965 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3966 add_line_to_sigset (buffer + 8, ignored);
3967 }
3968
3969 fclose (procfile);
3970}
3971
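/* add_line_to_sigset above decodes the hexadecimal masks found in
   /proc/PID/status nibble by nibble.  A standalone sketch of the same idea
   follows, assuming at most 64 signal bits (so the whole mask fits in an
   unsigned long long); print_blocked_signals is a hypothetical helper.  */
#if 0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
print_blocked_signals (int pid)
{
  char path[64], line[256];
  FILE *fp;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  fp = fopen (path, "r");
  if (fp == NULL)
    return;

  while (fgets (line, sizeof (line), fp) != NULL)
    if (strncmp (line, "SigBlk:\t", 8) == 0)
      {
	/* Bit N-1 of the hex mask corresponds to signal N.  */
	unsigned long long mask = strtoull (line + 8, NULL, 16);
	int sig;

	for (sig = 1; sig <= 64; sig++)
	  if (mask & (1ULL << (sig - 1)))
	    printf ("signal %d is blocked\n", sig);
      }

  fclose (fp);
}
#endif
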
10d6c8cd
DJ
3972static LONGEST
3973linux_xfer_partial (struct target_ops *ops, enum target_object object,
3974 const char *annex, gdb_byte *readbuf,
3975 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3976{
3977 LONGEST xfer;
3978
3979 if (object == TARGET_OBJECT_AUXV)
3980 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3981 offset, len);
3982
3983 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3984 offset, len);
3985 if (xfer != 0)
3986 return xfer;
3987
3988 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3989 offset, len);
3990}
3991
e9efe249 3992/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
3993 it with local methods. */
3994
910122bf
UW
3995static void
3996linux_target_install_ops (struct target_ops *t)
10d6c8cd 3997{
6d8fd2b7
UW
3998 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3999 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4000 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4001 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4002 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4003 t->to_post_attach = linux_child_post_attach;
4004 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4005 t->to_find_memory_regions = linux_nat_find_memory_regions;
4006 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4007
4008 super_xfer_partial = t->to_xfer_partial;
4009 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
4010}
4011
4012struct target_ops *
4013linux_target (void)
4014{
4015 struct target_ops *t;
4016
4017 t = inf_ptrace_target ();
4018 linux_target_install_ops (t);
4019
4020 return t;
4021}
4022
4023struct target_ops *
7714d83a 4024linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4025{
4026 struct target_ops *t;
4027
4028 t = inf_ptrace_trad_target (register_u_offset);
4029 linux_target_install_ops (t);
10d6c8cd 4030
10d6c8cd
DJ
4031 return t;
4032}
4033
b84876c2
PA
4034/* target_is_async_p implementation. */
4035
4036static int
4037linux_nat_is_async_p (void)
4038{
4039 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4040 it explicitly with the "maintenance set target-async" command.
b84876c2 4041 Someday, linux will always be async. */
c6ebd6cf 4042 if (!target_async_permitted)
b84876c2
PA
4043 return 0;
4044
4045 return 1;
4046}
4047
4048/* target_can_async_p implementation. */
4049
4050static int
4051linux_nat_can_async_p (void)
4052{
4053 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4054 it explicitly with the "maintenance set target-async" command.
b84876c2 4055 Someday, linux will always be async. */
c6ebd6cf 4056 if (!target_async_permitted)
b84876c2
PA
4057 return 0;
4058
4059 /* See target.h/target_async_mask. */
4060 return linux_nat_async_mask_value;
4061}
4062
9908b566
VP
4063static int
4064linux_nat_supports_non_stop (void)
4065{
4066 return 1;
4067}
4068
b84876c2
PA
4069/* target_async_mask implementation. */
4070
4071static int
4072linux_nat_async_mask (int mask)
4073{
4074 int current_state;
4075 current_state = linux_nat_async_mask_value;
4076
4077 if (current_state != mask)
4078 {
4079 if (mask == 0)
4080 {
4081 linux_nat_async (NULL, 0);
4082 linux_nat_async_mask_value = mask;
b84876c2
PA
4083 }
4084 else
4085 {
b84876c2
PA
4086 linux_nat_async_mask_value = mask;
4087 linux_nat_async (inferior_event_handler, 0);
4088 }
4089 }
4090
4091 return current_state;
4092}
4093
4094/* Pop an event from the event pipe. */
4095
4096static int
4097linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
4098{
4099 struct waitpid_result event = {0};
4100 int ret;
4101
4102 do
4103 {
4104 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4105 }
4106 while (ret == -1 && errno == EINTR);
4107
4108 gdb_assert (ret == sizeof (event));
4109
4110 *ptr_status = event.status;
4111 *ptr_options = event.options;
4112
4113 linux_nat_num_queued_events--;
4114
4115 return event.pid;
4116}
4117
4118/* Push an event into the event pipe. */
4119
4120static void
4121linux_nat_event_pipe_push (int pid, int status, int options)
4122{
4123 int ret;
4124 struct waitpid_result event = {0};
4125 event.pid = pid;
4126 event.status = status;
4127 event.options = options;
4128
4129 do
4130 {
4131 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
4132 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
4133 } while (ret == -1 && errno == EINTR);
4134
4135 linux_nat_num_queued_events++;
4136}
4137
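/* linux_nat_event_pipe_push/pop move fixed-size records through a pipe so
   the SIGCHLD handler can queue events for the event loop.  A minimal sketch
   of the same self-pipe pattern follows; struct child_event, push_event and
   pop_event are hypothetical.  Writes no larger than PIPE_BUF are atomic, so
   a small record can safely be written from the signal handler.  */
#if 0
#include <errno.h>
#include <unistd.h>

struct child_event		/* Mirrors the waitpid result.  */
{
  int pid;
  int status;
};

static int event_pipe[2];	/* Created once with pipe ().  */

/* Writer side; write () is async-signal-safe, so this may be called
   from a SIGCHLD handler.  */
static void
push_event (struct child_event ev)
{
  ssize_t n;

  do
    n = write (event_pipe[1], &ev, sizeof (ev));
  while (n == -1 && errno == EINTR);
}

/* Reader side, called from the event loop when the pipe is readable.  */
static struct child_event
pop_event (void)
{
  struct child_event ev;
  ssize_t n;

  do
    n = read (event_pipe[0], &ev, sizeof (ev));
  while (n == -1 && errno == EINTR);
  return ev;
}
#endif
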
4138static void
4139get_pending_events (void)
4140{
4141 int status, options, pid;
4142
c6ebd6cf 4143 if (!target_async_permitted
84e46146 4144 || linux_nat_async_events_state != sigchld_async)
b84876c2
PA
4145 internal_error (__FILE__, __LINE__,
4146 "get_pending_events called with async masked");
4147
4148 while (1)
4149 {
4150 status = 0;
4151 options = __WCLONE | WNOHANG;
4152
4153 do
4154 {
4155 pid = waitpid (-1, &status, options);
4156 }
4157 while (pid == -1 && errno == EINTR);
4158
4159 if (pid <= 0)
4160 {
4161 options = WNOHANG;
4162 do
4163 {
4164 pid = waitpid (-1, &status, options);
4165 }
4166 while (pid == -1 && errno == EINTR);
4167 }
4168
4169 if (pid <= 0)
4170 /* No more children reporting events. */
4171 break;
4172
4173 if (debug_linux_nat_async)
4174 fprintf_unfiltered (gdb_stdlog, "\
4175get_pending_events: pid(%d), status(%x), options (%x)\n",
4176 pid, status, options);
4177
4178 linux_nat_event_pipe_push (pid, status, options);
4179 }
4180
4181 if (debug_linux_nat_async)
4182 fprintf_unfiltered (gdb_stdlog, "\
4183get_pending_events: linux_nat_num_queued_events(%d)\n",
4184 linux_nat_num_queued_events);
4185}
4186
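/* get_pending_events drains every child report that is already available,
   first asking for cloned LWPs with __WCLONE and then for ordinary children,
   never blocking.  A standalone sketch of that drain loop follows; it assumes
   a glibc that exposes __WCLONE in <sys/wait.h>, and drain_pending_children
   with its HANDLE callback are hypothetical.  */
#if 0
#include <errno.h>
#include <sys/wait.h>

static void
drain_pending_children (void (*handle) (int pid, int status))
{
  int option_sets[] = { __WCLONE | WNOHANG, WNOHANG };
  int i;

  for (i = 0; i < 2; i++)
    for (;;)
      {
	int status, pid;

	pid = waitpid (-1, &status, option_sets[i]);
	if (pid == -1 && errno == EINTR)
	  continue;
	if (pid <= 0)
	  break;		/* Nothing more with these options.  */
	handle (pid, status);
      }
}
#endif
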
4187/* SIGCHLD handler for async mode. */
4188
4189static void
4190async_sigchld_handler (int signo)
4191{
4192 if (debug_linux_nat_async)
4193 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
4194
4195 get_pending_events ();
4196}
4197
84e46146 4198/* Set SIGCHLD handling state to STATE. Returns previous state. */
b84876c2 4199
84e46146
PA
4200static enum sigchld_state
4201linux_nat_async_events (enum sigchld_state state)
b84876c2 4202{
84e46146 4203 enum sigchld_state current_state = linux_nat_async_events_state;
b84876c2
PA
4204
4205 if (debug_linux_nat_async)
4206 fprintf_unfiltered (gdb_stdlog,
84e46146 4207 "LNAE: state(%d): linux_nat_async_events_state(%d), "
b84876c2 4208 "linux_nat_num_queued_events(%d)\n",
84e46146 4209 state, linux_nat_async_events_state,
b84876c2
PA
4210 linux_nat_num_queued_events);
4211
84e46146 4212 if (current_state != state)
b84876c2
PA
4213 {
4214 sigset_t mask;
4215 sigemptyset (&mask);
4216 sigaddset (&mask, SIGCHLD);
84e46146
PA
4217
4218 /* Always block before changing state. */
4219 sigprocmask (SIG_BLOCK, &mask, NULL);
4220
4221 /* Set new state. */
4222 linux_nat_async_events_state = state;
4223
4224 switch (state)
b84876c2 4225 {
84e46146
PA
4226 case sigchld_sync:
4227 {
4228 /* Block target events. */
4229 sigprocmask (SIG_BLOCK, &mask, NULL);
4230 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4231 /* Get events out of queue, and make them available to
4232 queued_waitpid / my_waitpid. */
4233 pipe_to_local_event_queue ();
4234 }
4235 break;
4236 case sigchld_async:
4237 {
4238 /* Unblock target events for async mode. */
4239
4240 sigprocmask (SIG_BLOCK, &mask, NULL);
4241
 4242	    /* Put the events we have already waited on into the pipe
 4243	       first, so events stay FIFO.  */
4244 local_event_queue_to_pipe ();
 4245	    /* While in masked async, we may not have collected all the
 4246	       pending events.  Get them out now.  */
4247 get_pending_events ();
4248
4249 /* Let'em come. */
4250 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4251 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4252 }
4253 break;
4254 case sigchld_default:
4255 {
4256 /* SIGCHLD default mode. */
4257 sigaction (SIGCHLD, &sigchld_default_action, NULL);
4258
4259 /* Get events out of queue, and make them available to
4260 queued_waitpid / my_waitpid. */
4261 pipe_to_local_event_queue ();
4262
4263 /* Unblock SIGCHLD. */
4264 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4265 }
4266 break;
b84876c2
PA
4267 }
4268 }
4269
4270 return current_state;
4271}
4272
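/* linux_nat_async_events swaps the SIGCHLD disposition while the signal is
   blocked, so no report can be delivered halfway through the switch.  A
   minimal sketch of that block/swap/unblock sequence follows; sync_handler,
   async_handler and switch_sigchld_handler are hypothetical names.  */
#if 0
#include <signal.h>

extern void sync_handler (int);		/* hypothetical handlers */
extern void async_handler (int);

static void
switch_sigchld_handler (int want_async)
{
  sigset_t chld;
  struct sigaction sa;

  sigemptyset (&chld);
  sigaddset (&chld, SIGCHLD);

  /* Block first, so a SIGCHLD arriving mid-switch is held until the
     new handler is installed.  */
  sigprocmask (SIG_BLOCK, &chld, NULL);

  sa.sa_handler = want_async ? async_handler : sync_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);

  if (want_async)
    sigprocmask (SIG_UNBLOCK, &chld, NULL);
}
#endif
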
4273static int async_terminal_is_ours = 1;
4274
4275/* target_terminal_inferior implementation. */
4276
4277static void
4278linux_nat_terminal_inferior (void)
4279{
4280 if (!target_is_async_p ())
4281 {
4282 /* Async mode is disabled. */
4283 terminal_inferior ();
4284 return;
4285 }
4286
4287 /* GDB should never give the terminal to the inferior, if the
4288 inferior is running in the background (run&, continue&, etc.).
4289 This check can be removed when the common code is fixed. */
4290 if (!sync_execution)
4291 return;
4292
4293 terminal_inferior ();
4294
4295 if (!async_terminal_is_ours)
4296 return;
4297
4298 delete_file_handler (input_fd);
4299 async_terminal_is_ours = 0;
4300 set_sigint_trap ();
4301}
4302
4303/* target_terminal_ours implementation. */
4304
4305void
4306linux_nat_terminal_ours (void)
4307{
4308 if (!target_is_async_p ())
4309 {
4310 /* Async mode is disabled. */
4311 terminal_ours ();
4312 return;
4313 }
4314
 4315  /* GDB should never give the terminal to the inferior if the
 4316     inferior is running in the background (run&, continue&, etc.),
 4317     but claiming the terminal back for GDB should always be allowed.  */
4318 terminal_ours ();
4319
4320 if (!sync_execution)
4321 return;
4322
4323 if (async_terminal_is_ours)
4324 return;
4325
4326 clear_sigint_trap ();
4327 add_file_handler (input_fd, stdin_event_handler, 0);
4328 async_terminal_is_ours = 1;
4329}
4330
4331static void (*async_client_callback) (enum inferior_event_type event_type,
4332 void *context);
4333static void *async_client_context;
4334
4335static void
4336linux_nat_async_file_handler (int error, gdb_client_data client_data)
4337{
4338 async_client_callback (INF_REG_EVENT, async_client_context);
4339}
4340
4341/* target_async implementation. */
4342
4343static void
4344linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4345 void *context), void *context)
4346{
c6ebd6cf 4347 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
4348 internal_error (__FILE__, __LINE__,
4349 "Calling target_async when async is masked");
4350
4351 if (callback != NULL)
4352 {
4353 async_client_callback = callback;
4354 async_client_context = context;
4355 add_file_handler (linux_nat_event_pipe[0],
4356 linux_nat_async_file_handler, NULL);
4357
84e46146 4358 linux_nat_async_events (sigchld_async);
b84876c2
PA
4359 }
4360 else
4361 {
4362 async_client_callback = callback;
4363 async_client_context = context;
4364
84e46146 4365 linux_nat_async_events (sigchld_sync);
b84876c2
PA
4366 delete_file_handler (linux_nat_event_pipe[0]);
4367 }
4368 return;
4369}
4370
4c28f408
PA
4371static int
4372send_sigint_callback (struct lwp_info *lp, void *data)
4373{
4374 /* Use is_running instead of !lp->stopped, because the lwp may be
4375 stopped due to an internal event, and we want to interrupt it in
4376 that case too. What we want is to check if the thread is stopped
4377 from the point of view of the user. */
4378 if (is_running (lp->ptid))
4379 kill_lwp (GET_LWP (lp->ptid), SIGINT);
4380 return 0;
4381}
4382
4383static void
4384linux_nat_stop (ptid_t ptid)
4385{
4386 if (non_stop)
4387 {
4388 if (ptid_equal (ptid, minus_one_ptid))
4389 iterate_over_lwps (send_sigint_callback, &ptid);
4390 else
4391 {
4392 struct lwp_info *lp = find_lwp_pid (ptid);
4393 send_sigint_callback (lp, NULL);
4394 }
4395 }
4396 else
4397 linux_ops->to_stop (ptid);
4398}
4399
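/* linux_nat_stop interrupts each running LWP individually through kill_lwp,
   which is defined earlier in this file.  The sketch below shows one common
   way to direct a signal at a single thread, using the tgkill syscall;
   signal_one_lwp is a hypothetical helper, not the GDB implementation.  */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Send SIGNO to one specific thread (TID) of thread group TGID; plain
   kill () would let the kernel pick any thread in the group.  */
static int
signal_one_lwp (pid_t tgid, pid_t tid, int signo)
{
  return syscall (SYS_tgkill, tgid, tid, signo);
}
#endif
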
f973ed9c
DJ
4400void
4401linux_nat_add_target (struct target_ops *t)
4402{
f973ed9c
DJ
4403 /* Save the provided single-threaded target. We save this in a separate
4404 variable because another target we've inherited from (e.g. inf-ptrace)
4405 may have saved a pointer to T; we want to use it for the final
4406 process stratum target. */
4407 linux_ops_saved = *t;
4408 linux_ops = &linux_ops_saved;
4409
4410 /* Override some methods for multithreading. */
b84876c2 4411 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4412 t->to_attach = linux_nat_attach;
4413 t->to_detach = linux_nat_detach;
4414 t->to_resume = linux_nat_resume;
4415 t->to_wait = linux_nat_wait;
4416 t->to_xfer_partial = linux_nat_xfer_partial;
4417 t->to_kill = linux_nat_kill;
4418 t->to_mourn_inferior = linux_nat_mourn_inferior;
4419 t->to_thread_alive = linux_nat_thread_alive;
4420 t->to_pid_to_str = linux_nat_pid_to_str;
4421 t->to_has_thread_control = tc_schedlock;
4422
b84876c2
PA
4423 t->to_can_async_p = linux_nat_can_async_p;
4424 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4425 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
4426 t->to_async = linux_nat_async;
4427 t->to_async_mask = linux_nat_async_mask;
4428 t->to_terminal_inferior = linux_nat_terminal_inferior;
4429 t->to_terminal_ours = linux_nat_terminal_ours;
4430
4c28f408
PA
4431 /* Methods for non-stop support. */
4432 t->to_stop = linux_nat_stop;
4433
f973ed9c
DJ
4434 /* We don't change the stratum; this target will sit at
 4435     process_stratum and thread_db will sit at thread_stratum.  This
4436 is a little strange, since this is a multi-threaded-capable
4437 target, but we want to be on the stack below thread_db, and we
4438 also want to be used for single-threaded processes. */
4439
4440 add_target (t);
4441
4442 /* TODO: Eliminate this and have libthread_db use
4443 find_target_beneath. */
4444 thread_db_init (t);
4445}
4446
9f0bdab8
DJ
4447/* Register a method to call whenever a new thread is attached. */
4448void
4449linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4450{
4451 /* Save the pointer. We only support a single registered instance
4452 of the GNU/Linux native target, so we do not need to map this to
4453 T. */
4454 linux_nat_new_thread = new_thread;
4455}
4456
4457/* Return the saved siginfo associated with PTID. */
4458struct siginfo *
4459linux_nat_get_siginfo (ptid_t ptid)
4460{
4461 struct lwp_info *lp = find_lwp_pid (ptid);
4462
4463 gdb_assert (lp != NULL);
4464
4465 return &lp->siginfo;
4466}
4467
c6ebd6cf
VP
 4468/* Create the non-blocking event pipe used to deliver target events
   to the event loop in async mode.  */
4469
4470static void
4471linux_nat_setup_async (void)
4472{
4473 if (pipe (linux_nat_event_pipe) == -1)
4474 internal_error (__FILE__, __LINE__,
4475 "creating event pipe failed.");
4476 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4477 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4478}
4479
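/* The F_SETFL calls above set O_NONBLOCK on both pipe ends.  A hedged
   variant is sketched below: reading the current flags with F_GETFL first
   preserves any other status flags instead of overwriting them;
   set_nonblocking is a hypothetical helper.  */
#if 0
#include <fcntl.h>

static int
set_nonblocking (int fd)
{
  int flags = fcntl (fd, F_GETFL, 0);

  if (flags == -1)
    return -1;
  return fcntl (fd, F_SETFL, flags | O_NONBLOCK);
}
#endif
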
d6b0e80f
AC
4480void
4481_initialize_linux_nat (void)
4482{
b84876c2 4483 sigset_t mask;
dba24537 4484
1bedd215
AC
4485 add_info ("proc", linux_nat_info_proc_cmd, _("\
4486Show /proc process information about any running process.\n\
dba24537
AC
4487Specify any process id, or use the program being debugged by default.\n\
4488Specify any of the following keywords for detailed info:\n\
4489 mappings -- list of mapped memory regions.\n\
 4490 stat -- list process information from /proc/PID/stat.\n\
 4491 status -- list process information from /proc/PID/status.\n\
1bedd215 4492 all -- list all available /proc info."));
d6b0e80f 4493
b84876c2
PA
4494 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4495 &debug_linux_nat, _("\
4496Set debugging of GNU/Linux lwp module."), _("\
4497Show debugging of GNU/Linux lwp module."), _("\
4498Enables printf debugging output."),
4499 NULL,
4500 show_debug_linux_nat,
4501 &setdebuglist, &showdebuglist);
4502
4503 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4504 &debug_linux_nat_async, _("\
4505Set debugging of GNU/Linux async lwp module."), _("\
4506Show debugging of GNU/Linux async lwp module."), _("\
4507Enables printf debugging output."),
4508 NULL,
4509 show_debug_linux_nat_async,
4510 &setdebuglist, &showdebuglist);
4511
84e46146
PA
4512 /* Get the default SIGCHLD action. Used while forking an inferior
4513 (see linux_nat_create_inferior/linux_nat_async_events). */
4514 sigaction (SIGCHLD, NULL, &sigchld_default_action);
4515
b84876c2
PA
4516 /* Block SIGCHLD by default. Doing this early prevents it getting
4517 unblocked if an exception is thrown due to an error while the
4518 inferior is starting (sigsetjmp/siglongjmp). */
4519 sigemptyset (&mask);
4520 sigaddset (&mask, SIGCHLD);
4521 sigprocmask (SIG_BLOCK, &mask, NULL);
4522
4523 /* Save this mask as the default. */
d6b0e80f
AC
4524 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4525
b84876c2
PA
4526 /* The synchronous SIGCHLD handler. */
4527 sync_sigchld_action.sa_handler = sigchld_handler;
4528 sigemptyset (&sync_sigchld_action.sa_mask);
4529 sync_sigchld_action.sa_flags = SA_RESTART;
4530
4531 /* Make it the default. */
4532 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
d6b0e80f
AC
4533
4534 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4535 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4536 sigdelset (&suspend_mask, SIGCHLD);
4537
b84876c2
PA
4538 /* SIGCHLD handler for async mode. */
4539 async_sigchld_action.sa_handler = async_sigchld_handler;
4540 sigemptyset (&async_sigchld_action.sa_mask);
4541 async_sigchld_action.sa_flags = SA_RESTART;
d6b0e80f 4542
c6ebd6cf 4543 linux_nat_setup_async ();
10568435
JK
4544
4545 add_setshow_boolean_cmd ("disable-randomization", class_support,
4546 &disable_randomization, _("\
4547Set disabling of debuggee's virtual address space randomization."), _("\
4548Show disabling of debuggee's virtual address space randomization."), _("\
4549When this mode is on (which is the default), randomization of the virtual\n\
4550address space is disabled. Standalone programs run with the randomization\n\
4551enabled by default on some platforms."),
4552 &set_disable_randomization,
4553 &show_disable_randomization,
4554 &setlist, &showlist);
d6b0e80f
AC
4555}
4556\f
4557
4558/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4559 the GNU/Linux Threads library and therefore doesn't really belong
4560 here. */
4561
4562/* Read variable NAME in the target and return its value if found.
4563 Otherwise return zero. It is assumed that the type of the variable
4564 is `int'. */
4565
4566static int
4567get_signo (const char *name)
4568{
4569 struct minimal_symbol *ms;
4570 int signo;
4571
4572 ms = lookup_minimal_symbol (name, NULL, NULL);
4573 if (ms == NULL)
4574 return 0;
4575
8e70166d 4576 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4577 sizeof (signo)) != 0)
4578 return 0;
4579
4580 return signo;
4581}
4582
4583/* Return the set of signals used by the threads library in *SET. */
4584
4585void
4586lin_thread_get_thread_signals (sigset_t *set)
4587{
4588 struct sigaction action;
4589 int restart, cancel;
b84876c2 4590 sigset_t blocked_mask;
d6b0e80f 4591
b84876c2 4592 sigemptyset (&blocked_mask);
d6b0e80f
AC
4593 sigemptyset (set);
4594
4595 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4596 cancel = get_signo ("__pthread_sig_cancel");
4597
4598 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4599 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4600 not provide any way for the debugger to query the signal numbers -
4601 fortunately they don't change! */
4602
d6b0e80f 4603 if (restart == 0)
17fbb0bd 4604 restart = __SIGRTMIN;
d6b0e80f 4605
d6b0e80f 4606 if (cancel == 0)
17fbb0bd 4607 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4608
4609 sigaddset (set, restart);
4610 sigaddset (set, cancel);
4611
4612 /* The GNU/Linux Threads library makes terminating threads send a
4613 special "cancel" signal instead of SIGCHLD. Make sure we catch
4614 those (to prevent them from terminating GDB itself, which is
4615 likely to be their default action) and treat them the same way as
4616 SIGCHLD. */
4617
4618 action.sa_handler = sigchld_handler;
4619 sigemptyset (&action.sa_mask);
58aecb61 4620 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4621 sigaction (cancel, &action, NULL);
4622
4623 /* We block the "cancel" signal throughout this code ... */
4624 sigaddset (&blocked_mask, cancel);
4625 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4626
4627 /* ... except during a sigsuspend. */
4628 sigdelset (&suspend_mask, cancel);
4629}