Add "continue -a" and "interrupt -a" options for non-stop mode.
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
9b254dd1 3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
e26af52f 4 Free Software Foundation, Inc.
3993f6b1
DJ
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
20
21#include "defs.h"
22#include "inferior.h"
23#include "target.h"
d6b0e80f 24#include "gdb_string.h"
3993f6b1 25#include "gdb_wait.h"
d6b0e80f
AC
26#include "gdb_assert.h"
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
ac264b3b 33#include "linux-fork.h"
d6b0e80f
AC
34#include "gdbthread.h"
35#include "gdbcmd.h"
36#include "regcache.h"
4f844a66 37#include "regset.h"
10d6c8cd
DJ
38#include "inf-ptrace.h"
39#include "auxv.h"
dba24537
AC
40#include <sys/param.h> /* for MAXPATHLEN */
41#include <sys/procfs.h> /* for elf_gregset etc. */
42#include "elf-bfd.h" /* for elfcore_write_* */
43#include "gregset.h" /* for gregset */
44#include "gdbcore.h" /* for get_exec_file */
45#include <ctype.h> /* for isdigit */
46#include "gdbthread.h" /* for struct thread_info etc. */
47#include "gdb_stat.h" /* for struct stat */
48#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
49#include "inf-loop.h"
50#include "event-loop.h"
51#include "event-top.h"
dba24537 52
10568435
JK
53#ifdef HAVE_PERSONALITY
54# include <sys/personality.h>
55# if !HAVE_DECL_ADDR_NO_RANDOMIZE
56# define ADDR_NO_RANDOMIZE 0x0040000
57# endif
58#endif /* HAVE_PERSONALITY */
59
8a77dff3
VP
60/* This comment documents high-level logic of this file.
61
62Waiting for events in sync mode
63===============================
64
65When waiting for an event in a specific thread, we just use waitpid, passing
66the specific pid, and not passing WNOHANG.
67
68When waiting for an event in all threads, waitpid is not quite good. Prior to
69version 2.4, Linux can either wait for event in main thread, or in secondary
70threads. (2.4 has the __WALL flag). So, if we use blocking waitpid, we might
71miss an event. The solution is to use non-blocking waitpid, together with
72sigsuspend. First, we use non-blocking waitpid to get an event in the main
process, if any. Second, we use non-blocking waitpid with the __WCLONE
74flag to check for events in cloned processes. If nothing is found, we use
75sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
76happened to a child process -- and SIGCHLD will be delivered both for events
77in main debugged process and in cloned processes. As soon as we know there's
an event, we get back to calling non-blocking waitpid with and without __WCLONE.
79
80Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
81so that we don't miss a signal. If SIGCHLD arrives in between, when it's
82blocked, the signal becomes pending and sigsuspend immediately
83notices it and returns.
84
85Waiting for events in async mode
86================================
87
88In async mode, GDB should always be ready to handle both user input and target
89events, so neither blocking waitpid nor sigsuspend are viable
90options. Instead, we should notify the GDB main event loop whenever there's
91unprocessed event from the target. The only way to notify this event loop is
92to make it wait on input from a pipe, and write something to the pipe whenever
93there's event. Obviously, if we fail to notify the event loop if there's
94target event, it's bad. If we notify the event loop when there's no event
95from target, linux-nat.c will detect that there's no event, actually, and
96report event of type TARGET_WAITKIND_IGNORE, but it will waste time and
97better avoided.
98
99The main design point is that every time GDB is outside linux-nat.c, we have a
100SIGCHLD handler installed that is called when something happens to the target
101and notifies the GDB event loop. Also, the event is extracted from the target
102using waitpid and stored for future use. Whenever GDB core decides to handle
103the event, and calls into linux-nat.c, we disable SIGCHLD and process things
104as in sync mode, except that before waitpid call we check if there are any
105previously read events.
106
107It could happen that during event processing, we'll try to get more events
108than there are events in the local queue, which will result to waitpid call.
Those waitpid calls, while blocking, are guaranteed to always have
110something for waitpid to return. E.g., stopping a thread with SIGSTOP, and
111waiting for the lwp to stop.
112
113The event loop is notified about new events using a pipe. SIGCHLD handler does
114waitpid and writes the results in to a pipe. GDB event loop has the other end
115of the pipe among the sources. When event loop starts to process the event
116and calls a function in linux-nat.c, all events from the pipe are transferred
117into a local queue and SIGCHLD is blocked. Further processing goes as in sync
118mode. Before we return from linux_nat_wait, we transfer all unprocessed events
119from local queue back to the pipe, so that when we get back to event loop,
120event loop will notice there's something more to do.
121
122SIGCHLD is blocked when we're inside target_wait, so that should we actually
123want to wait for some more events, SIGCHLD handler does not steal them from
124us. Technically, it would be possible to add new events to the local queue but
125it's about the same amount of work as blocking SIGCHLD.
126
127This moving of events from pipe into local queue and back into pipe when we
128enter/leave linux-nat.c is somewhat ugly. Unfortunately, GDB event loop is
home-grown and incapable of waiting on any queue.
130
131Use of signals
132==============
133
134We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
135signal is not entirely significant; we just need for a signal to be delivered,
136so that we can intercept it. SIGSTOP's advantage is that it can not be
137blocked. A disadvantage is that it is not a real-time signal, so it can only
138be queued once; we do not keep track of other sources of SIGSTOP.
139
140Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
141use them, because they have special behavior when the signal is generated -
142not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
143kills the entire thread group.
144
145A delivered SIGSTOP would stop the entire thread group, not just the thread we
146tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
147cancel it (by PTRACE_CONT without passing SIGSTOP).
148
149We could use a real-time signal instead. This would solve those problems; we
150could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
151But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
152generates it, and there are races with trying to find a signal that is not
153blocked. */
a0ef4274 154
dba24537
AC
155#ifndef O_LARGEFILE
156#define O_LARGEFILE 0
157#endif
0274a8ce 158
3993f6b1
DJ
159/* If the system headers did not provide the constants, hard-code the normal
160 values. */
161#ifndef PTRACE_EVENT_FORK
162
163#define PTRACE_SETOPTIONS 0x4200
164#define PTRACE_GETEVENTMSG 0x4201
165
166/* options set using PTRACE_SETOPTIONS */
167#define PTRACE_O_TRACESYSGOOD 0x00000001
168#define PTRACE_O_TRACEFORK 0x00000002
169#define PTRACE_O_TRACEVFORK 0x00000004
170#define PTRACE_O_TRACECLONE 0x00000008
171#define PTRACE_O_TRACEEXEC 0x00000010
9016a515
DJ
172#define PTRACE_O_TRACEVFORKDONE 0x00000020
173#define PTRACE_O_TRACEEXIT 0x00000040
3993f6b1
DJ
174
175/* Wait extended result codes for the above trace options. */
176#define PTRACE_EVENT_FORK 1
177#define PTRACE_EVENT_VFORK 2
178#define PTRACE_EVENT_CLONE 3
179#define PTRACE_EVENT_EXEC 4
c874c7fc 180#define PTRACE_EVENT_VFORK_DONE 5
9016a515 181#define PTRACE_EVENT_EXIT 6
3993f6b1
DJ
182
183#endif /* PTRACE_EVENT_FORK */
184
185/* We can't always assume that this flag is available, but all systems
186 with the ptrace event handlers also have __WALL, so it's safe to use
187 here. */
188#ifndef __WALL
189#define __WALL 0x40000000 /* Wait for any child. */
190#endif
191
02d3ff8c
UW
192#ifndef PTRACE_GETSIGINFO
193#define PTRACE_GETSIGINFO 0x4202
194#endif
195
10d6c8cd
DJ
196/* The single-threaded native GNU/Linux target_ops. We save a pointer for
197 the use of the multi-threaded target. */
198static struct target_ops *linux_ops;
f973ed9c 199static struct target_ops linux_ops_saved;
10d6c8cd 200
9f0bdab8
DJ
201/* The method to call, if any, when a new thread is attached. */
202static void (*linux_nat_new_thread) (ptid_t);
203
ac264b3b
MS
204/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
205 Called by our to_xfer_partial. */
206static LONGEST (*super_xfer_partial) (struct target_ops *,
207 enum target_object,
208 const char *, gdb_byte *,
209 const gdb_byte *,
10d6c8cd
DJ
210 ULONGEST, LONGEST);
211
/* Non-zero enables debug output from this module.  */
static int debug_linux_nat;

/* "show" hook: print the current value of the debug_linux_nat
   setting to FILE.  VALUE is the pre-rendered setting string.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
d6b0e80f 220
b84876c2
PA
/* Non-zero enables debug output from the async (event-loop driven)
   machinery in this module.  */
static int debug_linux_nat_async = 0;

/* "show" hook: print the current value of the debug_linux_nat_async
   setting to FILE.  */
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}
229
10568435
JK
/* Non-zero means address space randomization should be disabled for
   new inferiors when possible.  Defaults to on.  (The actual use of
   this flag — presumably via personality (ADDR_NO_RANDOMIZE), see the
   HAVE_PERSONALITY block at the top of the file — is outside this
   chunk; confirm at the point of use.)  */
static int disable_randomization = 1;

/* "show" hook for disable_randomization: report the current setting,
   or note that this host cannot support it at all.  */
static void
show_disable_randomization (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
#ifdef HAVE_PERSONALITY
  fprintf_filtered (file, _("\
Disabling randomization of debuggee's virtual address space is %s.\n"),
		    value);
#else /* !HAVE_PERSONALITY */
  fputs_filtered (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform.\n"), file);
#endif /* !HAVE_PERSONALITY */
}
246
/* "set" hook for disable_randomization: reject the setting outright
   on hosts without personality (2) support, since there would be no
   way to honor it.  On capable hosts this is a no-op — the flag is
   consumed elsewhere when the inferior is created.  */
static void
set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
{
#ifndef HAVE_PERSONALITY
  error (_("\
Disabling randomization of debuggee's virtual address space is unsupported on\n\
this platform."));
#endif /* !HAVE_PERSONALITY */
}
256
9016a515
DJ
/* PID of a vforked child's parent that we are quietly holding on to
   (set in linux_child_follow_fork when following the child of a
   vfork).  */
static int linux_parent_pid;

/* A node in a singly-linked list of (PID, wait status) pairs,
   recording stop events seen before we were ready to handle them.  */
struct simple_pid_list
{
  int pid;
  int status;			/* Wait status, as returned by waitpid.  */
  struct simple_pid_list *next;
};
/* Head of the list of recorded stopped PIDs.  */
struct simple_pid_list *stopped_pids;
266
3993f6b1
DJ
267/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
268 can not be used, 1 if it can. */
269
270static int linux_supports_tracefork_flag = -1;
271
9016a515
DJ
272/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
273 PTRACE_O_TRACEVFORKDONE. */
274
275static int linux_supports_tracevforkdone_flag = -1;
276
b84876c2
PA
277/* Async mode support */
278
b84876c2
PA
279/* True if async mode is currently on. */
280static int linux_nat_async_enabled;
281
282/* Zero if the async mode, although enabled, is masked, which means
283 linux_nat_wait should behave as if async mode was off. */
284static int linux_nat_async_mask_value = 1;
285
286/* The read/write ends of the pipe registered as waitable file in the
287 event loop. */
288static int linux_nat_event_pipe[2] = { -1, -1 };
289
290/* Number of queued events in the pipe. */
291static volatile int linux_nat_num_queued_events;
292
84e46146 293/* The possible SIGCHLD handling states. */
b84876c2 294
84e46146
PA
295enum sigchld_state
296{
297 /* SIGCHLD disabled, with action set to sigchld_handler, for the
298 sigsuspend in linux_nat_wait. */
299 sigchld_sync,
300 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
301 sigchld_async,
302 /* Set SIGCHLD to default action. Used while creating an
303 inferior. */
304 sigchld_default
305};
306
307/* The current SIGCHLD handling state. */
308static enum sigchld_state linux_nat_async_events_state;
309
310static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
b84876c2
PA
311static void pipe_to_local_event_queue (void);
312static void local_event_queue_to_pipe (void);
313static void linux_nat_event_pipe_push (int pid, int status, int options);
314static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
315static void linux_nat_set_async_mode (int on);
316static void linux_nat_async (void (*callback)
317 (enum inferior_event_type event_type, void *context),
318 void *context);
319static int linux_nat_async_mask (int mask);
a0ef4274 320static int kill_lwp (int lwpid, int signo);
b84876c2 321
4c28f408
PA
322static int send_sigint_callback (struct lwp_info *lp, void *data);
323static int stop_callback (struct lwp_info *lp, void *data);
324
b84876c2
PA
325/* Captures the result of a successful waitpid call, along with the
326 options used in that call. */
327struct waitpid_result
328{
329 int pid;
330 int status;
331 int options;
332 struct waitpid_result *next;
333};
334
335/* A singly-linked list of the results of the waitpid calls performed
336 in the async SIGCHLD handler. */
337static struct waitpid_result *waitpid_queue = NULL;
338
339static int
340queued_waitpid (int pid, int *status, int flags)
341{
342 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
343
344 if (debug_linux_nat_async)
345 fprintf_unfiltered (gdb_stdlog,
346 "\
84e46146
PA
347QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
348 linux_nat_async_events_state,
b84876c2
PA
349 linux_nat_num_queued_events);
350
351 if (flags & __WALL)
352 {
353 for (; msg; prev = msg, msg = msg->next)
354 if (pid == -1 || pid == msg->pid)
355 break;
356 }
357 else if (flags & __WCLONE)
358 {
359 for (; msg; prev = msg, msg = msg->next)
360 if (msg->options & __WCLONE
361 && (pid == -1 || pid == msg->pid))
362 break;
363 }
364 else
365 {
366 for (; msg; prev = msg, msg = msg->next)
367 if ((msg->options & __WCLONE) == 0
368 && (pid == -1 || pid == msg->pid))
369 break;
370 }
371
372 if (msg)
373 {
374 int pid;
375
376 if (prev)
377 prev->next = msg->next;
378 else
379 waitpid_queue = msg->next;
380
381 msg->next = NULL;
382 if (status)
383 *status = msg->status;
384 pid = msg->pid;
385
386 if (debug_linux_nat_async)
387 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
388 pid, msg->status);
389 xfree (msg);
390
391 return pid;
392 }
393
394 if (debug_linux_nat_async)
395 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
396
397 if (status)
398 *status = 0;
399 return -1;
400}
401
402static void
403push_waitpid (int pid, int status, int options)
404{
405 struct waitpid_result *event, *new_event;
406
407 new_event = xmalloc (sizeof (*new_event));
408 new_event->pid = pid;
409 new_event->status = status;
410 new_event->options = options;
411 new_event->next = NULL;
412
413 if (waitpid_queue)
414 {
415 for (event = waitpid_queue;
416 event && event->next;
417 event = event->next)
418 ;
419
420 event->next = new_event;
421 }
422 else
423 waitpid_queue = new_event;
424}
425
710151dd 426/* Drain all queued events of PID. If PID is -1, the effect is of
b84876c2
PA
427 draining all events. */
428static void
429drain_queued_events (int pid)
430{
431 while (queued_waitpid (pid, NULL, __WALL) != -1)
432 ;
433}
434
ae087d01
DJ
435\f
436/* Trivial list manipulation functions to keep track of a list of
437 new stopped processes. */
438static void
3d799a95 439add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
440{
441 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
442 new_pid->pid = pid;
3d799a95 443 new_pid->status = status;
ae087d01
DJ
444 new_pid->next = *listp;
445 *listp = new_pid;
446}
447
448static int
3d799a95 449pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
ae087d01
DJ
450{
451 struct simple_pid_list **p;
452
453 for (p = listp; *p != NULL; p = &(*p)->next)
454 if ((*p)->pid == pid)
455 {
456 struct simple_pid_list *next = (*p)->next;
3d799a95 457 *status = (*p)->status;
ae087d01
DJ
458 xfree (*p);
459 *p = next;
460 return 1;
461 }
462 return 0;
463}
464
3d799a95
DJ
/* Record that PID stopped with wait status STATUS before we were
   ready to handle the event, by pushing it onto the global
   stopped_pids list for later retrieval.  */
static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}
470
3993f6b1
DJ
471\f
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: ask to be traced by the parent, raise SIGSTOP so
   the parent can set ptrace options on us, then (once resumed) fork a
   grandchild — so the parent can check for a PTRACE_EVENT_FORK — and
   exit.  */

static void
linux_tracefork_child (void)
{
  /* The original declared an `int ret' local that was never used;
     removed.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
484
b84876c2
PA
485/* Wrapper function for waitpid which handles EINTR, and checks for
486 locally queued events. */
b957e937
DJ
487
488static int
489my_waitpid (int pid, int *status, int flags)
490{
491 int ret;
b84876c2
PA
492
493 /* There should be no concurrent calls to waitpid. */
84e46146 494 gdb_assert (linux_nat_async_events_state == sigchld_sync);
b84876c2
PA
495
496 ret = queued_waitpid (pid, status, flags);
497 if (ret != -1)
498 return ret;
499
b957e937
DJ
500 do
501 {
502 ret = waitpid (pid, status, flags);
503 }
504 while (ret == -1 && errno == EINTR);
505
506 return ret;
507}
508
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.

   Sets linux_supports_tracefork_flag and
   linux_supports_tracevforkdone_flag as a side effect.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;
  enum sigchld_state async_events_original_state;

  /* Fall back to synchronous SIGCHLD handling while probing; restore
     the caller's state on every exit path below.  */
  async_events_original_state = linux_nat_async_events (sigchld_sync);

  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* Quick check: if the option is rejected outright, the feature is
     definitely absent.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();

  /* Wait for the child's self-inflicted SIGSTOP.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* Option rejected on the test child: clean the child up and
	 leave linux_supports_tracefork_flag at 0.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  linux_nat_async_events (async_events_original_state);
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
		   "killed child"), status);

      linux_nat_async_events (async_events_original_state);
      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run; it will fork and, if TRACEFORK really works,
     stop with a PTRACE_EVENT_FORK in the high bits of its status.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  /* We both got the event and could extract the grandchild's
	     PID: the feature works.  Reap the grandchild.  */
	  linux_supports_tracefork_flag = 1;
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  /* Clean up the test child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);

  linux_nat_async_events (async_events_original_state);
}
614
/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  The probe
   is run lazily, at most once, using PID as the test subject.  */

static int
linux_supports_tracefork (int pid)
{
  /* -1 means "not yet probed".  */
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}
625
/* Return non-zero iff PTRACE_O_TRACEVFORKDONE is available, running
   the (shared) lazy probe first if needed.  PID is only used as the
   probe's test subject.  */
static int
linux_supports_tracevforkdone (int pid)
{
  /* The same probe determines both flags, so gate on the tracefork
     flag's "not yet probed" state.  */
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}
633
3993f6b1 634\f
4de4c07c
DJ
/* Enable extended ptrace event reporting (fork, vfork, exec, clone,
   and — when available — vfork-done) for the LWP or process PTID,
   provided the kernel supports PTRACE_SETOPTIONS at all.  */
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);
  int options;

  /* A ptid with no LWP component identifies a plain process.  */
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
    | PTRACE_O_TRACECLONE;
  if (linux_supports_tracevforkdone (pid))
    options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, options);
}
657
6d8fd2b7
UW
/* Target hook run after attaching to process PID: turn on extended
   ptrace event reporting and check whether a thread library is in
   use.  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}
664
/* Target hook run after starting a new inferior PTID: turn on
   extended ptrace event reporting and check whether a thread library
   is in use.  Mirrors linux_child_post_attach.  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}
671
6d8fd2b7
UW
/* The target's follow_fork method: called after a fork/vfork event to
   either stay with the parent (FOLLOW_CHILD == 0) — detaching from or
   retaining the child per `detach_fork' — or switch GDB over to the
   child.  Always returns 0.  */
static int
linux_child_follow_fork (struct target_ops *ops, int follow_child)
{
  ptid_t last_ptid;
  struct target_waitstatus last_status;
  int has_vforked;
  int parent_pid, child_pid;

  /* Suspend async event handling while we rearrange processes.  */
  if (target_can_async_p ())
    target_async (NULL, 0);

  /* Recover the fork event that got us here; the child's PID rides in
     the related_pid field.  */
  get_last_target_status (&last_ptid, &last_status);
  has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
  parent_pid = ptid_get_lwp (last_ptid);
  if (parent_pid == 0)
    parent_pid = ptid_get_pid (last_ptid);
  child_pid = PIDGET (last_status.value.related_pid);

  if (! follow_child)
    {
      /* We're already attached to the parent, by default. */

      /* Before detaching from the child, remove all breakpoints from
         it.  (This won't actually modify the breakpoint list, but will
         physically remove the breakpoints from the child.) */
      /* If we vforked this will remove the breakpoints from the parent
	 also, but they'll be reinserted below.  */
      detach_breakpoints (child_pid);

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  if (info_verbose || debug_linux_nat)
	    {
	      target_terminal_ours ();
	      fprintf_filtered (gdb_stdlog,
				"Detaching after fork from child process %d.\n",
				child_pid);
	    }

	  ptrace (PTRACE_DETACH, child_pid, 0, 0);
	}
      else
	{
	  struct fork_info *fp;
	  /* Retain child fork in ptrace (stopped) state.  */
	  fp = find_fork_pid (child_pid);
	  if (!fp)
	    fp = add_fork (child_pid);
	  fork_save_infrun_state (fp, 0);
	}

      if (has_vforked)
	{
	  /* The probe must already have run, since we saw a vfork
	     event at all.  */
	  gdb_assert (linux_supports_tracefork_flag >= 0);
	  if (linux_supports_tracevforkdone (0))
	    {
	      /* Resume the parent and wait for the vfork-done event,
		 which marks the point where the child has released
		 the parent's (shared) address space.  */
	      int status;

	      ptrace (PTRACE_CONT, parent_pid, 0, 0);
	      my_waitpid (parent_pid, &status, __WALL);
	      if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
		warning (_("Unexpected waitpid result %06x when waiting for "
			   "vfork-done"), status);
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      usleep (10000);
	    }

	  /* Since we vforked, breakpoints were removed in the parent
	     too.  Put them back.  */
	  reattach_breakpoints (parent_pid);
	}
    }
  else
    {
      /* Follow the child: detach from the parent and make the child
	 the current inferior.  */
      /* NOTE(review): child_pid_spelling appears to be unused in this
	 function — candidate for removal.  */
      char child_pid_spelling[40];

      /* Needed to keep the breakpoint lists in sync.  */
      if (! has_vforked)
	detach_breakpoints (child_pid);

      /* Before detaching from the parent, remove all breakpoints from it. */
      remove_breakpoints ();

      if (info_verbose || debug_linux_nat)
	{
	  target_terminal_ours ();
	  fprintf_filtered (gdb_stdlog,
			    "Attaching after fork to child process %d.\n",
			    child_pid);
	}

      /* If we're vforking, we may want to hold on to the parent until
	 the child exits or execs.  At exec time we can remove the old
	 breakpoints from the parent and detach it; at exit time we
	 could do the same (or even, sneakily, resume debugging it - the
	 child's exec has failed, or something similar).

	 This doesn't clean up "properly", because we can't call
	 target_detach, but that's OK; if the current target is "child",
	 then it doesn't need any further cleanups, and lin_lwp will
	 generally not encounter vfork (vfork is defined to fork
	 in libpthread.so).

	 The holding part is very easy if we have VFORKDONE events;
	 but keeping track of both processes is beyond GDB at the
	 moment.  So we don't expose the parent to the rest of GDB.
	 Instead we quietly hold onto it until such time as we can
	 safely resume it.  */

      if (has_vforked)
	linux_parent_pid = parent_pid;
      else if (!detach_fork)
	{
	  struct fork_info *fp;
	  /* Retain parent fork in ptrace (stopped) state.  */
	  fp = find_fork_pid (parent_pid);
	  if (!fp)
	    fp = add_fork (parent_pid);
	  fork_save_infrun_state (fp, 0);
	}
      else
	target_detach (NULL, 0);

      inferior_ptid = ptid_build (child_pid, child_pid, 0);

      /* Reinstall ourselves, since we might have been removed in
	 target_detach (which does other necessary cleanup).  */

      push_target (ops);
      linux_nat_switch_fork (inferior_ptid);
      check_for_thread_db ();

      /* Reset breakpoints in the child as appropriate.  */
      follow_inferior_reset_breakpoints ();
    }

  /* Re-enable async event handling now that the dust has settled.  */
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  return 0;
}
847
4de4c07c 848\f
6d8fd2b7
UW
/* Fork catchpoints rely on PTRACE_O_TRACEFORK event reporting; error
   out if that support is missing.  PID is the process used for the
   lazy feature probe.  */
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}
855
6d8fd2b7
UW
/* Vfork catchpoints rely on the same PTRACE_O_TRACEFORK support
   probe; error out if it is missing.  */
static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}
862
6d8fd2b7
UW
/* Exec catchpoints rely on the same PTRACE_O_TRACEFORK support probe
   (which gates all extended ptrace events); error out if it is
   missing.  */
static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}
869
d6b0e80f
AC
870/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
871 are processes sharing the same VM space. A multi-threaded process
872 is basically a group of such processes. However, such a grouping
873 is almost entirely a user-space issue; the kernel doesn't enforce
874 such a grouping at all (this might change in the future). In
875 general, we'll rely on the threads library (i.e. the GNU/Linux
876 Threads library) to provide such a grouping.
877
878 It is perfectly well possible to write a multi-threaded application
879 without the assistance of a threads library, by using the clone
880 system call directly. This module should be able to give some
881 rudimentary support for debugging such applications if developers
882 specify the CLONE_PTRACE flag in the clone system call, and are
883 using the Linux kernel 2.4 or above.
884
885 Note that there are some peculiarities in GNU/Linux that affect
886 this code:
887
888 - In general one should specify the __WCLONE flag to waitpid in
889 order to make it report events for any of the cloned processes
890 (and leave it out for the initial process). However, if a cloned
891 process has exited the exit status is only reported if the
892 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
893 we cannot use it since GDB must work on older systems too.
894
895 - When a traced, cloned process exits and is waited for by the
896 debugger, the kernel reassigns it to the original parent and
897 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
898 library doesn't notice this, which leads to the "zombie problem":
899 When debugged a multi-threaded process that spawns a lot of
900 threads will run out of processes, even if the threads exit,
901 because the "zombies" stay around. */
902
903/* List of known LWPs. */
9f0bdab8 904struct lwp_info *lwp_list;
d6b0e80f
AC
905
906/* Number of LWPs in the list. */
907static int num_lwps;
d6b0e80f
AC
908\f
909
d6b0e80f
AC
910/* Original signal mask. */
911static sigset_t normal_mask;
912
913/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
914 _initialize_linux_nat. */
915static sigset_t suspend_mask;
916
b84876c2
PA
917/* SIGCHLD action for synchronous mode. */
918struct sigaction sync_sigchld_action;
919
920/* SIGCHLD action for asynchronous mode. */
921static struct sigaction async_sigchld_action;
84e46146
PA
922
923/* SIGCHLD default action, to pass to new inferiors. */
924static struct sigaction sigchld_default_action;
d6b0e80f
AC
925\f
926
927/* Prototypes for local functions. */
928static int stop_wait_callback (struct lwp_info *lp, void *data);
929static int linux_nat_thread_alive (ptid_t ptid);
6d8fd2b7 930static char *linux_child_pid_to_exec_file (int pid);
710151dd
PA
931static int cancel_breakpoint (struct lwp_info *lp);
932
d6b0e80f
AC
933\f
934/* Convert wait status STATUS to a string. Used for printing debug
935 messages only. */
936
937static char *
938status_to_str (int status)
939{
940 static char buf[64];
941
942 if (WIFSTOPPED (status))
943 snprintf (buf, sizeof (buf), "%s (stopped)",
944 strsignal (WSTOPSIG (status)));
945 else if (WIFSIGNALED (status))
946 snprintf (buf, sizeof (buf), "%s (terminated)",
947 strsignal (WSTOPSIG (status)));
948 else
949 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
950
951 return buf;
952}
953
954/* Initialize the list of LWPs. Note that this module, contrary to
955 what GDB's generic threads layer does for its thread list,
956 re-initializes the LWP lists whenever we mourn or detach (which
957 doesn't involve mourning) the inferior. */
958
959static void
960init_lwp_list (void)
961{
962 struct lwp_info *lp, *lpnext;
963
964 for (lp = lwp_list; lp; lp = lpnext)
965 {
966 lpnext = lp->next;
967 xfree (lp);
968 }
969
970 lwp_list = NULL;
971 num_lwps = 0;
d6b0e80f
AC
972}
973
f973ed9c 974/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
975 structure describing the new LWP. The LWP should already be stopped
976 (with an exception for the very first LWP). */
d6b0e80f
AC
977
978static struct lwp_info *
979add_lwp (ptid_t ptid)
980{
981 struct lwp_info *lp;
982
983 gdb_assert (is_lwp (ptid));
984
985 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
986
987 memset (lp, 0, sizeof (struct lwp_info));
988
989 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
990
991 lp->ptid = ptid;
992
993 lp->next = lwp_list;
994 lwp_list = lp;
f973ed9c 995 ++num_lwps;
d6b0e80f 996
9f0bdab8
DJ
997 if (num_lwps > 1 && linux_nat_new_thread != NULL)
998 linux_nat_new_thread (ptid);
999
d6b0e80f
AC
1000 return lp;
1001}
1002
1003/* Remove the LWP specified by PID from the list. */
1004
1005static void
1006delete_lwp (ptid_t ptid)
1007{
1008 struct lwp_info *lp, *lpprev;
1009
1010 lpprev = NULL;
1011
1012 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1013 if (ptid_equal (lp->ptid, ptid))
1014 break;
1015
1016 if (!lp)
1017 return;
1018
d6b0e80f
AC
1019 num_lwps--;
1020
1021 if (lpprev)
1022 lpprev->next = lp->next;
1023 else
1024 lwp_list = lp->next;
1025
1026 xfree (lp);
1027}
1028
1029/* Return a pointer to the structure describing the LWP corresponding
1030 to PID. If no corresponding LWP could be found, return NULL. */
1031
1032static struct lwp_info *
1033find_lwp_pid (ptid_t ptid)
1034{
1035 struct lwp_info *lp;
1036 int lwp;
1037
1038 if (is_lwp (ptid))
1039 lwp = GET_LWP (ptid);
1040 else
1041 lwp = GET_PID (ptid);
1042
1043 for (lp = lwp_list; lp; lp = lp->next)
1044 if (lwp == GET_LWP (lp->ptid))
1045 return lp;
1046
1047 return NULL;
1048}
1049
1050/* Call CALLBACK with its second argument set to DATA for every LWP in
1051 the list. If CALLBACK returns 1 for a particular LWP, return a
1052 pointer to the structure describing that LWP immediately.
1053 Otherwise return NULL. */
1054
1055struct lwp_info *
1056iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
1057{
1058 struct lwp_info *lp, *lpnext;
1059
1060 for (lp = lwp_list; lp; lp = lpnext)
1061 {
1062 lpnext = lp->next;
1063 if ((*callback) (lp, data))
1064 return lp;
1065 }
1066
1067 return NULL;
1068}
1069
f973ed9c
DJ
1070/* Update our internal state when changing from one fork (checkpoint,
1071 et cetera) to another indicated by NEW_PTID. We can only switch
1072 single-threaded applications, so we only create one new LWP, and
1073 the previous list is discarded. */
1074
1075void
1076linux_nat_switch_fork (ptid_t new_ptid)
1077{
1078 struct lwp_info *lp;
1079
728c8f58 1080 init_thread_list ();
f973ed9c
DJ
1081 init_lwp_list ();
1082 lp = add_lwp (new_ptid);
728c8f58 1083 add_thread_silent (new_ptid);
f973ed9c
DJ
1084 lp->stopped = 1;
1085}
1086
e26af52f
DJ
1087/* Record a PTID for later deletion. */
1088
1089struct saved_ptids
1090{
1091 ptid_t ptid;
1092 struct saved_ptids *next;
1093};
1094static struct saved_ptids *threads_to_delete;
1095
1096static void
1097record_dead_thread (ptid_t ptid)
1098{
1099 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
1100 p->ptid = ptid;
1101 p->next = threads_to_delete;
1102 threads_to_delete = p;
1103}
1104
1105/* Delete any dead threads which are not the current thread. */
1106
1107static void
1108prune_lwps (void)
1109{
1110 struct saved_ptids **p = &threads_to_delete;
1111
1112 while (*p)
1113 if (! ptid_equal ((*p)->ptid, inferior_ptid))
1114 {
1115 struct saved_ptids *tmp = *p;
1116 delete_thread (tmp->ptid);
1117 *p = tmp->next;
1118 xfree (tmp);
1119 }
1120 else
1121 p = &(*p)->next;
1122}
1123
e26af52f
DJ
1124/* Handle the exit of a single thread LP. */
1125
1126static void
1127exit_lwp (struct lwp_info *lp)
1128{
063bfe2e
VP
1129 struct thread_info *th = find_thread_pid (lp->ptid);
1130
1131 if (th)
e26af52f 1132 {
17faa917
DJ
1133 if (print_thread_events)
1134 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1135
e26af52f
DJ
1136 /* Core GDB cannot deal with us deleting the current thread. */
1137 if (!ptid_equal (lp->ptid, inferior_ptid))
1138 delete_thread (lp->ptid);
1139 else
1140 record_dead_thread (lp->ptid);
e26af52f
DJ
1141 }
1142
1143 delete_lwp (lp->ptid);
1144}
1145
a0ef4274
DJ
1146/* Detect `T (stopped)' in `/proc/PID/status'.
1147 Other states including `T (tracing stop)' are reported as false. */
1148
/* Detect `T (stopped)' in `/proc/PID/status'.
   Other states including `T (tracing stop)' are reported as false.
   Also returns false if the status file cannot be read.  */

static int
pid_is_stopped (pid_t pid)
{
  FILE *status_file;
  char buf[100];
  int retval = 0;

  snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
  status_file = fopen (buf, "r");
  if (status_file == NULL)
    return 0;

  /* Scan for the "State:" line and test it for a job-control stop.  */
  while (fgets (buf, sizeof (buf), status_file) != NULL)
    if (strncmp (buf, "State:", 6) == 0)
      {
	retval = (strstr (buf, "T (stopped)") != NULL);
	break;
      }

  fclose (status_file);
  return retval;
}
1176
1177/* Wait for the LWP specified by LP, which we have just attached to.
1178 Returns a wait status for that LWP, to cache. */
1179
1180static int
1181linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1182 int *signalled)
1183{
1184 pid_t new_pid, pid = GET_LWP (ptid);
1185 int status;
1186
1187 if (pid_is_stopped (pid))
1188 {
1189 if (debug_linux_nat)
1190 fprintf_unfiltered (gdb_stdlog,
1191 "LNPAW: Attaching to a stopped process\n");
1192
1193 /* The process is definitely stopped. It is in a job control
1194 stop, unless the kernel predates the TASK_STOPPED /
1195 TASK_TRACED distinction, in which case it might be in a
1196 ptrace stop. Make sure it is in a ptrace stop; from there we
1197 can kill it, signal it, et cetera.
1198
1199 First make sure there is a pending SIGSTOP. Since we are
1200 already attached, the process can not transition from stopped
1201 to running without a PTRACE_CONT; so we know this signal will
1202 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1203 probably already in the queue (unless this kernel is old
1204 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1205 is not an RT signal, it can only be queued once. */
1206 kill_lwp (pid, SIGSTOP);
1207
1208 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1209 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1210 ptrace (PTRACE_CONT, pid, 0, 0);
1211 }
1212
1213 /* Make sure the initial process is stopped. The user-level threads
1214 layer might want to poke around in the inferior, and that won't
1215 work if things haven't stabilized yet. */
1216 new_pid = my_waitpid (pid, &status, 0);
1217 if (new_pid == -1 && errno == ECHILD)
1218 {
1219 if (first)
1220 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1221
1222 /* Try again with __WCLONE to check cloned processes. */
1223 new_pid = my_waitpid (pid, &status, __WCLONE);
1224 *cloned = 1;
1225 }
1226
1227 gdb_assert (pid == new_pid && WIFSTOPPED (status));
1228
1229 if (WSTOPSIG (status) != SIGSTOP)
1230 {
1231 *signalled = 1;
1232 if (debug_linux_nat)
1233 fprintf_unfiltered (gdb_stdlog,
1234 "LNPAW: Received %s after attaching\n",
1235 status_to_str (status));
1236 }
1237
1238 return status;
1239}
1240
1241/* Attach to the LWP specified by PID. Return 0 if successful or -1
1242 if the new LWP could not be attached. */
d6b0e80f 1243
9ee57c33 1244int
93815fbf 1245lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1246{
9ee57c33 1247 struct lwp_info *lp;
84e46146 1248 enum sigchld_state async_events_original_state;
d6b0e80f
AC
1249
1250 gdb_assert (is_lwp (ptid));
1251
84e46146 1252 async_events_original_state = linux_nat_async_events (sigchld_sync);
d6b0e80f 1253
9ee57c33 1254 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1255
1256 /* We assume that we're already attached to any LWP that has an id
1257 equal to the overall process id, and to any LWP that is already
1258 in our list of LWPs. If we're not seeing exit events from threads
1259 and we've had PID wraparound since we last tried to stop all threads,
1260 this assumption might be wrong; fortunately, this is very unlikely
1261 to happen. */
9ee57c33 1262 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1263 {
a0ef4274 1264 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1265
1266 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1267 {
1268 /* If we fail to attach to the thread, issue a warning,
1269 but continue. One way this can happen is if thread
e9efe249 1270 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1271 bug may place threads in the thread list and then fail
1272 to create them. */
1273 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1274 safe_strerror (errno));
1275 return -1;
1276 }
1277
d6b0e80f
AC
1278 if (debug_linux_nat)
1279 fprintf_unfiltered (gdb_stdlog,
1280 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1281 target_pid_to_str (ptid));
1282
a0ef4274
DJ
1283 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1284 lp = add_lwp (ptid);
1285 lp->stopped = 1;
1286 lp->cloned = cloned;
1287 lp->signalled = signalled;
1288 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1289 {
a0ef4274
DJ
1290 lp->resumed = 1;
1291 lp->status = status;
d6b0e80f
AC
1292 }
1293
a0ef4274 1294 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1295
1296 if (debug_linux_nat)
1297 {
1298 fprintf_unfiltered (gdb_stdlog,
1299 "LLAL: waitpid %s received %s\n",
1300 target_pid_to_str (ptid),
1301 status_to_str (status));
1302 }
1303 }
1304 else
1305 {
1306 /* We assume that the LWP representing the original process is
1307 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1308 that the GNU/linux ptrace layer uses to keep track of
1309 threads. Note that this won't have already been done since
1310 the main thread will have, we assume, been stopped by an
1311 attach from a different layer. */
9ee57c33
DJ
1312 if (lp == NULL)
1313 lp = add_lwp (ptid);
d6b0e80f
AC
1314 lp->stopped = 1;
1315 }
9ee57c33 1316
84e46146 1317 linux_nat_async_events (async_events_original_state);
9ee57c33 1318 return 0;
d6b0e80f
AC
1319}
1320
b84876c2
PA
1321static void
1322linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
1323 int from_tty)
1324{
1325 int saved_async = 0;
10568435
JK
1326#ifdef HAVE_PERSONALITY
1327 int personality_orig = 0, personality_set = 0;
1328#endif /* HAVE_PERSONALITY */
b84876c2
PA
1329
1330 /* The fork_child mechanism is synchronous and calls target_wait, so
1331 we have to mask the async mode. */
1332
1333 if (target_can_async_p ())
84e46146
PA
1334 /* Mask async mode. Creating a child requires a loop calling
1335 wait_for_inferior currently. */
b84876c2
PA
1336 saved_async = linux_nat_async_mask (0);
1337 else
1338 {
1339 /* Restore the original signal mask. */
1340 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1341 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1342 suspend_mask = normal_mask;
1343 sigdelset (&suspend_mask, SIGCHLD);
1344 }
1345
84e46146
PA
1346 /* Set SIGCHLD to the default action, until after execing the child,
1347 since the inferior inherits the superior's signal mask. It will
1348 be blocked again in linux_nat_wait, which is only reached after
1349 the inferior execing. */
1350 linux_nat_async_events (sigchld_default);
1351
10568435
JK
1352#ifdef HAVE_PERSONALITY
1353 if (disable_randomization)
1354 {
1355 errno = 0;
1356 personality_orig = personality (0xffffffff);
1357 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1358 {
1359 personality_set = 1;
1360 personality (personality_orig | ADDR_NO_RANDOMIZE);
1361 }
1362 if (errno != 0 || (personality_set
1363 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1364 warning (_("Error disabling address space randomization: %s"),
1365 safe_strerror (errno));
1366 }
1367#endif /* HAVE_PERSONALITY */
1368
b84876c2
PA
1369 linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);
1370
10568435
JK
1371#ifdef HAVE_PERSONALITY
1372 if (personality_set)
1373 {
1374 errno = 0;
1375 personality (personality_orig);
1376 if (errno != 0)
1377 warning (_("Error restoring address space randomization: %s"),
1378 safe_strerror (errno));
1379 }
1380#endif /* HAVE_PERSONALITY */
1381
b84876c2
PA
1382 if (saved_async)
1383 linux_nat_async_mask (saved_async);
1384}
1385
d6b0e80f
AC
1386static void
1387linux_nat_attach (char *args, int from_tty)
1388{
1389 struct lwp_info *lp;
d6b0e80f
AC
1390 int status;
1391
1392 /* FIXME: We should probably accept a list of process id's, and
1393 attach all of them. */
10d6c8cd 1394 linux_ops->to_attach (args, from_tty);
d6b0e80f 1395
b84876c2
PA
1396 if (!target_can_async_p ())
1397 {
1398 /* Restore the original signal mask. */
1399 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1400 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1401 suspend_mask = normal_mask;
1402 sigdelset (&suspend_mask, SIGCHLD);
1403 }
1404
9f0bdab8
DJ
1405 /* Add the initial process as the first LWP to the list. */
1406 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1407 lp = add_lwp (inferior_ptid);
a0ef4274
DJ
1408
1409 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1410 &lp->signalled);
1411 lp->stopped = 1;
9f0bdab8 1412
403fe197
PA
1413 /* If this process is not using thread_db, then we still don't
1414 detect any other threads, but add at least this one. */
1415 add_thread_silent (lp->ptid);
1416
a0ef4274 1417 /* Save the wait status to report later. */
d6b0e80f 1418 lp->resumed = 1;
a0ef4274
DJ
1419 if (debug_linux_nat)
1420 fprintf_unfiltered (gdb_stdlog,
1421 "LNA: waitpid %ld, saving status %s\n",
1422 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd
PA
1423
1424 if (!target_can_async_p ())
a0ef4274 1425 lp->status = status;
710151dd
PA
1426 else
1427 {
1428 /* We already waited for this LWP, so put the wait result on the
1429 pipe. The event loop will wake up and gets us to handling
1430 this event. */
a0ef4274
DJ
1431 linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
1432 lp->cloned ? __WCLONE : 0);
b84876c2
PA
1433 /* Register in the event loop. */
1434 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1435 }
1436}
1437
a0ef4274
DJ
1438/* Get pending status of LP. */
1439static int
1440get_pending_status (struct lwp_info *lp, int *status)
1441{
1442 struct target_waitstatus last;
1443 ptid_t last_ptid;
1444
1445 get_last_target_status (&last_ptid, &last);
1446
1447 /* If this lwp is the ptid that GDB is processing an event from, the
1448 signal will be in stop_signal. Otherwise, in all-stop + sync
1449 mode, we may cache pending events in lp->status while trying to
1450 stop all threads (see stop_wait_callback). In async mode, the
1451 events are always cached in waitpid_queue. */
1452
1453 *status = 0;
4c28f408
PA
1454
1455 if (non_stop)
a0ef4274 1456 {
4c28f408
PA
1457 enum target_signal signo = TARGET_SIGNAL_0;
1458
1459 if (is_executing (lp->ptid))
1460 {
1461 /* If the core thought this lwp was executing --- e.g., the
1462 executing property hasn't been updated yet, but the
1463 thread has been stopped with a stop_callback /
1464 stop_wait_callback sequence (see linux_nat_detach for
1465 example) --- we can only have pending events in the local
1466 queue. */
1467 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1468 {
1469 if (WIFSTOPPED (status))
1470 signo = target_signal_from_host (WSTOPSIG (status));
1471
1472 /* If not stopped, then the lwp is gone, no use in
1473 resending a signal. */
1474 }
1475 }
1476 else
1477 {
1478 /* If the core knows the thread is not executing, then we
1479 have the last signal recorded in
1480 thread_info->stop_signal, unless this is inferior_ptid,
1481 in which case, it's in the global stop_signal, due to
1482 context switching. */
1483
1484 if (ptid_equal (lp->ptid, inferior_ptid))
1485 signo = stop_signal;
1486 else
1487 {
1488 struct thread_info *tp = find_thread_pid (lp->ptid);
1489 gdb_assert (tp);
1490 signo = tp->stop_signal;
1491 }
1492 }
1493
1494 if (signo != TARGET_SIGNAL_0
1495 && !signal_pass_state (signo))
1496 {
1497 if (debug_linux_nat)
1498 fprintf_unfiltered (gdb_stdlog, "\
1499GPT: lwp %s had signal %s, but it is in no pass state\n",
1500 target_pid_to_str (lp->ptid),
1501 target_signal_to_string (signo));
1502 }
1503 else
1504 {
1505 if (signo != TARGET_SIGNAL_0)
1506 *status = W_STOPCODE (target_signal_to_host (signo));
1507
1508 if (debug_linux_nat)
1509 fprintf_unfiltered (gdb_stdlog,
1510 "GPT: lwp %s as pending signal %s\n",
1511 target_pid_to_str (lp->ptid),
1512 target_signal_to_string (signo));
1513 }
a0ef4274 1514 }
a0ef4274 1515 else
4c28f408
PA
1516 {
1517 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1518 {
1519 if (stop_signal != TARGET_SIGNAL_0
1520 && signal_pass_state (stop_signal))
1521 *status = W_STOPCODE (target_signal_to_host (stop_signal));
1522 }
1523 else if (target_can_async_p ())
1524 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1525 else
1526 *status = lp->status;
1527 }
a0ef4274
DJ
1528
1529 return 0;
1530}
1531
d6b0e80f
AC
1532static int
1533detach_callback (struct lwp_info *lp, void *data)
1534{
1535 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1536
1537 if (debug_linux_nat && lp->status)
1538 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1539 strsignal (WSTOPSIG (lp->status)),
1540 target_pid_to_str (lp->ptid));
1541
a0ef4274
DJ
1542 /* If there is a pending SIGSTOP, get rid of it. */
1543 if (lp->signalled)
d6b0e80f 1544 {
d6b0e80f
AC
1545 if (debug_linux_nat)
1546 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1547 "DC: Sending SIGCONT to %s\n",
1548 target_pid_to_str (lp->ptid));
d6b0e80f 1549
a0ef4274 1550 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1551 lp->signalled = 0;
d6b0e80f
AC
1552 }
1553
1554 /* We don't actually detach from the LWP that has an id equal to the
1555 overall process id just yet. */
1556 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1557 {
a0ef4274
DJ
1558 int status = 0;
1559
1560 /* Pass on any pending signal for this LWP. */
1561 get_pending_status (lp, &status);
1562
d6b0e80f
AC
1563 errno = 0;
1564 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1565 WSTOPSIG (status)) < 0)
8a3fe4f8 1566 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1567 safe_strerror (errno));
1568
1569 if (debug_linux_nat)
1570 fprintf_unfiltered (gdb_stdlog,
1571 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1572 target_pid_to_str (lp->ptid),
1573 strsignal (WSTOPSIG (lp->status)));
1574
1575 delete_lwp (lp->ptid);
1576 }
1577
1578 return 0;
1579}
1580
1581static void
1582linux_nat_detach (char *args, int from_tty)
1583{
b84876c2 1584 int pid;
a0ef4274
DJ
1585 int status;
1586 enum target_signal sig;
1587
b84876c2
PA
1588 if (target_can_async_p ())
1589 linux_nat_async (NULL, 0);
1590
4c28f408
PA
1591 /* Stop all threads before detaching. ptrace requires that the
1592 thread is stopped to sucessfully detach. */
1593 iterate_over_lwps (stop_callback, NULL);
1594 /* ... and wait until all of them have reported back that
1595 they're no longer running. */
1596 iterate_over_lwps (stop_wait_callback, NULL);
1597
d6b0e80f
AC
1598 iterate_over_lwps (detach_callback, NULL);
1599
1600 /* Only the initial process should be left right now. */
1601 gdb_assert (num_lwps == 1);
1602
a0ef4274
DJ
1603 /* Pass on any pending signal for the last LWP. */
1604 if ((args == NULL || *args == '\0')
1605 && get_pending_status (lwp_list, &status) != -1
1606 && WIFSTOPPED (status))
1607 {
1608 /* Put the signal number in ARGS so that inf_ptrace_detach will
1609 pass it along with PTRACE_DETACH. */
1610 args = alloca (8);
1611 sprintf (args, "%d", (int) WSTOPSIG (status));
1612 fprintf_unfiltered (gdb_stdlog,
1613 "LND: Sending signal %s to %s\n",
1614 args,
1615 target_pid_to_str (lwp_list->ptid));
1616 }
1617
d6b0e80f
AC
1618 /* Destroy LWP info; it's no longer valid. */
1619 init_lwp_list ();
1620
b84876c2
PA
1621 pid = GET_PID (inferior_ptid);
1622 inferior_ptid = pid_to_ptid (pid);
10d6c8cd 1623 linux_ops->to_detach (args, from_tty);
b84876c2
PA
1624
1625 if (target_can_async_p ())
1626 drain_queued_events (pid);
d6b0e80f
AC
1627}
1628
1629/* Resume LP. */
1630
1631static int
1632resume_callback (struct lwp_info *lp, void *data)
1633{
1634 if (lp->stopped && lp->status == 0)
1635 {
10d6c8cd
DJ
1636 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1637 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1638 if (debug_linux_nat)
1639 fprintf_unfiltered (gdb_stdlog,
1640 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1641 target_pid_to_str (lp->ptid));
1642 lp->stopped = 0;
1643 lp->step = 0;
9f0bdab8 1644 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
d6b0e80f
AC
1645 }
1646
1647 return 0;
1648}
1649
1650static int
1651resume_clear_callback (struct lwp_info *lp, void *data)
1652{
1653 lp->resumed = 0;
1654 return 0;
1655}
1656
1657static int
1658resume_set_callback (struct lwp_info *lp, void *data)
1659{
1660 lp->resumed = 1;
1661 return 0;
1662}
1663
1664static void
1665linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1666{
1667 struct lwp_info *lp;
1668 int resume_all;
1669
76f50ad1
DJ
1670 if (debug_linux_nat)
1671 fprintf_unfiltered (gdb_stdlog,
1672 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1673 step ? "step" : "resume",
1674 target_pid_to_str (ptid),
1675 signo ? strsignal (signo) : "0",
1676 target_pid_to_str (inferior_ptid));
1677
e26af52f
DJ
1678 prune_lwps ();
1679
b84876c2
PA
1680 if (target_can_async_p ())
1681 /* Block events while we're here. */
84e46146 1682 linux_nat_async_events (sigchld_sync);
b84876c2 1683
d6b0e80f
AC
1684 /* A specific PTID means `step only this process id'. */
1685 resume_all = (PIDGET (ptid) == -1);
1686
4c28f408
PA
1687 if (non_stop && resume_all)
1688 internal_error (__FILE__, __LINE__,
1689 "can't resume all in non-stop mode");
1690
1691 if (!non_stop)
1692 {
1693 if (resume_all)
1694 iterate_over_lwps (resume_set_callback, NULL);
1695 else
1696 iterate_over_lwps (resume_clear_callback, NULL);
1697 }
d6b0e80f
AC
1698
1699 /* If PID is -1, it's the current inferior that should be
1700 handled specially. */
1701 if (PIDGET (ptid) == -1)
1702 ptid = inferior_ptid;
1703
1704 lp = find_lwp_pid (ptid);
9f0bdab8 1705 gdb_assert (lp != NULL);
d6b0e80f 1706
4c28f408 1707 /* Convert to something the lower layer understands. */
9f0bdab8 1708 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1709
9f0bdab8
DJ
1710 /* Remember if we're stepping. */
1711 lp->step = step;
d6b0e80f 1712
9f0bdab8
DJ
1713 /* Mark this LWP as resumed. */
1714 lp->resumed = 1;
76f50ad1 1715
9f0bdab8
DJ
1716 /* If we have a pending wait status for this thread, there is no
1717 point in resuming the process. But first make sure that
1718 linux_nat_wait won't preemptively handle the event - we
1719 should never take this short-circuit if we are going to
1720 leave LP running, since we have skipped resuming all the
1721 other threads. This bit of code needs to be synchronized
1722 with linux_nat_wait. */
76f50ad1 1723
710151dd
PA
1724 /* In async mode, we never have pending wait status. */
1725 if (target_can_async_p () && lp->status)
1726 internal_error (__FILE__, __LINE__, "Pending status in async mode");
1727
9f0bdab8
DJ
1728 if (lp->status && WIFSTOPPED (lp->status))
1729 {
1730 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
76f50ad1 1731
9f0bdab8
DJ
1732 if (signal_stop_state (saved_signo) == 0
1733 && signal_print_state (saved_signo) == 0
1734 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1735 {
9f0bdab8
DJ
1736 if (debug_linux_nat)
1737 fprintf_unfiltered (gdb_stdlog,
1738 "LLR: Not short circuiting for ignored "
1739 "status 0x%x\n", lp->status);
1740
d6b0e80f
AC
1741 /* FIXME: What should we do if we are supposed to continue
1742 this thread with a signal? */
1743 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1744 signo = saved_signo;
1745 lp->status = 0;
1746 }
1747 }
76f50ad1 1748
9f0bdab8
DJ
1749 if (lp->status)
1750 {
1751 /* FIXME: What should we do if we are supposed to continue
1752 this thread with a signal? */
1753 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1754
9f0bdab8
DJ
1755 if (debug_linux_nat)
1756 fprintf_unfiltered (gdb_stdlog,
1757 "LLR: Short circuiting for status 0x%x\n",
1758 lp->status);
d6b0e80f 1759
9f0bdab8 1760 return;
d6b0e80f
AC
1761 }
1762
9f0bdab8
DJ
1763 /* Mark LWP as not stopped to prevent it from being continued by
1764 resume_callback. */
1765 lp->stopped = 0;
1766
d6b0e80f
AC
1767 if (resume_all)
1768 iterate_over_lwps (resume_callback, NULL);
1769
10d6c8cd 1770 linux_ops->to_resume (ptid, step, signo);
9f0bdab8
DJ
1771 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1772
d6b0e80f
AC
1773 if (debug_linux_nat)
1774 fprintf_unfiltered (gdb_stdlog,
1775 "LLR: %s %s, %s (resume event thread)\n",
1776 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1777 target_pid_to_str (ptid),
1778 signo ? strsignal (signo) : "0");
b84876c2
PA
1779
1780 if (target_can_async_p ())
8ea051c5 1781 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1782}
1783
1784/* Issue kill to specified lwp. */
1785
1786static int tkill_failed;
1787
/* Issue signal SIGNO to the LWP specified by LWPID.  Prefers the
   tkill syscall (needed for NPTL threads); once tkill is seen to be
   unavailable (ENOSYS), falls back to kill permanently.  */

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

/* Use tkill, if possible, in case we are using nptl threads.  If tkill
   fails, then we are not using nptl threads and we should be using kill.  */

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int ret = syscall (__NR_tkill, lwpid, signo);

      if (errno != ENOSYS)
	return ret;

      /* tkill is not implemented here; remember that and retry with
	 plain kill below (and on all future calls).  */
      tkill_failed = 1;
      errno = 0;
    }
#endif

  return kill (lwpid, signo);
}
1809
3d799a95
DJ
1810/* Handle a GNU/Linux extended wait response. If we see a clone
1811 event, we need to add the new LWP to our list (and not report the
1812 trap to higher layers). This function returns non-zero if the
1813 event should be ignored and we should wait again. If STOPPING is
1814 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1815
1816static int
3d799a95
DJ
1817linux_handle_extended_wait (struct lwp_info *lp, int status,
1818 int stopping)
d6b0e80f 1819{
3d799a95
DJ
1820 int pid = GET_LWP (lp->ptid);
1821 struct target_waitstatus *ourstatus = &lp->waitstatus;
1822 struct lwp_info *new_lp = NULL;
1823 int event = status >> 16;
d6b0e80f 1824
3d799a95
DJ
1825 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1826 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1827 {
3d799a95
DJ
1828 unsigned long new_pid;
1829 int ret;
1830
1831 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1832
3d799a95
DJ
1833 /* If we haven't already seen the new PID stop, wait for it now. */
1834 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1835 {
1836 /* The new child has a pending SIGSTOP. We can't affect it until it
1837 hits the SIGSTOP, but we're already attached. */
1838 ret = my_waitpid (new_pid, &status,
1839 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1840 if (ret == -1)
1841 perror_with_name (_("waiting for new child"));
1842 else if (ret != new_pid)
1843 internal_error (__FILE__, __LINE__,
1844 _("wait returned unexpected PID %d"), ret);
1845 else if (!WIFSTOPPED (status))
1846 internal_error (__FILE__, __LINE__,
1847 _("wait returned unexpected status 0x%x"), status);
1848 }
1849
3a3e9ee3 1850 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95
DJ
1851
1852 if (event == PTRACE_EVENT_FORK)
1853 ourstatus->kind = TARGET_WAITKIND_FORKED;
1854 else if (event == PTRACE_EVENT_VFORK)
1855 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1856 else
3d799a95 1857 {
4c28f408
PA
1858 struct cleanup *old_chain;
1859
3d799a95
DJ
1860 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1861 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1862 new_lp->cloned = 1;
4c28f408 1863 new_lp->stopped = 1;
d6b0e80f 1864
3d799a95
DJ
1865 if (WSTOPSIG (status) != SIGSTOP)
1866 {
1867 /* This can happen if someone starts sending signals to
1868 the new thread before it gets a chance to run, which
1869 have a lower number than SIGSTOP (e.g. SIGUSR1).
1870 This is an unlikely case, and harder to handle for
1871 fork / vfork than for clone, so we do not try - but
1872 we handle it for clone events here. We'll send
1873 the other signal on to the thread below. */
1874
1875 new_lp->signalled = 1;
1876 }
1877 else
1878 status = 0;
d6b0e80f 1879
4c28f408 1880 if (non_stop)
3d799a95 1881 {
4c28f408
PA
1882 /* Add the new thread to GDB's lists as soon as possible
1883 so that:
1884
1885 1) the frontend doesn't have to wait for a stop to
1886 display them, and,
1887
1888 2) we tag it with the correct running state. */
1889
1890 /* If the thread_db layer is active, let it know about
1891 this new thread, and add it to GDB's list. */
1892 if (!thread_db_attach_lwp (new_lp->ptid))
1893 {
1894 /* We're not using thread_db. Add it to GDB's
1895 list. */
1896 target_post_attach (GET_LWP (new_lp->ptid));
1897 add_thread (new_lp->ptid);
1898 }
1899
1900 if (!stopping)
1901 {
1902 set_running (new_lp->ptid, 1);
1903 set_executing (new_lp->ptid, 1);
1904 }
1905 }
1906
1907 if (!stopping)
1908 {
1909 new_lp->stopped = 0;
3d799a95 1910 new_lp->resumed = 1;
4c28f408 1911 ptrace (PTRACE_CONT, new_pid, 0,
3d799a95
DJ
1912 status ? WSTOPSIG (status) : 0);
1913 }
d6b0e80f 1914
3d799a95
DJ
1915 if (debug_linux_nat)
1916 fprintf_unfiltered (gdb_stdlog,
1917 "LHEW: Got clone event from LWP %ld, resuming\n",
1918 GET_LWP (lp->ptid));
1919 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1920
1921 return 1;
1922 }
1923
1924 return 0;
d6b0e80f
AC
1925 }
1926
3d799a95
DJ
1927 if (event == PTRACE_EVENT_EXEC)
1928 {
1929 ourstatus->kind = TARGET_WAITKIND_EXECD;
1930 ourstatus->value.execd_pathname
6d8fd2b7 1931 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1932
1933 if (linux_parent_pid)
1934 {
1935 detach_breakpoints (linux_parent_pid);
1936 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1937
1938 linux_parent_pid = 0;
1939 }
1940
25b22b0a
PA
1941 /* At this point, all inserted breakpoints are gone. Doing this
1942 as soon as we detect an exec prevents the badness of deleting
1943 a breakpoint writing the current "shadow contents" to lift
1944 the bp. That shadow is NOT valid after an exec.
1945
1946 Note that we have to do this after the detach_breakpoints
1947 call above, otherwise breakpoints wouldn't be lifted from the
1948 parent on a vfork, because detach_breakpoints would think
1949 that breakpoints are not inserted. */
1950 mark_breakpoints_out ();
3d799a95
DJ
1951 return 0;
1952 }
1953
1954 internal_error (__FILE__, __LINE__,
1955 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1956}
1957
1958/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1959 exited. */
1960
1961static int
1962wait_lwp (struct lwp_info *lp)
1963{
1964 pid_t pid;
1965 int status;
1966 int thread_dead = 0;
1967
1968 gdb_assert (!lp->stopped);
1969 gdb_assert (lp->status == 0);
1970
58aecb61 1971 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1972 if (pid == -1 && errno == ECHILD)
1973 {
58aecb61 1974 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1975 if (pid == -1 && errno == ECHILD)
1976 {
1977 /* The thread has previously exited. We need to delete it
1978 now because, for some vendor 2.4 kernels with NPTL
1979 support backported, there won't be an exit event unless
1980 it is the main thread. 2.6 kernels will report an exit
1981 event for each thread that exits, as expected. */
1982 thread_dead = 1;
1983 if (debug_linux_nat)
1984 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1985 target_pid_to_str (lp->ptid));
1986 }
1987 }
1988
1989 if (!thread_dead)
1990 {
1991 gdb_assert (pid == GET_LWP (lp->ptid));
1992
1993 if (debug_linux_nat)
1994 {
1995 fprintf_unfiltered (gdb_stdlog,
1996 "WL: waitpid %s received %s\n",
1997 target_pid_to_str (lp->ptid),
1998 status_to_str (status));
1999 }
2000 }
2001
2002 /* Check if the thread has exited. */
2003 if (WIFEXITED (status) || WIFSIGNALED (status))
2004 {
2005 thread_dead = 1;
2006 if (debug_linux_nat)
2007 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2008 target_pid_to_str (lp->ptid));
2009 }
2010
2011 if (thread_dead)
2012 {
e26af52f 2013 exit_lwp (lp);
d6b0e80f
AC
2014 return 0;
2015 }
2016
2017 gdb_assert (WIFSTOPPED (status));
2018
2019 /* Handle GNU/Linux's extended waitstatus for trace events. */
2020 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2021 {
2022 if (debug_linux_nat)
2023 fprintf_unfiltered (gdb_stdlog,
2024 "WL: Handling extended status 0x%06x\n",
2025 status);
3d799a95 2026 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2027 return wait_lwp (lp);
2028 }
2029
2030 return status;
2031}
2032
9f0bdab8
DJ
2033/* Save the most recent siginfo for LP. This is currently only called
2034 for SIGTRAP; some ports use the si_addr field for
2035 target_stopped_data_address. In the future, it may also be used to
2036 restore the siginfo of requeued signals. */
2037
2038static void
2039save_siginfo (struct lwp_info *lp)
2040{
2041 errno = 0;
2042 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2043 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2044
2045 if (errno != 0)
2046 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2047}
2048
d6b0e80f
AC
2049/* Send a SIGSTOP to LP. */
2050
2051static int
2052stop_callback (struct lwp_info *lp, void *data)
2053{
2054 if (!lp->stopped && !lp->signalled)
2055 {
2056 int ret;
2057
2058 if (debug_linux_nat)
2059 {
2060 fprintf_unfiltered (gdb_stdlog,
2061 "SC: kill %s **<SIGSTOP>**\n",
2062 target_pid_to_str (lp->ptid));
2063 }
2064 errno = 0;
2065 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2066 if (debug_linux_nat)
2067 {
2068 fprintf_unfiltered (gdb_stdlog,
2069 "SC: lwp kill %d %s\n",
2070 ret,
2071 errno ? safe_strerror (errno) : "ERRNO-OK");
2072 }
2073
2074 lp->signalled = 1;
2075 gdb_assert (lp->status == 0);
2076 }
2077
2078 return 0;
2079}
2080
/* Wait until LP is stopped.  If DATA is non-null it is interpreted as
   a pointer to a set of signals to be flushed immediately.

   This routine drains events from LP until the SIGSTOP we sent it (via
   stop_callback) comes back.  Any other events seen along the way are
   either discarded (flushed signals), requeued (breakpoint SIGTRAPs),
   or stashed in lp->status for later processing.  Recurses until the
   SIGSTOP is collected or the LWP exits.  */

static int
stop_wait_callback (struct lwp_info *lp, void *data)
{
  sigset_t *flush_mask = data;

  if (!lp->stopped)
    {
      int status;

      /* Block until LP reports something; 0 means it exited.  */
      status = wait_lwp (lp);
      if (status == 0)
	return 0;

      /* Ignore any signals in FLUSH_MASK.  */
      if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
	{
	  /* If we haven't sent this LWP a SIGSTOP ourselves, we are
	     not waiting for one; the flushed signal stop is the stop
	     we wanted.  */
	  if (!lp->signalled)
	    {
	      lp->stopped = 1;
	      return 0;
	    }

	  /* Discard the flushed signal by resuming without forwarding
	     it, then keep waiting for the SIGSTOP.  */
	  errno = 0;
	  ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_CONT %s, 0, 0 (%s)\n",
				target_pid_to_str (lp->ptid),
				errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp, flush_mask);
	}

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  if (WSTOPSIG (status) == SIGTRAP)
	    {
	      /* If a LWP other than the LWP that we're reporting an
		 event for has hit a GDB breakpoint (as opposed to
		 some random trap signal), then just arrange for it to
		 hit it again later.  We don't keep the SIGTRAP status
		 and don't forward the SIGTRAP signal to the LWP.  We
		 will handle the current event, eventually we will
		 resume all LWPs, and this one will get its breakpoint
		 trap again.

		 If we do not do this, then we run the risk that the
		 user will delete or disable the breakpoint, but the
		 thread will have already tripped on it.  */

	      /* Save the trap's siginfo in case we need it later.  */
	      save_siginfo (lp);

	      /* Now resume this LWP and get the SIGSTOP event. */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "PTRACE_CONT %s, 0, 0 (%s)\n",
				      target_pid_to_str (lp->ptid),
				      errno ? safe_strerror (errno) : "OK");

		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Candidate SIGTRAP event in %s\n",
				      target_pid_to_str (lp->ptid));
		}
	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP). */
	      stop_wait_callback (lp, data);

	      if (target_can_async_p ())
		{
		  /* Don't leave a pending wait status in async mode.
		     Retrigger the breakpoint.  */
		  if (!cancel_breakpoint (lp))
		    {
		      /* There was no gdb breakpoint set at pc.  Put
			 the event back in the queue.  */
		      if (debug_linux_nat)
			fprintf_unfiltered (gdb_stdlog,
					    "SWC: kill %s, %s\n",
					    target_pid_to_str (lp->ptid),
					    status_to_str ((int) status));
		      kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
		    }
		}
	      else
		{
		  /* Hold the SIGTRAP for handling by
		     linux_nat_wait.  */
		  /* If there's another event, throw it back into the
		     queue.  */
		  if (lp->status)
		    {
		      if (debug_linux_nat)
			fprintf_unfiltered (gdb_stdlog,
					    "SWC: kill %s, %s\n",
					    target_pid_to_str (lp->ptid),
					    status_to_str ((int) status));
		      /* NOTE(review): the requeued signal is taken from
			 lp->status (the event found by the recursive
			 call), while STATUS itself is saved below.  */
		      kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
		    }
		  /* Save the sigtrap event. */
		  lp->status = status;
		}
	      return 0;
	    }
	  else
	    {
	      /* The thread was stopped with a signal other than
		 SIGSTOP, and didn't accidentally trip a breakpoint. */

	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "SWC: Pending event %s in %s\n",
				      status_to_str ((int) status),
				      target_pid_to_str (lp->ptid));
		}
	      /* Now resume this LWP and get the SIGSTOP event. */
	      errno = 0;
	      ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
				    target_pid_to_str (lp->ptid),
				    errno ? safe_strerror (errno) : "OK");

	      /* Hold this event/waitstatus while we check to see if
		 there are any more (we still want to get that SIGSTOP). */
	      stop_wait_callback (lp, data);

	      /* If the lp->status field is still empty, use it to
		 hold this event.  If not, then this event must be
		 returned to the event queue of the LWP.  */
	      if (lp->status || target_can_async_p ())
		{
		  if (debug_linux_nat)
		    {
		      fprintf_unfiltered (gdb_stdlog,
					  "SWC: kill %s, %s\n",
					  target_pid_to_str (lp->ptid),
					  status_to_str ((int) status));
		    }
		  /* Requeue the signal so it is redelivered later.  */
		  kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
		}
	      else
		lp->status = status;
	      return 0;
	    }
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch, so
	     there's no SIGSTOP pending.  */
	  lp->stopped = 1;
	  lp->signalled = 0;
	}
    }

  return 0;
}
2246
2247/* Check whether PID has any pending signals in FLUSH_MASK. If so set
2248 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
2249
2250static int
2251linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
2252{
2253 sigset_t blocked, ignored;
2254 int i;
2255
2256 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
2257
2258 if (!flush_mask)
2259 return 0;
2260
2261 for (i = 1; i < NSIG; i++)
2262 if (sigismember (pending, i))
2263 if (!sigismember (flush_mask, i)
2264 || sigismember (&blocked, i)
2265 || sigismember (&ignored, i))
2266 sigdelset (pending, i);
2267
2268 if (sigisemptyset (pending))
2269 return 0;
2270
2271 return 1;
2272}
2273
2274/* DATA is interpreted as a mask of signals to flush. If LP has
2275 signals pending, and they are all in the flush mask, then arrange
2276 to flush them. LP should be stopped, as should all other threads
2277 it might share a signal queue with. */
2278
2279static int
2280flush_callback (struct lwp_info *lp, void *data)
2281{
2282 sigset_t *flush_mask = data;
2283 sigset_t pending, intersection, blocked, ignored;
2284 int pid, status;
2285
2286 /* Normally, when an LWP exits, it is removed from the LWP list. The
2287 last LWP isn't removed till later, however. So if there is only
2288 one LWP on the list, make sure it's alive. */
2289 if (lwp_list == lp && lp->next == NULL)
2290 if (!linux_nat_thread_alive (lp->ptid))
2291 return 0;
2292
2293 /* Just because the LWP is stopped doesn't mean that new signals
2294 can't arrive from outside, so this function must be careful of
2295 race conditions. However, because all threads are stopped, we
2296 can assume that the pending mask will not shrink unless we resume
2297 the LWP, and that it will then get another signal. We can't
2298 control which one, however. */
2299
2300 if (lp->status)
2301 {
2302 if (debug_linux_nat)
a3f17187 2303 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
d6b0e80f
AC
2304 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
2305 lp->status = 0;
2306 }
2307
3d799a95
DJ
2308 /* While there is a pending signal we would like to flush, continue
2309 the inferior and collect another signal. But if there's already
2310 a saved status that we don't want to flush, we can't resume the
2311 inferior - if it stopped for some other reason we wouldn't have
2312 anywhere to save the new status. In that case, we must leave the
2313 signal unflushed (and possibly generate an extra SIGINT stop).
2314 That's much less bad than losing a signal. */
2315 while (lp->status == 0
2316 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
d6b0e80f
AC
2317 {
2318 int ret;
2319
2320 errno = 0;
2321 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2322 if (debug_linux_nat)
2323 fprintf_unfiltered (gdb_stderr,
2324 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
2325
2326 lp->stopped = 0;
2327 stop_wait_callback (lp, flush_mask);
2328 if (debug_linux_nat)
2329 fprintf_unfiltered (gdb_stderr,
2330 "FC: Wait finished; saved status is %d\n",
2331 lp->status);
2332 }
2333
2334 return 0;
2335}
2336
2337/* Return non-zero if LP has a wait status pending. */
2338
2339static int
2340status_callback (struct lwp_info *lp, void *data)
2341{
2342 /* Only report a pending wait status if we pretend that this has
2343 indeed been resumed. */
2344 return (lp->status != 0 && lp->resumed);
2345}
2346
2347/* Return non-zero if LP isn't stopped. */
2348
2349static int
2350running_callback (struct lwp_info *lp, void *data)
2351{
2352 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2353}
2354
2355/* Count the LWP's that have had events. */
2356
2357static int
2358count_events_callback (struct lwp_info *lp, void *data)
2359{
2360 int *count = data;
2361
2362 gdb_assert (count != NULL);
2363
2364 /* Count only LWPs that have a SIGTRAP event pending. */
2365 if (lp->status != 0
2366 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2367 (*count)++;
2368
2369 return 0;
2370}
2371
2372/* Select the LWP (if any) that is currently being single-stepped. */
2373
2374static int
2375select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2376{
2377 if (lp->step && lp->status != 0)
2378 return 1;
2379 else
2380 return 0;
2381}
2382
2383/* Select the Nth LWP that has had a SIGTRAP event. */
2384
2385static int
2386select_event_lwp_callback (struct lwp_info *lp, void *data)
2387{
2388 int *selector = data;
2389
2390 gdb_assert (selector != NULL);
2391
2392 /* Select only LWPs that have a SIGTRAP event pending. */
2393 if (lp->status != 0
2394 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2395 if ((*selector)-- == 0)
2396 return 1;
2397
2398 return 0;
2399}
2400
710151dd
PA
2401static int
2402cancel_breakpoint (struct lwp_info *lp)
2403{
2404 /* Arrange for a breakpoint to be hit again later. We don't keep
2405 the SIGTRAP status and don't forward the SIGTRAP signal to the
2406 LWP. We will handle the current event, eventually we will resume
2407 this LWP, and this breakpoint will trap again.
2408
2409 If we do not do this, then we run the risk that the user will
2410 delete or disable the breakpoint, but the LWP will have already
2411 tripped on it. */
2412
515630c5
UW
2413 struct regcache *regcache = get_thread_regcache (lp->ptid);
2414 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2415 CORE_ADDR pc;
2416
2417 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2418 if (breakpoint_inserted_here_p (pc))
710151dd
PA
2419 {
2420 if (debug_linux_nat)
2421 fprintf_unfiltered (gdb_stdlog,
2422 "CB: Push back breakpoint for %s\n",
2423 target_pid_to_str (lp->ptid));
2424
2425 /* Back up the PC if necessary. */
515630c5
UW
2426 if (gdbarch_decr_pc_after_break (gdbarch))
2427 regcache_write_pc (regcache, pc);
2428
710151dd
PA
2429 return 1;
2430 }
2431 return 0;
2432}
2433
d6b0e80f
AC
2434static int
2435cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2436{
2437 struct lwp_info *event_lp = data;
2438
2439 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2440 if (lp == event_lp)
2441 return 0;
2442
2443 /* If a LWP other than the LWP that we're reporting an event for has
2444 hit a GDB breakpoint (as opposed to some random trap signal),
2445 then just arrange for it to hit it again later. We don't keep
2446 the SIGTRAP status and don't forward the SIGTRAP signal to the
2447 LWP. We will handle the current event, eventually we will resume
2448 all LWPs, and this one will get its breakpoint trap again.
2449
2450 If we do not do this, then we run the risk that the user will
2451 delete or disable the breakpoint, but the LWP will have already
2452 tripped on it. */
2453
2454 if (lp->status != 0
2455 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2456 && cancel_breakpoint (lp))
2457 /* Throw away the SIGTRAP. */
2458 lp->status = 0;
d6b0e80f
AC
2459
2460 return 0;
2461}
2462
2463/* Select one LWP out of those that have events pending. */
2464
2465static void
2466select_event_lwp (struct lwp_info **orig_lp, int *status)
2467{
2468 int num_events = 0;
2469 int random_selector;
2470 struct lwp_info *event_lp;
2471
ac264b3b 2472 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2473 (*orig_lp)->status = *status;
2474
2475 /* Give preference to any LWP that is being single-stepped. */
2476 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2477 if (event_lp != NULL)
2478 {
2479 if (debug_linux_nat)
2480 fprintf_unfiltered (gdb_stdlog,
2481 "SEL: Select single-step %s\n",
2482 target_pid_to_str (event_lp->ptid));
2483 }
2484 else
2485 {
2486 /* No single-stepping LWP. Select one at random, out of those
2487 which have had SIGTRAP events. */
2488
2489 /* First see how many SIGTRAP events we have. */
2490 iterate_over_lwps (count_events_callback, &num_events);
2491
2492 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2493 random_selector = (int)
2494 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2495
2496 if (debug_linux_nat && num_events > 1)
2497 fprintf_unfiltered (gdb_stdlog,
2498 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2499 num_events, random_selector);
2500
2501 event_lp = iterate_over_lwps (select_event_lwp_callback,
2502 &random_selector);
2503 }
2504
2505 if (event_lp != NULL)
2506 {
2507 /* Switch the event LWP. */
2508 *orig_lp = event_lp;
2509 *status = event_lp->status;
2510 }
2511
2512 /* Flush the wait status for the event LWP. */
2513 (*orig_lp)->status = 0;
2514}
2515
2516/* Return non-zero if LP has been resumed. */
2517
2518static int
2519resumed_callback (struct lwp_info *lp, void *data)
2520{
2521 return lp->resumed;
2522}
2523
d6b0e80f
AC
2524/* Stop an active thread, verify it still exists, then resume it. */
2525
2526static int
2527stop_and_resume_callback (struct lwp_info *lp, void *data)
2528{
2529 struct lwp_info *ptr;
2530
2531 if (!lp->stopped && !lp->signalled)
2532 {
2533 stop_callback (lp, NULL);
2534 stop_wait_callback (lp, NULL);
2535 /* Resume if the lwp still exists. */
2536 for (ptr = lwp_list; ptr; ptr = ptr->next)
2537 if (lp == ptr)
2538 {
2539 resume_callback (lp, NULL);
2540 resume_set_callback (lp, NULL);
2541 }
2542 }
2543 return 0;
2544}
2545
/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.

   LWPID/STATUS come from waitpid; OPTIONS are the waitpid options
   used (only __WCLONE is inspected here).  This routine absorbs
   events GDB handles internally: stops of not-yet-known LWPs,
   extended ptrace (fork/clone/exec) events, LWP exits, and the
   delayed SIGSTOPs we sent ourselves.  */

static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int options)
{
  struct lwp_info *lp;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.  */
  if (WIFSTOPPED (status) && !lp)
    {
      /* Remember the stop so it can be matched up with its
	 fork/clone event later.  */
      linux_record_stopped_pid (lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e.  not part of the current process.  This can happen
     if we detach from a program we original forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* NOTE drow/2003-06-17: This code seems to be meant for debugging
     CLONE_PTRACE processes which do not use the thread library -
     otherwise we wouldn't find the new LWP this way.  That doesn't
     currently work, and the following code is currently unreachable
     due to the two blocks above.  If it's fixed some day, this code
     should be broken out into a function so that we can also pick up
     LWPs from the new interface.  */
  if (!lp)
    {
      lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
      if (options & __WCLONE)
	lp->cloned = 1;

      gdb_assert (WIFSTOPPED (status)
		  && WSTOPSIG (status) == SIGSTOP);
      lp->signalled = 1;

      if (!in_thread_list (inferior_ptid))
	{
	  inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
				     GET_PID (inferior_ptid));
	  add_thread (inferior_ptid);
	}

      add_thread (lp->ptid);
    }

  /* Save the trap's siginfo in case we need it later.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    save_siginfo (lp);

  /* Handle GNU/Linux's extended waitstatus for trace events.  The
     ptrace event code lives in the high bits (status >> 16).  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Handling extended status 0x%06x\n",
			    status);
      /* Nonzero means the event was consumed internally; discard.  */
      if (linux_handle_extended_wait (lp, status, 0))
	return NULL;
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
    {
      /* If this is the main thread, we must stop all threads and
	 verify if they are still alive.  This is because in the nptl
	 thread model, there is no signal issued for exiting LWPs
	 other than the main thread.  We only get the main thread exit
	 signal once all child threads have already exited.  If we
	 stop all the threads and use the stop_wait_callback to check
	 if they have exited we can determine whether this signal
	 should be ignored or whether it means the end of the debugged
	 application, regardless of which threading model is being
	 used.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
	{
	  lp->stopped = 1;
	  iterate_over_lwps (stop_and_resume_callback, NULL);
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* If there is at least one more LWP, then the exit signal was
	 not the end of the debugged application and should be
	 ignored.  */
      if (num_lwps > 0)
	return NULL;
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s exited.\n",
			    target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: Delayed SIGSTOP caught for %s.\n",
			    target_pid_to_str (lp->ptid));

      /* This is a delayed SIGSTOP.  */
      lp->signalled = 0;

      /* Resume the LWP again, swallowing the SIGSTOP (signal 0).  */
      registers_changed ();

      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
			    lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
			    lp->step ?
			    "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			    target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  return lp;
}
2706
b84876c2
PA
2707/* Get the events stored in the pipe into the local queue, so they are
2708 accessible to queued_waitpid. We need to do this, since it is not
2709 always the case that the event at the head of the pipe is the event
2710 we want. */
2711
2712static void
2713pipe_to_local_event_queue (void)
2714{
2715 if (debug_linux_nat_async)
2716 fprintf_unfiltered (gdb_stdlog,
2717 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2718 linux_nat_num_queued_events);
2719 while (linux_nat_num_queued_events)
2720 {
2721 int lwpid, status, options;
b84876c2 2722 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2723 gdb_assert (lwpid > 0);
2724 push_waitpid (lwpid, status, options);
2725 }
2726}
2727
2728/* Get the unprocessed events stored in the local queue back into the
2729 pipe, so the event loop realizes there's something else to
2730 process. */
2731
2732static void
2733local_event_queue_to_pipe (void)
2734{
2735 struct waitpid_result *w = waitpid_queue;
2736 while (w)
2737 {
2738 struct waitpid_result *next = w->next;
2739 linux_nat_event_pipe_push (w->pid,
2740 w->status,
2741 w->options);
2742 xfree (w);
2743 w = next;
2744 }
2745 waitpid_queue = NULL;
2746
2747 if (debug_linux_nat_async)
2748 fprintf_unfiltered (gdb_stdlog,
2749 "LEQTP: linux_nat_num_queued_events(%d)\n",
2750 linux_nat_num_queued_events);
2751}
2752
d6b0e80f
AC
2753static ptid_t
2754linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2755{
2756 struct lwp_info *lp = NULL;
2757 int options = 0;
2758 int status = 0;
2759 pid_t pid = PIDGET (ptid);
2760 sigset_t flush_mask;
2761
b84876c2
PA
2762 if (debug_linux_nat_async)
2763 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2764
f973ed9c
DJ
2765 /* The first time we get here after starting a new inferior, we may
2766 not have added it to the LWP list yet - this is the earliest
2767 moment at which we know its PID. */
2768 if (num_lwps == 0)
2769 {
2770 gdb_assert (!is_lwp (inferior_ptid));
2771
2772 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2773 GET_PID (inferior_ptid));
2774 lp = add_lwp (inferior_ptid);
2775 lp->resumed = 1;
403fe197
PA
2776 /* Add the main thread to GDB's thread list. */
2777 add_thread_silent (lp->ptid);
4c28f408
PA
2778 set_running (lp->ptid, 1);
2779 set_executing (lp->ptid, 1);
f973ed9c
DJ
2780 }
2781
d6b0e80f
AC
2782 sigemptyset (&flush_mask);
2783
84e46146
PA
2784 /* Block events while we're here. */
2785 linux_nat_async_events (sigchld_sync);
d6b0e80f
AC
2786
2787retry:
2788
f973ed9c
DJ
2789 /* Make sure there is at least one LWP that has been resumed. */
2790 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2791
2792 /* First check if there is a LWP with a wait status pending. */
2793 if (pid == -1)
2794 {
2795 /* Any LWP that's been resumed will do. */
2796 lp = iterate_over_lwps (status_callback, NULL);
2797 if (lp)
2798 {
710151dd
PA
2799 if (target_can_async_p ())
2800 internal_error (__FILE__, __LINE__,
2801 "Found an LWP with a pending status in async mode.");
2802
d6b0e80f
AC
2803 status = lp->status;
2804 lp->status = 0;
2805
2806 if (debug_linux_nat && status)
2807 fprintf_unfiltered (gdb_stdlog,
2808 "LLW: Using pending wait status %s for %s.\n",
2809 status_to_str (status),
2810 target_pid_to_str (lp->ptid));
2811 }
2812
b84876c2 2813 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2814 cloned and uncloned processes. We start with the cloned
2815 processes. */
2816 options = __WCLONE | WNOHANG;
2817 }
2818 else if (is_lwp (ptid))
2819 {
2820 if (debug_linux_nat)
2821 fprintf_unfiltered (gdb_stdlog,
2822 "LLW: Waiting for specific LWP %s.\n",
2823 target_pid_to_str (ptid));
2824
2825 /* We have a specific LWP to check. */
2826 lp = find_lwp_pid (ptid);
2827 gdb_assert (lp);
2828 status = lp->status;
2829 lp->status = 0;
2830
2831 if (debug_linux_nat && status)
2832 fprintf_unfiltered (gdb_stdlog,
2833 "LLW: Using pending wait status %s for %s.\n",
2834 status_to_str (status),
2835 target_pid_to_str (lp->ptid));
2836
2837 /* If we have to wait, take into account whether PID is a cloned
2838 process or not. And we have to convert it to something that
2839 the layer beneath us can understand. */
2840 options = lp->cloned ? __WCLONE : 0;
2841 pid = GET_LWP (ptid);
2842 }
2843
2844 if (status && lp->signalled)
2845 {
2846 /* A pending SIGSTOP may interfere with the normal stream of
2847 events. In a typical case where interference is a problem,
2848 we have a SIGSTOP signal pending for LWP A while
2849 single-stepping it, encounter an event in LWP B, and take the
2850 pending SIGSTOP while trying to stop LWP A. After processing
2851 the event in LWP B, LWP A is continued, and we'll never see
2852 the SIGTRAP associated with the last time we were
2853 single-stepping LWP A. */
2854
2855 /* Resume the thread. It should halt immediately returning the
2856 pending SIGSTOP. */
2857 registers_changed ();
10d6c8cd
DJ
2858 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2859 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2860 if (debug_linux_nat)
2861 fprintf_unfiltered (gdb_stdlog,
2862 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2863 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2864 target_pid_to_str (lp->ptid));
2865 lp->stopped = 0;
2866 gdb_assert (lp->resumed);
2867
2868 /* This should catch the pending SIGSTOP. */
2869 stop_wait_callback (lp, NULL);
2870 }
2871
b84876c2
PA
2872 if (!target_can_async_p ())
2873 {
2874 /* Causes SIGINT to be passed on to the attached process. */
2875 set_sigint_trap ();
2876 set_sigio_trap ();
2877 }
d6b0e80f
AC
2878
2879 while (status == 0)
2880 {
2881 pid_t lwpid;
2882
b84876c2
PA
2883 if (target_can_async_p ())
2884 /* In async mode, don't ever block. Only look at the locally
2885 queued events. */
2886 lwpid = queued_waitpid (pid, &status, options);
2887 else
2888 lwpid = my_waitpid (pid, &status, options);
2889
d6b0e80f
AC
2890 if (lwpid > 0)
2891 {
2892 gdb_assert (pid == -1 || lwpid == pid);
2893
2894 if (debug_linux_nat)
2895 {
2896 fprintf_unfiltered (gdb_stdlog,
2897 "LLW: waitpid %ld received %s\n",
2898 (long) lwpid, status_to_str (status));
2899 }
2900
02f3fc28 2901 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2902 if (!lp)
2903 {
02f3fc28 2904 /* A discarded event. */
d6b0e80f
AC
2905 status = 0;
2906 continue;
2907 }
2908
2909 break;
2910 }
2911
2912 if (pid == -1)
2913 {
2914 /* Alternate between checking cloned and uncloned processes. */
2915 options ^= __WCLONE;
2916
b84876c2
PA
2917 /* And every time we have checked both:
2918 In async mode, return to event loop;
2919 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2920 if (options & __WCLONE)
b84876c2
PA
2921 {
2922 if (target_can_async_p ())
2923 {
2924 /* No interesting event. */
2925 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2926
2927 /* Get ready for the next event. */
2928 target_async (inferior_event_handler, 0);
2929
2930 if (debug_linux_nat_async)
2931 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2932
2933 return minus_one_ptid;
2934 }
2935
2936 sigsuspend (&suspend_mask);
2937 }
d6b0e80f
AC
2938 }
2939
2940 /* We shouldn't end up here unless we want to try again. */
2941 gdb_assert (status == 0);
2942 }
2943
b84876c2
PA
2944 if (!target_can_async_p ())
2945 {
2946 clear_sigio_trap ();
2947 clear_sigint_trap ();
2948 }
d6b0e80f
AC
2949
2950 gdb_assert (lp);
2951
2952 /* Don't report signals that GDB isn't interested in, such as
2953 signals that are neither printed nor stopped upon. Stopping all
2954 threads can be a bit time-consuming so if we want decent
2955 performance with heavily multi-threaded programs, especially when
2956 they're using a high frequency timer, we'd better avoid it if we
2957 can. */
2958
2959 if (WIFSTOPPED (status))
2960 {
2961 int signo = target_signal_from_host (WSTOPSIG (status));
2962
d539ed7e
UW
2963 /* If we get a signal while single-stepping, we may need special
2964 care, e.g. to skip the signal handler. Defer to common code. */
2965 if (!lp->step
2966 && signal_stop_state (signo) == 0
d6b0e80f
AC
2967 && signal_print_state (signo) == 0
2968 && signal_pass_state (signo) == 1)
2969 {
2970 /* FIMXE: kettenis/2001-06-06: Should we resume all threads
2971 here? It is not clear we should. GDB may not expect
2972 other threads to run. On the other hand, not resuming
2973 newly attached threads may cause an unwanted delay in
2974 getting them running. */
2975 registers_changed ();
10d6c8cd
DJ
2976 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2977 lp->step, signo);
d6b0e80f
AC
2978 if (debug_linux_nat)
2979 fprintf_unfiltered (gdb_stdlog,
2980 "LLW: %s %s, %s (preempt 'handle')\n",
2981 lp->step ?
2982 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2983 target_pid_to_str (lp->ptid),
2984 signo ? strsignal (signo) : "0");
2985 lp->stopped = 0;
2986 status = 0;
2987 goto retry;
2988 }
2989
2990 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2991 {
2992 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2993 forwarded to the entire process group, that is, all LWP's
2994 will receive it. Since we only want to report it once,
2995 we try to flush it from all LWPs except this one. */
2996 sigaddset (&flush_mask, SIGINT);
2997 }
2998 }
2999
3000 /* This LWP is stopped now. */
3001 lp->stopped = 1;
3002
3003 if (debug_linux_nat)
3004 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3005 status_to_str (status), target_pid_to_str (lp->ptid));
3006
4c28f408
PA
3007 if (!non_stop)
3008 {
3009 /* Now stop all other LWP's ... */
3010 iterate_over_lwps (stop_callback, NULL);
3011
3012 /* ... and wait until all of them have reported back that
3013 they're no longer running. */
3014 iterate_over_lwps (stop_wait_callback, &flush_mask);
3015 iterate_over_lwps (flush_callback, &flush_mask);
3016
3017 /* If we're not waiting for a specific LWP, choose an event LWP
3018 from among those that have had events. Giving equal priority
3019 to all LWPs that have had events helps prevent
3020 starvation. */
3021 if (pid == -1)
3022 select_event_lwp (&lp, &status);
3023 }
d6b0e80f
AC
3024
3025 /* Now that we've selected our final event LWP, cancel any
3026 breakpoints in other LWPs that have hit a GDB breakpoint. See
3027 the comment in cancel_breakpoints_callback to find out why. */
3028 iterate_over_lwps (cancel_breakpoints_callback, lp);
3029
d6b0e80f
AC
3030 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3031 {
d6b0e80f
AC
3032 if (debug_linux_nat)
3033 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3034 "LLW: trap ptid is %s.\n",
3035 target_pid_to_str (lp->ptid));
d6b0e80f 3036 }
d6b0e80f
AC
3037
3038 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3039 {
3040 *ourstatus = lp->waitstatus;
3041 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3042 }
3043 else
3044 store_waitstatus (ourstatus, status);
3045
b84876c2
PA
3046 /* Get ready for the next event. */
3047 if (target_can_async_p ())
3048 target_async (inferior_event_handler, 0);
3049
3050 if (debug_linux_nat_async)
3051 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3052
f973ed9c 3053 return lp->ptid;
d6b0e80f
AC
3054}
3055
3056static int
3057kill_callback (struct lwp_info *lp, void *data)
3058{
3059 errno = 0;
3060 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3061 if (debug_linux_nat)
3062 fprintf_unfiltered (gdb_stdlog,
3063 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3064 target_pid_to_str (lp->ptid),
3065 errno ? safe_strerror (errno) : "OK");
3066
3067 return 0;
3068}
3069
3070static int
3071kill_wait_callback (struct lwp_info *lp, void *data)
3072{
3073 pid_t pid;
3074
3075 /* We must make sure that there are no pending events (delayed
3076 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3077 program doesn't interfere with any following debugging session. */
3078
3079 /* For cloned processes we must check both with __WCLONE and
3080 without, since the exit status of a cloned process isn't reported
3081 with __WCLONE. */
3082 if (lp->cloned)
3083 {
3084 do
3085 {
58aecb61 3086 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3087 if (pid != (pid_t) -1)
d6b0e80f 3088 {
e85a822c
DJ
3089 if (debug_linux_nat)
3090 fprintf_unfiltered (gdb_stdlog,
3091 "KWC: wait %s received unknown.\n",
3092 target_pid_to_str (lp->ptid));
3093 /* The Linux kernel sometimes fails to kill a thread
3094 completely after PTRACE_KILL; that goes from the stop
3095 point in do_fork out to the one in
3096 get_signal_to_deliever and waits again. So kill it
3097 again. */
3098 kill_callback (lp, NULL);
d6b0e80f
AC
3099 }
3100 }
3101 while (pid == GET_LWP (lp->ptid));
3102
3103 gdb_assert (pid == -1 && errno == ECHILD);
3104 }
3105
3106 do
3107 {
58aecb61 3108 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3109 if (pid != (pid_t) -1)
d6b0e80f 3110 {
e85a822c
DJ
3111 if (debug_linux_nat)
3112 fprintf_unfiltered (gdb_stdlog,
3113 "KWC: wait %s received unk.\n",
3114 target_pid_to_str (lp->ptid));
3115 /* See the call to kill_callback above. */
3116 kill_callback (lp, NULL);
d6b0e80f
AC
3117 }
3118 }
3119 while (pid == GET_LWP (lp->ptid));
3120
3121 gdb_assert (pid == -1 && errno == ECHILD);
3122 return 0;
3123}
3124
3125static void
3126linux_nat_kill (void)
3127{
f973ed9c
DJ
3128 struct target_waitstatus last;
3129 ptid_t last_ptid;
3130 int status;
d6b0e80f 3131
b84876c2
PA
3132 if (target_can_async_p ())
3133 target_async (NULL, 0);
3134
f973ed9c
DJ
3135 /* If we're stopped while forking and we haven't followed yet,
3136 kill the other task. We need to do this first because the
3137 parent will be sleeping if this is a vfork. */
d6b0e80f 3138
f973ed9c 3139 get_last_target_status (&last_ptid, &last);
d6b0e80f 3140
f973ed9c
DJ
3141 if (last.kind == TARGET_WAITKIND_FORKED
3142 || last.kind == TARGET_WAITKIND_VFORKED)
3143 {
3a3e9ee3 3144 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3145 wait (&status);
3146 }
3147
3148 if (forks_exist_p ())
b84876c2
PA
3149 {
3150 linux_fork_killall ();
3151 drain_queued_events (-1);
3152 }
f973ed9c
DJ
3153 else
3154 {
4c28f408
PA
3155 /* Stop all threads before killing them, since ptrace requires
3156 that the thread is stopped to sucessfully PTRACE_KILL. */
3157 iterate_over_lwps (stop_callback, NULL);
3158 /* ... and wait until all of them have reported back that
3159 they're no longer running. */
3160 iterate_over_lwps (stop_wait_callback, NULL);
3161
f973ed9c
DJ
3162 /* Kill all LWP's ... */
3163 iterate_over_lwps (kill_callback, NULL);
3164
3165 /* ... and wait until we've flushed all events. */
3166 iterate_over_lwps (kill_wait_callback, NULL);
3167 }
3168
3169 target_mourn_inferior ();
d6b0e80f
AC
3170}
3171
3172static void
3173linux_nat_mourn_inferior (void)
3174{
d6b0e80f
AC
3175 /* Destroy LWP info; it's no longer valid. */
3176 init_lwp_list ();
3177
f973ed9c 3178 if (! forks_exist_p ())
b84876c2
PA
3179 {
3180 /* Normal case, no other forks available. */
3181 if (target_can_async_p ())
3182 linux_nat_async (NULL, 0);
3183 linux_ops->to_mourn_inferior ();
3184 }
f973ed9c
DJ
3185 else
3186 /* Multi-fork case. The current inferior_ptid has exited, but
3187 there are other viable forks to debug. Delete the exiting
3188 one and context-switch to the first available. */
3189 linux_fork_mourn_inferior ();
d6b0e80f
AC
3190}
3191
10d6c8cd
DJ
3192static LONGEST
3193linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3194 const char *annex, gdb_byte *readbuf,
3195 const gdb_byte *writebuf,
3196 ULONGEST offset, LONGEST len)
d6b0e80f
AC
3197{
3198 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 3199 LONGEST xfer;
d6b0e80f
AC
3200
3201 if (is_lwp (inferior_ptid))
3202 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3203
10d6c8cd
DJ
3204 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3205 offset, len);
d6b0e80f
AC
3206
3207 do_cleanups (old_chain);
3208 return xfer;
3209}
3210
3211static int
3212linux_nat_thread_alive (ptid_t ptid)
3213{
4c28f408
PA
3214 int err;
3215
d6b0e80f
AC
3216 gdb_assert (is_lwp (ptid));
3217
4c28f408
PA
3218 /* Send signal 0 instead of anything ptrace, because ptracing a
3219 running thread errors out claiming that the thread doesn't
3220 exist. */
3221 err = kill_lwp (GET_LWP (ptid), 0);
3222
d6b0e80f
AC
3223 if (debug_linux_nat)
3224 fprintf_unfiltered (gdb_stdlog,
4c28f408 3225 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3226 target_pid_to_str (ptid),
4c28f408 3227 err ? safe_strerror (err) : "OK");
9c0dd46b 3228
4c28f408 3229 if (err != 0)
d6b0e80f
AC
3230 return 0;
3231
3232 return 1;
3233}
3234
3235static char *
3236linux_nat_pid_to_str (ptid_t ptid)
3237{
3238 static char buf[64];
3239
a0ef4274
DJ
3240 if (is_lwp (ptid)
3241 && ((lwp_list && lwp_list->next)
3242 || GET_PID (ptid) != GET_LWP (ptid)))
d6b0e80f
AC
3243 {
3244 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3245 return buf;
3246 }
3247
3248 return normal_pid_to_str (ptid);
3249}
3250
d6b0e80f
AC
3251static void
3252sigchld_handler (int signo)
3253{
b84876c2 3254 if (linux_nat_async_enabled
84e46146 3255 && linux_nat_async_events_state != sigchld_sync
b84876c2
PA
3256 && signo == SIGCHLD)
3257 /* It is *always* a bug to hit this. */
3258 internal_error (__FILE__, __LINE__,
3259 "sigchld_handler called when async events are enabled");
3260
d6b0e80f
AC
3261 /* Do nothing. The only reason for this handler is that it allows
3262 us to use sigsuspend in linux_nat_wait above to wait for the
3263 arrival of a SIGCHLD. */
3264}
3265
dba24537
AC
3266/* Accepts an integer PID; Returns a string representing a file that
3267 can be opened to get the symbols for the child process. */
3268
6d8fd2b7
UW
3269static char *
3270linux_child_pid_to_exec_file (int pid)
dba24537
AC
3271{
3272 char *name1, *name2;
3273
3274 name1 = xmalloc (MAXPATHLEN);
3275 name2 = xmalloc (MAXPATHLEN);
3276 make_cleanup (xfree, name1);
3277 make_cleanup (xfree, name2);
3278 memset (name2, 0, MAXPATHLEN);
3279
3280 sprintf (name1, "/proc/%d/exe", pid);
3281 if (readlink (name1, name2, MAXPATHLEN) > 0)
3282 return name2;
3283 else
3284 return name1;
3285}
3286
/* Service function for corefiles and info proc.

   Parse one entry of a /proc/PID/maps-style stream MAPFILE into
   ADDR, ENDADDR, PERMISSIONS, OFFSET, DEVICE, INODE and FILENAME.
   PERMISSIONS and DEVICE must be at least 8 bytes; FILENAME must be
   large enough for the rest of the line (callers pass MAXPATHLEN).
   Returns nonzero while entries remain.  */

static int
read_mapping (FILE *mapfile,
	      long long *addr,
	      long long *endaddr,
	      char *permissions,
	      long long *offset,
	      char *device, long long *inode, char *filename)
{
  /* Bound the %s conversions to 7 characters plus NUL so a malformed
     line cannot overflow the callers' 8-byte PERMISSIONS and DEVICE
     buffers.  Real fields ("r-xp", "08:02") are well within that.  */
  int ret = fscanf (mapfile, "%llx-%llx %7s %llx %7s %llx",
		    addr, endaddr, permissions, offset, device, inode);

  filename[0] = '\0';
  if (ret > 0 && ret != EOF)
    {
      /* Eat everything up to EOL for the filename.  This will prevent
	 weird filenames (such as one with embedded whitespace) from
	 confusing this code.  It also makes this code more robust in
	 respect to annotations the kernel may add after the filename.

	 Note the filename is used for informational purposes
	 only.  */
      ret += fscanf (mapfile, "%[^\n]\n", filename);
    }

  return (ret != 0 && ret != EOF);
}
3315
3316/* Fills the "to_find_memory_regions" target vector. Lists the memory
3317 regions in the inferior for a corefile. */
3318
3319static int
3320linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3321 unsigned long,
3322 int, int, int, void *), void *obfd)
3323{
3324 long long pid = PIDGET (inferior_ptid);
3325 char mapsfilename[MAXPATHLEN];
3326 FILE *mapsfile;
3327 long long addr, endaddr, size, offset, inode;
3328 char permissions[8], device[8], filename[MAXPATHLEN];
3329 int read, write, exec;
3330 int ret;
3331
3332 /* Compose the filename for the /proc memory map, and open it. */
3333 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3334 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3335 error (_("Could not open %s."), mapsfilename);
dba24537
AC
3336
3337 if (info_verbose)
3338 fprintf_filtered (gdb_stdout,
3339 "Reading memory regions from %s\n", mapsfilename);
3340
3341 /* Now iterate until end-of-file. */
3342 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3343 &offset, &device[0], &inode, &filename[0]))
3344 {
3345 size = endaddr - addr;
3346
3347 /* Get the segment's permissions. */
3348 read = (strchr (permissions, 'r') != 0);
3349 write = (strchr (permissions, 'w') != 0);
3350 exec = (strchr (permissions, 'x') != 0);
3351
3352 if (info_verbose)
3353 {
3354 fprintf_filtered (gdb_stdout,
3355 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3356 size, paddr_nz (addr),
3357 read ? 'r' : ' ',
3358 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3359 if (filename[0])
dba24537
AC
3360 fprintf_filtered (gdb_stdout, " for %s", filename);
3361 fprintf_filtered (gdb_stdout, "\n");
3362 }
3363
3364 /* Invoke the callback function to create the corefile
3365 segment. */
3366 func (addr, size, read, write, exec, obfd);
3367 }
3368 fclose (mapsfile);
3369 return 0;
3370}
3371
3372/* Records the thread's register state for the corefile note
3373 section. */
3374
3375static char *
3376linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
3377 char *note_data, int *note_size)
3378{
3379 gdb_gregset_t gregs;
3380 gdb_fpregset_t fpregs;
dba24537 3381 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
3382 struct regcache *regcache = get_thread_regcache (ptid);
3383 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 3384 const struct regset *regset;
55e969c1 3385 int core_regset_p;
594f7785 3386 struct cleanup *old_chain;
17ea7499
CES
3387 struct core_regset_section *sect_list;
3388 char *gdb_regset;
594f7785
UW
3389
3390 old_chain = save_inferior_ptid ();
3391 inferior_ptid = ptid;
3392 target_fetch_registers (regcache, -1);
3393 do_cleanups (old_chain);
4f844a66
DM
3394
3395 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
3396 sect_list = gdbarch_core_regset_sections (gdbarch);
3397
55e969c1
DM
3398 if (core_regset_p
3399 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3400 sizeof (gregs))) != NULL
3401 && regset->collect_regset != NULL)
594f7785 3402 regset->collect_regset (regset, regcache, -1,
55e969c1 3403 &gregs, sizeof (gregs));
4f844a66 3404 else
594f7785 3405 fill_gregset (regcache, &gregs, -1);
4f844a66 3406
55e969c1
DM
3407 note_data = (char *) elfcore_write_prstatus (obfd,
3408 note_data,
3409 note_size,
3410 lwp,
3411 stop_signal, &gregs);
3412
17ea7499
CES
3413 /* The loop below uses the new struct core_regset_section, which stores
3414 the supported section names and sizes for the core file. Note that
3415 note PRSTATUS needs to be treated specially. But the other notes are
3416 structurally the same, so they can benefit from the new struct. */
3417 if (core_regset_p && sect_list != NULL)
3418 while (sect_list->sect_name != NULL)
3419 {
3420 /* .reg was already handled above. */
3421 if (strcmp (sect_list->sect_name, ".reg") == 0)
3422 {
3423 sect_list++;
3424 continue;
3425 }
3426 regset = gdbarch_regset_from_core_section (gdbarch,
3427 sect_list->sect_name,
3428 sect_list->size);
3429 gdb_assert (regset && regset->collect_regset);
3430 gdb_regset = xmalloc (sect_list->size);
3431 regset->collect_regset (regset, regcache, -1,
3432 gdb_regset, sect_list->size);
3433 note_data = (char *) elfcore_write_register_note (obfd,
3434 note_data,
3435 note_size,
3436 sect_list->sect_name,
3437 gdb_regset,
3438 sect_list->size);
3439 xfree (gdb_regset);
3440 sect_list++;
3441 }
dba24537 3442
17ea7499
CES
3443 /* For architectures that does not have the struct core_regset_section
3444 implemented, we use the old method. When all the architectures have
3445 the new support, the code below should be deleted. */
4f844a66 3446 else
17ea7499
CES
3447 {
3448 if (core_regset_p
3449 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3450 sizeof (fpregs))) != NULL
3451 && regset->collect_regset != NULL)
3452 regset->collect_regset (regset, regcache, -1,
3453 &fpregs, sizeof (fpregs));
3454 else
3455 fill_fpregset (regcache, &fpregs, -1);
3456
3457 note_data = (char *) elfcore_write_prfpreg (obfd,
3458 note_data,
3459 note_size,
3460 &fpregs, sizeof (fpregs));
3461 }
4f844a66 3462
dba24537
AC
3463 return note_data;
3464}
3465
3466struct linux_nat_corefile_thread_data
3467{
3468 bfd *obfd;
3469 char *note_data;
3470 int *note_size;
3471 int num_notes;
3472};
3473
3474/* Called by gdbthread.c once per thread. Records the thread's
3475 register state for the corefile note section. */
3476
3477static int
3478linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3479{
3480 struct linux_nat_corefile_thread_data *args = data;
dba24537 3481
dba24537
AC
3482 args->note_data = linux_nat_do_thread_registers (args->obfd,
3483 ti->ptid,
3484 args->note_data,
3485 args->note_size);
3486 args->num_notes++;
56be3814 3487
dba24537
AC
3488 return 0;
3489}
3490
3491/* Records the register state for the corefile note section. */
3492
3493static char *
3494linux_nat_do_registers (bfd *obfd, ptid_t ptid,
3495 char *note_data, int *note_size)
3496{
dba24537
AC
3497 return linux_nat_do_thread_registers (obfd,
3498 ptid_build (ptid_get_pid (inferior_ptid),
3499 ptid_get_pid (inferior_ptid),
3500 0),
3501 note_data, note_size);
dba24537
AC
3502}
3503
3504/* Fills the "to_make_corefile_note" target vector. Builds the note
3505 section for a corefile, and returns it in a malloc buffer. */
3506
3507static char *
3508linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3509{
3510 struct linux_nat_corefile_thread_data thread_args;
3511 struct cleanup *old_chain;
d99148ef 3512 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3513 char fname[16] = { '\0' };
d99148ef 3514 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3515 char psargs[80] = { '\0' };
3516 char *note_data = NULL;
3517 ptid_t current_ptid = inferior_ptid;
c6826062 3518 gdb_byte *auxv;
dba24537
AC
3519 int auxv_len;
3520
3521 if (get_exec_file (0))
3522 {
3523 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3524 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3525 if (get_inferior_args ())
3526 {
d99148ef
JK
3527 char *string_end;
3528 char *psargs_end = psargs + sizeof (psargs);
3529
3530 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3531 strings fine. */
3532 string_end = memchr (psargs, 0, sizeof (psargs));
3533 if (string_end != NULL)
3534 {
3535 *string_end++ = ' ';
3536 strncpy (string_end, get_inferior_args (),
3537 psargs_end - string_end);
3538 }
dba24537
AC
3539 }
3540 note_data = (char *) elfcore_write_prpsinfo (obfd,
3541 note_data,
3542 note_size, fname, psargs);
3543 }
3544
3545 /* Dump information for threads. */
3546 thread_args.obfd = obfd;
3547 thread_args.note_data = note_data;
3548 thread_args.note_size = note_size;
3549 thread_args.num_notes = 0;
3550 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3551 if (thread_args.num_notes == 0)
3552 {
3553 /* iterate_over_threads didn't come up with any threads; just
3554 use inferior_ptid. */
3555 note_data = linux_nat_do_registers (obfd, inferior_ptid,
3556 note_data, note_size);
3557 }
3558 else
3559 {
3560 note_data = thread_args.note_data;
3561 }
3562
13547ab6
DJ
3563 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3564 NULL, &auxv);
dba24537
AC
3565 if (auxv_len > 0)
3566 {
3567 note_data = elfcore_write_note (obfd, note_data, note_size,
3568 "CORE", NT_AUXV, auxv, auxv_len);
3569 xfree (auxv);
3570 }
3571
3572 make_cleanup (xfree, note_data);
3573 return note_data;
3574}
3575
3576/* Implement the "info proc" command. */
3577
3578static void
3579linux_nat_info_proc_cmd (char *args, int from_tty)
3580{
3581 long long pid = PIDGET (inferior_ptid);
3582 FILE *procfile;
3583 char **argv = NULL;
3584 char buffer[MAXPATHLEN];
3585 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3586 int cmdline_f = 1;
3587 int cwd_f = 1;
3588 int exe_f = 1;
3589 int mappings_f = 0;
3590 int environ_f = 0;
3591 int status_f = 0;
3592 int stat_f = 0;
3593 int all = 0;
3594 struct stat dummy;
3595
3596 if (args)
3597 {
3598 /* Break up 'args' into an argv array. */
3599 if ((argv = buildargv (args)) == NULL)
3600 nomem (0);
3601 else
3602 make_cleanup_freeargv (argv);
3603 }
3604 while (argv != NULL && *argv != NULL)
3605 {
3606 if (isdigit (argv[0][0]))
3607 {
3608 pid = strtoul (argv[0], NULL, 10);
3609 }
3610 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3611 {
3612 mappings_f = 1;
3613 }
3614 else if (strcmp (argv[0], "status") == 0)
3615 {
3616 status_f = 1;
3617 }
3618 else if (strcmp (argv[0], "stat") == 0)
3619 {
3620 stat_f = 1;
3621 }
3622 else if (strcmp (argv[0], "cmd") == 0)
3623 {
3624 cmdline_f = 1;
3625 }
3626 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3627 {
3628 exe_f = 1;
3629 }
3630 else if (strcmp (argv[0], "cwd") == 0)
3631 {
3632 cwd_f = 1;
3633 }
3634 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3635 {
3636 all = 1;
3637 }
3638 else
3639 {
3640 /* [...] (future options here) */
3641 }
3642 argv++;
3643 }
3644 if (pid == 0)
8a3fe4f8 3645 error (_("No current process: you must name one."));
dba24537
AC
3646
3647 sprintf (fname1, "/proc/%lld", pid);
3648 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3649 error (_("No /proc directory: '%s'"), fname1);
dba24537 3650
a3f17187 3651 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3652 if (cmdline_f || all)
3653 {
3654 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3655 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3656 {
3657 fgets (buffer, sizeof (buffer), procfile);
3658 printf_filtered ("cmdline = '%s'\n", buffer);
3659 fclose (procfile);
3660 }
3661 else
8a3fe4f8 3662 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3663 }
3664 if (cwd_f || all)
3665 {
3666 sprintf (fname1, "/proc/%lld/cwd", pid);
3667 memset (fname2, 0, sizeof (fname2));
3668 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3669 printf_filtered ("cwd = '%s'\n", fname2);
3670 else
8a3fe4f8 3671 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3672 }
3673 if (exe_f || all)
3674 {
3675 sprintf (fname1, "/proc/%lld/exe", pid);
3676 memset (fname2, 0, sizeof (fname2));
3677 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3678 printf_filtered ("exe = '%s'\n", fname2);
3679 else
8a3fe4f8 3680 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3681 }
3682 if (mappings_f || all)
3683 {
3684 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3685 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3686 {
3687 long long addr, endaddr, size, offset, inode;
3688 char permissions[8], device[8], filename[MAXPATHLEN];
3689
a3f17187 3690 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3691 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3692 {
3693 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3694 "Start Addr",
3695 " End Addr",
3696 " Size", " Offset", "objfile");
3697 }
3698 else
3699 {
3700 printf_filtered (" %18s %18s %10s %10s %7s\n",
3701 "Start Addr",
3702 " End Addr",
3703 " Size", " Offset", "objfile");
3704 }
3705
3706 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3707 &offset, &device[0], &inode, &filename[0]))
3708 {
3709 size = endaddr - addr;
3710
3711 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3712 calls here (and possibly above) should be abstracted
3713 out into their own functions? Andrew suggests using
3714 a generic local_address_string instead to print out
3715 the addresses; that makes sense to me, too. */
3716
17a912b6 3717 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3718 {
3719 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3720 (unsigned long) addr, /* FIXME: pr_addr */
3721 (unsigned long) endaddr,
3722 (int) size,
3723 (unsigned int) offset,
3724 filename[0] ? filename : "");
3725 }
3726 else
3727 {
3728 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3729 (unsigned long) addr, /* FIXME: pr_addr */
3730 (unsigned long) endaddr,
3731 (int) size,
3732 (unsigned int) offset,
3733 filename[0] ? filename : "");
3734 }
3735 }
3736
3737 fclose (procfile);
3738 }
3739 else
8a3fe4f8 3740 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3741 }
3742 if (status_f || all)
3743 {
3744 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3745 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3746 {
3747 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3748 puts_filtered (buffer);
3749 fclose (procfile);
3750 }
3751 else
8a3fe4f8 3752 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3753 }
3754 if (stat_f || all)
3755 {
3756 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3757 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3758 {
3759 int itmp;
3760 char ctmp;
a25694b4 3761 long ltmp;
dba24537
AC
3762
3763 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3764 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3765 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3766 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3767 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3768 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3769 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3770 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3771 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3772 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3773 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3774 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3775 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3776 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3777 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3778 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3779 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3780 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3781 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3782 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3783 (unsigned long) ltmp);
3784 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3785 printf_filtered (_("Minor faults, children: %lu\n"),
3786 (unsigned long) ltmp);
3787 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3788 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3789 (unsigned long) ltmp);
3790 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3791 printf_filtered (_("Major faults, children: %lu\n"),
3792 (unsigned long) ltmp);
3793 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3794 printf_filtered (_("utime: %ld\n"), ltmp);
3795 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3796 printf_filtered (_("stime: %ld\n"), ltmp);
3797 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3798 printf_filtered (_("utime, children: %ld\n"), ltmp);
3799 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3800 printf_filtered (_("stime, children: %ld\n"), ltmp);
3801 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3802 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3803 ltmp);
3804 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3805 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3806 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3807 printf_filtered (_("jiffies until next timeout: %lu\n"),
3808 (unsigned long) ltmp);
3809 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3810 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3811 (unsigned long) ltmp);
3812 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3813 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3814 ltmp);
3815 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3816 printf_filtered (_("Virtual memory size: %lu\n"),
3817 (unsigned long) ltmp);
3818 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3819 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3820 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3821 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3822 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3823 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3824 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3825 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3826 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3827 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3828#if 0 /* Don't know how architecture-dependent the rest is...
3829 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3830 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3831 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3832 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3833 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3834 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3835 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3836 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3837 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3838 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3839 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3840 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3841 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3842 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3843 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537
AC
3844#endif
3845 fclose (procfile);
3846 }
3847 else
8a3fe4f8 3848 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3849 }
3850}
3851
10d6c8cd
DJ
3852/* Implement the to_xfer_partial interface for memory reads using the /proc
3853 filesystem. Because we can use a single read() call for /proc, this
3854 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3855 but it doesn't support writes. */
3856
3857static LONGEST
3858linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3859 const char *annex, gdb_byte *readbuf,
3860 const gdb_byte *writebuf,
3861 ULONGEST offset, LONGEST len)
dba24537 3862{
10d6c8cd
DJ
3863 LONGEST ret;
3864 int fd;
dba24537
AC
3865 char filename[64];
3866
10d6c8cd 3867 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3868 return 0;
3869
3870 /* Don't bother for one word. */
3871 if (len < 3 * sizeof (long))
3872 return 0;
3873
3874 /* We could keep this file open and cache it - possibly one per
3875 thread. That requires some juggling, but is even faster. */
3876 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3877 fd = open (filename, O_RDONLY | O_LARGEFILE);
3878 if (fd == -1)
3879 return 0;
3880
3881 /* If pread64 is available, use it. It's faster if the kernel
3882 supports it (only one syscall), and it's 64-bit safe even on
3883 32-bit platforms (for instance, SPARC debugging a SPARC64
3884 application). */
3885#ifdef HAVE_PREAD64
10d6c8cd 3886 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3887#else
10d6c8cd 3888 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3889#endif
3890 ret = 0;
3891 else
3892 ret = len;
3893
3894 close (fd);
3895 return ret;
3896}
3897
3898/* Parse LINE as a signal set and add its set bits to SIGS. */
3899
3900static void
3901add_line_to_sigset (const char *line, sigset_t *sigs)
3902{
3903 int len = strlen (line) - 1;
3904 const char *p;
3905 int signum;
3906
3907 if (line[len] != '\n')
8a3fe4f8 3908 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3909
3910 p = line;
3911 signum = len * 4;
3912 while (len-- > 0)
3913 {
3914 int digit;
3915
3916 if (*p >= '0' && *p <= '9')
3917 digit = *p - '0';
3918 else if (*p >= 'a' && *p <= 'f')
3919 digit = *p - 'a' + 10;
3920 else
8a3fe4f8 3921 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3922
3923 signum -= 4;
3924
3925 if (digit & 1)
3926 sigaddset (sigs, signum + 1);
3927 if (digit & 2)
3928 sigaddset (sigs, signum + 2);
3929 if (digit & 4)
3930 sigaddset (sigs, signum + 3);
3931 if (digit & 8)
3932 sigaddset (sigs, signum + 4);
3933
3934 p++;
3935 }
3936}
3937
3938/* Find process PID's pending signals from /proc/pid/status and set
3939 SIGS to match. */
3940
3941void
3942linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3943{
3944 FILE *procfile;
3945 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3946 int signum;
3947
3948 sigemptyset (pending);
3949 sigemptyset (blocked);
3950 sigemptyset (ignored);
3951 sprintf (fname, "/proc/%d/status", pid);
3952 procfile = fopen (fname, "r");
3953 if (procfile == NULL)
8a3fe4f8 3954 error (_("Could not open %s"), fname);
dba24537
AC
3955
3956 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3957 {
3958 /* Normal queued signals are on the SigPnd line in the status
3959 file. However, 2.6 kernels also have a "shared" pending
3960 queue for delivering signals to a thread group, so check for
3961 a ShdPnd line also.
3962
3963 Unfortunately some Red Hat kernels include the shared pending
3964 queue but not the ShdPnd status field. */
3965
3966 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3967 add_line_to_sigset (buffer + 8, pending);
3968 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3969 add_line_to_sigset (buffer + 8, pending);
3970 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3971 add_line_to_sigset (buffer + 8, blocked);
3972 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3973 add_line_to_sigset (buffer + 8, ignored);
3974 }
3975
3976 fclose (procfile);
3977}
3978
/* to_xfer_partial method installed by linux_target_install_ops.
   Routes AUXV requests to the /proc-based auxv reader, tries the fast
   /proc/PID/mem path for memory, and otherwise defers to the method
   this target originally provided (saved in super_xfer_partial).  */

static LONGEST
linux_xfer_partial (struct target_ops *ops, enum target_object object,
		    const char *annex, gdb_byte *readbuf,
		    const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
{
  LONGEST xfer;

  if (object == TARGET_OBJECT_AUXV)
    return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
			     offset, len);

  /* linux_proc_xfer_partial returns 0 whenever it cannot help
     (writes, short transfers, open failure) — fall through to the
     inherited method in that case.  */
  xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
				  offset, len);
  if (xfer != 0)
    return xfer;

  return super_xfer_partial (ops, object, annex, readbuf, writebuf,
			     offset, len);
}
3998
/* Create a prototype generic GNU/Linux target.  The client can override
   it with local methods.  */

/* Install the generic GNU/Linux child methods (fork/exec catchpoints,
   /proc helpers, corefile support) on target vector T.  */

static void
linux_target_install_ops (struct target_ops *t)
{
  t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
  t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
  t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
  t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
  t->to_post_startup_inferior = linux_child_post_startup_inferior;
  t->to_post_attach = linux_child_post_attach;
  t->to_follow_fork = linux_child_follow_fork;
  t->to_find_memory_regions = linux_nat_find_memory_regions;
  t->to_make_corefile_notes = linux_nat_make_corefile_notes;

  /* Save the inherited xfer method BEFORE overriding it;
     linux_xfer_partial chains to super_xfer_partial for requests
     /proc cannot serve.  */
  super_xfer_partial = t->to_xfer_partial;
  t->to_xfer_partial = linux_xfer_partial;
}
4018
/* Build the generic GNU/Linux target on top of inf-ptrace.  */

struct target_ops *
linux_target (void)
{
  struct target_ops *t = inf_ptrace_target ();

  linux_target_install_ops (t);
  return t;
}
4029
4030struct target_ops *
7714d83a 4031linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4032{
4033 struct target_ops *t;
4034
4035 t = inf_ptrace_trad_target (register_u_offset);
4036 linux_target_install_ops (t);
10d6c8cd 4037
10d6c8cd
DJ
4038 return t;
4039}
4040
/* Controls if async mode is permitted.  This is the active setting,
   consulted by linux_nat_is_async_p / linux_nat_can_async_p.  */
static int linux_async_permitted = 0;

/* The set command writes to this variable.  If the inferior is
   executing, linux_nat_async_permitted is *not* updated.  */
static int linux_async_permitted_1 = 0;
4047
/* "maintenance set linux-async" handler: commit the staged value
   (linux_async_permitted_1) into the active setting and switch
   modes, unless the inferior is running.  */

static void
set_maintenance_linux_async_permitted (char *args, int from_tty,
			       struct cmd_list_element *c)
{
  if (target_has_execution)
    {
      /* Roll the staging variable back to the active setting before
	 erroring out, so "show" keeps reporting the truth.  */
      linux_async_permitted_1 = linux_async_permitted;
      error (_("Cannot change this setting while the inferior is running."));
    }

  /* Commit the staged value and install the corresponding mode.  */
  linux_async_permitted = linux_async_permitted_1;
  linux_nat_set_async_mode (linux_async_permitted);
}
4061
/* "maintenance show linux-async" handler.  */

static void
show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
		    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("\
Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
		    value);
}
4070
4071/* target_is_async_p implementation. */
4072
4073static int
4074linux_nat_is_async_p (void)
4075{
4076 /* NOTE: palves 2008-03-21: We're only async when the user requests
4077 it explicitly with the "maintenance set linux-async" command.
4078 Someday, linux will always be async. */
4079 if (!linux_async_permitted)
4080 return 0;
4081
4082 return 1;
4083}
4084
4085/* target_can_async_p implementation. */
4086
4087static int
4088linux_nat_can_async_p (void)
4089{
4090 /* NOTE: palves 2008-03-21: We're only async when the user requests
4091 it explicitly with the "maintenance set linux-async" command.
4092 Someday, linux will always be async. */
4093 if (!linux_async_permitted)
4094 return 0;
4095
4096 /* See target.h/target_async_mask. */
4097 return linux_nat_async_mask_value;
4098}
4099
/* target_async_mask implementation.  Returns the previous mask
   value.  Masking (MASK == 0) shuts down async event delivery;
   unmasking re-registers the inferior event handler.  */

static int
linux_nat_async_mask (int mask)
{
  int current_state;
  current_state = linux_nat_async_mask_value;

  if (current_state != mask)
    {
      if (mask == 0)
	{
	  /* Shut async down first, while the mask still allows the
	     call (linux_nat_async internal-errors when the mask is
	     zero), then record the new value.  */
	  linux_nat_async (NULL, 0);
	  linux_nat_async_mask_value = mask;
	}
      else
	{
	  /* Mirror image: record the new value first so the
	     linux_nat_async call below passes its mask check.  */
	  linux_nat_async_mask_value = mask;
	  linux_nat_async (inferior_event_handler, 0);
	}
    }

  return current_state;
}
4124
4125/* Pop an event from the event pipe. */
4126
4127static int
4128linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
4129{
4130 struct waitpid_result event = {0};
4131 int ret;
4132
4133 do
4134 {
4135 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4136 }
4137 while (ret == -1 && errno == EINTR);
4138
4139 gdb_assert (ret == sizeof (event));
4140
4141 *ptr_status = event.status;
4142 *ptr_options = event.options;
4143
4144 linux_nat_num_queued_events--;
4145
4146 return event.pid;
4147}
4148
/* Push an event into the event pipe.  */

static void
linux_nat_event_pipe_push (int pid, int status, int options)
{
  int ret;
  struct waitpid_result event = {0};
  event.pid = pid;
  event.status = status;
  event.options = options;

  do
    {
      ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
      /* Anything other than EINTR or a complete write would lose an
	 event.  NOTE(review): the pipe is O_NONBLOCK, so a full pipe
	 would return EAGAIN and trip this assertion — presumably the
	 queue never grows near the pipe capacity; confirm.  */
      gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
    } while (ret == -1 && errno == EINTR);

  linux_nat_num_queued_events++;
}
4168
/* Drain every child event the kernel currently has pending into the
   event pipe.  Only legal while the async SIGCHLD handler is
   installed (sigchld_async state).  */

static void
get_pending_events (void)
{
  int status, options, pid;

  if (!linux_nat_async_enabled
      || linux_nat_async_events_state != sigchld_async)
    internal_error (__FILE__, __LINE__,
		    "get_pending_events called with async masked");

  while (1)
    {
      status = 0;
      /* First poll for events from cloned children (threads);
	 __WCLONE is Linux-specific.  */
      options = __WCLONE | WNOHANG;

      do
	{
	  pid = waitpid (-1, &status, options);
	}
      while (pid == -1 && errno == EINTR);

      if (pid <= 0)
	{
	  /* No cloned-child event; poll for ordinary children.  */
	  options = WNOHANG;
	  do
	    {
	      pid = waitpid (-1, &status, options);
	    }
	  while (pid == -1 && errno == EINTR);
	}

      if (pid <= 0)
	/* No more children reporting events.  */
	break;

      if (debug_linux_nat_async)
	fprintf_unfiltered (gdb_stdlog, "\
get_pending_events: pid(%d), status(%x), options (%x)\n",
			    pid, status, options);

      /* Queue the event (with the options it was collected under) for
	 the event loop to consume.  */
      linux_nat_event_pipe_push (pid, status, options);
    }

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "\
get_pending_events: linux_nat_num_queued_events(%d)\n",
			linux_nat_num_queued_events);
}
4217
/* SIGCHLD handler for async mode.  Runs in signal-handler context;
   collects all pending child events into the event pipe so the event
   loop can pick them up.  */

static void
async_sigchld_handler (int signo)
{
  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");

  get_pending_events ();
}
4228
/* Set SIGCHLD handling state to STATE.  Returns previous state.

   Transitions between the three modes (synchronous handler, async
   handler, default disposition) are always performed with SIGCHLD
   blocked, and already-collected events are migrated between the
   local event queue and the event pipe so none are lost or
   reordered.  */

static enum sigchld_state
linux_nat_async_events (enum sigchld_state state)
{
  enum sigchld_state current_state = linux_nat_async_events_state;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog,
			"LNAE: state(%d): linux_nat_async_events_state(%d), "
			"linux_nat_num_queued_events(%d)\n",
			state, linux_nat_async_events_state,
			linux_nat_num_queued_events);

  if (current_state != state)
    {
      sigset_t mask;
      sigemptyset (&mask);
      sigaddset (&mask, SIGCHLD);

      /* Always block before changing state.  */
      sigprocmask (SIG_BLOCK, &mask, NULL);

      /* Set new state.  */
      linux_nat_async_events_state = state;

      switch (state)
	{
	case sigchld_sync:
	  {
	    /* Block target events.  SIGCHLD stays blocked; the sync
	       handler only runs during sigsuspend.  */
	    sigprocmask (SIG_BLOCK, &mask, NULL);
	    sigaction (SIGCHLD, &sync_sigchld_action, NULL);
	    /* Get events out of queue, and make them available to
	       queued_waitpid / my_waitpid.  */
	    pipe_to_local_event_queue ();
	  }
	  break;
	case sigchld_async:
	  {
	    /* Unblock target events for async mode.  */

	    sigprocmask (SIG_BLOCK, &mask, NULL);

	    /* Put events we already waited on, in the pipe first, so
	       events are FIFO.  */
	    local_event_queue_to_pipe ();
	    /* While in masked async, we may have not collected all
	       the pending events.  Get them out now.  */
	    get_pending_events ();

	    /* Let'em come.  */
	    sigaction (SIGCHLD, &async_sigchld_action, NULL);
	    sigprocmask (SIG_UNBLOCK, &mask, NULL);
	  }
	  break;
	case sigchld_default:
	  {
	    /* SIGCHLD default mode — used while forking an inferior
	       (see _initialize_linux_nat).  */
	    sigaction (SIGCHLD, &sigchld_default_action, NULL);

	    /* Get events out of queue, and make them available to
	       queued_waitpid / my_waitpid.  */
	    pipe_to_local_event_queue ();

	    /* Unblock SIGCHLD.  */
	    sigprocmask (SIG_UNBLOCK, &mask, NULL);
	  }
	  break;
	}
    }

  return current_state;
}
4303
4304static int async_terminal_is_ours = 1;
4305
/* target_terminal_inferior implementation.  */

static void
linux_nat_terminal_inferior (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_inferior ();
      return;
    }

  /* GDB should never give the terminal to the inferior, if the
     inferior is running in the background (run&, continue&, etc.).
     This check can be removed when the common code is fixed.  */
  if (!sync_execution)
    return;

  terminal_inferior ();

  /* Already handed over — nothing more to do.  */
  if (!async_terminal_is_ours)
    return;

  /* Stop listening on stdin while the inferior owns the terminal,
     and route SIGINT to the inferior rather than GDB.  */
  delete_file_handler (input_fd);
  async_terminal_is_ours = 0;
  set_sigint_trap ();
}
4333
/* target_terminal_ours implementation.  */

void
linux_nat_terminal_ours (void)
{
  if (!target_is_async_p ())
    {
      /* Async mode is disabled.  */
      terminal_ours ();
      return;
    }

  /* GDB should never give the terminal to the inferior if the
     inferior is running in the background (run&, continue&, etc.),
     but claiming it sure should.  */
  terminal_ours ();

  if (!sync_execution)
    return;

  /* Already ours — nothing more to do.  */
  if (async_terminal_is_ours)
    return;

  /* Undo what terminal_inferior did: restore GDB's SIGINT handling
     and resume listening on stdin.  */
  clear_sigint_trap ();
  add_file_handler (input_fd, stdin_event_handler, 0);
  async_terminal_is_ours = 1;
}
4361
/* Client callback registered via linux_nat_async, invoked when the
   event pipe becomes readable, and its opaque context argument.  */
static void (*async_client_callback) (enum inferior_event_type event_type,
				      void *context);
static void *async_client_context;
4365
/* Event-loop file handler for the read end of the event pipe: an
   event is queued, so notify the registered client.  */

static void
linux_nat_async_file_handler (int error, gdb_client_data client_data)
{
  async_client_callback (INF_REG_EVENT, async_client_context);
}
4371
4372/* target_async implementation. */
4373
4374static void
4375linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4376 void *context), void *context)
4377{
4378 if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled)
4379 internal_error (__FILE__, __LINE__,
4380 "Calling target_async when async is masked");
4381
4382 if (callback != NULL)
4383 {
4384 async_client_callback = callback;
4385 async_client_context = context;
4386 add_file_handler (linux_nat_event_pipe[0],
4387 linux_nat_async_file_handler, NULL);
4388
84e46146 4389 linux_nat_async_events (sigchld_async);
b84876c2
PA
4390 }
4391 else
4392 {
4393 async_client_callback = callback;
4394 async_client_context = context;
4395
84e46146 4396 linux_nat_async_events (sigchld_sync);
b84876c2
PA
4397 delete_file_handler (linux_nat_event_pipe[0]);
4398 }
4399 return;
4400}
4401
/* Enable/Disable async mode.  Creates the event pipe when turning
   async on; drains and destroys it when turning async off.  */

static void
linux_nat_set_async_mode (int on)
{
  if (linux_nat_async_enabled != on)
    {
      if (on)
	{
	  gdb_assert (waitpid_queue == NULL);
	  if (pipe (linux_nat_event_pipe) == -1)
	    internal_error (__FILE__, __LINE__,
			    "creating event pipe failed.");
	  /* Non-blocking at both ends: the SIGCHLD handler writes and
	     the event loop reads without ever stalling GDB.  */
	  fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
	  fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
	}
      else
	{
	  /* Flush anything still queued before tearing the pipe
	     down.  */
	  drain_queued_events (-1);
	  linux_nat_num_queued_events = 0;
	  close (linux_nat_event_pipe[0]);
	  close (linux_nat_event_pipe[1]);
	  linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1;

	}
    }
  linux_nat_async_enabled = on;
}
4430
/* iterate_over_lwps callback: interrupt LP with SIGINT if the user
   considers it running.  DATA is unused.  */

static int
send_sigint_callback (struct lwp_info *lp, void *data)
{
  /* Use is_running instead of !lp->stopped, because the lwp may be
     stopped due to an internal event, and we want to interrupt it in
     that case too.  What we want is to check if the thread is stopped
     from the point of view of the user.  */
  if (is_running (lp->ptid))
    kill_lwp (GET_LWP (lp->ptid), SIGINT);
  /* Always return 0 so the iteration visits every lwp.  */
  return 0;
}
4442
4443static void
4444linux_nat_stop (ptid_t ptid)
4445{
4446 if (non_stop)
4447 {
4448 if (ptid_equal (ptid, minus_one_ptid))
4449 iterate_over_lwps (send_sigint_callback, &ptid);
4450 else
4451 {
4452 struct lwp_info *lp = find_lwp_pid (ptid);
4453 send_sigint_callback (lp, NULL);
4454 }
4455 }
4456 else
4457 linux_ops->to_stop (ptid);
4458}
4459
/* Register the multi-threaded GNU/Linux native target built on top of
   the single-threaded target T.  */

void
linux_nat_add_target (struct target_ops *t)
{
  /* Save the provided single-threaded target.  We save this in a separate
     variable because another target we've inherited from (e.g. inf-ptrace)
     may have saved a pointer to T; we want to use it for the final
     process stratum target.  */
  linux_ops_saved = *t;
  linux_ops = &linux_ops_saved;

  /* Override some methods for multithreading.  */
  t->to_create_inferior = linux_nat_create_inferior;
  t->to_attach = linux_nat_attach;
  t->to_detach = linux_nat_detach;
  t->to_resume = linux_nat_resume;
  t->to_wait = linux_nat_wait;
  t->to_xfer_partial = linux_nat_xfer_partial;
  t->to_kill = linux_nat_kill;
  t->to_mourn_inferior = linux_nat_mourn_inferior;
  t->to_thread_alive = linux_nat_thread_alive;
  t->to_pid_to_str = linux_nat_pid_to_str;
  t->to_has_thread_control = tc_schedlock;

  /* Async and terminal-ownership methods.  */
  t->to_can_async_p = linux_nat_can_async_p;
  t->to_is_async_p = linux_nat_is_async_p;
  t->to_async = linux_nat_async;
  t->to_async_mask = linux_nat_async_mask;
  t->to_terminal_inferior = linux_nat_terminal_inferior;
  t->to_terminal_ours = linux_nat_terminal_ours;

  /* Methods for non-stop support.  */
  t->to_stop = linux_nat_stop;

  /* We don't change the stratum; this target will sit at
     process_stratum and thread_db will set at thread_stratum.  This
     is a little strange, since this is a multi-threaded-capable
     target, but we want to be on the stack below thread_db, and we
     also want to be used for single-threaded processes.  */

  add_target (t);

  /* TODO: Eliminate this and have libthread_db use
     find_target_beneath.  */
  thread_db_init (t);
}
4505
/* Register a method to call whenever a new thread is attached.  */
void
linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
{
  /* Save the pointer.  We only support a single registered instance
     of the GNU/Linux native target, so we do not need to map this to
     T.  */
  linux_nat_new_thread = new_thread;
}
4515
4516/* Return the saved siginfo associated with PTID. */
4517struct siginfo *
4518linux_nat_get_siginfo (ptid_t ptid)
4519{
4520 struct lwp_info *lp = find_lwp_pid (ptid);
4521
4522 gdb_assert (lp != NULL);
4523
4524 return &lp->siginfo;
4525}
4526
/* Module initializer: registers the "info proc" command, the debug
   and async maintenance settings, and sets up the SIGCHLD handlers
   and signal masks.  The ordering of the signal setup below is
   deliberate — see the individual comments.  */

void
_initialize_linux_nat (void)
{
  sigset_t mask;

  add_info ("proc", linux_nat_info_proc_cmd, _("\
Show /proc process information about any running process.\n\
Specify any process id, or use the program being debugged by default.\n\
Specify any of the following keywords for detailed info:\n\
  mappings -- list of mapped memory regions.\n\
  stat     -- list a bunch of random process info.\n\
  status   -- list a different bunch of random process info.\n\
  all      -- list all available /proc info."));

  add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
			    &debug_linux_nat, _("\
Set debugging of GNU/Linux lwp module."), _("\
Show debugging of GNU/Linux lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat,
			    &setdebuglist, &showdebuglist);

  add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
			    &debug_linux_nat_async, _("\
Set debugging of GNU/Linux async lwp module."), _("\
Show debugging of GNU/Linux async lwp module."), _("\
Enables printf debugging output."),
			    NULL,
			    show_debug_linux_nat_async,
			    &setdebuglist, &showdebuglist);

  /* Note: the setting writes to the staging variable
     linux_async_permitted_1; the set hook commits it.  */
  add_setshow_boolean_cmd ("linux-async", class_maintenance,
			   &linux_async_permitted_1, _("\
Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
			   set_maintenance_linux_async_permitted,
			   show_maintenance_linux_async_permitted,
			   &maintenance_set_cmdlist,
			   &maintenance_show_cmdlist);

  /* Get the default SIGCHLD action.  Used while forking an inferior
     (see linux_nat_create_inferior/linux_nat_async_events).  */
  sigaction (SIGCHLD, NULL, &sigchld_default_action);

  /* Block SIGCHLD by default.  Doing this early prevents it getting
     unblocked if an exception is thrown due to an error while the
     inferior is starting (sigsetjmp/siglongjmp).  */
  sigemptyset (&mask);
  sigaddset (&mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &mask, NULL);

  /* Save this mask as the default.  */
  sigprocmask (SIG_SETMASK, NULL, &normal_mask);

  /* The synchronous SIGCHLD handler.  */
  sync_sigchld_action.sa_handler = sigchld_handler;
  sigemptyset (&sync_sigchld_action.sa_mask);
  sync_sigchld_action.sa_flags = SA_RESTART;

  /* Make it the default.  */
  sigaction (SIGCHLD, &sync_sigchld_action, NULL);

  /* Make sure we don't block SIGCHLD during a sigsuspend.  */
  sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  /* SIGCHLD handler for async mode.  Installed later by
     linux_nat_async_events when switching to sigchld_async.  */
  async_sigchld_action.sa_handler = async_sigchld_handler;
  sigemptyset (&async_sigchld_action.sa_mask);
  async_sigchld_action.sa_flags = SA_RESTART;

  /* Install the default mode.  */
  linux_nat_set_async_mode (linux_async_permitted);

  add_setshow_boolean_cmd ("disable-randomization", class_support,
			   &disable_randomization, _("\
Set disabling of debuggee's virtual address space randomization."), _("\
Show disabling of debuggee's virtual address space randomization."), _("\
When this mode is on (which is the default), randomization of the virtual\n\
address space is disabled.  Standalone programs run with the randomization\n\
enabled by default on some platforms."),
			   &set_disable_randomization,
			   &show_disable_randomization,
			   &setlist, &showlist);
}
4614\f
4615
4616/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4617 the GNU/Linux Threads library and therefore doesn't really belong
4618 here. */
4619
4620/* Read variable NAME in the target and return its value if found.
4621 Otherwise return zero. It is assumed that the type of the variable
4622 is `int'. */
4623
4624static int
4625get_signo (const char *name)
4626{
4627 struct minimal_symbol *ms;
4628 int signo;
4629
4630 ms = lookup_minimal_symbol (name, NULL, NULL);
4631 if (ms == NULL)
4632 return 0;
4633
8e70166d 4634 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4635 sizeof (signo)) != 0)
4636 return 0;
4637
4638 return signo;
4639}
4640
/* Return the set of signals used by the threads library in *SET.
   Also installs GDB's SIGCHLD handler for the "cancel" signal and
   blocks it (except during sigsuspend).  */

void
lin_thread_get_thread_signals (sigset_t *set)
{
  struct sigaction action;
  int restart, cancel;
  sigset_t blocked_mask;

  sigemptyset (&blocked_mask);
  sigemptyset (set);

  restart = get_signo ("__pthread_sig_restart");
  cancel = get_signo ("__pthread_sig_cancel");

  /* LinuxThreads normally uses the first two RT signals, but in some legacy
     cases may use SIGUSR1/SIGUSR2.  NPTL always uses RT signals, but does
     not provide any way for the debugger to query the signal numbers -
     fortunately they don't change!  */

  if (restart == 0)
    restart = __SIGRTMIN;

  if (cancel == 0)
    cancel = __SIGRTMIN + 1;

  sigaddset (set, restart);
  sigaddset (set, cancel);

  /* The GNU/Linux Threads library makes terminating threads send a
     special "cancel" signal instead of SIGCHLD.  Make sure we catch
     those (to prevent them from terminating GDB itself, which is
     likely to be their default action) and treat them the same way as
     SIGCHLD.  */

  action.sa_handler = sigchld_handler;
  sigemptyset (&action.sa_mask);
  action.sa_flags = SA_RESTART;
  sigaction (cancel, &action, NULL);

  /* We block the "cancel" signal throughout this code ...  */
  sigaddset (&blocked_mask, cancel);
  sigprocmask (SIG_BLOCK, &blocked_mask, NULL);

  /* ... except during a sigsuspend.  */
  sigdelset (&suspend_mask, cancel);
}
This page took 0.757338 seconds and 4 git commands to generate.