gdb/linux-nat.c
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
9b254dd1 3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
e26af52f 4 Free Software Foundation, Inc.
3993f6b1
DJ
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
20
21#include "defs.h"
22#include "inferior.h"
23#include "target.h"
d6b0e80f 24#include "gdb_string.h"
3993f6b1 25#include "gdb_wait.h"
d6b0e80f
AC
26#include "gdb_assert.h"
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
ac264b3b 33#include "linux-fork.h"
d6b0e80f
AC
34#include "gdbthread.h"
35#include "gdbcmd.h"
36#include "regcache.h"
4f844a66 37#include "regset.h"
10d6c8cd
DJ
38#include "inf-ptrace.h"
39#include "auxv.h"
dba24537
AC
40#include <sys/param.h> /* for MAXPATHLEN */
41#include <sys/procfs.h> /* for elf_gregset etc. */
42#include "elf-bfd.h" /* for elfcore_write_* */
43#include "gregset.h" /* for gregset */
44#include "gdbcore.h" /* for get_exec_file */
45#include <ctype.h> /* for isdigit */
46#include "gdbthread.h" /* for struct thread_info etc. */
47#include "gdb_stat.h" /* for struct stat */
48#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
49#include "inf-loop.h"
50#include "event-loop.h"
51#include "event-top.h"
dba24537 52
10568435
JK
53#ifdef HAVE_PERSONALITY
54# include <sys/personality.h>
55# if !HAVE_DECL_ADDR_NO_RANDOMIZE
56# define ADDR_NO_RANDOMIZE 0x0040000
57# endif
58#endif /* HAVE_PERSONALITY */
59
8a77dff3
VP
/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid alone is not sufficient.
Prior to version 2.4, Linux could wait either for events in the main thread
or for events in secondary threads, but not for both in a single call (2.4
added the __WALL flag). So, if we used blocking waitpid, we might miss an
event. The solution is to use non-blocking waitpid, together with
sigsuspend. First, we use non-blocking waitpid to get an event in the main
process, if any. Second, we use non-blocking waitpid with the __WCLONE flag
to check for events in cloned processes. If nothing is found, we use
sigsuspend to wait for SIGCHLD. When SIGCHLD arrives, it means something
happened to a child process -- and SIGCHLD will be delivered both for events
in the main debugged process and in cloned processes. As soon as we know
there's an event, we go back to calling non-blocking waitpid with and without
__WCLONE.

Note that SIGCHLD must be blocked between the waitpid and sigsuspend calls,
so that we don't miss a signal. If SIGCHLD arrives in between, while it's
blocked, the signal becomes pending and sigsuspend immediately notices it
and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input and
target events, so neither blocking waitpid nor sigsuspend is a viable option.
Instead, we should notify the GDB main event loop whenever there's an
unprocessed event from the target. The only way to notify this event loop is
to make it wait on input from a pipe, and write something to the pipe
whenever there's an event. Obviously, failing to notify the event loop when
there is a target event is bad. Notifying the event loop when there's no
event from the target merely makes linux-nat.c detect that there's actually
no event and report an event of type TARGET_WAITKIND_IGNORE; that wastes
time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c, we have
a SIGCHLD handler installed that is called when something happens to the
target and notifies the GDB event loop. Also, the event is extracted from
the target using waitpid and stored for future use. Whenever the GDB core
decides to handle the event, and calls into linux-nat.c, we disable SIGCHLD
and process things as in sync mode, except that before the waitpid call we
check whether there are any previously read events.

It could happen that during event processing, we'll try to get more events
than there are in the local queue, which will result in a waitpid call.
Those waitpid calls, while blocking, are guaranteed to always have something
for waitpid to return. E.g., stopping a thread with SIGSTOP, and waiting for
the lwp to stop.

The event loop is notified about new events using a pipe. The SIGCHLD
handler does waitpid and writes the results into a pipe. The GDB event loop
has the other end of the pipe among its sources. When the event loop starts
to process an event and calls a function in linux-nat.c, all events from the
pipe are transferred into a local queue and SIGCHLD is blocked. Further
processing goes as in sync mode. Before we return from linux_nat_wait, we
transfer all unprocessed events from the local queue back to the pipe, so
that when we get back to the event loop, it will notice there's something
more to do.

SIGCHLD is blocked when we're inside target_wait, so that should we actually
want to wait for some more events, the SIGCHLD handler does not steal them
from us. Technically, it would be possible to add new events to the local
queue instead, but that's about the same amount of work as blocking SIGCHLD.

This moving of events from the pipe into a local queue and back into the
pipe when we enter/leave linux-nat.c is somewhat ugly. Unfortunately, the
GDB event loop is home-grown and cannot wait on an arbitrary queue.

Use of signals
==============

We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it. SIGSTOP's advantage is that it cannot be
blocked. A disadvantage is that it is not a real-time signal, so it can
only be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL. But we
can't use them, because they have special behavior when the signal is
generated - not when it is delivered. SIGCONT resumes the entire thread
group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread
we tkill'd. But we never let the SIGSTOP be delivered; we always intercept
and cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead. This would solve those problems;
we could use PTRACE_GETSIGINFO to locate the specific stop signals sent by
GDB. But we would still have to have some support for SIGSTOP, since
PTRACE_ATTACH generates it, and there are races with trying to find a signal
that is not blocked. */

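/* For illustration only: a minimal sketch of the sync-mode scheme
   described above.  This is not GDB's actual implementation; the
   function name and the explicit mask parameter are hypothetical, and
   error handling is omitted.  It is wrapped in "#if 0" so it is never
   built.  */

#if 0
static int
example_sync_wait (sigset_t *suspend_mask_p, int *statusp)
{
  /* SIGCHLD is assumed to be blocked on entry, so a SIGCHLD arriving
     between the waitpid calls and sigsuspend stays pending and
     sigsuspend returns immediately.  */
  for (;;)
    {
      int pid;

      /* Non-blocking check for an event in the main process.  */
      pid = waitpid (-1, statusp, WNOHANG);
      if (pid > 0)
        return pid;

      /* Non-blocking check for events in cloned processes.  */
      pid = waitpid (-1, statusp, WNOHANG | __WCLONE);
      if (pid > 0)
        return pid;

      /* Nothing yet; sleep until SIGCHLD is delivered.  */
      sigsuspend (suspend_mask_p);
    }
}
#endif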
a0ef4274 154
dba24537
AC
155#ifndef O_LARGEFILE
156#define O_LARGEFILE 0
157#endif
0274a8ce 158
3993f6b1
DJ
159/* If the system headers did not provide the constants, hard-code the normal
160 values. */
161#ifndef PTRACE_EVENT_FORK
162
163#define PTRACE_SETOPTIONS 0x4200
164#define PTRACE_GETEVENTMSG 0x4201
165
166/* options set using PTRACE_SETOPTIONS */
167#define PTRACE_O_TRACESYSGOOD 0x00000001
168#define PTRACE_O_TRACEFORK 0x00000002
169#define PTRACE_O_TRACEVFORK 0x00000004
170#define PTRACE_O_TRACECLONE 0x00000008
171#define PTRACE_O_TRACEEXEC 0x00000010
9016a515
DJ
172#define PTRACE_O_TRACEVFORKDONE 0x00000020
173#define PTRACE_O_TRACEEXIT 0x00000040
3993f6b1
DJ
174
175/* Wait extended result codes for the above trace options. */
176#define PTRACE_EVENT_FORK 1
177#define PTRACE_EVENT_VFORK 2
178#define PTRACE_EVENT_CLONE 3
179#define PTRACE_EVENT_EXEC 4
c874c7fc 180#define PTRACE_EVENT_VFORK_DONE 5
9016a515 181#define PTRACE_EVENT_EXIT 6
3993f6b1
DJ
182
183#endif /* PTRACE_EVENT_FORK */
184
185/* We can't always assume that this flag is available, but all systems
186 with the ptrace event handlers also have __WALL, so it's safe to use
187 here. */
188#ifndef __WALL
189#define __WALL 0x40000000 /* Wait for any child. */
190#endif
191
02d3ff8c
UW
192#ifndef PTRACE_GETSIGINFO
193#define PTRACE_GETSIGINFO 0x4202
194#endif
195
10d6c8cd
DJ
196/* The single-threaded native GNU/Linux target_ops. We save a pointer for
197 the use of the multi-threaded target. */
198static struct target_ops *linux_ops;
f973ed9c 199static struct target_ops linux_ops_saved;
10d6c8cd 200
9f0bdab8
DJ
201/* The method to call, if any, when a new thread is attached. */
202static void (*linux_nat_new_thread) (ptid_t);
203
ac264b3b
MS
204/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
205 Called by our to_xfer_partial. */
206static LONGEST (*super_xfer_partial) (struct target_ops *,
207 enum target_object,
208 const char *, gdb_byte *,
209 const gdb_byte *,
10d6c8cd
DJ
210 ULONGEST, LONGEST);
211
d6b0e80f 212static int debug_linux_nat;
920d2a44
AC
213static void
214show_debug_linux_nat (struct ui_file *file, int from_tty,
215 struct cmd_list_element *c, const char *value)
216{
217 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
218 value);
219}
d6b0e80f 220
b84876c2
PA
221static int debug_linux_nat_async = 0;
222static void
223show_debug_linux_nat_async (struct ui_file *file, int from_tty,
224 struct cmd_list_element *c, const char *value)
225{
226 fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
227 value);
228}
229
10568435
JK
230static int disable_randomization = 1;
231
232static void
233show_disable_randomization (struct ui_file *file, int from_tty,
234 struct cmd_list_element *c, const char *value)
235{
236#ifdef HAVE_PERSONALITY
237 fprintf_filtered (file, _("\
238Disabling randomization of debuggee's virtual address space is %s.\n"),
239 value);
240#else /* !HAVE_PERSONALITY */
241 fputs_filtered (_("\
242Disabling randomization of debuggee's virtual address space is unsupported on\n\
243this platform.\n"), file);
244#endif /* !HAVE_PERSONALITY */
245}
246
247static void
248set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
249{
250#ifndef HAVE_PERSONALITY
251 error (_("\
252Disabling randomization of debuggee's virtual address space is unsupported on\n\
253this platform."));
254#endif /* !HAVE_PERSONALITY */
255}
256
9016a515
DJ
257static int linux_parent_pid;
258
ae087d01
DJ
259struct simple_pid_list
260{
261 int pid;
3d799a95 262 int status;
ae087d01
DJ
263 struct simple_pid_list *next;
264};
265struct simple_pid_list *stopped_pids;
266
3993f6b1
DJ
267/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
268 can not be used, 1 if it can. */
269
270static int linux_supports_tracefork_flag = -1;
271
9016a515
DJ
272/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
273 PTRACE_O_TRACEVFORKDONE. */
274
275static int linux_supports_tracevforkdone_flag = -1;
276
b84876c2
PA
277/* Async mode support */
278
b84876c2
PA
279/* Zero if the async mode, although enabled, is masked, which means
280 linux_nat_wait should behave as if async mode was off. */
281static int linux_nat_async_mask_value = 1;
282
283/* The read/write ends of the pipe registered as waitable file in the
284 event loop. */
285static int linux_nat_event_pipe[2] = { -1, -1 };
286
287/* Number of queued events in the pipe. */
288static volatile int linux_nat_num_queued_events;
289
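/* For illustration only: a rough sketch of how an async-mode SIGCHLD
   handler can drain the kernel's event queue into the local queue and
   wake the event loop through the pipe above, as described in the
   comment at the top of this file.  This is not GDB's actual handler;
   the function name is hypothetical, signal-safety details are glossed
   over, and the code is never built.  */

#if 0
static void
example_async_sigchld_handler (int signo)
{
  for (;;)
    {
      int status, options = 0;
      pid_t pid;

      /* Reap whatever is available without blocking, checking plain
         children first and cloned children second.  */
      pid = waitpid (-1, &status, WNOHANG);
      if (pid <= 0)
        {
          options = __WCLONE;
          pid = waitpid (-1, &status, WNOHANG | __WCLONE);
        }
      if (pid <= 0)
        break;

      /* Store the event for later consumption by linux_nat_wait.  */
      push_waitpid (pid, status, options);

      /* Wake the event loop by writing a byte to the pipe it selects
         on.  */
      write (linux_nat_event_pipe[1], "+", 1);
    }
}
#endif
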
84e46146 290/* The possible SIGCHLD handling states. */
b84876c2 291
84e46146
PA
292enum sigchld_state
293{
294 /* SIGCHLD disabled, with action set to sigchld_handler, for the
295 sigsuspend in linux_nat_wait. */
296 sigchld_sync,
297 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
298 sigchld_async,
299 /* Set SIGCHLD to default action. Used while creating an
300 inferior. */
301 sigchld_default
302};
303
304/* The current SIGCHLD handling state. */
305static enum sigchld_state linux_nat_async_events_state;
306
307static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
b84876c2
PA
308static void pipe_to_local_event_queue (void);
309static void local_event_queue_to_pipe (void);
310static void linux_nat_event_pipe_push (int pid, int status, int options);
311static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
312static void linux_nat_set_async_mode (int on);
313static void linux_nat_async (void (*callback)
314 (enum inferior_event_type event_type, void *context),
315 void *context);
316static int linux_nat_async_mask (int mask);
a0ef4274 317static int kill_lwp (int lwpid, int signo);
b84876c2 318
4c28f408
PA
319static int stop_callback (struct lwp_info *lp, void *data);
320
b84876c2
PA
321/* Captures the result of a successful waitpid call, along with the
322 options used in that call. */
323struct waitpid_result
324{
325 int pid;
326 int status;
327 int options;
328 struct waitpid_result *next;
329};
330
331/* A singly-linked list of the results of the waitpid calls performed
332 in the async SIGCHLD handler. */
333static struct waitpid_result *waitpid_queue = NULL;
334
252fbfc8
PA
 335/* Similar to `waitpid', but check the local event queue instead of
336 querying the kernel queue. If PEEK, don't remove the event found
337 from the queue. */
338
b84876c2 339static int
252fbfc8 340queued_waitpid_1 (int pid, int *status, int flags, int peek)
b84876c2
PA
341{
342 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
343
344 if (debug_linux_nat_async)
345 fprintf_unfiltered (gdb_stdlog,
346 "\
84e46146
PA
347QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
348 linux_nat_async_events_state,
b84876c2
PA
349 linux_nat_num_queued_events);
350
351 if (flags & __WALL)
352 {
353 for (; msg; prev = msg, msg = msg->next)
354 if (pid == -1 || pid == msg->pid)
355 break;
356 }
357 else if (flags & __WCLONE)
358 {
359 for (; msg; prev = msg, msg = msg->next)
360 if (msg->options & __WCLONE
361 && (pid == -1 || pid == msg->pid))
362 break;
363 }
364 else
365 {
366 for (; msg; prev = msg, msg = msg->next)
367 if ((msg->options & __WCLONE) == 0
368 && (pid == -1 || pid == msg->pid))
369 break;
370 }
371
372 if (msg)
373 {
374 int pid;
375
b84876c2
PA
376 if (status)
377 *status = msg->status;
378 pid = msg->pid;
379
380 if (debug_linux_nat_async)
381 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
382 pid, msg->status);
252fbfc8
PA
383
384 if (!peek)
385 {
386 if (prev)
387 prev->next = msg->next;
388 else
389 waitpid_queue = msg->next;
390
391 msg->next = NULL;
392 xfree (msg);
393 }
b84876c2
PA
394
395 return pid;
396 }
397
398 if (debug_linux_nat_async)
399 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
400
401 if (status)
402 *status = 0;
403 return -1;
404}
405
252fbfc8
PA
 406/* Similar to `waitpid', but check the local event queue. */
407
408static int
409queued_waitpid (int pid, int *status, int flags)
410{
411 return queued_waitpid_1 (pid, status, flags, 0);
412}
413
b84876c2
PA
414static void
415push_waitpid (int pid, int status, int options)
416{
417 struct waitpid_result *event, *new_event;
418
419 new_event = xmalloc (sizeof (*new_event));
420 new_event->pid = pid;
421 new_event->status = status;
422 new_event->options = options;
423 new_event->next = NULL;
424
425 if (waitpid_queue)
426 {
427 for (event = waitpid_queue;
428 event && event->next;
429 event = event->next)
430 ;
431
432 event->next = new_event;
433 }
434 else
435 waitpid_queue = new_event;
436}
437
710151dd 438/* Drain all queued events of PID. If PID is -1, drain all
b84876c2
PA
 439 queued events. */
440static void
441drain_queued_events (int pid)
442{
443 while (queued_waitpid (pid, NULL, __WALL) != -1)
444 ;
445}
446
ae087d01
DJ
447\f
448/* Trivial list manipulation functions to keep track of a list of
449 new stopped processes. */
450static void
3d799a95 451add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
452{
453 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
454 new_pid->pid = pid;
3d799a95 455 new_pid->status = status;
ae087d01
DJ
456 new_pid->next = *listp;
457 *listp = new_pid;
458}
459
460static int
3d799a95 461pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
ae087d01
DJ
462{
463 struct simple_pid_list **p;
464
465 for (p = listp; *p != NULL; p = &(*p)->next)
466 if ((*p)->pid == pid)
467 {
468 struct simple_pid_list *next = (*p)->next;
3d799a95 469 *status = (*p)->status;
ae087d01
DJ
470 xfree (*p);
471 *p = next;
472 return 1;
473 }
474 return 0;
475}
476
3d799a95
DJ
477static void
478linux_record_stopped_pid (int pid, int status)
ae087d01 479{
3d799a95 480 add_to_pid_list (&stopped_pids, pid, status);
ae087d01
DJ
481}
482
3993f6b1
DJ
483\f
484/* A helper function for linux_test_for_tracefork, called after fork (). */
485
486static void
487linux_tracefork_child (void)
488{
489 int ret;
490
491 ptrace (PTRACE_TRACEME, 0, 0, 0);
492 kill (getpid (), SIGSTOP);
493 fork ();
48bb3cce 494 _exit (0);
3993f6b1
DJ
495}
496
b84876c2
PA
497/* Wrapper function for waitpid which handles EINTR, and checks for
498 locally queued events. */
b957e937
DJ
499
500static int
501my_waitpid (int pid, int *status, int flags)
502{
503 int ret;
b84876c2
PA
504
505 /* There should be no concurrent calls to waitpid. */
84e46146 506 gdb_assert (linux_nat_async_events_state == sigchld_sync);
b84876c2
PA
507
508 ret = queued_waitpid (pid, status, flags);
509 if (ret != -1)
510 return ret;
511
b957e937
DJ
512 do
513 {
514 ret = waitpid (pid, status, flags);
515 }
516 while (ret == -1 && errno == EINTR);
517
518 return ret;
519}
520
521/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
522
523 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
524 we know that the feature is not available. This may change the tracing
525 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
526
527 However, if it succeeds, we don't know for sure that the feature is
528 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 529 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
530 fork tracing, and let it fork. If the process exits, we assume that we
531 can't use TRACEFORK; if we get the fork notification, and we can extract
532 the new child's PID, then we assume that we can. */
3993f6b1
DJ
533
534static void
b957e937 535linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
536{
537 int child_pid, ret, status;
538 long second_pid;
4c28f408
PA
539 enum sigchld_state async_events_original_state;
540
541 async_events_original_state = linux_nat_async_events (sigchld_sync);
3993f6b1 542
b957e937
DJ
543 linux_supports_tracefork_flag = 0;
544 linux_supports_tracevforkdone_flag = 0;
545
546 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
547 if (ret != 0)
548 return;
549
3993f6b1
DJ
550 child_pid = fork ();
551 if (child_pid == -1)
e2e0b3e5 552 perror_with_name (("fork"));
3993f6b1
DJ
553
554 if (child_pid == 0)
555 linux_tracefork_child ();
556
b957e937 557 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 558 if (ret == -1)
e2e0b3e5 559 perror_with_name (("waitpid"));
3993f6b1 560 else if (ret != child_pid)
8a3fe4f8 561 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 562 if (! WIFSTOPPED (status))
8a3fe4f8 563 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
3993f6b1 564
3993f6b1
DJ
565 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
566 if (ret != 0)
567 {
b957e937
DJ
568 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
569 if (ret != 0)
570 {
8a3fe4f8 571 warning (_("linux_test_for_tracefork: failed to kill child"));
4c28f408 572 linux_nat_async_events (async_events_original_state);
b957e937
DJ
573 return;
574 }
575
576 ret = my_waitpid (child_pid, &status, 0);
577 if (ret != child_pid)
8a3fe4f8 578 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
b957e937 579 else if (!WIFSIGNALED (status))
8a3fe4f8
AC
580 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
581 "killed child"), status);
b957e937 582
4c28f408 583 linux_nat_async_events (async_events_original_state);
3993f6b1
DJ
584 return;
585 }
586
9016a515
DJ
587 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
588 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
589 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
590 linux_supports_tracevforkdone_flag = (ret == 0);
591
b957e937
DJ
592 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
593 if (ret != 0)
8a3fe4f8 594 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
595
596 ret = my_waitpid (child_pid, &status, 0);
597
3993f6b1
DJ
598 if (ret == child_pid && WIFSTOPPED (status)
599 && status >> 16 == PTRACE_EVENT_FORK)
600 {
601 second_pid = 0;
602 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
603 if (ret == 0 && second_pid != 0)
604 {
605 int second_status;
606
607 linux_supports_tracefork_flag = 1;
b957e937
DJ
608 my_waitpid (second_pid, &second_status, 0);
609 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
610 if (ret != 0)
8a3fe4f8 611 warning (_("linux_test_for_tracefork: failed to kill second child"));
97725dc4 612 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
613 }
614 }
b957e937 615 else
8a3fe4f8
AC
616 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
617 "(%d, status 0x%x)"), ret, status);
3993f6b1 618
b957e937
DJ
619 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
620 if (ret != 0)
8a3fe4f8 621 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 622 my_waitpid (child_pid, &status, 0);
4c28f408
PA
623
624 linux_nat_async_events (async_events_original_state);
3993f6b1
DJ
625}
626
627/* Return non-zero iff we have tracefork functionality available.
628 This function also sets linux_supports_tracefork_flag. */
629
630static int
b957e937 631linux_supports_tracefork (int pid)
3993f6b1
DJ
632{
633 if (linux_supports_tracefork_flag == -1)
b957e937 634 linux_test_for_tracefork (pid);
3993f6b1
DJ
635 return linux_supports_tracefork_flag;
636}
637
9016a515 638static int
b957e937 639linux_supports_tracevforkdone (int pid)
9016a515
DJ
640{
641 if (linux_supports_tracefork_flag == -1)
b957e937 642 linux_test_for_tracefork (pid);
9016a515
DJ
643 return linux_supports_tracevforkdone_flag;
644}
645
3993f6b1 646\f
4de4c07c
DJ
647void
648linux_enable_event_reporting (ptid_t ptid)
649{
d3587048 650 int pid = ptid_get_lwp (ptid);
4de4c07c
DJ
651 int options;
652
d3587048
DJ
653 if (pid == 0)
654 pid = ptid_get_pid (ptid);
655
b957e937 656 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
657 return;
658
a2f23071
DJ
659 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
660 | PTRACE_O_TRACECLONE;
b957e937 661 if (linux_supports_tracevforkdone (pid))
9016a515
DJ
662 options |= PTRACE_O_TRACEVFORKDONE;
663
664 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
665 read-only process state. */
4de4c07c
DJ
666
667 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
668}
669
6d8fd2b7
UW
670static void
671linux_child_post_attach (int pid)
4de4c07c
DJ
672{
673 linux_enable_event_reporting (pid_to_ptid (pid));
0ec9a092 674 check_for_thread_db ();
4de4c07c
DJ
675}
676
10d6c8cd 677static void
4de4c07c
DJ
678linux_child_post_startup_inferior (ptid_t ptid)
679{
680 linux_enable_event_reporting (ptid);
0ec9a092 681 check_for_thread_db ();
4de4c07c
DJ
682}
683
6d8fd2b7
UW
684static int
685linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 686{
4de4c07c
DJ
687 ptid_t last_ptid;
688 struct target_waitstatus last_status;
9016a515 689 int has_vforked;
4de4c07c
DJ
690 int parent_pid, child_pid;
691
b84876c2
PA
692 if (target_can_async_p ())
693 target_async (NULL, 0);
694
4de4c07c 695 get_last_target_status (&last_ptid, &last_status);
9016a515 696 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
d3587048
DJ
697 parent_pid = ptid_get_lwp (last_ptid);
698 if (parent_pid == 0)
699 parent_pid = ptid_get_pid (last_ptid);
3a3e9ee3 700 child_pid = PIDGET (last_status.value.related_pid);
4de4c07c
DJ
701
702 if (! follow_child)
703 {
704 /* We're already attached to the parent, by default. */
705
706 /* Before detaching from the child, remove all breakpoints from
707 it. (This won't actually modify the breakpoint list, but will
708 physically remove the breakpoints from the child.) */
9016a515
DJ
709 /* If we vforked this will remove the breakpoints from the parent
710 also, but they'll be reinserted below. */
4de4c07c
DJ
711 detach_breakpoints (child_pid);
712
ac264b3b
MS
713 /* Detach new forked process? */
714 if (detach_fork)
f75c00e4 715 {
e85a822c 716 if (info_verbose || debug_linux_nat)
ac264b3b
MS
717 {
718 target_terminal_ours ();
719 fprintf_filtered (gdb_stdlog,
720 "Detaching after fork from child process %d.\n",
721 child_pid);
722 }
4de4c07c 723
ac264b3b
MS
724 ptrace (PTRACE_DETACH, child_pid, 0, 0);
725 }
726 else
727 {
728 struct fork_info *fp;
7f9f62ba
PA
729
730 /* Add process to GDB's tables. */
731 add_inferior (child_pid);
732
ac264b3b
MS
733 /* Retain child fork in ptrace (stopped) state. */
734 fp = find_fork_pid (child_pid);
735 if (!fp)
736 fp = add_fork (child_pid);
737 fork_save_infrun_state (fp, 0);
738 }
9016a515
DJ
739
740 if (has_vforked)
741 {
b957e937
DJ
742 gdb_assert (linux_supports_tracefork_flag >= 0);
743 if (linux_supports_tracevforkdone (0))
9016a515
DJ
744 {
745 int status;
746
747 ptrace (PTRACE_CONT, parent_pid, 0, 0);
58aecb61 748 my_waitpid (parent_pid, &status, __WALL);
c874c7fc 749 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
8a3fe4f8
AC
750 warning (_("Unexpected waitpid result %06x when waiting for "
751 "vfork-done"), status);
9016a515
DJ
752 }
753 else
754 {
755 /* We can't insert breakpoints until the child has
756 finished with the shared memory region. We need to
757 wait until that happens. Ideal would be to just
758 call:
759 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
760 - waitpid (parent_pid, &status, __WALL);
761 However, most architectures can't handle a syscall
762 being traced on the way out if it wasn't traced on
763 the way in.
764
765 We might also think to loop, continuing the child
766 until it exits or gets a SIGTRAP. One problem is
767 that the child might call ptrace with PTRACE_TRACEME.
768
769 There's no simple and reliable way to figure out when
770 the vforked child will be done with its copy of the
771 shared memory. We could step it out of the syscall,
772 two instructions, let it go, and then single-step the
773 parent once. When we have hardware single-step, this
774 would work; with software single-step it could still
775 be made to work but we'd have to be able to insert
776 single-step breakpoints in the child, and we'd have
777 to insert -just- the single-step breakpoint in the
778 parent. Very awkward.
779
780 In the end, the best we can do is to make sure it
781 runs for a little while. Hopefully it will be out of
782 range of any breakpoints we reinsert. Usually this
783 is only the single-step breakpoint at vfork's return
784 point. */
785
786 usleep (10000);
787 }
788
789 /* Since we vforked, breakpoints were removed in the parent
790 too. Put them back. */
791 reattach_breakpoints (parent_pid);
792 }
4de4c07c 793 }
3993f6b1 794 else
4de4c07c 795 {
4e1c45ea
PA
796 struct thread_info *last_tp = find_thread_pid (last_ptid);
797 struct thread_info *tp;
4de4c07c
DJ
798 char child_pid_spelling[40];
799
4e1c45ea
PA
800 /* Copy user stepping state to the new inferior thread. */
801 struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
802 CORE_ADDR step_range_start = last_tp->step_range_start;
803 CORE_ADDR step_range_end = last_tp->step_range_end;
804 struct frame_id step_frame_id = last_tp->step_frame_id;
805
806 /* Otherwise, deleting the parent would get rid of this
807 breakpoint. */
808 last_tp->step_resume_breakpoint = NULL;
809
4de4c07c 810 /* Needed to keep the breakpoint lists in sync. */
9016a515
DJ
811 if (! has_vforked)
812 detach_breakpoints (child_pid);
4de4c07c
DJ
813
814 /* Before detaching from the parent, remove all breakpoints from it. */
815 remove_breakpoints ();
816
e85a822c 817 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
818 {
819 target_terminal_ours ();
ac264b3b
MS
820 fprintf_filtered (gdb_stdlog,
821 "Attaching after fork to child process %d.\n",
822 child_pid);
f75c00e4 823 }
4de4c07c 824
9016a515
DJ
825 /* If we're vforking, we may want to hold on to the parent until
826 the child exits or execs. At exec time we can remove the old
827 breakpoints from the parent and detach it; at exit time we
828 could do the same (or even, sneakily, resume debugging it - the
829 child's exec has failed, or something similar).
830
831 This doesn't clean up "properly", because we can't call
832 target_detach, but that's OK; if the current target is "child",
833 then it doesn't need any further cleanups, and lin_lwp will
834 generally not encounter vfork (vfork is defined to fork
835 in libpthread.so).
836
837 The holding part is very easy if we have VFORKDONE events;
838 but keeping track of both processes is beyond GDB at the
839 moment. So we don't expose the parent to the rest of GDB.
840 Instead we quietly hold onto it until such time as we can
841 safely resume it. */
842
843 if (has_vforked)
7f9f62ba
PA
844 {
845 linux_parent_pid = parent_pid;
846 detach_inferior (parent_pid);
847 }
ac264b3b
MS
848 else if (!detach_fork)
849 {
850 struct fork_info *fp;
851 /* Retain parent fork in ptrace (stopped) state. */
852 fp = find_fork_pid (parent_pid);
853 if (!fp)
854 fp = add_fork (parent_pid);
855 fork_save_infrun_state (fp, 0);
856 }
9016a515 857 else
b84876c2 858 target_detach (NULL, 0);
4de4c07c 859
9f0bdab8 860 inferior_ptid = ptid_build (child_pid, child_pid, 0);
7f9f62ba 861 add_inferior (child_pid);
ee057212
DJ
862
863 /* Reinstall ourselves, since we might have been removed in
864 target_detach (which does other necessary cleanup). */
ac264b3b 865
ee057212 866 push_target (ops);
9f0bdab8 867 linux_nat_switch_fork (inferior_ptid);
ef29ce1a 868 check_for_thread_db ();
4de4c07c 869
4e1c45ea
PA
870 tp = inferior_thread ();
871 tp->step_resume_breakpoint = step_resume_breakpoint;
872 tp->step_range_start = step_range_start;
873 tp->step_range_end = step_range_end;
874 tp->step_frame_id = step_frame_id;
875
4de4c07c
DJ
876 /* Reset breakpoints in the child as appropriate. */
877 follow_inferior_reset_breakpoints ();
878 }
879
b84876c2
PA
880 if (target_can_async_p ())
881 target_async (inferior_event_handler, 0);
882
4de4c07c
DJ
883 return 0;
884}
885
4de4c07c 886\f
6d8fd2b7
UW
887static void
888linux_child_insert_fork_catchpoint (int pid)
4de4c07c 889{
b957e937 890 if (! linux_supports_tracefork (pid))
8a3fe4f8 891 error (_("Your system does not support fork catchpoints."));
3993f6b1
DJ
892}
893
6d8fd2b7
UW
894static void
895linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 896{
b957e937 897 if (!linux_supports_tracefork (pid))
8a3fe4f8 898 error (_("Your system does not support vfork catchpoints."));
3993f6b1
DJ
899}
900
6d8fd2b7
UW
901static void
902linux_child_insert_exec_catchpoint (int pid)
3993f6b1 903{
b957e937 904 if (!linux_supports_tracefork (pid))
8a3fe4f8 905 error (_("Your system does not support exec catchpoints."));
3993f6b1
DJ
906}
907
d6b0e80f
AC
908/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
909 are processes sharing the same VM space. A multi-threaded process
910 is basically a group of such processes. However, such a grouping
911 is almost entirely a user-space issue; the kernel doesn't enforce
912 such a grouping at all (this might change in the future). In
913 general, we'll rely on the threads library (i.e. the GNU/Linux
914 Threads library) to provide such a grouping.
915
 916 It is perfectly possible to write a multi-threaded application
917 without the assistance of a threads library, by using the clone
918 system call directly. This module should be able to give some
919 rudimentary support for debugging such applications if developers
920 specify the CLONE_PTRACE flag in the clone system call, and are
921 using the Linux kernel 2.4 or above.
922
923 Note that there are some peculiarities in GNU/Linux that affect
924 this code:
925
926 - In general one should specify the __WCLONE flag to waitpid in
927 order to make it report events for any of the cloned processes
928 (and leave it out for the initial process). However, if a cloned
929 process has exited the exit status is only reported if the
930 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
931 we cannot use it since GDB must work on older systems too.
932
933 - When a traced, cloned process exits and is waited for by the
934 debugger, the kernel reassigns it to the original parent and
935 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
936 library doesn't notice this, which leads to the "zombie problem":
 937 When debugged, a multi-threaded process that spawns a lot of
938 threads will run out of processes, even if the threads exit,
939 because the "zombies" stay around. */
940
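/* For illustration only: a tiny sketch of the __WCLONE peculiarity
   noted above, assuming the behavior described there (ordinary events
   from a cloned LWP need __WCLONE, while the exit status of a cloned
   LWP is only reported without it).  The function name is hypothetical
   and the code is never built.  */

#if 0
static int
example_poll_lwp_event (pid_t lwp, int *statusp)
{
  /* Poll with __WCLONE first, which reports stop events for cloned
     LWPs.  */
  int pid = waitpid (lwp, statusp, __WCLONE | WNOHANG);

  /* If nothing was reported, poll again without __WCLONE, since an
     exited clone is only reported that way.  */
  if (pid <= 0)
    pid = waitpid (lwp, statusp, WNOHANG);

  return pid;
}
#endif
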
941/* List of known LWPs. */
9f0bdab8 942struct lwp_info *lwp_list;
d6b0e80f
AC
943
944/* Number of LWPs in the list. */
945static int num_lwps;
d6b0e80f
AC
946\f
947
d6b0e80f
AC
948/* Original signal mask. */
949static sigset_t normal_mask;
950
951/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
952 _initialize_linux_nat. */
953static sigset_t suspend_mask;
954
b84876c2
PA
955/* SIGCHLD action for synchronous mode. */
956struct sigaction sync_sigchld_action;
957
958/* SIGCHLD action for asynchronous mode. */
959static struct sigaction async_sigchld_action;
84e46146
PA
960
961/* SIGCHLD default action, to pass to new inferiors. */
962static struct sigaction sigchld_default_action;
d6b0e80f
AC
963\f
964
965/* Prototypes for local functions. */
966static int stop_wait_callback (struct lwp_info *lp, void *data);
967static int linux_nat_thread_alive (ptid_t ptid);
6d8fd2b7 968static char *linux_child_pid_to_exec_file (int pid);
710151dd
PA
969static int cancel_breakpoint (struct lwp_info *lp);
970
d6b0e80f
AC
971\f
972/* Convert wait status STATUS to a string. Used for printing debug
973 messages only. */
974
975static char *
976status_to_str (int status)
977{
978 static char buf[64];
979
980 if (WIFSTOPPED (status))
981 snprintf (buf, sizeof (buf), "%s (stopped)",
982 strsignal (WSTOPSIG (status)));
983 else if (WIFSIGNALED (status))
984 snprintf (buf, sizeof (buf), "%s (terminated)",
985 strsignal (WSTOPSIG (status)));
986 else
987 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
988
989 return buf;
990}
991
992/* Initialize the list of LWPs. Note that this module, contrary to
993 what GDB's generic threads layer does for its thread list,
994 re-initializes the LWP lists whenever we mourn or detach (which
995 doesn't involve mourning) the inferior. */
996
997static void
998init_lwp_list (void)
999{
1000 struct lwp_info *lp, *lpnext;
1001
1002 for (lp = lwp_list; lp; lp = lpnext)
1003 {
1004 lpnext = lp->next;
1005 xfree (lp);
1006 }
1007
1008 lwp_list = NULL;
1009 num_lwps = 0;
d6b0e80f
AC
1010}
1011
f973ed9c 1012/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
1013 structure describing the new LWP. The LWP should already be stopped
1014 (with an exception for the very first LWP). */
d6b0e80f
AC
1015
1016static struct lwp_info *
1017add_lwp (ptid_t ptid)
1018{
1019 struct lwp_info *lp;
1020
1021 gdb_assert (is_lwp (ptid));
1022
1023 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1024
1025 memset (lp, 0, sizeof (struct lwp_info));
1026
1027 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1028
1029 lp->ptid = ptid;
1030
1031 lp->next = lwp_list;
1032 lwp_list = lp;
f973ed9c 1033 ++num_lwps;
d6b0e80f 1034
9f0bdab8
DJ
1035 if (num_lwps > 1 && linux_nat_new_thread != NULL)
1036 linux_nat_new_thread (ptid);
1037
d6b0e80f
AC
1038 return lp;
1039}
1040
1041/* Remove the LWP specified by PID from the list. */
1042
1043static void
1044delete_lwp (ptid_t ptid)
1045{
1046 struct lwp_info *lp, *lpprev;
1047
1048 lpprev = NULL;
1049
1050 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1051 if (ptid_equal (lp->ptid, ptid))
1052 break;
1053
1054 if (!lp)
1055 return;
1056
d6b0e80f
AC
1057 num_lwps--;
1058
1059 if (lpprev)
1060 lpprev->next = lp->next;
1061 else
1062 lwp_list = lp->next;
1063
1064 xfree (lp);
1065}
1066
1067/* Return a pointer to the structure describing the LWP corresponding
1068 to PID. If no corresponding LWP could be found, return NULL. */
1069
1070static struct lwp_info *
1071find_lwp_pid (ptid_t ptid)
1072{
1073 struct lwp_info *lp;
1074 int lwp;
1075
1076 if (is_lwp (ptid))
1077 lwp = GET_LWP (ptid);
1078 else
1079 lwp = GET_PID (ptid);
1080
1081 for (lp = lwp_list; lp; lp = lp->next)
1082 if (lwp == GET_LWP (lp->ptid))
1083 return lp;
1084
1085 return NULL;
1086}
1087
1088/* Call CALLBACK with its second argument set to DATA for every LWP in
1089 the list. If CALLBACK returns 1 for a particular LWP, return a
1090 pointer to the structure describing that LWP immediately.
1091 Otherwise return NULL. */
1092
1093struct lwp_info *
1094iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
1095{
1096 struct lwp_info *lp, *lpnext;
1097
1098 for (lp = lwp_list; lp; lp = lpnext)
1099 {
1100 lpnext = lp->next;
1101 if ((*callback) (lp, data))
1102 return lp;
1103 }
1104
1105 return NULL;
1106}
1107
f973ed9c
DJ
1108/* Update our internal state when changing from one fork (checkpoint,
1109 et cetera) to another indicated by NEW_PTID. We can only switch
1110 single-threaded applications, so we only create one new LWP, and
1111 the previous list is discarded. */
1112
1113void
1114linux_nat_switch_fork (ptid_t new_ptid)
1115{
1116 struct lwp_info *lp;
1117
1118 init_lwp_list ();
1119 lp = add_lwp (new_ptid);
1120 lp->stopped = 1;
e26af52f 1121
4f8d22e3
PA
1122 init_thread_list ();
1123 add_thread_silent (new_ptid);
e26af52f
DJ
1124}
1125
e26af52f
DJ
1126/* Handle the exit of a single thread LP. */
1127
1128static void
1129exit_lwp (struct lwp_info *lp)
1130{
063bfe2e
VP
1131 struct thread_info *th = find_thread_pid (lp->ptid);
1132
1133 if (th)
e26af52f 1134 {
17faa917
DJ
1135 if (print_thread_events)
1136 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1137
4f8d22e3 1138 delete_thread (lp->ptid);
e26af52f
DJ
1139 }
1140
1141 delete_lwp (lp->ptid);
1142}
1143
a0ef4274
DJ
1144/* Detect `T (stopped)' in `/proc/PID/status'.
1145 Other states including `T (tracing stop)' are reported as false. */
1146
1147static int
1148pid_is_stopped (pid_t pid)
1149{
1150 FILE *status_file;
1151 char buf[100];
1152 int retval = 0;
1153
1154 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1155 status_file = fopen (buf, "r");
1156 if (status_file != NULL)
1157 {
1158 int have_state = 0;
1159
1160 while (fgets (buf, sizeof (buf), status_file))
1161 {
1162 if (strncmp (buf, "State:", 6) == 0)
1163 {
1164 have_state = 1;
1165 break;
1166 }
1167 }
1168 if (have_state && strstr (buf, "T (stopped)") != NULL)
1169 retval = 1;
1170 fclose (status_file);
1171 }
1172 return retval;
1173}
1174
1175/* Wait for the LWP specified by LP, which we have just attached to.
1176 Returns a wait status for that LWP, to cache. */
1177
1178static int
1179linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1180 int *signalled)
1181{
1182 pid_t new_pid, pid = GET_LWP (ptid);
1183 int status;
1184
1185 if (pid_is_stopped (pid))
1186 {
1187 if (debug_linux_nat)
1188 fprintf_unfiltered (gdb_stdlog,
1189 "LNPAW: Attaching to a stopped process\n");
1190
1191 /* The process is definitely stopped. It is in a job control
1192 stop, unless the kernel predates the TASK_STOPPED /
1193 TASK_TRACED distinction, in which case it might be in a
1194 ptrace stop. Make sure it is in a ptrace stop; from there we
1195 can kill it, signal it, et cetera.
1196
1197 First make sure there is a pending SIGSTOP. Since we are
1198 already attached, the process can not transition from stopped
1199 to running without a PTRACE_CONT; so we know this signal will
1200 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1201 probably already in the queue (unless this kernel is old
1202 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1203 is not an RT signal, it can only be queued once. */
1204 kill_lwp (pid, SIGSTOP);
1205
1206 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1207 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1208 ptrace (PTRACE_CONT, pid, 0, 0);
1209 }
1210
1211 /* Make sure the initial process is stopped. The user-level threads
1212 layer might want to poke around in the inferior, and that won't
1213 work if things haven't stabilized yet. */
1214 new_pid = my_waitpid (pid, &status, 0);
1215 if (new_pid == -1 && errno == ECHILD)
1216 {
1217 if (first)
1218 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1219
1220 /* Try again with __WCLONE to check cloned processes. */
1221 new_pid = my_waitpid (pid, &status, __WCLONE);
1222 *cloned = 1;
1223 }
1224
1225 gdb_assert (pid == new_pid && WIFSTOPPED (status));
1226
1227 if (WSTOPSIG (status) != SIGSTOP)
1228 {
1229 *signalled = 1;
1230 if (debug_linux_nat)
1231 fprintf_unfiltered (gdb_stdlog,
1232 "LNPAW: Received %s after attaching\n",
1233 status_to_str (status));
1234 }
1235
1236 return status;
1237}
1238
1239/* Attach to the LWP specified by PID. Return 0 if successful or -1
1240 if the new LWP could not be attached. */
d6b0e80f 1241
9ee57c33 1242int
93815fbf 1243lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1244{
9ee57c33 1245 struct lwp_info *lp;
84e46146 1246 enum sigchld_state async_events_original_state;
d6b0e80f
AC
1247
1248 gdb_assert (is_lwp (ptid));
1249
84e46146 1250 async_events_original_state = linux_nat_async_events (sigchld_sync);
d6b0e80f 1251
9ee57c33 1252 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1253
1254 /* We assume that we're already attached to any LWP that has an id
1255 equal to the overall process id, and to any LWP that is already
1256 in our list of LWPs. If we're not seeing exit events from threads
1257 and we've had PID wraparound since we last tried to stop all threads,
1258 this assumption might be wrong; fortunately, this is very unlikely
1259 to happen. */
9ee57c33 1260 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1261 {
a0ef4274 1262 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1263
1264 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1265 {
1266 /* If we fail to attach to the thread, issue a warning,
1267 but continue. One way this can happen is if thread
e9efe249 1268 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1269 bug may place threads in the thread list and then fail
1270 to create them. */
1271 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1272 safe_strerror (errno));
1273 return -1;
1274 }
1275
d6b0e80f
AC
1276 if (debug_linux_nat)
1277 fprintf_unfiltered (gdb_stdlog,
1278 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1279 target_pid_to_str (ptid));
1280
a0ef4274
DJ
1281 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1282 lp = add_lwp (ptid);
1283 lp->stopped = 1;
1284 lp->cloned = cloned;
1285 lp->signalled = signalled;
1286 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1287 {
a0ef4274
DJ
1288 lp->resumed = 1;
1289 lp->status = status;
d6b0e80f
AC
1290 }
1291
a0ef4274 1292 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1293
1294 if (debug_linux_nat)
1295 {
1296 fprintf_unfiltered (gdb_stdlog,
1297 "LLAL: waitpid %s received %s\n",
1298 target_pid_to_str (ptid),
1299 status_to_str (status));
1300 }
1301 }
1302 else
1303 {
1304 /* We assume that the LWP representing the original process is
1305 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1306 that the GNU/linux ptrace layer uses to keep track of
1307 threads. Note that this won't have already been done since
1308 the main thread will have, we assume, been stopped by an
1309 attach from a different layer. */
9ee57c33
DJ
1310 if (lp == NULL)
1311 lp = add_lwp (ptid);
d6b0e80f
AC
1312 lp->stopped = 1;
1313 }
9ee57c33 1314
84e46146 1315 linux_nat_async_events (async_events_original_state);
9ee57c33 1316 return 0;
d6b0e80f
AC
1317}
1318
b84876c2
PA
1319static void
1320linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
1321 int from_tty)
1322{
1323 int saved_async = 0;
10568435
JK
1324#ifdef HAVE_PERSONALITY
1325 int personality_orig = 0, personality_set = 0;
1326#endif /* HAVE_PERSONALITY */
b84876c2
PA
1327
1328 /* The fork_child mechanism is synchronous and calls target_wait, so
1329 we have to mask the async mode. */
1330
1331 if (target_can_async_p ())
84e46146
PA
1332 /* Mask async mode. Creating a child requires a loop calling
1333 wait_for_inferior currently. */
b84876c2
PA
1334 saved_async = linux_nat_async_mask (0);
1335 else
1336 {
1337 /* Restore the original signal mask. */
1338 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1339 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1340 suspend_mask = normal_mask;
1341 sigdelset (&suspend_mask, SIGCHLD);
1342 }
1343
84e46146
PA
1344 /* Set SIGCHLD to the default action, until after execing the child,
1345 since the inferior inherits the superior's signal mask. It will
1346 be blocked again in linux_nat_wait, which is only reached after
1347 the inferior execing. */
1348 linux_nat_async_events (sigchld_default);
1349
10568435
JK
1350#ifdef HAVE_PERSONALITY
1351 if (disable_randomization)
1352 {
1353 errno = 0;
1354 personality_orig = personality (0xffffffff);
1355 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1356 {
1357 personality_set = 1;
1358 personality (personality_orig | ADDR_NO_RANDOMIZE);
1359 }
1360 if (errno != 0 || (personality_set
1361 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1362 warning (_("Error disabling address space randomization: %s"),
1363 safe_strerror (errno));
1364 }
1365#endif /* HAVE_PERSONALITY */
1366
b84876c2
PA
1367 linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);
1368
10568435
JK
1369#ifdef HAVE_PERSONALITY
1370 if (personality_set)
1371 {
1372 errno = 0;
1373 personality (personality_orig);
1374 if (errno != 0)
1375 warning (_("Error restoring address space randomization: %s"),
1376 safe_strerror (errno));
1377 }
1378#endif /* HAVE_PERSONALITY */
1379
b84876c2
PA
1380 if (saved_async)
1381 linux_nat_async_mask (saved_async);
1382}
1383
d6b0e80f
AC
1384static void
1385linux_nat_attach (char *args, int from_tty)
1386{
1387 struct lwp_info *lp;
d6b0e80f 1388 int status;
af990527 1389 ptid_t ptid;
d6b0e80f
AC
1390
1391 /* FIXME: We should probably accept a list of process id's, and
1392 attach all of them. */
10d6c8cd 1393 linux_ops->to_attach (args, from_tty);
d6b0e80f 1394
b84876c2
PA
1395 if (!target_can_async_p ())
1396 {
1397 /* Restore the original signal mask. */
1398 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1399 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1400 suspend_mask = normal_mask;
1401 sigdelset (&suspend_mask, SIGCHLD);
1402 }
1403
af990527
PA
1404 /* The ptrace base target adds the main thread with (pid,0,0)
1405 format. Decorate it with lwp info. */
1406 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1407 thread_change_ptid (inferior_ptid, ptid);
1408
9f0bdab8 1409 /* Add the initial process as the first LWP to the list. */
af990527 1410 lp = add_lwp (ptid);
a0ef4274
DJ
1411
1412 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1413 &lp->signalled);
1414 lp->stopped = 1;
9f0bdab8 1415
a0ef4274 1416 /* Save the wait status to report later. */
d6b0e80f 1417 lp->resumed = 1;
a0ef4274
DJ
1418 if (debug_linux_nat)
1419 fprintf_unfiltered (gdb_stdlog,
1420 "LNA: waitpid %ld, saving status %s\n",
1421 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd
PA
1422
1423 if (!target_can_async_p ())
a0ef4274 1424 lp->status = status;
710151dd
PA
1425 else
1426 {
1427 /* We already waited for this LWP, so put the wait result on the
 1428 pipe. The event loop will wake up and get us to handle
1429 this event. */
a0ef4274
DJ
1430 linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
1431 lp->cloned ? __WCLONE : 0);
b84876c2
PA
1432 /* Register in the event loop. */
1433 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1434 }
1435}
1436
a0ef4274
DJ
1437/* Get pending status of LP. */
1438static int
1439get_pending_status (struct lwp_info *lp, int *status)
1440{
1441 struct target_waitstatus last;
1442 ptid_t last_ptid;
1443
1444 get_last_target_status (&last_ptid, &last);
1445
1446 /* If this lwp is the ptid that GDB is processing an event from, the
1447 signal will be in stop_signal. Otherwise, in all-stop + sync
1448 mode, we may cache pending events in lp->status while trying to
1449 stop all threads (see stop_wait_callback). In async mode, the
1450 events are always cached in waitpid_queue. */
1451
1452 *status = 0;
4c28f408
PA
1453
1454 if (non_stop)
a0ef4274 1455 {
4c28f408
PA
1456 enum target_signal signo = TARGET_SIGNAL_0;
1457
1458 if (is_executing (lp->ptid))
1459 {
1460 /* If the core thought this lwp was executing --- e.g., the
1461 executing property hasn't been updated yet, but the
1462 thread has been stopped with a stop_callback /
1463 stop_wait_callback sequence (see linux_nat_detach for
1464 example) --- we can only have pending events in the local
1465 queue. */
1466 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1467 {
8b8655b3
TJB
1468 if (WIFSTOPPED (*status))
1469 signo = target_signal_from_host (WSTOPSIG (*status));
4c28f408
PA
1470
1471 /* If not stopped, then the lwp is gone, no use in
1472 resending a signal. */
1473 }
1474 }
1475 else
1476 {
1477 /* If the core knows the thread is not executing, then we
1478 have the last signal recorded in
2020b7ab 1479 thread_info->stop_signal. */
4c28f408 1480
2020b7ab
PA
1481 struct thread_info *tp = find_thread_pid (lp->ptid);
1482 signo = tp->stop_signal;
4c28f408
PA
1483 }
1484
1485 if (signo != TARGET_SIGNAL_0
1486 && !signal_pass_state (signo))
1487 {
1488 if (debug_linux_nat)
1489 fprintf_unfiltered (gdb_stdlog, "\
1490GPT: lwp %s had signal %s, but it is in no pass state\n",
1491 target_pid_to_str (lp->ptid),
1492 target_signal_to_string (signo));
1493 }
1494 else
1495 {
1496 if (signo != TARGET_SIGNAL_0)
1497 *status = W_STOPCODE (target_signal_to_host (signo));
1498
1499 if (debug_linux_nat)
1500 fprintf_unfiltered (gdb_stdlog,
 1501 "GPT: lwp %s has pending signal %s\n",
1502 target_pid_to_str (lp->ptid),
1503 target_signal_to_string (signo));
1504 }
a0ef4274 1505 }
a0ef4274 1506 else
4c28f408
PA
1507 {
1508 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1509 {
2020b7ab
PA
1510 struct thread_info *tp = find_thread_pid (lp->ptid);
1511 if (tp->stop_signal != TARGET_SIGNAL_0
1512 && signal_pass_state (tp->stop_signal))
1513 *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
4c28f408
PA
1514 }
1515 else if (target_can_async_p ())
1516 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1517 else
1518 *status = lp->status;
1519 }
a0ef4274
DJ
1520
1521 return 0;
1522}
1523
d6b0e80f
AC
1524static int
1525detach_callback (struct lwp_info *lp, void *data)
1526{
1527 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1528
1529 if (debug_linux_nat && lp->status)
1530 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1531 strsignal (WSTOPSIG (lp->status)),
1532 target_pid_to_str (lp->ptid));
1533
a0ef4274
DJ
1534 /* If there is a pending SIGSTOP, get rid of it. */
1535 if (lp->signalled)
d6b0e80f 1536 {
d6b0e80f
AC
1537 if (debug_linux_nat)
1538 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1539 "DC: Sending SIGCONT to %s\n",
1540 target_pid_to_str (lp->ptid));
d6b0e80f 1541
a0ef4274 1542 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1543 lp->signalled = 0;
d6b0e80f
AC
1544 }
1545
1546 /* We don't actually detach from the LWP that has an id equal to the
1547 overall process id just yet. */
1548 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1549 {
a0ef4274
DJ
1550 int status = 0;
1551
1552 /* Pass on any pending signal for this LWP. */
1553 get_pending_status (lp, &status);
1554
d6b0e80f
AC
1555 errno = 0;
1556 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1557 WSTOPSIG (status)) < 0)
8a3fe4f8 1558 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1559 safe_strerror (errno));
1560
1561 if (debug_linux_nat)
1562 fprintf_unfiltered (gdb_stdlog,
1563 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1564 target_pid_to_str (lp->ptid),
1565 strsignal (WSTOPSIG (lp->status)));
1566
1567 delete_lwp (lp->ptid);
1568 }
1569
1570 return 0;
1571}
1572
1573static void
1574linux_nat_detach (char *args, int from_tty)
1575{
b84876c2 1576 int pid;
a0ef4274
DJ
1577 int status;
1578 enum target_signal sig;
1579
b84876c2
PA
1580 if (target_can_async_p ())
1581 linux_nat_async (NULL, 0);
1582
4c28f408
PA
1583 /* Stop all threads before detaching. ptrace requires that the
 1584 thread is stopped to successfully detach. */
1585 iterate_over_lwps (stop_callback, NULL);
1586 /* ... and wait until all of them have reported back that
1587 they're no longer running. */
1588 iterate_over_lwps (stop_wait_callback, NULL);
1589
d6b0e80f
AC
1590 iterate_over_lwps (detach_callback, NULL);
1591
1592 /* Only the initial process should be left right now. */
1593 gdb_assert (num_lwps == 1);
1594
a0ef4274
DJ
1595 /* Pass on any pending signal for the last LWP. */
1596 if ((args == NULL || *args == '\0')
1597 && get_pending_status (lwp_list, &status) != -1
1598 && WIFSTOPPED (status))
1599 {
1600 /* Put the signal number in ARGS so that inf_ptrace_detach will
1601 pass it along with PTRACE_DETACH. */
1602 args = alloca (8);
1603 sprintf (args, "%d", (int) WSTOPSIG (status));
1604 fprintf_unfiltered (gdb_stdlog,
1605 "LND: Sending signal %s to %s\n",
1606 args,
1607 target_pid_to_str (lwp_list->ptid));
1608 }
1609
d6b0e80f
AC
1610 /* Destroy LWP info; it's no longer valid. */
1611 init_lwp_list ();
1612
b84876c2
PA
1613 pid = GET_PID (inferior_ptid);
1614 inferior_ptid = pid_to_ptid (pid);
10d6c8cd 1615 linux_ops->to_detach (args, from_tty);
b84876c2
PA
1616
1617 if (target_can_async_p ())
1618 drain_queued_events (pid);
d6b0e80f
AC
1619}
1620
1621/* Resume LP. */
1622
1623static int
1624resume_callback (struct lwp_info *lp, void *data)
1625{
1626 if (lp->stopped && lp->status == 0)
1627 {
10d6c8cd
DJ
1628 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1629 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1630 if (debug_linux_nat)
1631 fprintf_unfiltered (gdb_stdlog,
1632 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1633 target_pid_to_str (lp->ptid));
1634 lp->stopped = 0;
1635 lp->step = 0;
9f0bdab8 1636 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
d6b0e80f 1637 }
57380f4e
DJ
1638 else if (lp->stopped && debug_linux_nat)
1639 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
1640 target_pid_to_str (lp->ptid));
1641 else if (debug_linux_nat)
1642 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
1643 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1644
1645 return 0;
1646}
1647
1648static int
1649resume_clear_callback (struct lwp_info *lp, void *data)
1650{
1651 lp->resumed = 0;
1652 return 0;
1653}
1654
1655static int
1656resume_set_callback (struct lwp_info *lp, void *data)
1657{
1658 lp->resumed = 1;
1659 return 0;
1660}
1661
1662static void
1663linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1664{
1665 struct lwp_info *lp;
1666 int resume_all;
1667
76f50ad1
DJ
1668 if (debug_linux_nat)
1669 fprintf_unfiltered (gdb_stdlog,
1670 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1671 step ? "step" : "resume",
1672 target_pid_to_str (ptid),
1673 signo ? strsignal (signo) : "0",
1674 target_pid_to_str (inferior_ptid));
1675
b84876c2
PA
1676 if (target_can_async_p ())
1677 /* Block events while we're here. */
84e46146 1678 linux_nat_async_events (sigchld_sync);
b84876c2 1679
d6b0e80f
AC
1680 /* A specific PTID means `step only this process id'. */
1681 resume_all = (PIDGET (ptid) == -1);
1682
4c28f408
PA
1683 if (non_stop && resume_all)
1684 internal_error (__FILE__, __LINE__,
1685 "can't resume all in non-stop mode");
1686
1687 if (!non_stop)
1688 {
1689 if (resume_all)
1690 iterate_over_lwps (resume_set_callback, NULL);
1691 else
1692 iterate_over_lwps (resume_clear_callback, NULL);
1693 }
d6b0e80f
AC
1694
1695 /* If PID is -1, it's the current inferior that should be
1696 handled specially. */
1697 if (PIDGET (ptid) == -1)
1698 ptid = inferior_ptid;
1699
1700 lp = find_lwp_pid (ptid);
9f0bdab8 1701 gdb_assert (lp != NULL);
d6b0e80f 1702
4c28f408 1703 /* Convert to something the lower layer understands. */
9f0bdab8 1704 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1705
9f0bdab8
DJ
1706 /* Remember if we're stepping. */
1707 lp->step = step;
d6b0e80f 1708
9f0bdab8
DJ
1709 /* Mark this LWP as resumed. */
1710 lp->resumed = 1;
76f50ad1 1711
9f0bdab8
DJ
1712 /* If we have a pending wait status for this thread, there is no
1713 point in resuming the process. But first make sure that
1714 linux_nat_wait won't preemptively handle the event - we
1715 should never take this short-circuit if we are going to
1716 leave LP running, since we have skipped resuming all the
1717 other threads. This bit of code needs to be synchronized
1718 with linux_nat_wait. */
76f50ad1 1719
710151dd
PA
1720 /* In async mode, we never have pending wait status. */
1721 if (target_can_async_p () && lp->status)
1722 internal_error (__FILE__, __LINE__, "Pending status in async mode");
1723
9f0bdab8
DJ
1724 if (lp->status && WIFSTOPPED (lp->status))
1725 {
d6b48e9c
PA
1726 int saved_signo;
1727 struct inferior *inf;
76f50ad1 1728
d6b48e9c
PA
1729 inf = find_inferior_pid (ptid_get_pid (ptid));
1730 gdb_assert (inf);
1731 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1732
1733 /* Defer to common code if we're gaining control of the
1734 inferior. */
1735 if (inf->stop_soon == NO_STOP_QUIETLY
1736 && signal_stop_state (saved_signo) == 0
9f0bdab8
DJ
1737 && signal_print_state (saved_signo) == 0
1738 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1739 {
9f0bdab8
DJ
1740 if (debug_linux_nat)
1741 fprintf_unfiltered (gdb_stdlog,
1742 "LLR: Not short circuiting for ignored "
1743 "status 0x%x\n", lp->status);
1744
d6b0e80f
AC
1745 /* FIXME: What should we do if we are supposed to continue
1746 this thread with a signal? */
1747 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1748 signo = saved_signo;
1749 lp->status = 0;
1750 }
1751 }
76f50ad1 1752
9f0bdab8
DJ
1753 if (lp->status)
1754 {
1755 /* FIXME: What should we do if we are supposed to continue
1756 this thread with a signal? */
1757 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1758
9f0bdab8
DJ
1759 if (debug_linux_nat)
1760 fprintf_unfiltered (gdb_stdlog,
1761 "LLR: Short circuiting for status 0x%x\n",
1762 lp->status);
d6b0e80f 1763
9f0bdab8 1764 return;
d6b0e80f
AC
1765 }
1766
9f0bdab8
DJ
1767 /* Mark LWP as not stopped to prevent it from being continued by
1768 resume_callback. */
1769 lp->stopped = 0;
1770
d6b0e80f
AC
1771 if (resume_all)
1772 iterate_over_lwps (resume_callback, NULL);
1773
10d6c8cd 1774 linux_ops->to_resume (ptid, step, signo);
9f0bdab8
DJ
1775 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1776
d6b0e80f
AC
1777 if (debug_linux_nat)
1778 fprintf_unfiltered (gdb_stdlog,
1779 "LLR: %s %s, %s (resume event thread)\n",
1780 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1781 target_pid_to_str (ptid),
1782 signo ? strsignal (signo) : "0");
b84876c2
PA
1783
1784 if (target_can_async_p ())
8ea051c5 1785 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1786}
1787
1788/* Issue kill to specified lwp. */
1789
1790static int tkill_failed;
1791
1792static int
1793kill_lwp (int lwpid, int signo)
1794{
1795 errno = 0;
1796
1797/* Use tkill, if possible, in case we are using nptl threads. If tkill
1798 fails, then we are not using nptl threads and we should be using kill. */
1799
1800#ifdef HAVE_TKILL_SYSCALL
1801 if (!tkill_failed)
1802 {
1803 int ret = syscall (__NR_tkill, lwpid, signo);
1804 if (errno != ENOSYS)
1805 return ret;
1806 errno = 0;
1807 tkill_failed = 1;
1808 }
1809#endif
1810
1811 return kill (lwpid, signo);
1812}
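/* Note (illustrative, not part of the original source): tkill(2) delivers
   a signal to one specific kernel thread (LWP), whereas kill(2) is
   process-directed and, with NPTL's shared signal handling, may be handled
   by any thread in the thread group.  That is why kill_lwp above prefers
   the tkill syscall when it is available, and only falls back to kill(2),
   which is still per-thread-correct on older LinuxThreads setups where
   every thread has its own PID.  */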
1813
3d799a95
DJ
1814/* Handle a GNU/Linux extended wait response. If we see a clone
1815 event, we need to add the new LWP to our list (and not report the
1816 trap to higher layers). This function returns non-zero if the
1817 event should be ignored and we should wait again. If STOPPING is
1818 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1819
1820static int
3d799a95
DJ
1821linux_handle_extended_wait (struct lwp_info *lp, int status,
1822 int stopping)
d6b0e80f 1823{
3d799a95
DJ
1824 int pid = GET_LWP (lp->ptid);
1825 struct target_waitstatus *ourstatus = &lp->waitstatus;
1826 struct lwp_info *new_lp = NULL;
1827 int event = status >> 16;
d6b0e80f 1828
3d799a95
DJ
1829 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1830 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1831 {
3d799a95
DJ
1832 unsigned long new_pid;
1833 int ret;
1834
1835 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1836
3d799a95
DJ
1837 /* If we haven't already seen the new PID stop, wait for it now. */
1838 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1839 {
1840 /* The new child has a pending SIGSTOP. We can't affect it until it
1841 hits the SIGSTOP, but we're already attached. */
1842 ret = my_waitpid (new_pid, &status,
1843 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1844 if (ret == -1)
1845 perror_with_name (_("waiting for new child"));
1846 else if (ret != new_pid)
1847 internal_error (__FILE__, __LINE__,
1848 _("wait returned unexpected PID %d"), ret);
1849 else if (!WIFSTOPPED (status))
1850 internal_error (__FILE__, __LINE__,
1851 _("wait returned unexpected status 0x%x"), status);
1852 }
1853
3a3e9ee3 1854 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95
DJ
1855
1856 if (event == PTRACE_EVENT_FORK)
1857 ourstatus->kind = TARGET_WAITKIND_FORKED;
1858 else if (event == PTRACE_EVENT_VFORK)
1859 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1860 else
3d799a95 1861 {
4c28f408
PA
1862 struct cleanup *old_chain;
1863
3d799a95
DJ
1864 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1865 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1866 new_lp->cloned = 1;
4c28f408 1867 new_lp->stopped = 1;
d6b0e80f 1868
3d799a95
DJ
1869 if (WSTOPSIG (status) != SIGSTOP)
1870 {
1871 /* This can happen if someone starts sending signals with a
1872 lower number than SIGSTOP (e.g. SIGUSR1) to the new thread
1873 before it gets a chance to run.
1874 This is an unlikely case, and harder to handle for
1875 fork / vfork than for clone, so we do not try - but
1876 we handle it for clone events here. We'll send
1877 the other signal on to the thread below. */
1878
1879 new_lp->signalled = 1;
1880 }
1881 else
1882 status = 0;
d6b0e80f 1883
4c28f408 1884 if (non_stop)
3d799a95 1885 {
4c28f408
PA
1886 /* Add the new thread to GDB's lists as soon as possible
1887 so that:
1888
1889 1) the frontend doesn't have to wait for a stop to
1890 display them, and,
1891
1892 2) we tag it with the correct running state. */
1893
1894 /* If the thread_db layer is active, let it know about
1895 this new thread, and add it to GDB's list. */
1896 if (!thread_db_attach_lwp (new_lp->ptid))
1897 {
1898 /* We're not using thread_db. Add it to GDB's
1899 list. */
1900 target_post_attach (GET_LWP (new_lp->ptid));
1901 add_thread (new_lp->ptid);
1902 }
1903
1904 if (!stopping)
1905 {
1906 set_running (new_lp->ptid, 1);
1907 set_executing (new_lp->ptid, 1);
1908 }
1909 }
1910
1911 if (!stopping)
1912 {
1913 new_lp->stopped = 0;
3d799a95 1914 new_lp->resumed = 1;
4c28f408 1915 ptrace (PTRACE_CONT, new_pid, 0,
3d799a95
DJ
1916 status ? WSTOPSIG (status) : 0);
1917 }
d6b0e80f 1918
3d799a95
DJ
1919 if (debug_linux_nat)
1920 fprintf_unfiltered (gdb_stdlog,
1921 "LHEW: Got clone event from LWP %ld, resuming\n",
1922 GET_LWP (lp->ptid));
1923 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1924
1925 return 1;
1926 }
1927
1928 return 0;
d6b0e80f
AC
1929 }
1930
3d799a95
DJ
1931 if (event == PTRACE_EVENT_EXEC)
1932 {
1933 ourstatus->kind = TARGET_WAITKIND_EXECD;
1934 ourstatus->value.execd_pathname
6d8fd2b7 1935 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1936
1937 if (linux_parent_pid)
1938 {
1939 detach_breakpoints (linux_parent_pid);
1940 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1941
1942 linux_parent_pid = 0;
1943 }
1944
25b22b0a
PA
1945 /* At this point, all inserted breakpoints are gone. Doing this
1946 as soon as we detect an exec prevents the badness of a later
1947 breakpoint removal writing the stale "shadow contents" back to
1948 memory to lift the bp. That shadow is NOT valid after an exec.
1949
1950 Note that we have to do this after the detach_breakpoints
1951 call above, otherwise breakpoints wouldn't be lifted from the
1952 parent on a vfork, because detach_breakpoints would think
1953 that breakpoints are not inserted. */
1954 mark_breakpoints_out ();
3d799a95
DJ
1955 return 0;
1956 }
1957
1958 internal_error (__FILE__, __LINE__,
1959 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1960}
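/* Illustrative sketch, not part of GDB: a minimal standalone tracer
   showing where the "status >> 16" event code decoded above comes from.
   It assumes CHILD is already ptrace-attached and currently stopped, and
   that the system headers define PTRACE_SETOPTIONS and the
   PTRACE_O_TRACE* options (older setups get them from <linux/ptrace.h>
   instead); error handling is omitted.  */

#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

static void
watch_extended_events (pid_t child)
{
  int status;

  /* Ask the kernel to turn fork/vfork/clone/exec in CHILD into SIGTRAP
     stops that carry an event number in the high bits of the wait
     status.  */
  ptrace (PTRACE_SETOPTIONS, child, 0,
          PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK
          | PTRACE_O_TRACECLONE | PTRACE_O_TRACEEXEC);
  ptrace (PTRACE_CONT, child, 0, 0);

  while (waitpid (child, &status, 0) == child)
    {
      if (WIFSTOPPED (status)
          && WSTOPSIG (status) == SIGTRAP
          && (status >> 16) != 0)
        {
          /* Same decoding as linux_handle_extended_wait above.  */
          int event = status >> 16;
          unsigned long new_pid = 0;

          /* For fork/vfork/clone the event message is the new
             thread's id.  */
          ptrace (PTRACE_GETEVENTMSG, child, 0, &new_pid);
          printf ("extended event %d, new pid %lu\n", event, new_pid);
        }

      if (WIFEXITED (status) || WIFSIGNALED (status))
        break;

      ptrace (PTRACE_CONT, child, 0, 0);
    }
}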
1961
1962/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1963 exited. */
1964
1965static int
1966wait_lwp (struct lwp_info *lp)
1967{
1968 pid_t pid;
1969 int status;
1970 int thread_dead = 0;
1971
1972 gdb_assert (!lp->stopped);
1973 gdb_assert (lp->status == 0);
1974
58aecb61 1975 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1976 if (pid == -1 && errno == ECHILD)
1977 {
58aecb61 1978 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1979 if (pid == -1 && errno == ECHILD)
1980 {
1981 /* The thread has previously exited. We need to delete it
1982 now because, for some vendor 2.4 kernels with NPTL
1983 support backported, there won't be an exit event unless
1984 it is the main thread. 2.6 kernels will report an exit
1985 event for each thread that exits, as expected. */
1986 thread_dead = 1;
1987 if (debug_linux_nat)
1988 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1989 target_pid_to_str (lp->ptid));
1990 }
1991 }
1992
1993 if (!thread_dead)
1994 {
1995 gdb_assert (pid == GET_LWP (lp->ptid));
1996
1997 if (debug_linux_nat)
1998 {
1999 fprintf_unfiltered (gdb_stdlog,
2000 "WL: waitpid %s received %s\n",
2001 target_pid_to_str (lp->ptid),
2002 status_to_str (status));
2003 }
2004 }
2005
2006 /* Check if the thread has exited. */
2007 if (WIFEXITED (status) || WIFSIGNALED (status))
2008 {
2009 thread_dead = 1;
2010 if (debug_linux_nat)
2011 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2012 target_pid_to_str (lp->ptid));
2013 }
2014
2015 if (thread_dead)
2016 {
e26af52f 2017 exit_lwp (lp);
d6b0e80f
AC
2018 return 0;
2019 }
2020
2021 gdb_assert (WIFSTOPPED (status));
2022
2023 /* Handle GNU/Linux's extended waitstatus for trace events. */
2024 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2025 {
2026 if (debug_linux_nat)
2027 fprintf_unfiltered (gdb_stdlog,
2028 "WL: Handling extended status 0x%06x\n",
2029 status);
3d799a95 2030 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2031 return wait_lwp (lp);
2032 }
2033
2034 return status;
2035}
2036
9f0bdab8
DJ
2037/* Save the most recent siginfo for LP. This is currently only called
2038 for SIGTRAP; some ports use the si_addr field for
2039 target_stopped_data_address. In the future, it may also be used to
2040 restore the siginfo of requeued signals. */
2041
2042static void
2043save_siginfo (struct lwp_info *lp)
2044{
2045 errno = 0;
2046 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2047 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2048
2049 if (errno != 0)
2050 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2051}
2052
d6b0e80f
AC
2053/* Send a SIGSTOP to LP. */
2054
2055static int
2056stop_callback (struct lwp_info *lp, void *data)
2057{
2058 if (!lp->stopped && !lp->signalled)
2059 {
2060 int ret;
2061
2062 if (debug_linux_nat)
2063 {
2064 fprintf_unfiltered (gdb_stdlog,
2065 "SC: kill %s **<SIGSTOP>**\n",
2066 target_pid_to_str (lp->ptid));
2067 }
2068 errno = 0;
2069 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2070 if (debug_linux_nat)
2071 {
2072 fprintf_unfiltered (gdb_stdlog,
2073 "SC: lwp kill %d %s\n",
2074 ret,
2075 errno ? safe_strerror (errno) : "ERRNO-OK");
2076 }
2077
2078 lp->signalled = 1;
2079 gdb_assert (lp->status == 0);
2080 }
2081
2082 return 0;
2083}
2084
57380f4e 2085/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2086
2087static int
57380f4e
DJ
2088linux_nat_has_pending_sigint (int pid)
2089{
2090 sigset_t pending, blocked, ignored;
2091 int i;
2092
2093 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2094
2095 if (sigismember (&pending, SIGINT)
2096 && !sigismember (&ignored, SIGINT))
2097 return 1;
2098
2099 return 0;
2100}
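/* Note (illustrative assumption): linux_proc_pending_signals is expected
   to read the pending/blocked/ignored masks from the SigPnd, ShdPnd,
   SigBlk and SigIgn lines of /proc/PID/status, which the kernel prints
   as hexadecimal signal bitmasks; sigismember above then simply tests
   the SIGINT bit in the decoded sets.  */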
2101
2102/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2103
2104static int
2105set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2106{
57380f4e
DJ
2107 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2108 flag to consume the next one. */
2109 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2110 && WSTOPSIG (lp->status) == SIGINT)
2111 lp->status = 0;
2112 else
2113 lp->ignore_sigint = 1;
2114
2115 return 0;
2116}
2117
2118/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2119 This function is called after we know the LWP has stopped; if the LWP
2120 stopped before the expected SIGINT was delivered, then it will never have
2121 arrived. Also, if the signal was delivered to a shared queue and consumed
2122 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2123
57380f4e
DJ
2124static void
2125maybe_clear_ignore_sigint (struct lwp_info *lp)
2126{
2127 if (!lp->ignore_sigint)
2128 return;
2129
2130 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2131 {
2132 if (debug_linux_nat)
2133 fprintf_unfiltered (gdb_stdlog,
2134 "MCIS: Clearing bogus flag for %s\n",
2135 target_pid_to_str (lp->ptid));
2136 lp->ignore_sigint = 0;
2137 }
2138}
2139
2140/* Wait until LP is stopped. */
2141
2142static int
2143stop_wait_callback (struct lwp_info *lp, void *data)
2144{
d6b0e80f
AC
2145 if (!lp->stopped)
2146 {
2147 int status;
2148
2149 status = wait_lwp (lp);
2150 if (status == 0)
2151 return 0;
2152
57380f4e
DJ
2153 if (lp->ignore_sigint && WIFSTOPPED (status)
2154 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2155 {
57380f4e 2156 lp->ignore_sigint = 0;
d6b0e80f
AC
2157
2158 errno = 0;
2159 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2160 if (debug_linux_nat)
2161 fprintf_unfiltered (gdb_stdlog,
57380f4e 2162 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
d6b0e80f
AC
2163 target_pid_to_str (lp->ptid),
2164 errno ? safe_strerror (errno) : "OK");
2165
57380f4e 2166 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2167 }
2168
57380f4e
DJ
2169 maybe_clear_ignore_sigint (lp);
2170
d6b0e80f
AC
2171 if (WSTOPSIG (status) != SIGSTOP)
2172 {
2173 if (WSTOPSIG (status) == SIGTRAP)
2174 {
2175 /* If a LWP other than the LWP that we're reporting an
2176 event for has hit a GDB breakpoint (as opposed to
2177 some random trap signal), then just arrange for it to
2178 hit it again later. We don't keep the SIGTRAP status
2179 and don't forward the SIGTRAP signal to the LWP. We
2180 will handle the current event, eventually we will
2181 resume all LWPs, and this one will get its breakpoint
2182 trap again.
2183
2184 If we do not do this, then we run the risk that the
2185 user will delete or disable the breakpoint, but the
2186 thread will have already tripped on it. */
2187
9f0bdab8
DJ
2188 /* Save the trap's siginfo in case we need it later. */
2189 save_siginfo (lp);
2190
d6b0e80f
AC
2191 /* Now resume this LWP and get the SIGSTOP event. */
2192 errno = 0;
2193 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2194 if (debug_linux_nat)
2195 {
2196 fprintf_unfiltered (gdb_stdlog,
2197 "PTRACE_CONT %s, 0, 0 (%s)\n",
2198 target_pid_to_str (lp->ptid),
2199 errno ? safe_strerror (errno) : "OK");
2200
2201 fprintf_unfiltered (gdb_stdlog,
2202 "SWC: Candidate SIGTRAP event in %s\n",
2203 target_pid_to_str (lp->ptid));
2204 }
710151dd
PA
2205 /* Hold this event/waitstatus while we check to see if
2206 there are any more (we still want to get that SIGSTOP). */
57380f4e 2207 stop_wait_callback (lp, NULL);
710151dd
PA
2208
2209 if (target_can_async_p ())
d6b0e80f 2210 {
710151dd
PA
2211 /* Don't leave a pending wait status in async mode.
2212 Retrigger the breakpoint. */
2213 if (!cancel_breakpoint (lp))
d6b0e80f 2214 {
710151dd
PA
2215 /* There was no gdb breakpoint set at pc. Put
2216 the event back in the queue. */
2217 if (debug_linux_nat)
252fbfc8
PA
2218 fprintf_unfiltered (gdb_stdlog, "\
2219SWC: leaving SIGTRAP in local queue of %s\n", target_pid_to_str (lp->ptid));
2220 push_waitpid (GET_LWP (lp->ptid),
2221 W_STOPCODE (SIGTRAP),
2222 lp->cloned ? __WCLONE : 0);
710151dd
PA
2223 }
2224 }
2225 else
2226 {
2227 /* Hold the SIGTRAP for handling by
2228 linux_nat_wait. */
2229 /* If there's another event, throw it back into the
2230 queue. */
2231 if (lp->status)
2232 {
2233 if (debug_linux_nat)
2234 fprintf_unfiltered (gdb_stdlog,
2235 "SWC: kill %s, %s\n",
2236 target_pid_to_str (lp->ptid),
2237 status_to_str ((int) status));
2238 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2239 }
710151dd
PA
2240 /* Save the sigtrap event. */
2241 lp->status = status;
d6b0e80f 2242 }
d6b0e80f
AC
2243 return 0;
2244 }
2245 else
2246 {
2247 /* The thread was stopped with a signal other than
2248 SIGSTOP, and didn't accidentally trip a breakpoint. */
2249
2250 if (debug_linux_nat)
2251 {
2252 fprintf_unfiltered (gdb_stdlog,
2253 "SWC: Pending event %s in %s\n",
2254 status_to_str ((int) status),
2255 target_pid_to_str (lp->ptid));
2256 }
2257 /* Now resume this LWP and get the SIGSTOP event. */
2258 errno = 0;
2259 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2260 if (debug_linux_nat)
2261 fprintf_unfiltered (gdb_stdlog,
2262 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2263 target_pid_to_str (lp->ptid),
2264 errno ? safe_strerror (errno) : "OK");
2265
2266 /* Hold this event/waitstatus while we check to see if
2267 there are any more (we still want to get that SIGSTOP). */
57380f4e 2268 stop_wait_callback (lp, NULL);
710151dd
PA
2269
2270 /* If the lp->status field is still empty, use it to
2271 hold this event. If not, then this event must be
2272 returned to the event queue of the LWP. */
2273 if (lp->status || target_can_async_p ())
d6b0e80f
AC
2274 {
2275 if (debug_linux_nat)
2276 {
2277 fprintf_unfiltered (gdb_stdlog,
2278 "SWC: kill %s, %s\n",
2279 target_pid_to_str (lp->ptid),
2280 status_to_str ((int) status));
2281 }
2282 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2283 }
710151dd
PA
2284 else
2285 lp->status = status;
d6b0e80f
AC
2286 return 0;
2287 }
2288 }
2289 else
2290 {
2291 /* We caught the SIGSTOP that we intended to catch, so
2292 there's no SIGSTOP pending. */
2293 lp->stopped = 1;
2294 lp->signalled = 0;
2295 }
2296 }
2297
2298 return 0;
2299}
2300
d6b0e80f
AC
2301/* Return non-zero if LP has a wait status pending. */
2302
2303static int
2304status_callback (struct lwp_info *lp, void *data)
2305{
2306 /* Only report a pending wait status if we pretend that this has
2307 indeed been resumed. */
2308 return (lp->status != 0 && lp->resumed);
2309}
2310
2311/* Return non-zero if LP isn't stopped. */
2312
2313static int
2314running_callback (struct lwp_info *lp, void *data)
2315{
2316 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2317}
2318
2319/* Count the LWP's that have had events. */
2320
2321static int
2322count_events_callback (struct lwp_info *lp, void *data)
2323{
2324 int *count = data;
2325
2326 gdb_assert (count != NULL);
2327
e09490f1
DJ
2328 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2329 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2330 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2331 (*count)++;
2332
2333 return 0;
2334}
2335
2336/* Select the LWP (if any) that is currently being single-stepped. */
2337
2338static int
2339select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2340{
2341 if (lp->step && lp->status != 0)
2342 return 1;
2343 else
2344 return 0;
2345}
2346
2347/* Select the Nth LWP that has had a SIGTRAP event. */
2348
2349static int
2350select_event_lwp_callback (struct lwp_info *lp, void *data)
2351{
2352 int *selector = data;
2353
2354 gdb_assert (selector != NULL);
2355
e09490f1
DJ
2356 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2357 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2358 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2359 if ((*selector)-- == 0)
2360 return 1;
2361
2362 return 0;
2363}
2364
710151dd
PA
2365static int
2366cancel_breakpoint (struct lwp_info *lp)
2367{
2368 /* Arrange for a breakpoint to be hit again later. We don't keep
2369 the SIGTRAP status and don't forward the SIGTRAP signal to the
2370 LWP. We will handle the current event, eventually we will resume
2371 this LWP, and this breakpoint will trap again.
2372
2373 If we do not do this, then we run the risk that the user will
2374 delete or disable the breakpoint, but the LWP will have already
2375 tripped on it. */
2376
515630c5
UW
2377 struct regcache *regcache = get_thread_regcache (lp->ptid);
2378 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2379 CORE_ADDR pc;
2380
2381 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2382 if (breakpoint_inserted_here_p (pc))
710151dd
PA
2383 {
2384 if (debug_linux_nat)
2385 fprintf_unfiltered (gdb_stdlog,
2386 "CB: Push back breakpoint for %s\n",
2387 target_pid_to_str (lp->ptid));
2388
2389 /* Back up the PC if necessary. */
515630c5
UW
2390 if (gdbarch_decr_pc_after_break (gdbarch))
2391 regcache_write_pc (regcache, pc);
2392
710151dd
PA
2393 return 1;
2394 }
2395 return 0;
2396}
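/* Note (illustrative): gdbarch_decr_pc_after_break is the number of bytes
   the PC ends up past the breakpoint address once the trap instruction has
   executed.  On x86, for example, the int3 used for software breakpoints
   leaves the PC one byte after the breakpoint, so the value is 1 and the
   regcache_write_pc above rewinds the PC back onto the breakpoint; on
   architectures where it is 0, no adjustment is needed and the write is
   skipped.  */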
2397
d6b0e80f
AC
2398static int
2399cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2400{
2401 struct lwp_info *event_lp = data;
2402
2403 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2404 if (lp == event_lp)
2405 return 0;
2406
2407 /* If a LWP other than the LWP that we're reporting an event for has
2408 hit a GDB breakpoint (as opposed to some random trap signal),
2409 then just arrange for it to hit it again later. We don't keep
2410 the SIGTRAP status and don't forward the SIGTRAP signal to the
2411 LWP. We will handle the current event, eventually we will resume
2412 all LWPs, and this one will get its breakpoint trap again.
2413
2414 If we do not do this, then we run the risk that the user will
2415 delete or disable the breakpoint, but the LWP will have already
2416 tripped on it. */
2417
2418 if (lp->status != 0
2419 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2420 && cancel_breakpoint (lp))
2421 /* Throw away the SIGTRAP. */
2422 lp->status = 0;
d6b0e80f
AC
2423
2424 return 0;
2425}
2426
2427/* Select one LWP out of those that have events pending. */
2428
2429static void
2430select_event_lwp (struct lwp_info **orig_lp, int *status)
2431{
2432 int num_events = 0;
2433 int random_selector;
2434 struct lwp_info *event_lp;
2435
ac264b3b 2436 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2437 (*orig_lp)->status = *status;
2438
2439 /* Give preference to any LWP that is being single-stepped. */
2440 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2441 if (event_lp != NULL)
2442 {
2443 if (debug_linux_nat)
2444 fprintf_unfiltered (gdb_stdlog,
2445 "SEL: Select single-step %s\n",
2446 target_pid_to_str (event_lp->ptid));
2447 }
2448 else
2449 {
2450 /* No single-stepping LWP. Select one at random, out of those
2451 which have had SIGTRAP events. */
2452
2453 /* First see how many SIGTRAP events we have. */
2454 iterate_over_lwps (count_events_callback, &num_events);
2455
2456 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2457 random_selector = (int)
2458 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2459
2460 if (debug_linux_nat && num_events > 1)
2461 fprintf_unfiltered (gdb_stdlog,
2462 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2463 num_events, random_selector);
2464
2465 event_lp = iterate_over_lwps (select_event_lwp_callback,
2466 &random_selector);
2467 }
2468
2469 if (event_lp != NULL)
2470 {
2471 /* Switch the event LWP. */
2472 *orig_lp = event_lp;
2473 *status = event_lp->status;
2474 }
2475
2476 /* Flush the wait status for the event LWP. */
2477 (*orig_lp)->status = 0;
2478}
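/* Note (illustrative): the random_selector computation above maps the
   rand () range [0, RAND_MAX] uniformly onto [0, num_events - 1].  With
   num_events == 3, for instance, rand () values in the lowest third of
   the range pick event #0, the middle third #1 and the top third #2,
   which gives every LWP with a pending SIGTRAP an equal chance of being
   reported and avoids starving any of them.  */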
2479
2480/* Return non-zero if LP has been resumed. */
2481
2482static int
2483resumed_callback (struct lwp_info *lp, void *data)
2484{
2485 return lp->resumed;
2486}
2487
d6b0e80f
AC
2488/* Stop an active thread, verify it still exists, then resume it. */
2489
2490static int
2491stop_and_resume_callback (struct lwp_info *lp, void *data)
2492{
2493 struct lwp_info *ptr;
2494
2495 if (!lp->stopped && !lp->signalled)
2496 {
2497 stop_callback (lp, NULL);
2498 stop_wait_callback (lp, NULL);
2499 /* Resume if the lwp still exists. */
2500 for (ptr = lwp_list; ptr; ptr = ptr->next)
2501 if (lp == ptr)
2502 {
2503 resume_callback (lp, NULL);
2504 resume_set_callback (lp, NULL);
2505 }
2506 }
2507 return 0;
2508}
2509
02f3fc28 2510/* Check if we should go on and pass this event to common code.
fa2c6a57 2511 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
2512static struct lwp_info *
2513linux_nat_filter_event (int lwpid, int status, int options)
2514{
2515 struct lwp_info *lp;
2516
2517 lp = find_lwp_pid (pid_to_ptid (lwpid));
2518
2519 /* Check for stop events reported by a process we didn't already
2520 know about - anything not already in our LWP list.
2521
2522 If we're expecting to receive stopped processes after
2523 fork, vfork, and clone events, then we'll just add the
2524 new one to our list and go back to waiting for the event
2525 to be reported - the stopped process might be returned
2526 from waitpid before or after the event is. */
2527 if (WIFSTOPPED (status) && !lp)
2528 {
2529 linux_record_stopped_pid (lwpid, status);
2530 return NULL;
2531 }
2532
2533 /* Make sure we don't report an event for the exit of an LWP not in
2534 our list, i.e. not part of the current process. This can happen
2535 if we detach from a program we originally forked and then it
2536 exits. */
2537 if (!WIFSTOPPED (status) && !lp)
2538 return NULL;
2539
2540 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2541 CLONE_PTRACE processes which do not use the thread library -
2542 otherwise we wouldn't find the new LWP this way. That doesn't
2543 currently work, and the following code is currently unreachable
2544 due to the two blocks above. If it's fixed some day, this code
2545 should be broken out into a function so that we can also pick up
2546 LWPs from the new interface. */
2547 if (!lp)
2548 {
2549 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2550 if (options & __WCLONE)
2551 lp->cloned = 1;
2552
2553 gdb_assert (WIFSTOPPED (status)
2554 && WSTOPSIG (status) == SIGSTOP);
2555 lp->signalled = 1;
2556
2557 if (!in_thread_list (inferior_ptid))
2558 {
2559 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2560 GET_PID (inferior_ptid));
2561 add_thread (inferior_ptid);
2562 }
2563
2564 add_thread (lp->ptid);
2565 }
2566
2567 /* Save the trap's siginfo in case we need it later. */
2568 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2569 save_siginfo (lp);
2570
2571 /* Handle GNU/Linux's extended waitstatus for trace events. */
2572 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2573 {
2574 if (debug_linux_nat)
2575 fprintf_unfiltered (gdb_stdlog,
2576 "LLW: Handling extended status 0x%06x\n",
2577 status);
2578 if (linux_handle_extended_wait (lp, status, 0))
2579 return NULL;
2580 }
2581
2582 /* Check if the thread has exited. */
2583 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2584 {
2585 /* If this is the main thread, we must stop all threads and
2586 verify if they are still alive. This is because in the nptl
2587 thread model, there is no signal issued for exiting LWPs
2588 other than the main thread. We only get the main thread exit
2589 signal once all child threads have already exited. If we
2590 stop all the threads and use the stop_wait_callback to check
2591 if they have exited we can determine whether this signal
2592 should be ignored or whether it means the end of the debugged
2593 application, regardless of which threading model is being
2594 used. */
2595 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2596 {
2597 lp->stopped = 1;
2598 iterate_over_lwps (stop_and_resume_callback, NULL);
2599 }
2600
2601 if (debug_linux_nat)
2602 fprintf_unfiltered (gdb_stdlog,
2603 "LLW: %s exited.\n",
2604 target_pid_to_str (lp->ptid));
2605
2606 exit_lwp (lp);
2607
2608 /* If there is at least one more LWP, then the exit signal was
2609 not the end of the debugged application and should be
2610 ignored. */
2611 if (num_lwps > 0)
4c28f408 2612 return NULL;
02f3fc28
PA
2613 }
2614
2615 /* Check if the current LWP has previously exited. In the nptl
2616 thread model, LWPs other than the main thread do not issue
2617 signals when they exit so we must check whenever the thread has
2618 stopped. A similar check is made in stop_wait_callback(). */
2619 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2620 {
2621 if (debug_linux_nat)
2622 fprintf_unfiltered (gdb_stdlog,
2623 "LLW: %s exited.\n",
2624 target_pid_to_str (lp->ptid));
2625
2626 exit_lwp (lp);
2627
2628 /* Make sure there is at least one thread running. */
2629 gdb_assert (iterate_over_lwps (running_callback, NULL));
2630
2631 /* Discard the event. */
2632 return NULL;
2633 }
2634
2635 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2636 an attempt to stop an LWP. */
2637 if (lp->signalled
2638 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2639 {
2640 if (debug_linux_nat)
2641 fprintf_unfiltered (gdb_stdlog,
2642 "LLW: Delayed SIGSTOP caught for %s.\n",
2643 target_pid_to_str (lp->ptid));
2644
2645 /* This is a delayed SIGSTOP. */
2646 lp->signalled = 0;
2647
2648 registers_changed ();
2649
2650 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2651 lp->step, TARGET_SIGNAL_0);
2652 if (debug_linux_nat)
2653 fprintf_unfiltered (gdb_stdlog,
2654 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2655 lp->step ?
2656 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2657 target_pid_to_str (lp->ptid));
2658
2659 lp->stopped = 0;
2660 gdb_assert (lp->resumed);
2661
2662 /* Discard the event. */
2663 return NULL;
2664 }
2665
57380f4e
DJ
2666 /* Make sure we don't report a SIGINT that we have already displayed
2667 for another thread. */
2668 if (lp->ignore_sigint
2669 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2670 {
2671 if (debug_linux_nat)
2672 fprintf_unfiltered (gdb_stdlog,
2673 "LLW: Delayed SIGINT caught for %s.\n",
2674 target_pid_to_str (lp->ptid));
2675
2676 /* This is a delayed SIGINT. */
2677 lp->ignore_sigint = 0;
2678
2679 registers_changed ();
2680 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2681 lp->step, TARGET_SIGNAL_0);
2682 if (debug_linux_nat)
2683 fprintf_unfiltered (gdb_stdlog,
2684 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2685 lp->step ?
2686 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2687 target_pid_to_str (lp->ptid));
2688
2689 lp->stopped = 0;
2690 gdb_assert (lp->resumed);
2691
2692 /* Discard the event. */
2693 return NULL;
2694 }
2695
02f3fc28
PA
2696 /* An interesting event. */
2697 gdb_assert (lp);
2698 return lp;
2699}
2700
b84876c2
PA
2701/* Get the events stored in the pipe into the local queue, so they are
2702 accessible to queued_waitpid. We need to do this, since it is not
2703 always the case that the event at the head of the pipe is the event
2704 we want. */
2705
2706static void
2707pipe_to_local_event_queue (void)
2708{
2709 if (debug_linux_nat_async)
2710 fprintf_unfiltered (gdb_stdlog,
2711 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2712 linux_nat_num_queued_events);
2713 while (linux_nat_num_queued_events)
2714 {
2715 int lwpid, status, options;
b84876c2 2716 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2717 gdb_assert (lwpid > 0);
2718 push_waitpid (lwpid, status, options);
2719 }
2720}
2721
2722/* Get the unprocessed events stored in the local queue back into the
2723 pipe, so the event loop realizes there's something else to
2724 process. */
2725
2726static void
2727local_event_queue_to_pipe (void)
2728{
2729 struct waitpid_result *w = waitpid_queue;
2730 while (w)
2731 {
2732 struct waitpid_result *next = w->next;
2733 linux_nat_event_pipe_push (w->pid,
2734 w->status,
2735 w->options);
2736 xfree (w);
2737 w = next;
2738 }
2739 waitpid_queue = NULL;
2740
2741 if (debug_linux_nat_async)
2742 fprintf_unfiltered (gdb_stdlog,
2743 "LEQTP: linux_nat_num_queued_events(%d)\n",
2744 linux_nat_num_queued_events);
2745}
2746
d6b0e80f
AC
2747static ptid_t
2748linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2749{
2750 struct lwp_info *lp = NULL;
2751 int options = 0;
2752 int status = 0;
2753 pid_t pid = PIDGET (ptid);
d6b0e80f 2754
b84876c2
PA
2755 if (debug_linux_nat_async)
2756 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2757
f973ed9c
DJ
2758 /* The first time we get here after starting a new inferior, we may
2759 not have added it to the LWP list yet - this is the earliest
2760 moment at which we know its PID. */
2761 if (num_lwps == 0)
2762 {
2763 gdb_assert (!is_lwp (inferior_ptid));
2764
27c9d204
PA
2765 /* Upgrade the main thread's ptid. */
2766 thread_change_ptid (inferior_ptid,
2767 BUILD_LWP (GET_PID (inferior_ptid),
2768 GET_PID (inferior_ptid)));
2769
f973ed9c
DJ
2770 lp = add_lwp (inferior_ptid);
2771 lp->resumed = 1;
2772 }
2773
84e46146
PA
2774 /* Block events while we're here. */
2775 linux_nat_async_events (sigchld_sync);
d6b0e80f
AC
2776
2777retry:
2778
f973ed9c
DJ
2779 /* Make sure there is at least one LWP that has been resumed. */
2780 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2781
2782 /* First check if there is a LWP with a wait status pending. */
2783 if (pid == -1)
2784 {
2785 /* Any LWP that's been resumed will do. */
2786 lp = iterate_over_lwps (status_callback, NULL);
2787 if (lp)
2788 {
710151dd
PA
2789 if (target_can_async_p ())
2790 internal_error (__FILE__, __LINE__,
2791 "Found an LWP with a pending status in async mode.");
2792
d6b0e80f
AC
2793 status = lp->status;
2794 lp->status = 0;
2795
2796 if (debug_linux_nat && status)
2797 fprintf_unfiltered (gdb_stdlog,
2798 "LLW: Using pending wait status %s for %s.\n",
2799 status_to_str (status),
2800 target_pid_to_str (lp->ptid));
2801 }
2802
b84876c2 2803 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2804 cloned and uncloned processes. We start with the cloned
2805 processes. */
2806 options = __WCLONE | WNOHANG;
2807 }
2808 else if (is_lwp (ptid))
2809 {
2810 if (debug_linux_nat)
2811 fprintf_unfiltered (gdb_stdlog,
2812 "LLW: Waiting for specific LWP %s.\n",
2813 target_pid_to_str (ptid));
2814
2815 /* We have a specific LWP to check. */
2816 lp = find_lwp_pid (ptid);
2817 gdb_assert (lp);
2818 status = lp->status;
2819 lp->status = 0;
2820
2821 if (debug_linux_nat && status)
2822 fprintf_unfiltered (gdb_stdlog,
2823 "LLW: Using pending wait status %s for %s.\n",
2824 status_to_str (status),
2825 target_pid_to_str (lp->ptid));
2826
2827 /* If we have to wait, take into account whether PID is a cloned
2828 process or not. And we have to convert it to something that
2829 the layer beneath us can understand. */
2830 options = lp->cloned ? __WCLONE : 0;
2831 pid = GET_LWP (ptid);
2832 }
2833
2834 if (status && lp->signalled)
2835 {
2836 /* A pending SIGSTOP may interfere with the normal stream of
2837 events. In a typical case where interference is a problem,
2838 we have a SIGSTOP signal pending for LWP A while
2839 single-stepping it, encounter an event in LWP B, and take the
2840 pending SIGSTOP while trying to stop LWP A. After processing
2841 the event in LWP B, LWP A is continued, and we'll never see
2842 the SIGTRAP associated with the last time we were
2843 single-stepping LWP A. */
2844
2845 /* Resume the thread. It should halt immediately returning the
2846 pending SIGSTOP. */
2847 registers_changed ();
10d6c8cd
DJ
2848 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2849 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2850 if (debug_linux_nat)
2851 fprintf_unfiltered (gdb_stdlog,
2852 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2853 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2854 target_pid_to_str (lp->ptid));
2855 lp->stopped = 0;
2856 gdb_assert (lp->resumed);
2857
2858 /* This should catch the pending SIGSTOP. */
2859 stop_wait_callback (lp, NULL);
2860 }
2861
b84876c2
PA
2862 if (!target_can_async_p ())
2863 {
2864 /* Causes SIGINT to be passed on to the attached process. */
2865 set_sigint_trap ();
2866 set_sigio_trap ();
2867 }
d6b0e80f
AC
2868
2869 while (status == 0)
2870 {
2871 pid_t lwpid;
2872
b84876c2
PA
2873 if (target_can_async_p ())
2874 /* In async mode, don't ever block. Only look at the locally
2875 queued events. */
2876 lwpid = queued_waitpid (pid, &status, options);
2877 else
2878 lwpid = my_waitpid (pid, &status, options);
2879
d6b0e80f
AC
2880 if (lwpid > 0)
2881 {
2882 gdb_assert (pid == -1 || lwpid == pid);
2883
2884 if (debug_linux_nat)
2885 {
2886 fprintf_unfiltered (gdb_stdlog,
2887 "LLW: waitpid %ld received %s\n",
2888 (long) lwpid, status_to_str (status));
2889 }
2890
02f3fc28 2891 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2892 if (!lp)
2893 {
02f3fc28 2894 /* A discarded event. */
d6b0e80f
AC
2895 status = 0;
2896 continue;
2897 }
2898
2899 break;
2900 }
2901
2902 if (pid == -1)
2903 {
2904 /* Alternate between checking cloned and uncloned processes. */
2905 options ^= __WCLONE;
2906
b84876c2
PA
2907 /* And every time we have checked both:
2908 In async mode, return to event loop;
2909 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2910 if (options & __WCLONE)
b84876c2
PA
2911 {
2912 if (target_can_async_p ())
2913 {
2914 /* No interesting event. */
2915 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2916
2917 /* Get ready for the next event. */
2918 target_async (inferior_event_handler, 0);
2919
2920 if (debug_linux_nat_async)
2921 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2922
2923 return minus_one_ptid;
2924 }
2925
2926 sigsuspend (&suspend_mask);
2927 }
d6b0e80f
AC
2928 }
2929
2930 /* We shouldn't end up here unless we want to try again. */
2931 gdb_assert (status == 0);
2932 }
2933
b84876c2
PA
2934 if (!target_can_async_p ())
2935 {
2936 clear_sigio_trap ();
2937 clear_sigint_trap ();
2938 }
d6b0e80f
AC
2939
2940 gdb_assert (lp);
2941
2942 /* Don't report signals that GDB isn't interested in, such as
2943 signals that are neither printed nor stopped upon. Stopping all
2944 threads can be a bit time-consuming so if we want decent
2945 performance with heavily multi-threaded programs, especially when
2946 they're using a high frequency timer, we'd better avoid it if we
2947 can. */
2948
2949 if (WIFSTOPPED (status))
2950 {
2951 int signo = target_signal_from_host (WSTOPSIG (status));
d6b48e9c
PA
2952 struct inferior *inf;
2953
2954 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2955 gdb_assert (inf);
d6b0e80f 2956
d6b48e9c
PA
2957 /* Defer to common code if we get a signal while
2958 single-stepping, since that may need special care, e.g. to
2959 skip the signal handler, or, if we're gaining control of the
2960 inferior. */
d539ed7e 2961 if (!lp->step
d6b48e9c 2962 && inf->stop_soon == NO_STOP_QUIETLY
d539ed7e 2963 && signal_stop_state (signo) == 0
d6b0e80f
AC
2964 && signal_print_state (signo) == 0
2965 && signal_pass_state (signo) == 1)
2966 {
2967 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2968 here? It is not clear we should. GDB may not expect
2969 other threads to run. On the other hand, not resuming
2970 newly attached threads may cause an unwanted delay in
2971 getting them running. */
2972 registers_changed ();
10d6c8cd
DJ
2973 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2974 lp->step, signo);
d6b0e80f
AC
2975 if (debug_linux_nat)
2976 fprintf_unfiltered (gdb_stdlog,
2977 "LLW: %s %s, %s (preempt 'handle')\n",
2978 lp->step ?
2979 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2980 target_pid_to_str (lp->ptid),
2981 signo ? strsignal (signo) : "0");
2982 lp->stopped = 0;
2983 status = 0;
2984 goto retry;
2985 }
2986
1ad15515 2987 if (!non_stop)
d6b0e80f 2988 {
1ad15515
PA
2989 /* Only do the below in all-stop, as we currently use SIGINT
2990 to implement target_stop (see linux_nat_stop) in
2991 non-stop. */
2992 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2993 {
2994 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
2995 forwarded to the entire process group, that is, all LWPs
2996 will receive it - unless they're using CLONE_THREAD to
2997 share signals. Since we only want to report it once, we
2998 mark it as ignored for all LWPs except this one. */
2999 iterate_over_lwps (set_ignore_sigint, NULL);
3000 lp->ignore_sigint = 0;
3001 }
3002 else
3003 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3004 }
3005 }
3006
3007 /* This LWP is stopped now. */
3008 lp->stopped = 1;
3009
3010 if (debug_linux_nat)
3011 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3012 status_to_str (status), target_pid_to_str (lp->ptid));
3013
4c28f408
PA
3014 if (!non_stop)
3015 {
3016 /* Now stop all other LWP's ... */
3017 iterate_over_lwps (stop_callback, NULL);
3018
3019 /* ... and wait until all of them have reported back that
3020 they're no longer running. */
57380f4e 3021 iterate_over_lwps (stop_wait_callback, NULL);
4c28f408
PA
3022
3023 /* If we're not waiting for a specific LWP, choose an event LWP
3024 from among those that have had events. Giving equal priority
3025 to all LWPs that have had events helps prevent
3026 starvation. */
3027 if (pid == -1)
3028 select_event_lwp (&lp, &status);
3029 }
d6b0e80f
AC
3030
3031 /* Now that we've selected our final event LWP, cancel any
3032 breakpoints in other LWPs that have hit a GDB breakpoint. See
3033 the comment in cancel_breakpoints_callback to find out why. */
3034 iterate_over_lwps (cancel_breakpoints_callback, lp);
3035
d6b0e80f
AC
3036 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3037 {
d6b0e80f
AC
3038 if (debug_linux_nat)
3039 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3040 "LLW: trap ptid is %s.\n",
3041 target_pid_to_str (lp->ptid));
d6b0e80f 3042 }
d6b0e80f
AC
3043
3044 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3045 {
3046 *ourstatus = lp->waitstatus;
3047 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3048 }
3049 else
3050 store_waitstatus (ourstatus, status);
3051
b84876c2
PA
3052 /* Get ready for the next event. */
3053 if (target_can_async_p ())
3054 target_async (inferior_event_handler, 0);
3055
3056 if (debug_linux_nat_async)
3057 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3058
f973ed9c 3059 return lp->ptid;
d6b0e80f
AC
3060}
3061
3062static int
3063kill_callback (struct lwp_info *lp, void *data)
3064{
3065 errno = 0;
3066 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3067 if (debug_linux_nat)
3068 fprintf_unfiltered (gdb_stdlog,
3069 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3070 target_pid_to_str (lp->ptid),
3071 errno ? safe_strerror (errno) : "OK");
3072
3073 return 0;
3074}
3075
3076static int
3077kill_wait_callback (struct lwp_info *lp, void *data)
3078{
3079 pid_t pid;
3080
3081 /* We must make sure that there are no pending events (delayed
3082 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3083 program doesn't interfere with any following debugging session. */
3084
3085 /* For cloned processes we must check both with __WCLONE and
3086 without, since the exit status of a cloned process isn't reported
3087 with __WCLONE. */
3088 if (lp->cloned)
3089 {
3090 do
3091 {
58aecb61 3092 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3093 if (pid != (pid_t) -1)
d6b0e80f 3094 {
e85a822c
DJ
3095 if (debug_linux_nat)
3096 fprintf_unfiltered (gdb_stdlog,
3097 "KWC: wait %s received unknown.\n",
3098 target_pid_to_str (lp->ptid));
3099 /* The Linux kernel sometimes fails to kill a thread
3100 completely after PTRACE_KILL; that goes from the stop
3101 point in do_fork out to the one in
3102 get_signal_to_deliver and waits again. So kill it
3103 again. */
3104 kill_callback (lp, NULL);
d6b0e80f
AC
3105 }
3106 }
3107 while (pid == GET_LWP (lp->ptid));
3108
3109 gdb_assert (pid == -1 && errno == ECHILD);
3110 }
3111
3112 do
3113 {
58aecb61 3114 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3115 if (pid != (pid_t) -1)
d6b0e80f 3116 {
e85a822c
DJ
3117 if (debug_linux_nat)
3118 fprintf_unfiltered (gdb_stdlog,
3119 "KWC: wait %s received unk.\n",
3120 target_pid_to_str (lp->ptid));
3121 /* See the call to kill_callback above. */
3122 kill_callback (lp, NULL);
d6b0e80f
AC
3123 }
3124 }
3125 while (pid == GET_LWP (lp->ptid));
3126
3127 gdb_assert (pid == -1 && errno == ECHILD);
3128 return 0;
3129}
3130
3131static void
3132linux_nat_kill (void)
3133{
f973ed9c
DJ
3134 struct target_waitstatus last;
3135 ptid_t last_ptid;
3136 int status;
d6b0e80f 3137
b84876c2
PA
3138 if (target_can_async_p ())
3139 target_async (NULL, 0);
3140
f973ed9c
DJ
3141 /* If we're stopped while forking and we haven't followed yet,
3142 kill the other task. We need to do this first because the
3143 parent will be sleeping if this is a vfork. */
d6b0e80f 3144
f973ed9c 3145 get_last_target_status (&last_ptid, &last);
d6b0e80f 3146
f973ed9c
DJ
3147 if (last.kind == TARGET_WAITKIND_FORKED
3148 || last.kind == TARGET_WAITKIND_VFORKED)
3149 {
3a3e9ee3 3150 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3151 wait (&status);
3152 }
3153
3154 if (forks_exist_p ())
b84876c2
PA
3155 {
3156 linux_fork_killall ();
3157 drain_queued_events (-1);
3158 }
f973ed9c
DJ
3159 else
3160 {
4c28f408
PA
3161 /* Stop all threads before killing them, since ptrace requires
3162 that the thread is stopped to successfully PTRACE_KILL. */
3163 iterate_over_lwps (stop_callback, NULL);
3164 /* ... and wait until all of them have reported back that
3165 they're no longer running. */
3166 iterate_over_lwps (stop_wait_callback, NULL);
3167
f973ed9c
DJ
3168 /* Kill all LWP's ... */
3169 iterate_over_lwps (kill_callback, NULL);
3170
3171 /* ... and wait until we've flushed all events. */
3172 iterate_over_lwps (kill_wait_callback, NULL);
3173 }
3174
3175 target_mourn_inferior ();
d6b0e80f
AC
3176}
3177
3178static void
3179linux_nat_mourn_inferior (void)
3180{
d6b0e80f
AC
3181 /* Destroy LWP info; it's no longer valid. */
3182 init_lwp_list ();
3183
f973ed9c 3184 if (! forks_exist_p ())
b84876c2
PA
3185 {
3186 /* Normal case, no other forks available. */
3187 if (target_can_async_p ())
3188 linux_nat_async (NULL, 0);
3189 linux_ops->to_mourn_inferior ();
3190 }
f973ed9c
DJ
3191 else
3192 /* Multi-fork case. The current inferior_ptid has exited, but
3193 there are other viable forks to debug. Delete the exiting
3194 one and context-switch to the first available. */
3195 linux_fork_mourn_inferior ();
d6b0e80f
AC
3196}
3197
10d6c8cd
DJ
3198static LONGEST
3199linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3200 const char *annex, gdb_byte *readbuf,
3201 const gdb_byte *writebuf,
3202 ULONGEST offset, LONGEST len)
d6b0e80f
AC
3203{
3204 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 3205 LONGEST xfer;
d6b0e80f
AC
3206
3207 if (is_lwp (inferior_ptid))
3208 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3209
10d6c8cd
DJ
3210 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3211 offset, len);
d6b0e80f
AC
3212
3213 do_cleanups (old_chain);
3214 return xfer;
3215}
3216
3217static int
3218linux_nat_thread_alive (ptid_t ptid)
3219{
4c28f408
PA
3220 int err;
3221
d6b0e80f
AC
3222 gdb_assert (is_lwp (ptid));
3223
4c28f408
PA
3224 /* Send signal 0 instead of anything ptrace, because ptracing a
3225 running thread errors out claiming that the thread doesn't
3226 exist. */
3227 err = kill_lwp (GET_LWP (ptid), 0);
3228
d6b0e80f
AC
3229 if (debug_linux_nat)
3230 fprintf_unfiltered (gdb_stdlog,
4c28f408 3231 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3232 target_pid_to_str (ptid),
4c28f408 3233 err ? safe_strerror (err) : "OK");
9c0dd46b 3234
4c28f408 3235 if (err != 0)
d6b0e80f
AC
3236 return 0;
3237
3238 return 1;
3239}
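/* Note (illustrative): signal number 0 is the standard POSIX liveness
   probe; kill/tkill with sig 0 performs only the existence and permission
   checks without delivering a signal, so a zero result here means the LWP
   still exists, while a failure (typically because the kernel reports
   ESRCH) means it has already exited.  */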
3240
3241static char *
3242linux_nat_pid_to_str (ptid_t ptid)
3243{
3244 static char buf[64];
3245
a0ef4274
DJ
3246 if (is_lwp (ptid)
3247 && ((lwp_list && lwp_list->next)
3248 || GET_PID (ptid) != GET_LWP (ptid)))
d6b0e80f
AC
3249 {
3250 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3251 return buf;
3252 }
3253
3254 return normal_pid_to_str (ptid);
3255}
3256
d6b0e80f
AC
3257static void
3258sigchld_handler (int signo)
3259{
c6ebd6cf 3260 if (target_async_permitted
84e46146 3261 && linux_nat_async_events_state != sigchld_sync
b84876c2
PA
3262 && signo == SIGCHLD)
3263 /* It is *always* a bug to hit this. */
3264 internal_error (__FILE__, __LINE__,
3265 "sigchld_handler called when async events are enabled");
3266
d6b0e80f
AC
3267 /* Do nothing. The only reason for this handler is that it allows
3268 us to use sigsuspend in linux_nat_wait above to wait for the
3269 arrival of a SIGCHLD. */
3270}
3271
dba24537
AC
3272/* Accepts an integer PID; returns a string representing a file that
3273 can be opened to get the symbols for the child process. */
3274
6d8fd2b7
UW
3275static char *
3276linux_child_pid_to_exec_file (int pid)
dba24537
AC
3277{
3278 char *name1, *name2;
3279
3280 name1 = xmalloc (MAXPATHLEN);
3281 name2 = xmalloc (MAXPATHLEN);
3282 make_cleanup (xfree, name1);
3283 make_cleanup (xfree, name2);
3284 memset (name2, 0, MAXPATHLEN);
3285
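  /* Note (illustrative): readlink does not NUL-terminate the buffer it
     fills in, which is why name2 was zeroed above.  /proc/PID/exe is a
     symlink to the executable the process was started from, so on
     success name2 holds that path; otherwise the /proc path itself is
     returned.  */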
3286 sprintf (name1, "/proc/%d/exe", pid);
3287 if (readlink (name1, name2, MAXPATHLEN) > 0)
3288 return name2;
3289 else
3290 return name1;
3291}
3292
3293/* Service function for corefiles and info proc. */
3294
3295static int
3296read_mapping (FILE *mapfile,
3297 long long *addr,
3298 long long *endaddr,
3299 char *permissions,
3300 long long *offset,
3301 char *device, long long *inode, char *filename)
3302{
3303 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3304 addr, endaddr, permissions, offset, device, inode);
3305
2e14c2ea
MS
3306 filename[0] = '\0';
3307 if (ret > 0 && ret != EOF)
dba24537
AC
3308 {
3309 /* Eat everything up to EOL for the filename. This will prevent
3310 weird filenames (such as one with embedded whitespace) from
3311 confusing this code. It also makes this code more robust with
3312 respect to annotations the kernel may add after the filename.
3313
3314 Note the filename is used for informational purposes
3315 only. */
3316 ret += fscanf (mapfile, "%[^\n]\n", filename);
3317 }
2e14c2ea 3318
dba24537
AC
3319 return (ret != 0 && ret != EOF);
3320}
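/* Example (illustrative): a typical /proc/PID/maps line parsed by
   read_mapping above looks like

     08048000-0804c000 r-xp 00000000 03:02 1234567    /bin/cat

   which the first fscanf splits into addr = 0x8048000, endaddr =
   0x804c000, permissions = "r-xp", offset = 0, device = "03:02" and
   inode = 1234567, while the trailing "%[^\n]" scan picks up the
   optional filename.  */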
3321
3322/* Fills the "to_find_memory_regions" target vector. Lists the memory
3323 regions in the inferior for a corefile. */
3324
3325static int
3326linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3327 unsigned long,
3328 int, int, int, void *), void *obfd)
3329{
3330 long long pid = PIDGET (inferior_ptid);
3331 char mapsfilename[MAXPATHLEN];
3332 FILE *mapsfile;
3333 long long addr, endaddr, size, offset, inode;
3334 char permissions[8], device[8], filename[MAXPATHLEN];
3335 int read, write, exec;
3336 int ret;
3337
3338 /* Compose the filename for the /proc memory map, and open it. */
3339 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3340 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3341 error (_("Could not open %s."), mapsfilename);
dba24537
AC
3342
3343 if (info_verbose)
3344 fprintf_filtered (gdb_stdout,
3345 "Reading memory regions from %s\n", mapsfilename);
3346
3347 /* Now iterate until end-of-file. */
3348 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3349 &offset, &device[0], &inode, &filename[0]))
3350 {
3351 size = endaddr - addr;
3352
3353 /* Get the segment's permissions. */
3354 read = (strchr (permissions, 'r') != 0);
3355 write = (strchr (permissions, 'w') != 0);
3356 exec = (strchr (permissions, 'x') != 0);
3357
3358 if (info_verbose)
3359 {
3360 fprintf_filtered (gdb_stdout,
3361 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3362 size, paddr_nz (addr),
3363 read ? 'r' : ' ',
3364 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3365 if (filename[0])
dba24537
AC
3366 fprintf_filtered (gdb_stdout, " for %s", filename);
3367 fprintf_filtered (gdb_stdout, "\n");
3368 }
3369
3370 /* Invoke the callback function to create the corefile
3371 segment. */
3372 func (addr, size, read, write, exec, obfd);
3373 }
3374 fclose (mapsfile);
3375 return 0;
3376}
3377
2020b7ab
PA
3378static int
3379find_signalled_thread (struct thread_info *info, void *data)
3380{
3381 if (info->stop_signal != TARGET_SIGNAL_0
3382 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
3383 return 1;
3384
3385 return 0;
3386}
3387
3388static enum target_signal
3389find_stop_signal (void)
3390{
3391 struct thread_info *info =
3392 iterate_over_threads (find_signalled_thread, NULL);
3393
3394 if (info)
3395 return info->stop_signal;
3396 else
3397 return TARGET_SIGNAL_0;
3398}
3399
dba24537
AC
3400/* Records the thread's register state for the corefile note
3401 section. */
3402
3403static char *
3404linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
3405 char *note_data, int *note_size,
3406 enum target_signal stop_signal)
dba24537
AC
3407{
3408 gdb_gregset_t gregs;
3409 gdb_fpregset_t fpregs;
dba24537 3410 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
3411 struct regcache *regcache = get_thread_regcache (ptid);
3412 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 3413 const struct regset *regset;
55e969c1 3414 int core_regset_p;
594f7785 3415 struct cleanup *old_chain;
17ea7499
CES
3416 struct core_regset_section *sect_list;
3417 char *gdb_regset;
594f7785
UW
3418
3419 old_chain = save_inferior_ptid ();
3420 inferior_ptid = ptid;
3421 target_fetch_registers (regcache, -1);
3422 do_cleanups (old_chain);
4f844a66
DM
3423
3424 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
3425 sect_list = gdbarch_core_regset_sections (gdbarch);
3426
55e969c1
DM
3427 if (core_regset_p
3428 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3429 sizeof (gregs))) != NULL
3430 && regset->collect_regset != NULL)
594f7785 3431 regset->collect_regset (regset, regcache, -1,
55e969c1 3432 &gregs, sizeof (gregs));
4f844a66 3433 else
594f7785 3434 fill_gregset (regcache, &gregs, -1);
4f844a66 3435
55e969c1
DM
3436 note_data = (char *) elfcore_write_prstatus (obfd,
3437 note_data,
3438 note_size,
3439 lwp,
3440 stop_signal, &gregs);
3441
17ea7499
CES
3442 /* The loop below uses the new struct core_regset_section, which stores
3443 the supported section names and sizes for the core file. Note that
3444 note PRSTATUS needs to be treated specially. But the other notes are
3445 structurally the same, so they can benefit from the new struct. */
3446 if (core_regset_p && sect_list != NULL)
3447 while (sect_list->sect_name != NULL)
3448 {
3449 /* .reg was already handled above. */
3450 if (strcmp (sect_list->sect_name, ".reg") == 0)
3451 {
3452 sect_list++;
3453 continue;
3454 }
3455 regset = gdbarch_regset_from_core_section (gdbarch,
3456 sect_list->sect_name,
3457 sect_list->size);
3458 gdb_assert (regset && regset->collect_regset);
3459 gdb_regset = xmalloc (sect_list->size);
3460 regset->collect_regset (regset, regcache, -1,
3461 gdb_regset, sect_list->size);
3462 note_data = (char *) elfcore_write_register_note (obfd,
3463 note_data,
3464 note_size,
3465 sect_list->sect_name,
3466 gdb_regset,
3467 sect_list->size);
3468 xfree (gdb_regset);
3469 sect_list++;
3470 }
dba24537 3471
17ea7499
CES
3472 /* For architectures that do not have the struct core_regset_section
3473 implemented, we use the old method. When all the architectures have
3474 the new support, the code below should be deleted. */
4f844a66 3475 else
17ea7499
CES
3476 {
3477 if (core_regset_p
3478 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3479 sizeof (fpregs))) != NULL
3480 && regset->collect_regset != NULL)
3481 regset->collect_regset (regset, regcache, -1,
3482 &fpregs, sizeof (fpregs));
3483 else
3484 fill_fpregset (regcache, &fpregs, -1);
3485
3486 note_data = (char *) elfcore_write_prfpreg (obfd,
3487 note_data,
3488 note_size,
3489 &fpregs, sizeof (fpregs));
3490 }
4f844a66 3491
dba24537
AC
3492 return note_data;
3493}
3494
3495struct linux_nat_corefile_thread_data
3496{
3497 bfd *obfd;
3498 char *note_data;
3499 int *note_size;
3500 int num_notes;
2020b7ab 3501 enum target_signal stop_signal;
dba24537
AC
3502};
3503
3504/* Called once per LWP by iterate_over_lwps. Records the LWP's
3505 register state for the corefile note section. */
3506
3507static int
3508linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3509{
3510 struct linux_nat_corefile_thread_data *args = data;
dba24537 3511
dba24537
AC
3512 args->note_data = linux_nat_do_thread_registers (args->obfd,
3513 ti->ptid,
3514 args->note_data,
2020b7ab
PA
3515 args->note_size,
3516 args->stop_signal);
dba24537 3517 args->num_notes++;
56be3814 3518
dba24537
AC
3519 return 0;
3520}
3521
dba24537
AC
3522/* Implement the "to_make_corefile_notes" target method. Builds the note
3523 section for a corefile, and returns it in a malloc'd buffer. */
3524
3525static char *
3526linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3527{
3528 struct linux_nat_corefile_thread_data thread_args;
3529 struct cleanup *old_chain;
d99148ef 3530 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3531 char fname[16] = { '\0' };
d99148ef 3532 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3533 char psargs[80] = { '\0' };
3534 char *note_data = NULL;
3535 ptid_t current_ptid = inferior_ptid;
c6826062 3536 gdb_byte *auxv;
dba24537
AC
3537 int auxv_len;
3538
3539 if (get_exec_file (0))
3540 {
3541 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3542 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3543 if (get_inferior_args ())
3544 {
d99148ef
JK
3545 char *string_end;
3546 char *psargs_end = psargs + sizeof (psargs);
3547
3548 /* linux_elfcore_write_prpsinfo () handles strings that are not
3549 zero-terminated just fine. */
3550 string_end = memchr (psargs, 0, sizeof (psargs));
3551 if (string_end != NULL)
3552 {
3553 *string_end++ = ' ';
3554 strncpy (string_end, get_inferior_args (),
3555 psargs_end - string_end);
3556 }
dba24537
AC
3557 }
3558 note_data = (char *) elfcore_write_prpsinfo (obfd,
3559 note_data,
3560 note_size, fname, psargs);
3561 }
3562
3563 /* Dump information for threads. */
3564 thread_args.obfd = obfd;
3565 thread_args.note_data = note_data;
3566 thread_args.note_size = note_size;
3567 thread_args.num_notes = 0;
2020b7ab 3568 thread_args.stop_signal = find_stop_signal ();
dba24537 3569 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
3570 gdb_assert (thread_args.num_notes != 0);
3571 note_data = thread_args.note_data;
dba24537 3572
13547ab6
DJ
3573 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3574 NULL, &auxv);
dba24537
AC
3575 if (auxv_len > 0)
3576 {
3577 note_data = elfcore_write_note (obfd, note_data, note_size,
3578 "CORE", NT_AUXV, auxv, auxv_len);
3579 xfree (auxv);
3580 }
3581
3582 make_cleanup (xfree, note_data);
3583 return note_data;
3584}
3585
3586/* Implement the "info proc" command. */
3587
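/* Usage sketch (illustrative): "info proc" reports on the current
   inferior; a leading PID argument selects another process, and
   keywords such as "mappings", "status", "stat" or "all" request the
   additional reports handled below.  */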
3588static void
3589linux_nat_info_proc_cmd (char *args, int from_tty)
3590{
3591 long long pid = PIDGET (inferior_ptid);
3592 FILE *procfile;
3593 char **argv = NULL;
3594 char buffer[MAXPATHLEN];
3595 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3596 int cmdline_f = 1;
3597 int cwd_f = 1;
3598 int exe_f = 1;
3599 int mappings_f = 0;
3600 int environ_f = 0;
3601 int status_f = 0;
3602 int stat_f = 0;
3603 int all = 0;
3604 struct stat dummy;
3605
3606 if (args)
3607 {
3608 /* Break up 'args' into an argv array. */
d1a41061
PP
3609 argv = gdb_buildargv (args);
3610 make_cleanup_freeargv (argv);
dba24537
AC
3611 }
3612 while (argv != NULL && *argv != NULL)
3613 {
3614 if (isdigit (argv[0][0]))
3615 {
3616 pid = strtoul (argv[0], NULL, 10);
3617 }
3618 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3619 {
3620 mappings_f = 1;
3621 }
3622 else if (strcmp (argv[0], "status") == 0)
3623 {
3624 status_f = 1;
3625 }
3626 else if (strcmp (argv[0], "stat") == 0)
3627 {
3628 stat_f = 1;
3629 }
3630 else if (strcmp (argv[0], "cmd") == 0)
3631 {
3632 cmdline_f = 1;
3633 }
3634 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3635 {
3636 exe_f = 1;
3637 }
3638 else if (strcmp (argv[0], "cwd") == 0)
3639 {
3640 cwd_f = 1;
3641 }
3642 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3643 {
3644 all = 1;
3645 }
3646 else
3647 {
3648 /* [...] (future options here) */
3649 }
3650 argv++;
3651 }
3652 if (pid == 0)
8a3fe4f8 3653 error (_("No current process: you must name one."));
dba24537
AC
3654
3655 sprintf (fname1, "/proc/%lld", pid);
3656 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3657 error (_("No /proc directory: '%s'"), fname1);
dba24537 3658
a3f17187 3659 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3660 if (cmdline_f || all)
3661 {
3662 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3663 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3664 {
3665 fgets (buffer, sizeof (buffer), procfile);
3666 printf_filtered ("cmdline = '%s'\n", buffer);
3667 fclose (procfile);
3668 }
3669 else
8a3fe4f8 3670 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3671 }
3672 if (cwd_f || all)
3673 {
3674 sprintf (fname1, "/proc/%lld/cwd", pid);
3675 memset (fname2, 0, sizeof (fname2));
3676 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3677 printf_filtered ("cwd = '%s'\n", fname2);
3678 else
8a3fe4f8 3679 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3680 }
3681 if (exe_f || all)
3682 {
3683 sprintf (fname1, "/proc/%lld/exe", pid);
3684 memset (fname2, 0, sizeof (fname2));
3685 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3686 printf_filtered ("exe = '%s'\n", fname2);
3687 else
8a3fe4f8 3688 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3689 }
3690 if (mappings_f || all)
3691 {
3692 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3693 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3694 {
3695 long long addr, endaddr, size, offset, inode;
3696 char permissions[8], device[8], filename[MAXPATHLEN];
3697
a3f17187 3698 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3699 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3700 {
3701 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3702 "Start Addr",
3703 " End Addr",
3704 " Size", " Offset", "objfile");
3705 }
3706 else
3707 {
3708 printf_filtered (" %18s %18s %10s %10s %7s\n",
3709 "Start Addr",
3710 " End Addr",
3711 " Size", " Offset", "objfile");
3712 }
3713
3714 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3715 &offset, &device[0], &inode, &filename[0]))
3716 {
3717 size = endaddr - addr;
3718
3719 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3720 calls here (and possibly above) should be abstracted
3721 out into their own functions? Andrew suggests using
3722 a generic local_address_string instead to print out
3723 the addresses; that makes sense to me, too. */
3724
17a912b6 3725 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3726 {
3727 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3728 (unsigned long) addr, /* FIXME: pr_addr */
3729 (unsigned long) endaddr,
3730 (int) size,
3731 (unsigned int) offset,
3732 filename[0] ? filename : "");
3733 }
3734 else
3735 {
3736 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3737 (unsigned long) addr, /* FIXME: pr_addr */
3738 (unsigned long) endaddr,
3739 (int) size,
3740 (unsigned int) offset,
3741 filename[0] ? filename : "");
3742 }
3743 }
3744
3745 fclose (procfile);
3746 }
3747 else
8a3fe4f8 3748 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3749 }
3750 if (status_f || all)
3751 {
3752 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3753 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3754 {
3755 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3756 puts_filtered (buffer);
3757 fclose (procfile);
3758 }
3759 else
8a3fe4f8 3760 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3761 }
3762 if (stat_f || all)
3763 {
3764 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3765 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3766 {
3767 int itmp;
3768 char ctmp;
a25694b4 3769 long ltmp;
dba24537
AC
3770
3771 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3772 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3773 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3774 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3775 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3776 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3777 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3778 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3779 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3780 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3781 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3782 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3783 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3784 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3785 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3786 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3787 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3788 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3789 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3790 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3791 (unsigned long) ltmp);
3792 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3793 printf_filtered (_("Minor faults, children: %lu\n"),
3794 (unsigned long) ltmp);
3795 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3796 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3797 (unsigned long) ltmp);
3798 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3799 printf_filtered (_("Major faults, children: %lu\n"),
3800 (unsigned long) ltmp);
3801 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3802 printf_filtered (_("utime: %ld\n"), ltmp);
3803 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3804 printf_filtered (_("stime: %ld\n"), ltmp);
3805 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3806 printf_filtered (_("utime, children: %ld\n"), ltmp);
3807 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3808 printf_filtered (_("stime, children: %ld\n"), ltmp);
3809 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3810 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3811 ltmp);
3812 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3813 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3814 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3815 printf_filtered (_("jiffies until next timeout: %lu\n"),
3816 (unsigned long) ltmp);
3817 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3818 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3819 (unsigned long) ltmp);
3820 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3821 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3822 ltmp);
3823 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3824 printf_filtered (_("Virtual memory size: %lu\n"),
3825 (unsigned long) ltmp);
3826 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3827 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3828 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3829 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3830 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3831 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3832 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3833 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3834 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3835 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3836#if 0 /* Don't know how architecture-dependent the rest is...
3837 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3838 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3839 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3840 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3841 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3842 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3843 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3844 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3845 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3846 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3847 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3848 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3849 printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
3850 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3851 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537
AC
3852#endif
3853 fclose (procfile);
3854 }
3855 else
8a3fe4f8 3856 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3857 }
3858}
3859
10d6c8cd
DJ
3860/* Implement the to_xfer_partial interface for memory reads using the /proc
3861 filesystem. Because we can use a single read() call for /proc, this
3862 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3863 but it doesn't support writes. */
3864
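/* For a rough sense of the cost: transferring LEN bytes at OFFSET
   through /proc/PID/mem is a single pread64 (or lseek + read) system
   call, while PTRACE_PEEKTEXT moves only sizeof (long) bytes per
   call.  */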
3865static LONGEST
3866linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3867 const char *annex, gdb_byte *readbuf,
3868 const gdb_byte *writebuf,
3869 ULONGEST offset, LONGEST len)
dba24537 3870{
10d6c8cd
DJ
3871 LONGEST ret;
3872 int fd;
dba24537
AC
3873 char filename[64];
3874
10d6c8cd 3875 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3876 return 0;
3877
3878 /* Don't bother for reads shorter than three words. */
3879 if (len < 3 * sizeof (long))
3880 return 0;
3881
3882 /* We could keep this file open and cache it - possibly one per
3883 thread. That requires some juggling, but is even faster. */
3884 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3885 fd = open (filename, O_RDONLY | O_LARGEFILE);
3886 if (fd == -1)
3887 return 0;
3888
3889 /* If pread64 is available, use it. It's faster if the kernel
3890 supports it (only one syscall), and it's 64-bit safe even on
3891 32-bit platforms (for instance, SPARC debugging a SPARC64
3892 application). */
3893#ifdef HAVE_PREAD64
10d6c8cd 3894 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3895#else
10d6c8cd 3896 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3897#endif
3898 ret = 0;
3899 else
3900 ret = len;
3901
3902 close (fd);
3903 return ret;
3904}
3905
3906/* Parse LINE as a signal set and add its set bits to SIGS. */
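/* Worked example (illustrative): for the status line
   "SigPnd:\t0000000000000002\n" the caller passes the 16 hex digits
   plus the newline.  The rightmost digit covers signals 1-4; its value
   2 has only bit 1 set, so just signal 2 (SIGINT) is added to SIGS.  */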
3907
3908static void
3909add_line_to_sigset (const char *line, sigset_t *sigs)
3910{
3911 int len = strlen (line) - 1;
3912 const char *p;
3913 int signum;
3914
3915 if (line[len] != '\n')
8a3fe4f8 3916 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3917
3918 p = line;
3919 signum = len * 4;
3920 while (len-- > 0)
3921 {
3922 int digit;
3923
3924 if (*p >= '0' && *p <= '9')
3925 digit = *p - '0';
3926 else if (*p >= 'a' && *p <= 'f')
3927 digit = *p - 'a' + 10;
3928 else
8a3fe4f8 3929 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3930
3931 signum -= 4;
3932
3933 if (digit & 1)
3934 sigaddset (sigs, signum + 1);
3935 if (digit & 2)
3936 sigaddset (sigs, signum + 2);
3937 if (digit & 4)
3938 sigaddset (sigs, signum + 3);
3939 if (digit & 8)
3940 sigaddset (sigs, signum + 4);
3941
3942 p++;
3943 }
3944}
3945
3946/* Read process PID's pending, blocked and ignored signal sets from
3947 /proc/PID/status, and set PENDING, BLOCKED and IGNORED to match. */
3948
3949void
3950linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3951{
3952 FILE *procfile;
3953 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3954 int signum;
3955
3956 sigemptyset (pending);
3957 sigemptyset (blocked);
3958 sigemptyset (ignored);
3959 sprintf (fname, "/proc/%d/status", pid);
3960 procfile = fopen (fname, "r");
3961 if (procfile == NULL)
8a3fe4f8 3962 error (_("Could not open %s"), fname);
dba24537
AC
3963
3964 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3965 {
3966 /* Normal queued signals are on the SigPnd line in the status
3967 file. However, 2.6 kernels also have a "shared" pending
3968 queue for delivering signals to a thread group, so check for
3969 a ShdPnd line also.
3970
3971 Unfortunately some Red Hat kernels include the shared pending
3972 queue but not the ShdPnd status field. */
3973
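      /* The lines being matched look like, e.g. (values illustrative):
           SigPnd: 0000000000000000
           ShdPnd: 0000000000000100
           SigBlk: 0000000000010000
           SigIgn: 0000000000384004  */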
3974 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3975 add_line_to_sigset (buffer + 8, pending);
3976 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3977 add_line_to_sigset (buffer + 8, pending);
3978 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3979 add_line_to_sigset (buffer + 8, blocked);
3980 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3981 add_line_to_sigset (buffer + 8, ignored);
3982 }
3983
3984 fclose (procfile);
3985}
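
/* Minimal usage sketch (illustrative; real callers live elsewhere in
   GDB):

     sigset_t pending, blocked, ignored;

     linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
     if (sigismember (&pending, SIGINT))
       ...  the process has a SIGINT queued ...  */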
3986
10d6c8cd
DJ
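/* to_xfer_partial for the GNU/Linux target: auxv requests go through
   procfs_xfer_auxv, memory reads are attempted via /proc first, and
   anything not handled falls back to the inherited method.  */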
3987static LONGEST
3988linux_xfer_partial (struct target_ops *ops, enum target_object object,
3989 const char *annex, gdb_byte *readbuf,
3990 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3991{
3992 LONGEST xfer;
3993
3994 if (object == TARGET_OBJECT_AUXV)
3995 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3996 offset, len);
3997
3998 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3999 offset, len);
4000 if (xfer != 0)
4001 return xfer;
4002
4003 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4004 offset, len);
4005}
4006
e9efe249 4007/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4008 it with local methods. */
4009
910122bf
UW
4010static void
4011linux_target_install_ops (struct target_ops *t)
10d6c8cd 4012{
6d8fd2b7
UW
4013 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4014 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4015 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4016 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4017 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4018 t->to_post_attach = linux_child_post_attach;
4019 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4020 t->to_find_memory_regions = linux_nat_find_memory_regions;
4021 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4022
4023 super_xfer_partial = t->to_xfer_partial;
4024 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
4025}
4026
4027struct target_ops *
4028linux_target (void)
4029{
4030 struct target_ops *t;
4031
4032 t = inf_ptrace_target ();
4033 linux_target_install_ops (t);
4034
4035 return t;
4036}
4037
4038struct target_ops *
7714d83a 4039linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4040{
4041 struct target_ops *t;
4042
4043 t = inf_ptrace_trad_target (register_u_offset);
4044 linux_target_install_ops (t);
10d6c8cd 4045
10d6c8cd
DJ
4046 return t;
4047}
4048
b84876c2
PA
4049/* target_is_async_p implementation. */
4050
4051static int
4052linux_nat_is_async_p (void)
4053{
4054 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4055 it explicitly with the "maintenance set target-async" command.
b84876c2 4056 Someday, Linux will always be async. */
c6ebd6cf 4057 if (!target_async_permitted)
b84876c2
PA
4058 return 0;
4059
4060 return 1;
4061}
4062
4063/* target_can_async_p implementation. */
4064
4065static int
4066linux_nat_can_async_p (void)
4067{
4068 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4069 it explicitly with the "maintenance set target-async" command.
b84876c2 4070 Someday, Linux will always be async. */
c6ebd6cf 4071 if (!target_async_permitted)
b84876c2
PA
4072 return 0;
4073
4074 /* See target.h/target_async_mask. */
4075 return linux_nat_async_mask_value;
4076}
4077
9908b566
VP
4078static int
4079linux_nat_supports_non_stop (void)
4080{
4081 return 1;
4082}
4083
b84876c2
PA
4084/* target_async_mask implementation. */
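/* A zero MASK turns asynchronous event reporting off (events are then
   collected synchronously); a non-zero MASK re-enables it.  The
   previous mask value is returned so callers can restore it.  */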
4085
4086static int
4087linux_nat_async_mask (int mask)
4088{
4089 int current_state;
4090 current_state = linux_nat_async_mask_value;
4091
4092 if (current_state != mask)
4093 {
4094 if (mask == 0)
4095 {
4096 linux_nat_async (NULL, 0);
4097 linux_nat_async_mask_value = mask;
b84876c2
PA
4098 }
4099 else
4100 {
b84876c2
PA
4101 linux_nat_async_mask_value = mask;
4102 linux_nat_async (inferior_event_handler, 0);
4103 }
4104 }
4105
4106 return current_state;
4107}
4108
4109/* Pop an event from the event pipe. */
4110
4111static int
4112linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
4113{
4114 struct waitpid_result event = {0};
4115 int ret;
4116
4117 do
4118 {
4119 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4120 }
4121 while (ret == -1 && errno == EINTR);
4122
4123 gdb_assert (ret == sizeof (event));
4124
4125 *ptr_status = event.status;
4126 *ptr_options = event.options;
4127
4128 linux_nat_num_queued_events--;
4129
4130 return event.pid;
4131}
4132
4133/* Push an event into the event pipe. */
4134
4135static void
4136linux_nat_event_pipe_push (int pid, int status, int options)
4137{
4138 int ret;
4139 struct waitpid_result event = {0};
4140 event.pid = pid;
4141 event.status = status;
4142 event.options = options;
4143
4144 do
4145 {
4146 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
4147 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
4148 } while (ret == -1 && errno == EINTR);
4149
4150 linux_nat_num_queued_events++;
4151}
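
/* Both helpers above transfer whole struct waitpid_result records; the
   asserts check that a record is never split across a read or write of
   the pipe.  */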
4152
4153static void
4154get_pending_events (void)
4155{
4156 int status, options, pid;
4157
c6ebd6cf 4158 if (!target_async_permitted
84e46146 4159 || linux_nat_async_events_state != sigchld_async)
b84876c2
PA
4160 internal_error (__FILE__, __LINE__,
4161 "get_pending_events called with async masked");
4162
4163 while (1)
4164 {
4165 status = 0;
4166 options = __WCLONE | WNOHANG;
4167
4168 do
4169 {
4170 pid = waitpid (-1, &status, options);
4171 }
4172 while (pid == -1 && errno == EINTR);
4173
4174 if (pid <= 0)
4175 {
4176 options = WNOHANG;
4177 do
4178 {
4179 pid = waitpid (-1, &status, options);
4180 }
4181 while (pid == -1 && errno == EINTR);
4182 }
4183
4184 if (pid <= 0)
4185 /* No more children reporting events. */
4186 break;
4187
4188 if (debug_linux_nat_async)
4189 fprintf_unfiltered (gdb_stdlog, "\
4190get_pending_events: pid(%d), status(%x), options (%x)\n",
4191 pid, status, options);
4192
4193 linux_nat_event_pipe_push (pid, status, options);
4194 }
4195
4196 if (debug_linux_nat_async)
4197 fprintf_unfiltered (gdb_stdlog, "\
4198get_pending_events: linux_nat_num_queued_events(%d)\n",
4199 linux_nat_num_queued_events);
4200}
4201
4202/* SIGCHLD handler for async mode. */
4203
4204static void
4205async_sigchld_handler (int signo)
4206{
4207 if (debug_linux_nat_async)
4208 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
4209
4210 get_pending_events ();
4211}
4212
84e46146 4213/* Set SIGCHLD handling state to STATE. Returns previous state. */
b84876c2 4214
84e46146
PA
4215static enum sigchld_state
4216linux_nat_async_events (enum sigchld_state state)
b84876c2 4217{
84e46146 4218 enum sigchld_state current_state = linux_nat_async_events_state;
b84876c2
PA
4219
4220 if (debug_linux_nat_async)
4221 fprintf_unfiltered (gdb_stdlog,
84e46146 4222 "LNAE: state(%d): linux_nat_async_events_state(%d), "
b84876c2 4223 "linux_nat_num_queued_events(%d)\n",
84e46146 4224 state, linux_nat_async_events_state,
b84876c2
PA
4225 linux_nat_num_queued_events);
4226
84e46146 4227 if (current_state != state)
b84876c2
PA
4228 {
4229 sigset_t mask;
4230 sigemptyset (&mask);
4231 sigaddset (&mask, SIGCHLD);
84e46146
PA
4232
4233 /* Always block before changing state. */
4234 sigprocmask (SIG_BLOCK, &mask, NULL);
4235
4236 /* Set new state. */
4237 linux_nat_async_events_state = state;
4238
4239 switch (state)
b84876c2 4240 {
84e46146
PA
4241 case sigchld_sync:
4242 {
4243 /* Block target events. */
4244 sigprocmask (SIG_BLOCK, &mask, NULL);
4245 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4246 /* Get events out of queue, and make them available to
4247 queued_waitpid / my_waitpid. */
4248 pipe_to_local_event_queue ();
4249 }
4250 break;
4251 case sigchld_async:
4252 {
4253 /* Unblock target events for async mode. */
4254
4255 sigprocmask (SIG_BLOCK, &mask, NULL);
4256
4257 /* Put events we already waited on, in the pipe first, so
4258 events are FIFO. */
4259 local_event_queue_to_pipe ();
4260 /* While in masked async, we may have not collected all
4261 the pending events. Get them out now. */
4262 get_pending_events ();
4263
4264 /* Let'em come. */
4265 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4266 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4267 }
4268 break;
4269 case sigchld_default:
4270 {
4271 /* SIGCHLD default mode. */
4272 sigaction (SIGCHLD, &sigchld_default_action, NULL);
4273
4274 /* Get events out of queue, and make them available to
4275 queued_waitpid / my_waitpid. */
4276 pipe_to_local_event_queue ();
4277
4278 /* Unblock SIGCHLD. */
4279 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4280 }
4281 break;
b84876c2
PA
4282 }
4283 }
4284
4285 return current_state;
4286}
4287
4288static int async_terminal_is_ours = 1;
4289
4290/* target_terminal_inferior implementation. */
4291
4292static void
4293linux_nat_terminal_inferior (void)
4294{
4295 if (!target_is_async_p ())
4296 {
4297 /* Async mode is disabled. */
4298 terminal_inferior ();
4299 return;
4300 }
4301
4302 /* GDB should never give the terminal to the inferior, if the
4303 inferior is running in the background (run&, continue&, etc.).
4304 This check can be removed when the common code is fixed. */
4305 if (!sync_execution)
4306 return;
4307
4308 terminal_inferior ();
4309
4310 if (!async_terminal_is_ours)
4311 return;
4312
4313 delete_file_handler (input_fd);
4314 async_terminal_is_ours = 0;
4315 set_sigint_trap ();
4316}
4317
4318/* target_terminal_ours implementation. */
4319
4320void
4321linux_nat_terminal_ours (void)
4322{
4323 if (!target_is_async_p ())
4324 {
4325 /* Async mode is disabled. */
4326 terminal_ours ();
4327 return;
4328 }
4329
4330 /* GDB should never give the terminal to the inferior if the
4331 inferior is running in the background (run&, continue&, etc.),
4332 but claiming it back sure should. */
4333 terminal_ours ();
4334
4335 if (!sync_execution)
4336 return;
4337
4338 if (async_terminal_is_ours)
4339 return;
4340
4341 clear_sigint_trap ();
4342 add_file_handler (input_fd, stdin_event_handler, 0);
4343 async_terminal_is_ours = 1;
4344}
4345
4346static void (*async_client_callback) (enum inferior_event_type event_type,
4347 void *context);
4348static void *async_client_context;
4349
4350static void
4351linux_nat_async_file_handler (int error, gdb_client_data client_data)
4352{
4353 async_client_callback (INF_REG_EVENT, async_client_context);
4354}
4355
4356/* target_async implementation. */
4357
4358static void
4359linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4360 void *context), void *context)
4361{
c6ebd6cf 4362 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
4363 internal_error (__FILE__, __LINE__,
4364 "Calling target_async when async is masked");
4365
4366 if (callback != NULL)
4367 {
4368 async_client_callback = callback;
4369 async_client_context = context;
4370 add_file_handler (linux_nat_event_pipe[0],
4371 linux_nat_async_file_handler, NULL);
4372
84e46146 4373 linux_nat_async_events (sigchld_async);
b84876c2
PA
4374 }
4375 else
4376 {
4377 async_client_callback = callback;
4378 async_client_context = context;
4379
84e46146 4380 linux_nat_async_events (sigchld_sync);
b84876c2
PA
4381 delete_file_handler (linux_nat_event_pipe[0]);
4382 }
4383 return;
4384}
4385
252fbfc8
PA
4386/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
4387 event came out. */
4388
4c28f408 4389static int
252fbfc8 4390linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4391{
252fbfc8
PA
4392 ptid_t ptid = * (ptid_t *) data;
4393
4394 if (ptid_equal (lwp->ptid, ptid)
4395 || ptid_equal (minus_one_ptid, ptid)
4396 || (ptid_is_pid (ptid)
4397 && ptid_get_pid (ptid) == ptid_get_pid (lwp->ptid)))
4398 {
4399 if (!lwp->stopped)
4400 {
4401 int pid, status;
4402
4403 if (debug_linux_nat)
4404 fprintf_unfiltered (gdb_stdlog,
4405 "LNSL: running -> suspending %s\n",
4406 target_pid_to_str (lwp->ptid));
4407
4408 /* Peek once, to check if we've already waited for this
4409 LWP. */
4410 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4411 lwp->cloned ? __WCLONE : 0, 1 /* peek */);
4412
4413 if (pid == -1)
4414 {
4415 ptid_t ptid = lwp->ptid;
4416
4417 stop_callback (lwp, NULL);
4418 stop_wait_callback (lwp, NULL);
4419
4420 /* If the lwp exits while we try to stop it, there's
4421 nothing else to do. */
4422 lwp = find_lwp_pid (ptid);
4423 if (lwp == NULL)
4424 return 0;
4425
4426 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4427 lwp->cloned ? __WCLONE : 0,
4428 1 /* peek */);
4429 }
4430
4431 /* If we didn't collect any signal other than SIGSTOP while
4432 stopping the LWP, push a SIGNAL_0 event. In either case,
4433 the event-loop will end up calling target_wait which will
4434 collect these. */
4435 if (pid == -1)
4436 push_waitpid (ptid_get_lwp (lwp->ptid), W_STOPCODE (0),
4437 lwp->cloned ? __WCLONE : 0);
4438 }
4439 else
4440 {
4441 /* Already known to be stopped; do nothing. */
4442
4443 if (debug_linux_nat)
4444 {
4445 if (find_thread_pid (lwp->ptid)->stop_requested)
4446 fprintf_unfiltered (gdb_stdlog, "\
4447LNSL: already stopped/stop_requested %s\n",
4448 target_pid_to_str (lwp->ptid));
4449 else
4450 fprintf_unfiltered (gdb_stdlog, "\
4451LNSL: already stopped/no stop_requested yet %s\n",
4452 target_pid_to_str (lwp->ptid));
4453 }
4454 }
4455 }
4c28f408
PA
4456 return 0;
4457}
4458
4459static void
4460linux_nat_stop (ptid_t ptid)
4461{
4462 if (non_stop)
4463 {
252fbfc8
PA
4464 linux_nat_async_events (sigchld_sync);
4465 iterate_over_lwps (linux_nat_stop_lwp, &ptid);
4466 target_async (inferior_event_handler, 0);
4c28f408
PA
4467 }
4468 else
4469 linux_ops->to_stop (ptid);
4470}
4471
f973ed9c
DJ
4472void
4473linux_nat_add_target (struct target_ops *t)
4474{
f973ed9c
DJ
4475 /* Save the provided single-threaded target. We save this in a separate
4476 variable because another target we've inherited from (e.g. inf-ptrace)
4477 may have saved a pointer to T; we want to use it for the final
4478 process stratum target. */
4479 linux_ops_saved = *t;
4480 linux_ops = &linux_ops_saved;
4481
4482 /* Override some methods for multithreading. */
b84876c2 4483 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4484 t->to_attach = linux_nat_attach;
4485 t->to_detach = linux_nat_detach;
4486 t->to_resume = linux_nat_resume;
4487 t->to_wait = linux_nat_wait;
4488 t->to_xfer_partial = linux_nat_xfer_partial;
4489 t->to_kill = linux_nat_kill;
4490 t->to_mourn_inferior = linux_nat_mourn_inferior;
4491 t->to_thread_alive = linux_nat_thread_alive;
4492 t->to_pid_to_str = linux_nat_pid_to_str;
4493 t->to_has_thread_control = tc_schedlock;
4494
b84876c2
PA
4495 t->to_can_async_p = linux_nat_can_async_p;
4496 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4497 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
4498 t->to_async = linux_nat_async;
4499 t->to_async_mask = linux_nat_async_mask;
4500 t->to_terminal_inferior = linux_nat_terminal_inferior;
4501 t->to_terminal_ours = linux_nat_terminal_ours;
4502
4c28f408
PA
4503 /* Methods for non-stop support. */
4504 t->to_stop = linux_nat_stop;
4505
f973ed9c
DJ
4506 /* We don't change the stratum; this target will sit at
4507 process_stratum and thread_db will sit at thread_stratum. This
4508 is a little strange, since this is a multi-threaded-capable
4509 target, but we want to be on the stack below thread_db, and we
4510 also want to be used for single-threaded processes. */
4511
4512 add_target (t);
4513
4514 /* TODO: Eliminate this and have libthread_db use
4515 find_target_beneath. */
4516 thread_db_init (t);
4517}
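
/* Usage sketch (illustrative only; the foo_* names are hypothetical
   placeholders, not functions from this tree).  An architecture's
   native backend typically builds on the generic target like this:  */
#if 0
static void
_initialize_foo_linux_nat (void)
{
  struct target_ops *t;

  /* Create the generic GNU/Linux ptrace target.  */
  t = linux_target ();
  /* Override the register access methods for this architecture.  */
  t->to_fetch_registers = foo_linux_fetch_inferior_registers;
  t->to_store_registers = foo_linux_store_inferior_registers;
  /* Register it; this layers the multi-threading support on top.  */
  linux_nat_add_target (t);
}
#endif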
4518
9f0bdab8
DJ
4519/* Register a method to call whenever a new thread is attached. */
4520void
4521linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4522{
4523 /* Save the pointer. We only support a single registered instance
4524 of the GNU/Linux native target, so we do not need to map this to
4525 T. */
4526 linux_nat_new_thread = new_thread;
4527}
4528
4529/* Return the saved siginfo associated with PTID. */
4530struct siginfo *
4531linux_nat_get_siginfo (ptid_t ptid)
4532{
4533 struct lwp_info *lp = find_lwp_pid (ptid);
4534
4535 gdb_assert (lp != NULL);
4536
4537 return &lp->siginfo;
4538}
4539
c6ebd6cf
VP
4540/* Enable/Disable async mode. */
4541
4542static void
4543linux_nat_setup_async (void)
4544{
4545 if (pipe (linux_nat_event_pipe) == -1)
4546 internal_error (__FILE__, __LINE__,
4547 "creating event pipe failed.");
4548 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4549 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4550}
4551
d6b0e80f
AC
4552void
4553_initialize_linux_nat (void)
4554{
b84876c2 4555 sigset_t mask;
dba24537 4556
1bedd215
AC
4557 add_info ("proc", linux_nat_info_proc_cmd, _("\
4558Show /proc process information about any running process.\n\
dba24537
AC
4559Specify any process id, or use the program being debugged by default.\n\
4560Specify any of the following keywords for detailed info:\n\
4561 mappings -- list of mapped memory regions.\n\
4562 stat -- list process information from /proc/PID/stat.\n\
4563 status -- list process information from /proc/PID/status.\n\
1bedd215 4564 all -- list all available /proc info."));
d6b0e80f 4565
b84876c2
PA
4566 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4567 &debug_linux_nat, _("\
4568Set debugging of GNU/Linux lwp module."), _("\
4569Show debugging of GNU/Linux lwp module."), _("\
4570Enables printf debugging output."),
4571 NULL,
4572 show_debug_linux_nat,
4573 &setdebuglist, &showdebuglist);
4574
4575 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4576 &debug_linux_nat_async, _("\
4577Set debugging of GNU/Linux async lwp module."), _("\
4578Show debugging of GNU/Linux async lwp module."), _("\
4579Enables printf debugging output."),
4580 NULL,
4581 show_debug_linux_nat_async,
4582 &setdebuglist, &showdebuglist);
4583
84e46146
PA
4584 /* Get the default SIGCHLD action. Used while forking an inferior
4585 (see linux_nat_create_inferior/linux_nat_async_events). */
4586 sigaction (SIGCHLD, NULL, &sigchld_default_action);
4587
b84876c2
PA
4588 /* Block SIGCHLD by default. Doing this early prevents it getting
4589 unblocked if an exception is thrown due to an error while the
4590 inferior is starting (sigsetjmp/siglongjmp). */
4591 sigemptyset (&mask);
4592 sigaddset (&mask, SIGCHLD);
4593 sigprocmask (SIG_BLOCK, &mask, NULL);
4594
4595 /* Save this mask as the default. */
d6b0e80f
AC
4596 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4597
b84876c2
PA
4598 /* The synchronous SIGCHLD handler. */
4599 sync_sigchld_action.sa_handler = sigchld_handler;
4600 sigemptyset (&sync_sigchld_action.sa_mask);
4601 sync_sigchld_action.sa_flags = SA_RESTART;
4602
4603 /* Make it the default. */
4604 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
d6b0e80f
AC
4605
4606 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4607 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4608 sigdelset (&suspend_mask, SIGCHLD);
4609
b84876c2
PA
4610 /* SIGCHLD handler for async mode. */
4611 async_sigchld_action.sa_handler = async_sigchld_handler;
4612 sigemptyset (&async_sigchld_action.sa_mask);
4613 async_sigchld_action.sa_flags = SA_RESTART;
d6b0e80f 4614
c6ebd6cf 4615 linux_nat_setup_async ();
10568435
JK
4616
4617 add_setshow_boolean_cmd ("disable-randomization", class_support,
4618 &disable_randomization, _("\
4619Set disabling of debuggee's virtual address space randomization."), _("\
4620Show disabling of debuggee's virtual address space randomization."), _("\
4621When this mode is on (which is the default), randomization of the virtual\n\
4622address space is disabled. Standalone programs run with the randomization\n\
4623enabled by default on some platforms."),
4624 &set_disable_randomization,
4625 &show_disable_randomization,
4626 &setlist, &showlist);
d6b0e80f
AC
4627}
4628\f
4629
4630/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4631 the GNU/Linux Threads library and therefore doesn't really belong
4632 here. */
4633
4634/* Read variable NAME in the target and return its value if found.
4635 Otherwise return zero. It is assumed that the type of the variable
4636 is `int'. */
4637
4638static int
4639get_signo (const char *name)
4640{
4641 struct minimal_symbol *ms;
4642 int signo;
4643
4644 ms = lookup_minimal_symbol (name, NULL, NULL);
4645 if (ms == NULL)
4646 return 0;
4647
8e70166d 4648 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4649 sizeof (signo)) != 0)
4650 return 0;
4651
4652 return signo;
4653}
4654
4655/* Return the set of signals used by the threads library in *SET. */
4656
4657void
4658lin_thread_get_thread_signals (sigset_t *set)
4659{
4660 struct sigaction action;
4661 int restart, cancel;
b84876c2 4662 sigset_t blocked_mask;
d6b0e80f 4663
b84876c2 4664 sigemptyset (&blocked_mask);
d6b0e80f
AC
4665 sigemptyset (set);
4666
4667 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4668 cancel = get_signo ("__pthread_sig_cancel");
4669
4670 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4671 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4672 not provide any way for the debugger to query the signal numbers -
4673 fortunately they don't change! */
4674
d6b0e80f 4675 if (restart == 0)
17fbb0bd 4676 restart = __SIGRTMIN;
d6b0e80f 4677
d6b0e80f 4678 if (cancel == 0)
17fbb0bd 4679 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4680
4681 sigaddset (set, restart);
4682 sigaddset (set, cancel);
4683
4684 /* The GNU/Linux Threads library makes terminating threads send a
4685 special "cancel" signal instead of SIGCHLD. Make sure we catch
4686 those (to prevent them from terminating GDB itself, which is
4687 likely to be their default action) and treat them the same way as
4688 SIGCHLD. */
4689
4690 action.sa_handler = sigchld_handler;
4691 sigemptyset (&action.sa_mask);
58aecb61 4692 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4693 sigaction (cancel, &action, NULL);
4694
4695 /* We block the "cancel" signal throughout this code ... */
4696 sigaddset (&blocked_mask, cancel);
4697 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4698
4699 /* ... except during a sigsuspend. */
4700 sigdelset (&suspend_mask, cancel);
4701}