/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

21#include "defs.h"
22#include "inferior.h"
23#include "target.h"
d6b0e80f 24#include "gdb_string.h"
3993f6b1 25#include "gdb_wait.h"
d6b0e80f
AC
26#include "gdb_assert.h"
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
ac264b3b 33#include "linux-fork.h"
d6b0e80f
AC
34#include "gdbthread.h"
35#include "gdbcmd.h"
36#include "regcache.h"
4f844a66 37#include "regset.h"
10d6c8cd
DJ
38#include "inf-ptrace.h"
39#include "auxv.h"
dba24537
AC
40#include <sys/param.h> /* for MAXPATHLEN */
41#include <sys/procfs.h> /* for elf_gregset etc. */
42#include "elf-bfd.h" /* for elfcore_write_* */
43#include "gregset.h" /* for gregset */
44#include "gdbcore.h" /* for get_exec_file */
45#include <ctype.h> /* for isdigit */
46#include "gdbthread.h" /* for struct thread_info etc. */
47#include "gdb_stat.h" /* for struct stat */
48#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
49#include "inf-loop.h"
50#include "event-loop.h"
51#include "event-top.h"
07e059b5
VP
52#include <pwd.h>
53#include <sys/types.h>
54#include "gdb_dirent.h"
55#include "xml-support.h"
dba24537 56
10568435
JK
57#ifdef HAVE_PERSONALITY
58# include <sys/personality.h>
59# if !HAVE_DECL_ADDR_NO_RANDOMIZE
60# define ADDR_NO_RANDOMIZE 0x0040000
61# endif
62#endif /* HAVE_PERSONALITY */
63
/* This comment documents the high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good
enough.  Prior to version 2.4, Linux can either wait for events in the
main thread, or for events in secondary threads, but not for both at
once.  (2.4 has the __WALL flag for this.)  So, if we use a blocking
waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid
to get an event in the main process, if any.  Second, we use
non-blocking waitpid with the __WCLONE flag to check for events in
cloned processes.  If nothing is found, we use sigsuspend to wait for
SIGCHLD.  When SIGCHLD arrives, it means something happened to a child
process -- and SIGCHLD will be delivered both for events in the main
debugged process and in cloned processes.  As soon as we know there's
an event, we get back to calling non-blocking waitpid with and without
__WCLONE.

Note that SIGCHLD should be blocked between the waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
while it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should notify the GDB main event loop
whenever there's an unprocessed event from the target.  The only way
to notify this event loop is to make it wait on input from a pipe, and
write something to the pipe whenever there's an event.  Obviously,
failing to notify the event loop when there is a target event is bad.
Notifying the event loop when there is no event from the target is
less harmful: linux-nat.c will detect that there's no event and report
an event of type TARGET_WAITKIND_IGNORE, but that wastes time and is
better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Also, the
event is extracted from the target using waitpid and stored for future
use.  Whenever GDB core decides to handle the event, and calls into
linux-nat.c, we disable SIGCHLD and process things as in sync mode,
except that before each waitpid call we check whether there are any
previously read events.

It could happen that during event processing, we'll try to get more
events than there are events in the local queue, which will result in
a waitpid call.  Those waitpid calls, while blocking, are guaranteed
to always have something for waitpid to return.  E.g., stopping a
thread with SIGSTOP, and waiting for the lwp to stop.

The event loop is notified about new events using a pipe.  The SIGCHLD
handler does waitpid and writes the results into the pipe.  The GDB
event loop has the other end of the pipe among its event sources.
When the event loop starts to process an event and calls a function in
linux-nat.c, all events from the pipe are transferred into a local
queue and SIGCHLD is blocked.  Further processing goes as in sync
mode.  Before we return from linux_nat_wait, we transfer all
unprocessed events from the local queue back to the pipe, so that when
we get back to the event loop, the event loop will notice there's
something more to do.

SIGCHLD is blocked when we're inside target_wait, so that should we
actually want to wait for some more events, the SIGCHLD handler does
not steal them from us.  Technically, it would be possible to add new
events to the local queue instead, but that's about the same amount of
work as blocking SIGCHLD.

This moving of events from the pipe into the local queue and back into
the pipe when we enter/leave linux-nat.c is somewhat ugly.
Unfortunately, the GDB event loop is home-grown and incapable of
waiting on any kind of queue.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of
another signal is not entirely significant; we just need a signal to
be delivered, so that we can intercept it.  SIGSTOP's advantage is
that it can not be blocked.  A disadvantage is that it is not a
real-time signal, so it can only be queued once; we do not keep track
of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But
we can't use them, because they have special behavior when the signal
is generated - not when it is delivered.  SIGCONT resumes the entire
thread group and SIGKILL kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the
thread we tkill'd.  But we never let the SIGSTOP be delivered; we
always intercept and cancel it (by PTRACE_CONT without passing
SIGSTOP).

We could use a real-time signal instead.  This would solve those
problems; we could use PTRACE_GETSIGINFO to locate the specific stop
signals sent by GDB.  But we would still have to have some support for
SIGSTOP, since PTRACE_ATTACH generates it, and there are races with
trying to find a signal that is not blocked.  */
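
/* The function below is an illustrative sketch only; it is not part
   of GDB and is not called from anywhere.  It shows the sync-mode
   waiting pattern described above under simplified assumptions: a
   SIGCHLD handler is already installed (otherwise the signal would be
   discarded and sigsuspend would never return), and the bookkeeping
   that linux_nat_wait really performs is omitted.  The function name
   is hypothetical.  */

static int
example_sync_wait (int *status)
{
  sigset_t chld_mask, suspend;
  int pid;

  /* Block SIGCHLD around the waitpid polls.  If a child changes state
     after the polls but before sigsuspend, the signal just becomes
     pending and sigsuspend returns immediately, so no event is
     missed.  */
  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &suspend);
  sigdelset (&suspend, SIGCHLD);

  for (;;)
    {
      /* Poll the main process first, then cloned LWPs.  */
      pid = waitpid (-1, status, WNOHANG);
      if (pid <= 0)
	pid = waitpid (-1, status, WNOHANG | __WCLONE);
      if (pid > 0)
	break;

      /* Nothing yet; sleep until SIGCHLD is delivered (or noticed as
	 already pending).  */
      sigsuspend (&suspend);
    }

  sigprocmask (SIG_UNBLOCK, &chld_mask, NULL);
  return pid;
}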

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* If the system headers did not provide the constants, hard-code the normal
   values.  */
#ifndef PTRACE_EVENT_FORK

#define PTRACE_SETOPTIONS	0x4200
#define PTRACE_GETEVENTMSG	0x4201

/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD	0x00000001
#define PTRACE_O_TRACEFORK	0x00000002
#define PTRACE_O_TRACEVFORK	0x00000004
#define PTRACE_O_TRACECLONE	0x00000008
#define PTRACE_O_TRACEEXEC	0x00000010
#define PTRACE_O_TRACEVFORKDONE	0x00000020
#define PTRACE_O_TRACEEXIT	0x00000040

/* Wait extended result codes for the above trace options.  */
#define PTRACE_EVENT_FORK	1
#define PTRACE_EVENT_VFORK	2
#define PTRACE_EVENT_CLONE	3
#define PTRACE_EVENT_EXEC	4
#define PTRACE_EVENT_VFORK_DONE	5
#define PTRACE_EVENT_EXIT	6

#endif /* PTRACE_EVENT_FORK */

/* We can't always assume that this flag is available, but all systems
   with the ptrace event handlers also have __WALL, so it's safe to use
   here.  */
#ifndef __WALL
#define __WALL          0x40000000 /* Wait for any child.  */
#endif

#ifndef PTRACE_GETSIGINFO
#define PTRACE_GETSIGINFO    0x4202
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (ptid_t);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static LONGEST (*super_xfer_partial) (struct target_ops *,
				      enum target_object,
				      const char *, gdb_byte *,
				      const gdb_byte *,
				      ULONGEST, LONGEST);

static int debug_linux_nat;
920d2a44
AC
217static void
218show_debug_linux_nat (struct ui_file *file, int from_tty,
219 struct cmd_list_element *c, const char *value)
220{
221 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
222 value);
223}
d6b0e80f 224
b84876c2
PA
225static int debug_linux_nat_async = 0;
226static void
227show_debug_linux_nat_async (struct ui_file *file, int from_tty,
228 struct cmd_list_element *c, const char *value)
229{
230 fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
231 value);
232}
233
10568435
JK
234static int disable_randomization = 1;
235
236static void
237show_disable_randomization (struct ui_file *file, int from_tty,
238 struct cmd_list_element *c, const char *value)
239{
240#ifdef HAVE_PERSONALITY
241 fprintf_filtered (file, _("\
242Disabling randomization of debuggee's virtual address space is %s.\n"),
243 value);
244#else /* !HAVE_PERSONALITY */
245 fputs_filtered (_("\
246Disabling randomization of debuggee's virtual address space is unsupported on\n\
247this platform.\n"), file);
248#endif /* !HAVE_PERSONALITY */
249}
250
251static void
252set_disable_randomization (char *args, int from_tty, struct cmd_list_element *c)
253{
254#ifndef HAVE_PERSONALITY
255 error (_("\
256Disabling randomization of debuggee's virtual address space is unsupported on\n\
257this platform."));
258#endif /* !HAVE_PERSONALITY */
259}
260
9016a515
DJ
261static int linux_parent_pid;
262
ae087d01
DJ
263struct simple_pid_list
264{
265 int pid;
3d799a95 266 int status;
ae087d01
DJ
267 struct simple_pid_list *next;
268};
269struct simple_pid_list *stopped_pids;
270
3993f6b1
DJ
271/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
272 can not be used, 1 if it can. */
273
274static int linux_supports_tracefork_flag = -1;
275
9016a515
DJ
276/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
277 PTRACE_O_TRACEVFORKDONE. */
278
279static int linux_supports_tracevforkdone_flag = -1;
280
b84876c2
PA
281/* Async mode support */
282
b84876c2
PA
283/* Zero if the async mode, although enabled, is masked, which means
284 linux_nat_wait should behave as if async mode was off. */
285static int linux_nat_async_mask_value = 1;
286
287/* The read/write ends of the pipe registered as waitable file in the
288 event loop. */
289static int linux_nat_event_pipe[2] = { -1, -1 };
290
291/* Number of queued events in the pipe. */
292static volatile int linux_nat_num_queued_events;
293
84e46146 294/* The possible SIGCHLD handling states. */
b84876c2 295
84e46146
PA
296enum sigchld_state
297{
298 /* SIGCHLD disabled, with action set to sigchld_handler, for the
299 sigsuspend in linux_nat_wait. */
300 sigchld_sync,
301 /* SIGCHLD enabled, with action set to async_sigchld_handler. */
302 sigchld_async,
303 /* Set SIGCHLD to default action. Used while creating an
304 inferior. */
305 sigchld_default
306};
307
308/* The current SIGCHLD handling state. */
309static enum sigchld_state linux_nat_async_events_state;
310
311static enum sigchld_state linux_nat_async_events (enum sigchld_state enable);
b84876c2
PA
312static void pipe_to_local_event_queue (void);
313static void local_event_queue_to_pipe (void);
314static void linux_nat_event_pipe_push (int pid, int status, int options);
315static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
316static void linux_nat_set_async_mode (int on);
317static void linux_nat_async (void (*callback)
318 (enum inferior_event_type event_type, void *context),
319 void *context);
320static int linux_nat_async_mask (int mask);
a0ef4274 321static int kill_lwp (int lwpid, int signo);
b84876c2 322
4c28f408
PA
323static int stop_callback (struct lwp_info *lp, void *data);
324
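
/* Illustrative sketch only, not code GDB runs: the "Use of signals"
   scheme described in the comment at the top of this file.  A thread
   is stopped by sending it SIGSTOP with the kill_lwp helper declared
   above (which sends a signal to a single LWP); the SIGSTOP is then
   cancelled rather than delivered, by resuming the thread with
   PTRACE_CONT and signal 0, so the rest of the thread group keeps
   running.  This assumes a kernel that accepts __WALL, and the
   function name is hypothetical.  */

static void
example_stop_and_cancel_sigstop (pid_t lwpid)
{
  int status;

  /* Ask the kernel to stop this specific thread.  */
  kill_lwp (lwpid, SIGSTOP);

  /* Wait for the stop to be reported to us, the tracer.  */
  if (waitpid (lwpid, &status, __WALL) == lwpid
      && WIFSTOPPED (status)
      && WSTOPSIG (status) == SIGSTOP)
    {
      /* Swallow the SIGSTOP: continuing with signal 0 means it is
	 never delivered, so the whole thread group is not stopped.  */
      ptrace (PTRACE_CONT, lwpid, 0, 0);
    }
}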
b84876c2
PA
325/* Captures the result of a successful waitpid call, along with the
326 options used in that call. */
327struct waitpid_result
328{
329 int pid;
330 int status;
331 int options;
332 struct waitpid_result *next;
333};
334
335/* A singly-linked list of the results of the waitpid calls performed
336 in the async SIGCHLD handler. */
337static struct waitpid_result *waitpid_queue = NULL;
338
252fbfc8
PA
339/* Similar to `waitpid', but check the local event queue instead of
340 querying the kernel queue. If PEEK, don't remove the event found
341 from the queue. */
342
b84876c2 343static int
252fbfc8 344queued_waitpid_1 (int pid, int *status, int flags, int peek)
b84876c2
PA
345{
346 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
347
348 if (debug_linux_nat_async)
349 fprintf_unfiltered (gdb_stdlog,
350 "\
84e46146
PA
351QWPID: linux_nat_async_events_state(%d), linux_nat_num_queued_events(%d)\n",
352 linux_nat_async_events_state,
b84876c2
PA
353 linux_nat_num_queued_events);
354
355 if (flags & __WALL)
356 {
357 for (; msg; prev = msg, msg = msg->next)
358 if (pid == -1 || pid == msg->pid)
359 break;
360 }
361 else if (flags & __WCLONE)
362 {
363 for (; msg; prev = msg, msg = msg->next)
364 if (msg->options & __WCLONE
365 && (pid == -1 || pid == msg->pid))
366 break;
367 }
368 else
369 {
370 for (; msg; prev = msg, msg = msg->next)
371 if ((msg->options & __WCLONE) == 0
372 && (pid == -1 || pid == msg->pid))
373 break;
374 }
375
376 if (msg)
377 {
378 int pid;
379
b84876c2
PA
380 if (status)
381 *status = msg->status;
382 pid = msg->pid;
383
384 if (debug_linux_nat_async)
385 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
386 pid, msg->status);
252fbfc8
PA
387
388 if (!peek)
389 {
390 if (prev)
391 prev->next = msg->next;
392 else
393 waitpid_queue = msg->next;
394
395 msg->next = NULL;
396 xfree (msg);
397 }
b84876c2
PA
398
399 return pid;
400 }
401
402 if (debug_linux_nat_async)
403 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
404
405 if (status)
406 *status = 0;
407 return -1;
408}
409
252fbfc8
PA
410/* Similar to `waitpid', but check the local event queue. */
411
412static int
413queued_waitpid (int pid, int *status, int flags)
414{
415 return queued_waitpid_1 (pid, status, flags, 0);
416}
417
b84876c2
PA
418static void
419push_waitpid (int pid, int status, int options)
420{
421 struct waitpid_result *event, *new_event;
422
423 new_event = xmalloc (sizeof (*new_event));
424 new_event->pid = pid;
425 new_event->status = status;
426 new_event->options = options;
427 new_event->next = NULL;
428
429 if (waitpid_queue)
430 {
431 for (event = waitpid_queue;
432 event && event->next;
433 event = event->next)
434 ;
435
436 event->next = new_event;
437 }
438 else
439 waitpid_queue = new_event;
440}
441
442/* Drain all queued events of PID.  If PID is -1, all queued events
443 are drained.  */
444static void
445drain_queued_events (int pid)
446{
447 while (queued_waitpid (pid, NULL, __WALL) != -1)
448 ;
449}
450
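/* Illustrative sketch only -- this is not the SIGCHLD handler that
   linux-nat.c actually installs (the real handler is defined later in
   the file and is not shown in this excerpt).  It shows one way the
   async scheme described at the top of the file could drain the
   kernel wait queue into the event pipe, using only declarations that
   appear above.  The function name is hypothetical.  */

static void
example_async_sigchld_handler (int signo)
{
  int pid, status;

  /* Collect every pending event without blocking, for ordinary
     children first and then for cloned LWPs, and push each one to the
     event pipe so the GDB event loop wakes up.  */
  while ((pid = waitpid (-1, &status, WNOHANG)) > 0)
    linux_nat_event_pipe_push (pid, status, 0);

  while ((pid = waitpid (-1, &status, WNOHANG | __WCLONE)) > 0)
    linux_nat_event_pipe_push (pid, status, __WCLONE);
}
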
ae087d01
DJ
451\f
452/* Trivial list manipulation functions to keep track of a list of
453 new stopped processes. */
454static void
3d799a95 455add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
456{
457 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
458 new_pid->pid = pid;
3d799a95 459 new_pid->status = status;
ae087d01
DJ
460 new_pid->next = *listp;
461 *listp = new_pid;
462}
463
464static int
3d799a95 465pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
ae087d01
DJ
466{
467 struct simple_pid_list **p;
468
469 for (p = listp; *p != NULL; p = &(*p)->next)
470 if ((*p)->pid == pid)
471 {
472 struct simple_pid_list *next = (*p)->next;
3d799a95 473 *status = (*p)->status;
ae087d01
DJ
474 xfree (*p);
475 *p = next;
476 return 1;
477 }
478 return 0;
479}
480
3d799a95
DJ
481static void
482linux_record_stopped_pid (int pid, int status)
ae087d01 483{
3d799a95 484 add_to_pid_list (&stopped_pids, pid, status);
ae087d01
DJ
485}
486
3993f6b1
DJ
487\f
488/* A helper function for linux_test_for_tracefork, called after fork (). */
489
490static void
491linux_tracefork_child (void)
492{
493 int ret;
494
495 ptrace (PTRACE_TRACEME, 0, 0, 0);
496 kill (getpid (), SIGSTOP);
497 fork ();
48bb3cce 498 _exit (0);
3993f6b1
DJ
499}
500
b84876c2
PA
501/* Wrapper function for waitpid which handles EINTR, and checks for
502 locally queued events. */
b957e937
DJ
503
504static int
505my_waitpid (int pid, int *status, int flags)
506{
507 int ret;
b84876c2
PA
508
509 /* There should be no concurrent calls to waitpid. */
84e46146 510 gdb_assert (linux_nat_async_events_state == sigchld_sync);
b84876c2
PA
511
512 ret = queued_waitpid (pid, status, flags);
513 if (ret != -1)
514 return ret;
515
b957e937
DJ
516 do
517 {
518 ret = waitpid (pid, status, flags);
519 }
520 while (ret == -1 && errno == EINTR);
521
522 return ret;
523}
524
525/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.
526
527 First, we try to enable fork tracing on ORIGINAL_PID. If this fails,
528 we know that the feature is not available. This may change the tracing
529 options for ORIGINAL_PID, but we'll be setting them shortly anyway.
530
531 However, if it succeeds, we don't know for sure that the feature is
532 available; old versions of PTRACE_SETOPTIONS ignored unknown options. We
3993f6b1 533 create a child process, attach to it, use PTRACE_SETOPTIONS to enable
b957e937
DJ
534 fork tracing, and let it fork. If the process exits, we assume that we
535 can't use TRACEFORK; if we get the fork notification, and we can extract
536 the new child's PID, then we assume that we can. */
3993f6b1
DJ
537
538static void
b957e937 539linux_test_for_tracefork (int original_pid)
3993f6b1
DJ
540{
541 int child_pid, ret, status;
542 long second_pid;
4c28f408
PA
543 enum sigchld_state async_events_original_state;
544
545 async_events_original_state = linux_nat_async_events (sigchld_sync);
3993f6b1 546
b957e937
DJ
547 linux_supports_tracefork_flag = 0;
548 linux_supports_tracevforkdone_flag = 0;
549
550 ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
551 if (ret != 0)
552 return;
553
3993f6b1
DJ
554 child_pid = fork ();
555 if (child_pid == -1)
e2e0b3e5 556 perror_with_name (("fork"));
3993f6b1
DJ
557
558 if (child_pid == 0)
559 linux_tracefork_child ();
560
b957e937 561 ret = my_waitpid (child_pid, &status, 0);
3993f6b1 562 if (ret == -1)
e2e0b3e5 563 perror_with_name (("waitpid"));
3993f6b1 564 else if (ret != child_pid)
8a3fe4f8 565 error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
3993f6b1 566 if (! WIFSTOPPED (status))
8a3fe4f8 567 error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);
3993f6b1 568
3993f6b1
DJ
569 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
570 if (ret != 0)
571 {
b957e937
DJ
572 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
573 if (ret != 0)
574 {
8a3fe4f8 575 warning (_("linux_test_for_tracefork: failed to kill child"));
4c28f408 576 linux_nat_async_events (async_events_original_state);
b957e937
DJ
577 return;
578 }
579
580 ret = my_waitpid (child_pid, &status, 0);
581 if (ret != child_pid)
8a3fe4f8 582 warning (_("linux_test_for_tracefork: failed to wait for killed child"));
b957e937 583 else if (!WIFSIGNALED (status))
8a3fe4f8
AC
584 warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
585 "killed child"), status);
b957e937 586
4c28f408 587 linux_nat_async_events (async_events_original_state);
3993f6b1
DJ
588 return;
589 }
590
9016a515
DJ
591 /* Check whether PTRACE_O_TRACEVFORKDONE is available. */
592 ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
593 PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
594 linux_supports_tracevforkdone_flag = (ret == 0);
595
b957e937
DJ
596 ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
597 if (ret != 0)
8a3fe4f8 598 warning (_("linux_test_for_tracefork: failed to resume child"));
b957e937
DJ
599
600 ret = my_waitpid (child_pid, &status, 0);
601
3993f6b1
DJ
602 if (ret == child_pid && WIFSTOPPED (status)
603 && status >> 16 == PTRACE_EVENT_FORK)
604 {
605 second_pid = 0;
606 ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
607 if (ret == 0 && second_pid != 0)
608 {
609 int second_status;
610
611 linux_supports_tracefork_flag = 1;
b957e937
DJ
612 my_waitpid (second_pid, &second_status, 0);
613 ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
614 if (ret != 0)
8a3fe4f8 615 warning (_("linux_test_for_tracefork: failed to kill second child"));
97725dc4 616 my_waitpid (second_pid, &status, 0);
3993f6b1
DJ
617 }
618 }
b957e937 619 else
8a3fe4f8
AC
620 warning (_("linux_test_for_tracefork: unexpected result from waitpid "
621 "(%d, status 0x%x)"), ret, status);
3993f6b1 622
b957e937
DJ
623 ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
624 if (ret != 0)
8a3fe4f8 625 warning (_("linux_test_for_tracefork: failed to kill child"));
b957e937 626 my_waitpid (child_pid, &status, 0);
4c28f408
PA
627
628 linux_nat_async_events (async_events_original_state);
3993f6b1
DJ
629}
630
631/* Return non-zero iff we have tracefork functionality available.
632 This function also sets linux_supports_tracefork_flag. */
633
634static int
b957e937 635linux_supports_tracefork (int pid)
3993f6b1
DJ
636{
637 if (linux_supports_tracefork_flag == -1)
b957e937 638 linux_test_for_tracefork (pid);
3993f6b1
DJ
639 return linux_supports_tracefork_flag;
640}
641
9016a515 642static int
b957e937 643linux_supports_tracevforkdone (int pid)
9016a515
DJ
644{
645 if (linux_supports_tracefork_flag == -1)
b957e937 646 linux_test_for_tracefork (pid);
9016a515
DJ
647 return linux_supports_tracevforkdone_flag;
648}
649
3993f6b1 650\f
4de4c07c
DJ
651void
652linux_enable_event_reporting (ptid_t ptid)
653{
d3587048 654 int pid = ptid_get_lwp (ptid);
4de4c07c
DJ
655 int options;
656
d3587048
DJ
657 if (pid == 0)
658 pid = ptid_get_pid (ptid);
659
b957e937 660 if (! linux_supports_tracefork (pid))
4de4c07c
DJ
661 return;
662
a2f23071
DJ
663 options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
664 | PTRACE_O_TRACECLONE;
b957e937 665 if (linux_supports_tracevforkdone (pid))
9016a515
DJ
666 options |= PTRACE_O_TRACEVFORKDONE;
667
668 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
669 read-only process state. */
4de4c07c
DJ
670
671 ptrace (PTRACE_SETOPTIONS, pid, 0, options);
672}
673
6d8fd2b7
UW
674static void
675linux_child_post_attach (int pid)
4de4c07c
DJ
676{
677 linux_enable_event_reporting (pid_to_ptid (pid));
0ec9a092 678 check_for_thread_db ();
4de4c07c
DJ
679}
680
10d6c8cd 681static void
4de4c07c
DJ
682linux_child_post_startup_inferior (ptid_t ptid)
683{
684 linux_enable_event_reporting (ptid);
0ec9a092 685 check_for_thread_db ();
4de4c07c
DJ
686}
687
6d8fd2b7
UW
688static int
689linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 690{
4de4c07c
DJ
691 ptid_t last_ptid;
692 struct target_waitstatus last_status;
9016a515 693 int has_vforked;
4de4c07c
DJ
694 int parent_pid, child_pid;
695
b84876c2
PA
696 if (target_can_async_p ())
697 target_async (NULL, 0);
698
4de4c07c 699 get_last_target_status (&last_ptid, &last_status);
9016a515 700 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
d3587048
DJ
701 parent_pid = ptid_get_lwp (last_ptid);
702 if (parent_pid == 0)
703 parent_pid = ptid_get_pid (last_ptid);
3a3e9ee3 704 child_pid = PIDGET (last_status.value.related_pid);
4de4c07c
DJ
705
706 if (! follow_child)
707 {
708 /* We're already attached to the parent, by default. */
709
710 /* Before detaching from the child, remove all breakpoints from
711 it. (This won't actually modify the breakpoint list, but will
712 physically remove the breakpoints from the child.) */
9016a515
DJ
713 /* If we vforked this will remove the breakpoints from the parent
714 also, but they'll be reinserted below. */
4de4c07c
DJ
715 detach_breakpoints (child_pid);
716
ac264b3b
MS
717 /* Detach new forked process? */
718 if (detach_fork)
f75c00e4 719 {
e85a822c 720 if (info_verbose || debug_linux_nat)
ac264b3b
MS
721 {
722 target_terminal_ours ();
723 fprintf_filtered (gdb_stdlog,
724 "Detaching after fork from child process %d.\n",
725 child_pid);
726 }
4de4c07c 727
ac264b3b
MS
728 ptrace (PTRACE_DETACH, child_pid, 0, 0);
729 }
730 else
731 {
732 struct fork_info *fp;
7f9f62ba
PA
733
734 /* Add process to GDB's tables. */
735 add_inferior (child_pid);
736
ac264b3b
MS
737 /* Retain child fork in ptrace (stopped) state. */
738 fp = find_fork_pid (child_pid);
739 if (!fp)
740 fp = add_fork (child_pid);
741 fork_save_infrun_state (fp, 0);
742 }
9016a515
DJ
743
744 if (has_vforked)
745 {
b957e937
DJ
746 gdb_assert (linux_supports_tracefork_flag >= 0);
747 if (linux_supports_tracevforkdone (0))
9016a515
DJ
748 {
749 int status;
750
751 ptrace (PTRACE_CONT, parent_pid, 0, 0);
58aecb61 752 my_waitpid (parent_pid, &status, __WALL);
c874c7fc 753 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
8a3fe4f8
AC
754 warning (_("Unexpected waitpid result %06x when waiting for "
755 "vfork-done"), status);
9016a515
DJ
756 }
757 else
758 {
759 /* We can't insert breakpoints until the child has
760 finished with the shared memory region. We need to
761 wait until that happens. Ideal would be to just
762 call:
763 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
764 - waitpid (parent_pid, &status, __WALL);
765 However, most architectures can't handle a syscall
766 being traced on the way out if it wasn't traced on
767 the way in.
768
769 We might also think to loop, continuing the child
770 until it exits or gets a SIGTRAP. One problem is
771 that the child might call ptrace with PTRACE_TRACEME.
772
773 There's no simple and reliable way to figure out when
774 the vforked child will be done with its copy of the
775 shared memory. We could step it out of the syscall,
776 two instructions, let it go, and then single-step the
777 parent once. When we have hardware single-step, this
778 would work; with software single-step it could still
779 be made to work but we'd have to be able to insert
780 single-step breakpoints in the child, and we'd have
781 to insert -just- the single-step breakpoint in the
782 parent. Very awkward.
783
784 In the end, the best we can do is to make sure it
785 runs for a little while. Hopefully it will be out of
786 range of any breakpoints we reinsert. Usually this
787 is only the single-step breakpoint at vfork's return
788 point. */
789
790 usleep (10000);
791 }
792
793 /* Since we vforked, breakpoints were removed in the parent
794 too. Put them back. */
795 reattach_breakpoints (parent_pid);
796 }
4de4c07c 797 }
3993f6b1 798 else
4de4c07c 799 {
4e1c45ea
PA
800 struct thread_info *last_tp = find_thread_pid (last_ptid);
801 struct thread_info *tp;
4de4c07c
DJ
802 char child_pid_spelling[40];
803
4e1c45ea
PA
804 /* Copy user stepping state to the new inferior thread. */
805 struct breakpoint *step_resume_breakpoint = last_tp->step_resume_breakpoint;
806 CORE_ADDR step_range_start = last_tp->step_range_start;
807 CORE_ADDR step_range_end = last_tp->step_range_end;
808 struct frame_id step_frame_id = last_tp->step_frame_id;
809
810 /* Otherwise, deleting the parent would get rid of this
811 breakpoint. */
812 last_tp->step_resume_breakpoint = NULL;
813
4de4c07c 814 /* Needed to keep the breakpoint lists in sync. */
9016a515
DJ
815 if (! has_vforked)
816 detach_breakpoints (child_pid);
4de4c07c
DJ
817
818 /* Before detaching from the parent, remove all breakpoints from it. */
819 remove_breakpoints ();
820
e85a822c 821 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
822 {
823 target_terminal_ours ();
ac264b3b
MS
824 fprintf_filtered (gdb_stdlog,
825 "Attaching after fork to child process %d.\n",
826 child_pid);
f75c00e4 827 }
4de4c07c 828
9016a515
DJ
829 /* If we're vforking, we may want to hold on to the parent until
830 the child exits or execs. At exec time we can remove the old
831 breakpoints from the parent and detach it; at exit time we
832 could do the same (or even, sneakily, resume debugging it - the
833 child's exec has failed, or something similar).
834
835 This doesn't clean up "properly", because we can't call
836 target_detach, but that's OK; if the current target is "child",
837 then it doesn't need any further cleanups, and lin_lwp will
838 generally not encounter vfork (vfork is defined to fork
839 in libpthread.so).
840
841 The holding part is very easy if we have VFORKDONE events;
842 but keeping track of both processes is beyond GDB at the
843 moment. So we don't expose the parent to the rest of GDB.
844 Instead we quietly hold onto it until such time as we can
845 safely resume it. */
846
847 if (has_vforked)
7f9f62ba
PA
848 {
849 linux_parent_pid = parent_pid;
850 detach_inferior (parent_pid);
851 }
ac264b3b
MS
852 else if (!detach_fork)
853 {
854 struct fork_info *fp;
855 /* Retain parent fork in ptrace (stopped) state. */
856 fp = find_fork_pid (parent_pid);
857 if (!fp)
858 fp = add_fork (parent_pid);
859 fork_save_infrun_state (fp, 0);
860 }
9016a515 861 else
b84876c2 862 target_detach (NULL, 0);
4de4c07c 863
9f0bdab8 864 inferior_ptid = ptid_build (child_pid, child_pid, 0);
7f9f62ba 865 add_inferior (child_pid);
ee057212
DJ
866
867 /* Reinstall ourselves, since we might have been removed in
868 target_detach (which does other necessary cleanup). */
ac264b3b 869
ee057212 870 push_target (ops);
9f0bdab8 871 linux_nat_switch_fork (inferior_ptid);
ef29ce1a 872 check_for_thread_db ();
4de4c07c 873
4e1c45ea
PA
874 tp = inferior_thread ();
875 tp->step_resume_breakpoint = step_resume_breakpoint;
876 tp->step_range_start = step_range_start;
877 tp->step_range_end = step_range_end;
878 tp->step_frame_id = step_frame_id;
879
4de4c07c
DJ
880 /* Reset breakpoints in the child as appropriate. */
881 follow_inferior_reset_breakpoints ();
882 }
883
b84876c2
PA
884 if (target_can_async_p ())
885 target_async (inferior_event_handler, 0);
886
4de4c07c
DJ
887 return 0;
888}
889
4de4c07c 890\f
6d8fd2b7
UW
891static void
892linux_child_insert_fork_catchpoint (int pid)
4de4c07c 893{
b957e937 894 if (! linux_supports_tracefork (pid))
8a3fe4f8 895 error (_("Your system does not support fork catchpoints."));
3993f6b1
DJ
896}
897
6d8fd2b7
UW
898static void
899linux_child_insert_vfork_catchpoint (int pid)
3993f6b1 900{
b957e937 901 if (!linux_supports_tracefork (pid))
8a3fe4f8 902 error (_("Your system does not support vfork catchpoints."));
3993f6b1
DJ
903}
904
6d8fd2b7
UW
905static void
906linux_child_insert_exec_catchpoint (int pid)
3993f6b1 907{
b957e937 908 if (!linux_supports_tracefork (pid))
8a3fe4f8 909 error (_("Your system does not support exec catchpoints."));
3993f6b1
DJ
910}
911
d6b0e80f
AC
912/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
913 are processes sharing the same VM space. A multi-threaded process
914 is basically a group of such processes. However, such a grouping
915 is almost entirely a user-space issue; the kernel doesn't enforce
916 such a grouping at all (this might change in the future). In
917 general, we'll rely on the threads library (i.e. the GNU/Linux
918 Threads library) to provide such a grouping.
919
920 It is perfectly possible to write a multi-threaded application
921 without the assistance of a threads library, by using the clone
922 system call directly. This module should be able to give some
923 rudimentary support for debugging such applications if developers
924 specify the CLONE_PTRACE flag in the clone system call, and are
925 using the Linux kernel 2.4 or above.
926
927 Note that there are some peculiarities in GNU/Linux that affect
928 this code:
929
930 - In general one should specify the __WCLONE flag to waitpid in
931 order to make it report events for any of the cloned processes
932 (and leave it out for the initial process). However, if a cloned
933 process has exited the exit status is only reported if the
934 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
935 we cannot use it since GDB must work on older systems too.
936
937 - When a traced, cloned process exits and is waited for by the
938 debugger, the kernel reassigns it to the original parent and
939 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
940 library doesn't notice this, which leads to the "zombie problem":
941 When being debugged, a multi-threaded process that spawns a lot of
942 threads will run out of processes, even if the threads exit,
943 because the "zombies" stay around. */
944
945/* List of known LWPs. */
9f0bdab8 946struct lwp_info *lwp_list;
d6b0e80f
AC
947
948/* Number of LWPs in the list. */
949static int num_lwps;
d6b0e80f
AC
950\f
951
d6b0e80f
AC
952/* Original signal mask. */
953static sigset_t normal_mask;
954
955/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
956 _initialize_linux_nat. */
957static sigset_t suspend_mask;
958
b84876c2
PA
959/* SIGCHLD action for synchronous mode. */
960struct sigaction sync_sigchld_action;
961
962/* SIGCHLD action for asynchronous mode. */
963static struct sigaction async_sigchld_action;
84e46146
PA
964
965/* SIGCHLD default action, to pass to new inferiors. */
966static struct sigaction sigchld_default_action;
d6b0e80f
AC
967\f
968
969/* Prototypes for local functions. */
970static int stop_wait_callback (struct lwp_info *lp, void *data);
971static int linux_nat_thread_alive (ptid_t ptid);
6d8fd2b7 972static char *linux_child_pid_to_exec_file (int pid);
710151dd
PA
973static int cancel_breakpoint (struct lwp_info *lp);
974
d6b0e80f
AC
975\f
976/* Convert wait status STATUS to a string. Used for printing debug
977 messages only. */
978
979static char *
980status_to_str (int status)
981{
982 static char buf[64];
983
984 if (WIFSTOPPED (status))
985 snprintf (buf, sizeof (buf), "%s (stopped)",
986 strsignal (WSTOPSIG (status)));
987 else if (WIFSIGNALED (status))
988 snprintf (buf, sizeof (buf), "%s (terminated)",
989 strsignal (WSTOPSIG (status)));
990 else
991 snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));
992
993 return buf;
994}
995
996/* Initialize the list of LWPs. Note that this module, contrary to
997 what GDB's generic threads layer does for its thread list,
998 re-initializes the LWP lists whenever we mourn or detach (which
999 doesn't involve mourning) the inferior. */
1000
1001static void
1002init_lwp_list (void)
1003{
1004 struct lwp_info *lp, *lpnext;
1005
1006 for (lp = lwp_list; lp; lp = lpnext)
1007 {
1008 lpnext = lp->next;
1009 xfree (lp);
1010 }
1011
1012 lwp_list = NULL;
1013 num_lwps = 0;
d6b0e80f
AC
1014}
1015
f973ed9c 1016/* Add the LWP specified by PID to the list. Return a pointer to the
9f0bdab8
DJ
1017 structure describing the new LWP. The LWP should already be stopped
1018 (with an exception for the very first LWP). */
d6b0e80f
AC
1019
1020static struct lwp_info *
1021add_lwp (ptid_t ptid)
1022{
1023 struct lwp_info *lp;
1024
1025 gdb_assert (is_lwp (ptid));
1026
1027 lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));
1028
1029 memset (lp, 0, sizeof (struct lwp_info));
1030
1031 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1032
1033 lp->ptid = ptid;
1034
1035 lp->next = lwp_list;
1036 lwp_list = lp;
f973ed9c 1037 ++num_lwps;
d6b0e80f 1038
9f0bdab8
DJ
1039 if (num_lwps > 1 && linux_nat_new_thread != NULL)
1040 linux_nat_new_thread (ptid);
1041
d6b0e80f
AC
1042 return lp;
1043}
1044
1045/* Remove the LWP specified by PID from the list. */
1046
1047static void
1048delete_lwp (ptid_t ptid)
1049{
1050 struct lwp_info *lp, *lpprev;
1051
1052 lpprev = NULL;
1053
1054 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
1055 if (ptid_equal (lp->ptid, ptid))
1056 break;
1057
1058 if (!lp)
1059 return;
1060
d6b0e80f
AC
1061 num_lwps--;
1062
1063 if (lpprev)
1064 lpprev->next = lp->next;
1065 else
1066 lwp_list = lp->next;
1067
1068 xfree (lp);
1069}
1070
1071/* Return a pointer to the structure describing the LWP corresponding
1072 to PID. If no corresponding LWP could be found, return NULL. */
1073
1074static struct lwp_info *
1075find_lwp_pid (ptid_t ptid)
1076{
1077 struct lwp_info *lp;
1078 int lwp;
1079
1080 if (is_lwp (ptid))
1081 lwp = GET_LWP (ptid);
1082 else
1083 lwp = GET_PID (ptid);
1084
1085 for (lp = lwp_list; lp; lp = lp->next)
1086 if (lwp == GET_LWP (lp->ptid))
1087 return lp;
1088
1089 return NULL;
1090}
1091
1092/* Call CALLBACK with its second argument set to DATA for every LWP in
1093 the list. If CALLBACK returns 1 for a particular LWP, return a
1094 pointer to the structure describing that LWP immediately.
1095 Otherwise return NULL. */
1096
1097struct lwp_info *
1098iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
1099{
1100 struct lwp_info *lp, *lpnext;
1101
1102 for (lp = lwp_list; lp; lp = lpnext)
1103 {
1104 lpnext = lp->next;
1105 if ((*callback) (lp, data))
1106 return lp;
1107 }
1108
1109 return NULL;
1110}
1111
f973ed9c
DJ
1112/* Update our internal state when changing from one fork (checkpoint,
1113 et cetera) to another indicated by NEW_PTID. We can only switch
1114 single-threaded applications, so we only create one new LWP, and
1115 the previous list is discarded. */
1116
1117void
1118linux_nat_switch_fork (ptid_t new_ptid)
1119{
1120 struct lwp_info *lp;
1121
1122 init_lwp_list ();
1123 lp = add_lwp (new_ptid);
1124 lp->stopped = 1;
e26af52f 1125
4f8d22e3
PA
1126 init_thread_list ();
1127 add_thread_silent (new_ptid);
e26af52f
DJ
1128}
1129
e26af52f
DJ
1130/* Handle the exit of a single thread LP. */
1131
1132static void
1133exit_lwp (struct lwp_info *lp)
1134{
063bfe2e
VP
1135 struct thread_info *th = find_thread_pid (lp->ptid);
1136
1137 if (th)
e26af52f 1138 {
17faa917
DJ
1139 if (print_thread_events)
1140 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1141
4f8d22e3 1142 delete_thread (lp->ptid);
e26af52f
DJ
1143 }
1144
1145 delete_lwp (lp->ptid);
1146}
1147
a0ef4274
DJ
1148/* Detect `T (stopped)' in `/proc/PID/status'.
1149 Other states including `T (tracing stop)' are reported as false. */
1150
1151static int
1152pid_is_stopped (pid_t pid)
1153{
1154 FILE *status_file;
1155 char buf[100];
1156 int retval = 0;
1157
1158 snprintf (buf, sizeof (buf), "/proc/%d/status", (int) pid);
1159 status_file = fopen (buf, "r");
1160 if (status_file != NULL)
1161 {
1162 int have_state = 0;
1163
1164 while (fgets (buf, sizeof (buf), status_file))
1165 {
1166 if (strncmp (buf, "State:", 6) == 0)
1167 {
1168 have_state = 1;
1169 break;
1170 }
1171 }
1172 if (have_state && strstr (buf, "T (stopped)") != NULL)
1173 retval = 1;
1174 fclose (status_file);
1175 }
1176 return retval;
1177}
1178
1179/* Wait for the LWP specified by LP, which we have just attached to.
1180 Returns a wait status for that LWP, to cache. */
1181
1182static int
1183linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
1184 int *signalled)
1185{
1186 pid_t new_pid, pid = GET_LWP (ptid);
1187 int status;
1188
1189 if (pid_is_stopped (pid))
1190 {
1191 if (debug_linux_nat)
1192 fprintf_unfiltered (gdb_stdlog,
1193 "LNPAW: Attaching to a stopped process\n");
1194
1195 /* The process is definitely stopped. It is in a job control
1196 stop, unless the kernel predates the TASK_STOPPED /
1197 TASK_TRACED distinction, in which case it might be in a
1198 ptrace stop. Make sure it is in a ptrace stop; from there we
1199 can kill it, signal it, et cetera.
1200
1201 First make sure there is a pending SIGSTOP. Since we are
1202 already attached, the process can not transition from stopped
1203 to running without a PTRACE_CONT; so we know this signal will
1204 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1205 probably already in the queue (unless this kernel is old
1206 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1207 is not an RT signal, it can only be queued once. */
1208 kill_lwp (pid, SIGSTOP);
1209
1210 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1211 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1212 ptrace (PTRACE_CONT, pid, 0, 0);
1213 }
1214
1215 /* Make sure the initial process is stopped. The user-level threads
1216 layer might want to poke around in the inferior, and that won't
1217 work if things haven't stabilized yet. */
1218 new_pid = my_waitpid (pid, &status, 0);
1219 if (new_pid == -1 && errno == ECHILD)
1220 {
1221 if (first)
1222 warning (_("%s is a cloned process"), target_pid_to_str (ptid));
1223
1224 /* Try again with __WCLONE to check cloned processes. */
1225 new_pid = my_waitpid (pid, &status, __WCLONE);
1226 *cloned = 1;
1227 }
1228
1229 gdb_assert (pid == new_pid && WIFSTOPPED (status));
1230
1231 if (WSTOPSIG (status) != SIGSTOP)
1232 {
1233 *signalled = 1;
1234 if (debug_linux_nat)
1235 fprintf_unfiltered (gdb_stdlog,
1236 "LNPAW: Received %s after attaching\n",
1237 status_to_str (status));
1238 }
1239
1240 return status;
1241}
1242
1243/* Attach to the LWP specified by PID. Return 0 if successful or -1
1244 if the new LWP could not be attached. */
d6b0e80f 1245
9ee57c33 1246int
93815fbf 1247lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1248{
9ee57c33 1249 struct lwp_info *lp;
84e46146 1250 enum sigchld_state async_events_original_state;
d6b0e80f
AC
1251
1252 gdb_assert (is_lwp (ptid));
1253
84e46146 1254 async_events_original_state = linux_nat_async_events (sigchld_sync);
d6b0e80f 1255
9ee57c33 1256 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1257
1258 /* We assume that we're already attached to any LWP that has an id
1259 equal to the overall process id, and to any LWP that is already
1260 in our list of LWPs. If we're not seeing exit events from threads
1261 and we've had PID wraparound since we last tried to stop all threads,
1262 this assumption might be wrong; fortunately, this is very unlikely
1263 to happen. */
9ee57c33 1264 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f 1265 {
a0ef4274 1266 int status, cloned = 0, signalled = 0;
d6b0e80f
AC
1267
1268 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1269 {
1270 /* If we fail to attach to the thread, issue a warning,
1271 but continue. One way this can happen is if thread
e9efe249 1272 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1273 bug may place threads in the thread list and then fail
1274 to create them. */
1275 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1276 safe_strerror (errno));
1277 return -1;
1278 }
1279
d6b0e80f
AC
1280 if (debug_linux_nat)
1281 fprintf_unfiltered (gdb_stdlog,
1282 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1283 target_pid_to_str (ptid));
1284
a0ef4274
DJ
1285 status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
1286 lp = add_lwp (ptid);
1287 lp->stopped = 1;
1288 lp->cloned = cloned;
1289 lp->signalled = signalled;
1290 if (WSTOPSIG (status) != SIGSTOP)
d6b0e80f 1291 {
a0ef4274
DJ
1292 lp->resumed = 1;
1293 lp->status = status;
d6b0e80f
AC
1294 }
1295
a0ef4274 1296 target_post_attach (GET_LWP (lp->ptid));
d6b0e80f
AC
1297
1298 if (debug_linux_nat)
1299 {
1300 fprintf_unfiltered (gdb_stdlog,
1301 "LLAL: waitpid %s received %s\n",
1302 target_pid_to_str (ptid),
1303 status_to_str (status));
1304 }
1305 }
1306 else
1307 {
1308 /* We assume that the LWP representing the original process is
1309 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1310 that the GNU/linux ptrace layer uses to keep track of
1311 threads. Note that this won't have already been done since
1312 the main thread will have, we assume, been stopped by an
1313 attach from a different layer. */
9ee57c33
DJ
1314 if (lp == NULL)
1315 lp = add_lwp (ptid);
d6b0e80f
AC
1316 lp->stopped = 1;
1317 }
9ee57c33 1318
84e46146 1319 linux_nat_async_events (async_events_original_state);
9ee57c33 1320 return 0;
d6b0e80f
AC
1321}
1322
b84876c2 1323static void
136d6dae
VP
1324linux_nat_create_inferior (struct target_ops *ops,
1325 char *exec_file, char *allargs, char **env,
b84876c2
PA
1326 int from_tty)
1327{
1328 int saved_async = 0;
10568435
JK
1329#ifdef HAVE_PERSONALITY
1330 int personality_orig = 0, personality_set = 0;
1331#endif /* HAVE_PERSONALITY */
b84876c2
PA
1332
1333 /* The fork_child mechanism is synchronous and calls target_wait, so
1334 we have to mask the async mode. */
1335
1336 if (target_can_async_p ())
84e46146
PA
1337 /* Mask async mode. Creating a child requires a loop calling
1338 wait_for_inferior currently. */
b84876c2
PA
1339 saved_async = linux_nat_async_mask (0);
1340 else
1341 {
1342 /* Restore the original signal mask. */
1343 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1344 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1345 suspend_mask = normal_mask;
1346 sigdelset (&suspend_mask, SIGCHLD);
1347 }
1348
84e46146
PA
1349 /* Set SIGCHLD to the default action, until after execing the child,
1350 since the inferior inherits the superior's signal mask. It will
1351 be blocked again in linux_nat_wait, which is only reached after
1352 the inferior execing. */
1353 linux_nat_async_events (sigchld_default);
1354
10568435
JK
1355#ifdef HAVE_PERSONALITY
1356 if (disable_randomization)
1357 {
1358 errno = 0;
1359 personality_orig = personality (0xffffffff);
1360 if (errno == 0 && !(personality_orig & ADDR_NO_RANDOMIZE))
1361 {
1362 personality_set = 1;
1363 personality (personality_orig | ADDR_NO_RANDOMIZE);
1364 }
1365 if (errno != 0 || (personality_set
1366 && !(personality (0xffffffff) & ADDR_NO_RANDOMIZE)))
1367 warning (_("Error disabling address space randomization: %s"),
1368 safe_strerror (errno));
1369 }
1370#endif /* HAVE_PERSONALITY */
1371
136d6dae 1372 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1373
10568435
JK
1374#ifdef HAVE_PERSONALITY
1375 if (personality_set)
1376 {
1377 errno = 0;
1378 personality (personality_orig);
1379 if (errno != 0)
1380 warning (_("Error restoring address space randomization: %s"),
1381 safe_strerror (errno));
1382 }
1383#endif /* HAVE_PERSONALITY */
1384
b84876c2
PA
1385 if (saved_async)
1386 linux_nat_async_mask (saved_async);
1387}
1388
d6b0e80f 1389static void
136d6dae 1390linux_nat_attach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f
AC
1391{
1392 struct lwp_info *lp;
d6b0e80f 1393 int status;
af990527 1394 ptid_t ptid;
d6b0e80f
AC
1395
1396 /* FIXME: We should probably accept a list of process id's, and
1397 attach all of them. */
136d6dae 1398 linux_ops->to_attach (ops, args, from_tty);
d6b0e80f 1399
b84876c2
PA
1400 if (!target_can_async_p ())
1401 {
1402 /* Restore the original signal mask. */
1403 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1404 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1405 suspend_mask = normal_mask;
1406 sigdelset (&suspend_mask, SIGCHLD);
1407 }
1408
af990527
PA
1409 /* The ptrace base target adds the main thread with (pid,0,0)
1410 format. Decorate it with lwp info. */
1411 ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1412 thread_change_ptid (inferior_ptid, ptid);
1413
9f0bdab8 1414 /* Add the initial process as the first LWP to the list. */
af990527 1415 lp = add_lwp (ptid);
a0ef4274
DJ
1416
1417 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
1418 &lp->signalled);
1419 lp->stopped = 1;
9f0bdab8 1420
a0ef4274 1421 /* Save the wait status to report later. */
d6b0e80f 1422 lp->resumed = 1;
a0ef4274
DJ
1423 if (debug_linux_nat)
1424 fprintf_unfiltered (gdb_stdlog,
1425 "LNA: waitpid %ld, saving status %s\n",
1426 (long) GET_PID (lp->ptid), status_to_str (status));
710151dd
PA
1427
1428 if (!target_can_async_p ())
a0ef4274 1429 lp->status = status;
710151dd
PA
1430 else
1431 {
1432 /* We already waited for this LWP, so put the wait result on the
1433 pipe. The event loop will wake up and gets us to handling
1434 this event. */
a0ef4274
DJ
1435 linux_nat_event_pipe_push (GET_PID (lp->ptid), status,
1436 lp->cloned ? __WCLONE : 0);
b84876c2
PA
1437 /* Register in the event loop. */
1438 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1439 }
1440}
1441
a0ef4274
DJ
1442/* Get pending status of LP. */
1443static int
1444get_pending_status (struct lwp_info *lp, int *status)
1445{
1446 struct target_waitstatus last;
1447 ptid_t last_ptid;
1448
1449 get_last_target_status (&last_ptid, &last);
1450
1451 /* If this lwp is the ptid that GDB is processing an event from, the
1452 signal will be in stop_signal. Otherwise, in all-stop + sync
1453 mode, we may cache pending events in lp->status while trying to
1454 stop all threads (see stop_wait_callback). In async mode, the
1455 events are always cached in waitpid_queue. */
1456
1457 *status = 0;
4c28f408
PA
1458
1459 if (non_stop)
a0ef4274 1460 {
4c28f408
PA
1461 enum target_signal signo = TARGET_SIGNAL_0;
1462
1463 if (is_executing (lp->ptid))
1464 {
1465 /* If the core thought this lwp was executing --- e.g., the
1466 executing property hasn't been updated yet, but the
1467 thread has been stopped with a stop_callback /
1468 stop_wait_callback sequence (see linux_nat_detach for
1469 example) --- we can only have pending events in the local
1470 queue. */
1471 if (queued_waitpid (GET_LWP (lp->ptid), status, __WALL) != -1)
1472 {
8b8655b3
TJB
1473 if (WIFSTOPPED (*status))
1474 signo = target_signal_from_host (WSTOPSIG (*status));
4c28f408
PA
1475
1476 /* If not stopped, then the lwp is gone, no use in
1477 resending a signal. */
1478 }
1479 }
1480 else
1481 {
1482 /* If the core knows the thread is not executing, then we
1483 have the last signal recorded in
2020b7ab 1484 thread_info->stop_signal. */
4c28f408 1485
2020b7ab
PA
1486 struct thread_info *tp = find_thread_pid (lp->ptid);
1487 signo = tp->stop_signal;
4c28f408
PA
1488 }
1489
1490 if (signo != TARGET_SIGNAL_0
1491 && !signal_pass_state (signo))
1492 {
1493 if (debug_linux_nat)
1494 fprintf_unfiltered (gdb_stdlog, "\
1495GPT: lwp %s had signal %s, but it is in no pass state\n",
1496 target_pid_to_str (lp->ptid),
1497 target_signal_to_string (signo));
1498 }
1499 else
1500 {
1501 if (signo != TARGET_SIGNAL_0)
1502 *status = W_STOPCODE (target_signal_to_host (signo));
1503
1504 if (debug_linux_nat)
1505 fprintf_unfiltered (gdb_stdlog,
1506 "GPT: lwp %s as pending signal %s\n",
1507 target_pid_to_str (lp->ptid),
1508 target_signal_to_string (signo));
1509 }
a0ef4274 1510 }
a0ef4274 1511 else
4c28f408
PA
1512 {
1513 if (GET_LWP (lp->ptid) == GET_LWP (last_ptid))
1514 {
2020b7ab
PA
1515 struct thread_info *tp = find_thread_pid (lp->ptid);
1516 if (tp->stop_signal != TARGET_SIGNAL_0
1517 && signal_pass_state (tp->stop_signal))
1518 *status = W_STOPCODE (target_signal_to_host (tp->stop_signal));
4c28f408
PA
1519 }
1520 else if (target_can_async_p ())
1521 queued_waitpid (GET_LWP (lp->ptid), status, __WALL);
1522 else
1523 *status = lp->status;
1524 }
a0ef4274
DJ
1525
1526 return 0;
1527}
1528
d6b0e80f
AC
1529static int
1530detach_callback (struct lwp_info *lp, void *data)
1531{
1532 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1533
1534 if (debug_linux_nat && lp->status)
1535 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1536 strsignal (WSTOPSIG (lp->status)),
1537 target_pid_to_str (lp->ptid));
1538
a0ef4274
DJ
1539 /* If there is a pending SIGSTOP, get rid of it. */
1540 if (lp->signalled)
d6b0e80f 1541 {
d6b0e80f
AC
1542 if (debug_linux_nat)
1543 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1544 "DC: Sending SIGCONT to %s\n",
1545 target_pid_to_str (lp->ptid));
d6b0e80f 1546
a0ef4274 1547 kill_lwp (GET_LWP (lp->ptid), SIGCONT);
d6b0e80f 1548 lp->signalled = 0;
d6b0e80f
AC
1549 }
1550
1551 /* We don't actually detach from the LWP that has an id equal to the
1552 overall process id just yet. */
1553 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1554 {
a0ef4274
DJ
1555 int status = 0;
1556
1557 /* Pass on any pending signal for this LWP. */
1558 get_pending_status (lp, &status);
1559
d6b0e80f
AC
1560 errno = 0;
1561 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
a0ef4274 1562 WSTOPSIG (status)) < 0)
8a3fe4f8 1563 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1564 safe_strerror (errno));
1565
1566 if (debug_linux_nat)
1567 fprintf_unfiltered (gdb_stdlog,
1568 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1569 target_pid_to_str (lp->ptid),
1570 strsignal (WSTOPSIG (lp->status)));
1571
1572 delete_lwp (lp->ptid);
1573 }
1574
1575 return 0;
1576}
1577
1578static void
136d6dae 1579linux_nat_detach (struct target_ops *ops, char *args, int from_tty)
d6b0e80f 1580{
b84876c2 1581 int pid;
a0ef4274
DJ
1582 int status;
1583 enum target_signal sig;
1584
b84876c2
PA
1585 if (target_can_async_p ())
1586 linux_nat_async (NULL, 0);
1587
4c28f408
PA
1588 /* Stop all threads before detaching. ptrace requires that the
1589 thread is stopped to successfully detach. */
1590 iterate_over_lwps (stop_callback, NULL);
1591 /* ... and wait until all of them have reported back that
1592 they're no longer running. */
1593 iterate_over_lwps (stop_wait_callback, NULL);
1594
d6b0e80f
AC
1595 iterate_over_lwps (detach_callback, NULL);
1596
1597 /* Only the initial process should be left right now. */
1598 gdb_assert (num_lwps == 1);
1599
a0ef4274
DJ
1600 /* Pass on any pending signal for the last LWP. */
1601 if ((args == NULL || *args == '\0')
1602 && get_pending_status (lwp_list, &status) != -1
1603 && WIFSTOPPED (status))
1604 {
1605 /* Put the signal number in ARGS so that inf_ptrace_detach will
1606 pass it along with PTRACE_DETACH. */
1607 args = alloca (8);
1608 sprintf (args, "%d", (int) WSTOPSIG (status));
1609 fprintf_unfiltered (gdb_stdlog,
1610 "LND: Sending signal %s to %s\n",
1611 args,
1612 target_pid_to_str (lwp_list->ptid));
1613 }
1614
d6b0e80f
AC
1615 /* Destroy LWP info; it's no longer valid. */
1616 init_lwp_list ();
1617
b84876c2
PA
1618 pid = GET_PID (inferior_ptid);
1619 inferior_ptid = pid_to_ptid (pid);
136d6dae 1620 linux_ops->to_detach (ops, args, from_tty);
b84876c2
PA
1621
1622 if (target_can_async_p ())
1623 drain_queued_events (pid);
d6b0e80f
AC
1624}
1625
1626/* Resume LP. */
1627
1628static int
1629resume_callback (struct lwp_info *lp, void *data)
1630{
1631 if (lp->stopped && lp->status == 0)
1632 {
10d6c8cd
DJ
1633 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1634 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1635 if (debug_linux_nat)
1636 fprintf_unfiltered (gdb_stdlog,
1637 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1638 target_pid_to_str (lp->ptid));
1639 lp->stopped = 0;
1640 lp->step = 0;
9f0bdab8 1641 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
d6b0e80f 1642 }
57380f4e
DJ
1643 else if (lp->stopped && debug_linux_nat)
1644 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (has pending)\n",
1645 target_pid_to_str (lp->ptid));
1646 else if (debug_linux_nat)
1647 fprintf_unfiltered (gdb_stdlog, "RC: Not resuming sibling %s (not stopped)\n",
1648 target_pid_to_str (lp->ptid));
d6b0e80f
AC
1649
1650 return 0;
1651}
1652
1653static int
1654resume_clear_callback (struct lwp_info *lp, void *data)
1655{
1656 lp->resumed = 0;
1657 return 0;
1658}
1659
1660static int
1661resume_set_callback (struct lwp_info *lp, void *data)
1662{
1663 lp->resumed = 1;
1664 return 0;
1665}
1666
1667static void
1668linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1669{
1670 struct lwp_info *lp;
1671 int resume_all;
1672
76f50ad1
DJ
1673 if (debug_linux_nat)
1674 fprintf_unfiltered (gdb_stdlog,
1675 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1676 step ? "step" : "resume",
1677 target_pid_to_str (ptid),
1678 signo ? strsignal (signo) : "0",
1679 target_pid_to_str (inferior_ptid));
1680
b84876c2
PA
1681 if (target_can_async_p ())
1682 /* Block events while we're here. */
84e46146 1683 linux_nat_async_events (sigchld_sync);
b84876c2 1684
d6b0e80f
AC
1685 /* A specific PTID means `step only this process id'. */
1686 resume_all = (PIDGET (ptid) == -1);
1687
4c28f408
PA
1688 if (non_stop && resume_all)
1689 internal_error (__FILE__, __LINE__,
1690 "can't resume all in non-stop mode");
1691
1692 if (!non_stop)
1693 {
1694 if (resume_all)
1695 iterate_over_lwps (resume_set_callback, NULL);
1696 else
1697 iterate_over_lwps (resume_clear_callback, NULL);
1698 }
d6b0e80f
AC
1699
1700 /* If PID is -1, it's the current inferior that should be
1701 handled specially. */
1702 if (PIDGET (ptid) == -1)
1703 ptid = inferior_ptid;
1704
1705 lp = find_lwp_pid (ptid);
9f0bdab8 1706 gdb_assert (lp != NULL);
d6b0e80f 1707
4c28f408 1708 /* Convert to something the lower layer understands. */
9f0bdab8 1709 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1710
9f0bdab8
DJ
1711 /* Remember if we're stepping. */
1712 lp->step = step;
d6b0e80f 1713
9f0bdab8
DJ
1714 /* Mark this LWP as resumed. */
1715 lp->resumed = 1;
76f50ad1 1716
9f0bdab8
DJ
1717 /* If we have a pending wait status for this thread, there is no
1718 point in resuming the process. But first make sure that
1719 linux_nat_wait won't preemptively handle the event - we
1720 should never take this short-circuit if we are going to
1721 leave LP running, since we have skipped resuming all the
1722 other threads. This bit of code needs to be synchronized
1723 with linux_nat_wait. */
76f50ad1 1724
710151dd
PA
1725 /* In async mode, we never have pending wait status. */
1726 if (target_can_async_p () && lp->status)
1727 internal_error (__FILE__, __LINE__, "Pending status in async mode");
1728
9f0bdab8
DJ
1729 if (lp->status && WIFSTOPPED (lp->status))
1730 {
d6b48e9c
PA
1731 int saved_signo;
1732 struct inferior *inf;
76f50ad1 1733
d6b48e9c
PA
1734 inf = find_inferior_pid (ptid_get_pid (ptid));
1735 gdb_assert (inf);
1736 saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
1737
1738 /* Defer to common code if we're gaining control of the
1739 inferior. */
1740 if (inf->stop_soon == NO_STOP_QUIETLY
1741 && signal_stop_state (saved_signo) == 0
9f0bdab8
DJ
1742 && signal_print_state (saved_signo) == 0
1743 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1744 {
9f0bdab8
DJ
1745 if (debug_linux_nat)
1746 fprintf_unfiltered (gdb_stdlog,
1747 "LLR: Not short circuiting for ignored "
1748 "status 0x%x\n", lp->status);
1749
d6b0e80f
AC
1750 /* FIXME: What should we do if we are supposed to continue
1751 this thread with a signal? */
1752 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1753 signo = saved_signo;
1754 lp->status = 0;
1755 }
1756 }
76f50ad1 1757
9f0bdab8
DJ
1758 if (lp->status)
1759 {
1760 /* FIXME: What should we do if we are supposed to continue
1761 this thread with a signal? */
1762 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1763
9f0bdab8
DJ
1764 if (debug_linux_nat)
1765 fprintf_unfiltered (gdb_stdlog,
1766 "LLR: Short circuiting for status 0x%x\n",
1767 lp->status);
d6b0e80f 1768
9f0bdab8 1769 return;
d6b0e80f
AC
1770 }
1771
9f0bdab8
DJ
1772 /* Mark LWP as not stopped to prevent it from being continued by
1773 resume_callback. */
1774 lp->stopped = 0;
1775
d6b0e80f
AC
1776 if (resume_all)
1777 iterate_over_lwps (resume_callback, NULL);
1778
10d6c8cd 1779 linux_ops->to_resume (ptid, step, signo);
9f0bdab8
DJ
1780 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1781
d6b0e80f
AC
1782 if (debug_linux_nat)
1783 fprintf_unfiltered (gdb_stdlog,
1784 "LLR: %s %s, %s (resume event thread)\n",
1785 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1786 target_pid_to_str (ptid),
1787 signo ? strsignal (signo) : "0");
b84876c2
PA
1788
1789 if (target_can_async_p ())
8ea051c5 1790 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1791}
1792
1793/* Issue kill to specified lwp. */
1794
1795static int tkill_failed;
1796
1797static int
1798kill_lwp (int lwpid, int signo)
1799{
1800 errno = 0;
1801
1802/* Use tkill, if possible, in case we are using nptl threads. If tkill
1803 fails, then we are not using nptl threads and we should be using kill. */
1804
1805#ifdef HAVE_TKILL_SYSCALL
1806 if (!tkill_failed)
1807 {
1808 int ret = syscall (__NR_tkill, lwpid, signo);
1809 if (errno != ENOSYS)
1810 return ret;
1811 errno = 0;
1812 tkill_failed = 1;
1813 }
1814#endif
1815
1816 return kill (lwpid, signo);
1817}
1818
3d799a95
DJ
1819/* Handle a GNU/Linux extended wait response. If we see a clone
1820 event, we need to add the new LWP to our list (and not report the
1821 trap to higher layers). This function returns non-zero if the
1822 event should be ignored and we should wait again. If STOPPING is
1823 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1824
1825static int
3d799a95
DJ
1826linux_handle_extended_wait (struct lwp_info *lp, int status,
1827 int stopping)
d6b0e80f 1828{
3d799a95
DJ
1829 int pid = GET_LWP (lp->ptid);
1830 struct target_waitstatus *ourstatus = &lp->waitstatus;
1831 struct lwp_info *new_lp = NULL;
1832 int event = status >> 16;
d6b0e80f 1833
3d799a95
DJ
1834 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1835 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1836 {
3d799a95
DJ
1837 unsigned long new_pid;
1838 int ret;
1839
1840 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1841
3d799a95
DJ
1842 /* If we haven't already seen the new PID stop, wait for it now. */
1843 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1844 {
1845 /* The new child has a pending SIGSTOP. We can't affect it until it
1846 hits the SIGSTOP, but we're already attached. */
1847 ret = my_waitpid (new_pid, &status,
1848 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1849 if (ret == -1)
1850 perror_with_name (_("waiting for new child"));
1851 else if (ret != new_pid)
1852 internal_error (__FILE__, __LINE__,
1853 _("wait returned unexpected PID %d"), ret);
1854 else if (!WIFSTOPPED (status))
1855 internal_error (__FILE__, __LINE__,
1856 _("wait returned unexpected status 0x%x"), status);
1857 }
1858
3a3e9ee3 1859 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95
DJ
1860
1861 if (event == PTRACE_EVENT_FORK)
1862 ourstatus->kind = TARGET_WAITKIND_FORKED;
1863 else if (event == PTRACE_EVENT_VFORK)
1864 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1865 else
3d799a95 1866 {
4c28f408
PA
1867 struct cleanup *old_chain;
1868
3d799a95
DJ
1869 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1870 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1871 new_lp->cloned = 1;
4c28f408 1872 new_lp->stopped = 1;
d6b0e80f 1873
3d799a95
DJ
1874 if (WSTOPSIG (status) != SIGSTOP)
1875 {
 1876 /* This can happen if someone starts sending signals with
 1877 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
 1878 thread before it gets a chance to run.
1879 This is an unlikely case, and harder to handle for
1880 fork / vfork than for clone, so we do not try - but
1881 we handle it for clone events here. We'll send
1882 the other signal on to the thread below. */
1883
1884 new_lp->signalled = 1;
1885 }
1886 else
1887 status = 0;
d6b0e80f 1888
4c28f408 1889 if (non_stop)
3d799a95 1890 {
4c28f408
PA
1891 /* Add the new thread to GDB's lists as soon as possible
1892 so that:
1893
1894 1) the frontend doesn't have to wait for a stop to
1895 display them, and,
1896
1897 2) we tag it with the correct running state. */
1898
1899 /* If the thread_db layer is active, let it know about
1900 this new thread, and add it to GDB's list. */
1901 if (!thread_db_attach_lwp (new_lp->ptid))
1902 {
1903 /* We're not using thread_db. Add it to GDB's
1904 list. */
1905 target_post_attach (GET_LWP (new_lp->ptid));
1906 add_thread (new_lp->ptid);
1907 }
1908
1909 if (!stopping)
1910 {
1911 set_running (new_lp->ptid, 1);
1912 set_executing (new_lp->ptid, 1);
1913 }
1914 }
1915
1916 if (!stopping)
1917 {
1918 new_lp->stopped = 0;
3d799a95 1919 new_lp->resumed = 1;
4c28f408 1920 ptrace (PTRACE_CONT, new_pid, 0,
3d799a95
DJ
1921 status ? WSTOPSIG (status) : 0);
1922 }
d6b0e80f 1923
3d799a95
DJ
1924 if (debug_linux_nat)
1925 fprintf_unfiltered (gdb_stdlog,
1926 "LHEW: Got clone event from LWP %ld, resuming\n",
1927 GET_LWP (lp->ptid));
1928 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1929
1930 return 1;
1931 }
1932
1933 return 0;
d6b0e80f
AC
1934 }
1935
3d799a95
DJ
1936 if (event == PTRACE_EVENT_EXEC)
1937 {
1938 ourstatus->kind = TARGET_WAITKIND_EXECD;
1939 ourstatus->value.execd_pathname
6d8fd2b7 1940 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1941
1942 if (linux_parent_pid)
1943 {
1944 detach_breakpoints (linux_parent_pid);
1945 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1946
1947 linux_parent_pid = 0;
1948 }
1949
25b22b0a
PA
1950 /* At this point, all inserted breakpoints are gone. Doing this
1951 as soon as we detect an exec prevents the badness of deleting
1952 a breakpoint writing the current "shadow contents" to lift
1953 the bp. That shadow is NOT valid after an exec.
1954
1955 Note that we have to do this after the detach_breakpoints
1956 call above, otherwise breakpoints wouldn't be lifted from the
1957 parent on a vfork, because detach_breakpoints would think
1958 that breakpoints are not inserted. */
1959 mark_breakpoints_out ();
3d799a95
DJ
1960 return 0;
1961 }
1962
1963 internal_error (__FILE__, __LINE__,
1964 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1965}
1966
1967/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1968 exited. */
1969
1970static int
1971wait_lwp (struct lwp_info *lp)
1972{
1973 pid_t pid;
1974 int status;
1975 int thread_dead = 0;
1976
1977 gdb_assert (!lp->stopped);
1978 gdb_assert (lp->status == 0);
1979
58aecb61 1980 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1981 if (pid == -1 && errno == ECHILD)
1982 {
58aecb61 1983 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1984 if (pid == -1 && errno == ECHILD)
1985 {
1986 /* The thread has previously exited. We need to delete it
1987 now because, for some vendor 2.4 kernels with NPTL
1988 support backported, there won't be an exit event unless
1989 it is the main thread. 2.6 kernels will report an exit
1990 event for each thread that exits, as expected. */
1991 thread_dead = 1;
1992 if (debug_linux_nat)
1993 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1994 target_pid_to_str (lp->ptid));
1995 }
1996 }
1997
1998 if (!thread_dead)
1999 {
2000 gdb_assert (pid == GET_LWP (lp->ptid));
2001
2002 if (debug_linux_nat)
2003 {
2004 fprintf_unfiltered (gdb_stdlog,
2005 "WL: waitpid %s received %s\n",
2006 target_pid_to_str (lp->ptid),
2007 status_to_str (status));
2008 }
2009 }
2010
2011 /* Check if the thread has exited. */
2012 if (WIFEXITED (status) || WIFSIGNALED (status))
2013 {
2014 thread_dead = 1;
2015 if (debug_linux_nat)
2016 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2017 target_pid_to_str (lp->ptid));
2018 }
2019
2020 if (thread_dead)
2021 {
e26af52f 2022 exit_lwp (lp);
d6b0e80f
AC
2023 return 0;
2024 }
2025
2026 gdb_assert (WIFSTOPPED (status));
2027
2028 /* Handle GNU/Linux's extended waitstatus for trace events. */
2029 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2030 {
2031 if (debug_linux_nat)
2032 fprintf_unfiltered (gdb_stdlog,
2033 "WL: Handling extended status 0x%06x\n",
2034 status);
3d799a95 2035 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
2036 return wait_lwp (lp);
2037 }
2038
2039 return status;
2040}
2041
9f0bdab8
DJ
2042/* Save the most recent siginfo for LP. This is currently only called
2043 for SIGTRAP; some ports use the si_addr field for
2044 target_stopped_data_address. In the future, it may also be used to
2045 restore the siginfo of requeued signals. */
2046
2047static void
2048save_siginfo (struct lwp_info *lp)
2049{
2050 errno = 0;
2051 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
2052 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
2053
2054 if (errno != 0)
2055 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
2056}
2057
d6b0e80f
AC
2058/* Send a SIGSTOP to LP. */
2059
2060static int
2061stop_callback (struct lwp_info *lp, void *data)
2062{
2063 if (!lp->stopped && !lp->signalled)
2064 {
2065 int ret;
2066
2067 if (debug_linux_nat)
2068 {
2069 fprintf_unfiltered (gdb_stdlog,
2070 "SC: kill %s **<SIGSTOP>**\n",
2071 target_pid_to_str (lp->ptid));
2072 }
2073 errno = 0;
2074 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
2075 if (debug_linux_nat)
2076 {
2077 fprintf_unfiltered (gdb_stdlog,
2078 "SC: lwp kill %d %s\n",
2079 ret,
2080 errno ? safe_strerror (errno) : "ERRNO-OK");
2081 }
2082
2083 lp->signalled = 1;
2084 gdb_assert (lp->status == 0);
2085 }
2086
2087 return 0;
2088}
2089
57380f4e 2090/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2091
2092static int
57380f4e
DJ
2093linux_nat_has_pending_sigint (int pid)
2094{
2095 sigset_t pending, blocked, ignored;
2096 int i;
2097
2098 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2099
2100 if (sigismember (&pending, SIGINT)
2101 && !sigismember (&ignored, SIGINT))
2102 return 1;
2103
2104 return 0;
2105}
2106
2107/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2108
2109static int
2110set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2111{
57380f4e
DJ
2112 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2113 flag to consume the next one. */
2114 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2115 && WSTOPSIG (lp->status) == SIGINT)
2116 lp->status = 0;
2117 else
2118 lp->ignore_sigint = 1;
2119
2120 return 0;
2121}
2122
2123/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2124 This function is called after we know the LWP has stopped; if the LWP
2125 stopped before the expected SIGINT was delivered, then it will never have
2126 arrived. Also, if the signal was delivered to a shared queue and consumed
2127 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2128
57380f4e
DJ
2129static void
2130maybe_clear_ignore_sigint (struct lwp_info *lp)
2131{
2132 if (!lp->ignore_sigint)
2133 return;
2134
2135 if (!linux_nat_has_pending_sigint (GET_LWP (lp->ptid)))
2136 {
2137 if (debug_linux_nat)
2138 fprintf_unfiltered (gdb_stdlog,
2139 "MCIS: Clearing bogus flag for %s\n",
2140 target_pid_to_str (lp->ptid));
2141 lp->ignore_sigint = 0;
2142 }
2143}
2144
2145/* Wait until LP is stopped. */
2146
2147static int
2148stop_wait_callback (struct lwp_info *lp, void *data)
2149{
d6b0e80f
AC
2150 if (!lp->stopped)
2151 {
2152 int status;
2153
2154 status = wait_lwp (lp);
2155 if (status == 0)
2156 return 0;
2157
57380f4e
DJ
2158 if (lp->ignore_sigint && WIFSTOPPED (status)
2159 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2160 {
57380f4e 2161 lp->ignore_sigint = 0;
d6b0e80f
AC
2162
2163 errno = 0;
2164 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2165 if (debug_linux_nat)
2166 fprintf_unfiltered (gdb_stdlog,
57380f4e 2167 "PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)\n",
d6b0e80f
AC
2168 target_pid_to_str (lp->ptid),
2169 errno ? safe_strerror (errno) : "OK");
2170
57380f4e 2171 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2172 }
2173
57380f4e
DJ
2174 maybe_clear_ignore_sigint (lp);
2175
d6b0e80f
AC
2176 if (WSTOPSIG (status) != SIGSTOP)
2177 {
2178 if (WSTOPSIG (status) == SIGTRAP)
2179 {
2180 /* If a LWP other than the LWP that we're reporting an
2181 event for has hit a GDB breakpoint (as opposed to
2182 some random trap signal), then just arrange for it to
2183 hit it again later. We don't keep the SIGTRAP status
2184 and don't forward the SIGTRAP signal to the LWP. We
2185 will handle the current event, eventually we will
2186 resume all LWPs, and this one will get its breakpoint
2187 trap again.
2188
2189 If we do not do this, then we run the risk that the
2190 user will delete or disable the breakpoint, but the
2191 thread will have already tripped on it. */
2192
9f0bdab8
DJ
2193 /* Save the trap's siginfo in case we need it later. */
2194 save_siginfo (lp);
2195
d6b0e80f
AC
2196 /* Now resume this LWP and get the SIGSTOP event. */
2197 errno = 0;
2198 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2199 if (debug_linux_nat)
2200 {
2201 fprintf_unfiltered (gdb_stdlog,
2202 "PTRACE_CONT %s, 0, 0 (%s)\n",
2203 target_pid_to_str (lp->ptid),
2204 errno ? safe_strerror (errno) : "OK");
2205
2206 fprintf_unfiltered (gdb_stdlog,
2207 "SWC: Candidate SIGTRAP event in %s\n",
2208 target_pid_to_str (lp->ptid));
2209 }
710151dd
PA
2210 /* Hold this event/waitstatus while we check to see if
2211 there are any more (we still want to get that SIGSTOP). */
57380f4e 2212 stop_wait_callback (lp, NULL);
710151dd
PA
2213
2214 if (target_can_async_p ())
d6b0e80f 2215 {
710151dd
PA
2216 /* Don't leave a pending wait status in async mode.
2217 Retrigger the breakpoint. */
2218 if (!cancel_breakpoint (lp))
d6b0e80f 2219 {
710151dd
PA
2220 /* There was no gdb breakpoint set at pc. Put
2221 the event back in the queue. */
2222 if (debug_linux_nat)
252fbfc8
PA
2223 fprintf_unfiltered (gdb_stdlog, "\
2224SWC: leaving SIGTRAP in local queue of %s\n", target_pid_to_str (lp->ptid));
2225 push_waitpid (GET_LWP (lp->ptid),
2226 W_STOPCODE (SIGTRAP),
2227 lp->cloned ? __WCLONE : 0);
710151dd
PA
2228 }
2229 }
2230 else
2231 {
2232 /* Hold the SIGTRAP for handling by
2233 linux_nat_wait. */
2234 /* If there's another event, throw it back into the
2235 queue. */
2236 if (lp->status)
2237 {
2238 if (debug_linux_nat)
2239 fprintf_unfiltered (gdb_stdlog,
2240 "SWC: kill %s, %s\n",
2241 target_pid_to_str (lp->ptid),
2242 status_to_str ((int) status));
2243 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
d6b0e80f 2244 }
710151dd
PA
2245 /* Save the sigtrap event. */
2246 lp->status = status;
d6b0e80f 2247 }
d6b0e80f
AC
2248 return 0;
2249 }
2250 else
2251 {
2252 /* The thread was stopped with a signal other than
2253 SIGSTOP, and didn't accidentally trip a breakpoint. */
2254
2255 if (debug_linux_nat)
2256 {
2257 fprintf_unfiltered (gdb_stdlog,
2258 "SWC: Pending event %s in %s\n",
2259 status_to_str ((int) status),
2260 target_pid_to_str (lp->ptid));
2261 }
2262 /* Now resume this LWP and get the SIGSTOP event. */
2263 errno = 0;
2264 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
2265 if (debug_linux_nat)
2266 fprintf_unfiltered (gdb_stdlog,
2267 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
2268 target_pid_to_str (lp->ptid),
2269 errno ? safe_strerror (errno) : "OK");
2270
2271 /* Hold this event/waitstatus while we check to see if
2272 there are any more (we still want to get that SIGSTOP). */
57380f4e 2273 stop_wait_callback (lp, NULL);
710151dd
PA
2274
2275 /* If the lp->status field is still empty, use it to
2276 hold this event. If not, then this event must be
2277 returned to the event queue of the LWP. */
2278 if (lp->status || target_can_async_p ())
d6b0e80f
AC
2279 {
2280 if (debug_linux_nat)
2281 {
2282 fprintf_unfiltered (gdb_stdlog,
2283 "SWC: kill %s, %s\n",
2284 target_pid_to_str (lp->ptid),
2285 status_to_str ((int) status));
2286 }
2287 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
2288 }
710151dd
PA
2289 else
2290 lp->status = status;
d6b0e80f
AC
2291 return 0;
2292 }
2293 }
2294 else
2295 {
2296 /* We caught the SIGSTOP that we intended to catch, so
2297 there's no SIGSTOP pending. */
2298 lp->stopped = 1;
2299 lp->signalled = 0;
2300 }
2301 }
2302
2303 return 0;
2304}
2305
d6b0e80f
AC
2306/* Return non-zero if LP has a wait status pending. */
2307
2308static int
2309status_callback (struct lwp_info *lp, void *data)
2310{
2311 /* Only report a pending wait status if we pretend that this has
2312 indeed been resumed. */
2313 return (lp->status != 0 && lp->resumed);
2314}
2315
2316/* Return non-zero if LP isn't stopped. */
2317
2318static int
2319running_callback (struct lwp_info *lp, void *data)
2320{
2321 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
2322}
2323
2324/* Count the LWP's that have had events. */
2325
2326static int
2327count_events_callback (struct lwp_info *lp, void *data)
2328{
2329 int *count = data;
2330
2331 gdb_assert (count != NULL);
2332
e09490f1
DJ
2333 /* Count only resumed LWPs that have a SIGTRAP event pending. */
2334 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2335 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2336 (*count)++;
2337
2338 return 0;
2339}
2340
2341/* Select the LWP (if any) that is currently being single-stepped. */
2342
2343static int
2344select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2345{
2346 if (lp->step && lp->status != 0)
2347 return 1;
2348 else
2349 return 0;
2350}
2351
2352/* Select the Nth LWP that has had a SIGTRAP event. */
2353
2354static int
2355select_event_lwp_callback (struct lwp_info *lp, void *data)
2356{
2357 int *selector = data;
2358
2359 gdb_assert (selector != NULL);
2360
e09490f1
DJ
2361 /* Select only resumed LWPs that have a SIGTRAP event pending. */
2362 if (lp->status != 0 && lp->resumed
d6b0e80f
AC
2363 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
2364 if ((*selector)-- == 0)
2365 return 1;
2366
2367 return 0;
2368}
2369
710151dd
PA
2370static int
2371cancel_breakpoint (struct lwp_info *lp)
2372{
2373 /* Arrange for a breakpoint to be hit again later. We don't keep
2374 the SIGTRAP status and don't forward the SIGTRAP signal to the
2375 LWP. We will handle the current event, eventually we will resume
2376 this LWP, and this breakpoint will trap again.
2377
2378 If we do not do this, then we run the risk that the user will
2379 delete or disable the breakpoint, but the LWP will have already
2380 tripped on it. */
2381
515630c5
UW
2382 struct regcache *regcache = get_thread_regcache (lp->ptid);
2383 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2384 CORE_ADDR pc;
2385
2386 pc = regcache_read_pc (regcache) - gdbarch_decr_pc_after_break (gdbarch);
2387 if (breakpoint_inserted_here_p (pc))
710151dd
PA
2388 {
2389 if (debug_linux_nat)
2390 fprintf_unfiltered (gdb_stdlog,
2391 "CB: Push back breakpoint for %s\n",
2392 target_pid_to_str (lp->ptid));
2393
2394 /* Back up the PC if necessary. */
515630c5
UW
2395 if (gdbarch_decr_pc_after_break (gdbarch))
2396 regcache_write_pc (regcache, pc);
2397
710151dd
PA
2398 return 1;
2399 }
2400 return 0;
2401}
2402
d6b0e80f
AC
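/* Callback for iterate_over_lwps: if LP (other than the event LWP
   passed in DATA) stopped for a GDB breakpoint, cancel the hit so
   that the breakpoint traps again once LP is resumed.  */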
2403static int
2404cancel_breakpoints_callback (struct lwp_info *lp, void *data)
2405{
2406 struct lwp_info *event_lp = data;
2407
2408 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
2409 if (lp == event_lp)
2410 return 0;
2411
2412 /* If a LWP other than the LWP that we're reporting an event for has
2413 hit a GDB breakpoint (as opposed to some random trap signal),
2414 then just arrange for it to hit it again later. We don't keep
2415 the SIGTRAP status and don't forward the SIGTRAP signal to the
2416 LWP. We will handle the current event, eventually we will resume
2417 all LWPs, and this one will get its breakpoint trap again.
2418
2419 If we do not do this, then we run the risk that the user will
2420 delete or disable the breakpoint, but the LWP will have already
2421 tripped on it. */
2422
2423 if (lp->status != 0
2424 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
710151dd
PA
2425 && cancel_breakpoint (lp))
2426 /* Throw away the SIGTRAP. */
2427 lp->status = 0;
d6b0e80f
AC
2428
2429 return 0;
2430}
2431
2432/* Select one LWP out of those that have events pending. */
2433
2434static void
2435select_event_lwp (struct lwp_info **orig_lp, int *status)
2436{
2437 int num_events = 0;
2438 int random_selector;
2439 struct lwp_info *event_lp;
2440
ac264b3b 2441 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2442 (*orig_lp)->status = *status;
2443
2444 /* Give preference to any LWP that is being single-stepped. */
2445 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2446 if (event_lp != NULL)
2447 {
2448 if (debug_linux_nat)
2449 fprintf_unfiltered (gdb_stdlog,
2450 "SEL: Select single-step %s\n",
2451 target_pid_to_str (event_lp->ptid));
2452 }
2453 else
2454 {
2455 /* No single-stepping LWP. Select one at random, out of those
2456 which have had SIGTRAP events. */
2457
2458 /* First see how many SIGTRAP events we have. */
2459 iterate_over_lwps (count_events_callback, &num_events);
2460
2461 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2462 random_selector = (int)
2463 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2464
2465 if (debug_linux_nat && num_events > 1)
2466 fprintf_unfiltered (gdb_stdlog,
2467 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2468 num_events, random_selector);
2469
2470 event_lp = iterate_over_lwps (select_event_lwp_callback,
2471 &random_selector);
2472 }
2473
2474 if (event_lp != NULL)
2475 {
2476 /* Switch the event LWP. */
2477 *orig_lp = event_lp;
2478 *status = event_lp->status;
2479 }
2480
2481 /* Flush the wait status for the event LWP. */
2482 (*orig_lp)->status = 0;
2483}
2484
2485/* Return non-zero if LP has been resumed. */
2486
2487static int
2488resumed_callback (struct lwp_info *lp, void *data)
2489{
2490 return lp->resumed;
2491}
2492
d6b0e80f
AC
2493/* Stop an active thread, verify it still exists, then resume it. */
2494
2495static int
2496stop_and_resume_callback (struct lwp_info *lp, void *data)
2497{
2498 struct lwp_info *ptr;
2499
2500 if (!lp->stopped && !lp->signalled)
2501 {
2502 stop_callback (lp, NULL);
2503 stop_wait_callback (lp, NULL);
2504 /* Resume if the lwp still exists. */
2505 for (ptr = lwp_list; ptr; ptr = ptr->next)
2506 if (lp == ptr)
2507 {
2508 resume_callback (lp, NULL);
2509 resume_set_callback (lp, NULL);
2510 }
2511 }
2512 return 0;
2513}
2514
02f3fc28 2515/* Check if we should go on and pass this event to common code.
fa2c6a57 2516 Return the affected lwp if we are, or NULL otherwise. */
02f3fc28
PA
2517static struct lwp_info *
2518linux_nat_filter_event (int lwpid, int status, int options)
2519{
2520 struct lwp_info *lp;
2521
2522 lp = find_lwp_pid (pid_to_ptid (lwpid));
2523
2524 /* Check for stop events reported by a process we didn't already
2525 know about - anything not already in our LWP list.
2526
2527 If we're expecting to receive stopped processes after
2528 fork, vfork, and clone events, then we'll just add the
2529 new one to our list and go back to waiting for the event
2530 to be reported - the stopped process might be returned
2531 from waitpid before or after the event is. */
2532 if (WIFSTOPPED (status) && !lp)
2533 {
2534 linux_record_stopped_pid (lwpid, status);
2535 return NULL;
2536 }
2537
2538 /* Make sure we don't report an event for the exit of an LWP not in
2539 our list, i.e. not part of the current process. This can happen
 2540 if we detach from a program we originally forked and then it
2541 exits. */
2542 if (!WIFSTOPPED (status) && !lp)
2543 return NULL;
2544
2545 /* NOTE drow/2003-06-17: This code seems to be meant for debugging
2546 CLONE_PTRACE processes which do not use the thread library -
2547 otherwise we wouldn't find the new LWP this way. That doesn't
2548 currently work, and the following code is currently unreachable
2549 due to the two blocks above. If it's fixed some day, this code
2550 should be broken out into a function so that we can also pick up
2551 LWPs from the new interface. */
2552 if (!lp)
2553 {
2554 lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
2555 if (options & __WCLONE)
2556 lp->cloned = 1;
2557
2558 gdb_assert (WIFSTOPPED (status)
2559 && WSTOPSIG (status) == SIGSTOP);
2560 lp->signalled = 1;
2561
2562 if (!in_thread_list (inferior_ptid))
2563 {
2564 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
2565 GET_PID (inferior_ptid));
2566 add_thread (inferior_ptid);
2567 }
2568
2569 add_thread (lp->ptid);
2570 }
2571
2572 /* Save the trap's siginfo in case we need it later. */
2573 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
2574 save_siginfo (lp);
2575
2576 /* Handle GNU/Linux's extended waitstatus for trace events. */
2577 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
2578 {
2579 if (debug_linux_nat)
2580 fprintf_unfiltered (gdb_stdlog,
2581 "LLW: Handling extended status 0x%06x\n",
2582 status);
2583 if (linux_handle_extended_wait (lp, status, 0))
2584 return NULL;
2585 }
2586
2587 /* Check if the thread has exited. */
2588 if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
2589 {
2590 /* If this is the main thread, we must stop all threads and
2591 verify if they are still alive. This is because in the nptl
2592 thread model, there is no signal issued for exiting LWPs
2593 other than the main thread. We only get the main thread exit
2594 signal once all child threads have already exited. If we
2595 stop all the threads and use the stop_wait_callback to check
2596 if they have exited we can determine whether this signal
2597 should be ignored or whether it means the end of the debugged
2598 application, regardless of which threading model is being
2599 used. */
2600 if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
2601 {
2602 lp->stopped = 1;
2603 iterate_over_lwps (stop_and_resume_callback, NULL);
2604 }
2605
2606 if (debug_linux_nat)
2607 fprintf_unfiltered (gdb_stdlog,
2608 "LLW: %s exited.\n",
2609 target_pid_to_str (lp->ptid));
2610
2611 exit_lwp (lp);
2612
2613 /* If there is at least one more LWP, then the exit signal was
2614 not the end of the debugged application and should be
2615 ignored. */
2616 if (num_lwps > 0)
4c28f408 2617 return NULL;
02f3fc28
PA
2618 }
2619
2620 /* Check if the current LWP has previously exited. In the nptl
2621 thread model, LWPs other than the main thread do not issue
2622 signals when they exit so we must check whenever the thread has
2623 stopped. A similar check is made in stop_wait_callback(). */
2624 if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
2625 {
2626 if (debug_linux_nat)
2627 fprintf_unfiltered (gdb_stdlog,
2628 "LLW: %s exited.\n",
2629 target_pid_to_str (lp->ptid));
2630
2631 exit_lwp (lp);
2632
2633 /* Make sure there is at least one thread running. */
2634 gdb_assert (iterate_over_lwps (running_callback, NULL));
2635
2636 /* Discard the event. */
2637 return NULL;
2638 }
2639
2640 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2641 an attempt to stop an LWP. */
2642 if (lp->signalled
2643 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2644 {
2645 if (debug_linux_nat)
2646 fprintf_unfiltered (gdb_stdlog,
2647 "LLW: Delayed SIGSTOP caught for %s.\n",
2648 target_pid_to_str (lp->ptid));
2649
2650 /* This is a delayed SIGSTOP. */
2651 lp->signalled = 0;
2652
2653 registers_changed ();
2654
2655 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2656 lp->step, TARGET_SIGNAL_0);
2657 if (debug_linux_nat)
2658 fprintf_unfiltered (gdb_stdlog,
2659 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
2660 lp->step ?
2661 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2662 target_pid_to_str (lp->ptid));
2663
2664 lp->stopped = 0;
2665 gdb_assert (lp->resumed);
2666
2667 /* Discard the event. */
2668 return NULL;
2669 }
2670
57380f4e
DJ
2671 /* Make sure we don't report a SIGINT that we have already displayed
2672 for another thread. */
2673 if (lp->ignore_sigint
2674 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2675 {
2676 if (debug_linux_nat)
2677 fprintf_unfiltered (gdb_stdlog,
2678 "LLW: Delayed SIGINT caught for %s.\n",
2679 target_pid_to_str (lp->ptid));
2680
2681 /* This is a delayed SIGINT. */
2682 lp->ignore_sigint = 0;
2683
2684 registers_changed ();
2685 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2686 lp->step, TARGET_SIGNAL_0);
2687 if (debug_linux_nat)
2688 fprintf_unfiltered (gdb_stdlog,
2689 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2690 lp->step ?
2691 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2692 target_pid_to_str (lp->ptid));
2693
2694 lp->stopped = 0;
2695 gdb_assert (lp->resumed);
2696
2697 /* Discard the event. */
2698 return NULL;
2699 }
2700
02f3fc28
PA
2701 /* An interesting event. */
2702 gdb_assert (lp);
2703 return lp;
2704}
2705
b84876c2
PA
2706/* Get the events stored in the pipe into the local queue, so they are
2707 accessible to queued_waitpid. We need to do this, since it is not
2708 always the case that the event at the head of the pipe is the event
2709 we want. */
2710
2711static void
2712pipe_to_local_event_queue (void)
2713{
2714 if (debug_linux_nat_async)
2715 fprintf_unfiltered (gdb_stdlog,
2716 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2717 linux_nat_num_queued_events);
2718 while (linux_nat_num_queued_events)
2719 {
2720 int lwpid, status, options;
b84876c2 2721 lwpid = linux_nat_event_pipe_pop (&status, &options);
b84876c2
PA
2722 gdb_assert (lwpid > 0);
2723 push_waitpid (lwpid, status, options);
2724 }
2725}
2726
2727/* Get the unprocessed events stored in the local queue back into the
2728 pipe, so the event loop realizes there's something else to
2729 process. */
2730
2731static void
2732local_event_queue_to_pipe (void)
2733{
2734 struct waitpid_result *w = waitpid_queue;
2735 while (w)
2736 {
2737 struct waitpid_result *next = w->next;
2738 linux_nat_event_pipe_push (w->pid,
2739 w->status,
2740 w->options);
2741 xfree (w);
2742 w = next;
2743 }
2744 waitpid_queue = NULL;
2745
2746 if (debug_linux_nat_async)
2747 fprintf_unfiltered (gdb_stdlog,
2748 "LEQTP: linux_nat_num_queued_events(%d)\n",
2749 linux_nat_num_queued_events);
2750}
2751
d6b0e80f
AC
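/* Wait for something interesting to report.  Collect events from
   all LWPs (or from the LWP matching PTID), discard the ones this
   layer handles internally, pick one event LWP, stop the remaining
   LWPs in all-stop mode, and fill in *OURSTATUS.  */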
2752static ptid_t
2753linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
2754{
2755 struct lwp_info *lp = NULL;
2756 int options = 0;
2757 int status = 0;
2758 pid_t pid = PIDGET (ptid);
d6b0e80f 2759
b84876c2
PA
2760 if (debug_linux_nat_async)
2761 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
2762
f973ed9c
DJ
2763 /* The first time we get here after starting a new inferior, we may
2764 not have added it to the LWP list yet - this is the earliest
2765 moment at which we know its PID. */
2766 if (num_lwps == 0)
2767 {
2768 gdb_assert (!is_lwp (inferior_ptid));
2769
27c9d204
PA
2770 /* Upgrade the main thread's ptid. */
2771 thread_change_ptid (inferior_ptid,
2772 BUILD_LWP (GET_PID (inferior_ptid),
2773 GET_PID (inferior_ptid)));
2774
f973ed9c
DJ
2775 lp = add_lwp (inferior_ptid);
2776 lp->resumed = 1;
2777 }
2778
84e46146
PA
2779 /* Block events while we're here. */
2780 linux_nat_async_events (sigchld_sync);
d6b0e80f
AC
2781
2782retry:
2783
f973ed9c
DJ
2784 /* Make sure there is at least one LWP that has been resumed. */
2785 gdb_assert (iterate_over_lwps (resumed_callback, NULL));
d6b0e80f
AC
2786
2787 /* First check if there is a LWP with a wait status pending. */
2788 if (pid == -1)
2789 {
2790 /* Any LWP that's been resumed will do. */
2791 lp = iterate_over_lwps (status_callback, NULL);
2792 if (lp)
2793 {
710151dd
PA
2794 if (target_can_async_p ())
2795 internal_error (__FILE__, __LINE__,
2796 "Found an LWP with a pending status in async mode.");
2797
d6b0e80f
AC
2798 status = lp->status;
2799 lp->status = 0;
2800
2801 if (debug_linux_nat && status)
2802 fprintf_unfiltered (gdb_stdlog,
2803 "LLW: Using pending wait status %s for %s.\n",
2804 status_to_str (status),
2805 target_pid_to_str (lp->ptid));
2806 }
2807
b84876c2 2808 /* But if we don't find one, we'll have to wait, and check both
d6b0e80f
AC
2809 cloned and uncloned processes. We start with the cloned
2810 processes. */
2811 options = __WCLONE | WNOHANG;
2812 }
2813 else if (is_lwp (ptid))
2814 {
2815 if (debug_linux_nat)
2816 fprintf_unfiltered (gdb_stdlog,
2817 "LLW: Waiting for specific LWP %s.\n",
2818 target_pid_to_str (ptid));
2819
2820 /* We have a specific LWP to check. */
2821 lp = find_lwp_pid (ptid);
2822 gdb_assert (lp);
2823 status = lp->status;
2824 lp->status = 0;
2825
2826 if (debug_linux_nat && status)
2827 fprintf_unfiltered (gdb_stdlog,
2828 "LLW: Using pending wait status %s for %s.\n",
2829 status_to_str (status),
2830 target_pid_to_str (lp->ptid));
2831
2832 /* If we have to wait, take into account whether PID is a cloned
2833 process or not. And we have to convert it to something that
2834 the layer beneath us can understand. */
2835 options = lp->cloned ? __WCLONE : 0;
2836 pid = GET_LWP (ptid);
2837 }
2838
2839 if (status && lp->signalled)
2840 {
2841 /* A pending SIGSTOP may interfere with the normal stream of
2842 events. In a typical case where interference is a problem,
2843 we have a SIGSTOP signal pending for LWP A while
2844 single-stepping it, encounter an event in LWP B, and take the
2845 pending SIGSTOP while trying to stop LWP A. After processing
2846 the event in LWP B, LWP A is continued, and we'll never see
2847 the SIGTRAP associated with the last time we were
2848 single-stepping LWP A. */
2849
2850 /* Resume the thread. It should halt immediately returning the
2851 pending SIGSTOP. */
2852 registers_changed ();
10d6c8cd
DJ
2853 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2854 lp->step, TARGET_SIGNAL_0);
d6b0e80f
AC
2855 if (debug_linux_nat)
2856 fprintf_unfiltered (gdb_stdlog,
2857 "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
2858 lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2859 target_pid_to_str (lp->ptid));
2860 lp->stopped = 0;
2861 gdb_assert (lp->resumed);
2862
2863 /* This should catch the pending SIGSTOP. */
2864 stop_wait_callback (lp, NULL);
2865 }
2866
b84876c2
PA
2867 if (!target_can_async_p ())
2868 {
2869 /* Causes SIGINT to be passed on to the attached process. */
2870 set_sigint_trap ();
2871 set_sigio_trap ();
2872 }
d6b0e80f
AC
2873
2874 while (status == 0)
2875 {
2876 pid_t lwpid;
2877
b84876c2
PA
2878 if (target_can_async_p ())
2879 /* In async mode, don't ever block. Only look at the locally
2880 queued events. */
2881 lwpid = queued_waitpid (pid, &status, options);
2882 else
2883 lwpid = my_waitpid (pid, &status, options);
2884
d6b0e80f
AC
2885 if (lwpid > 0)
2886 {
2887 gdb_assert (pid == -1 || lwpid == pid);
2888
2889 if (debug_linux_nat)
2890 {
2891 fprintf_unfiltered (gdb_stdlog,
2892 "LLW: waitpid %ld received %s\n",
2893 (long) lwpid, status_to_str (status));
2894 }
2895
02f3fc28 2896 lp = linux_nat_filter_event (lwpid, status, options);
d6b0e80f
AC
2897 if (!lp)
2898 {
02f3fc28 2899 /* A discarded event. */
d6b0e80f
AC
2900 status = 0;
2901 continue;
2902 }
2903
2904 break;
2905 }
2906
2907 if (pid == -1)
2908 {
2909 /* Alternate between checking cloned and uncloned processes. */
2910 options ^= __WCLONE;
2911
b84876c2
PA
2912 /* And every time we have checked both:
2913 In async mode, return to event loop;
2914 In sync mode, suspend waiting for a SIGCHLD signal. */
d6b0e80f 2915 if (options & __WCLONE)
b84876c2
PA
2916 {
2917 if (target_can_async_p ())
2918 {
2919 /* No interesting event. */
2920 ourstatus->kind = TARGET_WAITKIND_IGNORE;
2921
2922 /* Get ready for the next event. */
2923 target_async (inferior_event_handler, 0);
2924
2925 if (debug_linux_nat_async)
2926 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
2927
2928 return minus_one_ptid;
2929 }
2930
2931 sigsuspend (&suspend_mask);
2932 }
d6b0e80f
AC
2933 }
2934
2935 /* We shouldn't end up here unless we want to try again. */
2936 gdb_assert (status == 0);
2937 }
2938
b84876c2
PA
2939 if (!target_can_async_p ())
2940 {
2941 clear_sigio_trap ();
2942 clear_sigint_trap ();
2943 }
d6b0e80f
AC
2944
2945 gdb_assert (lp);
2946
2947 /* Don't report signals that GDB isn't interested in, such as
2948 signals that are neither printed nor stopped upon. Stopping all
2949 threads can be a bit time-consuming so if we want decent
2950 performance with heavily multi-threaded programs, especially when
2951 they're using a high frequency timer, we'd better avoid it if we
2952 can. */
2953
2954 if (WIFSTOPPED (status))
2955 {
2956 int signo = target_signal_from_host (WSTOPSIG (status));
d6b48e9c
PA
2957 struct inferior *inf;
2958
2959 inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2960 gdb_assert (inf);
d6b0e80f 2961
d6b48e9c
PA
2962 /* Defer to common code if we get a signal while
2963 single-stepping, since that may need special care, e.g. to
2964 skip the signal handler, or, if we're gaining control of the
2965 inferior. */
d539ed7e 2966 if (!lp->step
d6b48e9c 2967 && inf->stop_soon == NO_STOP_QUIETLY
d539ed7e 2968 && signal_stop_state (signo) == 0
d6b0e80f
AC
2969 && signal_print_state (signo) == 0
2970 && signal_pass_state (signo) == 1)
2971 {
 2972 /* FIXME: kettenis/2001-06-06: Should we resume all threads
2973 here? It is not clear we should. GDB may not expect
2974 other threads to run. On the other hand, not resuming
2975 newly attached threads may cause an unwanted delay in
2976 getting them running. */
2977 registers_changed ();
10d6c8cd
DJ
2978 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
2979 lp->step, signo);
d6b0e80f
AC
2980 if (debug_linux_nat)
2981 fprintf_unfiltered (gdb_stdlog,
2982 "LLW: %s %s, %s (preempt 'handle')\n",
2983 lp->step ?
2984 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2985 target_pid_to_str (lp->ptid),
2986 signo ? strsignal (signo) : "0");
2987 lp->stopped = 0;
2988 status = 0;
2989 goto retry;
2990 }
2991
1ad15515 2992 if (!non_stop)
d6b0e80f 2993 {
1ad15515
PA
2994 /* Only do the below in all-stop, as we currently use SIGINT
2995 to implement target_stop (see linux_nat_stop) in
2996 non-stop. */
2997 if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
2998 {
2999 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3000 forwarded to the entire process group, that is, all LWPs
3001 will receive it - unless they're using CLONE_THREAD to
3002 share signals. Since we only want to report it once, we
3003 mark it as ignored for all LWPs except this one. */
3004 iterate_over_lwps (set_ignore_sigint, NULL);
3005 lp->ignore_sigint = 0;
3006 }
3007 else
3008 maybe_clear_ignore_sigint (lp);
d6b0e80f
AC
3009 }
3010 }
3011
3012 /* This LWP is stopped now. */
3013 lp->stopped = 1;
3014
3015 if (debug_linux_nat)
3016 fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
3017 status_to_str (status), target_pid_to_str (lp->ptid));
3018
4c28f408
PA
3019 if (!non_stop)
3020 {
3021 /* Now stop all other LWP's ... */
3022 iterate_over_lwps (stop_callback, NULL);
3023
3024 /* ... and wait until all of them have reported back that
3025 they're no longer running. */
57380f4e 3026 iterate_over_lwps (stop_wait_callback, NULL);
4c28f408
PA
3027
3028 /* If we're not waiting for a specific LWP, choose an event LWP
3029 from among those that have had events. Giving equal priority
3030 to all LWPs that have had events helps prevent
3031 starvation. */
3032 if (pid == -1)
3033 select_event_lwp (&lp, &status);
3034 }
d6b0e80f
AC
3035
3036 /* Now that we've selected our final event LWP, cancel any
3037 breakpoints in other LWPs that have hit a GDB breakpoint. See
3038 the comment in cancel_breakpoints_callback to find out why. */
3039 iterate_over_lwps (cancel_breakpoints_callback, lp);
3040
d6b0e80f
AC
3041 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
3042 {
d6b0e80f
AC
3043 if (debug_linux_nat)
3044 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3045 "LLW: trap ptid is %s.\n",
3046 target_pid_to_str (lp->ptid));
d6b0e80f 3047 }
d6b0e80f
AC
3048
3049 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3050 {
3051 *ourstatus = lp->waitstatus;
3052 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3053 }
3054 else
3055 store_waitstatus (ourstatus, status);
3056
b84876c2
PA
3057 /* Get ready for the next event. */
3058 if (target_can_async_p ())
3059 target_async (inferior_event_handler, 0);
3060
3061 if (debug_linux_nat_async)
3062 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3063
f973ed9c 3064 return lp->ptid;
d6b0e80f
AC
3065}
3066
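/* Callback for iterate_over_lwps: send PTRACE_KILL to LP.  */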
3067static int
3068kill_callback (struct lwp_info *lp, void *data)
3069{
3070 errno = 0;
3071 ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
3072 if (debug_linux_nat)
3073 fprintf_unfiltered (gdb_stdlog,
3074 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3075 target_pid_to_str (lp->ptid),
3076 errno ? safe_strerror (errno) : "OK");
3077
3078 return 0;
3079}
3080
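/* Callback for iterate_over_lwps: wait until LP has really exited,
   flushing any pending events so they cannot disturb a following
   debug session.  */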
3081static int
3082kill_wait_callback (struct lwp_info *lp, void *data)
3083{
3084 pid_t pid;
3085
3086 /* We must make sure that there are no pending events (delayed
3087 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3088 program doesn't interfere with any following debugging session. */
3089
3090 /* For cloned processes we must check both with __WCLONE and
3091 without, since the exit status of a cloned process isn't reported
3092 with __WCLONE. */
3093 if (lp->cloned)
3094 {
3095 do
3096 {
58aecb61 3097 pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
e85a822c 3098 if (pid != (pid_t) -1)
d6b0e80f 3099 {
e85a822c
DJ
3100 if (debug_linux_nat)
3101 fprintf_unfiltered (gdb_stdlog,
3102 "KWC: wait %s received unknown.\n",
3103 target_pid_to_str (lp->ptid));
3104 /* The Linux kernel sometimes fails to kill a thread
3105 completely after PTRACE_KILL; that goes from the stop
3106 point in do_fork out to the one in
 3107 get_signal_to_deliver and waits again. So kill it
3108 again. */
3109 kill_callback (lp, NULL);
d6b0e80f
AC
3110 }
3111 }
3112 while (pid == GET_LWP (lp->ptid));
3113
3114 gdb_assert (pid == -1 && errno == ECHILD);
3115 }
3116
3117 do
3118 {
58aecb61 3119 pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
e85a822c 3120 if (pid != (pid_t) -1)
d6b0e80f 3121 {
e85a822c
DJ
3122 if (debug_linux_nat)
3123 fprintf_unfiltered (gdb_stdlog,
3124 "KWC: wait %s received unk.\n",
3125 target_pid_to_str (lp->ptid));
3126 /* See the call to kill_callback above. */
3127 kill_callback (lp, NULL);
d6b0e80f
AC
3128 }
3129 }
3130 while (pid == GET_LWP (lp->ptid));
3131
3132 gdb_assert (pid == -1 && errno == ECHILD);
3133 return 0;
3134}
3135
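/* Kill the inferior.  If we are stopped at an unfollowed fork or
   vfork, kill the other task first.  Then either kill all known
   forks, or stop and kill every LWP and flush their exit events,
   and finally mourn the inferior.  */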
3136static void
3137linux_nat_kill (void)
3138{
f973ed9c
DJ
3139 struct target_waitstatus last;
3140 ptid_t last_ptid;
3141 int status;
d6b0e80f 3142
b84876c2
PA
3143 if (target_can_async_p ())
3144 target_async (NULL, 0);
3145
f973ed9c
DJ
3146 /* If we're stopped while forking and we haven't followed yet,
3147 kill the other task. We need to do this first because the
3148 parent will be sleeping if this is a vfork. */
d6b0e80f 3149
f973ed9c 3150 get_last_target_status (&last_ptid, &last);
d6b0e80f 3151
f973ed9c
DJ
3152 if (last.kind == TARGET_WAITKIND_FORKED
3153 || last.kind == TARGET_WAITKIND_VFORKED)
3154 {
3a3e9ee3 3155 ptrace (PT_KILL, PIDGET (last.value.related_pid), 0, 0);
f973ed9c
DJ
3156 wait (&status);
3157 }
3158
3159 if (forks_exist_p ())
b84876c2
PA
3160 {
3161 linux_fork_killall ();
3162 drain_queued_events (-1);
3163 }
f973ed9c
DJ
3164 else
3165 {
4c28f408
PA
3166 /* Stop all threads before killing them, since ptrace requires
 3167 that the thread is stopped to successfully PTRACE_KILL. */
3168 iterate_over_lwps (stop_callback, NULL);
3169 /* ... and wait until all of them have reported back that
3170 they're no longer running. */
3171 iterate_over_lwps (stop_wait_callback, NULL);
3172
f973ed9c
DJ
3173 /* Kill all LWP's ... */
3174 iterate_over_lwps (kill_callback, NULL);
3175
3176 /* ... and wait until we've flushed all events. */
3177 iterate_over_lwps (kill_wait_callback, NULL);
3178 }
3179
3180 target_mourn_inferior ();
d6b0e80f
AC
3181}
3182
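/* Clean up after the inferior exits or is killed.  Discard the LWP
   list and defer to the layer beneath us, unless other forks remain
   to be debugged, in which case switch to one of them instead.  */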
3183static void
136d6dae 3184linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3185{
d6b0e80f
AC
3186 /* Destroy LWP info; it's no longer valid. */
3187 init_lwp_list ();
3188
f973ed9c 3189 if (! forks_exist_p ())
b84876c2
PA
3190 {
3191 /* Normal case, no other forks available. */
3192 if (target_can_async_p ())
3193 linux_nat_async (NULL, 0);
136d6dae 3194 linux_ops->to_mourn_inferior (ops);
b84876c2 3195 }
f973ed9c
DJ
3196 else
3197 /* Multi-fork case. The current inferior_ptid has exited, but
3198 there are other viable forks to debug. Delete the exiting
3199 one and context-switch to the first available. */
3200 linux_fork_mourn_inferior ();
d6b0e80f
AC
3201}
3202
10d6c8cd
DJ
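/* Transfer data of OBJECT to or from the inferior.  Temporarily map
   inferior_ptid from an LWP ptid to the plain ptid the layer beneath
   us expects, then defer to it.  */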
3203static LONGEST
3204linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3205 const char *annex, gdb_byte *readbuf,
3206 const gdb_byte *writebuf,
3207 ULONGEST offset, LONGEST len)
d6b0e80f
AC
3208{
3209 struct cleanup *old_chain = save_inferior_ptid ();
10d6c8cd 3210 LONGEST xfer;
d6b0e80f
AC
3211
3212 if (is_lwp (inferior_ptid))
3213 inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
3214
10d6c8cd
DJ
3215 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
3216 offset, len);
d6b0e80f
AC
3217
3218 do_cleanups (old_chain);
3219 return xfer;
3220}
3221
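/* Return non-zero if the LWP identified by PTID is still alive.  */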
3222static int
3223linux_nat_thread_alive (ptid_t ptid)
3224{
4c28f408
PA
3225 int err;
3226
d6b0e80f
AC
3227 gdb_assert (is_lwp (ptid));
3228
4c28f408
PA
 3229 /* Send signal 0 instead of any ptrace request, because ptracing a
3230 running thread errors out claiming that the thread doesn't
3231 exist. */
3232 err = kill_lwp (GET_LWP (ptid), 0);
3233
d6b0e80f
AC
3234 if (debug_linux_nat)
3235 fprintf_unfiltered (gdb_stdlog,
4c28f408 3236 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3237 target_pid_to_str (ptid),
4c28f408 3238 err ? safe_strerror (err) : "OK");
9c0dd46b 3239
4c28f408 3240 if (err != 0)
d6b0e80f
AC
3241 return 0;
3242
3243 return 1;
3244}
3245
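/* Return a string such as "LWP 1234" describing PTID for user
   display, falling back to the normal representation when the
   inferior is a plain, single-threaded process.  */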
3246static char *
3247linux_nat_pid_to_str (ptid_t ptid)
3248{
3249 static char buf[64];
3250
a0ef4274
DJ
3251 if (is_lwp (ptid)
3252 && ((lwp_list && lwp_list->next)
3253 || GET_PID (ptid) != GET_LWP (ptid)))
d6b0e80f
AC
3254 {
3255 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
3256 return buf;
3257 }
3258
3259 return normal_pid_to_str (ptid);
3260}
3261
d6b0e80f
AC
3262static void
3263sigchld_handler (int signo)
3264{
c6ebd6cf 3265 if (target_async_permitted
84e46146 3266 && linux_nat_async_events_state != sigchld_sync
b84876c2
PA
3267 && signo == SIGCHLD)
3268 /* It is *always* a bug to hit this. */
3269 internal_error (__FILE__, __LINE__,
3270 "sigchld_handler called when async events are enabled");
3271
d6b0e80f
AC
3272 /* Do nothing. The only reason for this handler is that it allows
3273 us to use sigsuspend in linux_nat_wait above to wait for the
3274 arrival of a SIGCHLD. */
3275}
3276
dba24537
AC
 3277/* Accepts an integer PID; returns a string representing a file that
3278 can be opened to get the symbols for the child process. */
3279
6d8fd2b7
UW
3280static char *
3281linux_child_pid_to_exec_file (int pid)
dba24537
AC
3282{
3283 char *name1, *name2;
3284
3285 name1 = xmalloc (MAXPATHLEN);
3286 name2 = xmalloc (MAXPATHLEN);
3287 make_cleanup (xfree, name1);
3288 make_cleanup (xfree, name2);
3289 memset (name2, 0, MAXPATHLEN);
3290
3291 sprintf (name1, "/proc/%d/exe", pid);
3292 if (readlink (name1, name2, MAXPATHLEN) > 0)
3293 return name2;
3294 else
3295 return name1;
3296}
3297
3298/* Service function for corefiles and info proc. */
3299
3300static int
3301read_mapping (FILE *mapfile,
3302 long long *addr,
3303 long long *endaddr,
3304 char *permissions,
3305 long long *offset,
3306 char *device, long long *inode, char *filename)
3307{
3308 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
3309 addr, endaddr, permissions, offset, device, inode);
3310
2e14c2ea
MS
3311 filename[0] = '\0';
3312 if (ret > 0 && ret != EOF)
dba24537
AC
3313 {
3314 /* Eat everything up to EOL for the filename. This will prevent
3315 weird filenames (such as one with embedded whitespace) from
 3316 confusing this code. It also makes this code more robust with
3317 respect to annotations the kernel may add after the filename.
3318
3319 Note the filename is used for informational purposes
3320 only. */
3321 ret += fscanf (mapfile, "%[^\n]\n", filename);
3322 }
2e14c2ea 3323
dba24537
AC
3324 return (ret != 0 && ret != EOF);
3325}
3326
3327/* Fills the "to_find_memory_regions" target vector. Lists the memory
3328 regions in the inferior for a corefile. */
3329
3330static int
3331linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
3332 unsigned long,
3333 int, int, int, void *), void *obfd)
3334{
3335 long long pid = PIDGET (inferior_ptid);
3336 char mapsfilename[MAXPATHLEN];
3337 FILE *mapsfile;
3338 long long addr, endaddr, size, offset, inode;
3339 char permissions[8], device[8], filename[MAXPATHLEN];
3340 int read, write, exec;
3341 int ret;
7c8a8b04 3342 struct cleanup *cleanup;
dba24537
AC
3343
3344 /* Compose the filename for the /proc memory map, and open it. */
3345 sprintf (mapsfilename, "/proc/%lld/maps", pid);
3346 if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
8a3fe4f8 3347 error (_("Could not open %s."), mapsfilename);
7c8a8b04 3348 cleanup = make_cleanup_fclose (mapsfile);
dba24537
AC
3349
3350 if (info_verbose)
3351 fprintf_filtered (gdb_stdout,
3352 "Reading memory regions from %s\n", mapsfilename);
3353
3354 /* Now iterate until end-of-file. */
3355 while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
3356 &offset, &device[0], &inode, &filename[0]))
3357 {
3358 size = endaddr - addr;
3359
3360 /* Get the segment's permissions. */
3361 read = (strchr (permissions, 'r') != 0);
3362 write = (strchr (permissions, 'w') != 0);
3363 exec = (strchr (permissions, 'x') != 0);
3364
3365 if (info_verbose)
3366 {
3367 fprintf_filtered (gdb_stdout,
3368 "Save segment, %lld bytes at 0x%s (%c%c%c)",
3369 size, paddr_nz (addr),
3370 read ? 'r' : ' ',
3371 write ? 'w' : ' ', exec ? 'x' : ' ');
b260b6c1 3372 if (filename[0])
dba24537
AC
3373 fprintf_filtered (gdb_stdout, " for %s", filename);
3374 fprintf_filtered (gdb_stdout, "\n");
3375 }
3376
3377 /* Invoke the callback function to create the corefile
3378 segment. */
3379 func (addr, size, read, write, exec, obfd);
3380 }
7c8a8b04 3381 do_cleanups (cleanup);
dba24537
AC
3382 return 0;
3383}
3384
2020b7ab
PA
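/* Callback for iterate_over_threads: return non-zero if INFO belongs
   to the same process as inferior_ptid and stopped with a signal.  */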
3385static int
3386find_signalled_thread (struct thread_info *info, void *data)
3387{
3388 if (info->stop_signal != TARGET_SIGNAL_0
3389 && ptid_get_pid (info->ptid) == ptid_get_pid (inferior_ptid))
3390 return 1;
3391
3392 return 0;
3393}
3394
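/* Return the stop signal of the first thread of the current process
   that stopped with a signal, or TARGET_SIGNAL_0 if none did.  Used
   to fill in the core file status notes.  */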
3395static enum target_signal
3396find_stop_signal (void)
3397{
3398 struct thread_info *info =
3399 iterate_over_threads (find_signalled_thread, NULL);
3400
3401 if (info)
3402 return info->stop_signal;
3403 else
3404 return TARGET_SIGNAL_0;
3405}
3406
dba24537
AC
3407/* Records the thread's register state for the corefile note
3408 section. */
3409
3410static char *
3411linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
2020b7ab
PA
3412 char *note_data, int *note_size,
3413 enum target_signal stop_signal)
dba24537
AC
3414{
3415 gdb_gregset_t gregs;
3416 gdb_fpregset_t fpregs;
dba24537 3417 unsigned long lwp = ptid_get_lwp (ptid);
594f7785
UW
3418 struct regcache *regcache = get_thread_regcache (ptid);
3419 struct gdbarch *gdbarch = get_regcache_arch (regcache);
4f844a66 3420 const struct regset *regset;
55e969c1 3421 int core_regset_p;
594f7785 3422 struct cleanup *old_chain;
17ea7499
CES
3423 struct core_regset_section *sect_list;
3424 char *gdb_regset;
594f7785
UW
3425
3426 old_chain = save_inferior_ptid ();
3427 inferior_ptid = ptid;
3428 target_fetch_registers (regcache, -1);
3429 do_cleanups (old_chain);
4f844a66
DM
3430
3431 core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
17ea7499
CES
3432 sect_list = gdbarch_core_regset_sections (gdbarch);
3433
55e969c1
DM
3434 if (core_regset_p
3435 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
3436 sizeof (gregs))) != NULL
3437 && regset->collect_regset != NULL)
594f7785 3438 regset->collect_regset (regset, regcache, -1,
55e969c1 3439 &gregs, sizeof (gregs));
4f844a66 3440 else
594f7785 3441 fill_gregset (regcache, &gregs, -1);
4f844a66 3442
55e969c1
DM
3443 note_data = (char *) elfcore_write_prstatus (obfd,
3444 note_data,
3445 note_size,
3446 lwp,
3447 stop_signal, &gregs);
3448
17ea7499
CES
3449 /* The loop below uses the new struct core_regset_section, which stores
3450 the supported section names and sizes for the core file. Note that
 3451 the PRSTATUS note needs to be treated specially. But the other notes are
3452 structurally the same, so they can benefit from the new struct. */
3453 if (core_regset_p && sect_list != NULL)
3454 while (sect_list->sect_name != NULL)
3455 {
3456 /* .reg was already handled above. */
3457 if (strcmp (sect_list->sect_name, ".reg") == 0)
3458 {
3459 sect_list++;
3460 continue;
3461 }
3462 regset = gdbarch_regset_from_core_section (gdbarch,
3463 sect_list->sect_name,
3464 sect_list->size);
3465 gdb_assert (regset && regset->collect_regset);
3466 gdb_regset = xmalloc (sect_list->size);
3467 regset->collect_regset (regset, regcache, -1,
3468 gdb_regset, sect_list->size);
3469 note_data = (char *) elfcore_write_register_note (obfd,
3470 note_data,
3471 note_size,
3472 sect_list->sect_name,
3473 gdb_regset,
3474 sect_list->size);
3475 xfree (gdb_regset);
3476 sect_list++;
3477 }
dba24537 3478
17ea7499
CES
 3479 /* For architectures that do not have the struct core_regset_section
3480 implemented, we use the old method. When all the architectures have
3481 the new support, the code below should be deleted. */
4f844a66 3482 else
17ea7499
CES
3483 {
3484 if (core_regset_p
3485 && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
3486 sizeof (fpregs))) != NULL
3487 && regset->collect_regset != NULL)
3488 regset->collect_regset (regset, regcache, -1,
3489 &fpregs, sizeof (fpregs));
3490 else
3491 fill_fpregset (regcache, &fpregs, -1);
3492
3493 note_data = (char *) elfcore_write_prfpreg (obfd,
3494 note_data,
3495 note_size,
3496 &fpregs, sizeof (fpregs));
3497 }
4f844a66 3498
dba24537
AC
3499 return note_data;
3500}
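/* [Editor's note -- descriptive summary, not part of linux-nat.c.]
   The function above writes one NT_PRSTATUS note per LWP from the
   ".reg" regset, then one additional register note per entry in the
   gdbarch's core_regset_section list (e.g. ".reg2" for the FP
   state), falling back to a single NT_FPREGSET note built with
   fill_fpregset on architectures that do not provide that list.  */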
3501
3502struct linux_nat_corefile_thread_data
3503{
3504 bfd *obfd;
3505 char *note_data;
3506 int *note_size;
3507 int num_notes;
2020b7ab 3508 enum target_signal stop_signal;
dba24537
AC
3509};
3510
 3511/* Called by iterate_over_lwps once per LWP.  Records the thread's
3512 register state for the corefile note section. */
3513
3514static int
3515linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
3516{
3517 struct linux_nat_corefile_thread_data *args = data;
dba24537 3518
dba24537
AC
3519 args->note_data = linux_nat_do_thread_registers (args->obfd,
3520 ti->ptid,
3521 args->note_data,
2020b7ab
PA
3522 args->note_size,
3523 args->stop_signal);
dba24537 3524 args->num_notes++;
56be3814 3525
dba24537
AC
3526 return 0;
3527}
3528
dba24537
AC
3529/* Fills the "to_make_corefile_note" target vector. Builds the note
3530 section for a corefile, and returns it in a malloc buffer. */
3531
3532static char *
3533linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3534{
3535 struct linux_nat_corefile_thread_data thread_args;
3536 struct cleanup *old_chain;
d99148ef 3537 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3538 char fname[16] = { '\0' };
d99148ef 3539 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3540 char psargs[80] = { '\0' };
3541 char *note_data = NULL;
3542 ptid_t current_ptid = inferior_ptid;
c6826062 3543 gdb_byte *auxv;
dba24537
AC
3544 int auxv_len;
3545
3546 if (get_exec_file (0))
3547 {
3548 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3549 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3550 if (get_inferior_args ())
3551 {
d99148ef
JK
3552 char *string_end;
3553 char *psargs_end = psargs + sizeof (psargs);
3554
 3555	      /* linux_elfcore_write_prpsinfo () copes fine with strings
 3556		 that are not zero-terminated.  */
3557 string_end = memchr (psargs, 0, sizeof (psargs));
3558 if (string_end != NULL)
3559 {
3560 *string_end++ = ' ';
3561 strncpy (string_end, get_inferior_args (),
3562 psargs_end - string_end);
3563 }
dba24537
AC
3564 }
3565 note_data = (char *) elfcore_write_prpsinfo (obfd,
3566 note_data,
3567 note_size, fname, psargs);
3568 }
3569
3570 /* Dump information for threads. */
3571 thread_args.obfd = obfd;
3572 thread_args.note_data = note_data;
3573 thread_args.note_size = note_size;
3574 thread_args.num_notes = 0;
2020b7ab 3575 thread_args.stop_signal = find_stop_signal ();
dba24537 3576 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
2020b7ab
PA
3577 gdb_assert (thread_args.num_notes != 0);
3578 note_data = thread_args.note_data;
dba24537 3579
13547ab6
DJ
3580 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3581 NULL, &auxv);
dba24537
AC
3582 if (auxv_len > 0)
3583 {
3584 note_data = elfcore_write_note (obfd, note_data, note_size,
3585 "CORE", NT_AUXV, auxv, auxv_len);
3586 xfree (auxv);
3587 }
3588
3589 make_cleanup (xfree, note_data);
3590 return note_data;
3591}
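/* [Editor's note -- illustrative, not part of linux-nat.c.]  The
   note buffer assembled above is laid out as:

       NT_PRPSINFO                       -- fname + psargs of the inferior
       per LWP: NT_PRSTATUS (+ FP / extra register notes)
       NT_AUXV                           -- raw auxiliary vector, if readable

   Inspecting a core produced this way with "readelf -n core" should
   list these notes under the "CORE" owner; the exact formatting of
   readelf's output varies by binutils version.  */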
3592
3593/* Implement the "info proc" command. */
3594
3595static void
3596linux_nat_info_proc_cmd (char *args, int from_tty)
3597{
3598 long long pid = PIDGET (inferior_ptid);
3599 FILE *procfile;
3600 char **argv = NULL;
3601 char buffer[MAXPATHLEN];
3602 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3603 int cmdline_f = 1;
3604 int cwd_f = 1;
3605 int exe_f = 1;
3606 int mappings_f = 0;
3607 int environ_f = 0;
3608 int status_f = 0;
3609 int stat_f = 0;
3610 int all = 0;
3611 struct stat dummy;
3612
3613 if (args)
3614 {
3615 /* Break up 'args' into an argv array. */
d1a41061
PP
3616 argv = gdb_buildargv (args);
3617 make_cleanup_freeargv (argv);
dba24537
AC
3618 }
3619 while (argv != NULL && *argv != NULL)
3620 {
3621 if (isdigit (argv[0][0]))
3622 {
3623 pid = strtoul (argv[0], NULL, 10);
3624 }
3625 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3626 {
3627 mappings_f = 1;
3628 }
3629 else if (strcmp (argv[0], "status") == 0)
3630 {
3631 status_f = 1;
3632 }
3633 else if (strcmp (argv[0], "stat") == 0)
3634 {
3635 stat_f = 1;
3636 }
3637 else if (strcmp (argv[0], "cmd") == 0)
3638 {
3639 cmdline_f = 1;
3640 }
3641 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3642 {
3643 exe_f = 1;
3644 }
3645 else if (strcmp (argv[0], "cwd") == 0)
3646 {
3647 cwd_f = 1;
3648 }
3649 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3650 {
3651 all = 1;
3652 }
3653 else
3654 {
3655 /* [...] (future options here) */
3656 }
3657 argv++;
3658 }
3659 if (pid == 0)
8a3fe4f8 3660 error (_("No current process: you must name one."));
dba24537
AC
3661
3662 sprintf (fname1, "/proc/%lld", pid);
3663 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3664 error (_("No /proc directory: '%s'"), fname1);
dba24537 3665
a3f17187 3666 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3667 if (cmdline_f || all)
3668 {
3669 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3670 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 3671 {
7c8a8b04 3672 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
3673 fgets (buffer, sizeof (buffer), procfile);
3674 printf_filtered ("cmdline = '%s'\n", buffer);
7c8a8b04 3675 do_cleanups (cleanup);
dba24537
AC
3676 }
3677 else
8a3fe4f8 3678 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3679 }
3680 if (cwd_f || all)
3681 {
3682 sprintf (fname1, "/proc/%lld/cwd", pid);
3683 memset (fname2, 0, sizeof (fname2));
3684 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3685 printf_filtered ("cwd = '%s'\n", fname2);
3686 else
8a3fe4f8 3687 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3688 }
3689 if (exe_f || all)
3690 {
3691 sprintf (fname1, "/proc/%lld/exe", pid);
3692 memset (fname2, 0, sizeof (fname2));
3693 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3694 printf_filtered ("exe = '%s'\n", fname2);
3695 else
8a3fe4f8 3696 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3697 }
3698 if (mappings_f || all)
3699 {
3700 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3701 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3702 {
3703 long long addr, endaddr, size, offset, inode;
3704 char permissions[8], device[8], filename[MAXPATHLEN];
7c8a8b04 3705 struct cleanup *cleanup;
dba24537 3706
7c8a8b04 3707 cleanup = make_cleanup_fclose (procfile);
a3f17187 3708 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3709 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3710 {
3711 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3712 "Start Addr",
3713 " End Addr",
3714 " Size", " Offset", "objfile");
3715 }
3716 else
3717 {
3718 printf_filtered (" %18s %18s %10s %10s %7s\n",
3719 "Start Addr",
3720 " End Addr",
3721 " Size", " Offset", "objfile");
3722 }
3723
3724 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3725 &offset, &device[0], &inode, &filename[0]))
3726 {
3727 size = endaddr - addr;
3728
3729 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3730 calls here (and possibly above) should be abstracted
3731 out into their own functions? Andrew suggests using
3732 a generic local_address_string instead to print out
3733 the addresses; that makes sense to me, too. */
3734
17a912b6 3735 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3736 {
3737 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3738 (unsigned long) addr, /* FIXME: pr_addr */
3739 (unsigned long) endaddr,
3740 (int) size,
3741 (unsigned int) offset,
3742 filename[0] ? filename : "");
3743 }
3744 else
3745 {
3746 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3747 (unsigned long) addr, /* FIXME: pr_addr */
3748 (unsigned long) endaddr,
3749 (int) size,
3750 (unsigned int) offset,
3751 filename[0] ? filename : "");
3752 }
3753 }
3754
7c8a8b04 3755 do_cleanups (cleanup);
dba24537
AC
3756 }
3757 else
8a3fe4f8 3758 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3759 }
3760 if (status_f || all)
3761 {
3762 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3763 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537 3764 {
7c8a8b04 3765 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
3766 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3767 puts_filtered (buffer);
7c8a8b04 3768 do_cleanups (cleanup);
dba24537
AC
3769 }
3770 else
8a3fe4f8 3771 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3772 }
3773 if (stat_f || all)
3774 {
3775 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3776 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3777 {
3778 int itmp;
3779 char ctmp;
a25694b4 3780 long ltmp;
7c8a8b04 3781 struct cleanup *cleanup = make_cleanup_fclose (procfile);
dba24537
AC
3782
3783 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3784 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3785 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3786 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3787 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3788 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3789 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3790 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3791 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3792 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3793 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3794 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3795 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3796 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3797 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3798 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3799 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3800 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3801 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3802 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3803 (unsigned long) ltmp);
3804 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3805 printf_filtered (_("Minor faults, children: %lu\n"),
3806 (unsigned long) ltmp);
3807 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3808 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3809 (unsigned long) ltmp);
3810 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3811 printf_filtered (_("Major faults, children: %lu\n"),
3812 (unsigned long) ltmp);
3813 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3814 printf_filtered (_("utime: %ld\n"), ltmp);
3815 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3816 printf_filtered (_("stime: %ld\n"), ltmp);
3817 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3818 printf_filtered (_("utime, children: %ld\n"), ltmp);
3819 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3820 printf_filtered (_("stime, children: %ld\n"), ltmp);
3821 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3822 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3823 ltmp);
3824 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3825 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3826 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3827 printf_filtered (_("jiffies until next timeout: %lu\n"),
3828 (unsigned long) ltmp);
3829 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3830 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3831 (unsigned long) ltmp);
3832 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3833 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3834 ltmp);
3835 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3836 printf_filtered (_("Virtual memory size: %lu\n"),
3837 (unsigned long) ltmp);
3838 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3839 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3840 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3841 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3842 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3843 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3844 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3845 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3846 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3847 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3848#if 0 /* Don't know how architecture-dependent the rest is...
3849 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3850 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3851 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3852 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3853 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3854 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3855 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3856 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3857 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3858 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3859 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3860 if (fscanf (procfile, "%ld ", &ltmp) > 0)
 3861	    printf_filtered (_("Caught signals bitmap: 0x%lx\n"), ltmp);
3862 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3863 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537 3864#endif
7c8a8b04 3865 do_cleanups (cleanup);
dba24537
AC
3866 }
3867 else
8a3fe4f8 3868 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3869 }
3870}
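/* [Editor's note -- illustrative session, not part of linux-nat.c;
   all values are invented.]  A typical use of the command defined
   above looks roughly like:

       (gdb) info proc mappings
       process 1234
       cmdline = '/tmp/a.out'
       cwd = '/tmp'
       exe = '/tmp/a.out'
       Mapped address spaces:

            Start Addr   End Addr       Size     Offset objfile
             0x8048000  0x8049000     0x1000        0x0 /tmp/a.out

   Note that the cmdline, cwd and exe entries appear even when only
   "mappings" is requested, because their flags default to 1 above;
   "info proc all" additionally prints status and stat.  */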
3871
10d6c8cd
DJ
3872/* Implement the to_xfer_partial interface for memory reads using the /proc
3873 filesystem. Because we can use a single read() call for /proc, this
3874 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3875 but it doesn't support writes. */
3876
3877static LONGEST
3878linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3879 const char *annex, gdb_byte *readbuf,
3880 const gdb_byte *writebuf,
3881 ULONGEST offset, LONGEST len)
dba24537 3882{
10d6c8cd
DJ
3883 LONGEST ret;
3884 int fd;
dba24537
AC
3885 char filename[64];
3886
10d6c8cd 3887 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3888 return 0;
3889
3890 /* Don't bother for one word. */
3891 if (len < 3 * sizeof (long))
3892 return 0;
3893
3894 /* We could keep this file open and cache it - possibly one per
3895 thread. That requires some juggling, but is even faster. */
3896 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3897 fd = open (filename, O_RDONLY | O_LARGEFILE);
3898 if (fd == -1)
3899 return 0;
3900
3901 /* If pread64 is available, use it. It's faster if the kernel
3902 supports it (only one syscall), and it's 64-bit safe even on
3903 32-bit platforms (for instance, SPARC debugging a SPARC64
3904 application). */
3905#ifdef HAVE_PREAD64
10d6c8cd 3906 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3907#else
10d6c8cd 3908 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3909#endif
3910 ret = 0;
3911 else
3912 ret = len;
3913
3914 close (fd);
3915 return ret;
3916}
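/* [Editor's note -- minimal standalone sketch of the same /proc read
   technique, assuming <sys/types.h>, <fcntl.h>, <stdio.h> and
   <unistd.h>; the function name and the "0 on any failure" convention
   are invented for illustration, mirroring the all-or-nothing
   behaviour of the code above.  */

#if 0 /* illustration only, not compiled */
static ssize_t
proc_mem_read (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return 0;

  /* pread does not disturb the file offset and, with a 64-bit off_t,
     reaches the whole address space; the code above prefers pread64
     so that a 32-bit GDB can still read 64-bit inferior addresses.  */
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);

  return n == (ssize_t) len ? n : 0;
}
#endif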
3917
3918/* Parse LINE as a signal set and add its set bits to SIGS. */
3919
3920static void
3921add_line_to_sigset (const char *line, sigset_t *sigs)
3922{
3923 int len = strlen (line) - 1;
3924 const char *p;
3925 int signum;
3926
3927 if (line[len] != '\n')
8a3fe4f8 3928 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3929
3930 p = line;
3931 signum = len * 4;
3932 while (len-- > 0)
3933 {
3934 int digit;
3935
3936 if (*p >= '0' && *p <= '9')
3937 digit = *p - '0';
3938 else if (*p >= 'a' && *p <= 'f')
3939 digit = *p - 'a' + 10;
3940 else
8a3fe4f8 3941 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3942
3943 signum -= 4;
3944
3945 if (digit & 1)
3946 sigaddset (sigs, signum + 1);
3947 if (digit & 2)
3948 sigaddset (sigs, signum + 2);
3949 if (digit & 4)
3950 sigaddset (sigs, signum + 3);
3951 if (digit & 8)
3952 sigaddset (sigs, signum + 4);
3953
3954 p++;
3955 }
3956}
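/* [Editor's note -- worked example, not part of linux-nat.c.]  The
   mask is a hex string with the most significant nibble first, so the
   last character covers signals 1-4, the one before it signals 5-8,
   and so on.  For the line

       "0000000000000102\n"

   the trailing '2' (bit 1 of the lowest nibble) adds signal 2
   (SIGINT) and the '1' in the third nibble from the right adds
   signal 9 (SIGKILL), so the resulting set is { SIGINT, SIGKILL }.  */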
3957
 3958/* Find process PID's pending, blocked and ignored signals from
 3959   /proc/pid/status and set PENDING, BLOCKED and IGNORED to match.  */
3960
3961void
3962linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3963{
3964 FILE *procfile;
3965 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3966 int signum;
7c8a8b04 3967 struct cleanup *cleanup;
dba24537
AC
3968
3969 sigemptyset (pending);
3970 sigemptyset (blocked);
3971 sigemptyset (ignored);
3972 sprintf (fname, "/proc/%d/status", pid);
3973 procfile = fopen (fname, "r");
3974 if (procfile == NULL)
8a3fe4f8 3975 error (_("Could not open %s"), fname);
7c8a8b04 3976 cleanup = make_cleanup_fclose (procfile);
dba24537
AC
3977
3978 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3979 {
3980 /* Normal queued signals are on the SigPnd line in the status
3981 file. However, 2.6 kernels also have a "shared" pending
3982 queue for delivering signals to a thread group, so check for
3983 a ShdPnd line also.
3984
3985 Unfortunately some Red Hat kernels include the shared pending
3986 queue but not the ShdPnd status field. */
3987
3988 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3989 add_line_to_sigset (buffer + 8, pending);
3990 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3991 add_line_to_sigset (buffer + 8, pending);
3992 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3993 add_line_to_sigset (buffer + 8, blocked);
3994 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3995 add_line_to_sigset (buffer + 8, ignored);
3996 }
3997
7c8a8b04 3998 do_cleanups (cleanup);
dba24537
AC
3999}
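/* [Editor's note -- illustrative, not part of linux-nat.c; values
   are invented.]  The relevant /proc/PID/status lines look like:

       SigPnd:	0000000000000000
       ShdPnd:	0000000000000002
       SigBlk:	0000000000010000
       SigIgn:	0000000001001206

   Both the per-thread (SigPnd) and group-wide (ShdPnd) masks feed
   the PENDING set, which is why the two lines above are parsed with
   the same add_line_to_sigset call.  */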
4000
07e059b5
VP
4001static LONGEST
4002linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
4003 const char *annex, gdb_byte *readbuf,
4004 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4005{
4006 /* We make the process list snapshot when the object starts to be
4007 read. */
4008 static const char *buf;
4009 static LONGEST len_avail = -1;
4010 static struct obstack obstack;
4011
4012 DIR *dirp;
4013
4014 gdb_assert (object == TARGET_OBJECT_OSDATA);
4015
4016 if (strcmp (annex, "processes") != 0)
4017 return 0;
4018
4019 gdb_assert (readbuf && !writebuf);
4020
4021 if (offset == 0)
4022 {
4023 if (len_avail != -1 && len_avail != 0)
4024 obstack_free (&obstack, NULL);
4025 len_avail = 0;
4026 buf = NULL;
4027 obstack_init (&obstack);
4028 obstack_grow_str (&obstack, "<osdata type=\"processes\">\n");
4029
4030 dirp = opendir ("/proc");
4031 if (dirp)
4032 {
4033 struct dirent *dp;
4034 while ((dp = readdir (dirp)) != NULL)
4035 {
4036 struct stat statbuf;
4037 char procentry[sizeof ("/proc/4294967295")];
4038
4039 if (!isdigit (dp->d_name[0])
4040 || strlen (dp->d_name) > sizeof ("4294967295") - 1)
4041 continue;
4042
4043 sprintf (procentry, "/proc/%s", dp->d_name);
4044 if (stat (procentry, &statbuf) == 0
4045 && S_ISDIR (statbuf.st_mode))
4046 {
4047 char *pathname;
4048 FILE *f;
4049 char cmd[MAXPATHLEN + 1];
4050 struct passwd *entry;
4051
4052 pathname = xstrprintf ("/proc/%s/cmdline", dp->d_name);
4053 entry = getpwuid (statbuf.st_uid);
4054
4055 if ((f = fopen (pathname, "r")) != NULL)
4056 {
4057 size_t len = fread (cmd, 1, sizeof (cmd) - 1, f);
4058 if (len > 0)
4059 {
4060 int i;
4061 for (i = 0; i < len; i++)
4062 if (cmd[i] == '\0')
4063 cmd[i] = ' ';
4064 cmd[len] = '\0';
4065
4066 obstack_xml_printf (
4067 &obstack,
4068 "<item>"
4069 "<column name=\"pid\">%s</column>"
4070 "<column name=\"user\">%s</column>"
4071 "<column name=\"command\">%s</column>"
4072 "</item>",
4073 dp->d_name,
4074 entry ? entry->pw_name : "?",
4075 cmd);
4076 }
4077 fclose (f);
4078 }
4079
4080 xfree (pathname);
4081 }
4082 }
4083
4084 closedir (dirp);
4085 }
4086
4087 obstack_grow_str0 (&obstack, "</osdata>\n");
4088 buf = obstack_finish (&obstack);
4089 len_avail = strlen (buf);
4090 }
4091
4092 if (offset >= len_avail)
4093 {
4094 /* Done. Get rid of the obstack. */
4095 obstack_free (&obstack, NULL);
4096 buf = NULL;
4097 len_avail = 0;
4098 return 0;
4099 }
4100
4101 if (len > len_avail - offset)
4102 len = len_avail - offset;
4103 memcpy (readbuf, buf + offset, len);
4104
4105 return len;
4106}
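/* [Editor's note -- illustrative, not part of linux-nat.c; content
   is invented and whitespace added for readability.]  The snapshot
   assembled above is an XML document roughly of the form:

       <osdata type="processes">
       <item><column name="pid">1234</column>
             <column name="user">alice</column>
             <column name="command">/bin/sleep 600</column></item>
       ...
       </osdata>

   It is handed back to the core in LEN-sized chunks keyed by OFFSET,
   which is why the buffer lives in static variables until a call
   with OFFSET past the end frees the obstack.  */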
4107
10d6c8cd
DJ
4108static LONGEST
4109linux_xfer_partial (struct target_ops *ops, enum target_object object,
4110 const char *annex, gdb_byte *readbuf,
4111 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
4112{
4113 LONGEST xfer;
4114
4115 if (object == TARGET_OBJECT_AUXV)
4116 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
4117 offset, len);
4118
07e059b5
VP
4119 if (object == TARGET_OBJECT_OSDATA)
4120 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
4121 offset, len);
4122
10d6c8cd
DJ
4123 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
4124 offset, len);
4125 if (xfer != 0)
4126 return xfer;
4127
4128 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
4129 offset, len);
4130}
4131
e9efe249 4132/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4133 it with local methods. */
4134
910122bf
UW
4135static void
4136linux_target_install_ops (struct target_ops *t)
10d6c8cd 4137{
6d8fd2b7
UW
4138 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
4139 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
4140 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
4141 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4142 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4143 t->to_post_attach = linux_child_post_attach;
4144 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4145 t->to_find_memory_regions = linux_nat_find_memory_regions;
4146 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
4147
4148 super_xfer_partial = t->to_xfer_partial;
4149 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
4150}
4151
4152struct target_ops *
4153linux_target (void)
4154{
4155 struct target_ops *t;
4156
4157 t = inf_ptrace_target ();
4158 linux_target_install_ops (t);
4159
4160 return t;
4161}
4162
4163struct target_ops *
7714d83a 4164linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4165{
4166 struct target_ops *t;
4167
4168 t = inf_ptrace_trad_target (register_u_offset);
4169 linux_target_install_ops (t);
10d6c8cd 4170
10d6c8cd
DJ
4171 return t;
4172}
4173
b84876c2
PA
4174/* target_is_async_p implementation. */
4175
4176static int
4177linux_nat_is_async_p (void)
4178{
4179 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4180 it explicitly with the "maintenance set target-async" command.
b84876c2 4181 Someday, linux will always be async. */
c6ebd6cf 4182 if (!target_async_permitted)
b84876c2
PA
4183 return 0;
4184
4185 return 1;
4186}
4187
4188/* target_can_async_p implementation. */
4189
4190static int
4191linux_nat_can_async_p (void)
4192{
4193 /* NOTE: palves 2008-03-21: We're only async when the user requests
c6ebd6cf 4194 it explicitly with the "maintenance set target-async" command.
b84876c2 4195 Someday, linux will always be async. */
c6ebd6cf 4196 if (!target_async_permitted)
b84876c2
PA
4197 return 0;
4198
4199 /* See target.h/target_async_mask. */
4200 return linux_nat_async_mask_value;
4201}
4202
9908b566
VP
4203static int
4204linux_nat_supports_non_stop (void)
4205{
4206 return 1;
4207}
4208
b84876c2
PA
4209/* target_async_mask implementation. */
4210
4211static int
4212linux_nat_async_mask (int mask)
4213{
4214 int current_state;
4215 current_state = linux_nat_async_mask_value;
4216
4217 if (current_state != mask)
4218 {
4219 if (mask == 0)
4220 {
4221 linux_nat_async (NULL, 0);
4222 linux_nat_async_mask_value = mask;
b84876c2
PA
4223 }
4224 else
4225 {
b84876c2
PA
4226 linux_nat_async_mask_value = mask;
4227 linux_nat_async (inferior_event_handler, 0);
4228 }
4229 }
4230
4231 return current_state;
4232}
4233
4234/* Pop an event from the event pipe. */
4235
4236static int
4237linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
4238{
4239 struct waitpid_result event = {0};
4240 int ret;
4241
4242 do
4243 {
4244 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
4245 }
4246 while (ret == -1 && errno == EINTR);
4247
4248 gdb_assert (ret == sizeof (event));
4249
4250 *ptr_status = event.status;
4251 *ptr_options = event.options;
4252
4253 linux_nat_num_queued_events--;
4254
4255 return event.pid;
4256}
4257
4258/* Push an event into the event pipe. */
4259
4260static void
4261linux_nat_event_pipe_push (int pid, int status, int options)
4262{
4263 int ret;
4264 struct waitpid_result event = {0};
4265 event.pid = pid;
4266 event.status = status;
4267 event.options = options;
4268
4269 do
4270 {
4271 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
4272 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
4273 } while (ret == -1 && errno == EINTR);
4274
4275 linux_nat_num_queued_events++;
4276}
4277
4278static void
4279get_pending_events (void)
4280{
4281 int status, options, pid;
4282
c6ebd6cf 4283 if (!target_async_permitted
84e46146 4284 || linux_nat_async_events_state != sigchld_async)
b84876c2
PA
4285 internal_error (__FILE__, __LINE__,
4286 "get_pending_events called with async masked");
4287
4288 while (1)
4289 {
4290 status = 0;
4291 options = __WCLONE | WNOHANG;
4292
4293 do
4294 {
4295 pid = waitpid (-1, &status, options);
4296 }
4297 while (pid == -1 && errno == EINTR);
4298
4299 if (pid <= 0)
4300 {
4301 options = WNOHANG;
4302 do
4303 {
4304 pid = waitpid (-1, &status, options);
4305 }
4306 while (pid == -1 && errno == EINTR);
4307 }
4308
4309 if (pid <= 0)
4310 /* No more children reporting events. */
4311 break;
4312
4313 if (debug_linux_nat_async)
4314 fprintf_unfiltered (gdb_stdlog, "\
4315get_pending_events: pid(%d), status(%x), options (%x)\n",
4316 pid, status, options);
4317
4318 linux_nat_event_pipe_push (pid, status, options);
4319 }
4320
4321 if (debug_linux_nat_async)
4322 fprintf_unfiltered (gdb_stdlog, "\
4323get_pending_events: linux_nat_num_queued_events(%d)\n",
4324 linux_nat_num_queued_events);
4325}
4326
4327/* SIGCHLD handler for async mode. */
4328
4329static void
4330async_sigchld_handler (int signo)
4331{
4332 if (debug_linux_nat_async)
4333 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
4334
4335 get_pending_events ();
4336}
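/* [Editor's note -- added for clarity.]  In async mode, children are
   thus reaped inside the SIGCHLD handler itself and the raw waitpid
   results are pushed through linux_nat_event_pipe; the read end of
   that pipe is registered with the event loop (see linux_nat_async
   below), turning signal delivery into an ordinary file-descriptor
   event.  */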
4337
84e46146 4338/* Set SIGCHLD handling state to STATE. Returns previous state. */
b84876c2 4339
84e46146
PA
4340static enum sigchld_state
4341linux_nat_async_events (enum sigchld_state state)
b84876c2 4342{
84e46146 4343 enum sigchld_state current_state = linux_nat_async_events_state;
b84876c2
PA
4344
4345 if (debug_linux_nat_async)
4346 fprintf_unfiltered (gdb_stdlog,
84e46146 4347 "LNAE: state(%d): linux_nat_async_events_state(%d), "
b84876c2 4348 "linux_nat_num_queued_events(%d)\n",
84e46146 4349 state, linux_nat_async_events_state,
b84876c2
PA
4350 linux_nat_num_queued_events);
4351
84e46146 4352 if (current_state != state)
b84876c2
PA
4353 {
4354 sigset_t mask;
4355 sigemptyset (&mask);
4356 sigaddset (&mask, SIGCHLD);
84e46146
PA
4357
4358 /* Always block before changing state. */
4359 sigprocmask (SIG_BLOCK, &mask, NULL);
4360
4361 /* Set new state. */
4362 linux_nat_async_events_state = state;
4363
4364 switch (state)
b84876c2 4365 {
84e46146
PA
4366 case sigchld_sync:
4367 {
4368 /* Block target events. */
4369 sigprocmask (SIG_BLOCK, &mask, NULL);
4370 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
4371 /* Get events out of queue, and make them available to
4372 queued_waitpid / my_waitpid. */
4373 pipe_to_local_event_queue ();
4374 }
4375 break;
4376 case sigchld_async:
4377 {
4378 /* Unblock target events for async mode. */
4379
4380 sigprocmask (SIG_BLOCK, &mask, NULL);
4381
4382 /* Put events we already waited on, in the pipe first, so
4383 events are FIFO. */
4384 local_event_queue_to_pipe ();
4385 /* While in masked async, we may have not collected all
4386 the pending events. Get them out now. */
4387 get_pending_events ();
4388
4389 /* Let'em come. */
4390 sigaction (SIGCHLD, &async_sigchld_action, NULL);
4391 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4392 }
4393 break;
4394 case sigchld_default:
4395 {
4396 /* SIGCHLD default mode. */
4397 sigaction (SIGCHLD, &sigchld_default_action, NULL);
4398
4399 /* Get events out of queue, and make them available to
4400 queued_waitpid / my_waitpid. */
4401 pipe_to_local_event_queue ();
4402
4403 /* Unblock SIGCHLD. */
4404 sigprocmask (SIG_UNBLOCK, &mask, NULL);
4405 }
4406 break;
b84876c2
PA
4407 }
4408 }
4409
4410 return current_state;
4411}
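/* [Editor's note -- summary of the state switch above, added for
   clarity.]  The three sigchld states differ in who consumes SIGCHLD
   and where pending waitpid results are kept:

     sigchld_sync    -- SIGCHLD blocked, synchronous handler installed;
                        events drained from the pipe back into the
                        local queue for queued_waitpid / my_waitpid.
     sigchld_async   -- queued events pushed into the pipe first (so
                        ordering stays FIFO), remaining children reaped
                        by get_pending_events, async handler installed
                        and SIGCHLD unblocked.
     sigchld_default -- default disposition restored, pipe drained back
                        to the local queue, SIGCHLD unblocked.

   In every transition SIGCHLD is blocked first, so no event can slip
   in while the handler is being swapped.  */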
4412
4413static int async_terminal_is_ours = 1;
4414
4415/* target_terminal_inferior implementation. */
4416
4417static void
4418linux_nat_terminal_inferior (void)
4419{
4420 if (!target_is_async_p ())
4421 {
4422 /* Async mode is disabled. */
4423 terminal_inferior ();
4424 return;
4425 }
4426
4427 /* GDB should never give the terminal to the inferior, if the
4428 inferior is running in the background (run&, continue&, etc.).
4429 This check can be removed when the common code is fixed. */
4430 if (!sync_execution)
4431 return;
4432
4433 terminal_inferior ();
4434
4435 if (!async_terminal_is_ours)
4436 return;
4437
4438 delete_file_handler (input_fd);
4439 async_terminal_is_ours = 0;
4440 set_sigint_trap ();
4441}
4442
4443/* target_terminal_ours implementation. */
4444
4445void
4446linux_nat_terminal_ours (void)
4447{
4448 if (!target_is_async_p ())
4449 {
4450 /* Async mode is disabled. */
4451 terminal_ours ();
4452 return;
4453 }
4454
4455 /* GDB should never give the terminal to the inferior if the
4456 inferior is running in the background (run&, continue&, etc.),
4457 but claiming it sure should. */
4458 terminal_ours ();
4459
4460 if (!sync_execution)
4461 return;
4462
4463 if (async_terminal_is_ours)
4464 return;
4465
4466 clear_sigint_trap ();
4467 add_file_handler (input_fd, stdin_event_handler, 0);
4468 async_terminal_is_ours = 1;
4469}
4470
4471static void (*async_client_callback) (enum inferior_event_type event_type,
4472 void *context);
4473static void *async_client_context;
4474
4475static void
4476linux_nat_async_file_handler (int error, gdb_client_data client_data)
4477{
4478 async_client_callback (INF_REG_EVENT, async_client_context);
4479}
4480
4481/* target_async implementation. */
4482
4483static void
4484linux_nat_async (void (*callback) (enum inferior_event_type event_type,
4485 void *context), void *context)
4486{
c6ebd6cf 4487 if (linux_nat_async_mask_value == 0 || !target_async_permitted)
b84876c2
PA
4488 internal_error (__FILE__, __LINE__,
4489 "Calling target_async when async is masked");
4490
4491 if (callback != NULL)
4492 {
4493 async_client_callback = callback;
4494 async_client_context = context;
4495 add_file_handler (linux_nat_event_pipe[0],
4496 linux_nat_async_file_handler, NULL);
4497
84e46146 4498 linux_nat_async_events (sigchld_async);
b84876c2
PA
4499 }
4500 else
4501 {
4502 async_client_callback = callback;
4503 async_client_context = context;
4504
84e46146 4505 linux_nat_async_events (sigchld_sync);
b84876c2
PA
4506 delete_file_handler (linux_nat_event_pipe[0]);
4507 }
4508 return;
4509}
4510
252fbfc8
PA
4511/* Stop an LWP, and push a TARGET_SIGNAL_0 stop status if no other
4512 event came out. */
4513
4c28f408 4514static int
252fbfc8 4515linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4516{
252fbfc8
PA
4517 ptid_t ptid = * (ptid_t *) data;
4518
4519 if (ptid_equal (lwp->ptid, ptid)
4520 || ptid_equal (minus_one_ptid, ptid)
4521 || (ptid_is_pid (ptid)
4522 && ptid_get_pid (ptid) == ptid_get_pid (lwp->ptid)))
4523 {
4524 if (!lwp->stopped)
4525 {
4526 int pid, status;
4527
4528 if (debug_linux_nat)
4529 fprintf_unfiltered (gdb_stdlog,
4530 "LNSL: running -> suspending %s\n",
4531 target_pid_to_str (lwp->ptid));
4532
4533 /* Peek once, to check if we've already waited for this
4534 LWP. */
4535 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4536 lwp->cloned ? __WCLONE : 0, 1 /* peek */);
4537
4538 if (pid == -1)
4539 {
4540 ptid_t ptid = lwp->ptid;
4541
4542 stop_callback (lwp, NULL);
4543 stop_wait_callback (lwp, NULL);
4544
4545 /* If the lwp exits while we try to stop it, there's
4546 nothing else to do. */
4547 lwp = find_lwp_pid (ptid);
4548 if (lwp == NULL)
4549 return 0;
4550
4551 pid = queued_waitpid_1 (ptid_get_lwp (lwp->ptid), &status,
4552 lwp->cloned ? __WCLONE : 0,
4553 1 /* peek */);
4554 }
4555
4556 /* If we didn't collect any signal other than SIGSTOP while
4557 stopping the LWP, push a SIGNAL_0 event. In either case,
4558 the event-loop will end up calling target_wait which will
4559 collect these. */
4560 if (pid == -1)
4561 push_waitpid (ptid_get_lwp (lwp->ptid), W_STOPCODE (0),
4562 lwp->cloned ? __WCLONE : 0);
4563 }
4564 else
4565 {
4566 /* Already known to be stopped; do nothing. */
4567
4568 if (debug_linux_nat)
4569 {
4570 if (find_thread_pid (lwp->ptid)->stop_requested)
4571 fprintf_unfiltered (gdb_stdlog, "\
4572LNSL: already stopped/stop_requested %s\n",
4573 target_pid_to_str (lwp->ptid));
4574 else
4575 fprintf_unfiltered (gdb_stdlog, "\
4576LNSL: already stopped/no stop_requested yet %s\n",
4577 target_pid_to_str (lwp->ptid));
4578 }
4579 }
4580 }
4c28f408
PA
4581 return 0;
4582}
4583
4584static void
4585linux_nat_stop (ptid_t ptid)
4586{
4587 if (non_stop)
4588 {
252fbfc8
PA
4589 linux_nat_async_events (sigchld_sync);
4590 iterate_over_lwps (linux_nat_stop_lwp, &ptid);
4591 target_async (inferior_event_handler, 0);
4c28f408
PA
4592 }
4593 else
4594 linux_ops->to_stop (ptid);
4595}
4596
f973ed9c
DJ
4597void
4598linux_nat_add_target (struct target_ops *t)
4599{
f973ed9c
DJ
4600 /* Save the provided single-threaded target. We save this in a separate
4601 variable because another target we've inherited from (e.g. inf-ptrace)
4602 may have saved a pointer to T; we want to use it for the final
4603 process stratum target. */
4604 linux_ops_saved = *t;
4605 linux_ops = &linux_ops_saved;
4606
4607 /* Override some methods for multithreading. */
b84876c2 4608 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4609 t->to_attach = linux_nat_attach;
4610 t->to_detach = linux_nat_detach;
4611 t->to_resume = linux_nat_resume;
4612 t->to_wait = linux_nat_wait;
4613 t->to_xfer_partial = linux_nat_xfer_partial;
4614 t->to_kill = linux_nat_kill;
4615 t->to_mourn_inferior = linux_nat_mourn_inferior;
4616 t->to_thread_alive = linux_nat_thread_alive;
4617 t->to_pid_to_str = linux_nat_pid_to_str;
4618 t->to_has_thread_control = tc_schedlock;
4619
b84876c2
PA
4620 t->to_can_async_p = linux_nat_can_async_p;
4621 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4622 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2
PA
4623 t->to_async = linux_nat_async;
4624 t->to_async_mask = linux_nat_async_mask;
4625 t->to_terminal_inferior = linux_nat_terminal_inferior;
4626 t->to_terminal_ours = linux_nat_terminal_ours;
4627
4c28f408
PA
4628 /* Methods for non-stop support. */
4629 t->to_stop = linux_nat_stop;
4630
f973ed9c
DJ
4631 /* We don't change the stratum; this target will sit at
 4632     process_stratum and thread_db will sit at thread_stratum.  This
4633 is a little strange, since this is a multi-threaded-capable
4634 target, but we want to be on the stack below thread_db, and we
4635 also want to be used for single-threaded processes. */
4636
4637 add_target (t);
4638
4639 /* TODO: Eliminate this and have libthread_db use
4640 find_target_beneath. */
4641 thread_db_init (t);
4642}
4643
9f0bdab8
DJ
4644/* Register a method to call whenever a new thread is attached. */
4645void
4646linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
4647{
4648 /* Save the pointer. We only support a single registered instance
4649 of the GNU/Linux native target, so we do not need to map this to
4650 T. */
4651 linux_nat_new_thread = new_thread;
4652}
4653
4654/* Return the saved siginfo associated with PTID. */
4655struct siginfo *
4656linux_nat_get_siginfo (ptid_t ptid)
4657{
4658 struct lwp_info *lp = find_lwp_pid (ptid);
4659
4660 gdb_assert (lp != NULL);
4661
4662 return &lp->siginfo;
4663}
4664
c6ebd6cf
VP
4665/* Enable/Disable async mode. */
4666
4667static void
4668linux_nat_setup_async (void)
4669{
4670 if (pipe (linux_nat_event_pipe) == -1)
4671 internal_error (__FILE__, __LINE__,
4672 "creating event pipe failed.");
4673 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4674 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4675}
4676
d6b0e80f
AC
4677void
4678_initialize_linux_nat (void)
4679{
b84876c2 4680 sigset_t mask;
dba24537 4681
1bedd215
AC
4682 add_info ("proc", linux_nat_info_proc_cmd, _("\
4683Show /proc process information about any running process.\n\
dba24537
AC
4684Specify any process id, or use the program being debugged by default.\n\
4685Specify any of the following keywords for detailed info:\n\
4686 mappings -- list of mapped memory regions.\n\
4687 stat -- list a bunch of random process info.\n\
4688 status -- list a different bunch of random process info.\n\
1bedd215 4689 all -- list all available /proc info."));
d6b0e80f 4690
b84876c2
PA
4691 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4692 &debug_linux_nat, _("\
4693Set debugging of GNU/Linux lwp module."), _("\
4694Show debugging of GNU/Linux lwp module."), _("\
4695Enables printf debugging output."),
4696 NULL,
4697 show_debug_linux_nat,
4698 &setdebuglist, &showdebuglist);
4699
4700 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4701 &debug_linux_nat_async, _("\
4702Set debugging of GNU/Linux async lwp module."), _("\
4703Show debugging of GNU/Linux async lwp module."), _("\
4704Enables printf debugging output."),
4705 NULL,
4706 show_debug_linux_nat_async,
4707 &setdebuglist, &showdebuglist);
4708
84e46146
PA
4709 /* Get the default SIGCHLD action. Used while forking an inferior
4710 (see linux_nat_create_inferior/linux_nat_async_events). */
4711 sigaction (SIGCHLD, NULL, &sigchld_default_action);
4712
b84876c2
PA
4713 /* Block SIGCHLD by default. Doing this early prevents it getting
4714 unblocked if an exception is thrown due to an error while the
4715 inferior is starting (sigsetjmp/siglongjmp). */
4716 sigemptyset (&mask);
4717 sigaddset (&mask, SIGCHLD);
4718 sigprocmask (SIG_BLOCK, &mask, NULL);
4719
4720 /* Save this mask as the default. */
d6b0e80f
AC
4721 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4722
b84876c2
PA
4723 /* The synchronous SIGCHLD handler. */
4724 sync_sigchld_action.sa_handler = sigchld_handler;
4725 sigemptyset (&sync_sigchld_action.sa_mask);
4726 sync_sigchld_action.sa_flags = SA_RESTART;
4727
4728 /* Make it the default. */
4729 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
d6b0e80f
AC
4730
4731 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4732 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4733 sigdelset (&suspend_mask, SIGCHLD);
4734
b84876c2
PA
4735 /* SIGCHLD handler for async mode. */
4736 async_sigchld_action.sa_handler = async_sigchld_handler;
4737 sigemptyset (&async_sigchld_action.sa_mask);
4738 async_sigchld_action.sa_flags = SA_RESTART;
d6b0e80f 4739
c6ebd6cf 4740 linux_nat_setup_async ();
10568435
JK
4741
4742 add_setshow_boolean_cmd ("disable-randomization", class_support,
4743 &disable_randomization, _("\
4744Set disabling of debuggee's virtual address space randomization."), _("\
4745Show disabling of debuggee's virtual address space randomization."), _("\
4746When this mode is on (which is the default), randomization of the virtual\n\
4747address space is disabled. Standalone programs run with the randomization\n\
4748enabled by default on some platforms."),
4749 &set_disable_randomization,
4750 &show_disable_randomization,
4751 &setlist, &showlist);
d6b0e80f
AC
4752}
4753\f
4754
4755/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4756 the GNU/Linux Threads library and therefore doesn't really belong
4757 here. */
4758
4759/* Read variable NAME in the target and return its value if found.
4760 Otherwise return zero. It is assumed that the type of the variable
4761 is `int'. */
4762
4763static int
4764get_signo (const char *name)
4765{
4766 struct minimal_symbol *ms;
4767 int signo;
4768
4769 ms = lookup_minimal_symbol (name, NULL, NULL);
4770 if (ms == NULL)
4771 return 0;
4772
8e70166d 4773 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4774 sizeof (signo)) != 0)
4775 return 0;
4776
4777 return signo;
4778}
4779
4780/* Return the set of signals used by the threads library in *SET. */
4781
4782void
4783lin_thread_get_thread_signals (sigset_t *set)
4784{
4785 struct sigaction action;
4786 int restart, cancel;
b84876c2 4787 sigset_t blocked_mask;
d6b0e80f 4788
b84876c2 4789 sigemptyset (&blocked_mask);
d6b0e80f
AC
4790 sigemptyset (set);
4791
4792 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4793 cancel = get_signo ("__pthread_sig_cancel");
4794
4795 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4796 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4797 not provide any way for the debugger to query the signal numbers -
4798 fortunately they don't change! */
4799
d6b0e80f 4800 if (restart == 0)
17fbb0bd 4801 restart = __SIGRTMIN;
d6b0e80f 4802
d6b0e80f 4803 if (cancel == 0)
17fbb0bd 4804 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4805
4806 sigaddset (set, restart);
4807 sigaddset (set, cancel);
4808
4809 /* The GNU/Linux Threads library makes terminating threads send a
4810 special "cancel" signal instead of SIGCHLD. Make sure we catch
4811 those (to prevent them from terminating GDB itself, which is
4812 likely to be their default action) and treat them the same way as
4813 SIGCHLD. */
4814
4815 action.sa_handler = sigchld_handler;
4816 sigemptyset (&action.sa_mask);
58aecb61 4817 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4818 sigaction (cancel, &action, NULL);
4819
4820 /* We block the "cancel" signal throughout this code ... */
4821 sigaddset (&blocked_mask, cancel);
4822 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4823
4824 /* ... except during a sigsuspend. */
4825 sigdelset (&suspend_mask, cancel);
4826}