* yyscript.y: Fix spelling error in comment.
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
CommitLineData
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
9b254dd1 3 Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
e26af52f 4 Free Software Foundation, Inc.
3993f6b1
DJ
5
6 This file is part of GDB.
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
a9762ec7 10 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
11 (at your option) any later version.
12
13 This program is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
a9762ec7 19 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
20
21#include "defs.h"
22#include "inferior.h"
23#include "target.h"
d6b0e80f 24#include "gdb_string.h"
3993f6b1 25#include "gdb_wait.h"
d6b0e80f
AC
26#include "gdb_assert.h"
27#ifdef HAVE_TKILL_SYSCALL
28#include <unistd.h>
29#include <sys/syscall.h>
30#endif
3993f6b1 31#include <sys/ptrace.h>
0274a8ce 32#include "linux-nat.h"
ac264b3b 33#include "linux-fork.h"
d6b0e80f
AC
34#include "gdbthread.h"
35#include "gdbcmd.h"
36#include "regcache.h"
4f844a66 37#include "regset.h"
10d6c8cd
DJ
38#include "inf-ptrace.h"
39#include "auxv.h"
dba24537
AC
40#include <sys/param.h> /* for MAXPATHLEN */
41#include <sys/procfs.h> /* for elf_gregset etc. */
42#include "elf-bfd.h" /* for elfcore_write_* */
43#include "gregset.h" /* for gregset */
44#include "gdbcore.h" /* for get_exec_file */
45#include <ctype.h> /* for isdigit */
46#include "gdbthread.h" /* for struct thread_info etc. */
47#include "gdb_stat.h" /* for struct stat */
48#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
49#include "inf-loop.h"
50#include "event-loop.h"
51#include "event-top.h"
dba24537
AC
52
53#ifndef O_LARGEFILE
54#define O_LARGEFILE 0
55#endif
0274a8ce 56
3993f6b1
DJ
57/* If the system headers did not provide the constants, hard-code the normal
58 values. */
59#ifndef PTRACE_EVENT_FORK
60
61#define PTRACE_SETOPTIONS 0x4200
62#define PTRACE_GETEVENTMSG 0x4201
63
64/* options set using PTRACE_SETOPTIONS */
65#define PTRACE_O_TRACESYSGOOD 0x00000001
66#define PTRACE_O_TRACEFORK 0x00000002
67#define PTRACE_O_TRACEVFORK 0x00000004
68#define PTRACE_O_TRACECLONE 0x00000008
69#define PTRACE_O_TRACEEXEC 0x00000010
9016a515
DJ
70#define PTRACE_O_TRACEVFORKDONE 0x00000020
71#define PTRACE_O_TRACEEXIT 0x00000040
3993f6b1
DJ
72
73/* Wait extended result codes for the above trace options. */
74#define PTRACE_EVENT_FORK 1
75#define PTRACE_EVENT_VFORK 2
76#define PTRACE_EVENT_CLONE 3
77#define PTRACE_EVENT_EXEC 4
c874c7fc 78#define PTRACE_EVENT_VFORK_DONE 5
9016a515 79#define PTRACE_EVENT_EXIT 6
3993f6b1
DJ
80
81#endif /* PTRACE_EVENT_FORK */
82
83/* We can't always assume that this flag is available, but all systems
84 with the ptrace event handlers also have __WALL, so it's safe to use
85 here. */
86#ifndef __WALL
87#define __WALL 0x40000000 /* Wait for any child. */
88#endif
89
02d3ff8c
UW
90#ifndef PTRACE_GETSIGINFO
91#define PTRACE_GETSIGINFO 0x4202
92#endif
93
10d6c8cd
DJ
94/* The single-threaded native GNU/Linux target_ops. We save a pointer for
95 the use of the multi-threaded target. */
96static struct target_ops *linux_ops;
f973ed9c 97static struct target_ops linux_ops_saved;
10d6c8cd 98
9f0bdab8
DJ
99/* The method to call, if any, when a new thread is attached. */
100static void (*linux_nat_new_thread) (ptid_t);
101
ac264b3b
MS
102/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
103 Called by our to_xfer_partial. */
104static LONGEST (*super_xfer_partial) (struct target_ops *,
105 enum target_object,
106 const char *, gdb_byte *,
107 const gdb_byte *,
10d6c8cd
DJ
108 ULONGEST, LONGEST);
109
/* Non-zero enables debug output for the synchronous LWP layer
   ("set debug lin-lwp").  */
static int debug_linux_nat;

/* "show debug lin-lwp" callback: print the current setting VALUE.  */
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}
d6b0e80f 118
/* Non-zero enables debug output for the async event machinery
   ("set debug lin-lwp-async").  */
static int debug_linux_nat_async = 0;

/* "show debug lin-lwp-async" callback: print the current setting VALUE.  */
static void
show_debug_linux_nat_async (struct ui_file *file, int from_tty,
			    struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux async lwp module is %s.\n"),
		    value);
}
127
9016a515
DJ
128static int linux_parent_pid;
129
ae087d01
DJ
/* A singly-linked list node recording a stopped process and the wait
   status it stopped with.  Used to remember early SIGSTOP arrivals
   (see linux_record_stopped_pid).  */
struct simple_pid_list
{
  int pid;			/* Process/LWP id.  */
  int status;			/* Wait status it was stopped with.  */
  struct simple_pid_list *next;	/* Next node, or NULL.  */
};
struct simple_pid_list *stopped_pids;
137
3993f6b1
DJ
138/* This variable is a tri-state flag: -1 for unknown, 0 if PTRACE_O_TRACEFORK
139 can not be used, 1 if it can. */
140
141static int linux_supports_tracefork_flag = -1;
142
9016a515
DJ
143/* If we have PTRACE_O_TRACEFORK, this flag indicates whether we also have
144 PTRACE_O_TRACEVFORKDONE. */
145
146static int linux_supports_tracevforkdone_flag = -1;
147
b84876c2
PA
148/* Async mode support */
149
150/* To listen to target events asynchronously, we install a SIGCHLD
151 handler whose duty is to call waitpid (-1, ..., WNOHANG) to get all
152 the pending events into a pipe. Whenever we're ready to handle
153 events asynchronously, this pipe is registered as the waitable file
154 handle in the event loop. When we get to entry target points
155 coming out of the common code (target_wait, target_resume, ...),
156 that are going to call waitpid, we block SIGCHLD signals, and
157 remove all the events placed in the pipe into a local queue. All
158 the subsequent calls to my_waitpid (a waitpid wrapper) check this
159 local queue first. */
160
161/* True if async mode is currently on. */
162static int linux_nat_async_enabled;
163
164/* Zero if the async mode, although enabled, is masked, which means
165 linux_nat_wait should behave as if async mode was off. */
166static int linux_nat_async_mask_value = 1;
167
168/* The read/write ends of the pipe registered as waitable file in the
169 event loop. */
170static int linux_nat_event_pipe[2] = { -1, -1 };
171
172/* Number of queued events in the pipe. */
173static volatile int linux_nat_num_queued_events;
174
175/* If async mode is on, true if we're listening for events; false if
176 target events are blocked. */
177static int linux_nat_async_events_enabled;
178
179static int linux_nat_async_events (int enable);
180static void pipe_to_local_event_queue (void);
181static void local_event_queue_to_pipe (void);
182static void linux_nat_event_pipe_push (int pid, int status, int options);
183static int linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options);
184static void linux_nat_set_async_mode (int on);
185static void linux_nat_async (void (*callback)
186 (enum inferior_event_type event_type, void *context),
187 void *context);
188static int linux_nat_async_mask (int mask);
189
/* Captures the result of a successful waitpid call, along with the
   options used in that call.  */
struct waitpid_result
{
  int pid;			/* Pid returned by waitpid.  */
  int status;			/* Wait status reported for PID.  */
  int options;			/* Options passed to the waitpid call
				   (notably whether __WCLONE was set).  */
  struct waitpid_result *next;	/* Next queued result, or NULL.  */
};

/* A singly-linked list of the results of the waitpid calls performed
   in the async SIGCHLD handler.  */
static struct waitpid_result *waitpid_queue = NULL;
203
204static int
205queued_waitpid (int pid, int *status, int flags)
206{
207 struct waitpid_result *msg = waitpid_queue, *prev = NULL;
208
209 if (debug_linux_nat_async)
210 fprintf_unfiltered (gdb_stdlog,
211 "\
212QWPID: linux_nat_async_events_enabled(%d), linux_nat_num_queued_events(%d)\n",
213 linux_nat_async_events_enabled,
214 linux_nat_num_queued_events);
215
216 if (flags & __WALL)
217 {
218 for (; msg; prev = msg, msg = msg->next)
219 if (pid == -1 || pid == msg->pid)
220 break;
221 }
222 else if (flags & __WCLONE)
223 {
224 for (; msg; prev = msg, msg = msg->next)
225 if (msg->options & __WCLONE
226 && (pid == -1 || pid == msg->pid))
227 break;
228 }
229 else
230 {
231 for (; msg; prev = msg, msg = msg->next)
232 if ((msg->options & __WCLONE) == 0
233 && (pid == -1 || pid == msg->pid))
234 break;
235 }
236
237 if (msg)
238 {
239 int pid;
240
241 if (prev)
242 prev->next = msg->next;
243 else
244 waitpid_queue = msg->next;
245
246 msg->next = NULL;
247 if (status)
248 *status = msg->status;
249 pid = msg->pid;
250
251 if (debug_linux_nat_async)
252 fprintf_unfiltered (gdb_stdlog, "QWPID: pid(%d), status(%x)\n",
253 pid, msg->status);
254 xfree (msg);
255
256 return pid;
257 }
258
259 if (debug_linux_nat_async)
260 fprintf_unfiltered (gdb_stdlog, "QWPID: miss\n");
261
262 if (status)
263 *status = 0;
264 return -1;
265}
266
267static void
268push_waitpid (int pid, int status, int options)
269{
270 struct waitpid_result *event, *new_event;
271
272 new_event = xmalloc (sizeof (*new_event));
273 new_event->pid = pid;
274 new_event->status = status;
275 new_event->options = options;
276 new_event->next = NULL;
277
278 if (waitpid_queue)
279 {
280 for (event = waitpid_queue;
281 event && event->next;
282 event = event->next)
283 ;
284
285 event->next = new_event;
286 }
287 else
288 waitpid_queue = new_event;
289}
290
/* Drain all queued events of PID.  If PID is -1, the effect is of
   draining all events.  */
static void
drain_queued_events (int pid)
{
  /* __WALL matches any queued entry; keep popping until a miss.  */
  while (queued_waitpid (pid, NULL, __WALL) != -1)
    ;
}
299
ae087d01
DJ
300\f
301/* Trivial list manipulation functions to keep track of a list of
302 new stopped processes. */
303static void
3d799a95 304add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01
DJ
305{
306 struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));
307 new_pid->pid = pid;
3d799a95 308 new_pid->status = status;
ae087d01
DJ
309 new_pid->next = *listp;
310 *listp = new_pid;
311}
312
313static int
3d799a95 314pull_pid_from_list (struct simple_pid_list **listp, int pid, int *status)
ae087d01
DJ
315{
316 struct simple_pid_list **p;
317
318 for (p = listp; *p != NULL; p = &(*p)->next)
319 if ((*p)->pid == pid)
320 {
321 struct simple_pid_list *next = (*p)->next;
3d799a95 322 *status = (*p)->status;
ae087d01
DJ
323 xfree (*p);
324 *p = next;
325 return 1;
326 }
327 return 0;
328}
329
3d799a95
DJ
/* Remember that PID stopped with wait status STATUS before we were
   ready to consume the event, so a later wait can retrieve it.  */
static void
linux_record_stopped_pid (int pid, int status)
{
  add_to_pid_list (&stopped_pids, pid, status);
}
335
3993f6b1
DJ
336\f
/* A helper function for linux_test_for_tracefork, called after fork ().
   Runs in the child: asks to be traced, stops itself so the parent can
   set ptrace options, forks once so the parent can observe (or fail to
   observe) a PTRACE_EVENT_FORK, then exits.  Never returns.  */

static void
linux_tracefork_child (void)
{
  /* Note: return values are deliberately ignored here; if anything
     fails the parent will simply conclude tracefork is unavailable.  */
  ptrace (PTRACE_TRACEME, 0, 0, 0);
  kill (getpid (), SIGSTOP);
  fork ();
  _exit (0);
}
349
b84876c2
PA
/* Wrapper function for waitpid which handles EINTR, and checks for
   locally queued events (pushed there by the async SIGCHLD handler)
   before asking the kernel.  Returns the pid waited for, or -1.  */

static int
my_waitpid (int pid, int *status, int flags)
{
  int ret;

  /* There should be no concurrent calls to waitpid: async event
     collection must be off while we wait synchronously.  */
  gdb_assert (!linux_nat_async_events_enabled);

  /* Prefer an event already drained into the local queue.  */
  ret = queued_waitpid (pid, status, flags);
  if (ret != -1)
    return ret;

  /* Retry the real waitpid if a signal interrupts it.  */
  do
    {
      ret = waitpid (pid, status, flags);
    }
  while (ret == -1 && errno == EINTR);

  return ret;
}
373
/* Determine if PTRACE_O_TRACEFORK can be used to follow fork events.

   First, we try to enable fork tracing on ORIGINAL_PID.  If this fails,
   we know that the feature is not available.  This may change the tracing
   options for ORIGINAL_PID, but we'll be setting them shortly anyway.

   However, if it succeeds, we don't know for sure that the feature is
   available; old versions of PTRACE_SETOPTIONS ignored unknown options.  We
   create a child process, attach to it, use PTRACE_SETOPTIONS to enable
   fork tracing, and let it fork.  If the process exits, we assume that we
   can't use TRACEFORK; if we get the fork notification, and we can extract
   the new child's PID, then we assume that we can.

   Side effects: sets linux_supports_tracefork_flag and
   linux_supports_tracevforkdone_flag.  */

static void
linux_test_for_tracefork (int original_pid)
{
  int child_pid, ret, status;
  long second_pid;

  /* Pessimistic defaults until proven otherwise.  */
  linux_supports_tracefork_flag = 0;
  linux_supports_tracevforkdone_flag = 0;

  /* Cheap first test: a kernel without the option rejects it here.  */
  ret = ptrace (PTRACE_SETOPTIONS, original_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    return;

  child_pid = fork ();
  if (child_pid == -1)
    perror_with_name (("fork"));

  if (child_pid == 0)
    linux_tracefork_child ();	/* Child: traceme, SIGSTOP, fork, exit.  */

  /* Wait for the child's self-delivered SIGSTOP.  */
  ret = my_waitpid (child_pid, &status, 0);
  if (ret == -1)
    perror_with_name (("waitpid"));
  else if (ret != child_pid)
    error (_("linux_test_for_tracefork: waitpid: unexpected result %d."), ret);
  if (! WIFSTOPPED (status))
    error (_("linux_test_for_tracefork: waitpid: unexpected status %d."), status);

  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0, PTRACE_O_TRACEFORK);
  if (ret != 0)
    {
      /* The option was rejected on the child: clean the child up and
	 report lack of support.  */
      ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
      if (ret != 0)
	{
	  warning (_("linux_test_for_tracefork: failed to kill child"));
	  return;
	}

      ret = my_waitpid (child_pid, &status, 0);
      if (ret != child_pid)
	warning (_("linux_test_for_tracefork: failed to wait for killed child"));
      else if (!WIFSIGNALED (status))
	warning (_("linux_test_for_tracefork: unexpected wait status 0x%x from "
		   "killed child"), status);

      return;
    }

  /* Check whether PTRACE_O_TRACEVFORKDONE is available.  */
  ret = ptrace (PTRACE_SETOPTIONS, child_pid, 0,
		PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORKDONE);
  linux_supports_tracevforkdone_flag = (ret == 0);

  /* Let the child run to its fork () call.  */
  ret = ptrace (PTRACE_CONT, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to resume child"));

  ret = my_waitpid (child_pid, &status, 0);

  /* A PTRACE_EVENT_FORK stop with an extractable grandchild pid proves
     the feature works.  */
  if (ret == child_pid && WIFSTOPPED (status)
      && status >> 16 == PTRACE_EVENT_FORK)
    {
      second_pid = 0;
      ret = ptrace (PTRACE_GETEVENTMSG, child_pid, 0, &second_pid);
      if (ret == 0 && second_pid != 0)
	{
	  int second_status;

	  linux_supports_tracefork_flag = 1;
	  /* The grandchild is traced too (fork-following); reap it.  */
	  my_waitpid (second_pid, &second_status, 0);
	  ret = ptrace (PTRACE_KILL, second_pid, 0, 0);
	  if (ret != 0)
	    warning (_("linux_test_for_tracefork: failed to kill second child"));
	  my_waitpid (second_pid, &status, 0);
	}
    }
  else
    warning (_("linux_test_for_tracefork: unexpected result from waitpid "
	       "(%d, status 0x%x)"), ret, status);

  /* Dispose of the test child.  */
  ret = ptrace (PTRACE_KILL, child_pid, 0, 0);
  if (ret != 0)
    warning (_("linux_test_for_tracefork: failed to kill child"));
  my_waitpid (child_pid, &status, 0);
}
472
/* Return non-zero iff we have tracefork functionality available.
   This function also sets linux_supports_tracefork_flag.  PID is a
   ptrace-stopped process used for probing on the first call.  */

static int
linux_supports_tracefork (int pid)
{
  /* -1 means "not probed yet"; probe once and cache the answer.  */
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracefork_flag;
}
483
/* Return non-zero iff PTRACE_O_TRACEVFORKDONE is available, probing
   (via PID) on the first call; the probe also decides tracefork
   support as a side effect.  */
static int
linux_supports_tracevforkdone (int pid)
{
  if (linux_supports_tracefork_flag == -1)
    linux_test_for_tracefork (pid);
  return linux_supports_tracevforkdone_flag;
}
491
3993f6b1 492\f
4de4c07c
DJ
/* Enable extended ptrace event reporting (fork/vfork/exec/clone, and
   vfork-done when available) on the process or LWP denoted by PTID.
   A no-op when the kernel lacks PTRACE_O_TRACEFORK support.  */
void
linux_enable_event_reporting (ptid_t ptid)
{
  int pid = ptid_get_lwp (ptid);
  int options;

  /* A ptid with no lwp component denotes the whole process.  */
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  if (! linux_supports_tracefork (pid))
    return;

  options = PTRACE_O_TRACEFORK | PTRACE_O_TRACEVFORK | PTRACE_O_TRACEEXEC
    | PTRACE_O_TRACECLONE;
  if (linux_supports_tracevforkdone (pid))
    options |= PTRACE_O_TRACEVFORKDONE;

  /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to support
     read-only process state.  */

  ptrace (PTRACE_SETOPTIONS, pid, 0, options);
}
515
6d8fd2b7
UW
/* Target hook run after attaching to PID: turn on extended ptrace
   events and look for a thread library to hand off to.  */
static void
linux_child_post_attach (int pid)
{
  linux_enable_event_reporting (pid_to_ptid (pid));
  check_for_thread_db ();
}
522
/* Target hook run after starting a new inferior PTID: same setup as
   the post-attach case.  */
static void
linux_child_post_startup_inferior (ptid_t ptid)
{
  linux_enable_event_reporting (ptid);
  check_for_thread_db ();
}
529
6d8fd2b7
UW
530static int
531linux_child_follow_fork (struct target_ops *ops, int follow_child)
3993f6b1 532{
4de4c07c
DJ
533 ptid_t last_ptid;
534 struct target_waitstatus last_status;
9016a515 535 int has_vforked;
4de4c07c
DJ
536 int parent_pid, child_pid;
537
b84876c2
PA
538 if (target_can_async_p ())
539 target_async (NULL, 0);
540
4de4c07c 541 get_last_target_status (&last_ptid, &last_status);
9016a515 542 has_vforked = (last_status.kind == TARGET_WAITKIND_VFORKED);
d3587048
DJ
543 parent_pid = ptid_get_lwp (last_ptid);
544 if (parent_pid == 0)
545 parent_pid = ptid_get_pid (last_ptid);
4de4c07c
DJ
546 child_pid = last_status.value.related_pid;
547
548 if (! follow_child)
549 {
550 /* We're already attached to the parent, by default. */
551
552 /* Before detaching from the child, remove all breakpoints from
553 it. (This won't actually modify the breakpoint list, but will
554 physically remove the breakpoints from the child.) */
9016a515
DJ
555 /* If we vforked this will remove the breakpoints from the parent
556 also, but they'll be reinserted below. */
4de4c07c
DJ
557 detach_breakpoints (child_pid);
558
ac264b3b
MS
559 /* Detach new forked process? */
560 if (detach_fork)
f75c00e4 561 {
e85a822c 562 if (info_verbose || debug_linux_nat)
ac264b3b
MS
563 {
564 target_terminal_ours ();
565 fprintf_filtered (gdb_stdlog,
566 "Detaching after fork from child process %d.\n",
567 child_pid);
568 }
4de4c07c 569
ac264b3b
MS
570 ptrace (PTRACE_DETACH, child_pid, 0, 0);
571 }
572 else
573 {
574 struct fork_info *fp;
575 /* Retain child fork in ptrace (stopped) state. */
576 fp = find_fork_pid (child_pid);
577 if (!fp)
578 fp = add_fork (child_pid);
579 fork_save_infrun_state (fp, 0);
580 }
9016a515
DJ
581
582 if (has_vforked)
583 {
b957e937
DJ
584 gdb_assert (linux_supports_tracefork_flag >= 0);
585 if (linux_supports_tracevforkdone (0))
9016a515
DJ
586 {
587 int status;
588
589 ptrace (PTRACE_CONT, parent_pid, 0, 0);
58aecb61 590 my_waitpid (parent_pid, &status, __WALL);
c874c7fc 591 if ((status >> 16) != PTRACE_EVENT_VFORK_DONE)
8a3fe4f8
AC
592 warning (_("Unexpected waitpid result %06x when waiting for "
593 "vfork-done"), status);
9016a515
DJ
594 }
595 else
596 {
597 /* We can't insert breakpoints until the child has
598 finished with the shared memory region. We need to
599 wait until that happens. Ideal would be to just
600 call:
601 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
602 - waitpid (parent_pid, &status, __WALL);
603 However, most architectures can't handle a syscall
604 being traced on the way out if it wasn't traced on
605 the way in.
606
607 We might also think to loop, continuing the child
608 until it exits or gets a SIGTRAP. One problem is
609 that the child might call ptrace with PTRACE_TRACEME.
610
611 There's no simple and reliable way to figure out when
612 the vforked child will be done with its copy of the
613 shared memory. We could step it out of the syscall,
614 two instructions, let it go, and then single-step the
615 parent once. When we have hardware single-step, this
616 would work; with software single-step it could still
617 be made to work but we'd have to be able to insert
618 single-step breakpoints in the child, and we'd have
619 to insert -just- the single-step breakpoint in the
620 parent. Very awkward.
621
622 In the end, the best we can do is to make sure it
623 runs for a little while. Hopefully it will be out of
624 range of any breakpoints we reinsert. Usually this
625 is only the single-step breakpoint at vfork's return
626 point. */
627
628 usleep (10000);
629 }
630
631 /* Since we vforked, breakpoints were removed in the parent
632 too. Put them back. */
633 reattach_breakpoints (parent_pid);
634 }
4de4c07c 635 }
3993f6b1 636 else
4de4c07c
DJ
637 {
638 char child_pid_spelling[40];
639
640 /* Needed to keep the breakpoint lists in sync. */
9016a515
DJ
641 if (! has_vforked)
642 detach_breakpoints (child_pid);
4de4c07c
DJ
643
644 /* Before detaching from the parent, remove all breakpoints from it. */
645 remove_breakpoints ();
646
e85a822c 647 if (info_verbose || debug_linux_nat)
f75c00e4
DJ
648 {
649 target_terminal_ours ();
ac264b3b
MS
650 fprintf_filtered (gdb_stdlog,
651 "Attaching after fork to child process %d.\n",
652 child_pid);
f75c00e4 653 }
4de4c07c 654
9016a515
DJ
655 /* If we're vforking, we may want to hold on to the parent until
656 the child exits or execs. At exec time we can remove the old
657 breakpoints from the parent and detach it; at exit time we
658 could do the same (or even, sneakily, resume debugging it - the
659 child's exec has failed, or something similar).
660
661 This doesn't clean up "properly", because we can't call
662 target_detach, but that's OK; if the current target is "child",
663 then it doesn't need any further cleanups, and lin_lwp will
664 generally not encounter vfork (vfork is defined to fork
665 in libpthread.so).
666
667 The holding part is very easy if we have VFORKDONE events;
668 but keeping track of both processes is beyond GDB at the
669 moment. So we don't expose the parent to the rest of GDB.
670 Instead we quietly hold onto it until such time as we can
671 safely resume it. */
672
673 if (has_vforked)
674 linux_parent_pid = parent_pid;
ac264b3b
MS
675 else if (!detach_fork)
676 {
677 struct fork_info *fp;
678 /* Retain parent fork in ptrace (stopped) state. */
679 fp = find_fork_pid (parent_pid);
680 if (!fp)
681 fp = add_fork (parent_pid);
682 fork_save_infrun_state (fp, 0);
683 }
9016a515 684 else
b84876c2 685 target_detach (NULL, 0);
4de4c07c 686
9f0bdab8 687 inferior_ptid = ptid_build (child_pid, child_pid, 0);
ee057212
DJ
688
689 /* Reinstall ourselves, since we might have been removed in
690 target_detach (which does other necessary cleanup). */
ac264b3b 691
ee057212 692 push_target (ops);
9f0bdab8 693 linux_nat_switch_fork (inferior_ptid);
ef29ce1a 694 check_for_thread_db ();
4de4c07c
DJ
695
696 /* Reset breakpoints in the child as appropriate. */
697 follow_inferior_reset_breakpoints ();
698 }
699
b84876c2
PA
700 if (target_can_async_p ())
701 target_async (inferior_event_handler, 0);
702
4de4c07c
DJ
703 return 0;
704}
705
4de4c07c 706\f
6d8fd2b7
UW
/* Target hook: error out unless the kernel can report fork events.  */
static void
linux_child_insert_fork_catchpoint (int pid)
{
  if (! linux_supports_tracefork (pid))
    error (_("Your system does not support fork catchpoints."));
}
713
6d8fd2b7
UW
/* Target hook: error out unless the kernel can report vfork events.  */
static void
linux_child_insert_vfork_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support vfork catchpoints."));
}
720
6d8fd2b7
UW
/* Target hook: error out unless the kernel can report exec events.  */
static void
linux_child_insert_exec_catchpoint (int pid)
{
  if (!linux_supports_tracefork (pid))
    error (_("Your system does not support exec catchpoints."));
}
727
d6b0e80f
AC
728/* On GNU/Linux there are no real LWP's. The closest thing to LWP's
729 are processes sharing the same VM space. A multi-threaded process
730 is basically a group of such processes. However, such a grouping
731 is almost entirely a user-space issue; the kernel doesn't enforce
732 such a grouping at all (this might change in the future). In
733 general, we'll rely on the threads library (i.e. the GNU/Linux
734 Threads library) to provide such a grouping.
735
736 It is perfectly well possible to write a multi-threaded application
737 without the assistance of a threads library, by using the clone
738 system call directly. This module should be able to give some
739 rudimentary support for debugging such applications if developers
740 specify the CLONE_PTRACE flag in the clone system call, and are
741 using the Linux kernel 2.4 or above.
742
743 Note that there are some peculiarities in GNU/Linux that affect
744 this code:
745
746 - In general one should specify the __WCLONE flag to waitpid in
747 order to make it report events for any of the cloned processes
748 (and leave it out for the initial process). However, if a cloned
749 process has exited the exit status is only reported if the
750 __WCLONE flag is absent. Linux kernel 2.4 has a __WALL flag, but
751 we cannot use it since GDB must work on older systems too.
752
753 - When a traced, cloned process exits and is waited for by the
754 debugger, the kernel reassigns it to the original parent and
755 keeps it around as a "zombie". Somehow, the GNU/Linux Threads
756 library doesn't notice this, which leads to the "zombie problem":
757 When debugged a multi-threaded process that spawns a lot of
758 threads will run out of processes, even if the threads exit,
759 because the "zombies" stay around. */
760
761/* List of known LWPs. */
9f0bdab8 762struct lwp_info *lwp_list;
d6b0e80f
AC
763
764/* Number of LWPs in the list. */
765static int num_lwps;
d6b0e80f
AC
766\f
767
d6b0e80f
AC
768/* If the last reported event was a SIGTRAP, this variable is set to
769 the process id of the LWP/thread that got it. */
770ptid_t trap_ptid;
771\f
772
d6b0e80f
AC
773/* Since we cannot wait (in linux_nat_wait) for the initial process and
774 any cloned processes with a single call to waitpid, we have to use
775 the WNOHANG flag and call waitpid in a loop. To optimize
776 things a bit we use `sigsuspend' to wake us up when a process has
777 something to report (it will send us a SIGCHLD if it has). To make
778 this work we have to juggle with the signal mask. We save the
779 original signal mask such that we can restore it before creating a
780 new process in order to avoid blocking certain signals in the
781 inferior. We then block SIGCHLD during the waitpid/sigsuspend
782 loop. */
783
784/* Original signal mask. */
785static sigset_t normal_mask;
786
787/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
788 _initialize_linux_nat. */
789static sigset_t suspend_mask;
790
b84876c2
PA
791/* SIGCHLD action for synchronous mode. */
792struct sigaction sync_sigchld_action;
793
794/* SIGCHLD action for asynchronous mode. */
795static struct sigaction async_sigchld_action;
d6b0e80f
AC
796\f
797
798/* Prototypes for local functions. */
799static int stop_wait_callback (struct lwp_info *lp, void *data);
800static int linux_nat_thread_alive (ptid_t ptid);
6d8fd2b7 801static char *linux_child_pid_to_exec_file (int pid);
d6b0e80f
AC
802\f
/* Convert wait status STATUS to a string.  Used for printing debug
   messages only.  The result lives in a static buffer, overwritten by
   the next call.  */

static char *
status_to_str (int status)
{
  static char buf[64];

  if (WIFSTOPPED (status))
    snprintf (buf, sizeof (buf), "%s (stopped)",
	      strsignal (WSTOPSIG (status)));
  else if (WIFSIGNALED (status))
    /* Use WTERMSIG here: WSTOPSIG is only meaningful for a stopped
       process and reported the wrong signal name for killed ones.  */
    snprintf (buf, sizeof (buf), "%s (terminated)",
	      strsignal (WTERMSIG (status)));
  else
    snprintf (buf, sizeof (buf), "%d (exited)", WEXITSTATUS (status));

  return buf;
}
822
823/* Initialize the list of LWPs. Note that this module, contrary to
824 what GDB's generic threads layer does for its thread list,
825 re-initializes the LWP lists whenever we mourn or detach (which
826 doesn't involve mourning) the inferior. */
827
828static void
829init_lwp_list (void)
830{
831 struct lwp_info *lp, *lpnext;
832
833 for (lp = lwp_list; lp; lp = lpnext)
834 {
835 lpnext = lp->next;
836 xfree (lp);
837 }
838
839 lwp_list = NULL;
840 num_lwps = 0;
d6b0e80f
AC
841}
842
/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be stopped
   (with an exception for the very first LWP).  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (is_lwp (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  /* Start from an all-zero state before filling in fields.  */
  memset (lp, 0, sizeof (struct lwp_info));

  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;

  /* Link at the head of the list.  */
  lp->next = lwp_list;
  lwp_list = lp;
  ++num_lwps;

  /* Notify the low-level target hook about every LWP except the
     first (the main thread is announced elsewhere).  */
  if (num_lwps > 1 && linux_nat_new_thread != NULL)
    linux_nat_new_thread (ptid);

  return lp;
}
871
872/* Remove the LWP specified by PID from the list. */
873
874static void
875delete_lwp (ptid_t ptid)
876{
877 struct lwp_info *lp, *lpprev;
878
879 lpprev = NULL;
880
881 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
882 if (ptid_equal (lp->ptid, ptid))
883 break;
884
885 if (!lp)
886 return;
887
d6b0e80f
AC
888 num_lwps--;
889
890 if (lpprev)
891 lpprev->next = lp->next;
892 else
893 lwp_list = lp->next;
894
895 xfree (lp);
896}
897
898/* Return a pointer to the structure describing the LWP corresponding
899 to PID. If no corresponding LWP could be found, return NULL. */
900
901static struct lwp_info *
902find_lwp_pid (ptid_t ptid)
903{
904 struct lwp_info *lp;
905 int lwp;
906
907 if (is_lwp (ptid))
908 lwp = GET_LWP (ptid);
909 else
910 lwp = GET_PID (ptid);
911
912 for (lp = lwp_list; lp; lp = lp->next)
913 if (lwp == GET_LWP (lp->ptid))
914 return lp;
915
916 return NULL;
917}
918
919/* Call CALLBACK with its second argument set to DATA for every LWP in
920 the list. If CALLBACK returns 1 for a particular LWP, return a
921 pointer to the structure describing that LWP immediately.
922 Otherwise return NULL. */
923
924struct lwp_info *
925iterate_over_lwps (int (*callback) (struct lwp_info *, void *), void *data)
926{
927 struct lwp_info *lp, *lpnext;
928
929 for (lp = lwp_list; lp; lp = lpnext)
930 {
931 lpnext = lp->next;
932 if ((*callback) (lp, data))
933 return lp;
934 }
935
936 return NULL;
937}
938
f973ed9c
DJ
/* Update our internal state when changing from one fork (checkpoint,
   et cetera) to another indicated by NEW_PTID.  We can only switch
   single-threaded applications, so we only create one new LWP, and
   the previous list is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  /* Throw away the old LWP list wholesale; the fork we're switching
     to is single-threaded by assumption.  */
  init_lwp_list ();
  lp = add_lwp (new_ptid);
  lp->stopped = 1;
}
953
e26af52f
DJ
954/* Record a PTID for later deletion. */
955
956struct saved_ptids
957{
958 ptid_t ptid;
959 struct saved_ptids *next;
960};
961static struct saved_ptids *threads_to_delete;
962
963static void
964record_dead_thread (ptid_t ptid)
965{
966 struct saved_ptids *p = xmalloc (sizeof (struct saved_ptids));
967 p->ptid = ptid;
968 p->next = threads_to_delete;
969 threads_to_delete = p;
970}
971
/* Delete any dead threads which are not the current thread.  Entries
   matching inferior_ptid are kept for a later pass, since core GDB
   cannot cope with the current thread being deleted.  */

static void
prune_lwps (void)
{
  struct saved_ptids **p = &threads_to_delete;

  /* Pointer-to-pointer walk: on deletion *p is re-aimed at the
     successor, so P is only advanced when the entry is kept.  */
  while (*p)
    if (! ptid_equal ((*p)->ptid, inferior_ptid))
      {
	struct saved_ptids *tmp = *p;
	delete_thread (tmp->ptid);
	*p = tmp->next;
	xfree (tmp);
      }
    else
      p = &(*p)->next;
}
990
e26af52f
DJ
/* Handle the exit of a single thread LP: announce it if requested,
   remove it from GDB's thread list (deferring if it is the current
   thread), and drop it from the LWP list.  */

static void
exit_lwp (struct lwp_info *lp)
{
  if (in_thread_list (lp->ptid))
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      /* Core GDB cannot deal with us deleting the current thread.  */
      if (!ptid_equal (lp->ptid, inferior_ptid))
	delete_thread (lp->ptid);
      else
	/* Defer deletion until prune_lwps runs with a different
	   current thread.  */
	record_dead_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}
1010
d6b0e80f
AC
1011/* Attach to the LWP specified by PID. If VERBOSE is non-zero, print
1012 a message telling the user that a new LWP has been added to the
9ee57c33
DJ
1013 process. Return 0 if successful or -1 if the new LWP could not
1014 be attached. */
/* NOTE(review): the comment above is stale - this function takes no
   VERBOSE parameter; success/failure is reported solely through the
   0/-1 return value.  */
d6b0e80f 1015
9ee57c33 1016int
93815fbf 1017lin_lwp_attach_lwp (ptid_t ptid)
d6b0e80f 1018{
9ee57c33 1019 struct lwp_info *lp;
b84876c2 1020 int async_events_were_enabled = 0;
d6b0e80f
AC
1021
1022 gdb_assert (is_lwp (ptid));
1023
b84876c2
PA
/* Async SIGCHLD handling would race with the synchronous waitpid
   below, so temporarily disable it.  */
1024 if (target_can_async_p ())
1025 async_events_were_enabled = linux_nat_async_events (0);
d6b0e80f 1026
9ee57c33 1027 lp = find_lwp_pid (ptid);
d6b0e80f
AC
1028
1029 /* We assume that we're already attached to any LWP that has an id
1030 equal to the overall process id, and to any LWP that is already
1031 in our list of LWPs. If we're not seeing exit events from threads
1032 and we've had PID wraparound since we last tried to stop all threads,
1033 this assumption might be wrong; fortunately, this is very unlikely
1034 to happen. */
9ee57c33 1035 if (GET_LWP (ptid) != GET_PID (ptid) && lp == NULL)
d6b0e80f
AC
1036 {
1037 pid_t pid;
1038 int status;
9f0bdab8 1039 int cloned = 0;
d6b0e80f
AC
1040
1041 if (ptrace (PTRACE_ATTACH, GET_LWP (ptid), 0, 0) < 0)
9ee57c33
DJ
1042 {
1043 /* If we fail to attach to the thread, issue a warning,
1044 but continue. One way this can happen is if thread
e9efe249 1045 creation is interrupted; as of Linux kernel 2.6.19, a
9ee57c33
DJ
1046 bug may place threads in the thread list and then fail
1047 to create them. */
1048 warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
1049 safe_strerror (errno));
1050 return -1;
1051 }
1052
d6b0e80f
AC
1053 if (debug_linux_nat)
1054 fprintf_unfiltered (gdb_stdlog,
1055 "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
1056 target_pid_to_str (ptid));
1057
/* Collect the initial stop of the freshly attached LWP.  A plain
   waitpid only sees same-process children; clone threads need the
   __WCLONE retry below.  */
58aecb61 1058 pid = my_waitpid (GET_LWP (ptid), &status, 0);
d6b0e80f
AC
1059 if (pid == -1 && errno == ECHILD)
1060 {
1061 /* Try again with __WCLONE to check cloned processes. */
58aecb61 1062 pid = my_waitpid (GET_LWP (ptid), &status, __WCLONE);
9f0bdab8 1063 cloned = 1;
d6b0e80f
AC
1064 }
1065
1066 gdb_assert (pid == GET_LWP (ptid)
1067 && WIFSTOPPED (status) && WSTOPSIG (status));
1068
9f0bdab8
DJ
1069 if (lp == NULL)
1070 lp = add_lwp (ptid);
1071 lp->cloned = cloned;
1072
0ec9a092 1073 target_post_attach (pid);
d6b0e80f
AC
1074
1075 lp->stopped = 1;
1076
1077 if (debug_linux_nat)
1078 {
1079 fprintf_unfiltered (gdb_stdlog,
1080 "LLAL: waitpid %s received %s\n",
1081 target_pid_to_str (ptid),
1082 status_to_str (status));
1083 }
1084 }
1085 else
1086 {
1087 /* We assume that the LWP representing the original process is
1088 already stopped. Mark it as stopped in the data structure
155bd5d1
AC
1089 that the GNU/linux ptrace layer uses to keep track of
1090 threads. Note that this won't have already been done since
1091 the main thread will have, we assume, been stopped by an
1092 attach from a different layer. */
9ee57c33
DJ
1093 if (lp == NULL)
1094 lp = add_lwp (ptid);
d6b0e80f
AC
1095 lp->stopped = 1;
1096 }
9ee57c33 1097
b84876c2
PA
/* Re-enable async events only if they were enabled on entry.  */
1098 if (async_events_were_enabled)
1099 linux_nat_async_events (1);
1100
9ee57c33 1101 return 0;
d6b0e80f
AC
1102}
1103
b84876c2
PA
1104static void
1105linux_nat_create_inferior (char *exec_file, char *allargs, char **env,
1106 int from_tty)
1107{
1108 int saved_async = 0;
1109
1110 /* The fork_child mechanism is synchronous and calls target_wait, so
1111 we have to mask the async mode. */
1112
1113 if (target_can_async_p ())
1114 saved_async = linux_nat_async_mask (0);
1115 else
1116 {
1117 /* Restore the original signal mask. */
1118 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1119 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1120 suspend_mask = normal_mask;
1121 sigdelset (&suspend_mask, SIGCHLD);
1122 }
1123
1124 linux_ops->to_create_inferior (exec_file, allargs, env, from_tty);
1125
1126 if (saved_async)
1127 linux_nat_async_mask (saved_async);
1128}
1129
d6b0e80f
AC
/* target_ops attach method: attach to the process named in ARGS via
   the underlying layer, wait for its initial stop, register it as
   the first LWP, and fake the SIGSTOP status core GDB expects.  */
1130static void
1131linux_nat_attach (char *args, int from_tty)
1132{
1133 struct lwp_info *lp;
1134 pid_t pid;
1135 int status;
9f0bdab8 1136 int cloned = 0;
d6b0e80f
AC
1137
1138 /* FIXME: We should probably accept a list of process id's, and
1139 attach all of them. */
10d6c8cd 1140 linux_ops->to_attach (args, from_tty);
d6b0e80f 1141
b84876c2
PA
1142 if (!target_can_async_p ())
1143 {
1144 /* Restore the original signal mask. */
1145 sigprocmask (SIG_SETMASK, &normal_mask, NULL);
1146 /* Make sure we don't block SIGCHLD during a sigsuspend. */
1147 suspend_mask = normal_mask;
1148 sigdelset (&suspend_mask, SIGCHLD);
1149 }
1150
d6b0e80f
AC
1151 /* Make sure the initial process is stopped. The user-level threads
1152 layer might want to poke around in the inferior, and that won't
1153 work if things haven't stabilized yet. */
58aecb61 1154 pid = my_waitpid (GET_PID (inferior_ptid), &status, 0);
d6b0e80f
AC
1155 if (pid == -1 && errno == ECHILD)
1156 {
8a3fe4f8 1157 warning (_("%s is a cloned process"), target_pid_to_str (inferior_ptid));
d6b0e80f
AC
1158
1159 /* Try again with __WCLONE to check cloned processes. */
58aecb61 1160 pid = my_waitpid (GET_PID (inferior_ptid), &status, __WCLONE);
9f0bdab8 1161 cloned = 1;
d6b0e80f
AC
1162 }
1163
1164 gdb_assert (pid == GET_PID (inferior_ptid)
1165 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP);
1166
9f0bdab8
DJ
1167 /* Add the initial process as the first LWP to the list. */
1168 inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid), GET_PID (inferior_ptid));
1169 lp = add_lwp (inferior_ptid);
1170 lp->cloned = cloned;
1171
d6b0e80f
AC
1172 lp->stopped = 1;
1173
1174 /* Fake the SIGSTOP that core GDB expects. */
1175 lp->status = W_STOPCODE (SIGSTOP);
1176 lp->resumed = 1;
1177 if (debug_linux_nat)
b84876c2
PA
1178 fprintf_unfiltered (gdb_stdlog,
1179 "LNA: waitpid %ld, faking SIGSTOP\n", (long) pid);
1180 if (target_can_async_p ())
d6b0e80f 1181 {
b84876c2
PA
1182 /* Wake event loop with special token, to get to WFI. */
1183 linux_nat_event_pipe_push (-1, -1, -1);
1184 /* Register in the event loop. */
1185 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1186 }
1187}
1188
/* iterate_over_lwps callback used by linux_nat_detach: flush any
   pending signal back to the LWP, then PTRACE_DETACH from it -
   except the LWP whose id equals the process id, which is detached
   later by the underlying layer.  Always returns 0 so every LWP is
   visited.  */
1189static int
1190detach_callback (struct lwp_info *lp, void *data)
1191{
1192 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1193
1194 if (debug_linux_nat && lp->status)
1195 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1196 strsignal (WSTOPSIG (lp->status)),
1197 target_pid_to_str (lp->ptid));
1198
1199 while (lp->signalled && lp->stopped)
1200 {
1201 errno = 0;
1202 if (ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0,
1203 WSTOPSIG (lp->status)) < 0)
8a3fe4f8 1204 error (_("Can't continue %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1205 safe_strerror (errno));
1206
1207 if (debug_linux_nat)
1208 fprintf_unfiltered (gdb_stdlog,
1209 "DC: PTRACE_CONTINUE (%s, 0, %s) (OK)\n",
1210 target_pid_to_str (lp->ptid),
1211 status_to_str (lp->status));
1212
1213 lp->stopped = 0;
1214 lp->signalled = 0;
1215 lp->status = 0;
1216 /* FIXME drow/2003-08-26: There was a call to stop_wait_callback
1217 here. But since lp->signalled was cleared above,
1218 stop_wait_callback didn't do anything; the process was left
1219 running. Shouldn't we be waiting for it to stop?
1220 I've removed the call, since stop_wait_callback now does do
1221 something when called with lp->signalled == 0. */
1222
1223 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1224 }
1225
1226 /* We don't actually detach from the LWP that has an id equal to the
1227 overall process id just yet. */
1228 if (GET_LWP (lp->ptid) != GET_PID (lp->ptid))
1229 {
1230 errno = 0;
1231 if (ptrace (PTRACE_DETACH, GET_LWP (lp->ptid), 0,
1232 WSTOPSIG (lp->status)) < 0)
8a3fe4f8 1233 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1234 safe_strerror (errno));
1235
1236 if (debug_linux_nat)
1237 fprintf_unfiltered (gdb_stdlog,
1238 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1239 target_pid_to_str (lp->ptid),
1240 strsignal (WSTOPSIG (lp->status)));
1241
b84876c2 1242 drain_queued_events (GET_LWP (lp->ptid));
d6b0e80f
AC
1243 delete_lwp (lp->ptid);
1244 }
1245
1246 return 0;
1247}
1248
/* target_ops detach method: detach from every LWP via
   detach_callback, tear down our LWP bookkeeping, then let the
   underlying layer detach from the main process.  */
1249static void
1250linux_nat_detach (char *args, int from_tty)
1251{
b84876c2
PA
1252 int pid;
1253 if (target_can_async_p ())
1254 linux_nat_async (NULL, 0);
1255
d6b0e80f
AC
1256 iterate_over_lwps (detach_callback, NULL);
1257
1258 /* Only the initial process should be left right now. */
1259 gdb_assert (num_lwps == 1);
1260
1261 trap_ptid = null_ptid;
1262
1263 /* Destroy LWP info; it's no longer valid. */
1264 init_lwp_list ();
1265
b84876c2
PA
/* Drop the LWP component from inferior_ptid before handing the
   plain process id to the lower layer.  */
1266 pid = GET_PID (inferior_ptid);
1267 inferior_ptid = pid_to_ptid (pid);
10d6c8cd 1268 linux_ops->to_detach (args, from_tty);
b84876c2
PA
1269
1270 if (target_can_async_p ())
1271 drain_queued_events (pid);
d6b0e80f
AC
1272}
1273
1274/* Resume LP. */
1275
/* iterate_over_lwps callback: continue LP with no signal, but only
   if it is stopped and has no pending event to report; its step flag
   and cached siginfo are cleared.  Always returns 0 so iteration
   visits every LWP.  */
1276static int
1277resume_callback (struct lwp_info *lp, void *data)
1278{
1279 if (lp->stopped && lp->status == 0)
1280 {
10d6c8cd
DJ
1281 linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
1282 0, TARGET_SIGNAL_0);
d6b0e80f
AC
1283 if (debug_linux_nat)
1284 fprintf_unfiltered (gdb_stdlog,
1285 "RC: PTRACE_CONT %s, 0, 0 (resume sibling)\n",
1286 target_pid_to_str (lp->ptid));
1287 lp->stopped = 0;
1288 lp->step = 0;
9f0bdab8 1289 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
d6b0e80f
AC
1290 }
1291
1292 return 0;
1293}
1294
1295static int
1296resume_clear_callback (struct lwp_info *lp, void *data)
1297{
1298 lp->resumed = 0;
1299 return 0;
1300}
1301
1302static int
1303resume_set_callback (struct lwp_info *lp, void *data)
1304{
1305 lp->resumed = 1;
1306 return 0;
1307}
1308
/* target_ops resume method: resume PTID (or, with pid -1, the
   current inferior plus all siblings), optionally single-stepping,
   optionally delivering SIGNO.  If the event LWP already has a
   pending stop to report, short-circuit: leave everything stopped
   and let linux_nat_wait pick the event up.  */
1309static void
1310linux_nat_resume (ptid_t ptid, int step, enum target_signal signo)
1311{
1312 struct lwp_info *lp;
1313 int resume_all;
1314
76f50ad1
DJ
1315 if (debug_linux_nat)
1316 fprintf_unfiltered (gdb_stdlog,
1317 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1318 step ? "step" : "resume",
1319 target_pid_to_str (ptid),
1320 signo ? strsignal (signo) : "0",
1321 target_pid_to_str (inferior_ptid));
1322
e26af52f
DJ
1323 prune_lwps ();
1324
b84876c2
PA
1325 if (target_can_async_p ())
1326 /* Block events while we're here. */
1327 linux_nat_async_events (0);
1328
d6b0e80f
AC
1329 /* A specific PTID means `step only this process id'. */
1330 resume_all = (PIDGET (ptid) == -1);
1331
1332 if (resume_all)
1333 iterate_over_lwps (resume_set_callback, NULL);
1334 else
1335 iterate_over_lwps (resume_clear_callback, NULL);
1336
1337 /* If PID is -1, it's the current inferior that should be
1338 handled specially. */
1339 if (PIDGET (ptid) == -1)
1340 ptid = inferior_ptid;
1341
1342 lp = find_lwp_pid (ptid);
9f0bdab8 1343 gdb_assert (lp != NULL);
d6b0e80f 1344
9f0bdab8 1345 ptid = pid_to_ptid (GET_LWP (lp->ptid));
d6b0e80f 1346
9f0bdab8
DJ
1347 /* Remember if we're stepping. */
1348 lp->step = step;
d6b0e80f 1349
9f0bdab8
DJ
1350 /* Mark this LWP as resumed. */
1351 lp->resumed = 1;
76f50ad1 1352
9f0bdab8
DJ
1353 /* If we have a pending wait status for this thread, there is no
1354 point in resuming the process. But first make sure that
1355 linux_nat_wait won't preemptively handle the event - we
1356 should never take this short-circuit if we are going to
1357 leave LP running, since we have skipped resuming all the
1358 other threads. This bit of code needs to be synchronized
1359 with linux_nat_wait. */
76f50ad1 1360
9f0bdab8
DJ
1361 if (lp->status && WIFSTOPPED (lp->status))
1362 {
1363 int saved_signo = target_signal_from_host (WSTOPSIG (lp->status));
76f50ad1 1364
/* A pending signal that the user's signal settings say to pass
   silently is delivered now instead of being reported, so the
   short-circuit below is not taken for it.  */
9f0bdab8
DJ
1365 if (signal_stop_state (saved_signo) == 0
1366 && signal_print_state (saved_signo) == 0
1367 && signal_pass_state (saved_signo) == 1)
d6b0e80f 1368 {
9f0bdab8
DJ
1369 if (debug_linux_nat)
1370 fprintf_unfiltered (gdb_stdlog,
1371 "LLR: Not short circuiting for ignored "
1372 "status 0x%x\n", lp->status);
1373
d6b0e80f
AC
1374 /* FIXME: What should we do if we are supposed to continue
1375 this thread with a signal? */
1376 gdb_assert (signo == TARGET_SIGNAL_0);
9f0bdab8
DJ
1377 signo = saved_signo;
1378 lp->status = 0;
1379 }
1380 }
76f50ad1 1381
9f0bdab8
DJ
1382 if (lp->status)
1383 {
1384 /* FIXME: What should we do if we are supposed to continue
1385 this thread with a signal? */
1386 gdb_assert (signo == TARGET_SIGNAL_0);
76f50ad1 1387
9f0bdab8
DJ
1388 if (debug_linux_nat)
1389 fprintf_unfiltered (gdb_stdlog,
1390 "LLR: Short circuiting for status 0x%x\n",
1391 lp->status);
d6b0e80f 1392
b84876c2
PA
1393 if (target_can_async_p ())
1394 {
1395 /* Wake event loop with special token, to get to WFI. */
1396 linux_nat_event_pipe_push (-1, -1, -1);
1397
1398 target_async (inferior_event_handler, 0);
1399 }
9f0bdab8 1400 return;
d6b0e80f
AC
1401 }
1402
9f0bdab8
DJ
1403 /* Mark LWP as not stopped to prevent it from being continued by
1404 resume_callback. */
1405 lp->stopped = 0;
1406
d6b0e80f
AC
1407 if (resume_all)
1408 iterate_over_lwps (resume_callback, NULL);
1409
10d6c8cd 1410 linux_ops->to_resume (ptid, step, signo);
9f0bdab8
DJ
1411 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1412
d6b0e80f
AC
1413 if (debug_linux_nat)
1414 fprintf_unfiltered (gdb_stdlog,
1415 "LLR: %s %s, %s (resume event thread)\n",
1416 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1417 target_pid_to_str (ptid),
1418 signo ? strsignal (signo) : "0");
b84876c2
PA
1419
1420 if (target_can_async_p ())
1421 {
1422 target_executing = 1;
1423 target_async (inferior_event_handler, 0);
1424 }
d6b0e80f
AC
1425}
1426
/* Deliver signal SIGNO to the LWP with id LWPID.

   Prefer the tkill syscall where available, since with NPTL threads
   a plain kill targets the whole thread group rather than one LWP.
   The first ENOSYS failure latches TKILL_FAILED, and every later
   call falls straight through to kill.  Returns the result of
   whichever syscall was used, with errno set on failure.  */

static int tkill_failed;

static int
kill_lwp (int lwpid, int signo)
{
  errno = 0;

#ifdef HAVE_TKILL_SYSCALL
  if (!tkill_failed)
    {
      int result = syscall (__NR_tkill, lwpid, signo);

      /* Any outcome other than "tkill not implemented" is final.  */
      if (errno != ENOSYS)
	return result;

      errno = 0;
      tkill_failed = 1;
    }
#endif

  return kill (lwpid, signo);
}
1452
3d799a95
DJ
1453/* Handle a GNU/Linux extended wait response. If we see a clone
1454 event, we need to add the new LWP to our list (and not report the
1455 trap to higher layers). This function returns non-zero if the
1456 event should be ignored and we should wait again. If STOPPING is
1457 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1458
1459static int
3d799a95
DJ
1460linux_handle_extended_wait (struct lwp_info *lp, int status,
1461 int stopping)
d6b0e80f 1462{
3d799a95
DJ
1463 int pid = GET_LWP (lp->ptid);
1464 struct target_waitstatus *ourstatus = &lp->waitstatus;
1465 struct lwp_info *new_lp = NULL;
/* The PTRACE_EVENT_* code is carried in the high bits of the wait
   status, above the signal number.  */
1466 int event = status >> 16;
d6b0e80f 1467
3d799a95
DJ
1468 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1469 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1470 {
3d799a95
DJ
1471 unsigned long new_pid;
1472 int ret;
1473
1474 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1475
3d799a95
DJ
1476 /* If we haven't already seen the new PID stop, wait for it now. */
1477 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1478 {
1479 /* The new child has a pending SIGSTOP. We can't affect it until it
1480 hits the SIGSTOP, but we're already attached. */
1481 ret = my_waitpid (new_pid, &status,
1482 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1483 if (ret == -1)
1484 perror_with_name (_("waiting for new child"));
1485 else if (ret != new_pid)
1486 internal_error (__FILE__, __LINE__,
1487 _("wait returned unexpected PID %d"), ret);
1488 else if (!WIFSTOPPED (status))
1489 internal_error (__FILE__, __LINE__,
1490 _("wait returned unexpected status 0x%x"), status);
1491 }
1492
1493 ourstatus->value.related_pid = new_pid;
1494
1495 if (event == PTRACE_EVENT_FORK)
1496 ourstatus->kind = TARGET_WAITKIND_FORKED;
1497 else if (event == PTRACE_EVENT_VFORK)
1498 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 1499 else
3d799a95
DJ
1500 {
/* A clone is handled entirely here: register the new LWP and do
   not report the event to higher layers.  */
1501 ourstatus->kind = TARGET_WAITKIND_IGNORE;
1502 new_lp = add_lwp (BUILD_LWP (new_pid, GET_PID (inferior_ptid)));
1503 new_lp->cloned = 1;
d6b0e80f 1504
3d799a95
DJ
1505 if (WSTOPSIG (status) != SIGSTOP)
1506 {
1507 /* This can happen if someone starts sending signals to
1508 the new thread before it gets a chance to run, which
1509 have a lower number than SIGSTOP (e.g. SIGUSR1).
1510 This is an unlikely case, and harder to handle for
1511 fork / vfork than for clone, so we do not try - but
1512 we handle it for clone events here. We'll send
1513 the other signal on to the thread below. */
1514
1515 new_lp->signalled = 1;
1516 }
1517 else
1518 status = 0;
d6b0e80f 1519
3d799a95
DJ
1520 if (stopping)
1521 new_lp->stopped = 1;
1522 else
1523 {
/* Resume the new clone (related_pid was set to new_pid above),
   forwarding any non-SIGSTOP signal it stopped with.  */
1524 new_lp->resumed = 1;
1525 ptrace (PTRACE_CONT, lp->waitstatus.value.related_pid, 0,
1526 status ? WSTOPSIG (status) : 0);
1527 }
d6b0e80f 1528
3d799a95
DJ
1529 if (debug_linux_nat)
1530 fprintf_unfiltered (gdb_stdlog,
1531 "LHEW: Got clone event from LWP %ld, resuming\n",
1532 GET_LWP (lp->ptid));
1533 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1534
1535 return 1;
1536 }
1537
1538 return 0;
d6b0e80f
AC
1539 }
1540
3d799a95
DJ
1541 if (event == PTRACE_EVENT_EXEC)
1542 {
1543 ourstatus->kind = TARGET_WAITKIND_EXECD;
1544 ourstatus->value.execd_pathname
6d8fd2b7 1545 = xstrdup (linux_child_pid_to_exec_file (pid));
3d799a95
DJ
1546
/* If we were following a fork, stop tracking the parent now that
   the child has exec'd.  */
1547 if (linux_parent_pid)
1548 {
1549 detach_breakpoints (linux_parent_pid);
1550 ptrace (PTRACE_DETACH, linux_parent_pid, 0, 0);
1551
1552 linux_parent_pid = 0;
1553 }
1554
1555 return 0;
1556 }
1557
1558 internal_error (__FILE__, __LINE__,
1559 _("unknown ptrace event %d"), event);
d6b0e80f
AC
1560}
1561
1562/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
1563 exited. */
1564
1565static int
1566wait_lwp (struct lwp_info *lp)
1567{
1568 pid_t pid;
1569 int status;
1570 int thread_dead = 0;
1571
1572 gdb_assert (!lp->stopped);
1573 gdb_assert (lp->status == 0);
1574
/* Try a plain waitpid first; clone threads need the __WCLONE
   retry.  */
58aecb61 1575 pid = my_waitpid (GET_LWP (lp->ptid), &status, 0);
d6b0e80f
AC
1576 if (pid == -1 && errno == ECHILD)
1577 {
58aecb61 1578 pid = my_waitpid (GET_LWP (lp->ptid), &status, __WCLONE);
d6b0e80f
AC
1579 if (pid == -1 && errno == ECHILD)
1580 {
1581 /* The thread has previously exited. We need to delete it
1582 now because, for some vendor 2.4 kernels with NPTL
1583 support backported, there won't be an exit event unless
1584 it is the main thread. 2.6 kernels will report an exit
1585 event for each thread that exits, as expected. */
1586 thread_dead = 1;
1587 if (debug_linux_nat)
1588 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
1589 target_pid_to_str (lp->ptid));
1590 }
1591 }
1592
1593 if (!thread_dead)
1594 {
1595 gdb_assert (pid == GET_LWP (lp->ptid));
1596
1597 if (debug_linux_nat)
1598 {
1599 fprintf_unfiltered (gdb_stdlog,
1600 "WL: waitpid %s received %s\n",
1601 target_pid_to_str (lp->ptid),
1602 status_to_str (status));
1603 }
1604 }
1605
/* NOTE(review): if the thread vanished (double ECHILD above), STATUS
   was never assigned, so the WIFEXITED/WIFSIGNALED tests below read
   an uninitialized value.  Harmless only because thread_dead is
   already set on that path - worth restructuring upstream.  */
1606 /* Check if the thread has exited. */
1607 if (WIFEXITED (status) || WIFSIGNALED (status))
1608 {
1609 thread_dead = 1;
1610 if (debug_linux_nat)
1611 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
1612 target_pid_to_str (lp->ptid));
1613 }
1614
1615 if (thread_dead)
1616 {
e26af52f 1617 exit_lwp (lp);
d6b0e80f
AC
1618 return 0;
1619 }
1620
1621 gdb_assert (WIFSTOPPED (status));
1622
1623 /* Handle GNU/Linux's extended waitstatus for trace events. */
1624 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
1625 {
1626 if (debug_linux_nat)
1627 fprintf_unfiltered (gdb_stdlog,
1628 "WL: Handling extended status 0x%06x\n",
1629 status);
3d799a95 1630 if (linux_handle_extended_wait (lp, status, 1))
d6b0e80f
AC
1631 return wait_lwp (lp);
1632 }
1633
1634 return status;
1635}
1636
9f0bdab8
DJ
1637/* Save the most recent siginfo for LP. This is currently only called
1638 for SIGTRAP; some ports use the si_addr field for
1639 target_stopped_data_address. In the future, it may also be used to
1640 restore the siginfo of requeued signals. */
1641
1642static void
1643save_siginfo (struct lwp_info *lp)
1644{
/* ptrace reports failure through errno here, so clear it before the
   call and test it afterwards.  */
1645 errno = 0;
1646 ptrace (PTRACE_GETSIGINFO, GET_LWP (lp->ptid),
1647 (PTRACE_TYPE_ARG3) 0, &lp->siginfo);
1648
/* On failure leave well-defined (all-zero) siginfo rather than
   stale or garbage contents.  */
1649 if (errno != 0)
1650 memset (&lp->siginfo, 0, sizeof (lp->siginfo));
1651}
1652
d6b0e80f
AC
1653/* Send a SIGSTOP to LP. */
1654
1655static int
1656stop_callback (struct lwp_info *lp, void *data)
1657{
1658 if (!lp->stopped && !lp->signalled)
1659 {
1660 int ret;
1661
1662 if (debug_linux_nat)
1663 {
1664 fprintf_unfiltered (gdb_stdlog,
1665 "SC: kill %s **<SIGSTOP>**\n",
1666 target_pid_to_str (lp->ptid));
1667 }
1668 errno = 0;
1669 ret = kill_lwp (GET_LWP (lp->ptid), SIGSTOP);
1670 if (debug_linux_nat)
1671 {
1672 fprintf_unfiltered (gdb_stdlog,
1673 "SC: lwp kill %d %s\n",
1674 ret,
1675 errno ? safe_strerror (errno) : "ERRNO-OK");
1676 }
1677
1678 lp->signalled = 1;
1679 gdb_assert (lp->status == 0);
1680 }
1681
1682 return 0;
1683}
1684
1685/* Wait until LP is stopped. If DATA is non-null it is interpreted as
1686 a pointer to a set of signals to be flushed immediately. */
/* iterate_over_lwps callback: collect wait events from LP until the
   SIGSTOP we sent arrives.  Signals in the flush mask are discarded
   by re-continuing; a SIGTRAP or other interesting signal is parked
   in lp->status for linux_nat_wait, and any extra event collected
   while draining is thrown back to the LWP with kill_lwp.  */
1687
1688static int
1689stop_wait_callback (struct lwp_info *lp, void *data)
1690{
1691 sigset_t *flush_mask = data;
1692
1693 if (!lp->stopped)
1694 {
1695 int status;
1696
1697 status = wait_lwp (lp);
1698 if (status == 0)
1699 return 0;
1700
1701 /* Ignore any signals in FLUSH_MASK. */
1702 if (flush_mask && sigismember (flush_mask, WSTOPSIG (status)))
1703 {
1704 if (!lp->signalled)
1705 {
1706 lp->stopped = 1;
1707 return 0;
1708 }
1709
1710 errno = 0;
1711 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1712 if (debug_linux_nat)
1713 fprintf_unfiltered (gdb_stdlog,
1714 "PTRACE_CONT %s, 0, 0 (%s)\n",
1715 target_pid_to_str (lp->ptid),
1716 errno ? safe_strerror (errno) : "OK");
1717
1718 return stop_wait_callback (lp, flush_mask);
1719 }
1720
1721 if (WSTOPSIG (status) != SIGSTOP)
1722 {
1723 if (WSTOPSIG (status) == SIGTRAP)
1724 {
1725 /* If a LWP other than the LWP that we're reporting an
1726 event for has hit a GDB breakpoint (as opposed to
1727 some random trap signal), then just arrange for it to
1728 hit it again later. We don't keep the SIGTRAP status
1729 and don't forward the SIGTRAP signal to the LWP. We
1730 will handle the current event, eventually we will
1731 resume all LWPs, and this one will get its breakpoint
1732 trap again.
1733
1734 If we do not do this, then we run the risk that the
1735 user will delete or disable the breakpoint, but the
1736 thread will have already tripped on it. */
1737
9f0bdab8
DJ
1738 /* Save the trap's siginfo in case we need it later. */
1739 save_siginfo (lp);
1740
d6b0e80f
AC
1741 /* Now resume this LWP and get the SIGSTOP event. */
1742 errno = 0;
1743 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1744 if (debug_linux_nat)
1745 {
1746 fprintf_unfiltered (gdb_stdlog,
1747 "PTRACE_CONT %s, 0, 0 (%s)\n",
1748 target_pid_to_str (lp->ptid),
1749 errno ? safe_strerror (errno) : "OK");
1750
1751 fprintf_unfiltered (gdb_stdlog,
1752 "SWC: Candidate SIGTRAP event in %s\n",
1753 target_pid_to_str (lp->ptid));
1754 }
1755 /* Hold the SIGTRAP for handling by linux_nat_wait. */
1756 stop_wait_callback (lp, data);
1757 /* If there's another event, throw it back into the queue. */
1758 if (lp->status)
1759 {
1760 if (debug_linux_nat)
1761 {
1762 fprintf_unfiltered (gdb_stdlog,
1763 "SWC: kill %s, %s\n",
1764 target_pid_to_str (lp->ptid),
1765 status_to_str ((int) status));
1766 }
/* Re-queue the event the recursive call collected; the SIGTRAP in
   STATUS takes its place in lp->status below.  */
1767 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (lp->status));
1768 }
1769 /* Save the sigtrap event. */
1770 lp->status = status;
1771 return 0;
1772 }
1773 else
1774 {
1775 /* The thread was stopped with a signal other than
1776 SIGSTOP, and didn't accidentally trip a breakpoint. */
1777
1778 if (debug_linux_nat)
1779 {
1780 fprintf_unfiltered (gdb_stdlog,
1781 "SWC: Pending event %s in %s\n",
1782 status_to_str ((int) status),
1783 target_pid_to_str (lp->ptid));
1784 }
1785 /* Now resume this LWP and get the SIGSTOP event. */
1786 errno = 0;
1787 ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1788 if (debug_linux_nat)
1789 fprintf_unfiltered (gdb_stdlog,
1790 "SWC: PTRACE_CONT %s, 0, 0 (%s)\n",
1791 target_pid_to_str (lp->ptid),
1792 errno ? safe_strerror (errno) : "OK");
1793
1794 /* Hold this event/waitstatus while we check to see if
1795 there are any more (we still want to get that SIGSTOP). */
1796 stop_wait_callback (lp, data);
1797 /* If the lp->status field is still empty, use it to hold
1798 this event. If not, then this event must be returned
1799 to the event queue of the LWP. */
1800 if (lp->status == 0)
1801 lp->status = status;
1802 else
1803 {
1804 if (debug_linux_nat)
1805 {
1806 fprintf_unfiltered (gdb_stdlog,
1807 "SWC: kill %s, %s\n",
1808 target_pid_to_str (lp->ptid),
1809 status_to_str ((int) status));
1810 }
1811 kill_lwp (GET_LWP (lp->ptid), WSTOPSIG (status));
1812 }
1813 return 0;
1814 }
1815 }
1816 else
1817 {
1818 /* We caught the SIGSTOP that we intended to catch, so
1819 there's no SIGSTOP pending. */
1820 lp->stopped = 1;
1821 lp->signalled = 0;
1822 }
1823 }
1824
1825 return 0;
1826}
1827
1828/* Check whether PID has any pending signals in FLUSH_MASK. If so set
1829 the appropriate bits in PENDING, and return 1 - otherwise return 0. */
1830
1831static int
1832linux_nat_has_pending (int pid, sigset_t *pending, sigset_t *flush_mask)
1833{
1834 sigset_t blocked, ignored;
1835 int i;
1836
1837 linux_proc_pending_signals (pid, pending, &blocked, &ignored);
1838
1839 if (!flush_mask)
1840 return 0;
1841
1842 for (i = 1; i < NSIG; i++)
1843 if (sigismember (pending, i))
1844 if (!sigismember (flush_mask, i)
1845 || sigismember (&blocked, i)
1846 || sigismember (&ignored, i))
1847 sigdelset (pending, i);
1848
1849 if (sigisemptyset (pending))
1850 return 0;
1851
1852 return 1;
1853}
1854
1855/* DATA is interpreted as a mask of signals to flush. If LP has
1856 signals pending, and they are all in the flush mask, then arrange
1857 to flush them. LP should be stopped, as should all other threads
1858 it might share a signal queue with. */
1859
1860static int
1861flush_callback (struct lwp_info *lp, void *data)
1862{
1863 sigset_t *flush_mask = data;
/* NOTE(review): intersection, pid and status are declared but never
   used in this function - candidates for removal upstream.  */
1864 sigset_t pending, intersection, blocked, ignored;
1865 int pid, status;
1866
1867 /* Normally, when an LWP exits, it is removed from the LWP list. The
1868 last LWP isn't removed till later, however. So if there is only
1869 one LWP on the list, make sure it's alive. */
1870 if (lwp_list == lp && lp->next == NULL)
1871 if (!linux_nat_thread_alive (lp->ptid))
1872 return 0;
1873
1874 /* Just because the LWP is stopped doesn't mean that new signals
1875 can't arrive from outside, so this function must be careful of
1876 race conditions. However, because all threads are stopped, we
1877 can assume that the pending mask will not shrink unless we resume
1878 the LWP, and that it will then get another signal. We can't
1879 control which one, however. */
1880
1881 if (lp->status)
1882 {
1883 if (debug_linux_nat)
a3f17187 1884 printf_unfiltered (_("FC: LP has pending status %06x\n"), lp->status);
d6b0e80f
AC
1885 if (WIFSTOPPED (lp->status) && sigismember (flush_mask, WSTOPSIG (lp->status)))
1886 lp->status = 0;
1887 }
1888
3d799a95
DJ
1889 /* While there is a pending signal we would like to flush, continue
1890 the inferior and collect another signal. But if there's already
1891 a saved status that we don't want to flush, we can't resume the
1892 inferior - if it stopped for some other reason we wouldn't have
1893 anywhere to save the new status. In that case, we must leave the
1894 signal unflushed (and possibly generate an extra SIGINT stop).
1895 That's much less bad than losing a signal. */
1896 while (lp->status == 0
1897 && linux_nat_has_pending (GET_LWP (lp->ptid), &pending, flush_mask))
d6b0e80f
AC
1898 {
1899 int ret;
1900
1901 errno = 0;
1902 ret = ptrace (PTRACE_CONT, GET_LWP (lp->ptid), 0, 0);
1903 if (debug_linux_nat)
1904 fprintf_unfiltered (gdb_stderr,
1905 "FC: Sent PTRACE_CONT, ret %d %d\n", ret, errno);
1906
1907 lp->stopped = 0;
1908 stop_wait_callback (lp, flush_mask);
1909 if (debug_linux_nat)
1910 fprintf_unfiltered (gdb_stderr,
1911 "FC: Wait finished; saved status is %d\n",
1912 lp->status);
1913 }
1914
1915 return 0;
1916}
1917
1918/* Return non-zero if LP has a wait status pending. */
1919
1920static int
1921status_callback (struct lwp_info *lp, void *data)
1922{
1923 /* Only report a pending wait status if we pretend that this has
1924 indeed been resumed. */
1925 return (lp->status != 0 && lp->resumed);
1926}
1927
1928/* Return non-zero if LP isn't stopped. */
1929
1930static int
1931running_callback (struct lwp_info *lp, void *data)
1932{
1933 return (lp->stopped == 0 || (lp->status != 0 && lp->resumed));
1934}
1935
1936/* Count the LWP's that have had events. */
1937
1938static int
1939count_events_callback (struct lwp_info *lp, void *data)
1940{
1941 int *count = data;
1942
1943 gdb_assert (count != NULL);
1944
1945 /* Count only LWPs that have a SIGTRAP event pending. */
1946 if (lp->status != 0
1947 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1948 (*count)++;
1949
1950 return 0;
1951}
1952
1953/* Select the LWP (if any) that is currently being single-stepped. */
1954
1955static int
1956select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
1957{
1958 if (lp->step && lp->status != 0)
1959 return 1;
1960 else
1961 return 0;
1962}
1963
1964/* Select the Nth LWP that has had a SIGTRAP event. */
1965
1966static int
1967select_event_lwp_callback (struct lwp_info *lp, void *data)
1968{
1969 int *selector = data;
1970
1971 gdb_assert (selector != NULL);
1972
1973 /* Select only LWPs that have a SIGTRAP event pending. */
1974 if (lp->status != 0
1975 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP)
1976 if ((*selector)-- == 0)
1977 return 1;
1978
1979 return 0;
1980}
1981
/* iterate_over_lwps callback: for every LWP other than the elected
   event LWP (DATA) that stopped with a SIGTRAP at an inserted GDB
   breakpoint, back its PC up over the breakpoint and discard the
   SIGTRAP, so it simply re-trips the breakpoint after the next
   resume.  Always returns 0.  */
1982static int
1983cancel_breakpoints_callback (struct lwp_info *lp, void *data)
1984{
1985 struct lwp_info *event_lp = data;
1986
1987 /* Leave the LWP that has been elected to receive a SIGTRAP alone. */
1988 if (lp == event_lp)
1989 return 0;
1990
1991 /* If a LWP other than the LWP that we're reporting an event for has
1992 hit a GDB breakpoint (as opposed to some random trap signal),
1993 then just arrange for it to hit it again later. We don't keep
1994 the SIGTRAP status and don't forward the SIGTRAP signal to the
1995 LWP. We will handle the current event, eventually we will resume
1996 all LWPs, and this one will get its breakpoint trap again.
1997
1998 If we do not do this, then we run the risk that the user will
1999 delete or disable the breakpoint, but the LWP will have already
2000 tripped on it. */
2001
2002 if (lp->status != 0
2003 && WIFSTOPPED (lp->status) && WSTOPSIG (lp->status) == SIGTRAP
2004 && breakpoint_inserted_here_p (read_pc_pid (lp->ptid) -
b798847d
UW
2005 gdbarch_decr_pc_after_break
2006 (current_gdbarch)))
d6b0e80f
AC
2007 {
2008 if (debug_linux_nat)
2009 fprintf_unfiltered (gdb_stdlog,
2010 "CBC: Push back breakpoint for %s\n",
2011 target_pid_to_str (lp->ptid));
2012
2013 /* Back up the PC if necessary. */
b798847d
UW
2014 if (gdbarch_decr_pc_after_break (current_gdbarch))
2015 write_pc_pid (read_pc_pid (lp->ptid) - gdbarch_decr_pc_after_break
2016 (current_gdbarch),
2017 lp->ptid);
d6b0e80f
AC
2018
2019 /* Throw away the SIGTRAP. */
2020 lp->status = 0;
2021 }
2022
2023 return 0;
2024}
2025
2026/* Select one LWP out of those that have events pending. */
/* ORIG_LP/STATUS describe the LWP whose event waitpid happened to
   return first.  To avoid starving other threads, prefer an LWP
   being single-stepped; otherwise pick uniformly at random among all
   LWPs with pending SIGTRAPs.  On return *ORIG_LP/*STATUS describe
   the chosen LWP and its pending status is cleared.  */
2027
2028static void
2029select_event_lwp (struct lwp_info **orig_lp, int *status)
2030{
2031 int num_events = 0;
2032 int random_selector;
2033 struct lwp_info *event_lp;
2034
ac264b3b 2035 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2036 (*orig_lp)->status = *status;
2037
2038 /* Give preference to any LWP that is being single-stepped. */
2039 event_lp = iterate_over_lwps (select_singlestep_lwp_callback, NULL);
2040 if (event_lp != NULL)
2041 {
2042 if (debug_linux_nat)
2043 fprintf_unfiltered (gdb_stdlog,
2044 "SEL: Select single-step %s\n",
2045 target_pid_to_str (event_lp->ptid));
2046 }
2047 else
2048 {
2049 /* No single-stepping LWP. Select one at random, out of those
2050 which have had SIGTRAP events. */
2051
2052 /* First see how many SIGTRAP events we have. */
2053 iterate_over_lwps (count_events_callback, &num_events);
2054
2055 /* Now randomly pick a LWP out of those that have had a SIGTRAP. */
2056 random_selector = (int)
2057 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2058
2059 if (debug_linux_nat && num_events > 1)
2060 fprintf_unfiltered (gdb_stdlog,
2061 "SEL: Found %d SIGTRAP events, selecting #%d\n",
2062 num_events, random_selector);
2063
2064 event_lp = iterate_over_lwps (select_event_lwp_callback,
2065 &random_selector);
2066 }
2067
2068 if (event_lp != NULL)
2069 {
2070 /* Switch the event LWP. */
2071 *orig_lp = event_lp;
2072 *status = event_lp->status;
2073 }
2074
2075 /* Flush the wait status for the event LWP. */
2076 (*orig_lp)->status = 0;
2077}
2078
/* Return non-zero if LP has been resumed.  Used with
   iterate_over_lwps to assert at least one LWP is runnable.  */

static int
resumed_callback (struct lwp_info *lp, void *data)
{
  return lp->resumed;
}
2086
d6b0e80f
AC
/* Stop an active thread, verify it still exists, then resume it.
   Used when the main thread reports an exit, to find out whether the
   other LWPs are really gone.  Always returns 0 (visit all LWPs).  */

static int
stop_and_resume_callback (struct lwp_info *lp, void *data)
{
  struct lwp_info *ptr;

  if (!lp->stopped && !lp->signalled)
    {
      stop_callback (lp, NULL);
      stop_wait_callback (lp, NULL);
      /* Resume if the lwp still exists.  stop_wait_callback may have
         deleted LP from the list if it had exited, so re-scan the
         list rather than dereferencing LP unconditionally.  */
      for (ptr = lwp_list; ptr; ptr = ptr->next)
        if (lp == ptr)
          {
            resume_callback (lp, NULL);
            resume_set_callback (lp, NULL);
          }
    }
  return 0;
}
2108
/* Check if we should go on and pass this event to common code.
   Return the affected lwp if we are, or NULL otherwise.

   LWPID/STATUS/OPTIONS are the raw results of the waitpid call.
   Events that are internal bookkeeping (delayed SIGSTOPs, extended
   trace events handled here, exits of non-last LWPs, stops of LWPs
   we don't know yet) are consumed and NULL is returned.  */

static struct lwp_info *
linux_nat_filter_event (int lwpid, int status, int options)
{
  struct lwp_info *lp;

  lp = find_lwp_pid (pid_to_ptid (lwpid));

  /* Check for stop events reported by a process we didn't already
     know about - anything not already in our LWP list.

     If we're expecting to receive stopped processes after
     fork, vfork, and clone events, then we'll just add the
     new one to our list and go back to waiting for the event
     to be reported - the stopped process might be returned
     from waitpid before or after the event is.  */
  if (WIFSTOPPED (status) && !lp)
    {
      linux_record_stopped_pid (lwpid, status);
      return NULL;
    }

  /* Make sure we don't report an event for the exit of an LWP not in
     our list, i.e. not part of the current process.  This can happen
     if we detach from a program we original forked and then it
     exits.  */
  if (!WIFSTOPPED (status) && !lp)
    return NULL;

  /* NOTE drow/2003-06-17: This code seems to be meant for debugging
     CLONE_PTRACE processes which do not use the thread library -
     otherwise we wouldn't find the new LWP this way.  That doesn't
     currently work, and the following code is currently unreachable
     due to the two blocks above.  If it's fixed some day, this code
     should be broken out into a function so that we can also pick up
     LWPs from the new interface.  */
  if (!lp)
    {
      lp = add_lwp (BUILD_LWP (lwpid, GET_PID (inferior_ptid)));
      if (options & __WCLONE)
        lp->cloned = 1;

      gdb_assert (WIFSTOPPED (status)
                  && WSTOPSIG (status) == SIGSTOP);
      lp->signalled = 1;

      if (!in_thread_list (inferior_ptid))
        {
          inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
                                     GET_PID (inferior_ptid));
          add_thread (inferior_ptid);
        }

      add_thread (lp->ptid);
    }

  /* Save the trap's siginfo in case we need it later.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    save_siginfo (lp);

  /* Handle GNU/Linux's extended waitstatus for trace events.
     The extended event code lives in bits 16 and up of the status.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP && status >> 16 != 0)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Handling extended status 0x%06x\n",
                            status);
      if (linux_handle_extended_wait (lp, status, 0))
        return NULL;
    }

  /* Check if the thread has exited.  */
  if ((WIFEXITED (status) || WIFSIGNALED (status)) && num_lwps > 1)
    {
      /* If this is the main thread, we must stop all threads and
         verify if they are still alive.  This is because in the nptl
         thread model, there is no signal issued for exiting LWPs
         other than the main thread.  We only get the main thread exit
         signal once all child threads have already exited.  If we
         stop all the threads and use the stop_wait_callback to check
         if they have exited we can determine whether this signal
         should be ignored or whether it means the end of the debugged
         application, regardless of which threading model is being
         used.  */
      if (GET_PID (lp->ptid) == GET_LWP (lp->ptid))
        {
          lp->stopped = 1;
          iterate_over_lwps (stop_and_resume_callback, NULL);
        }

      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s exited.\n",
                            target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* If there is at least one more LWP, then the exit signal was
         not the end of the debugged application and should be
         ignored.  */
      if (num_lwps > 0)
        {
          /* Make sure there is at least one thread running.  */
          gdb_assert (iterate_over_lwps (running_callback, NULL));

          /* Discard the event.  */
          return NULL;
        }
    }

  /* Check if the current LWP has previously exited.  In the nptl
     thread model, LWPs other than the main thread do not issue
     signals when they exit so we must check whenever the thread has
     stopped.  A similar check is made in stop_wait_callback().  */
  if (num_lwps > 1 && !linux_nat_thread_alive (lp->ptid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s exited.\n",
                            target_pid_to_str (lp->ptid));

      exit_lwp (lp);

      /* Make sure there is at least one thread running.  */
      gdb_assert (iterate_over_lwps (running_callback, NULL));

      /* Discard the event.  */
      return NULL;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Delayed SIGSTOP caught for %s.\n",
                            target_pid_to_str (lp->ptid));

      /* This is a delayed SIGSTOP.  */
      lp->signalled = 0;

      registers_changed ();

      /* Re-resume the LWP the way it was going before the stray
         SIGSTOP (single-step or continue, with no signal).  */
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
                            lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
                            lp->step ?
                            "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                            target_pid_to_str (lp->ptid));

      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return NULL;
    }

  /* An interesting event.  */
  gdb_assert (lp);
  return lp;
}
2275
b84876c2
PA
2276/* Get the events stored in the pipe into the local queue, so they are
2277 accessible to queued_waitpid. We need to do this, since it is not
2278 always the case that the event at the head of the pipe is the event
2279 we want. */
2280
2281static void
2282pipe_to_local_event_queue (void)
2283{
2284 if (debug_linux_nat_async)
2285 fprintf_unfiltered (gdb_stdlog,
2286 "PTLEQ: linux_nat_num_queued_events(%d)\n",
2287 linux_nat_num_queued_events);
2288 while (linux_nat_num_queued_events)
2289 {
2290 int lwpid, status, options;
2291
2292 lwpid = linux_nat_event_pipe_pop (&status, &options);
2293 if (lwpid == -1 && status == -1 && options == -1)
2294 /* Special wake up event loop token. */
2295 continue;
2296
2297 gdb_assert (lwpid > 0);
2298 push_waitpid (lwpid, status, options);
2299 }
2300}
2301
2302/* Get the unprocessed events stored in the local queue back into the
2303 pipe, so the event loop realizes there's something else to
2304 process. */
2305
2306static void
2307local_event_queue_to_pipe (void)
2308{
2309 struct waitpid_result *w = waitpid_queue;
2310 while (w)
2311 {
2312 struct waitpid_result *next = w->next;
2313 linux_nat_event_pipe_push (w->pid,
2314 w->status,
2315 w->options);
2316 xfree (w);
2317 w = next;
2318 }
2319 waitpid_queue = NULL;
2320
2321 if (debug_linux_nat_async)
2322 fprintf_unfiltered (gdb_stdlog,
2323 "LEQTP: linux_nat_num_queued_events(%d)\n",
2324 linux_nat_num_queued_events);
2325}
2326
d6b0e80f
AC
/* The target_wait implementation for the Linux native target.
   Waits for an event in the inferior designated by PTID (or any
   resumed LWP when PTID is (-1, 0, 0)), stops all other LWPs, picks
   the event to report, and stores it in *OURSTATUS.  Returns the
   ptid of the LWP whose event is reported.  */

static ptid_t
linux_nat_wait (ptid_t ptid, struct target_waitstatus *ourstatus)
{
  struct lwp_info *lp = NULL;
  int options = 0;
  int status = 0;
  pid_t pid = PIDGET (ptid);
  sigset_t flush_mask;

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");

  /* The first time we get here after starting a new inferior, we may
     not have added it to the LWP list yet - this is the earliest
     moment at which we know its PID.  */
  if (num_lwps == 0)
    {
      gdb_assert (!is_lwp (inferior_ptid));

      inferior_ptid = BUILD_LWP (GET_PID (inferior_ptid),
                                 GET_PID (inferior_ptid));
      lp = add_lwp (inferior_ptid);
      lp->resumed = 1;
    }

  sigemptyset (&flush_mask);

  if (target_can_async_p ())
    /* Block events while we're here.  */
    target_async (NULL, 0);

retry:

  /* Make sure there is at least one LWP that has been resumed.  */
  gdb_assert (iterate_over_lwps (resumed_callback, NULL));

  /* First check if there is a LWP with a wait status pending.  */
  if (pid == -1)
    {
      /* Any LWP that's been resumed will do.  */
      lp = iterate_over_lwps (status_callback, NULL);
      if (lp)
        {
          status = lp->status;
          lp->status = 0;

          if (debug_linux_nat && status)
            fprintf_unfiltered (gdb_stdlog,
                                "LLW: Using pending wait status %s for %s.\n",
                                status_to_str (status),
                                target_pid_to_str (lp->ptid));
        }

      /* But if we don't find one, we'll have to wait, and check both
         cloned and uncloned processes.  We start with the cloned
         processes.  */
      options = __WCLONE | WNOHANG;
    }
  else if (is_lwp (ptid))
    {
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Waiting for specific LWP %s.\n",
                            target_pid_to_str (ptid));

      /* We have a specific LWP to check.  */
      lp = find_lwp_pid (ptid);
      gdb_assert (lp);
      status = lp->status;
      lp->status = 0;

      if (debug_linux_nat && status)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: Using pending wait status %s for %s.\n",
                            status_to_str (status),
                            target_pid_to_str (lp->ptid));

      /* If we have to wait, take into account whether PID is a cloned
         process or not.  And we have to convert it to something that
         the layer beneath us can understand.  */
      options = lp->cloned ? __WCLONE : 0;
      pid = GET_LWP (ptid);
    }

  if (status && lp->signalled)
    {
      /* A pending SIGSTOP may interfere with the normal stream of
         events.  In a typical case where interference is a problem,
         we have a SIGSTOP signal pending for LWP A while
         single-stepping it, encounter an event in LWP B, and take the
         pending SIGSTOP while trying to stop LWP A.  After processing
         the event in LWP B, LWP A is continued, and we'll never see
         the SIGTRAP associated with the last time we were
         single-stepping LWP A.  */

      /* Resume the thread.  It should halt immediately returning the
         pending SIGSTOP.  */
      registers_changed ();
      linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
                            lp->step, TARGET_SIGNAL_0);
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: %s %s, 0, 0 (expect SIGSTOP)\n",
                            lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                            target_pid_to_str (lp->ptid));
      lp->stopped = 0;
      gdb_assert (lp->resumed);

      /* This should catch the pending SIGSTOP.  */
      stop_wait_callback (lp, NULL);
    }

  if (!target_can_async_p ())
    {
      /* Causes SIGINT to be passed on to the attached process.  */
      set_sigint_trap ();
      set_sigio_trap ();
    }

  while (status == 0)
    {
      pid_t lwpid;

      if (target_can_async_p ())
        /* In async mode, don't ever block.  Only look at the locally
           queued events.  */
        lwpid = queued_waitpid (pid, &status, options);
      else
        lwpid = my_waitpid (pid, &status, options);

      if (lwpid > 0)
        {
          gdb_assert (pid == -1 || lwpid == pid);

          if (debug_linux_nat)
            {
              fprintf_unfiltered (gdb_stdlog,
                                  "LLW: waitpid %ld received %s\n",
                                  (long) lwpid, status_to_str (status));
            }

          lp = linux_nat_filter_event (lwpid, status, options);
          if (!lp)
            {
              /* A discarded event.  */
              status = 0;
              continue;
            }

          break;
        }

      if (pid == -1)
        {
          /* Alternate between checking cloned and uncloned processes.  */
          options ^= __WCLONE;

          /* And every time we have checked both:
             In async mode, return to event loop;
             In sync mode, suspend waiting for a SIGCHLD signal.  */
          if (options & __WCLONE)
            {
              if (target_can_async_p ())
                {
                  /* No interesting event.  */
                  ourstatus->kind = TARGET_WAITKIND_IGNORE;

                  /* Get ready for the next event.  */
                  target_async (inferior_event_handler, 0);

                  if (debug_linux_nat_async)
                    fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");

                  return minus_one_ptid;
                }

              sigsuspend (&suspend_mask);
            }
        }

      /* We shouldn't end up here unless we want to try again.  */
      gdb_assert (status == 0);
    }

  if (!target_can_async_p ())
    {
      clear_sigio_trap ();
      clear_sigint_trap ();
    }

  gdb_assert (lp);

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */

  if (WIFSTOPPED (status))
    {
      int signo = target_signal_from_host (WSTOPSIG (status));

      /* If we get a signal while single-stepping, we may need special
         care, e.g. to skip the signal handler.  Defer to common code.  */
      if (!lp->step
          && signal_stop_state (signo) == 0
          && signal_print_state (signo) == 0
          && signal_pass_state (signo) == 1)
        {
          /* FIMXE: kettenis/2001-06-06: Should we resume all threads
             here?  It is not clear we should.  GDB may not expect
             other threads to run.  On the other hand, not resuming
             newly attached threads may cause an unwanted delay in
             getting them running.  */
          registers_changed ();
          linux_ops->to_resume (pid_to_ptid (GET_LWP (lp->ptid)),
                                lp->step, signo);
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "LLW: %s %s, %s (preempt 'handle')\n",
                                lp->step ?
                                "PTRACE_SINGLESTEP" : "PTRACE_CONT",
                                target_pid_to_str (lp->ptid),
                                signo ? strsignal (signo) : "0");
          lp->stopped = 0;
          status = 0;
          goto retry;
        }

      if (signo == TARGET_SIGNAL_INT && signal_pass_state (signo) == 0)
        {
          /* If ^C/BREAK is typed at the tty/console, SIGINT gets
             forwarded to the entire process group, that is, all LWP's
             will receive it.  Since we only want to report it once,
             we try to flush it from all LWPs except this one.  */
          sigaddset (&flush_mask, SIGINT);
        }
    }

  /* This LWP is stopped now.  */
  lp->stopped = 1;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog, "LLW: Candidate event %s in %s.\n",
                        status_to_str (status), target_pid_to_str (lp->ptid));

  /* Now stop all other LWP's ...  */
  iterate_over_lwps (stop_callback, NULL);

  /* ... and wait until all of them have reported back that they're no
     longer running.  */
  iterate_over_lwps (stop_wait_callback, &flush_mask);
  iterate_over_lwps (flush_callback, &flush_mask);

  /* If we're not waiting for a specific LWP, choose an event LWP from
     among those that have had events.  Giving equal priority to all
     LWPs that have had events helps prevent starvation.  */
  if (pid == -1)
    select_event_lwp (&lp, &status);

  /* Now that we've selected our final event LWP, cancel any
     breakpoints in other LWPs that have hit a GDB breakpoint.  See
     the comment in cancel_breakpoints_callback to find out why.  */
  iterate_over_lwps (cancel_breakpoints_callback, lp);

  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP)
    {
      trap_ptid = lp->ptid;
      if (debug_linux_nat)
        fprintf_unfiltered (gdb_stdlog,
                            "LLW: trap_ptid is %s.\n",
                            target_pid_to_str (trap_ptid));
    }
  else
    trap_ptid = null_ptid;

  /* An extended waitstatus recorded earlier (e.g. a fork event)
     takes precedence over the raw status.  */
  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    {
      *ourstatus = lp->waitstatus;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
    }
  else
    store_waitstatus (ourstatus, status);

  /* Get ready for the next event.  */
  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);

  if (debug_linux_nat_async)
    fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");

  return lp->ptid;
}
2621
/* iterate_over_lwps callback: send PTRACE_KILL to LP.  Errors are
   only logged; the companion kill_wait_callback reaps and retries.  */

static int
kill_callback (struct lwp_info *lp, void *data)
{
  errno = 0;
  ptrace (PTRACE_KILL, GET_LWP (lp->ptid), 0, 0);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
                        target_pid_to_str (lp->ptid),
                        errno ? safe_strerror (errno) : "OK");

  return 0;
}
2635
/* iterate_over_lwps callback: reap LP after kill_callback, retrying
   the kill until waitpid reports the LWP is really gone.  */

static int
kill_wait_callback (struct lwp_info *lp, void *data)
{
  pid_t pid;

  /* We must make sure that there are no pending events (delayed
     SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
     program doesn't interfere with any following debugging session.  */

  /* For cloned processes we must check both with __WCLONE and
     without, since the exit status of a cloned process isn't reported
     with __WCLONE.  */
  if (lp->cloned)
    {
      do
        {
          pid = my_waitpid (GET_LWP (lp->ptid), NULL, __WCLONE);
          if (pid != (pid_t) -1)
            {
              if (debug_linux_nat)
                fprintf_unfiltered (gdb_stdlog,
                                    "KWC: wait %s received unknown.\n",
                                    target_pid_to_str (lp->ptid));
              /* The Linux kernel sometimes fails to kill a thread
                 completely after PTRACE_KILL; that goes from the stop
                 point in do_fork out to the one in
                 get_signal_to_deliver and waits again.  So kill it
                 again.  */
              kill_callback (lp, NULL);
            }
        }
      while (pid == GET_LWP (lp->ptid));

      gdb_assert (pid == -1 && errno == ECHILD);
    }

  do
    {
      pid = my_waitpid (GET_LWP (lp->ptid), NULL, 0);
      if (pid != (pid_t) -1)
        {
          if (debug_linux_nat)
            fprintf_unfiltered (gdb_stdlog,
                                "KWC: wait %s received unk.\n",
                                target_pid_to_str (lp->ptid));
          /* See the call to kill_callback above.  */
          kill_callback (lp, NULL);
        }
    }
  while (pid == GET_LWP (lp->ptid));

  gdb_assert (pid == -1 && errno == ECHILD);
  return 0;
}
2690
/* The to_kill target method: kill the inferior (and any unfollowed
   fork child or checkpointed forks), then mourn it.  */

static void
linux_nat_kill (void)
{
  struct target_waitstatus last;
  ptid_t last_ptid;
  int status;

  if (target_can_async_p ())
    target_async (NULL, 0);

  /* If we're stopped while forking and we haven't followed yet,
     kill the other task.  We need to do this first because the
     parent will be sleeping if this is a vfork.  */

  get_last_target_status (&last_ptid, &last);

  if (last.kind == TARGET_WAITKIND_FORKED
      || last.kind == TARGET_WAITKIND_VFORKED)
    {
      ptrace (PT_KILL, last.value.related_pid, 0, 0);
      wait (&status);
    }

  if (forks_exist_p ())
    {
      linux_fork_killall ();
      drain_queued_events (-1);
    }
  else
    {
      /* Kill all LWP's ...  */
      iterate_over_lwps (kill_callback, NULL);

      /* ... and wait until we've flushed all events.  */
      iterate_over_lwps (kill_wait_callback, NULL);
    }

  target_mourn_inferior ();
}
2730
/* The to_mourn_inferior target method: discard per-LWP state and
   either hand off to the layer beneath or switch to another fork.  */

static void
linux_nat_mourn_inferior (void)
{
  trap_ptid = null_ptid;

  /* Destroy LWP info; it's no longer valid.  */
  init_lwp_list ();

  if (! forks_exist_p ())
    {
      /* Normal case, no other forks available.  */
      if (target_can_async_p ())
        linux_nat_async (NULL, 0);
      linux_ops->to_mourn_inferior ();
    }
  else
    /* Multi-fork case.  The current inferior_ptid has exited, but
       there are other viable forks to debug.  Delete the exiting
       one and context-switch to the first available.  */
    linux_fork_mourn_inferior ();
}
2752
10d6c8cd
DJ
/* The to_xfer_partial target method: delegate to the layer beneath,
   temporarily rewriting inferior_ptid from LWP form to plain-PID
   form, which is what that layer expects.  */

static LONGEST
linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
                        const char *annex, gdb_byte *readbuf,
                        const gdb_byte *writebuf,
                        ULONGEST offset, LONGEST len)
{
  struct cleanup *old_chain = save_inferior_ptid ();
  LONGEST xfer;

  if (is_lwp (inferior_ptid))
    inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));

  xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                                     offset, len);

  /* Restore the original inferior_ptid before returning.  */
  do_cleanups (old_chain);
  return xfer;
}
2771
/* The to_thread_alive target method: return non-zero if the LWP
   designated by PTID still exists, using a throwaway ptrace probe.  */

static int
linux_nat_thread_alive (ptid_t ptid)
{
  gdb_assert (is_lwp (ptid));

  errno = 0;
  ptrace (PTRACE_PEEKUSER, GET_LWP (ptid), 0, 0);
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
                        "LLTA: PTRACE_PEEKUSER %s, 0, 0 (%s)\n",
                        target_pid_to_str (ptid),
                        errno ? safe_strerror (errno) : "OK");

  /* Not every Linux kernel implements PTRACE_PEEKUSER.  But we can
     handle that case gracefully since ptrace will first do a lookup
     for the process based upon the passed-in pid.  If that fails we
     will get either -ESRCH or -EPERM, otherwise the child exists and
     is alive.  */
  if (errno == ESRCH || errno == EPERM)
    return 0;

  return 1;
}
2795
2796static char *
2797linux_nat_pid_to_str (ptid_t ptid)
2798{
2799 static char buf[64];
2800
f973ed9c 2801 if (lwp_list && lwp_list->next && is_lwp (ptid))
d6b0e80f
AC
2802 {
2803 snprintf (buf, sizeof (buf), "LWP %ld", GET_LWP (ptid));
2804 return buf;
2805 }
2806
2807 return normal_pid_to_str (ptid);
2808}
2809
d6b0e80f
AC
/* SIGCHLD signal handler installed for synchronous mode.  */

static void
sigchld_handler (int signo)
{
  /* In async mode SIGCHLD is handled through the event pipe instead,
     so reaching this handler then indicates broken signal setup.  */
  if (linux_nat_async_enabled
      && linux_nat_async_events_enabled
      && signo == SIGCHLD)
    /* It is *always* a bug to hit this.  */
    internal_error (__FILE__, __LINE__,
                    "sigchld_handler called when async events are enabled");

  /* Do nothing.  The only reason for this handler is that it allows
     us to use sigsuspend in linux_nat_wait above to wait for the
     arrival of a SIGCHLD.  */
}
2824
dba24537
AC
2825/* Accepts an integer PID; Returns a string representing a file that
2826 can be opened to get the symbols for the child process. */
2827
6d8fd2b7
UW
2828static char *
2829linux_child_pid_to_exec_file (int pid)
dba24537
AC
2830{
2831 char *name1, *name2;
2832
2833 name1 = xmalloc (MAXPATHLEN);
2834 name2 = xmalloc (MAXPATHLEN);
2835 make_cleanup (xfree, name1);
2836 make_cleanup (xfree, name2);
2837 memset (name2, 0, MAXPATHLEN);
2838
2839 sprintf (name1, "/proc/%d/exe", pid);
2840 if (readlink (name1, name2, MAXPATHLEN) > 0)
2841 return name2;
2842 else
2843 return name1;
2844}
2845
2846/* Service function for corefiles and info proc. */
2847
2848static int
2849read_mapping (FILE *mapfile,
2850 long long *addr,
2851 long long *endaddr,
2852 char *permissions,
2853 long long *offset,
2854 char *device, long long *inode, char *filename)
2855{
2856 int ret = fscanf (mapfile, "%llx-%llx %s %llx %s %llx",
2857 addr, endaddr, permissions, offset, device, inode);
2858
2e14c2ea
MS
2859 filename[0] = '\0';
2860 if (ret > 0 && ret != EOF)
dba24537
AC
2861 {
2862 /* Eat everything up to EOL for the filename. This will prevent
2863 weird filenames (such as one with embedded whitespace) from
2864 confusing this code. It also makes this code more robust in
2865 respect to annotations the kernel may add after the filename.
2866
2867 Note the filename is used for informational purposes
2868 only. */
2869 ret += fscanf (mapfile, "%[^\n]\n", filename);
2870 }
2e14c2ea 2871
dba24537
AC
2872 return (ret != 0 && ret != EOF);
2873}
2874
/* Fills the "to_find_memory_regions" target vector.  Lists the memory
   regions in the inferior for a corefile.  FUNC is invoked once per
   mapping with (address, size, read, write, exec, OBFD); OBFD is
   passed through opaquely.  Returns 0; errors out if the maps file
   cannot be opened.  */

static int
linux_nat_find_memory_regions (int (*func) (CORE_ADDR,
                                            unsigned long,
                                            int, int, int, void *), void *obfd)
{
  long long pid = PIDGET (inferior_ptid);
  char mapsfilename[MAXPATHLEN];
  FILE *mapsfile;
  long long addr, endaddr, size, offset, inode;
  char permissions[8], device[8], filename[MAXPATHLEN];
  int read, write, exec;
  int ret;

  /* Compose the filename for the /proc memory map, and open it.  */
  sprintf (mapsfilename, "/proc/%lld/maps", pid);
  if ((mapsfile = fopen (mapsfilename, "r")) == NULL)
    error (_("Could not open %s."), mapsfilename);

  if (info_verbose)
    fprintf_filtered (gdb_stdout,
                      "Reading memory regions from %s\n", mapsfilename);

  /* Now iterate until end-of-file.  */
  while (read_mapping (mapsfile, &addr, &endaddr, &permissions[0],
                       &offset, &device[0], &inode, &filename[0]))
    {
      size = endaddr - addr;

      /* Get the segment's permissions.  */
      read = (strchr (permissions, 'r') != 0);
      write = (strchr (permissions, 'w') != 0);
      exec = (strchr (permissions, 'x') != 0);

      if (info_verbose)
        {
          fprintf_filtered (gdb_stdout,
                            "Save segment, %lld bytes at 0x%s (%c%c%c)",
                            size, paddr_nz (addr),
                            read ? 'r' : ' ',
                            write ? 'w' : ' ', exec ? 'x' : ' ');
          if (filename[0])
            fprintf_filtered (gdb_stdout, " for %s", filename);
          fprintf_filtered (gdb_stdout, "\n");
        }

      /* Invoke the callback function to create the corefile
         segment.  */
      func (addr, size, read, write, exec, obfd);
    }
  fclose (mapsfile);
  return 0;
}
2930
/* Records the thread's register state for the corefile note
   section.  Appends prstatus/prfpreg (and prxfpreg where available)
   notes for the thread PTID to NOTE_DATA and returns the new note
   buffer.  Prefers the gdbarch regset interface when the architecture
   provides it, falling back to the fill_*regset functions.  */

static char *
linux_nat_do_thread_registers (bfd *obfd, ptid_t ptid,
                               char *note_data, int *note_size)
{
  gdb_gregset_t gregs;
  gdb_fpregset_t fpregs;
#ifdef FILL_FPXREGSET
  gdb_fpxregset_t fpxregs;
#endif
  unsigned long lwp = ptid_get_lwp (ptid);
  struct regcache *regcache = get_thread_regcache (ptid);
  struct gdbarch *gdbarch = get_regcache_arch (regcache);
  const struct regset *regset;
  int core_regset_p;
  struct cleanup *old_chain;

  /* Fetch the thread's registers with inferior_ptid temporarily
     switched to PTID, then restore it.  */
  old_chain = save_inferior_ptid ();
  inferior_ptid = ptid;
  target_fetch_registers (regcache, -1);
  do_cleanups (old_chain);

  core_regset_p = gdbarch_regset_from_core_section_p (gdbarch);
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg",
                                                     sizeof (gregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
                            &gregs, sizeof (gregs));
  else
    fill_gregset (regcache, &gregs, -1);

  note_data = (char *) elfcore_write_prstatus (obfd,
                                               note_data,
                                               note_size,
                                               lwp,
                                               stop_signal, &gregs);

  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg2",
                                                     sizeof (fpregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
                            &fpregs, sizeof (fpregs));
  else
    fill_fpregset (regcache, &fpregs, -1);

  note_data = (char *) elfcore_write_prfpreg (obfd,
                                              note_data,
                                              note_size,
                                              &fpregs, sizeof (fpregs));

#ifdef FILL_FPXREGSET
  /* Extended FP registers (e.g. SSE state), only on targets that
     define FILL_FPXREGSET.  */
  if (core_regset_p
      && (regset = gdbarch_regset_from_core_section (gdbarch, ".reg-xfp",
                                                     sizeof (fpxregs))) != NULL
      && regset->collect_regset != NULL)
    regset->collect_regset (regset, regcache, -1,
                            &fpxregs, sizeof (fpxregs));
  else
    fill_fpxregset (regcache, &fpxregs, -1);

  note_data = (char *) elfcore_write_prxfpreg (obfd,
                                               note_data,
                                               note_size,
                                               &fpxregs, sizeof (fpxregs));
#endif
  return note_data;
}
3002
/* Accumulator threaded through linux_nat_corefile_thread_callback
   while iterating over LWPs to build the corefile note section.  */

struct linux_nat_corefile_thread_data
{
  bfd *obfd;            /* Output BFD the notes are written for.  */
  char *note_data;      /* Note buffer accumulated so far.  */
  int *note_size;       /* Running size of NOTE_DATA.  */
  int num_notes;        /* Number of per-thread notes recorded.  */
};
3010
/* Called by gdbthread.c once per thread.  Records the thread's
   register state for the corefile note section.  DATA is a
   struct linux_nat_corefile_thread_data accumulator.  Always
   returns 0 so every LWP is visited.  */

static int
linux_nat_corefile_thread_callback (struct lwp_info *ti, void *data)
{
  struct linux_nat_corefile_thread_data *args = data;

  args->note_data = linux_nat_do_thread_registers (args->obfd,
                                                   ti->ptid,
                                                   args->note_data,
                                                   args->note_size);
  args->num_notes++;

  return 0;
}
3027
/* Records the register state for the corefile note section.

   NOTE(review): the PTID parameter is ignored; the note is always
   built from a ptid derived from inferior_ptid.  The only caller in
   this file passes inferior_ptid, so behavior matches, but confirm
   before adding callers that pass anything else.  */

static char *
linux_nat_do_registers (bfd *obfd, ptid_t ptid,
                        char *note_data, int *note_size)
{
  return linux_nat_do_thread_registers (obfd,
                                        ptid_build (ptid_get_pid (inferior_ptid),
                                                    ptid_get_pid (inferior_ptid),
                                                    0),
                                        note_data, note_size);
}
3040
3041/* Fills the "to_make_corefile_note" target vector. Builds the note
3042 section for a corefile, and returns it in a malloc buffer. */
3043
3044static char *
3045linux_nat_make_corefile_notes (bfd *obfd, int *note_size)
3046{
3047 struct linux_nat_corefile_thread_data thread_args;
3048 struct cleanup *old_chain;
d99148ef 3049 /* The variable size must be >= sizeof (prpsinfo_t.pr_fname). */
dba24537 3050 char fname[16] = { '\0' };
d99148ef 3051 /* The variable size must be >= sizeof (prpsinfo_t.pr_psargs). */
dba24537
AC
3052 char psargs[80] = { '\0' };
3053 char *note_data = NULL;
3054 ptid_t current_ptid = inferior_ptid;
c6826062 3055 gdb_byte *auxv;
dba24537
AC
3056 int auxv_len;
3057
3058 if (get_exec_file (0))
3059 {
3060 strncpy (fname, strrchr (get_exec_file (0), '/') + 1, sizeof (fname));
3061 strncpy (psargs, get_exec_file (0), sizeof (psargs));
3062 if (get_inferior_args ())
3063 {
d99148ef
JK
3064 char *string_end;
3065 char *psargs_end = psargs + sizeof (psargs);
3066
3067 /* linux_elfcore_write_prpsinfo () handles zero unterminated
3068 strings fine. */
3069 string_end = memchr (psargs, 0, sizeof (psargs));
3070 if (string_end != NULL)
3071 {
3072 *string_end++ = ' ';
3073 strncpy (string_end, get_inferior_args (),
3074 psargs_end - string_end);
3075 }
dba24537
AC
3076 }
3077 note_data = (char *) elfcore_write_prpsinfo (obfd,
3078 note_data,
3079 note_size, fname, psargs);
3080 }
3081
3082 /* Dump information for threads. */
3083 thread_args.obfd = obfd;
3084 thread_args.note_data = note_data;
3085 thread_args.note_size = note_size;
3086 thread_args.num_notes = 0;
3087 iterate_over_lwps (linux_nat_corefile_thread_callback, &thread_args);
3088 if (thread_args.num_notes == 0)
3089 {
3090 /* iterate_over_threads didn't come up with any threads; just
3091 use inferior_ptid. */
3092 note_data = linux_nat_do_registers (obfd, inferior_ptid,
3093 note_data, note_size);
3094 }
3095 else
3096 {
3097 note_data = thread_args.note_data;
3098 }
3099
13547ab6
DJ
3100 auxv_len = target_read_alloc (&current_target, TARGET_OBJECT_AUXV,
3101 NULL, &auxv);
dba24537
AC
3102 if (auxv_len > 0)
3103 {
3104 note_data = elfcore_write_note (obfd, note_data, note_size,
3105 "CORE", NT_AUXV, auxv, auxv_len);
3106 xfree (auxv);
3107 }
3108
3109 make_cleanup (xfree, note_data);
3110 return note_data;
3111}
3112
3113/* Implement the "info proc" command. */
3114
3115static void
3116linux_nat_info_proc_cmd (char *args, int from_tty)
3117{
3118 long long pid = PIDGET (inferior_ptid);
3119 FILE *procfile;
3120 char **argv = NULL;
3121 char buffer[MAXPATHLEN];
3122 char fname1[MAXPATHLEN], fname2[MAXPATHLEN];
3123 int cmdline_f = 1;
3124 int cwd_f = 1;
3125 int exe_f = 1;
3126 int mappings_f = 0;
3127 int environ_f = 0;
3128 int status_f = 0;
3129 int stat_f = 0;
3130 int all = 0;
3131 struct stat dummy;
3132
3133 if (args)
3134 {
3135 /* Break up 'args' into an argv array. */
3136 if ((argv = buildargv (args)) == NULL)
3137 nomem (0);
3138 else
3139 make_cleanup_freeargv (argv);
3140 }
3141 while (argv != NULL && *argv != NULL)
3142 {
3143 if (isdigit (argv[0][0]))
3144 {
3145 pid = strtoul (argv[0], NULL, 10);
3146 }
3147 else if (strncmp (argv[0], "mappings", strlen (argv[0])) == 0)
3148 {
3149 mappings_f = 1;
3150 }
3151 else if (strcmp (argv[0], "status") == 0)
3152 {
3153 status_f = 1;
3154 }
3155 else if (strcmp (argv[0], "stat") == 0)
3156 {
3157 stat_f = 1;
3158 }
3159 else if (strcmp (argv[0], "cmd") == 0)
3160 {
3161 cmdline_f = 1;
3162 }
3163 else if (strncmp (argv[0], "exe", strlen (argv[0])) == 0)
3164 {
3165 exe_f = 1;
3166 }
3167 else if (strcmp (argv[0], "cwd") == 0)
3168 {
3169 cwd_f = 1;
3170 }
3171 else if (strncmp (argv[0], "all", strlen (argv[0])) == 0)
3172 {
3173 all = 1;
3174 }
3175 else
3176 {
3177 /* [...] (future options here) */
3178 }
3179 argv++;
3180 }
3181 if (pid == 0)
8a3fe4f8 3182 error (_("No current process: you must name one."));
dba24537
AC
3183
3184 sprintf (fname1, "/proc/%lld", pid);
3185 if (stat (fname1, &dummy) != 0)
8a3fe4f8 3186 error (_("No /proc directory: '%s'"), fname1);
dba24537 3187
a3f17187 3188 printf_filtered (_("process %lld\n"), pid);
dba24537
AC
3189 if (cmdline_f || all)
3190 {
3191 sprintf (fname1, "/proc/%lld/cmdline", pid);
d5d6fca5 3192 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3193 {
3194 fgets (buffer, sizeof (buffer), procfile);
3195 printf_filtered ("cmdline = '%s'\n", buffer);
3196 fclose (procfile);
3197 }
3198 else
8a3fe4f8 3199 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3200 }
3201 if (cwd_f || all)
3202 {
3203 sprintf (fname1, "/proc/%lld/cwd", pid);
3204 memset (fname2, 0, sizeof (fname2));
3205 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3206 printf_filtered ("cwd = '%s'\n", fname2);
3207 else
8a3fe4f8 3208 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3209 }
3210 if (exe_f || all)
3211 {
3212 sprintf (fname1, "/proc/%lld/exe", pid);
3213 memset (fname2, 0, sizeof (fname2));
3214 if (readlink (fname1, fname2, sizeof (fname2)) > 0)
3215 printf_filtered ("exe = '%s'\n", fname2);
3216 else
8a3fe4f8 3217 warning (_("unable to read link '%s'"), fname1);
dba24537
AC
3218 }
3219 if (mappings_f || all)
3220 {
3221 sprintf (fname1, "/proc/%lld/maps", pid);
d5d6fca5 3222 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3223 {
3224 long long addr, endaddr, size, offset, inode;
3225 char permissions[8], device[8], filename[MAXPATHLEN];
3226
a3f17187 3227 printf_filtered (_("Mapped address spaces:\n\n"));
17a912b6 3228 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3229 {
3230 printf_filtered ("\t%10s %10s %10s %10s %7s\n",
3231 "Start Addr",
3232 " End Addr",
3233 " Size", " Offset", "objfile");
3234 }
3235 else
3236 {
3237 printf_filtered (" %18s %18s %10s %10s %7s\n",
3238 "Start Addr",
3239 " End Addr",
3240 " Size", " Offset", "objfile");
3241 }
3242
3243 while (read_mapping (procfile, &addr, &endaddr, &permissions[0],
3244 &offset, &device[0], &inode, &filename[0]))
3245 {
3246 size = endaddr - addr;
3247
3248 /* FIXME: carlton/2003-08-27: Maybe the printf_filtered
3249 calls here (and possibly above) should be abstracted
3250 out into their own functions? Andrew suggests using
3251 a generic local_address_string instead to print out
3252 the addresses; that makes sense to me, too. */
3253
17a912b6 3254 if (gdbarch_addr_bit (current_gdbarch) == 32)
dba24537
AC
3255 {
3256 printf_filtered ("\t%#10lx %#10lx %#10x %#10x %7s\n",
3257 (unsigned long) addr, /* FIXME: pr_addr */
3258 (unsigned long) endaddr,
3259 (int) size,
3260 (unsigned int) offset,
3261 filename[0] ? filename : "");
3262 }
3263 else
3264 {
3265 printf_filtered (" %#18lx %#18lx %#10x %#10x %7s\n",
3266 (unsigned long) addr, /* FIXME: pr_addr */
3267 (unsigned long) endaddr,
3268 (int) size,
3269 (unsigned int) offset,
3270 filename[0] ? filename : "");
3271 }
3272 }
3273
3274 fclose (procfile);
3275 }
3276 else
8a3fe4f8 3277 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3278 }
3279 if (status_f || all)
3280 {
3281 sprintf (fname1, "/proc/%lld/status", pid);
d5d6fca5 3282 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3283 {
3284 while (fgets (buffer, sizeof (buffer), procfile) != NULL)
3285 puts_filtered (buffer);
3286 fclose (procfile);
3287 }
3288 else
8a3fe4f8 3289 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3290 }
3291 if (stat_f || all)
3292 {
3293 sprintf (fname1, "/proc/%lld/stat", pid);
d5d6fca5 3294 if ((procfile = fopen (fname1, "r")) != NULL)
dba24537
AC
3295 {
3296 int itmp;
3297 char ctmp;
a25694b4 3298 long ltmp;
dba24537
AC
3299
3300 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3301 printf_filtered (_("Process: %d\n"), itmp);
a25694b4 3302 if (fscanf (procfile, "(%[^)]) ", &buffer[0]) > 0)
a3f17187 3303 printf_filtered (_("Exec file: %s\n"), buffer);
dba24537 3304 if (fscanf (procfile, "%c ", &ctmp) > 0)
a3f17187 3305 printf_filtered (_("State: %c\n"), ctmp);
dba24537 3306 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3307 printf_filtered (_("Parent process: %d\n"), itmp);
dba24537 3308 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3309 printf_filtered (_("Process group: %d\n"), itmp);
dba24537 3310 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3311 printf_filtered (_("Session id: %d\n"), itmp);
dba24537 3312 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3313 printf_filtered (_("TTY: %d\n"), itmp);
dba24537 3314 if (fscanf (procfile, "%d ", &itmp) > 0)
a3f17187 3315 printf_filtered (_("TTY owner process group: %d\n"), itmp);
a25694b4
AS
3316 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3317 printf_filtered (_("Flags: 0x%lx\n"), ltmp);
3318 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3319 printf_filtered (_("Minor faults (no memory page): %lu\n"),
3320 (unsigned long) ltmp);
3321 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3322 printf_filtered (_("Minor faults, children: %lu\n"),
3323 (unsigned long) ltmp);
3324 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3325 printf_filtered (_("Major faults (memory page faults): %lu\n"),
3326 (unsigned long) ltmp);
3327 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3328 printf_filtered (_("Major faults, children: %lu\n"),
3329 (unsigned long) ltmp);
3330 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3331 printf_filtered (_("utime: %ld\n"), ltmp);
3332 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3333 printf_filtered (_("stime: %ld\n"), ltmp);
3334 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3335 printf_filtered (_("utime, children: %ld\n"), ltmp);
3336 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3337 printf_filtered (_("stime, children: %ld\n"), ltmp);
3338 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3339 printf_filtered (_("jiffies remaining in current time slice: %ld\n"),
3340 ltmp);
3341 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3342 printf_filtered (_("'nice' value: %ld\n"), ltmp);
3343 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3344 printf_filtered (_("jiffies until next timeout: %lu\n"),
3345 (unsigned long) ltmp);
3346 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3347 printf_filtered (_("jiffies until next SIGALRM: %lu\n"),
3348 (unsigned long) ltmp);
3349 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3350 printf_filtered (_("start time (jiffies since system boot): %ld\n"),
3351 ltmp);
3352 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3353 printf_filtered (_("Virtual memory size: %lu\n"),
3354 (unsigned long) ltmp);
3355 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3356 printf_filtered (_("Resident set size: %lu\n"), (unsigned long) ltmp);
3357 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3358 printf_filtered (_("rlim: %lu\n"), (unsigned long) ltmp);
3359 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3360 printf_filtered (_("Start of text: 0x%lx\n"), ltmp);
3361 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3362 printf_filtered (_("End of text: 0x%lx\n"), ltmp);
3363 if (fscanf (procfile, "%lu ", &ltmp) > 0)
3364 printf_filtered (_("Start of stack: 0x%lx\n"), ltmp);
dba24537
AC
3365#if 0 /* Don't know how architecture-dependent the rest is...
3366 Anyway the signal bitmap info is available from "status". */
a25694b4
AS
3367 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3368 printf_filtered (_("Kernel stack pointer: 0x%lx\n"), ltmp);
3369 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3370 printf_filtered (_("Kernel instr pointer: 0x%lx\n"), ltmp);
3371 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3372 printf_filtered (_("Pending signals bitmap: 0x%lx\n"), ltmp);
3373 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3374 printf_filtered (_("Blocked signals bitmap: 0x%lx\n"), ltmp);
3375 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3376 printf_filtered (_("Ignored signals bitmap: 0x%lx\n"), ltmp);
3377 if (fscanf (procfile, "%ld ", &ltmp) > 0)
3378 printf_filtered (_("Catched signals bitmap: 0x%lx\n"), ltmp);
3379 if (fscanf (procfile, "%lu ", &ltmp) > 0) /* FIXME arch? */
3380 printf_filtered (_("wchan (system call): 0x%lx\n"), ltmp);
dba24537
AC
3381#endif
3382 fclose (procfile);
3383 }
3384 else
8a3fe4f8 3385 warning (_("unable to open /proc file '%s'"), fname1);
dba24537
AC
3386 }
3387}
3388
10d6c8cd
DJ
3389/* Implement the to_xfer_partial interface for memory reads using the /proc
3390 filesystem. Because we can use a single read() call for /proc, this
3391 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3392 but it doesn't support writes. */
3393
3394static LONGEST
3395linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3396 const char *annex, gdb_byte *readbuf,
3397 const gdb_byte *writebuf,
3398 ULONGEST offset, LONGEST len)
dba24537 3399{
10d6c8cd
DJ
3400 LONGEST ret;
3401 int fd;
dba24537
AC
3402 char filename[64];
3403
10d6c8cd 3404 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
3405 return 0;
3406
3407 /* Don't bother for one word. */
3408 if (len < 3 * sizeof (long))
3409 return 0;
3410
3411 /* We could keep this file open and cache it - possibly one per
3412 thread. That requires some juggling, but is even faster. */
3413 sprintf (filename, "/proc/%d/mem", PIDGET (inferior_ptid));
3414 fd = open (filename, O_RDONLY | O_LARGEFILE);
3415 if (fd == -1)
3416 return 0;
3417
3418 /* If pread64 is available, use it. It's faster if the kernel
3419 supports it (only one syscall), and it's 64-bit safe even on
3420 32-bit platforms (for instance, SPARC debugging a SPARC64
3421 application). */
3422#ifdef HAVE_PREAD64
10d6c8cd 3423 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3424#else
10d6c8cd 3425 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3426#endif
3427 ret = 0;
3428 else
3429 ret = len;
3430
3431 close (fd);
3432 return ret;
3433}
3434
3435/* Parse LINE as a signal set and add its set bits to SIGS. */
3436
3437static void
3438add_line_to_sigset (const char *line, sigset_t *sigs)
3439{
3440 int len = strlen (line) - 1;
3441 const char *p;
3442 int signum;
3443
3444 if (line[len] != '\n')
8a3fe4f8 3445 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3446
3447 p = line;
3448 signum = len * 4;
3449 while (len-- > 0)
3450 {
3451 int digit;
3452
3453 if (*p >= '0' && *p <= '9')
3454 digit = *p - '0';
3455 else if (*p >= 'a' && *p <= 'f')
3456 digit = *p - 'a' + 10;
3457 else
8a3fe4f8 3458 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3459
3460 signum -= 4;
3461
3462 if (digit & 1)
3463 sigaddset (sigs, signum + 1);
3464 if (digit & 2)
3465 sigaddset (sigs, signum + 2);
3466 if (digit & 4)
3467 sigaddset (sigs, signum + 3);
3468 if (digit & 8)
3469 sigaddset (sigs, signum + 4);
3470
3471 p++;
3472 }
3473}
3474
3475/* Find process PID's pending signals from /proc/pid/status and set
3476 SIGS to match. */
3477
3478void
3479linux_proc_pending_signals (int pid, sigset_t *pending, sigset_t *blocked, sigset_t *ignored)
3480{
3481 FILE *procfile;
3482 char buffer[MAXPATHLEN], fname[MAXPATHLEN];
3483 int signum;
3484
3485 sigemptyset (pending);
3486 sigemptyset (blocked);
3487 sigemptyset (ignored);
3488 sprintf (fname, "/proc/%d/status", pid);
3489 procfile = fopen (fname, "r");
3490 if (procfile == NULL)
8a3fe4f8 3491 error (_("Could not open %s"), fname);
dba24537
AC
3492
3493 while (fgets (buffer, MAXPATHLEN, procfile) != NULL)
3494 {
3495 /* Normal queued signals are on the SigPnd line in the status
3496 file. However, 2.6 kernels also have a "shared" pending
3497 queue for delivering signals to a thread group, so check for
3498 a ShdPnd line also.
3499
3500 Unfortunately some Red Hat kernels include the shared pending
3501 queue but not the ShdPnd status field. */
3502
3503 if (strncmp (buffer, "SigPnd:\t", 8) == 0)
3504 add_line_to_sigset (buffer + 8, pending);
3505 else if (strncmp (buffer, "ShdPnd:\t", 8) == 0)
3506 add_line_to_sigset (buffer + 8, pending);
3507 else if (strncmp (buffer, "SigBlk:\t", 8) == 0)
3508 add_line_to_sigset (buffer + 8, blocked);
3509 else if (strncmp (buffer, "SigIgn:\t", 8) == 0)
3510 add_line_to_sigset (buffer + 8, ignored);
3511 }
3512
3513 fclose (procfile);
3514}
3515
10d6c8cd
DJ
3516static LONGEST
3517linux_xfer_partial (struct target_ops *ops, enum target_object object,
3518 const char *annex, gdb_byte *readbuf,
3519 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
3520{
3521 LONGEST xfer;
3522
3523 if (object == TARGET_OBJECT_AUXV)
3524 return procfs_xfer_auxv (ops, object, annex, readbuf, writebuf,
3525 offset, len);
3526
3527 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
3528 offset, len);
3529 if (xfer != 0)
3530 return xfer;
3531
3532 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
3533 offset, len);
3534}
3535
e9efe249 3536/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
3537 it with local methods. */
3538
910122bf
UW
3539static void
3540linux_target_install_ops (struct target_ops *t)
10d6c8cd 3541{
6d8fd2b7
UW
3542 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
3543 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
3544 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
3545 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 3546 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
3547 t->to_post_attach = linux_child_post_attach;
3548 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
3549 t->to_find_memory_regions = linux_nat_find_memory_regions;
3550 t->to_make_corefile_notes = linux_nat_make_corefile_notes;
3551
3552 super_xfer_partial = t->to_xfer_partial;
3553 t->to_xfer_partial = linux_xfer_partial;
910122bf
UW
3554}
3555
3556struct target_ops *
3557linux_target (void)
3558{
3559 struct target_ops *t;
3560
3561 t = inf_ptrace_target ();
3562 linux_target_install_ops (t);
3563
3564 return t;
3565}
3566
3567struct target_ops *
7714d83a 3568linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
3569{
3570 struct target_ops *t;
3571
3572 t = inf_ptrace_trad_target (register_u_offset);
3573 linux_target_install_ops (t);
10d6c8cd 3574
10d6c8cd
DJ
3575 return t;
3576}
3577
b84876c2
PA
3578/* Controls if async mode is permitted. */
3579static int linux_async_permitted = 0;
3580
3581/* The set command writes to this variable. If the inferior is
3582 executing, linux_nat_async_permitted is *not* updated. */
3583static int linux_async_permitted_1 = 0;
3584
3585static void
3586set_maintenance_linux_async_permitted (char *args, int from_tty,
3587 struct cmd_list_element *c)
3588{
3589 if (target_has_execution)
3590 {
3591 linux_async_permitted_1 = linux_async_permitted;
3592 error (_("Cannot change this setting while the inferior is running."));
3593 }
3594
3595 linux_async_permitted = linux_async_permitted_1;
3596 linux_nat_set_async_mode (linux_async_permitted);
3597}
3598
3599static void
3600show_maintenance_linux_async_permitted (struct ui_file *file, int from_tty,
3601 struct cmd_list_element *c, const char *value)
3602{
3603 fprintf_filtered (file, _("\
3604Controlling the GNU/Linux inferior in asynchronous mode is %s.\n"),
3605 value);
3606}
3607
3608/* target_is_async_p implementation. */
3609
3610static int
3611linux_nat_is_async_p (void)
3612{
3613 /* NOTE: palves 2008-03-21: We're only async when the user requests
3614 it explicitly with the "maintenance set linux-async" command.
3615 Someday, linux will always be async. */
3616 if (!linux_async_permitted)
3617 return 0;
3618
3619 return 1;
3620}
3621
3622/* target_can_async_p implementation. */
3623
3624static int
3625linux_nat_can_async_p (void)
3626{
3627 /* NOTE: palves 2008-03-21: We're only async when the user requests
3628 it explicitly with the "maintenance set linux-async" command.
3629 Someday, linux will always be async. */
3630 if (!linux_async_permitted)
3631 return 0;
3632
3633 /* See target.h/target_async_mask. */
3634 return linux_nat_async_mask_value;
3635}
3636
3637/* target_async_mask implementation. */
3638
3639static int
3640linux_nat_async_mask (int mask)
3641{
3642 int current_state;
3643 current_state = linux_nat_async_mask_value;
3644
3645 if (current_state != mask)
3646 {
3647 if (mask == 0)
3648 {
3649 linux_nat_async (NULL, 0);
3650 linux_nat_async_mask_value = mask;
3651 /* We're in sync mode. Make sure SIGCHLD isn't handled by
3652 async_sigchld_handler when we come out of sigsuspend in
3653 linux_nat_wait. */
3654 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
3655 }
3656 else
3657 {
3658 /* Restore the async handler. */
3659 sigaction (SIGCHLD, &async_sigchld_action, NULL);
3660 linux_nat_async_mask_value = mask;
3661 linux_nat_async (inferior_event_handler, 0);
3662 }
3663 }
3664
3665 return current_state;
3666}
3667
3668/* Pop an event from the event pipe. */
3669
3670static int
3671linux_nat_event_pipe_pop (int* ptr_status, int* ptr_options)
3672{
3673 struct waitpid_result event = {0};
3674 int ret;
3675
3676 do
3677 {
3678 ret = read (linux_nat_event_pipe[0], &event, sizeof (event));
3679 }
3680 while (ret == -1 && errno == EINTR);
3681
3682 gdb_assert (ret == sizeof (event));
3683
3684 *ptr_status = event.status;
3685 *ptr_options = event.options;
3686
3687 linux_nat_num_queued_events--;
3688
3689 return event.pid;
3690}
3691
3692/* Push an event into the event pipe. */
3693
3694static void
3695linux_nat_event_pipe_push (int pid, int status, int options)
3696{
3697 int ret;
3698 struct waitpid_result event = {0};
3699 event.pid = pid;
3700 event.status = status;
3701 event.options = options;
3702
3703 do
3704 {
3705 ret = write (linux_nat_event_pipe[1], &event, sizeof (event));
3706 gdb_assert ((ret == -1 && errno == EINTR) || ret == sizeof (event));
3707 } while (ret == -1 && errno == EINTR);
3708
3709 linux_nat_num_queued_events++;
3710}
3711
3712static void
3713get_pending_events (void)
3714{
3715 int status, options, pid;
3716
3717 if (!linux_nat_async_enabled || !linux_nat_async_events_enabled)
3718 internal_error (__FILE__, __LINE__,
3719 "get_pending_events called with async masked");
3720
3721 while (1)
3722 {
3723 status = 0;
3724 options = __WCLONE | WNOHANG;
3725
3726 do
3727 {
3728 pid = waitpid (-1, &status, options);
3729 }
3730 while (pid == -1 && errno == EINTR);
3731
3732 if (pid <= 0)
3733 {
3734 options = WNOHANG;
3735 do
3736 {
3737 pid = waitpid (-1, &status, options);
3738 }
3739 while (pid == -1 && errno == EINTR);
3740 }
3741
3742 if (pid <= 0)
3743 /* No more children reporting events. */
3744 break;
3745
3746 if (debug_linux_nat_async)
3747 fprintf_unfiltered (gdb_stdlog, "\
3748get_pending_events: pid(%d), status(%x), options (%x)\n",
3749 pid, status, options);
3750
3751 linux_nat_event_pipe_push (pid, status, options);
3752 }
3753
3754 if (debug_linux_nat_async)
3755 fprintf_unfiltered (gdb_stdlog, "\
3756get_pending_events: linux_nat_num_queued_events(%d)\n",
3757 linux_nat_num_queued_events);
3758}
3759
3760/* SIGCHLD handler for async mode. */
3761
3762static void
3763async_sigchld_handler (int signo)
3764{
3765 if (debug_linux_nat_async)
3766 fprintf_unfiltered (gdb_stdlog, "async_sigchld_handler\n");
3767
3768 get_pending_events ();
3769}
3770
3771/* Enable or disable async SIGCHLD handling. */
3772
3773static int
3774linux_nat_async_events (int enable)
3775{
3776 int current_state = linux_nat_async_events_enabled;
3777
3778 if (debug_linux_nat_async)
3779 fprintf_unfiltered (gdb_stdlog,
3780 "LNAE: enable(%d): linux_nat_async_events_enabled(%d), "
3781 "linux_nat_num_queued_events(%d)\n",
3782 enable, linux_nat_async_events_enabled,
3783 linux_nat_num_queued_events);
3784
3785 if (current_state != enable)
3786 {
3787 sigset_t mask;
3788 sigemptyset (&mask);
3789 sigaddset (&mask, SIGCHLD);
3790 if (enable)
3791 {
3792 /* Unblock target events. */
3793 linux_nat_async_events_enabled = 1;
3794
3795 local_event_queue_to_pipe ();
3796 /* While in masked async, we may have not collected all the
3797 pending events. Get them out now. */
3798 get_pending_events ();
3799 sigprocmask (SIG_UNBLOCK, &mask, NULL);
3800 }
3801 else
3802 {
3803 /* Block target events. */
3804 sigprocmask (SIG_BLOCK, &mask, NULL);
3805 linux_nat_async_events_enabled = 0;
3806 /* Get events out of queue, and make them available to
3807 queued_waitpid / my_waitpid. */
3808 pipe_to_local_event_queue ();
3809 }
3810 }
3811
3812 return current_state;
3813}
3814
3815static int async_terminal_is_ours = 1;
3816
3817/* target_terminal_inferior implementation. */
3818
3819static void
3820linux_nat_terminal_inferior (void)
3821{
3822 if (!target_is_async_p ())
3823 {
3824 /* Async mode is disabled. */
3825 terminal_inferior ();
3826 return;
3827 }
3828
3829 /* GDB should never give the terminal to the inferior, if the
3830 inferior is running in the background (run&, continue&, etc.).
3831 This check can be removed when the common code is fixed. */
3832 if (!sync_execution)
3833 return;
3834
3835 terminal_inferior ();
3836
3837 if (!async_terminal_is_ours)
3838 return;
3839
3840 delete_file_handler (input_fd);
3841 async_terminal_is_ours = 0;
3842 set_sigint_trap ();
3843}
3844
3845/* target_terminal_ours implementation. */
3846
3847void
3848linux_nat_terminal_ours (void)
3849{
3850 if (!target_is_async_p ())
3851 {
3852 /* Async mode is disabled. */
3853 terminal_ours ();
3854 return;
3855 }
3856
3857 /* GDB should never give the terminal to the inferior if the
3858 inferior is running in the background (run&, continue&, etc.),
3859 but claiming it sure should. */
3860 terminal_ours ();
3861
3862 if (!sync_execution)
3863 return;
3864
3865 if (async_terminal_is_ours)
3866 return;
3867
3868 clear_sigint_trap ();
3869 add_file_handler (input_fd, stdin_event_handler, 0);
3870 async_terminal_is_ours = 1;
3871}
3872
3873static void (*async_client_callback) (enum inferior_event_type event_type,
3874 void *context);
3875static void *async_client_context;
3876
3877static void
3878linux_nat_async_file_handler (int error, gdb_client_data client_data)
3879{
3880 async_client_callback (INF_REG_EVENT, async_client_context);
3881}
3882
3883/* target_async implementation. */
3884
3885static void
3886linux_nat_async (void (*callback) (enum inferior_event_type event_type,
3887 void *context), void *context)
3888{
3889 if (linux_nat_async_mask_value == 0 || !linux_nat_async_enabled)
3890 internal_error (__FILE__, __LINE__,
3891 "Calling target_async when async is masked");
3892
3893 if (callback != NULL)
3894 {
3895 async_client_callback = callback;
3896 async_client_context = context;
3897 add_file_handler (linux_nat_event_pipe[0],
3898 linux_nat_async_file_handler, NULL);
3899
3900 linux_nat_async_events (1);
3901 }
3902 else
3903 {
3904 async_client_callback = callback;
3905 async_client_context = context;
3906
3907 linux_nat_async_events (0);
3908 delete_file_handler (linux_nat_event_pipe[0]);
3909 }
3910 return;
3911}
3912
3913/* Enable/Disable async mode. */
3914
3915static void
3916linux_nat_set_async_mode (int on)
3917{
3918 if (linux_nat_async_enabled != on)
3919 {
3920 if (on)
3921 {
3922 gdb_assert (waitpid_queue == NULL);
3923 sigaction (SIGCHLD, &async_sigchld_action, NULL);
3924
3925 if (pipe (linux_nat_event_pipe) == -1)
3926 internal_error (__FILE__, __LINE__,
3927 "creating event pipe failed.");
3928
3929 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
3930 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
3931 }
3932 else
3933 {
3934 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
3935
3936 drain_queued_events (-1);
3937
3938 linux_nat_num_queued_events = 0;
3939 close (linux_nat_event_pipe[0]);
3940 close (linux_nat_event_pipe[1]);
3941 linux_nat_event_pipe[0] = linux_nat_event_pipe[1] = -1;
3942
3943 }
3944 }
3945 linux_nat_async_enabled = on;
3946}
3947
f973ed9c
DJ
3948void
3949linux_nat_add_target (struct target_ops *t)
3950{
f973ed9c
DJ
3951 /* Save the provided single-threaded target. We save this in a separate
3952 variable because another target we've inherited from (e.g. inf-ptrace)
3953 may have saved a pointer to T; we want to use it for the final
3954 process stratum target. */
3955 linux_ops_saved = *t;
3956 linux_ops = &linux_ops_saved;
3957
3958 /* Override some methods for multithreading. */
b84876c2 3959 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
3960 t->to_attach = linux_nat_attach;
3961 t->to_detach = linux_nat_detach;
3962 t->to_resume = linux_nat_resume;
3963 t->to_wait = linux_nat_wait;
3964 t->to_xfer_partial = linux_nat_xfer_partial;
3965 t->to_kill = linux_nat_kill;
3966 t->to_mourn_inferior = linux_nat_mourn_inferior;
3967 t->to_thread_alive = linux_nat_thread_alive;
3968 t->to_pid_to_str = linux_nat_pid_to_str;
3969 t->to_has_thread_control = tc_schedlock;
3970
b84876c2
PA
3971 t->to_can_async_p = linux_nat_can_async_p;
3972 t->to_is_async_p = linux_nat_is_async_p;
3973 t->to_async = linux_nat_async;
3974 t->to_async_mask = linux_nat_async_mask;
3975 t->to_terminal_inferior = linux_nat_terminal_inferior;
3976 t->to_terminal_ours = linux_nat_terminal_ours;
3977
f973ed9c
DJ
3978 /* We don't change the stratum; this target will sit at
3979 process_stratum and thread_db will set at thread_stratum. This
3980 is a little strange, since this is a multi-threaded-capable
3981 target, but we want to be on the stack below thread_db, and we
3982 also want to be used for single-threaded processes. */
3983
3984 add_target (t);
3985
3986 /* TODO: Eliminate this and have libthread_db use
3987 find_target_beneath. */
3988 thread_db_init (t);
3989}
3990
9f0bdab8
DJ
3991/* Register a method to call whenever a new thread is attached. */
3992void
3993linux_nat_set_new_thread (struct target_ops *t, void (*new_thread) (ptid_t))
3994{
3995 /* Save the pointer. We only support a single registered instance
3996 of the GNU/Linux native target, so we do not need to map this to
3997 T. */
3998 linux_nat_new_thread = new_thread;
3999}
4000
4001/* Return the saved siginfo associated with PTID. */
4002struct siginfo *
4003linux_nat_get_siginfo (ptid_t ptid)
4004{
4005 struct lwp_info *lp = find_lwp_pid (ptid);
4006
4007 gdb_assert (lp != NULL);
4008
4009 return &lp->siginfo;
4010}
4011
d6b0e80f
AC
4012void
4013_initialize_linux_nat (void)
4014{
b84876c2 4015 sigset_t mask;
dba24537 4016
1bedd215
AC
4017 add_info ("proc", linux_nat_info_proc_cmd, _("\
4018Show /proc process information about any running process.\n\
dba24537
AC
4019Specify any process id, or use the program being debugged by default.\n\
4020Specify any of the following keywords for detailed info:\n\
4021 mappings -- list of mapped memory regions.\n\
4022 stat -- list a bunch of random process info.\n\
4023 status -- list a different bunch of random process info.\n\
1bedd215 4024 all -- list all available /proc info."));
d6b0e80f 4025
b84876c2
PA
4026 add_setshow_zinteger_cmd ("lin-lwp", class_maintenance,
4027 &debug_linux_nat, _("\
4028Set debugging of GNU/Linux lwp module."), _("\
4029Show debugging of GNU/Linux lwp module."), _("\
4030Enables printf debugging output."),
4031 NULL,
4032 show_debug_linux_nat,
4033 &setdebuglist, &showdebuglist);
4034
4035 add_setshow_zinteger_cmd ("lin-lwp-async", class_maintenance,
4036 &debug_linux_nat_async, _("\
4037Set debugging of GNU/Linux async lwp module."), _("\
4038Show debugging of GNU/Linux async lwp module."), _("\
4039Enables printf debugging output."),
4040 NULL,
4041 show_debug_linux_nat_async,
4042 &setdebuglist, &showdebuglist);
4043
4044 add_setshow_boolean_cmd ("linux-async", class_maintenance,
4045 &linux_async_permitted_1, _("\
4046Set whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4047Show whether gdb controls the GNU/Linux inferior in asynchronous mode."), _("\
4048Tells gdb whether to control the GNU/Linux inferior in asynchronous mode."),
4049 set_maintenance_linux_async_permitted,
4050 show_maintenance_linux_async_permitted,
4051 &maintenance_set_cmdlist,
4052 &maintenance_show_cmdlist);
4053
4054 /* Block SIGCHLD by default. Doing this early prevents it getting
4055 unblocked if an exception is thrown due to an error while the
4056 inferior is starting (sigsetjmp/siglongjmp). */
4057 sigemptyset (&mask);
4058 sigaddset (&mask, SIGCHLD);
4059 sigprocmask (SIG_BLOCK, &mask, NULL);
4060
4061 /* Save this mask as the default. */
d6b0e80f
AC
4062 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4063
b84876c2
PA
4064 /* The synchronous SIGCHLD handler. */
4065 sync_sigchld_action.sa_handler = sigchld_handler;
4066 sigemptyset (&sync_sigchld_action.sa_mask);
4067 sync_sigchld_action.sa_flags = SA_RESTART;
4068
4069 /* Make it the default. */
4070 sigaction (SIGCHLD, &sync_sigchld_action, NULL);
d6b0e80f
AC
4071
4072 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4073 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4074 sigdelset (&suspend_mask, SIGCHLD);
4075
b84876c2
PA
4076 /* SIGCHLD handler for async mode. */
4077 async_sigchld_action.sa_handler = async_sigchld_handler;
4078 sigemptyset (&async_sigchld_action.sa_mask);
4079 async_sigchld_action.sa_flags = SA_RESTART;
d6b0e80f 4080
b84876c2
PA
4081 /* Install the default mode. */
4082 linux_nat_set_async_mode (linux_async_permitted);
d6b0e80f
AC
4083}
4084\f
4085
4086/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4087 the GNU/Linux Threads library and therefore doesn't really belong
4088 here. */
4089
4090/* Read variable NAME in the target and return its value if found.
4091 Otherwise return zero. It is assumed that the type of the variable
4092 is `int'. */
4093
4094static int
4095get_signo (const char *name)
4096{
4097 struct minimal_symbol *ms;
4098 int signo;
4099
4100 ms = lookup_minimal_symbol (name, NULL, NULL);
4101 if (ms == NULL)
4102 return 0;
4103
8e70166d 4104 if (target_read_memory (SYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
4105 sizeof (signo)) != 0)
4106 return 0;
4107
4108 return signo;
4109}
4110
4111/* Return the set of signals used by the threads library in *SET. */
4112
4113void
4114lin_thread_get_thread_signals (sigset_t *set)
4115{
4116 struct sigaction action;
4117 int restart, cancel;
b84876c2 4118 sigset_t blocked_mask;
d6b0e80f 4119
b84876c2 4120 sigemptyset (&blocked_mask);
d6b0e80f
AC
4121 sigemptyset (set);
4122
4123 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
4124 cancel = get_signo ("__pthread_sig_cancel");
4125
4126 /* LinuxThreads normally uses the first two RT signals, but in some legacy
4127 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
4128 not provide any way for the debugger to query the signal numbers -
4129 fortunately they don't change! */
4130
d6b0e80f 4131 if (restart == 0)
17fbb0bd 4132 restart = __SIGRTMIN;
d6b0e80f 4133
d6b0e80f 4134 if (cancel == 0)
17fbb0bd 4135 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
4136
4137 sigaddset (set, restart);
4138 sigaddset (set, cancel);
4139
4140 /* The GNU/Linux Threads library makes terminating threads send a
4141 special "cancel" signal instead of SIGCHLD. Make sure we catch
4142 those (to prevent them from terminating GDB itself, which is
4143 likely to be their default action) and treat them the same way as
4144 SIGCHLD. */
4145
4146 action.sa_handler = sigchld_handler;
4147 sigemptyset (&action.sa_mask);
58aecb61 4148 action.sa_flags = SA_RESTART;
d6b0e80f
AC
4149 sigaction (cancel, &action, NULL);
4150
4151 /* We block the "cancel" signal throughout this code ... */
4152 sigaddset (&blocked_mask, cancel);
4153 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
4154
4155 /* ... except during a sigsuspend. */
4156 sigdelset (&suspend_mask, cancel);
4157}