/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2016 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"
#include "nat/linux-namespaces.h"
#include "fileio.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend.

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.  (An illustrative sketch of this
loop appears right after this comment.)

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered, so
that we can intercept it.  SIGSTOP's advantage is that it can not be blocked.
A disadvantage is that it is not a real-time signal, so it can only be queued
once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.  */
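
/* An illustrative sketch, not part of GDB's implementation, of the
   sync-mode wait loop described above: poll for an event in any child
   with non-blocking waitpid, and if nothing is pending, sleep in
   sigsuspend.  It assumes SIGCHLD is blocked on entry and that SUSPEND
   is a mask with SIGCHLD unblocked; the function and parameter names
   are hypothetical, for exposition only.  */
#if 0
static int
example_sync_wait_for_event (const sigset_t *suspend)
{
  int status;

  for (;;)
    {
      /* Non-blocking check for an event in any child.  */
      pid_t pid = waitpid (-1, &status, WNOHANG | __WALL);

      if (pid > 0)
	return status;

      /* Nothing pending yet.  Atomically unblock SIGCHLD and wait.  A
	 SIGCHLD that arrived while it was blocked is already pending,
	 so sigsuspend notices it and returns immediately.  */
      sigsuspend (suspend);
    }
}
#endif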

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* Does the current host support PTRACE_GETREGSET?  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as a waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the
   event pipe, so that the select/poll in the event loop realizes we
   have something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

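/* An illustrative sketch (not GDB's actual handler, which is defined
   further down in this file) of how a SIGCHLD handler feeds the event
   pipe in the self-pipe trick described at the top of the file.  The
   name example_sigchld_handler is hypothetical.  */
#if 0
static void
example_sigchld_handler (int signo)
{
  int saved_errno = errno;

  /* Only async-signal-safe calls are allowed here; async_file_mark
     boils down to write(2), which qualifies.  */
  if (signo == SIGCHLD && linux_is_async_p ())
    async_file_mark ();

  errno = saved_errno;
}
#endif
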
static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static int sigtrap_is_event (int status);
static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;

static void save_stop_reason (struct lwp_info *lp);

/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

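/* A small usage sketch (hypothetical values, not code GDB runs) of the
   stopped_pids list above: record the wait status of a child we are
   not yet tracking, and claim it later by pid.  */
#if 0
static void
example_stopped_pids_usage (void)
{
  int status;

  add_to_pid_list (&stopped_pids, 4711, W_STOPCODE (SIGSTOP));

  if (pull_pid_from_list (&stopped_pids, 4711, &status))
    {
      /* STATUS now holds the saved wait status; the list entry has
	 been unlinked and freed.  */
    }
}
#endif
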
/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
	      | PTRACE_O_TRACEVFORKDONE
	      | PTRACE_O_TRACEVFORK
	      | PTRACE_O_TRACEFORK
	      | PTRACE_O_TRACEEXEC);

  return options;
}

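/* These options reach the kernel through PTRACE_SETOPTIONS; in GDB
   that happens inside linux_enable_event_reporting, called from
   linux_init_ptrace below.  A minimal sketch of the underlying call,
   assuming PID names a ptrace-stopped LWP we are attached to
   (example_enable_options is a hypothetical name).  */
#if 0
static void
example_enable_options (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  errno = 0;
  if (ptrace (PTRACE_SETOPTIONS, pid, 0, options) < 0)
    perror_with_name (("ptrace"));
}
#endif
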
/* Initialize ptrace warnings and check for supported ptrace
   features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
}

static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}

static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = (struct lwp_info *) lp_voidp;

  delete_lwp (lp->ptid);
}

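/* delete_lwp_cleanup exists so that an LWP can be deleted by the
   cleanup machinery if an error is thrown.  A minimal sketch of the
   usual pattern, with hypothetical local code (linux_child_follow_fork
   below registers this cleanup for the child LWP when detaching from a
   fork).  */
#if 0
static void
example_delete_lwp_cleanup_usage (struct lwp_info *lp)
{
  struct cleanup *old_chain = make_cleanup (delete_lwp_cleanup, lp);

  /* ... work that may throw an error ...  */

  /* Run the cleanup, deleting LP; use discard_cleanups (old_chain)
     instead to keep LP around.  */
  do_cleanups (old_chain);
}
#endif
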
d83ad864
DB
457/* Target hook for follow_fork. On entry inferior_ptid must be the
458 ptid of the followed inferior. At return, inferior_ptid will be
459 unchanged. */
460
6d8fd2b7 461static int
07107ca6
LM
462linux_child_follow_fork (struct target_ops *ops, int follow_child,
463 int detach_fork)
3993f6b1 464{
d83ad864 465 if (!follow_child)
4de4c07c 466 {
6c95b8df 467 struct lwp_info *child_lp = NULL;
d83ad864
DB
468 int status = W_STOPCODE (0);
469 struct cleanup *old_chain;
470 int has_vforked;
79639e11 471 ptid_t parent_ptid, child_ptid;
d83ad864
DB
472 int parent_pid, child_pid;
473
474 has_vforked = (inferior_thread ()->pending_follow.kind
475 == TARGET_WAITKIND_VFORKED);
79639e11
PA
476 parent_ptid = inferior_ptid;
477 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
478 parent_pid = ptid_get_lwp (parent_ptid);
479 child_pid = ptid_get_lwp (child_ptid);
4de4c07c 480
1777feb0 481 /* We're already attached to the parent, by default. */
d83ad864 482 old_chain = save_inferior_ptid ();
79639e11 483 inferior_ptid = child_ptid;
d83ad864
DB
484 child_lp = add_lwp (inferior_ptid);
485 child_lp->stopped = 1;
486 child_lp->last_resume_kind = resume_stop;
4de4c07c 487
ac264b3b
MS
488 /* Detach new forked process? */
489 if (detach_fork)
f75c00e4 490 {
4403d8e9
JK
491 make_cleanup (delete_lwp_cleanup, child_lp);
492
4403d8e9
JK
493 if (linux_nat_prepare_to_resume != NULL)
494 linux_nat_prepare_to_resume (child_lp);
c077881a
HZ
495
496 /* When debugging an inferior in an architecture that supports
497 hardware single stepping on a kernel without commit
498 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
499 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
500 set if the parent process had them set.
501 To work around this, single step the child process
502 once before detaching to clear the flags. */
503
504 if (!gdbarch_software_single_step_p (target_thread_architecture
505 (child_lp->ptid)))
506 {
c077881a
HZ
507 linux_disable_event_reporting (child_pid);
508 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
509 perror_with_name (_("Couldn't do single step"));
510 if (my_waitpid (child_pid, &status, 0) < 0)
511 perror_with_name (_("Couldn't wait vfork process"));
512 }
513
514 if (WIFSTOPPED (status))
9caaaa83
PA
515 {
516 int signo;
517
518 signo = WSTOPSIG (status);
519 if (signo != 0
520 && !signal_pass_state (gdb_signal_from_host (signo)))
521 signo = 0;
522 ptrace (PTRACE_DETACH, child_pid, 0, signo);
523 }
4403d8e9 524
d83ad864 525 /* Resets value of inferior_ptid to parent ptid. */
4403d8e9 526 do_cleanups (old_chain);
ac264b3b
MS
527 }
528 else
529 {
6c95b8df 530 /* Let the thread_db layer learn about this new process. */
2277426b 531 check_for_thread_db ();
ac264b3b 532 }
9016a515 533
d83ad864
DB
534 do_cleanups (old_chain);
535
9016a515
DJ
536 if (has_vforked)
537 {
3ced3da4 538 struct lwp_info *parent_lp;
6c95b8df 539
79639e11 540 parent_lp = find_lwp_pid (parent_ptid);
96d7229d 541 gdb_assert (linux_supports_tracefork () >= 0);
3ced3da4 542
96d7229d 543 if (linux_supports_tracevforkdone ())
9016a515 544 {
6c95b8df
PA
545 if (debug_linux_nat)
546 fprintf_unfiltered (gdb_stdlog,
547 "LCFF: waiting for VFORK_DONE on %d\n",
548 parent_pid);
3ced3da4 549 parent_lp->stopped = 1;
9016a515 550
6c95b8df
PA
551 /* We'll handle the VFORK_DONE event like any other
552 event, in target_wait. */
9016a515
DJ
553 }
554 else
555 {
556 /* We can't insert breakpoints until the child has
557 finished with the shared memory region. We need to
558 wait until that happens. Ideal would be to just
559 call:
560 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
561 - waitpid (parent_pid, &status, __WALL);
562 However, most architectures can't handle a syscall
563 being traced on the way out if it wasn't traced on
564 the way in.
565
566 We might also think to loop, continuing the child
567 until it exits or gets a SIGTRAP. One problem is
568 that the child might call ptrace with PTRACE_TRACEME.
569
570 There's no simple and reliable way to figure out when
571 the vforked child will be done with its copy of the
572 shared memory. We could step it out of the syscall,
573 two instructions, let it go, and then single-step the
574 parent once. When we have hardware single-step, this
575 would work; with software single-step it could still
576 be made to work but we'd have to be able to insert
577 single-step breakpoints in the child, and we'd have
578 to insert -just- the single-step breakpoint in the
579 parent. Very awkward.
580
581 In the end, the best we can do is to make sure it
582 runs for a little while. Hopefully it will be out of
583 range of any breakpoints we reinsert. Usually this
584 is only the single-step breakpoint at vfork's return
585 point. */
586
6c95b8df
PA
587 if (debug_linux_nat)
588 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
589 "LCFF: no VFORK_DONE "
590 "support, sleeping a bit\n");
6c95b8df 591
9016a515 592 usleep (10000);
9016a515 593
6c95b8df
PA
594 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
595 and leave it pending. The next linux_nat_resume call
596 will notice a pending event, and bypasses actually
597 resuming the inferior. */
3ced3da4
PA
598 parent_lp->status = 0;
599 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
600 parent_lp->stopped = 1;
6c95b8df
PA
601
602 /* If we're in async mode, need to tell the event loop
603 there's something here to process. */
d9d41e78 604 if (target_is_async_p ())
6c95b8df
PA
605 async_file_mark ();
606 }
9016a515 607 }
4de4c07c 608 }
3993f6b1 609 else
4de4c07c 610 {
3ced3da4 611 struct lwp_info *child_lp;
4de4c07c 612
3ced3da4
PA
613 child_lp = add_lwp (inferior_ptid);
614 child_lp->stopped = 1;
25289eb2 615 child_lp->last_resume_kind = resume_stop;
6c95b8df 616
6c95b8df 617 /* Let the thread_db layer learn about this new process. */
ef29ce1a 618 check_for_thread_db ();
4de4c07c
DJ
619 }
620
621 return 0;
622}
623
4de4c07c 624\f
77b06cd7 625static int
a863b201 626linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
4de4c07c 627{
96d7229d 628 return !linux_supports_tracefork ();
3993f6b1
DJ
629}
630
eb73ad13 631static int
973fc227 632linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
633{
634 return 0;
635}
636
77b06cd7 637static int
3ecc7da0 638linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
3993f6b1 639{
96d7229d 640 return !linux_supports_tracefork ();
3993f6b1
DJ
641}
642
eb73ad13 643static int
e98cf0cd 644linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
645{
646 return 0;
647}
648
77b06cd7 649static int
ba025e51 650linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
3993f6b1 651{
96d7229d 652 return !linux_supports_tracefork ();
3993f6b1
DJ
653}
654
eb73ad13 655static int
758e29d2 656linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
657{
658 return 0;
659}
660
a96d9b2e 661static int
ff214e67
TT
662linux_child_set_syscall_catchpoint (struct target_ops *self,
663 int pid, int needed, int any_count,
a96d9b2e
SDJ
664 int table_size, int *table)
665{
96d7229d 666 if (!linux_supports_tracesysgood ())
77b06cd7
TJB
667 return 1;
668
a96d9b2e
SDJ
669 /* On GNU/Linux, we ignore the arguments. It means that we only
670 enable the syscall catchpoints, but do not disable them.
77b06cd7 671
a96d9b2e
SDJ
672 Also, we do not use the `table' information because we do not
673 filter system calls here. We let GDB do the logic for us. */
674 return 0;
675}
676
d6b0e80f 677/* List of known LWPs. */
9f0bdab8 678struct lwp_info *lwp_list;
d6b0e80f
AC
679\f
680
d6b0e80f
AC
681/* Original signal mask. */
682static sigset_t normal_mask;
683
684/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
685 _initialize_linux_nat. */
686static sigset_t suspend_mask;
687
7feb7d06
PA
688/* Signals to block to make that sigsuspend work. */
689static sigset_t blocked_mask;
690
691/* SIGCHLD action. */
692struct sigaction sigchld_action;
b84876c2 693
7feb7d06
PA
694/* Block child signals (SIGCHLD and linux threads signals), and store
695 the previous mask in PREV_MASK. */
84e46146 696
7feb7d06
PA
697static void
698block_child_signals (sigset_t *prev_mask)
699{
700 /* Make sure SIGCHLD is blocked. */
701 if (!sigismember (&blocked_mask, SIGCHLD))
702 sigaddset (&blocked_mask, SIGCHLD);
703
704 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
705}
706
707/* Restore child signals mask, previously returned by
708 block_child_signals. */
709
710static void
711restore_child_signals_mask (sigset_t *prev_mask)
712{
713 sigprocmask (SIG_SETMASK, prev_mask, NULL);
714}
2455069d
UW
715
716/* Mask of signals to pass directly to the inferior. */
717static sigset_t pass_mask;
718
719/* Update signals to pass to the inferior. */
720static void
94bedb42
TT
721linux_nat_pass_signals (struct target_ops *self,
722 int numsigs, unsigned char *pass_signals)
2455069d
UW
723{
724 int signo;
725
726 sigemptyset (&pass_mask);
727
728 for (signo = 1; signo < NSIG; signo++)
729 {
2ea28649 730 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
731 if (target_signo < numsigs && pass_signals[target_signo])
732 sigaddset (&pass_mask, signo);
733 }
734}
735
d6b0e80f
AC
736\f
737
738/* Prototypes for local functions. */
739static int stop_wait_callback (struct lwp_info *lp, void *data);
8dd27370 740static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
20ba1ce6 741static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
710151dd 742
d6b0e80f 743\f
d6b0e80f 744
7b50312a
PA
745/* Destroy and free LP. */
746
747static void
748lwp_free (struct lwp_info *lp)
749{
750 xfree (lp->arch_private);
751 xfree (lp);
752}
753
d90e17a7
PA
754/* Remove all LWPs belong to PID from the lwp list. */
755
756static void
757purge_lwp_list (int pid)
758{
759 struct lwp_info *lp, *lpprev, *lpnext;
760
761 lpprev = NULL;
762
763 for (lp = lwp_list; lp; lp = lpnext)
764 {
765 lpnext = lp->next;
766
767 if (ptid_get_pid (lp->ptid) == pid)
768 {
769 if (lp == lwp_list)
770 lwp_list = lp->next;
771 else
772 lpprev->next = lp->next;
773
7b50312a 774 lwp_free (lp);
d90e17a7
PA
775 }
776 else
777 lpprev = lp;
778 }
779}
780
26cb8b7c
PA
781/* Add the LWP specified by PTID to the list. PTID is the first LWP
782 in the process. Return a pointer to the structure describing the
783 new LWP.
784
785 This differs from add_lwp in that we don't let the arch specific
786 bits know about this new thread. Current clients of this callback
787 take the opportunity to install watchpoints in the new thread, and
788 we shouldn't do that for the first thread. If we're spawning a
789 child ("run"), the thread executes the shell wrapper first, and we
790 shouldn't touch it until it execs the program we want to debug.
791 For "attach", it'd be okay to call the callback, but it's not
792 necessary, because watchpoints can't yet have been inserted into
793 the inferior. */
d6b0e80f
AC
794
795static struct lwp_info *
26cb8b7c 796add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
797{
798 struct lwp_info *lp;
799
dfd4cc63 800 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 801
8d749320 802 lp = XNEW (struct lwp_info);
d6b0e80f
AC
803
804 memset (lp, 0, sizeof (struct lwp_info));
805
25289eb2 806 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
807 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
808
809 lp->ptid = ptid;
dc146f7c 810 lp->core = -1;
d6b0e80f
AC
811
812 lp->next = lwp_list;
813 lwp_list = lp;
d6b0e80f 814
26cb8b7c
PA
815 return lp;
816}
817
818/* Add the LWP specified by PID to the list. Return a pointer to the
819 structure describing the new LWP. The LWP should already be
820 stopped. */
821
822static struct lwp_info *
823add_lwp (ptid_t ptid)
824{
825 struct lwp_info *lp;
826
827 lp = add_initial_lwp (ptid);
828
6e012a6c
PA
829 /* Let the arch specific bits know about this new thread. Current
830 clients of this callback take the opportunity to install
26cb8b7c
PA
831 watchpoints in the new thread. We don't do this for the first
832 thread though. See add_initial_lwp. */
833 if (linux_nat_new_thread != NULL)
7b50312a 834 linux_nat_new_thread (lp);
9f0bdab8 835
d6b0e80f
AC
836 return lp;
837}
838
839/* Remove the LWP specified by PID from the list. */
840
841static void
842delete_lwp (ptid_t ptid)
843{
844 struct lwp_info *lp, *lpprev;
845
846 lpprev = NULL;
847
848 for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
849 if (ptid_equal (lp->ptid, ptid))
850 break;
851
852 if (!lp)
853 return;
854
d6b0e80f
AC
855 if (lpprev)
856 lpprev->next = lp->next;
857 else
858 lwp_list = lp->next;
859
7b50312a 860 lwp_free (lp);
d6b0e80f
AC
861}
862
863/* Return a pointer to the structure describing the LWP corresponding
864 to PID. If no corresponding LWP could be found, return NULL. */
865
866static struct lwp_info *
867find_lwp_pid (ptid_t ptid)
868{
869 struct lwp_info *lp;
870 int lwp;
871
dfd4cc63
LM
872 if (ptid_lwp_p (ptid))
873 lwp = ptid_get_lwp (ptid);
d6b0e80f 874 else
dfd4cc63 875 lwp = ptid_get_pid (ptid);
d6b0e80f
AC
876
877 for (lp = lwp_list; lp; lp = lp->next)
dfd4cc63 878 if (lwp == ptid_get_lwp (lp->ptid))
d6b0e80f
AC
879 return lp;
880
881 return NULL;
882}
883
6d4ee8c6 884/* See nat/linux-nat.h. */
d6b0e80f
AC
885
886struct lwp_info *
d90e17a7 887iterate_over_lwps (ptid_t filter,
6d4ee8c6 888 iterate_over_lwps_ftype callback,
d90e17a7 889 void *data)
d6b0e80f
AC
890{
891 struct lwp_info *lp, *lpnext;
892
893 for (lp = lwp_list; lp; lp = lpnext)
894 {
895 lpnext = lp->next;
d90e17a7
PA
896
897 if (ptid_match (lp->ptid, filter))
898 {
6d4ee8c6 899 if ((*callback) (lp, data) != 0)
d90e17a7
PA
900 return lp;
901 }
d6b0e80f
AC
902 }
903
904 return NULL;
905}
906
2277426b
PA
907/* Update our internal state when changing from one checkpoint to
908 another indicated by NEW_PTID. We can only switch single-threaded
909 applications, so we only create one new LWP, and the previous list
910 is discarded. */
f973ed9c
DJ
911
912void
913linux_nat_switch_fork (ptid_t new_ptid)
914{
915 struct lwp_info *lp;
916
dfd4cc63 917 purge_lwp_list (ptid_get_pid (inferior_ptid));
2277426b 918
f973ed9c
DJ
919 lp = add_lwp (new_ptid);
920 lp->stopped = 1;
e26af52f 921
2277426b
PA
922 /* This changes the thread's ptid while preserving the gdb thread
923 num. Also changes the inferior pid, while preserving the
924 inferior num. */
925 thread_change_ptid (inferior_ptid, new_ptid);
926
927 /* We've just told GDB core that the thread changed target id, but,
928 in fact, it really is a different thread, with different register
929 contents. */
930 registers_changed ();
e26af52f
DJ
931}
932
e26af52f
DJ
933/* Handle the exit of a single thread LP. */
934
935static void
936exit_lwp (struct lwp_info *lp)
937{
e09875d4 938 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
939
940 if (th)
e26af52f 941 {
17faa917
DJ
942 if (print_thread_events)
943 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
944
4f8d22e3 945 delete_thread (lp->ptid);
e26af52f
DJ
946 }
947
948 delete_lwp (lp->ptid);
949}
950
a0ef4274
DJ
951/* Wait for the LWP specified by LP, which we have just attached to.
952 Returns a wait status for that LWP, to cache. */
953
954static int
4a6ed09b 955linux_nat_post_attach_wait (ptid_t ptid, int first, int *signalled)
a0ef4274 956{
dfd4cc63 957 pid_t new_pid, pid = ptid_get_lwp (ptid);
a0ef4274
DJ
958 int status;
959
644cebc9 960 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
961 {
962 if (debug_linux_nat)
963 fprintf_unfiltered (gdb_stdlog,
964 "LNPAW: Attaching to a stopped process\n");
965
966 /* The process is definitely stopped. It is in a job control
967 stop, unless the kernel predates the TASK_STOPPED /
968 TASK_TRACED distinction, in which case it might be in a
969 ptrace stop. Make sure it is in a ptrace stop; from there we
970 can kill it, signal it, et cetera.
971
972 First make sure there is a pending SIGSTOP. Since we are
973 already attached, the process can not transition from stopped
974 to running without a PTRACE_CONT; so we know this signal will
975 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
976 probably already in the queue (unless this kernel is old
977 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
978 is not an RT signal, it can only be queued once. */
979 kill_lwp (pid, SIGSTOP);
980
981 /* Finally, resume the stopped process. This will deliver the SIGSTOP
982 (or a higher priority signal, just like normal PTRACE_ATTACH). */
983 ptrace (PTRACE_CONT, pid, 0, 0);
984 }
985
986 /* Make sure the initial process is stopped. The user-level threads
987 layer might want to poke around in the inferior, and that won't
988 work if things haven't stabilized yet. */
4a6ed09b 989 new_pid = my_waitpid (pid, &status, __WALL);
dacc9cb2
PP
990 gdb_assert (pid == new_pid);
991
992 if (!WIFSTOPPED (status))
993 {
994 /* The pid we tried to attach has apparently just exited. */
995 if (debug_linux_nat)
996 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
997 pid, status_to_str (status));
998 return status;
999 }
a0ef4274
DJ
1000
1001 if (WSTOPSIG (status) != SIGSTOP)
1002 {
1003 *signalled = 1;
1004 if (debug_linux_nat)
1005 fprintf_unfiltered (gdb_stdlog,
1006 "LNPAW: Received %s after attaching\n",
1007 status_to_str (status));
1008 }
1009
1010 return status;
1011}
1012
b84876c2 1013static void
136d6dae
VP
1014linux_nat_create_inferior (struct target_ops *ops,
1015 char *exec_file, char *allargs, char **env,
b84876c2
PA
1016 int from_tty)
1017{
8cc73a39
SDJ
1018 struct cleanup *restore_personality
1019 = maybe_disable_address_space_randomization (disable_randomization);
b84876c2
PA
1020
1021 /* The fork_child mechanism is synchronous and calls target_wait, so
1022 we have to mask the async mode. */
1023
2455069d 1024 /* Make sure we report all signals during startup. */
94bedb42 1025 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1026
136d6dae 1027 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1028
8cc73a39 1029 do_cleanups (restore_personality);
b84876c2
PA
1030}
1031
8784d563
PA
1032/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1033 already attached. Returns true if a new LWP is found, false
1034 otherwise. */
1035
1036static int
1037attach_proc_task_lwp_callback (ptid_t ptid)
1038{
1039 struct lwp_info *lp;
1040
1041 /* Ignore LWPs we're already attached to. */
1042 lp = find_lwp_pid (ptid);
1043 if (lp == NULL)
1044 {
1045 int lwpid = ptid_get_lwp (ptid);
1046
1047 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1048 {
1049 int err = errno;
1050
1051 /* Be quiet if we simply raced with the thread exiting.
1052 EPERM is returned if the thread's task still exists, and
1053 is marked as exited or zombie, as well as other
1054 conditions, so in that case, confirm the status in
1055 /proc/PID/status. */
1056 if (err == ESRCH
1057 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1058 {
1059 if (debug_linux_nat)
1060 {
1061 fprintf_unfiltered (gdb_stdlog,
1062 "Cannot attach to lwp %d: "
1063 "thread is gone (%d: %s)\n",
1064 lwpid, err, safe_strerror (err));
1065 }
1066 }
1067 else
1068 {
f71f0b0d 1069 warning (_("Cannot attach to lwp %d: %s"),
8784d563
PA
1070 lwpid,
1071 linux_ptrace_attach_fail_reason_string (ptid,
1072 err));
1073 }
1074 }
1075 else
1076 {
1077 if (debug_linux_nat)
1078 fprintf_unfiltered (gdb_stdlog,
1079 "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1080 target_pid_to_str (ptid));
1081
1082 lp = add_lwp (ptid);
8784d563
PA
1083
1084 /* The next time we wait for this LWP we'll see a SIGSTOP as
1085 PTRACE_ATTACH brings it to a halt. */
1086 lp->signalled = 1;
1087
1088 /* We need to wait for a stop before being able to make the
1089 next ptrace call on this LWP. */
1090 lp->must_set_ptrace_flags = 1;
1091 }
1092
1093 return 1;
1094 }
1095 return 0;
1096}
1097
d6b0e80f 1098static void
c0939df1 1099linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f
AC
1100{
1101 struct lwp_info *lp;
d6b0e80f 1102 int status;
af990527 1103 ptid_t ptid;
d6b0e80f 1104
2455069d 1105 /* Make sure we report all signals during attach. */
94bedb42 1106 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1107
492d29ea 1108 TRY
87b0bb13
JK
1109 {
1110 linux_ops->to_attach (ops, args, from_tty);
1111 }
492d29ea 1112 CATCH (ex, RETURN_MASK_ERROR)
87b0bb13
JK
1113 {
1114 pid_t pid = parse_pid_to_attach (args);
1115 struct buffer buffer;
1116 char *message, *buffer_s;
1117
1118 message = xstrdup (ex.message);
1119 make_cleanup (xfree, message);
1120
1121 buffer_init (&buffer);
7ae1a6a6 1122 linux_ptrace_attach_fail_reason (pid, &buffer);
87b0bb13
JK
1123
1124 buffer_grow_str0 (&buffer, "");
1125 buffer_s = buffer_finish (&buffer);
1126 make_cleanup (xfree, buffer_s);
1127
7ae1a6a6
PA
1128 if (*buffer_s != '\0')
1129 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1130 else
1131 throw_error (ex.error, "%s", message);
87b0bb13 1132 }
492d29ea 1133 END_CATCH
d6b0e80f 1134
af990527
PA
1135 /* The ptrace base target adds the main thread with (pid,0,0)
1136 format. Decorate it with lwp info. */
dfd4cc63
LM
1137 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1138 ptid_get_pid (inferior_ptid),
1139 0);
af990527
PA
1140 thread_change_ptid (inferior_ptid, ptid);
1141
9f0bdab8 1142 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1143 lp = add_initial_lwp (ptid);
a0ef4274 1144
4a6ed09b 1145 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->signalled);
dacc9cb2
PP
1146 if (!WIFSTOPPED (status))
1147 {
1148 if (WIFEXITED (status))
1149 {
1150 int exit_code = WEXITSTATUS (status);
1151
1152 target_terminal_ours ();
1153 target_mourn_inferior ();
1154 if (exit_code == 0)
1155 error (_("Unable to attach: program exited normally."));
1156 else
1157 error (_("Unable to attach: program exited with code %d."),
1158 exit_code);
1159 }
1160 else if (WIFSIGNALED (status))
1161 {
2ea28649 1162 enum gdb_signal signo;
dacc9cb2
PP
1163
1164 target_terminal_ours ();
1165 target_mourn_inferior ();
1166
2ea28649 1167 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1168 error (_("Unable to attach: program terminated with signal "
1169 "%s, %s."),
2ea28649
PA
1170 gdb_signal_to_name (signo),
1171 gdb_signal_to_string (signo));
dacc9cb2
PP
1172 }
1173
1174 internal_error (__FILE__, __LINE__,
1175 _("unexpected status %d for PID %ld"),
dfd4cc63 1176 status, (long) ptid_get_lwp (ptid));
dacc9cb2
PP
1177 }
1178
a0ef4274 1179 lp->stopped = 1;
9f0bdab8 1180
a0ef4274 1181 /* Save the wait status to report later. */
d6b0e80f 1182 lp->resumed = 1;
a0ef4274
DJ
1183 if (debug_linux_nat)
1184 fprintf_unfiltered (gdb_stdlog,
1185 "LNA: waitpid %ld, saving status %s\n",
dfd4cc63 1186 (long) ptid_get_pid (lp->ptid), status_to_str (status));
710151dd 1187
7feb7d06
PA
1188 lp->status = status;
1189
8784d563
PA
1190 /* We must attach to every LWP. If /proc is mounted, use that to
1191 find them now. The inferior may be using raw clone instead of
1192 using pthreads. But even if it is using pthreads, thread_db
1193 walks structures in the inferior's address space to find the list
1194 of threads/LWPs, and those structures may well be corrupted.
1195 Note that once thread_db is loaded, we'll still use it to list
1196 threads and associate pthread info with each LWP. */
1197 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1198 attach_proc_task_lwp_callback);
1199
7feb7d06 1200 if (target_can_async_p ())
6a3753b3 1201 target_async (1);
d6b0e80f
AC
1202}
1203
a0ef4274
DJ
1204/* Get pending status of LP. */
1205static int
1206get_pending_status (struct lwp_info *lp, int *status)
1207{
a493e3e2 1208 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1209
1210 /* If we paused threads momentarily, we may have stored pending
1211 events in lp->status or lp->waitstatus (see stop_wait_callback),
1212 and GDB core hasn't seen any signal for those threads.
1213 Otherwise, the last signal reported to the core is found in the
1214 thread object's stop_signal.
1215
1216 There's a corner case that isn't handled here at present. Only
1217 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1218 stop_signal make sense as a real signal to pass to the inferior.
1219 Some catchpoint related events, like
1220 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1221 to GDB_SIGNAL_SIGTRAP when the catchpoint triggers. But,
ca2163eb
PA
1222 those traps are debug API (ptrace in our case) related and
1223 induced; the inferior wouldn't see them if it wasn't being
1224 traced. Hence, we should never pass them to the inferior, even
1225 when set to pass state. Since this corner case isn't handled by
1226 infrun.c when proceeding with a signal, for consistency, neither
1227 do we handle it here (or elsewhere in the file we check for
1228 signal pass state). Normally SIGTRAP isn't set to pass state, so
1229 this is really a corner case. */
1230
1231 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1232 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1233 else if (lp->status)
2ea28649 1234 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
fbea99ea 1235 else if (target_is_non_stop_p () && !is_executing (lp->ptid))
ca2163eb
PA
1236 {
1237 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1238
16c381f0 1239 signo = tp->suspend.stop_signal;
ca2163eb 1240 }
fbea99ea 1241 else if (!target_is_non_stop_p ())
a0ef4274 1242 {
ca2163eb
PA
1243 struct target_waitstatus last;
1244 ptid_t last_ptid;
4c28f408 1245
ca2163eb 1246 get_last_target_status (&last_ptid, &last);
4c28f408 1247
dfd4cc63 1248 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
ca2163eb 1249 {
e09875d4 1250 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1251
16c381f0 1252 signo = tp->suspend.stop_signal;
4c28f408 1253 }
ca2163eb 1254 }
4c28f408 1255
ca2163eb 1256 *status = 0;
4c28f408 1257
a493e3e2 1258 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1259 {
1260 if (debug_linux_nat)
1261 fprintf_unfiltered (gdb_stdlog,
1262 "GPT: lwp %s has no pending signal\n",
1263 target_pid_to_str (lp->ptid));
1264 }
1265 else if (!signal_pass_state (signo))
1266 {
1267 if (debug_linux_nat)
3e43a32a
MS
1268 fprintf_unfiltered (gdb_stdlog,
1269 "GPT: lwp %s had signal %s, "
1270 "but it is in no pass state\n",
ca2163eb 1271 target_pid_to_str (lp->ptid),
2ea28649 1272 gdb_signal_to_string (signo));
a0ef4274 1273 }
a0ef4274 1274 else
4c28f408 1275 {
2ea28649 1276 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1277
1278 if (debug_linux_nat)
1279 fprintf_unfiltered (gdb_stdlog,
1280 "GPT: lwp %s has pending signal %s\n",
1281 target_pid_to_str (lp->ptid),
2ea28649 1282 gdb_signal_to_string (signo));
4c28f408 1283 }
a0ef4274
DJ
1284
1285 return 0;
1286}
1287
d6b0e80f
AC
1288static int
1289detach_callback (struct lwp_info *lp, void *data)
1290{
1291 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1292
1293 if (debug_linux_nat && lp->status)
1294 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1295 strsignal (WSTOPSIG (lp->status)),
1296 target_pid_to_str (lp->ptid));
1297
a0ef4274
DJ
1298 /* If there is a pending SIGSTOP, get rid of it. */
1299 if (lp->signalled)
d6b0e80f 1300 {
d6b0e80f
AC
1301 if (debug_linux_nat)
1302 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1303 "DC: Sending SIGCONT to %s\n",
1304 target_pid_to_str (lp->ptid));
d6b0e80f 1305
dfd4cc63 1306 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
d6b0e80f 1307 lp->signalled = 0;
d6b0e80f
AC
1308 }
1309
1310 /* We don't actually detach from the LWP that has an id equal to the
1311 overall process id just yet. */
dfd4cc63 1312 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
d6b0e80f 1313 {
a0ef4274
DJ
1314 int status = 0;
1315
1316 /* Pass on any pending signal for this LWP. */
1317 get_pending_status (lp, &status);
1318
7b50312a
PA
1319 if (linux_nat_prepare_to_resume != NULL)
1320 linux_nat_prepare_to_resume (lp);
d6b0e80f 1321 errno = 0;
dfd4cc63 1322 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
a0ef4274 1323 WSTOPSIG (status)) < 0)
8a3fe4f8 1324 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1325 safe_strerror (errno));
1326
1327 if (debug_linux_nat)
1328 fprintf_unfiltered (gdb_stdlog,
1329 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1330 target_pid_to_str (lp->ptid),
7feb7d06 1331 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1332
1333 delete_lwp (lp->ptid);
1334 }
1335
1336 return 0;
1337}
1338
1339static void
52554a0e 1340linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f 1341{
b84876c2 1342 int pid;
a0ef4274 1343 int status;
d90e17a7
PA
1344 struct lwp_info *main_lwp;
1345
dfd4cc63 1346 pid = ptid_get_pid (inferior_ptid);
a0ef4274 1347
ae5e0686
MK
1348 /* Don't unregister from the event loop, as there may be other
1349 inferiors running. */
b84876c2 1350
4c28f408
PA
1351 /* Stop all threads before detaching. ptrace requires that the
1352 thread is stopped to sucessfully detach. */
d90e17a7 1353 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1354 /* ... and wait until all of them have reported back that
1355 they're no longer running. */
d90e17a7 1356 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1357
d90e17a7 1358 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1359
1360 /* Only the initial process should be left right now. */
dfd4cc63 1361 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
d90e17a7
PA
1362
1363 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1364
a0ef4274
DJ
1365 /* Pass on any pending signal for the last LWP. */
1366 if ((args == NULL || *args == '\0')
d90e17a7 1367 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1368 && WIFSTOPPED (status))
1369 {
52554a0e
TT
1370 char *tem;
1371
a0ef4274
DJ
1372 /* Put the signal number in ARGS so that inf_ptrace_detach will
1373 pass it along with PTRACE_DETACH. */
224c3ddb 1374 tem = (char *) alloca (8);
cde33bf1 1375 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
52554a0e 1376 args = tem;
ddabfc73
TT
1377 if (debug_linux_nat)
1378 fprintf_unfiltered (gdb_stdlog,
1379 "LND: Sending signal %s to %s\n",
1380 args,
1381 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1382 }
1383
7b50312a
PA
1384 if (linux_nat_prepare_to_resume != NULL)
1385 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1386 delete_lwp (main_lwp->ptid);
b84876c2 1387
7a7d3353
PA
1388 if (forks_exist_p ())
1389 {
1390 /* Multi-fork case. The current inferior_ptid is being detached
1391 from, but there are other viable forks to debug. Detach from
1392 the current fork, and context-switch to the first
1393 available. */
1394 linux_fork_detach (args, from_tty);
7a7d3353
PA
1395 }
1396 else
1397 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1398}
1399
8a99810d
PA
1400/* Resume execution of the inferior process. If STEP is nonzero,
1401 single-step it. If SIGNAL is nonzero, give it that signal. */
1402
1403static void
23f238d3
PA
1404linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1405 enum gdb_signal signo)
8a99810d 1406{
8a99810d 1407 lp->step = step;
9c02b525
PA
1408
1409 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1410 We only presently need that if the LWP is stepped though (to
1411 handle the case of stepping a breakpoint instruction). */
1412 if (step)
1413 {
1414 struct regcache *regcache = get_thread_regcache (lp->ptid);
1415
1416 lp->stop_pc = regcache_read_pc (regcache);
1417 }
1418 else
1419 lp->stop_pc = 0;
1420
8a99810d
PA
1421 if (linux_nat_prepare_to_resume != NULL)
1422 linux_nat_prepare_to_resume (lp);
90ad5e1d 1423 linux_ops->to_resume (linux_ops, lp->ptid, step, signo);
23f238d3
PA
1424
1425 /* Successfully resumed. Clear state that no longer makes sense,
1426 and mark the LWP as running. Must not do this before resuming
1427 otherwise if that fails other code will be confused. E.g., we'd
1428 later try to stop the LWP and hang forever waiting for a stop
1429 status. Note that we must not throw after this is cleared,
1430 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1431 lp->stopped = 0;
23f238d3 1432 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8a99810d
PA
1433 registers_changed_ptid (lp->ptid);
1434}
1435
23f238d3
PA
1436/* Called when we try to resume a stopped LWP and that errors out. If
1437 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
1438 or about to become), discard the error, clear any pending status
1439 the LWP may have, and return true (we'll collect the exit status
1440 soon enough). Otherwise, return false. */
1441
1442static int
1443check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1444{
1445 /* If we get an error after resuming the LWP successfully, we'd
1446 confuse !T state for the LWP being gone. */
1447 gdb_assert (lp->stopped);
1448
1449 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1450 because even if ptrace failed with ESRCH, the tracee may be "not
1451 yet fully dead", but already refusing ptrace requests. In that
1452 case the tracee has 'R (Running)' state for a little bit
1453 (observed in Linux 3.18). See also the note on ESRCH in the
1454 ptrace(2) man page. Instead, check whether the LWP has any state
1455 other than ptrace-stopped. */
1456
1457 /* Don't assume anything if /proc/PID/status can't be read. */
1458 if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
1459 {
1460 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1461 lp->status = 0;
1462 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1463 return 1;
1464 }
1465 return 0;
1466}
1467
1468/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1469 disappears while we try to resume it. */
1470
1471static void
1472linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1473{
1474 TRY
1475 {
1476 linux_resume_one_lwp_throw (lp, step, signo);
1477 }
1478 CATCH (ex, RETURN_MASK_ERROR)
1479 {
1480 if (!check_ptrace_stopped_lwp_gone (lp))
1481 throw_exception (ex);
1482 }
1483 END_CATCH
1484}
1485
d6b0e80f
AC
1486/* Resume LP. */
1487
25289eb2 1488static void
e5ef252a 1489resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1490{
25289eb2 1491 if (lp->stopped)
6c95b8df 1492 {
c9657e70 1493 struct inferior *inf = find_inferior_ptid (lp->ptid);
25289eb2
PA
1494
1495 if (inf->vfork_child != NULL)
1496 {
1497 if (debug_linux_nat)
1498 fprintf_unfiltered (gdb_stdlog,
1499 "RC: Not resuming %s (vfork parent)\n",
1500 target_pid_to_str (lp->ptid));
1501 }
8a99810d 1502 else if (!lwp_status_pending_p (lp))
25289eb2
PA
1503 {
1504 if (debug_linux_nat)
1505 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1506 "RC: Resuming sibling %s, %s, %s\n",
1507 target_pid_to_str (lp->ptid),
1508 (signo != GDB_SIGNAL_0
1509 ? strsignal (gdb_signal_to_host (signo))
1510 : "0"),
1511 step ? "step" : "resume");
25289eb2 1512
8a99810d 1513 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1514 }
1515 else
1516 {
1517 if (debug_linux_nat)
1518 fprintf_unfiltered (gdb_stdlog,
1519 "RC: Not resuming sibling %s (has pending)\n",
1520 target_pid_to_str (lp->ptid));
1521 }
6c95b8df 1522 }
25289eb2 1523 else
d6b0e80f 1524 {
d90e17a7
PA
1525 if (debug_linux_nat)
1526 fprintf_unfiltered (gdb_stdlog,
25289eb2 1527 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1528 target_pid_to_str (lp->ptid));
d6b0e80f 1529 }
25289eb2 1530}
d6b0e80f 1531
8817a6f2
PA
1532/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1533 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1534
25289eb2 1535static int
8817a6f2 1536linux_nat_resume_callback (struct lwp_info *lp, void *except)
25289eb2 1537{
e5ef252a
PA
1538 enum gdb_signal signo = GDB_SIGNAL_0;
1539
8817a6f2
PA
1540 if (lp == except)
1541 return 0;
1542
e5ef252a
PA
1543 if (lp->stopped)
1544 {
1545 struct thread_info *thread;
1546
1547 thread = find_thread_ptid (lp->ptid);
1548 if (thread != NULL)
1549 {
70509625 1550 signo = thread->suspend.stop_signal;
e5ef252a
PA
1551 thread->suspend.stop_signal = GDB_SIGNAL_0;
1552 }
1553 }
1554
1555 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1556 return 0;
1557}
1558
1559static int
1560resume_clear_callback (struct lwp_info *lp, void *data)
1561{
1562 lp->resumed = 0;
25289eb2 1563 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1564 return 0;
1565}
1566
1567static int
1568resume_set_callback (struct lwp_info *lp, void *data)
1569{
1570 lp->resumed = 1;
25289eb2 1571 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1572 return 0;
1573}
1574
1575static void
28439f5e 1576linux_nat_resume (struct target_ops *ops,
2ea28649 1577 ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1578{
1579 struct lwp_info *lp;
d90e17a7 1580 int resume_many;
d6b0e80f 1581
76f50ad1
DJ
1582 if (debug_linux_nat)
1583 fprintf_unfiltered (gdb_stdlog,
1584 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1585 step ? "step" : "resume",
1586 target_pid_to_str (ptid),
a493e3e2 1587 (signo != GDB_SIGNAL_0
2ea28649 1588 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1589 target_pid_to_str (inferior_ptid));
1590
d6b0e80f 1591 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1592 resume_many = (ptid_equal (minus_one_ptid, ptid)
1593 || ptid_is_pid (ptid));
4c28f408 1594
e3e9f5a2
PA
1595 /* Mark the lwps we're resuming as resumed. */
1596 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1597
d90e17a7
PA
1598 /* See if it's the current inferior that should be handled
1599 specially. */
1600 if (resume_many)
1601 lp = find_lwp_pid (inferior_ptid);
1602 else
1603 lp = find_lwp_pid (ptid);
9f0bdab8 1604 gdb_assert (lp != NULL);
d6b0e80f 1605
9f0bdab8 1606 /* Remember if we're stepping. */
25289eb2 1607 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1608
9f0bdab8
DJ
1609 /* If we have a pending wait status for this thread, there is no
1610 point in resuming the process. But first make sure that
1611 linux_nat_wait won't preemptively handle the event - we
1612 should never take this short-circuit if we are going to
1613 leave LP running, since we have skipped resuming all the
1614 other threads. This bit of code needs to be synchronized
1615 with linux_nat_wait. */
76f50ad1 1616
9f0bdab8
DJ
1617 if (lp->status && WIFSTOPPED (lp->status))
1618 {
2455069d
UW
1619 if (!lp->step
1620 && WSTOPSIG (lp->status)
1621 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1622 {
9f0bdab8
DJ
1623 if (debug_linux_nat)
1624 fprintf_unfiltered (gdb_stdlog,
1625 "LLR: Not short circuiting for ignored "
1626 "status 0x%x\n", lp->status);
1627
d6b0e80f
AC
1628 /* FIXME: What should we do if we are supposed to continue
1629 this thread with a signal? */
a493e3e2 1630 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1631 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1632 lp->status = 0;
1633 }
1634 }
76f50ad1 1635
8a99810d 1636 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1637 {
1638 /* FIXME: What should we do if we are supposed to continue
1639 this thread with a signal? */
a493e3e2 1640 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1641
9f0bdab8
DJ
1642 if (debug_linux_nat)
1643 fprintf_unfiltered (gdb_stdlog,
1644 "LLR: Short circuiting for status 0x%x\n",
1645 lp->status);
d6b0e80f 1646
7feb7d06
PA
1647 if (target_can_async_p ())
1648 {
6a3753b3 1649 target_async (1);
7feb7d06
PA
1650 /* Tell the event loop we have something to process. */
1651 async_file_mark ();
1652 }
9f0bdab8 1653 return;
d6b0e80f
AC
1654 }
1655
d90e17a7 1656 if (resume_many)
8817a6f2 1657 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7 1658
d6b0e80f
AC
1659 if (debug_linux_nat)
1660 fprintf_unfiltered (gdb_stdlog,
1661 "LLR: %s %s, %s (resume event thread)\n",
1662 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2bf6fb9d 1663 target_pid_to_str (lp->ptid),
a493e3e2 1664 (signo != GDB_SIGNAL_0
2ea28649 1665 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1666
2bf6fb9d
PA
1667 linux_resume_one_lwp (lp, step, signo);
1668
b84876c2 1669 if (target_can_async_p ())
6a3753b3 1670 target_async (1);
d6b0e80f
AC
1671}
1672
c5f62d5f 1673/* Send a signal to an LWP. */
d6b0e80f
AC
1674
1675static int
1676kill_lwp (int lwpid, int signo)
1677{
4a6ed09b 1678 int ret;
d6b0e80f 1679
4a6ed09b
PA
1680 errno = 0;
1681 ret = syscall (__NR_tkill, lwpid, signo);
1682 if (errno == ENOSYS)
1683 {
1684 /* If tkill fails, then we are not using nptl threads, a
1685 configuration we no longer support. */
1686 perror_with_name (("tkill"));
1687 }
1688 return ret;
d6b0e80f
AC
1689}
1690
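/* Illustrative sketch, not part of GDB: assuming a kernel that provides
   the tkill system call (as kill_lwp above requires), this is roughly how
   a signal is directed at one specific thread instead of the whole thread
   group, which is what kill(2) would target.  The helper name is
   hypothetical.  */
#if 0
#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

static int
example_signal_one_thread (pid_t tid, int sig)
{
  /* tkill delivers SIG to the single thread TID only.  */
  errno = 0;
  if (syscall (__NR_tkill, tid, sig) == -1)
    return -errno;
  return 0;
}
#endif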
ca2163eb
PA
1691/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1692 event, check if the core is interested in it: if not, ignore the
1693 event, and keep waiting; otherwise, we need to toggle the LWP's
1694 syscall entry/exit status, since the ptrace event itself doesn't
1695 indicate it, and report the trap to higher layers. */
1696
1697static int
1698linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1699{
1700 struct target_waitstatus *ourstatus = &lp->waitstatus;
1701 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1702 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1703
1704 if (stopping)
1705 {
1706 /* If we're stopping threads, there's a SIGSTOP pending, which
1707 makes it so that the LWP reports an immediate syscall return,
1708 followed by the SIGSTOP. Skip seeing that "return" using
1709 PTRACE_CONT directly, and let stop_wait_callback collect the
1710 SIGSTOP. Later when the thread is resumed, a new syscall
1711 entry event is reported. If we didn't do this (and returned 0), we'd
1712 leave a syscall entry pending, and our caller, by using
1713 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1714 itself. Later, when the user re-resumes this LWP, we'd see
1715 another syscall entry event and we'd mistake it for a return.
1716
1717 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1718 (leaving immediately with LWP->signalled set, without issuing
1719 a PTRACE_CONT), it would still be problematic to leave this
1720 syscall enter pending, as later when the thread is resumed,
1721 it would then see the same syscall exit mentioned above,
1722 followed by the delayed SIGSTOP, while the syscall didn't
1723 actually get to execute. It seems it would be even more
1724 confusing to the user. */
1725
1726 if (debug_linux_nat)
1727 fprintf_unfiltered (gdb_stdlog,
1728 "LHST: ignoring syscall %d "
1729 "for LWP %ld (stopping threads), "
1730 "resuming with PTRACE_CONT for SIGSTOP\n",
1731 syscall_number,
dfd4cc63 1732 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1733
1734 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1735 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1736 lp->stopped = 0;
ca2163eb
PA
1737 return 1;
1738 }
1739
bfd09d20
JS
1740 /* Always update the entry/return state, even if this particular
1741 syscall isn't interesting to the core now. In async mode,
1742 the user could install a new catchpoint for this syscall
1743 between syscall enter/return, and we'll need to know to
1744 report a syscall return if that happens. */
1745 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1746 ? TARGET_WAITKIND_SYSCALL_RETURN
1747 : TARGET_WAITKIND_SYSCALL_ENTRY);
1748
ca2163eb
PA
1749 if (catch_syscall_enabled ())
1750 {
ca2163eb
PA
1751 if (catching_syscall_number (syscall_number))
1752 {
1753 /* Alright, an event to report. */
1754 ourstatus->kind = lp->syscall_state;
1755 ourstatus->value.syscall_number = syscall_number;
1756
1757 if (debug_linux_nat)
1758 fprintf_unfiltered (gdb_stdlog,
1759 "LHST: stopping for %s of syscall %d"
1760 " for LWP %ld\n",
3e43a32a
MS
1761 lp->syscall_state
1762 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1763 ? "entry" : "return",
1764 syscall_number,
dfd4cc63 1765 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1766 return 0;
1767 }
1768
1769 if (debug_linux_nat)
1770 fprintf_unfiltered (gdb_stdlog,
1771 "LHST: ignoring %s of syscall %d "
1772 "for LWP %ld\n",
1773 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1774 ? "entry" : "return",
1775 syscall_number,
dfd4cc63 1776 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1777 }
1778 else
1779 {
1780 /* If we had been syscall tracing, and hence used PT_SYSCALL
1781 before on this LWP, it could happen that the user removes all
1782 syscall catchpoints before we get to process this event.
1783 There are two noteworthy issues here:
1784
1785 - When stopped at a syscall entry event, resuming with
1786 PT_STEP still resumes executing the syscall and reports a
1787 syscall return.
1788
1789 - Only PT_SYSCALL catches syscall enters. If we last
1790 single-stepped this thread, then this event can't be a
1791 syscall enter; it has to be a syscall exit.
1793
1794 The points above mean that the next resume, be it PT_STEP or
1795 PT_CONTINUE, can not trigger a syscall trace event. */
1796 if (debug_linux_nat)
1797 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1798 "LHST: caught syscall event "
1799 "with no syscall catchpoints."
ca2163eb
PA
1800 " %d for LWP %ld, ignoring\n",
1801 syscall_number,
dfd4cc63 1802 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1803 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1804 }
1805
1806 /* The core isn't interested in this event. For efficiency, avoid
1807 stopping all threads only to have the core resume them all again.
1808 Since we're not stopping threads, if we're still syscall tracing
1809 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1810 subsequent syscall. Simply resume using the inf-ptrace layer,
1811 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1812
8a99810d 1813 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1814 return 1;
1815}
1816
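/* Illustrative sketch, not part of GDB: a PTRACE_SYSCALL stop looks the
   same for a syscall entry and a syscall exit, so a tracer must track the
   phase itself by flipping a per-thread flag at every syscall stop, which
   is what linux_handle_syscall_trap does above with lp->syscall_state.
   The structure and helper below are hypothetical.  */
#if 0
struct example_lwp
{
  int in_syscall;		/* 0: the next syscall stop is an entry.  */
};

static int
example_note_syscall_stop (struct example_lwp *t)
{
  /* Entry, exit, entry, ...: each stop toggles the phase.  */
  t->in_syscall = !t->in_syscall;
  return t->in_syscall;		/* Non-zero means "this was an entry".  */
}
#endif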
3d799a95
DJ
1817/* Handle a GNU/Linux extended wait response. If we see a clone
1818 event, we need to add the new LWP to our list (and not report the
1819 trap to higher layers). This function returns non-zero if the
1820 event should be ignored and we should wait again. */
d6b0e80f
AC
1822
1823static int
4dd63d48 1824linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1825{
dfd4cc63 1826 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1827 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1828 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1829
bfd09d20
JS
1830 /* All extended events we currently use are mid-syscall. Only
1831 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1832 you have to be using PTRACE_SEIZE to get that. */
1833 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1834
3d799a95
DJ
1835 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1836 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1837 {
3d799a95
DJ
1838 unsigned long new_pid;
1839 int ret;
1840
1841 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1842
3d799a95
DJ
1843 /* If we haven't already seen the new PID stop, wait for it now. */
1844 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1845 {
1846 /* The new child has a pending SIGSTOP. We can't affect it until it
1847 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1848 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1849 if (ret == -1)
1850 perror_with_name (_("waiting for new child"));
1851 else if (ret != new_pid)
1852 internal_error (__FILE__, __LINE__,
1853 _("wait returned unexpected PID %d"), ret);
1854 else if (!WIFSTOPPED (status))
1855 internal_error (__FILE__, __LINE__,
1856 _("wait returned unexpected status 0x%x"), status);
1857 }
1858
3a3e9ee3 1859 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1860
26cb8b7c
PA
1861 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1862 {
1863 /* The arch-specific native code may need to know about new
1864 forks even if those end up never mapped to an
1865 inferior. */
1866 if (linux_nat_new_fork != NULL)
1867 linux_nat_new_fork (lp, new_pid);
1868 }
1869
2277426b 1870 if (event == PTRACE_EVENT_FORK
dfd4cc63 1871 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 1872 {
2277426b
PA
1873 /* Handle checkpointing by linux-fork.c here as a special
1874 case. We don't want the follow-fork-mode or 'catch fork'
1875 to interfere with this. */
1876
1877 /* This won't actually modify the breakpoint list, but will
1878 physically remove the breakpoints from the child. */
d80ee84f 1879 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
1880
1881 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1882 if (!find_fork_pid (new_pid))
1883 add_fork (new_pid);
2277426b
PA
1884
1885 /* Report as spurious, so that infrun doesn't want to follow
1886 this fork. We're actually doing an infcall in
1887 linux-fork.c. */
1888 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
1889
1890 /* Report the stop to the core. */
1891 return 0;
1892 }
1893
3d799a95
DJ
1894 if (event == PTRACE_EVENT_FORK)
1895 ourstatus->kind = TARGET_WAITKIND_FORKED;
1896 else if (event == PTRACE_EVENT_VFORK)
1897 ourstatus->kind = TARGET_WAITKIND_VFORKED;
4dd63d48 1898 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1899 {
78768c4a
JK
1900 struct lwp_info *new_lp;
1901
3d799a95 1902 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 1903
3c4d7e12
PA
1904 if (debug_linux_nat)
1905 fprintf_unfiltered (gdb_stdlog,
1906 "LHEW: Got clone event "
1907 "from LWP %d, new child is LWP %ld\n",
1908 pid, new_pid);
1909
dfd4cc63 1910 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
4c28f408 1911 new_lp->stopped = 1;
4dd63d48 1912 new_lp->resumed = 1;
d6b0e80f 1913
2db9a427
PA
1914 /* If the thread_db layer is active, let it record the user
1915 level thread id and status, and add the thread to GDB's
1916 list. */
1917 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 1918 {
2db9a427
PA
1919 /* The process is not using thread_db. Add the LWP to
1920 GDB's list. */
1921 target_post_attach (ptid_get_lwp (new_lp->ptid));
1922 add_thread (new_lp->ptid);
1923 }
4c28f408 1924
2ee52aa4 1925 /* Even if we're stopping the thread for some reason
4dd63d48
PA
1926 internal to this module, from the perspective of infrun
1927 and the user/frontend, this new thread is running until
1928 it next reports a stop. */
2ee52aa4 1929 set_running (new_lp->ptid, 1);
4dd63d48 1930 set_executing (new_lp->ptid, 1);
4c28f408 1931
4dd63d48 1932 if (WSTOPSIG (status) != SIGSTOP)
79395f92 1933 {
4dd63d48
PA
1934 /* This can happen if someone starts sending signals with
1935 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
1936 thread before it gets a chance to run.
1937 This is an unlikely case, and harder to handle for
1938 fork / vfork than for clone, so we do not try - but
1939 we handle it for clone events here. */
1940
1941 new_lp->signalled = 1;
1942
79395f92
PA
1943 /* We created NEW_LP so it cannot yet contain STATUS. */
1944 gdb_assert (new_lp->status == 0);
1945
1946 /* Save the wait status to report later. */
1947 if (debug_linux_nat)
1948 fprintf_unfiltered (gdb_stdlog,
1949 "LHEW: waitpid of new LWP %ld, "
1950 "saving status %s\n",
dfd4cc63 1951 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
1952 status_to_str (status));
1953 new_lp->status = status;
1954 }
1955
3d799a95
DJ
1956 return 1;
1957 }
1958
1959 return 0;
d6b0e80f
AC
1960 }
1961
3d799a95
DJ
1962 if (event == PTRACE_EVENT_EXEC)
1963 {
a75724bc
PA
1964 if (debug_linux_nat)
1965 fprintf_unfiltered (gdb_stdlog,
1966 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 1967 ptid_get_lwp (lp->ptid));
a75724bc 1968
3d799a95
DJ
1969 ourstatus->kind = TARGET_WAITKIND_EXECD;
1970 ourstatus->value.execd_pathname
8dd27370 1971 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 1972
8af756ef
PA
1973 /* The thread that execed must have been resumed, but, when a
1974 thread execs, it changes its tid to the tgid, and the old
1975 tgid thread might have not been resumed. */
1976 lp->resumed = 1;
6c95b8df
PA
1977 return 0;
1978 }
1979
1980 if (event == PTRACE_EVENT_VFORK_DONE)
1981 {
1982 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 1983 {
6c95b8df 1984 if (debug_linux_nat)
3e43a32a
MS
1985 fprintf_unfiltered (gdb_stdlog,
1986 "LHEW: Got expected PTRACE_EVENT_"
1987 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 1988 ptid_get_lwp (lp->ptid));
3d799a95 1989
6c95b8df
PA
1990 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
1991 return 0;
3d799a95
DJ
1992 }
1993
6c95b8df 1994 if (debug_linux_nat)
3e43a32a
MS
1995 fprintf_unfiltered (gdb_stdlog,
1996 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 1997 "from LWP %ld: ignoring\n",
dfd4cc63 1998 ptid_get_lwp (lp->ptid));
6c95b8df 1999 return 1;
3d799a95
DJ
2000 }
2001
2002 internal_error (__FILE__, __LINE__,
2003 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2004}
2005
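/* Illustrative sketch, not part of GDB: when PTRACE_O_TRACEFORK /
   _VFORK / _CLONE are enabled, the new child's pid travels in the ptrace
   event message of the parent's stop and is fetched with
   PTRACE_GETEVENTMSG, exactly as linux_handle_extended_wait does above.
   The helper name is hypothetical.  */
#if 0
#include <sys/ptrace.h>

static long
example_new_child_pid (pid_t stopped_lwp)
{
  unsigned long new_pid = 0;

  /* Valid only while STOPPED_LWP is stopped at the fork/vfork/clone
     event; returns -1 on failure.  */
  if (ptrace (PTRACE_GETEVENTMSG, stopped_lwp, 0, &new_pid) == -1)
    return -1;
  return (long) new_pid;
}
#endif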
2006/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2007 exited. */
2008
2009static int
2010wait_lwp (struct lwp_info *lp)
2011{
2012 pid_t pid;
432b4d03 2013 int status = 0;
d6b0e80f 2014 int thread_dead = 0;
432b4d03 2015 sigset_t prev_mask;
d6b0e80f
AC
2016
2017 gdb_assert (!lp->stopped);
2018 gdb_assert (lp->status == 0);
2019
432b4d03
JK
2020 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2021 block_child_signals (&prev_mask);
2022
2023 for (;;)
d6b0e80f 2024 {
4a6ed09b 2025 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WALL | WNOHANG);
a9f4bb21
PA
2026 if (pid == -1 && errno == ECHILD)
2027 {
2028 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2029 now because if this was a non-leader thread execing, we
2030 won't get an exit event. See comments on exec events at
2031 the top of the file. */
a9f4bb21
PA
2032 thread_dead = 1;
2033 if (debug_linux_nat)
2034 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2035 target_pid_to_str (lp->ptid));
2036 }
432b4d03
JK
2037 if (pid != 0)
2038 break;
2039
2040 /* Bugs 10970, 12702.
2041 Thread group leader may have exited in which case we'll lock up in
2042 waitpid if there are other threads, even if they are all zombies too.
2043 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2044 tkill(pid,0) cannot be used here as it gets ESRCH for both
2045 zombie and running processes.
432b4d03
JK
2046
2047 As a workaround, check if we're waiting for the thread group leader and
2048 if it's a zombie, and avoid calling waitpid if it is.
2049
2050 This is racy: what if the tgl becomes a zombie right after we check?
2051 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2052 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2053
dfd4cc63
LM
2054 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2055 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2056 {
d6b0e80f
AC
2057 thread_dead = 1;
2058 if (debug_linux_nat)
432b4d03
JK
2059 fprintf_unfiltered (gdb_stdlog,
2060 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2061 target_pid_to_str (lp->ptid));
432b4d03 2062 break;
d6b0e80f 2063 }
432b4d03
JK
2064
2065 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD handlers
2066 get invoked even though our caller had them intentionally blocked by
2067 block_child_signals. Only the loop in linux_nat_wait_1 is sensitive to
2068 this, and there, if we get called, my_waitpid gets called again before
2069 it gets to sigsuspend, so we can safely let the handlers
2070 get executed here. */
2071
d36bf488
DE
2072 if (debug_linux_nat)
2073 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2074 sigsuspend (&suspend_mask);
2075 }
2076
2077 restore_child_signals_mask (&prev_mask);
2078
d6b0e80f
AC
2079 if (!thread_dead)
2080 {
dfd4cc63 2081 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2082
2083 if (debug_linux_nat)
2084 {
2085 fprintf_unfiltered (gdb_stdlog,
2086 "WL: waitpid %s received %s\n",
2087 target_pid_to_str (lp->ptid),
2088 status_to_str (status));
2089 }
d6b0e80f 2090
a9f4bb21
PA
2091 /* Check if the thread has exited. */
2092 if (WIFEXITED (status) || WIFSIGNALED (status))
2093 {
69dde7dc
PA
2094 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
2095 {
2096 if (debug_linux_nat)
2097 fprintf_unfiltered (gdb_stdlog, "WL: Process %d exited.\n",
2098 ptid_get_pid (lp->ptid));
2099
2100 /* This is the leader exiting, it means the whole
2101 process is gone. Store the status to report to the
2102 core. Store it in lp->waitstatus, because lp->status
2103 would be ambiguous (W_EXITCODE(0,0) == 0). */
2104 store_waitstatus (&lp->waitstatus, status);
2105 return 0;
2106 }
2107
a9f4bb21
PA
2108 thread_dead = 1;
2109 if (debug_linux_nat)
2110 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2111 target_pid_to_str (lp->ptid));
2112 }
d6b0e80f
AC
2113 }
2114
2115 if (thread_dead)
2116 {
e26af52f 2117 exit_lwp (lp);
d6b0e80f
AC
2118 return 0;
2119 }
2120
2121 gdb_assert (WIFSTOPPED (status));
8817a6f2 2122 lp->stopped = 1;
d6b0e80f 2123
8784d563
PA
2124 if (lp->must_set_ptrace_flags)
2125 {
2126 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2127 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2128
de0d863e 2129 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2130 lp->must_set_ptrace_flags = 0;
2131 }
2132
ca2163eb
PA
2133 /* Handle GNU/Linux's syscall SIGTRAPs. */
2134 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2135 {
2136 /* No longer need the sysgood bit. The ptrace event ends up
2137 recorded in lp->waitstatus if we care for it. We can carry
2138 on handling the event like a regular SIGTRAP from here
2139 on. */
2140 status = W_STOPCODE (SIGTRAP);
2141 if (linux_handle_syscall_trap (lp, 1))
2142 return wait_lwp (lp);
2143 }
bfd09d20
JS
2144 else
2145 {
2146 /* Almost all other ptrace-stops are known to be outside of system
2147 calls, with further exceptions in linux_handle_extended_wait. */
2148 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2149 }
ca2163eb 2150
d6b0e80f 2151 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2152 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2153 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2154 {
2155 if (debug_linux_nat)
2156 fprintf_unfiltered (gdb_stdlog,
2157 "WL: Handling extended status 0x%06x\n",
2158 status);
4dd63d48 2159 linux_handle_extended_wait (lp, status);
20ba1ce6 2160 return 0;
d6b0e80f
AC
2161 }
2162
2163 return status;
2164}
2165
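/* Illustrative sketch, not part of GDB: the pattern wait_lwp uses above --
   a non-blocking waitpid with __WALL | WNOHANG, then sigsuspend until the
   next SIGCHLD -- avoids blocking forever on a zombie thread group
   leader.  This stand-alone version assumes the caller has SIGCHLD
   blocked and passes the mask to suspend with; the names are
   hypothetical.  */
#if 0
#include <signal.h>
#include <sys/wait.h>

static pid_t
example_wait_without_blocking (pid_t lwp, int *status,
			       const sigset_t *suspend_on)
{
  for (;;)
    {
      pid_t ret = waitpid (lwp, status, __WALL | WNOHANG);

      if (ret != 0)
	return ret;	/* An event arrived, or waitpid failed.  */

      /* Nothing yet; sleep until a SIGCHLD is delivered, then retry.  */
      sigsuspend (suspend_on);
    }
}
#endif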
2166/* Send a SIGSTOP to LP. */
2167
2168static int
2169stop_callback (struct lwp_info *lp, void *data)
2170{
2171 if (!lp->stopped && !lp->signalled)
2172 {
2173 int ret;
2174
2175 if (debug_linux_nat)
2176 {
2177 fprintf_unfiltered (gdb_stdlog,
2178 "SC: kill %s **<SIGSTOP>**\n",
2179 target_pid_to_str (lp->ptid));
2180 }
2181 errno = 0;
dfd4cc63 2182 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2183 if (debug_linux_nat)
2184 {
2185 fprintf_unfiltered (gdb_stdlog,
2186 "SC: lwp kill %d %s\n",
2187 ret,
2188 errno ? safe_strerror (errno) : "ERRNO-OK");
2189 }
2190
2191 lp->signalled = 1;
2192 gdb_assert (lp->status == 0);
2193 }
2194
2195 return 0;
2196}
2197
7b50312a
PA
2198/* Request a stop on LWP. */
2199
2200void
2201linux_stop_lwp (struct lwp_info *lwp)
2202{
2203 stop_callback (lwp, NULL);
2204}
2205
2db9a427
PA
2206/* See linux-nat.h */
2207
2208void
2209linux_stop_and_wait_all_lwps (void)
2210{
2211 /* Stop all LWP's ... */
2212 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2213
2214 /* ... and wait until all of them have reported back that
2215 they're no longer running. */
2216 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2217}
2218
2219/* See linux-nat.h */
2220
2221void
2222linux_unstop_all_lwps (void)
2223{
2224 iterate_over_lwps (minus_one_ptid,
2225 resume_stopped_resumed_lwps, &minus_one_ptid);
2226}
2227
57380f4e 2228/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2229
2230static int
57380f4e
DJ
2231linux_nat_has_pending_sigint (int pid)
2232{
2233 sigset_t pending, blocked, ignored;
57380f4e
DJ
2234
2235 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2236
2237 if (sigismember (&pending, SIGINT)
2238 && !sigismember (&ignored, SIGINT))
2239 return 1;
2240
2241 return 0;
2242}
2243
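/* Illustrative note, not part of GDB: linux_proc_pending_signals, used
   above, reads the SigPnd/SigBlk/SigIgn lines of /proc/<pid>/status,
   which are hexadecimal masks with bit (signal number - 1) set for each
   signal.  Testing one signal in such a raw mask is a single bit check;
   the helper below is hypothetical.  */
#if 0
static int
example_signal_in_mask (unsigned long long mask, int sig)
{
  /* Signal numbers start at 1, so SIGINT (2) lives in bit 1.  */
  return (int) ((mask >> (sig - 1)) & 1ULL);
}
#endif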
2244/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2245
2246static int
2247set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2248{
57380f4e
DJ
2249 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2250 flag to consume the next one. */
2251 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2252 && WSTOPSIG (lp->status) == SIGINT)
2253 lp->status = 0;
2254 else
2255 lp->ignore_sigint = 1;
2256
2257 return 0;
2258}
2259
2260/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2261 This function is called after we know the LWP has stopped; if the LWP
2262 stopped before the expected SIGINT was delivered, then it will never have
2263 arrived. Also, if the signal was delivered to a shared queue and consumed
2264 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2265
57380f4e
DJ
2266static void
2267maybe_clear_ignore_sigint (struct lwp_info *lp)
2268{
2269 if (!lp->ignore_sigint)
2270 return;
2271
dfd4cc63 2272 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2273 {
2274 if (debug_linux_nat)
2275 fprintf_unfiltered (gdb_stdlog,
2276 "MCIS: Clearing bogus flag for %s\n",
2277 target_pid_to_str (lp->ptid));
2278 lp->ignore_sigint = 0;
2279 }
2280}
2281
ebec9a0f
PA
2282/* Fetch the possible triggered data watchpoint info and store it in
2283 LP.
2284
2285 On some archs, like x86, that use debug registers to set
2286 watchpoints, it's possible that the way to know which watched
2287 address trapped, is to check the register that is used to select
2288 which address to watch. Problem is, between setting the watchpoint
2289 and reading back which data address trapped, the user may change
2290 the set of watchpoints, and, as a consequence, GDB changes the
2291 debug registers in the inferior. To avoid reading back a stale
2292 stopped-data-address when that happens, we cache in LP the fact
2293 that a watchpoint trapped, and the corresponding data address, as
2294 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2295 registers meanwhile, we have the cached data we can rely on. */
2296
9c02b525
PA
2297static int
2298check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f
PA
2299{
2300 struct cleanup *old_chain;
2301
2302 if (linux_ops->to_stopped_by_watchpoint == NULL)
9c02b525 2303 return 0;
ebec9a0f
PA
2304
2305 old_chain = save_inferior_ptid ();
2306 inferior_ptid = lp->ptid;
2307
9c02b525 2308 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
ebec9a0f 2309 {
15c66dd6 2310 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
9c02b525 2311
ebec9a0f
PA
2312 if (linux_ops->to_stopped_data_address != NULL)
2313 lp->stopped_data_address_p =
2314 linux_ops->to_stopped_data_address (&current_target,
2315 &lp->stopped_data_address);
2316 else
2317 lp->stopped_data_address_p = 0;
2318 }
2319
2320 do_cleanups (old_chain);
9c02b525 2321
15c66dd6 2322 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2323}
2324
9c02b525 2325/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f
PA
2326
2327static int
6a109b6b 2328linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2329{
2330 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2331
2332 gdb_assert (lp != NULL);
2333
15c66dd6 2334 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2335}
2336
2337static int
2338linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2339{
2340 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2341
2342 gdb_assert (lp != NULL);
2343
2344 *addr_p = lp->stopped_data_address;
2345
2346 return lp->stopped_data_address_p;
2347}
2348
26ab7092
JK
2349/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2350
2351static int
2352sigtrap_is_event (int status)
2353{
2354 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2355}
2356
26ab7092
JK
2357/* Set an alternative recognizer for SIGTRAP-like events. If
2358 breakpoint_inserted_here_p reports a breakpoint there, then
2359 gdbarch_decr_pc_after_break will be applied. */
2360
2361void
2362linux_nat_set_status_is_event (struct target_ops *t,
2363 int (*status_is_event) (int status))
2364{
2365 linux_nat_status_is_event = status_is_event;
2366}
2367
57380f4e
DJ
2368/* Wait until LP is stopped. */
2369
2370static int
2371stop_wait_callback (struct lwp_info *lp, void *data)
2372{
c9657e70 2373 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2374
2375 /* If this is a vfork parent, bail out, it is not going to report
2376 any SIGSTOP until the vfork is done with. */
2377 if (inf->vfork_child != NULL)
2378 return 0;
2379
d6b0e80f
AC
2380 if (!lp->stopped)
2381 {
2382 int status;
2383
2384 status = wait_lwp (lp);
2385 if (status == 0)
2386 return 0;
2387
57380f4e
DJ
2388 if (lp->ignore_sigint && WIFSTOPPED (status)
2389 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2390 {
57380f4e 2391 lp->ignore_sigint = 0;
d6b0e80f
AC
2392
2393 errno = 0;
dfd4cc63 2394 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2395 lp->stopped = 0;
d6b0e80f
AC
2396 if (debug_linux_nat)
2397 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2398 "PTRACE_CONT %s, 0, 0 (%s) "
2399 "(discarding SIGINT)\n",
d6b0e80f
AC
2400 target_pid_to_str (lp->ptid),
2401 errno ? safe_strerror (errno) : "OK");
2402
57380f4e 2403 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2404 }
2405
57380f4e
DJ
2406 maybe_clear_ignore_sigint (lp);
2407
d6b0e80f
AC
2408 if (WSTOPSIG (status) != SIGSTOP)
2409 {
e5ef252a 2410 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2411
e5ef252a
PA
2412 if (debug_linux_nat)
2413 fprintf_unfiltered (gdb_stdlog,
2414 "SWC: Pending event %s in %s\n",
2415 status_to_str ((int) status),
2416 target_pid_to_str (lp->ptid));
2417
2418 /* Save the sigtrap event. */
2419 lp->status = status;
e5ef252a 2420 gdb_assert (lp->signalled);
e7ad2f14 2421 save_stop_reason (lp);
d6b0e80f
AC
2422 }
2423 else
2424 {
2425 /* We caught the SIGSTOP that we intended to catch, so
2426 there's no SIGSTOP pending. */
e5ef252a
PA
2427
2428 if (debug_linux_nat)
2429 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2430 "SWC: Expected SIGSTOP caught for %s.\n",
e5ef252a
PA
2431 target_pid_to_str (lp->ptid));
2432
e5ef252a
PA
2433 /* Reset SIGNALLED only after the stop_wait_callback call
2434 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2435 lp->signalled = 0;
2436 }
2437 }
2438
2439 return 0;
2440}
2441
9c02b525
PA
2442/* Return non-zero if LP has a wait status pending. Discard the
2443 pending event and resume the LWP if the event that originally
2444 caused the stop became uninteresting. */
d6b0e80f
AC
2445
2446static int
2447status_callback (struct lwp_info *lp, void *data)
2448{
2449 /* Only report a pending wait status if we pretend that this has
2450 indeed been resumed. */
ca2163eb
PA
2451 if (!lp->resumed)
2452 return 0;
2453
eb54c8bf
PA
2454 if (!lwp_status_pending_p (lp))
2455 return 0;
2456
15c66dd6
PA
2457 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2458 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2459 {
2460 struct regcache *regcache = get_thread_regcache (lp->ptid);
2461 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2462 CORE_ADDR pc;
2463 int discard = 0;
2464
9c02b525
PA
2465 pc = regcache_read_pc (regcache);
2466
2467 if (pc != lp->stop_pc)
2468 {
2469 if (debug_linux_nat)
2470 fprintf_unfiltered (gdb_stdlog,
2471 "SC: PC of %s changed. was=%s, now=%s\n",
2472 target_pid_to_str (lp->ptid),
2473 paddress (target_gdbarch (), lp->stop_pc),
2474 paddress (target_gdbarch (), pc));
2475 discard = 1;
2476 }
faf09f01
PA
2477
2478#if !USE_SIGTRAP_SIGINFO
9c02b525
PA
2479 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2480 {
2481 if (debug_linux_nat)
2482 fprintf_unfiltered (gdb_stdlog,
2483 "SC: previous breakpoint of %s, at %s gone\n",
2484 target_pid_to_str (lp->ptid),
2485 paddress (target_gdbarch (), lp->stop_pc));
2486
2487 discard = 1;
2488 }
faf09f01 2489#endif
9c02b525
PA
2490
2491 if (discard)
2492 {
2493 if (debug_linux_nat)
2494 fprintf_unfiltered (gdb_stdlog,
2495 "SC: pending event of %s cancelled.\n",
2496 target_pid_to_str (lp->ptid));
2497
2498 lp->status = 0;
2499 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2500 return 0;
2501 }
9c02b525
PA
2502 }
2503
eb54c8bf 2504 return 1;
d6b0e80f
AC
2505}
2506
d6b0e80f
AC
2507/* Count the LWP's that have had events. */
2508
2509static int
2510count_events_callback (struct lwp_info *lp, void *data)
2511{
9a3c8263 2512 int *count = (int *) data;
d6b0e80f
AC
2513
2514 gdb_assert (count != NULL);
2515
9c02b525
PA
2516 /* Select only resumed LWPs that have an event pending. */
2517 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2518 (*count)++;
2519
2520 return 0;
2521}
2522
2523/* Select the LWP (if any) that is currently being single-stepped. */
2524
2525static int
2526select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2527{
25289eb2
PA
2528 if (lp->last_resume_kind == resume_step
2529 && lp->status != 0)
d6b0e80f
AC
2530 return 1;
2531 else
2532 return 0;
2533}
2534
8a99810d
PA
2535/* Returns true if LP has a status pending. */
2536
2537static int
2538lwp_status_pending_p (struct lwp_info *lp)
2539{
2540 /* We check for lp->waitstatus in addition to lp->status, because we
2541 can have pending process exits recorded in lp->status and
2542 W_EXITCODE(0,0) happens to be 0. */
2543 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2544}
2545
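/* Illustrative note, not part of GDB: the reason lp->status alone is not
   enough is that an exit with code 0 encodes to the integer 0, which is
   indistinguishable from "no event pending".  A tiny check, with a
   hypothetical name (W_STOPCODE/W_EXITCODE availability via glibc or
   gdb_wait.h is assumed):  */
#if 0
#include <assert.h>
#include <sys/wait.h>

static void
example_w_exitcode_is_ambiguous (void)
{
  /* "exited with status 0, no signal" encodes to plain 0.  */
  assert (W_EXITCODE (0, 0) == 0);
}
#endif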
b90fc188 2546/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2547
2548static int
2549select_event_lwp_callback (struct lwp_info *lp, void *data)
2550{
9a3c8263 2551 int *selector = (int *) data;
d6b0e80f
AC
2552
2553 gdb_assert (selector != NULL);
2554
9c02b525
PA
2555 /* Select only resumed LWPs that have an event pending. */
2556 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2557 if ((*selector)-- == 0)
2558 return 1;
2559
2560 return 0;
2561}
2562
e7ad2f14
PA
2563/* Called when the LWP stopped for a signal/trap. If it stopped for a
2564 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2565 and save the result in the LWP's stop_reason field. If it stopped
2566 for a breakpoint, decrement the PC if necessary on the lwp's
2567 architecture. */
9c02b525 2568
e7ad2f14
PA
2569static void
2570save_stop_reason (struct lwp_info *lp)
710151dd 2571{
e7ad2f14
PA
2572 struct regcache *regcache;
2573 struct gdbarch *gdbarch;
515630c5 2574 CORE_ADDR pc;
9c02b525 2575 CORE_ADDR sw_bp_pc;
faf09f01
PA
2576#if USE_SIGTRAP_SIGINFO
2577 siginfo_t siginfo;
2578#endif
9c02b525 2579
e7ad2f14
PA
2580 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2581 gdb_assert (lp->status != 0);
2582
2583 if (!linux_nat_status_is_event (lp->status))
2584 return;
2585
2586 regcache = get_thread_regcache (lp->ptid);
2587 gdbarch = get_regcache_arch (regcache);
2588
9c02b525 2589 pc = regcache_read_pc (regcache);
527a273a 2590 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2591
faf09f01
PA
2592#if USE_SIGTRAP_SIGINFO
2593 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2594 {
2595 if (siginfo.si_signo == SIGTRAP)
2596 {
e7ad2f14
PA
2597 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2598 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2599 {
e7ad2f14
PA
2600 /* The si_code is ambiguous on this arch -- check debug
2601 registers. */
2602 if (!check_stopped_by_watchpoint (lp))
2603 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2604 }
2605 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2606 {
2607 /* If we determine the LWP stopped for a SW breakpoint,
2608 trust it. Particularly don't check watchpoint
2609 registers, because at least on s390, we'd find
2610 stopped-by-watchpoint as long as there's a watchpoint
2611 set. */
faf09f01 2612 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2613 }
e7ad2f14 2614 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2615 {
e7ad2f14
PA
2616 /* This can indicate either a hardware breakpoint or
2617 hardware watchpoint. Check debug registers. */
2618 if (!check_stopped_by_watchpoint (lp))
2619 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2620 }
2bf6fb9d
PA
2621 else if (siginfo.si_code == TRAP_TRACE)
2622 {
2623 if (debug_linux_nat)
2624 fprintf_unfiltered (gdb_stdlog,
2625 "CSBB: %s stopped by trace\n",
2626 target_pid_to_str (lp->ptid));
e7ad2f14
PA
2627
2628 /* We may have single stepped an instruction that
2629 triggered a watchpoint. In that case, on some
2630 architectures (such as x86), instead of TRAP_HWBKPT,
2631 si_code indicates TRAP_TRACE, and we need to check
2632 the debug registers separately. */
2633 check_stopped_by_watchpoint (lp);
2bf6fb9d 2634 }
faf09f01
PA
2635 }
2636 }
2637#else
9c02b525
PA
2638 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2639 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2640 sw_bp_pc))
710151dd 2641 {
9c02b525
PA
2642 /* The LWP was either continued, or stepped a software
2643 breakpoint instruction. */
e7ad2f14
PA
2644 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2645 }
2646
2647 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2648 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2649
2650 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2651 check_stopped_by_watchpoint (lp);
2652#endif
2653
2654 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2655 {
710151dd
PA
2656 if (debug_linux_nat)
2657 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2658 "CSBB: %s stopped by software breakpoint\n",
710151dd
PA
2659 target_pid_to_str (lp->ptid));
2660
2661 /* Back up the PC if necessary. */
9c02b525
PA
2662 if (pc != sw_bp_pc)
2663 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2664
e7ad2f14
PA
2665 /* Update this so we record the correct stop PC below. */
2666 pc = sw_bp_pc;
710151dd 2667 }
e7ad2f14 2668 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2669 {
2670 if (debug_linux_nat)
2671 fprintf_unfiltered (gdb_stdlog,
e7ad2f14
PA
2672 "CSBB: %s stopped by hardware breakpoint\n",
2673 target_pid_to_str (lp->ptid));
2674 }
2675 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2676 {
2677 if (debug_linux_nat)
2678 fprintf_unfiltered (gdb_stdlog,
2679 "CSBB: %s stopped by hardware watchpoint\n",
9c02b525 2680 target_pid_to_str (lp->ptid));
9c02b525 2681 }
d6b0e80f 2682
e7ad2f14 2683 lp->stop_pc = pc;
d6b0e80f
AC
2684}
2685
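/* Illustrative sketch, not part of GDB: on targets where SIGTRAP siginfo
   is trustworthy (USE_SIGTRAP_SIGINFO above), si_code tells the trap
   sources apart: TRAP_BRKPT for a software breakpoint, TRAP_HWBKPT for a
   hardware breakpoint/watchpoint, TRAP_TRACE for a single-step.  A
   minimal classifier with a hypothetical name; TRAP_HWBKPT is a Linux
   extension and may need _GNU_SOURCE.  */
#if 0
#include <signal.h>

static const char *
example_classify_sigtrap (const siginfo_t *info)
{
  if (info->si_signo != SIGTRAP)
    return "not a trap";
  switch (info->si_code)
    {
    case TRAP_BRKPT:
      return "software breakpoint";
    case TRAP_HWBKPT:
      return "hardware breakpoint/watchpoint";
    case TRAP_TRACE:
      return "single-step";
    default:
      return "other";
    }
}
#endif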
faf09f01
PA
2686
2687/* Returns true if the LWP had stopped for a software breakpoint. */
2688
2689static int
2690linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
2691{
2692 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2693
2694 gdb_assert (lp != NULL);
2695
2696 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2697}
2698
2699/* Implement the supports_stopped_by_sw_breakpoint method. */
2700
2701static int
2702linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2703{
2704 return USE_SIGTRAP_SIGINFO;
2705}
2706
2707/* Returns true if the LWP had stopped for a hardware
2708 breakpoint/watchpoint. */
2709
2710static int
2711linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
2712{
2713 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2714
2715 gdb_assert (lp != NULL);
2716
2717 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2718}
2719
2720/* Implement the supports_stopped_by_hw_breakpoint method. */
2721
2722static int
2723linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2724{
2725 return USE_SIGTRAP_SIGINFO;
2726}
2727
d6b0e80f
AC
2728/* Select one LWP out of those that have events pending. */
2729
2730static void
d90e17a7 2731select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2732{
2733 int num_events = 0;
2734 int random_selector;
9c02b525 2735 struct lwp_info *event_lp = NULL;
d6b0e80f 2736
ac264b3b 2737 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2738 (*orig_lp)->status = *status;
2739
9c02b525
PA
2740 /* In all-stop, give preference to the LWP that is being
2741 single-stepped. There will be at most one, and it will be the
2742 LWP that the core is most interested in. If we didn't do this,
2743 then we'd have to handle pending step SIGTRAPs somehow in case
2744 the core later continues the previously-stepped thread, as
2745 otherwise we'd report the pending SIGTRAP then, and the core, not
2746 having stepped the thread, wouldn't understand what the trap was
2747 for, and therefore would report it to the user as a random
2748 signal. */
fbea99ea 2749 if (!target_is_non_stop_p ())
d6b0e80f 2750 {
9c02b525
PA
2751 event_lp = iterate_over_lwps (filter,
2752 select_singlestep_lwp_callback, NULL);
2753 if (event_lp != NULL)
2754 {
2755 if (debug_linux_nat)
2756 fprintf_unfiltered (gdb_stdlog,
2757 "SEL: Select single-step %s\n",
2758 target_pid_to_str (event_lp->ptid));
2759 }
d6b0e80f 2760 }
9c02b525
PA
2761
2762 if (event_lp == NULL)
d6b0e80f 2763 {
9c02b525 2764 /* Pick one at random, out of those which have had events. */
d6b0e80f 2765
9c02b525 2766 /* First see how many events we have. */
d90e17a7 2767 iterate_over_lwps (filter, count_events_callback, &num_events);
8bf3b159 2768 gdb_assert (num_events > 0);
d6b0e80f 2769
9c02b525
PA
2770 /* Now randomly pick a LWP out of those that have had
2771 events. */
d6b0e80f
AC
2772 random_selector = (int)
2773 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2774
2775 if (debug_linux_nat && num_events > 1)
2776 fprintf_unfiltered (gdb_stdlog,
9c02b525 2777 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2778 num_events, random_selector);
2779
d90e17a7
PA
2780 event_lp = iterate_over_lwps (filter,
2781 select_event_lwp_callback,
d6b0e80f
AC
2782 &random_selector);
2783 }
2784
2785 if (event_lp != NULL)
2786 {
2787 /* Switch the event LWP. */
2788 *orig_lp = event_lp;
2789 *status = event_lp->status;
2790 }
2791
2792 /* Flush the wait status for the event LWP. */
2793 (*orig_lp)->status = 0;
2794}
2795
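/* Illustrative note, not part of GDB: the selector expression used above,
   (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0)), scales
   rand()'s range [0, RAND_MAX] down to an index in [0, num_events - 1];
   e.g. with num_events == 3, the lower, middle and upper thirds of the
   rand() range map to 0, 1 and 2.  A stand-alone equivalent with a
   hypothetical name:  */
#if 0
#include <stdlib.h>

static int
example_pick_event_index (int num_events)
{
  /* Never yields num_events, because rand () <= RAND_MAX < RAND_MAX + 1.  */
  return (int) ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
}
#endif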
2796/* Return non-zero if LP has been resumed. */
2797
2798static int
2799resumed_callback (struct lwp_info *lp, void *data)
2800{
2801 return lp->resumed;
2802}
2803
02f3fc28 2804/* Check if we should go on and pass this event to common code.
9c02b525 2805 Return the affected lwp if we are, or NULL otherwise. */
12d9289a 2806
02f3fc28 2807static struct lwp_info *
9c02b525 2808linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2809{
2810 struct lwp_info *lp;
89a5711c 2811 int event = linux_ptrace_get_extended_event (status);
02f3fc28
PA
2812
2813 lp = find_lwp_pid (pid_to_ptid (lwpid));
2814
2815 /* Check for stop events reported by a process we didn't already
2816 know about - anything not already in our LWP list.
2817
2818 If we're expecting to receive stopped processes after
2819 fork, vfork, and clone events, then we'll just add the
2820 new one to our list and go back to waiting for the event
2821 to be reported - the stopped process might be returned
0e5bf2a8
PA
2822 from waitpid before or after the event is.
2823
2824 But note the case of a non-leader thread exec'ing after the
2825 leader having exited, and gone from our lists. The non-leader
2826 thread changes its tid to the tgid. */
2827
2828 if (WIFSTOPPED (status) && lp == NULL
89a5711c 2829 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
2830 {
2831 /* A multi-thread exec after we had seen the leader exiting. */
2832 if (debug_linux_nat)
2833 fprintf_unfiltered (gdb_stdlog,
2834 "LLW: Re-adding thread group leader LWP %d.\n",
2835 lwpid);
2836
dfd4cc63 2837 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
2838 lp->stopped = 1;
2839 lp->resumed = 1;
2840 add_thread (lp->ptid);
2841 }
2842
02f3fc28
PA
2843 if (WIFSTOPPED (status) && !lp)
2844 {
3b27ef47
PA
2845 if (debug_linux_nat)
2846 fprintf_unfiltered (gdb_stdlog,
2847 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
2848 (long) lwpid, status_to_str (status));
84636d28 2849 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
2850 return NULL;
2851 }
2852
2853 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 2854 our list, i.e. not part of the current process. This can happen
fd62cb89 2855 if we detach from a program we originally forked and then it
02f3fc28
PA
2856 exits. */
2857 if (!WIFSTOPPED (status) && !lp)
2858 return NULL;
2859
8817a6f2
PA
2860 /* This LWP is stopped now. (And if dead, this prevents it from
2861 ever being continued.) */
2862 lp->stopped = 1;
2863
8784d563
PA
2864 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2865 {
2866 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2867 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2868
de0d863e 2869 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2870 lp->must_set_ptrace_flags = 0;
2871 }
2872
ca2163eb
PA
2873 /* Handle GNU/Linux's syscall SIGTRAPs. */
2874 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2875 {
2876 /* No longer need the sysgood bit. The ptrace event ends up
2877 recorded in lp->waitstatus if we care for it. We can carry
2878 on handling the event like a regular SIGTRAP from here
2879 on. */
2880 status = W_STOPCODE (SIGTRAP);
2881 if (linux_handle_syscall_trap (lp, 0))
2882 return NULL;
2883 }
bfd09d20
JS
2884 else
2885 {
2886 /* Almost all other ptrace-stops are known to be outside of system
2887 calls, with further exceptions in linux_handle_extended_wait. */
2888 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2889 }
02f3fc28 2890
ca2163eb 2891 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2892 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2893 && linux_is_extended_waitstatus (status))
02f3fc28
PA
2894 {
2895 if (debug_linux_nat)
2896 fprintf_unfiltered (gdb_stdlog,
2897 "LLW: Handling extended status 0x%06x\n",
2898 status);
4dd63d48 2899 if (linux_handle_extended_wait (lp, status))
02f3fc28
PA
2900 return NULL;
2901 }
2902
2903 /* Check if the thread has exited. */
9c02b525
PA
2904 if (WIFEXITED (status) || WIFSIGNALED (status))
2905 {
2906 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 2907 {
9c02b525
PA
2908 if (debug_linux_nat)
2909 fprintf_unfiltered (gdb_stdlog,
2910 "LLW: %s exited.\n",
2911 target_pid_to_str (lp->ptid));
2912
4a6ed09b
PA
2913 /* If there is at least one more LWP, then the exit signal
2914 was not the end of the debugged application and should be
2915 ignored. */
2916 exit_lwp (lp);
2917 return NULL;
02f3fc28
PA
2918 }
2919
77598427
PA
2920 /* Note that even if the leader was ptrace-stopped, it can still
2921 exit, if e.g., some other thread brings down the whole
2922 process (calls `exit'). So don't assert that the lwp is
2923 resumed. */
02f3fc28
PA
2924 if (debug_linux_nat)
2925 fprintf_unfiltered (gdb_stdlog,
77598427
PA
2926 "Process %ld exited (resumed=%d)\n",
2927 ptid_get_lwp (lp->ptid), lp->resumed);
02f3fc28 2928
9c02b525
PA
2929 /* This was the last lwp in the process. Since events are
2930 serialized to GDB core, we may not be able to report this one
2931 right now, but GDB core and the other target layers will want
2932 to be notified about the exit code/signal; leave the status
2933 pending for the next time we're able to report it. */
2934
2935 /* Dead LWP's aren't expected to report a pending SIGSTOP. */
2936 lp->signalled = 0;
2937
2938 /* Store the pending event in the waitstatus, because
2939 W_EXITCODE(0,0) == 0. */
2940 store_waitstatus (&lp->waitstatus, status);
2941 return lp;
02f3fc28
PA
2942 }
2943
02f3fc28
PA
2944 /* Make sure we don't report a SIGSTOP that we sent ourselves in
2945 an attempt to stop an LWP. */
2946 if (lp->signalled
2947 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
2948 {
02f3fc28
PA
2949 lp->signalled = 0;
2950
2bf6fb9d 2951 if (lp->last_resume_kind == resume_stop)
25289eb2 2952 {
2bf6fb9d
PA
2953 if (debug_linux_nat)
2954 fprintf_unfiltered (gdb_stdlog,
2955 "LLW: resume_stop SIGSTOP caught for %s.\n",
2956 target_pid_to_str (lp->ptid));
2957 }
2958 else
2959 {
2960 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 2961
25289eb2
PA
2962 if (debug_linux_nat)
2963 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2964 "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
25289eb2
PA
2965 lp->step ?
2966 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2967 target_pid_to_str (lp->ptid));
02f3fc28 2968
2bf6fb9d 2969 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 2970 gdb_assert (lp->resumed);
25289eb2
PA
2971 return NULL;
2972 }
02f3fc28
PA
2973 }
2974
57380f4e
DJ
2975 /* Make sure we don't report a SIGINT that we have already displayed
2976 for another thread. */
2977 if (lp->ignore_sigint
2978 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
2979 {
2980 if (debug_linux_nat)
2981 fprintf_unfiltered (gdb_stdlog,
2982 "LLW: Delayed SIGINT caught for %s.\n",
2983 target_pid_to_str (lp->ptid));
2984
2985 /* This is a delayed SIGINT. */
2986 lp->ignore_sigint = 0;
2987
8a99810d 2988 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
57380f4e
DJ
2989 if (debug_linux_nat)
2990 fprintf_unfiltered (gdb_stdlog,
2991 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
2992 lp->step ?
2993 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2994 target_pid_to_str (lp->ptid));
57380f4e
DJ
2995 gdb_assert (lp->resumed);
2996
2997 /* Discard the event. */
2998 return NULL;
2999 }
3000
9c02b525
PA
3001 /* Don't report signals that GDB isn't interested in, such as
3002 signals that are neither printed nor stopped upon. Stopping all
3003 threads can be a bit time-consuming so if we want decent
3004 performance with heavily multi-threaded programs, especially when
3005 they're using a high frequency timer, we'd better avoid it if we
3006 can. */
3007 if (WIFSTOPPED (status))
3008 {
3009 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3010
fbea99ea 3011 if (!target_is_non_stop_p ())
9c02b525
PA
3012 {
3013 /* Only do the below in all-stop, as we currently use SIGSTOP
3014 to implement target_stop (see linux_nat_stop) in
3015 non-stop. */
3016 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3017 {
3018 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3019 forwarded to the entire process group, that is, all LWPs
3020 will receive it - unless they're using CLONE_THREAD to
3021 share signals. Since we only want to report it once, we
3022 mark it as ignored for all LWPs except this one. */
3023 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3024 set_ignore_sigint, NULL);
3025 lp->ignore_sigint = 0;
3026 }
3027 else
3028 maybe_clear_ignore_sigint (lp);
3029 }
3030
3031 /* When using hardware single-step, we need to report every signal.
c9587f88
AT
3032 Otherwise, signals in pass_mask may be short-circuited,
3033 except for signals that might be caused by a breakpoint. */
9c02b525 3034 if (!lp->step
c9587f88
AT
3035 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3036 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3037 {
3038 linux_resume_one_lwp (lp, lp->step, signo);
3039 if (debug_linux_nat)
3040 fprintf_unfiltered (gdb_stdlog,
3041 "LLW: %s %s, %s (preempt 'handle')\n",
3042 lp->step ?
3043 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3044 target_pid_to_str (lp->ptid),
3045 (signo != GDB_SIGNAL_0
3046 ? strsignal (gdb_signal_to_host (signo))
3047 : "0"));
3048 return NULL;
3049 }
3050 }
3051
02f3fc28
PA
3052 /* An interesting event. */
3053 gdb_assert (lp);
ca2163eb 3054 lp->status = status;
e7ad2f14 3055 save_stop_reason (lp);
02f3fc28
PA
3056 return lp;
3057}
3058
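/* Illustrative note, not part of GDB: SYSCALL_SIGTRAP, tested above, is
   SIGTRAP | 0x80, the stop signal the kernel reports for syscall stops
   when PTRACE_O_TRACESYSGOOD is set; rewriting the status with
   W_STOPCODE (SIGTRAP) strips that marker so the rest of the code sees an
   ordinary SIGTRAP stop.  A small check of the encoding, with a
   hypothetical name (W_STOPCODE from glibc/gdb_wait.h is assumed):  */
#if 0
#include <assert.h>
#include <signal.h>
#include <sys/wait.h>

static void
example_sysgood_encoding (void)
{
  int raw = W_STOPCODE (SIGTRAP | 0x80);	/* Syscall stop as reported.  */
  int plain = W_STOPCODE (SIGTRAP);		/* After stripping 0x80.  */

  assert (WIFSTOPPED (raw) && WSTOPSIG (raw) == (SIGTRAP | 0x80));
  assert (WIFSTOPPED (plain) && WSTOPSIG (plain) == SIGTRAP);
}
#endif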
0e5bf2a8
PA
3059/* Detect zombie thread group leaders, and "exit" them. We can't reap
3060 their exits until all other threads in the group have exited. */
3061
3062static void
3063check_zombie_leaders (void)
3064{
3065 struct inferior *inf;
3066
3067 ALL_INFERIORS (inf)
3068 {
3069 struct lwp_info *leader_lp;
3070
3071 if (inf->pid == 0)
3072 continue;
3073
3074 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3075 if (leader_lp != NULL
3076 /* Check if there are other threads in the group, as we may
3077 have raced with the inferior simply exiting. */
3078 && num_lwps (inf->pid) > 1
5f572dec 3079 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3080 {
3081 if (debug_linux_nat)
3082 fprintf_unfiltered (gdb_stdlog,
3083 "CZL: Thread group leader %d zombie "
3084 "(it exited, or another thread execd).\n",
3085 inf->pid);
3086
3087 /* A leader zombie can mean one of two things:
3088
3089 - It exited, and there's an exit status pending
3090 and available, or only the leader exited (not the whole
3091 program). In the latter case, we can't waitpid the
3092 leader's exit status until all other threads are gone.
3093
3094 - There are 3 or more threads in the group, and a thread
4a6ed09b
PA
3095 other than the leader exec'd. See comments on exec
3096 events at the top of the file. We could try
0e5bf2a8
PA
3097 distinguishing the exit and exec cases, by waiting once
3098 more, and seeing if something comes out, but it doesn't
3099 sound useful. The previous leader _does_ go away, and
3100 we'll re-add the new one once we see the exec event
3101 (which is just the same as what would happen if the
3102 previous leader did exit voluntarily before some other
3103 thread execs). */
3104
3105 if (debug_linux_nat)
3106 fprintf_unfiltered (gdb_stdlog,
3107 "CZL: Thread group leader %d vanished.\n",
3108 inf->pid);
3109 exit_lwp (leader_lp);
3110 }
3111 }
3112}
3113
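/* Illustrative sketch, not part of GDB: linux_proc_pid_is_zombie, used
   above, amounts to reading /proc/<pid>/status and checking whether the
   "State:" line reports 'Z'.  A minimal stand-alone version, with
   hypothetical names and none of the caching the real helper has:  */
#if 0
#include <stdio.h>
#include <string.h>

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;
  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	zombie = (*p == 'Z');
	break;
      }
  fclose (f);
  return zombie;
}
#endif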
d6b0e80f 3114static ptid_t
7feb7d06 3115linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3116 ptid_t ptid, struct target_waitstatus *ourstatus,
3117 int target_options)
d6b0e80f 3118{
fc9b8e47 3119 sigset_t prev_mask;
4b60df3d 3120 enum resume_kind last_resume_kind;
12d9289a 3121 struct lwp_info *lp;
12d9289a 3122 int status;
d6b0e80f 3123
01124a23 3124 if (debug_linux_nat)
b84876c2
PA
3125 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3126
f973ed9c
DJ
3127 /* The first time we get here after starting a new inferior, we may
3128 not have added it to the LWP list yet - this is the earliest
3129 moment at which we know its PID. */
d90e17a7 3130 if (ptid_is_pid (inferior_ptid))
f973ed9c 3131 {
27c9d204
PA
3132 /* Upgrade the main thread's ptid. */
3133 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3134 ptid_build (ptid_get_pid (inferior_ptid),
3135 ptid_get_pid (inferior_ptid), 0));
27c9d204 3136
26cb8b7c 3137 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3138 lp->resumed = 1;
3139 }
3140
12696c10 3141 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3142 block_child_signals (&prev_mask);
d6b0e80f 3143
d6b0e80f 3144 /* First check if there is a LWP with a wait status pending. */
8a99810d
PA
3145 lp = iterate_over_lwps (ptid, status_callback, NULL);
3146 if (lp != NULL)
d6b0e80f
AC
3147 {
3148 if (debug_linux_nat)
d6b0e80f
AC
3149 fprintf_unfiltered (gdb_stdlog,
3150 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3151 status_to_str (lp->status),
d6b0e80f 3152 target_pid_to_str (lp->ptid));
d6b0e80f
AC
3153 }
3154
9c02b525
PA
3155 /* But if we don't find a pending event, we'll have to wait. Always
3156 pull all events out of the kernel. We'll randomly select an
3157 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3158
d90e17a7 3159 while (lp == NULL)
d6b0e80f
AC
3160 {
3161 pid_t lwpid;
3162
0e5bf2a8
PA
3163 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3164 quirks:
3165
3166 - If the thread group leader exits while other threads in the
3167 thread group still exist, waitpid(TGID, ...) hangs. That
3168 waitpid won't return an exit status until the other threads
3169 in the group are reaped.
3170
3171 - When a non-leader thread execs, that thread just vanishes
3172 without reporting an exit (so we'd hang if we waited for it
3173 explicitly in that case). The exec event is reported to
3174 the TGID pid. */
3175
3176 errno = 0;
4a6ed09b 3177 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8
PA
3178
3179 if (debug_linux_nat)
3180 fprintf_unfiltered (gdb_stdlog,
3181 "LNW: waitpid(-1, ...) returned %d, %s\n",
3182 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3183
d6b0e80f
AC
3184 if (lwpid > 0)
3185 {
d6b0e80f
AC
3186 if (debug_linux_nat)
3187 {
3188 fprintf_unfiltered (gdb_stdlog,
3189 "LLW: waitpid %ld received %s\n",
3190 (long) lwpid, status_to_str (status));
3191 }
3192
9c02b525 3193 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3194 /* Retry until nothing comes out of waitpid. A single
3195 SIGCHLD can indicate more than one child stopped. */
3196 continue;
d6b0e80f
AC
3197 }
3198
20ba1ce6
PA
3199 /* Now that we've pulled all events out of the kernel, resume
3200 LWPs that don't have an interesting event to report. */
3201 iterate_over_lwps (minus_one_ptid,
3202 resume_stopped_resumed_lwps, &minus_one_ptid);
3203
3204 /* ... and find an LWP with a status to report to the core, if
3205 any. */
9c02b525
PA
3206 lp = iterate_over_lwps (ptid, status_callback, NULL);
3207 if (lp != NULL)
3208 break;
3209
0e5bf2a8
PA
3210 /* Check for zombie thread group leaders. Those can't be reaped
3211 until all other threads in the thread group are. */
3212 check_zombie_leaders ();
d6b0e80f 3213
0e5bf2a8
PA
3214 /* If there are no resumed children left, bail. We'd be stuck
3215 forever in the sigsuspend call below otherwise. */
3216 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3217 {
3218 if (debug_linux_nat)
3219 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3220
0e5bf2a8 3221 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3222
0e5bf2a8
PA
3223 restore_child_signals_mask (&prev_mask);
3224 return minus_one_ptid;
d6b0e80f 3225 }
28736962 3226
0e5bf2a8
PA
3227 /* No interesting event to report to the core. */
3228
3229 if (target_options & TARGET_WNOHANG)
3230 {
01124a23 3231 if (debug_linux_nat)
28736962
PA
3232 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3233
0e5bf2a8 3234 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3235 restore_child_signals_mask (&prev_mask);
3236 return minus_one_ptid;
3237 }
d6b0e80f
AC
3238
3239 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3240 gdb_assert (lp == NULL);
0e5bf2a8
PA
3241
3242 /* Block until we get an event reported with SIGCHLD. */
d36bf488
DE
3243 if (debug_linux_nat)
3244 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
0e5bf2a8 3245 sigsuspend (&suspend_mask);
d6b0e80f
AC
3246 }
3247
d6b0e80f
AC
3248 gdb_assert (lp);
3249
ca2163eb
PA
3250 status = lp->status;
3251 lp->status = 0;
3252
fbea99ea 3253 if (!target_is_non_stop_p ())
4c28f408
PA
3254 {
3255 /* Now stop all other LWP's ... */
d90e17a7 3256 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3257
3258 /* ... and wait until all of them have reported back that
3259 they're no longer running. */
d90e17a7 3260 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
9c02b525
PA
3261 }
3262
3263 /* If we're not waiting for a specific LWP, choose an event LWP from
3264 among those that have had events. Giving equal priority to all
3265 LWPs that have had events helps prevent starvation. */
3266 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3267 select_event_lwp (ptid, &lp, &status);
3268
3269 gdb_assert (lp != NULL);
3270
3271 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3272 it was a software breakpoint, and we can't reliably support the
3273 "stopped by software breakpoint" stop reason. */
3274 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3275 && !USE_SIGTRAP_SIGINFO)
9c02b525
PA
3276 {
3277 struct regcache *regcache = get_thread_regcache (lp->ptid);
3278 struct gdbarch *gdbarch = get_regcache_arch (regcache);
527a273a 3279 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3280
9c02b525
PA
3281 if (decr_pc != 0)
3282 {
3283 CORE_ADDR pc;
d6b0e80f 3284
9c02b525
PA
3285 pc = regcache_read_pc (regcache);
3286 regcache_write_pc (regcache, pc + decr_pc);
3287 }
3288 }
e3e9f5a2 3289
9c02b525
PA
3290 /* We'll need this to determine whether to report a SIGSTOP as
3291 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3292 clears it. */
3293 last_resume_kind = lp->last_resume_kind;
4b60df3d 3294
fbea99ea 3295 if (!target_is_non_stop_p ())
9c02b525 3296 {
e3e9f5a2
PA
3297 /* In all-stop, from the core's perspective, all LWPs are now
3298 stopped until a new resume action is sent over. */
3299 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3300 }
3301 else
25289eb2 3302 {
4b60df3d 3303 resume_clear_callback (lp, NULL);
25289eb2 3304 }
d6b0e80f 3305
26ab7092 3306 if (linux_nat_status_is_event (status))
d6b0e80f 3307 {
d6b0e80f
AC
3308 if (debug_linux_nat)
3309 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3310 "LLW: trap ptid is %s.\n",
3311 target_pid_to_str (lp->ptid));
d6b0e80f 3312 }
d6b0e80f
AC
3313
3314 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3315 {
3316 *ourstatus = lp->waitstatus;
3317 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3318 }
3319 else
3320 store_waitstatus (ourstatus, status);
3321
01124a23 3322 if (debug_linux_nat)
b84876c2
PA
3323 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3324
7feb7d06 3325 restore_child_signals_mask (&prev_mask);
1e225492 3326
4b60df3d 3327 if (last_resume_kind == resume_stop
25289eb2
PA
3328 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3329 && WSTOPSIG (status) == SIGSTOP)
3330 {
3331 /* A thread that has been requested to stop by GDB with
3332 target_stop, and it stopped cleanly, so report as SIG0. The
3333 use of SIGSTOP is an implementation detail. */
a493e3e2 3334 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3335 }
3336
1e225492
JK
3337 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3338 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3339 lp->core = -1;
3340 else
2e794194 3341 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3342
f973ed9c 3343 return lp->ptid;
d6b0e80f
AC
3344}
3345
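/* Illustrative sketch (not GDB code) of the sigsuspend-based wait loop
   used by linux_nat_wait_1 above: SIGCHLD stays blocked while children
   are polled with WNOHANG, and sigsuspend atomically unblocks it only
   while sleeping, so no wakeup can be lost between the poll and the
   sleep.  The example_wait_for_sigchld name is hypothetical.  */
#if 0
static void
example_wait_for_sigchld (void)
{
  sigset_t block_mask, suspend_mask;
  pid_t pid;
  int status;

  /* Block SIGCHLD so it can only be delivered inside sigsuspend.  */
  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &suspend_mask);
  sigdelset (&suspend_mask, SIGCHLD);

  for (;;)
    {
      /* Reap anything that is already reportable.  */
      pid = waitpid (-1, &status, __WALL | WNOHANG);
      if (pid > 0)
        break;

      /* Nothing yet; sleep until the SIGCHLD handler has run.  */
      sigsuspend (&suspend_mask);
    }
}
#endif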
e3e9f5a2
PA
3346/* Resume LWPs that are currently stopped without any pending status
3347 to report, but are resumed from the core's perspective. */
3348
3349static int
3350resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3351{
9a3c8263 3352 ptid_t *wait_ptid_p = (ptid_t *) data;
e3e9f5a2 3353
4dd63d48
PA
3354 if (!lp->stopped)
3355 {
3356 if (debug_linux_nat)
3357 fprintf_unfiltered (gdb_stdlog,
3358 "RSRL: NOT resuming LWP %s, not stopped\n",
3359 target_pid_to_str (lp->ptid));
3360 }
3361 else if (!lp->resumed)
3362 {
3363 if (debug_linux_nat)
3364 fprintf_unfiltered (gdb_stdlog,
3365 "RSRL: NOT resuming LWP %s, not resumed\n",
3366 target_pid_to_str (lp->ptid));
3367 }
3368 else if (lwp_status_pending_p (lp))
3369 {
3370 if (debug_linux_nat)
3371 fprintf_unfiltered (gdb_stdlog,
3372 "RSRL: NOT resuming LWP %s, has pending status\n",
3373 target_pid_to_str (lp->ptid));
3374 }
3375 else
e3e9f5a2 3376 {
336060f3
PA
3377 struct regcache *regcache = get_thread_regcache (lp->ptid);
3378 struct gdbarch *gdbarch = get_regcache_arch (regcache);
336060f3 3379
23f238d3 3380 TRY
e3e9f5a2 3381 {
23f238d3
PA
3382 CORE_ADDR pc = regcache_read_pc (regcache);
3383 int leave_stopped = 0;
e3e9f5a2 3384
23f238d3
PA
3385 /* Don't bother if there's a breakpoint at PC that we'd hit
3386 immediately, and we're not waiting for this LWP. */
3387 if (!ptid_match (lp->ptid, *wait_ptid_p))
3388 {
3389 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3390 leave_stopped = 1;
3391 }
e3e9f5a2 3392
23f238d3
PA
3393 if (!leave_stopped)
3394 {
3395 if (debug_linux_nat)
3396 fprintf_unfiltered (gdb_stdlog,
3397 "RSRL: resuming stopped-resumed LWP %s at "
3398 "%s: step=%d\n",
3399 target_pid_to_str (lp->ptid),
3400 paddress (gdbarch, pc),
3401 lp->step);
3402
3403 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3404 }
3405 }
3406 CATCH (ex, RETURN_MASK_ERROR)
3407 {
3408 if (!check_ptrace_stopped_lwp_gone (lp))
3409 throw_exception (ex);
3410 }
3411 END_CATCH
e3e9f5a2
PA
3412 }
3413
3414 return 0;
3415}
3416
7feb7d06
PA
3417static ptid_t
3418linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3419 ptid_t ptid, struct target_waitstatus *ourstatus,
3420 int target_options)
7feb7d06
PA
3421{
3422 ptid_t event_ptid;
3423
3424 if (debug_linux_nat)
09826ec5
PA
3425 {
3426 char *options_string;
3427
3428 options_string = target_options_to_string (target_options);
3429 fprintf_unfiltered (gdb_stdlog,
3430 "linux_nat_wait: [%s], [%s]\n",
3431 target_pid_to_str (ptid),
3432 options_string);
3433 xfree (options_string);
3434 }
7feb7d06
PA
3435
3436 /* Flush the async file first. */
d9d41e78 3437 if (target_is_async_p ())
7feb7d06
PA
3438 async_file_flush ();
3439
e3e9f5a2
PA
3440 /* Resume LWPs that are currently stopped without any pending status
3441 to report, but are resumed from the core's perspective. LWPs get
3442 in this state if we find them stopping at a time we're not
3443 interested in reporting the event (target_wait on a
 3444 specific process, for example, see linux_nat_wait_1), and
3445 meanwhile the event became uninteresting. Don't bother resuming
3446 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3447 if (target_is_non_stop_p ())
e3e9f5a2
PA
3448 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3449
47608cb1 3450 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3451
3452 /* If we requested any event, and something came out, assume there
3453 may be more. If we requested a specific lwp or process, also
3454 assume there may be more. */
d9d41e78 3455 if (target_is_async_p ()
6953d224
PA
3456 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3457 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3458 || !ptid_equal (ptid, minus_one_ptid)))
3459 async_file_mark ();
3460
7feb7d06
PA
3461 return event_ptid;
3462}
3463
1d2736d4
PA
3464/* Kill one LWP. */
3465
3466static void
3467kill_one_lwp (pid_t pid)
d6b0e80f 3468{
ed731959
JK
3469 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3470
3471 errno = 0;
1d2736d4 3472 kill_lwp (pid, SIGKILL);
ed731959 3473 if (debug_linux_nat)
57745c90
PA
3474 {
3475 int save_errno = errno;
3476
3477 fprintf_unfiltered (gdb_stdlog,
1d2736d4 3478 "KC: kill (SIGKILL) %ld, 0, 0 (%s)\n", (long) pid,
57745c90
PA
3479 save_errno ? safe_strerror (save_errno) : "OK");
3480 }
ed731959
JK
3481
3482 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3483
d6b0e80f 3484 errno = 0;
1d2736d4 3485 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3486 if (debug_linux_nat)
57745c90
PA
3487 {
3488 int save_errno = errno;
3489
3490 fprintf_unfiltered (gdb_stdlog,
1d2736d4 3491 "KC: PTRACE_KILL %ld, 0, 0 (%s)\n", (long) pid,
57745c90
PA
3492 save_errno ? safe_strerror (save_errno) : "OK");
3493 }
d6b0e80f
AC
3494}
3495
1d2736d4
PA
3496/* Wait for an LWP to die. */
3497
3498static void
3499kill_wait_one_lwp (pid_t pid)
d6b0e80f 3500{
1d2736d4 3501 pid_t res;
d6b0e80f
AC
3502
3503 /* We must make sure that there are no pending events (delayed
 3504 SIGSTOPs, pending SIGTRAPs, etc.) so that the current
3505 program doesn't interfere with any following debugging session. */
3506
d6b0e80f
AC
3507 do
3508 {
1d2736d4
PA
3509 res = my_waitpid (pid, NULL, __WALL);
3510 if (res != (pid_t) -1)
d6b0e80f 3511 {
e85a822c
DJ
3512 if (debug_linux_nat)
3513 fprintf_unfiltered (gdb_stdlog,
1d2736d4
PA
3514 "KWC: wait %ld received unknown.\n",
3515 (long) pid);
4a6ed09b
PA
3516 /* The Linux kernel sometimes fails to kill a thread
3517 completely after PTRACE_KILL; that goes from the stop
3518 point in do_fork out to the one in get_signal_to_deliver
3519 and waits again. So kill it again. */
1d2736d4 3520 kill_one_lwp (pid);
d6b0e80f
AC
3521 }
3522 }
1d2736d4
PA
3523 while (res == pid);
3524
3525 gdb_assert (res == -1 && errno == ECHILD);
3526}
3527
3528/* Callback for iterate_over_lwps. */
d6b0e80f 3529
1d2736d4
PA
3530static int
3531kill_callback (struct lwp_info *lp, void *data)
3532{
3533 kill_one_lwp (ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3534 return 0;
3535}
3536
1d2736d4
PA
3537/* Callback for iterate_over_lwps. */
3538
3539static int
3540kill_wait_callback (struct lwp_info *lp, void *data)
3541{
3542 kill_wait_one_lwp (ptid_get_lwp (lp->ptid));
3543 return 0;
3544}
3545
3546/* Kill the fork children of any threads of inferior INF that are
3547 stopped at a fork event. */
3548
3549static void
3550kill_unfollowed_fork_children (struct inferior *inf)
3551{
3552 struct thread_info *thread;
3553
3554 ALL_NON_EXITED_THREADS (thread)
3555 if (thread->inf == inf)
3556 {
3557 struct target_waitstatus *ws = &thread->pending_follow;
3558
3559 if (ws->kind == TARGET_WAITKIND_FORKED
3560 || ws->kind == TARGET_WAITKIND_VFORKED)
3561 {
3562 ptid_t child_ptid = ws->value.related_pid;
3563 int child_pid = ptid_get_pid (child_ptid);
3564 int child_lwp = ptid_get_lwp (child_ptid);
3565 int status;
3566
3567 kill_one_lwp (child_lwp);
3568 kill_wait_one_lwp (child_lwp);
3569
3570 /* Let the arch-specific native code know this process is
3571 gone. */
3572 linux_nat_forget_process (child_pid);
3573 }
3574 }
3575}
3576
d6b0e80f 3577static void
7d85a9c0 3578linux_nat_kill (struct target_ops *ops)
d6b0e80f 3579{
f973ed9c 3580 struct target_waitstatus last;
d6b0e80f 3581
f973ed9c
DJ
3582 /* If we're stopped while forking and we haven't followed yet,
3583 kill the other task. We need to do this first because the
3584 parent will be sleeping if this is a vfork. */
1d2736d4 3585 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3586
3587 if (forks_exist_p ())
7feb7d06 3588 linux_fork_killall ();
f973ed9c
DJ
3589 else
3590 {
d90e17a7 3591 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3592
4c28f408
PA
3593 /* Stop all threads before killing them, since ptrace requires
 3594 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3595 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3596 /* ... and wait until all of them have reported back that
3597 they're no longer running. */
d90e17a7 3598 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3599
f973ed9c 3600 /* Kill all LWP's ... */
d90e17a7 3601 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3602
3603 /* ... and wait until we've flushed all events. */
d90e17a7 3604 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3605 }
3606
3607 target_mourn_inferior ();
d6b0e80f
AC
3608}
3609
3610static void
136d6dae 3611linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3612{
26cb8b7c
PA
3613 int pid = ptid_get_pid (inferior_ptid);
3614
3615 purge_lwp_list (pid);
d6b0e80f 3616
f973ed9c 3617 if (! forks_exist_p ())
d90e17a7
PA
3618 /* Normal case, no other forks available. */
3619 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3620 else
3621 /* Multi-fork case. The current inferior_ptid has exited, but
3622 there are other viable forks to debug. Delete the exiting
3623 one and context-switch to the first available. */
3624 linux_fork_mourn_inferior ();
26cb8b7c
PA
3625
3626 /* Let the arch-specific native code know this process is gone. */
3627 linux_nat_forget_process (pid);
d6b0e80f
AC
3628}
3629
5b009018
PA
3630/* Convert a native/host siginfo object, into/from the siginfo in the
3631 layout of the inferiors' architecture. */
3632
3633static void
a5362b9a 3634siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3635{
3636 int done = 0;
3637
3638 if (linux_nat_siginfo_fixup != NULL)
3639 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3640
3641 /* If there was no callback, or the callback didn't do anything,
3642 then just do a straight memcpy. */
3643 if (!done)
3644 {
3645 if (direction == 1)
a5362b9a 3646 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3647 else
a5362b9a 3648 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3649 }
3650}
3651
9b409511 3652static enum target_xfer_status
4aa995e1
PA
3653linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3654 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3655 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3656 ULONGEST *xfered_len)
4aa995e1 3657{
4aa995e1 3658 int pid;
a5362b9a
TS
3659 siginfo_t siginfo;
3660 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3661
3662 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3663 gdb_assert (readbuf || writebuf);
3664
dfd4cc63 3665 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3666 if (pid == 0)
dfd4cc63 3667 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3668
3669 if (offset > sizeof (siginfo))
2ed4b548 3670 return TARGET_XFER_E_IO;
4aa995e1
PA
3671
3672 errno = 0;
3673 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3674 if (errno != 0)
2ed4b548 3675 return TARGET_XFER_E_IO;
4aa995e1 3676
5b009018
PA
3677 /* When GDB is built as a 64-bit application, ptrace writes into
3678 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3679 inferior with a 64-bit GDB should look the same as debugging it
3680 with a 32-bit GDB, we need to convert it. GDB core always sees
3681 the converted layout, so any read/write will have to be done
3682 post-conversion. */
3683 siginfo_fixup (&siginfo, inf_siginfo, 0);
3684
4aa995e1
PA
3685 if (offset + len > sizeof (siginfo))
3686 len = sizeof (siginfo) - offset;
3687
3688 if (readbuf != NULL)
5b009018 3689 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3690 else
3691 {
5b009018
PA
3692 memcpy (inf_siginfo + offset, writebuf, len);
3693
3694 /* Convert back to ptrace layout before flushing it out. */
3695 siginfo_fixup (&siginfo, inf_siginfo, 1);
3696
4aa995e1
PA
3697 errno = 0;
3698 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3699 if (errno != 0)
2ed4b548 3700 return TARGET_XFER_E_IO;
4aa995e1
PA
3701 }
3702
9b409511
YQ
3703 *xfered_len = len;
3704 return TARGET_XFER_OK;
4aa995e1
PA
3705}
3706
9b409511 3707static enum target_xfer_status
10d6c8cd
DJ
3708linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3709 const char *annex, gdb_byte *readbuf,
3710 const gdb_byte *writebuf,
9b409511 3711 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3712{
4aa995e1 3713 struct cleanup *old_chain;
9b409511 3714 enum target_xfer_status xfer;
d6b0e80f 3715
4aa995e1
PA
3716 if (object == TARGET_OBJECT_SIGNAL_INFO)
3717 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 3718 offset, len, xfered_len);
4aa995e1 3719
c35b1492
PA
3720 /* The target is connected but no live inferior is selected. Pass
3721 this request down to a lower stratum (e.g., the executable
3722 file). */
3723 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3724 return TARGET_XFER_EOF;
c35b1492 3725
4aa995e1
PA
3726 old_chain = save_inferior_ptid ();
3727
dfd4cc63
LM
3728 if (ptid_lwp_p (inferior_ptid))
3729 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 3730
10d6c8cd 3731 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 3732 offset, len, xfered_len);
d6b0e80f
AC
3733
3734 do_cleanups (old_chain);
3735 return xfer;
3736}
3737
28439f5e
PA
3738static int
3739linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3740{
4a6ed09b
PA
 3741 /* As long as a PTID is in the LWP list, consider it alive. */
3742 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3743}
3744
8a06aea7
PA
3745/* Implement the to_update_thread_list target method for this
3746 target. */
3747
3748static void
3749linux_nat_update_thread_list (struct target_ops *ops)
3750{
a6904d5a
PA
3751 struct lwp_info *lwp;
3752
4a6ed09b
PA
3753 /* We add/delete threads from the list as clone/exit events are
3754 processed, so just try deleting exited threads still in the
3755 thread list. */
3756 delete_exited_threads ();
a6904d5a
PA
3757
3758 /* Update the processor core that each lwp/thread was last seen
3759 running on. */
3760 ALL_LWPS (lwp)
3761 lwp->core = linux_common_core_of_thread (lwp->ptid);
8a06aea7
PA
3762}
3763
d6b0e80f 3764static char *
117de6a9 3765linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3766{
3767 static char buf[64];
3768
dfd4cc63
LM
3769 if (ptid_lwp_p (ptid)
3770 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3771 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3772 {
dfd4cc63 3773 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3774 return buf;
3775 }
3776
3777 return normal_pid_to_str (ptid);
3778}
3779
73ede765 3780static const char *
503a628d 3781linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01 3782{
79efa585 3783 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3784}
3785
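/* Hypothetical sketch of where the thread name above typically comes
   from (the real linux_proc_tid_get_name is implemented elsewhere, in
   the nat/ support code): read /proc/<pid>/task/<tid>/comm, assuming
   the usual procfs layout.  */
#if 0
static char *
example_tid_get_name (int pid, long tid, char *buf, int len)
{
  char path[64];
  FILE *f;

  xsnprintf (path, sizeof path, "/proc/%d/task/%ld/comm", pid, tid);
  f = fopen (path, "r");
  if (f == NULL)
    return NULL;
  if (fgets (buf, len, f) != NULL)
    buf[strcspn (buf, "\n")] = '\0';	/* Strip the trailing newline.  */
  else
    buf[0] = '\0';
  fclose (f);
  return buf;
}
#endif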
dba24537
AC
 3786/* Accepts an integer PID; returns a string representing a file that
3787 can be opened to get the symbols for the child process. */
3788
6d8fd2b7 3789static char *
8dd27370 3790linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 3791{
e0d86d2c 3792 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3793}
3794
10d6c8cd
DJ
3795/* Implement the to_xfer_partial interface for memory reads using the /proc
3796 filesystem. Because we can use a single read() call for /proc, this
3797 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3798 but it doesn't support writes. */
3799
9b409511 3800static enum target_xfer_status
10d6c8cd
DJ
3801linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3802 const char *annex, gdb_byte *readbuf,
3803 const gdb_byte *writebuf,
9b409511 3804 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 3805{
10d6c8cd
DJ
3806 LONGEST ret;
3807 int fd;
dba24537
AC
3808 char filename[64];
3809
10d6c8cd 3810 if (object != TARGET_OBJECT_MEMORY || !readbuf)
f486487f 3811 return TARGET_XFER_EOF;
dba24537
AC
3812
3813 /* Don't bother for one word. */
3814 if (len < 3 * sizeof (long))
9b409511 3815 return TARGET_XFER_EOF;
dba24537
AC
3816
3817 /* We could keep this file open and cache it - possibly one per
3818 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
3819 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3820 ptid_get_pid (inferior_ptid));
614c279d 3821 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 3822 if (fd == -1)
9b409511 3823 return TARGET_XFER_EOF;
dba24537
AC
3824
3825 /* If pread64 is available, use it. It's faster if the kernel
3826 supports it (only one syscall), and it's 64-bit safe even on
3827 32-bit platforms (for instance, SPARC debugging a SPARC64
3828 application). */
3829#ifdef HAVE_PREAD64
10d6c8cd 3830 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3831#else
10d6c8cd 3832 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3833#endif
3834 ret = 0;
3835 else
3836 ret = len;
3837
3838 close (fd);
9b409511
YQ
3839
3840 if (ret == 0)
3841 return TARGET_XFER_EOF;
3842 else
3843 {
3844 *xfered_len = ret;
3845 return TARGET_XFER_OK;
3846 }
dba24537
AC
3847}
3848
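/* Minimal sketch (not GDB code) of the /proc/<pid>/mem access performed
   above: a single pread64 per request instead of one PTRACE_PEEKTEXT
   per word.  The tracee must already be ptrace-stopped for the read to
   be permitted; example_read_inferior_memory is a hypothetical name.  */
#if 0
static ssize_t
example_read_inferior_memory (pid_t pid, unsigned long addr,
                              void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  xsnprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

  n = pread64 (fd, buf, len, (off64_t) addr);
  close (fd);
  return n;
}
#endif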
efcbbd14
UW
3849
3850/* Enumerate spufs IDs for process PID. */
3851static LONGEST
b55e14c7 3852spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 3853{
f5656ead 3854 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
3855 LONGEST pos = 0;
3856 LONGEST written = 0;
3857 char path[128];
3858 DIR *dir;
3859 struct dirent *entry;
3860
3861 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
3862 dir = opendir (path);
3863 if (!dir)
3864 return -1;
3865
3866 rewinddir (dir);
3867 while ((entry = readdir (dir)) != NULL)
3868 {
3869 struct stat st;
3870 struct statfs stfs;
3871 int fd;
3872
3873 fd = atoi (entry->d_name);
3874 if (!fd)
3875 continue;
3876
3877 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
3878 if (stat (path, &st) != 0)
3879 continue;
3880 if (!S_ISDIR (st.st_mode))
3881 continue;
3882
3883 if (statfs (path, &stfs) != 0)
3884 continue;
3885 if (stfs.f_type != SPUFS_MAGIC)
3886 continue;
3887
3888 if (pos >= offset && pos + 4 <= offset + len)
3889 {
3890 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
3891 written += 4;
3892 }
3893 pos += 4;
3894 }
3895
3896 closedir (dir);
3897 return written;
3898}
3899
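/* Illustrative sketch (not GDB code) of the statfs-based test used in
   spu_enumerate_spu_ids above: an fd of PID is considered a spufs
   context when its /proc/<pid>/fd entry is a directory whose filesystem
   magic matches SPUFS_MAGIC.  PID and FD are hypothetical inputs.  */
#if 0
static int
example_fd_is_spufs (int pid, int fd)
{
  char path[128];
  struct stat st;
  struct statfs stfs;

  xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
  if (stat (path, &st) != 0 || !S_ISDIR (st.st_mode))
    return 0;
  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == SPUFS_MAGIC;
}
#endif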
3900/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
3901 object type, using the /proc file system. */
9b409511
YQ
3902
3903static enum target_xfer_status
efcbbd14
UW
3904linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
3905 const char *annex, gdb_byte *readbuf,
3906 const gdb_byte *writebuf,
9b409511 3907 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
3908{
3909 char buf[128];
3910 int fd = 0;
3911 int ret = -1;
dfd4cc63 3912 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
3913
3914 if (!annex)
3915 {
3916 if (!readbuf)
2ed4b548 3917 return TARGET_XFER_E_IO;
efcbbd14 3918 else
9b409511
YQ
3919 {
3920 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
3921
3922 if (l < 0)
3923 return TARGET_XFER_E_IO;
3924 else if (l == 0)
3925 return TARGET_XFER_EOF;
3926 else
3927 {
3928 *xfered_len = (ULONGEST) l;
3929 return TARGET_XFER_OK;
3930 }
3931 }
efcbbd14
UW
3932 }
3933
3934 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 3935 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 3936 if (fd <= 0)
2ed4b548 3937 return TARGET_XFER_E_IO;
efcbbd14
UW
3938
3939 if (offset != 0
3940 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
3941 {
3942 close (fd);
9b409511 3943 return TARGET_XFER_EOF;
efcbbd14
UW
3944 }
3945
3946 if (writebuf)
3947 ret = write (fd, writebuf, (size_t) len);
3948 else if (readbuf)
3949 ret = read (fd, readbuf, (size_t) len);
3950
3951 close (fd);
9b409511
YQ
3952
3953 if (ret < 0)
3954 return TARGET_XFER_E_IO;
3955 else if (ret == 0)
3956 return TARGET_XFER_EOF;
3957 else
3958 {
3959 *xfered_len = (ULONGEST) ret;
3960 return TARGET_XFER_OK;
3961 }
efcbbd14
UW
3962}
3963
3964
dba24537
AC
3965/* Parse LINE as a signal set and add its set bits to SIGS. */
3966
3967static void
3968add_line_to_sigset (const char *line, sigset_t *sigs)
3969{
3970 int len = strlen (line) - 1;
3971 const char *p;
3972 int signum;
3973
3974 if (line[len] != '\n')
8a3fe4f8 3975 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3976
3977 p = line;
3978 signum = len * 4;
3979 while (len-- > 0)
3980 {
3981 int digit;
3982
3983 if (*p >= '0' && *p <= '9')
3984 digit = *p - '0';
3985 else if (*p >= 'a' && *p <= 'f')
3986 digit = *p - 'a' + 10;
3987 else
8a3fe4f8 3988 error (_("Could not parse signal set: %s"), line);
dba24537
AC
3989
3990 signum -= 4;
3991
3992 if (digit & 1)
3993 sigaddset (sigs, signum + 1);
3994 if (digit & 2)
3995 sigaddset (sigs, signum + 2);
3996 if (digit & 4)
3997 sigaddset (sigs, signum + 3);
3998 if (digit & 8)
3999 sigaddset (sigs, signum + 4);
4000
4001 p++;
4002 }
4003}
4004
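/* Worked example (hypothetical input) of the mapping above: in the
   status line "SigPnd:\t0000000000004002\n" the hex digit '4' covers
   signals 13-16 and the rightmost digit '2' covers signals 1-4, so
   add_line_to_sigset adds SIGTERM (15) and SIGINT (2) to the set.  */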
4005/* Find process PID's pending signals from /proc/pid/status and set
4006 SIGS to match. */
4007
4008void
3e43a32a
MS
4009linux_proc_pending_signals (int pid, sigset_t *pending,
4010 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4011{
4012 FILE *procfile;
d8d2a3ee 4013 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 4014 struct cleanup *cleanup;
dba24537
AC
4015
4016 sigemptyset (pending);
4017 sigemptyset (blocked);
4018 sigemptyset (ignored);
cde33bf1 4019 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 4020 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4021 if (procfile == NULL)
8a3fe4f8 4022 error (_("Could not open %s"), fname);
7c8a8b04 4023 cleanup = make_cleanup_fclose (procfile);
dba24537 4024
d8d2a3ee 4025 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
4026 {
4027 /* Normal queued signals are on the SigPnd line in the status
4028 file. However, 2.6 kernels also have a "shared" pending
4029 queue for delivering signals to a thread group, so check for
4030 a ShdPnd line also.
4031
4032 Unfortunately some Red Hat kernels include the shared pending
4033 queue but not the ShdPnd status field. */
4034
61012eef 4035 if (startswith (buffer, "SigPnd:\t"))
dba24537 4036 add_line_to_sigset (buffer + 8, pending);
61012eef 4037 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4038 add_line_to_sigset (buffer + 8, pending);
61012eef 4039 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4040 add_line_to_sigset (buffer + 8, blocked);
61012eef 4041 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4042 add_line_to_sigset (buffer + 8, ignored);
4043 }
4044
7c8a8b04 4045 do_cleanups (cleanup);
dba24537
AC
4046}
4047
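/* Hypothetical usage sketch of linux_proc_pending_signals above: fetch
   the three signal sets of a process and test one of them.  The PID is
   made up.  */
#if 0
{
  sigset_t pending, blocked, ignored;
  int pid = 1234;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
  if (sigismember (&pending, SIGINT))
    printf_unfiltered ("SIGINT is pending for %d\n", pid);
}
#endif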
9b409511 4048static enum target_xfer_status
07e059b5 4049linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4050 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4051 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4052 ULONGEST *xfered_len)
07e059b5 4053{
07e059b5
VP
4054 gdb_assert (object == TARGET_OBJECT_OSDATA);
4055
9b409511
YQ
4056 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4057 if (*xfered_len == 0)
4058 return TARGET_XFER_EOF;
4059 else
4060 return TARGET_XFER_OK;
07e059b5
VP
4061}
4062
9b409511 4063static enum target_xfer_status
10d6c8cd
DJ
4064linux_xfer_partial (struct target_ops *ops, enum target_object object,
4065 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4066 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4067 ULONGEST *xfered_len)
10d6c8cd 4068{
9b409511 4069 enum target_xfer_status xfer;
10d6c8cd
DJ
4070
4071 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4072 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4073 offset, len, xfered_len);
10d6c8cd 4074
07e059b5
VP
4075 if (object == TARGET_OBJECT_OSDATA)
4076 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4077 offset, len, xfered_len);
07e059b5 4078
efcbbd14
UW
4079 if (object == TARGET_OBJECT_SPU)
4080 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4081 offset, len, xfered_len);
efcbbd14 4082
8f313923
JK
 4083 /* GDB calculates all the addresses in the possibly larger width of the address.
4084 Address width needs to be masked before its final use - either by
4085 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4086
4087 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4088
4089 if (object == TARGET_OBJECT_MEMORY)
4090 {
f5656ead 4091 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4092
4093 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4094 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4095 }
4096
10d6c8cd 4097 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4098 offset, len, xfered_len);
4099 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4100 return xfer;
4101
4102 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4103 offset, len, xfered_len);
10d6c8cd
DJ
4104}
4105
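/* Worked example (hypothetical values) of the address-width masking in
   linux_xfer_partial above: a 32-bit inferior address that reaches us
   sign-extended to 64 bits is trimmed back to 32 bits before being
   handed to the lower layers.  */
#if 0
{
  ULONGEST offset = 0xffffffff80001000ULL;	/* Sign-extended address.  */
  int addr_bit = 32;				/* gdbarch_addr_bit ().  */

  if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
    offset &= ((ULONGEST) 1 << addr_bit) - 1;	/* Now 0x80001000.  */
}
#endif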
5808517f
YQ
4106static void
4107cleanup_target_stop (void *arg)
4108{
4109 ptid_t *ptid = (ptid_t *) arg;
4110
4111 gdb_assert (arg != NULL);
4112
4113 /* Unpause all */
a493e3e2 4114 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4115}
4116
4117static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4118linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4119 const char *strid)
5808517f
YQ
4120{
4121 char s[IPA_CMD_BUF_SIZE];
4122 struct cleanup *old_chain;
4123 int pid = ptid_get_pid (inferior_ptid);
4124 VEC(static_tracepoint_marker_p) *markers = NULL;
4125 struct static_tracepoint_marker *marker = NULL;
4126 char *p = s;
4127 ptid_t ptid = ptid_build (pid, 0, 0);
4128
4129 /* Pause all */
4130 target_stop (ptid);
4131
4132 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4133 s[sizeof ("qTfSTM")] = 0;
4134
42476b70 4135 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4136
4137 old_chain = make_cleanup (free_current_marker, &marker);
4138 make_cleanup (cleanup_target_stop, &ptid);
4139
4140 while (*p++ == 'm')
4141 {
4142 if (marker == NULL)
4143 marker = XCNEW (struct static_tracepoint_marker);
4144
4145 do
4146 {
4147 parse_static_tracepoint_marker_definition (p, &p, marker);
4148
4149 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4150 {
4151 VEC_safe_push (static_tracepoint_marker_p,
4152 markers, marker);
4153 marker = NULL;
4154 }
4155 else
4156 {
4157 release_static_tracepoint_marker (marker);
4158 memset (marker, 0, sizeof (*marker));
4159 }
4160 }
4161 while (*p++ == ','); /* comma-separated list */
4162
4163 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4164 s[sizeof ("qTsSTM")] = 0;
42476b70 4165 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4166 p = s;
4167 }
4168
4169 do_cleanups (old_chain);
4170
4171 return markers;
4172}
4173
e9efe249 4174/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4175 it with local methods. */
4176
910122bf
UW
4177static void
4178linux_target_install_ops (struct target_ops *t)
10d6c8cd 4179{
6d8fd2b7 4180 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4181 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4182 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4183 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4184 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4185 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4186 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4187 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4188 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4189 t->to_post_attach = linux_child_post_attach;
4190 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4191
4192 super_xfer_partial = t->to_xfer_partial;
4193 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4194
4195 t->to_static_tracepoint_markers_by_strid
4196 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4197}
4198
4199struct target_ops *
4200linux_target (void)
4201{
4202 struct target_ops *t;
4203
4204 t = inf_ptrace_target ();
4205 linux_target_install_ops (t);
4206
4207 return t;
4208}
4209
4210struct target_ops *
7714d83a 4211linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4212{
4213 struct target_ops *t;
4214
4215 t = inf_ptrace_trad_target (register_u_offset);
4216 linux_target_install_ops (t);
10d6c8cd 4217
10d6c8cd
DJ
4218 return t;
4219}
4220
b84876c2
PA
4221/* target_is_async_p implementation. */
4222
4223static int
6a109b6b 4224linux_nat_is_async_p (struct target_ops *ops)
b84876c2 4225{
198297aa 4226 return linux_is_async_p ();
b84876c2
PA
4227}
4228
4229/* target_can_async_p implementation. */
4230
4231static int
6a109b6b 4232linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4233{
4234 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4235 it explicitly with the "set target-async" command.
b84876c2 4236 Someday, linux will always be async. */
3dd5b83d 4237 return target_async_permitted;
b84876c2
PA
4238}
4239
9908b566 4240static int
2a9a2795 4241linux_nat_supports_non_stop (struct target_ops *self)
9908b566
VP
4242{
4243 return 1;
4244}
4245
fbea99ea
PA
4246/* to_always_non_stop_p implementation. */
4247
4248static int
4249linux_nat_always_non_stop_p (struct target_ops *self)
4250{
f12899e9 4251 return 1;
fbea99ea
PA
4252}
4253
d90e17a7
PA
4254/* True if we want to support multi-process. To be removed when GDB
4255 supports multi-exec. */
4256
2277426b 4257int linux_multi_process = 1;
d90e17a7
PA
4258
4259static int
86ce2668 4260linux_nat_supports_multi_process (struct target_ops *self)
d90e17a7
PA
4261{
4262 return linux_multi_process;
4263}
4264
03583c20 4265static int
2bfc0540 4266linux_nat_supports_disable_randomization (struct target_ops *self)
03583c20
UW
4267{
4268#ifdef HAVE_PERSONALITY
4269 return 1;
4270#else
4271 return 0;
4272#endif
4273}
4274
b84876c2
PA
4275static int async_terminal_is_ours = 1;
4276
4d4ca2a1
DE
4277/* target_terminal_inferior implementation.
4278
4279 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4280
4281static void
d2f640d4 4282linux_nat_terminal_inferior (struct target_ops *self)
b84876c2 4283{
d6b64346 4284 child_terminal_inferior (self);
b84876c2 4285
d9d2d8b6 4286 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4287 if (!async_terminal_is_ours)
4288 return;
4289
4290 delete_file_handler (input_fd);
4291 async_terminal_is_ours = 0;
4292 set_sigint_trap ();
4293}
4294
4d4ca2a1
DE
4295/* target_terminal_ours implementation.
4296
4297 This is a wrapper around child_terminal_ours to add async support (and
4298 implement the target_terminal_ours vs target_terminal_ours_for_output
4299 distinction). child_terminal_ours is currently no different than
4300 child_terminal_ours_for_output.
4301 We leave target_terminal_ours_for_output alone, leaving it to
4302 child_terminal_ours_for_output. */
b84876c2 4303
2c0b251b 4304static void
e3594fd1 4305linux_nat_terminal_ours (struct target_ops *self)
b84876c2 4306{
b84876c2
PA
4307 /* GDB should never give the terminal to the inferior if the
4308 inferior is running in the background (run&, continue&, etc.),
4309 but claiming it sure should. */
d6b64346 4310 child_terminal_ours (self);
b84876c2 4311
b84876c2
PA
4312 if (async_terminal_is_ours)
4313 return;
4314
4315 clear_sigint_trap ();
4316 add_file_handler (input_fd, stdin_event_handler, 0);
4317 async_terminal_is_ours = 1;
4318}
4319
7feb7d06
PA
4320/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4321 so we notice when any child changes state, and notify the
4322 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4323 above to wait for the arrival of a SIGCHLD. */
4324
b84876c2 4325static void
7feb7d06 4326sigchld_handler (int signo)
b84876c2 4327{
7feb7d06
PA
4328 int old_errno = errno;
4329
01124a23
DE
4330 if (debug_linux_nat)
4331 ui_file_write_async_safe (gdb_stdlog,
4332 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4333
4334 if (signo == SIGCHLD
4335 && linux_nat_event_pipe[0] != -1)
4336 async_file_mark (); /* Let the event loop know that there are
4337 events to handle. */
4338
4339 errno = old_errno;
4340}
4341
4342/* Callback registered with the target events file descriptor. */
4343
4344static void
4345handle_target_event (int error, gdb_client_data client_data)
4346{
6a3753b3 4347 inferior_event_handler (INF_REG_EVENT, NULL);
7feb7d06
PA
4348}
4349
4350/* Create/destroy the target events pipe. Returns previous state. */
4351
4352static int
4353linux_async_pipe (int enable)
4354{
198297aa 4355 int previous = linux_is_async_p ();
7feb7d06
PA
4356
4357 if (previous != enable)
4358 {
4359 sigset_t prev_mask;
4360
12696c10
PA
4361 /* Block child signals while we create/destroy the pipe, as
4362 their handler writes to it. */
7feb7d06
PA
4363 block_child_signals (&prev_mask);
4364
4365 if (enable)
4366 {
614c279d 4367 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4368 internal_error (__FILE__, __LINE__,
4369 "creating event pipe failed.");
4370
4371 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4372 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4373 }
4374 else
4375 {
4376 close (linux_nat_event_pipe[0]);
4377 close (linux_nat_event_pipe[1]);
4378 linux_nat_event_pipe[0] = -1;
4379 linux_nat_event_pipe[1] = -1;
4380 }
4381
4382 restore_child_signals_mask (&prev_mask);
4383 }
4384
4385 return previous;
b84876c2
PA
4386}
4387
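/* Minimal self-pipe sketch (not GDB code) of the mechanism implemented
   by sigchld_handler and linux_async_pipe above: the signal handler
   only writes a byte to a non-blocking pipe and the event loop watches
   the read end, so all real work happens outside signal context.  The
   example_* names are hypothetical.  */
#if 0
static int example_pipe[2];

static void
example_sigchld_handler (int signo)
{
  int old_errno = errno;
  char c = '+';

  /* write is async-signal-safe; a full pipe is fine to ignore.  */
  (void) write (example_pipe[1], &c, 1);
  errno = old_errno;
}

static void
example_setup (void)
{
  struct sigaction sa;

  pipe (example_pipe);
  fcntl (example_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (example_pipe[1], F_SETFL, O_NONBLOCK);

  sa.sa_handler = example_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);
}
#endif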
4388/* target_async implementation. */
4389
4390static void
6a3753b3 4391linux_nat_async (struct target_ops *ops, int enable)
b84876c2 4392{
6a3753b3 4393 if (enable)
b84876c2 4394 {
7feb7d06
PA
4395 if (!linux_async_pipe (1))
4396 {
4397 add_file_handler (linux_nat_event_pipe[0],
4398 handle_target_event, NULL);
4399 /* There may be pending events to handle. Tell the event loop
4400 to poll them. */
4401 async_file_mark ();
4402 }
b84876c2
PA
4403 }
4404 else
4405 {
b84876c2 4406 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4407 linux_async_pipe (0);
b84876c2
PA
4408 }
4409 return;
4410}
4411
a493e3e2 4412/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4413 event came out. */
4414
4c28f408 4415static int
252fbfc8 4416linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4417{
d90e17a7 4418 if (!lwp->stopped)
252fbfc8 4419 {
d90e17a7
PA
4420 if (debug_linux_nat)
4421 fprintf_unfiltered (gdb_stdlog,
4422 "LNSL: running -> suspending %s\n",
4423 target_pid_to_str (lwp->ptid));
252fbfc8 4424
252fbfc8 4425
25289eb2
PA
4426 if (lwp->last_resume_kind == resume_stop)
4427 {
4428 if (debug_linux_nat)
4429 fprintf_unfiltered (gdb_stdlog,
4430 "linux-nat: already stopping LWP %ld at "
4431 "GDB's request\n",
4432 ptid_get_lwp (lwp->ptid));
4433 return 0;
4434 }
252fbfc8 4435
25289eb2
PA
4436 stop_callback (lwp, NULL);
4437 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4438 }
4439 else
4440 {
4441 /* Already known to be stopped; do nothing. */
252fbfc8 4442
d90e17a7
PA
4443 if (debug_linux_nat)
4444 {
e09875d4 4445 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4446 fprintf_unfiltered (gdb_stdlog,
4447 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4448 target_pid_to_str (lwp->ptid));
4449 else
3e43a32a
MS
4450 fprintf_unfiltered (gdb_stdlog,
4451 "LNSL: already stopped/no "
4452 "stop_requested yet %s\n",
d90e17a7 4453 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4454 }
4455 }
4c28f408
PA
4456 return 0;
4457}
4458
4459static void
1eab8a48 4460linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408 4461{
bfedc46a
PA
4462 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4463}
4464
d90e17a7 4465static void
de90e03d 4466linux_nat_close (struct target_ops *self)
d90e17a7
PA
4467{
4468 /* Unregister from the event loop. */
9debeba0 4469 if (linux_nat_is_async_p (self))
6a3753b3 4470 linux_nat_async (self, 0);
d90e17a7 4471
d90e17a7 4472 if (linux_ops->to_close)
de90e03d 4473 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4474
4475 super_close (self);
d90e17a7
PA
4476}
4477
c0694254
PA
4478/* When requests are passed down from the linux-nat layer to the
4479 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4480 used. The address space pointer is stored in the inferior object,
 4481 but the common code that is passed such a ptid can't tell whether
4482 lwpid is a "main" process id or not (it assumes so). We reverse
4483 look up the "main" process id from the lwp here. */
4484
70221824 4485static struct address_space *
c0694254
PA
4486linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4487{
4488 struct lwp_info *lwp;
4489 struct inferior *inf;
4490 int pid;
4491
dfd4cc63 4492 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4493 {
4494 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4495 tgid. */
4496 lwp = find_lwp_pid (ptid);
dfd4cc63 4497 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4498 }
4499 else
4500 {
4501 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4502 pid = ptid_get_pid (ptid);
c0694254
PA
4503 }
4504
4505 inf = find_inferior_pid (pid);
4506 gdb_assert (inf != NULL);
4507 return inf->aspace;
4508}
4509
dc146f7c
VP
4510/* Return the cached value of the processor core for thread PTID. */
4511
70221824 4512static int
dc146f7c
VP
4513linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4514{
4515 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4516
dc146f7c
VP
4517 if (info)
4518 return info->core;
4519 return -1;
4520}
4521
7a6a1731
GB
4522/* Implementation of to_filesystem_is_local. */
4523
4524static int
4525linux_nat_filesystem_is_local (struct target_ops *ops)
4526{
4527 struct inferior *inf = current_inferior ();
4528
4529 if (inf->fake_pid_p || inf->pid == 0)
4530 return 1;
4531
4532 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4533}
4534
4535/* Convert the INF argument passed to a to_fileio_* method
4536 to a process ID suitable for passing to its corresponding
4537 linux_mntns_* function. If INF is non-NULL then the
4538 caller is requesting the filesystem seen by INF. If INF
4539 is NULL then the caller is requesting the filesystem seen
 4540 by GDB itself. We fall back to GDB's filesystem in the case
4541 that INF is non-NULL but its PID is unknown. */
4542
4543static pid_t
4544linux_nat_fileio_pid_of (struct inferior *inf)
4545{
4546 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4547 return getpid ();
4548 else
4549 return inf->pid;
4550}
4551
4552/* Implementation of to_fileio_open. */
4553
4554static int
4555linux_nat_fileio_open (struct target_ops *self,
4556 struct inferior *inf, const char *filename,
4313b8c0
GB
4557 int flags, int mode, int warn_if_slow,
4558 int *target_errno)
7a6a1731
GB
4559{
4560 int nat_flags;
4561 mode_t nat_mode;
4562 int fd;
4563
4564 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4565 || fileio_to_host_mode (mode, &nat_mode) == -1)
4566 {
4567 *target_errno = FILEIO_EINVAL;
4568 return -1;
4569 }
4570
4571 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4572 filename, nat_flags, nat_mode);
4573 if (fd == -1)
4574 *target_errno = host_to_fileio_error (errno);
4575
4576 return fd;
4577}
4578
4579/* Implementation of to_fileio_readlink. */
4580
4581static char *
4582linux_nat_fileio_readlink (struct target_ops *self,
4583 struct inferior *inf, const char *filename,
4584 int *target_errno)
4585{
4586 char buf[PATH_MAX];
4587 int len;
4588 char *ret;
4589
4590 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4591 filename, buf, sizeof (buf));
4592 if (len < 0)
4593 {
4594 *target_errno = host_to_fileio_error (errno);
4595 return NULL;
4596 }
4597
224c3ddb 4598 ret = (char *) xmalloc (len + 1);
7a6a1731
GB
4599 memcpy (ret, buf, len);
4600 ret[len] = '\0';
4601 return ret;
4602}
4603
4604/* Implementation of to_fileio_unlink. */
4605
4606static int
4607linux_nat_fileio_unlink (struct target_ops *self,
4608 struct inferior *inf, const char *filename,
4609 int *target_errno)
4610{
4611 int ret;
4612
4613 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4614 filename);
4615 if (ret == -1)
4616 *target_errno = host_to_fileio_error (errno);
4617
4618 return ret;
4619}
4620
f973ed9c
DJ
4621void
4622linux_nat_add_target (struct target_ops *t)
4623{
f973ed9c
DJ
4624 /* Save the provided single-threaded target. We save this in a separate
4625 variable because another target we've inherited from (e.g. inf-ptrace)
4626 may have saved a pointer to T; we want to use it for the final
4627 process stratum target. */
4628 linux_ops_saved = *t;
4629 linux_ops = &linux_ops_saved;
4630
4631 /* Override some methods for multithreading. */
b84876c2 4632 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4633 t->to_attach = linux_nat_attach;
4634 t->to_detach = linux_nat_detach;
4635 t->to_resume = linux_nat_resume;
4636 t->to_wait = linux_nat_wait;
2455069d 4637 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4638 t->to_xfer_partial = linux_nat_xfer_partial;
4639 t->to_kill = linux_nat_kill;
4640 t->to_mourn_inferior = linux_nat_mourn_inferior;
4641 t->to_thread_alive = linux_nat_thread_alive;
8a06aea7 4642 t->to_update_thread_list = linux_nat_update_thread_list;
f973ed9c 4643 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4644 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4645 t->to_has_thread_control = tc_schedlock;
c0694254 4646 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4647 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4648 t->to_stopped_data_address = linux_nat_stopped_data_address;
faf09f01
PA
4649 t->to_stopped_by_sw_breakpoint = linux_nat_stopped_by_sw_breakpoint;
4650 t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
4651 t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
4652 t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
f973ed9c 4653
b84876c2
PA
4654 t->to_can_async_p = linux_nat_can_async_p;
4655 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4656 t->to_supports_non_stop = linux_nat_supports_non_stop;
fbea99ea 4657 t->to_always_non_stop_p = linux_nat_always_non_stop_p;
b84876c2 4658 t->to_async = linux_nat_async;
b84876c2
PA
4659 t->to_terminal_inferior = linux_nat_terminal_inferior;
4660 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4661
4662 super_close = t->to_close;
d90e17a7 4663 t->to_close = linux_nat_close;
b84876c2 4664
4c28f408
PA
4665 t->to_stop = linux_nat_stop;
4666
d90e17a7
PA
4667 t->to_supports_multi_process = linux_nat_supports_multi_process;
4668
03583c20
UW
4669 t->to_supports_disable_randomization
4670 = linux_nat_supports_disable_randomization;
4671
dc146f7c
VP
4672 t->to_core_of_thread = linux_nat_core_of_thread;
4673
7a6a1731
GB
4674 t->to_filesystem_is_local = linux_nat_filesystem_is_local;
4675 t->to_fileio_open = linux_nat_fileio_open;
4676 t->to_fileio_readlink = linux_nat_fileio_readlink;
4677 t->to_fileio_unlink = linux_nat_fileio_unlink;
4678
f973ed9c
DJ
4679 /* We don't change the stratum; this target will sit at
4680 process_stratum and thread_db will set at thread_stratum. This
4681 is a little strange, since this is a multi-threaded-capable
4682 target, but we want to be on the stack below thread_db, and we
4683 also want to be used for single-threaded processes. */
4684
4685 add_target (t);
f973ed9c
DJ
4686}
4687
9f0bdab8
DJ
4688/* Register a method to call whenever a new thread is attached. */
4689void
7b50312a
PA
4690linux_nat_set_new_thread (struct target_ops *t,
4691 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4692{
4693 /* Save the pointer. We only support a single registered instance
4694 of the GNU/Linux native target, so we do not need to map this to
4695 T. */
4696 linux_nat_new_thread = new_thread;
4697}
4698
26cb8b7c
PA
4699/* See declaration in linux-nat.h. */
4700
4701void
4702linux_nat_set_new_fork (struct target_ops *t,
4703 linux_nat_new_fork_ftype *new_fork)
4704{
4705 /* Save the pointer. */
4706 linux_nat_new_fork = new_fork;
4707}
4708
4709/* See declaration in linux-nat.h. */
4710
4711void
4712linux_nat_set_forget_process (struct target_ops *t,
4713 linux_nat_forget_process_ftype *fn)
4714{
4715 /* Save the pointer. */
4716 linux_nat_forget_process_hook = fn;
4717}
4718
4719/* See declaration in linux-nat.h. */
4720
4721void
4722linux_nat_forget_process (pid_t pid)
4723{
4724 if (linux_nat_forget_process_hook != NULL)
4725 linux_nat_forget_process_hook (pid);
4726}
4727
5b009018
PA
4728/* Register a method that converts a siginfo object between the layout
4729 that ptrace returns, and the layout in the architecture of the
4730 inferior. */
4731void
4732linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4733 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4734 gdb_byte *,
4735 int))
4736{
4737 /* Save the pointer. */
4738 linux_nat_siginfo_fixup = siginfo_fixup;
4739}
4740
7b50312a
PA
4741/* Register a method to call prior to resuming a thread. */
4742
4743void
4744linux_nat_set_prepare_to_resume (struct target_ops *t,
4745 void (*prepare_to_resume) (struct lwp_info *))
4746{
4747 /* Save the pointer. */
4748 linux_nat_prepare_to_resume = prepare_to_resume;
4749}
4750
f865ee35
JK
4751/* See linux-nat.h. */
4752
4753int
4754linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4755{
da559b09 4756 int pid;
9f0bdab8 4757
dfd4cc63 4758 pid = ptid_get_lwp (ptid);
da559b09 4759 if (pid == 0)
dfd4cc63 4760 pid = ptid_get_pid (ptid);
f865ee35 4761
da559b09
JK
4762 errno = 0;
4763 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4764 if (errno != 0)
4765 {
4766 memset (siginfo, 0, sizeof (*siginfo));
4767 return 0;
4768 }
f865ee35 4769 return 1;
9f0bdab8
DJ
4770}
4771
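/* Hypothetical usage sketch of linux_nat_get_siginfo above: after a
   SIGSEGV stop, fetch the siginfo of the event thread and look at the
   faulting address.  Which ptid to pass depends on the caller.  */
#if 0
{
  siginfo_t siginfo;

  if (linux_nat_get_siginfo (inferior_ptid, &siginfo)
      && siginfo.si_signo == SIGSEGV)
    printf_unfiltered ("fault address: %p\n", siginfo.si_addr);
}
#endif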
7b669087
GB
4772/* See nat/linux-nat.h. */
4773
4774ptid_t
4775current_lwp_ptid (void)
4776{
4777 gdb_assert (ptid_lwp_p (inferior_ptid));
4778 return inferior_ptid;
4779}
4780
2c0b251b
PA
4781/* Provide a prototype to silence -Wmissing-prototypes. */
4782extern initialize_file_ftype _initialize_linux_nat;
4783
d6b0e80f
AC
4784void
4785_initialize_linux_nat (void)
4786{
ccce17b0
YQ
4787 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4788 &debug_linux_nat, _("\
b84876c2
PA
4789Set debugging of GNU/Linux lwp module."), _("\
4790Show debugging of GNU/Linux lwp module."), _("\
4791Enables printf debugging output."),
ccce17b0
YQ
4792 NULL,
4793 show_debug_linux_nat,
4794 &setdebuglist, &showdebuglist);
b84876c2 4795
7a6a1731
GB
4796 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4797 &debug_linux_namespaces, _("\
4798Set debugging of GNU/Linux namespaces module."), _("\
4799Show debugging of GNU/Linux namespaces module."), _("\
4800Enables printf debugging output."),
4801 NULL,
4802 NULL,
4803 &setdebuglist, &showdebuglist);
4804
b84876c2 4805 /* Save this mask as the default. */
d6b0e80f
AC
4806 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4807
7feb7d06
PA
4808 /* Install a SIGCHLD handler. */
4809 sigchld_action.sa_handler = sigchld_handler;
4810 sigemptyset (&sigchld_action.sa_mask);
4811 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4812
4813 /* Make it the default. */
7feb7d06 4814 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4815
4816 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4817 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4818 sigdelset (&suspend_mask, SIGCHLD);
4819
7feb7d06 4820 sigemptyset (&blocked_mask);
d6b0e80f
AC
4821}
4822\f
4823
4824/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4825 the GNU/Linux Threads library and therefore doesn't really belong
4826 here. */
4827
d6b0e80f
AC
4828/* Return the set of signals used by the threads library in *SET. */
4829
4830void
4831lin_thread_get_thread_signals (sigset_t *set)
4832{
d6b0e80f
AC
4833 sigemptyset (set);
4834
4a6ed09b
PA
4835 /* NPTL reserves the first two RT signals, but does not provide any
4836 way for the debugger to query the signal numbers - fortunately
4837 they don't change. */
4838 sigaddset (set, __SIGRTMIN);
4839 sigaddset (set, __SIGRTMIN + 1);
d6b0e80f 4840}