[Linux] Optimize PID -> struct lwp_info lookup
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
3993f6b1 1/* GNU/Linux native-dependent code common to multiple platforms.
dba24537 2
618f726f 3 Copyright (C) 2001-2016 Free Software Foundation, Inc.
3993f6b1
DJ
4
5 This file is part of GDB.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
a9762ec7 9 the Free Software Foundation; either version 3 of the License, or
3993f6b1
DJ
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
a9762ec7 18 along with this program. If not, see <http://www.gnu.org/licenses/>. */
3993f6b1
DJ
19
20#include "defs.h"
21#include "inferior.h"
45741a9c 22#include "infrun.h"
3993f6b1 23#include "target.h"
96d7229d
LM
24#include "nat/linux-nat.h"
25#include "nat/linux-waitpid.h"
3993f6b1 26#include "gdb_wait.h"
d6b0e80f
AC
27#include <unistd.h>
28#include <sys/syscall.h>
5826e159 29#include "nat/gdb_ptrace.h"
0274a8ce 30#include "linux-nat.h"
125f8a3d
GB
31#include "nat/linux-ptrace.h"
32#include "nat/linux-procfs.h"
8cc73a39 33#include "nat/linux-personality.h"
ac264b3b 34#include "linux-fork.h"
d6b0e80f
AC
35#include "gdbthread.h"
36#include "gdbcmd.h"
37#include "regcache.h"
4f844a66 38#include "regset.h"
dab06dbe 39#include "inf-child.h"
10d6c8cd
DJ
40#include "inf-ptrace.h"
41#include "auxv.h"
1777feb0 42#include <sys/procfs.h> /* for elf_gregset etc. */
dba24537
AC
43#include "elf-bfd.h" /* for elfcore_write_* */
44#include "gregset.h" /* for gregset */
45#include "gdbcore.h" /* for get_exec_file */
46#include <ctype.h> /* for isdigit */
53ce3c39 47#include <sys/stat.h> /* for struct stat */
dba24537 48#include <fcntl.h> /* for O_RDONLY */
b84876c2
PA
49#include "inf-loop.h"
50#include "event-loop.h"
51#include "event-top.h"
07e059b5
VP
52#include <pwd.h>
53#include <sys/types.h>
2978b111 54#include <dirent.h>
07e059b5 55#include "xml-support.h"
efcbbd14 56#include <sys/vfs.h>
6c95b8df 57#include "solib.h"
125f8a3d 58#include "nat/linux-osdata.h"
6432734d 59#include "linux-tdep.h"
7dcd53a0 60#include "symfile.h"
5808517f
YQ
61#include "agent.h"
62#include "tracepoint.h"
87b0bb13 63#include "buffer.h"
6ecd4729 64#include "target-descriptions.h"
614c279d 65#include "filestuff.h"
77e371c0 66#include "objfiles.h"
7a6a1731
GB
67#include "nat/linux-namespaces.h"
68#include "fileio.h"
efcbbd14
UW
69
70#ifndef SPUFS_MAGIC
71#define SPUFS_MAGIC 0x23c9b64e
72#endif
dba24537 73
1777feb0 74/* This comment documents the high-level logic of this file.
8a77dff3
VP
75
76Waiting for events in sync mode
77===============================
78
4a6ed09b
PA
79When waiting for an event in a specific thread, we just use waitpid,
80passing the specific pid, and not passing WNOHANG.
81
 82When waiting for an event in all threads, waitpid by itself is not sufficient:
83
84- If the thread group leader exits while other threads in the thread
85 group still exist, waitpid(TGID, ...) hangs. That waitpid won't
86 return an exit status until the other threads in the group are
87 reaped.
88
89- When a non-leader thread execs, that thread just vanishes without
90 reporting an exit (so we'd hang if we waited for it explicitly in
91 that case). The exec event is instead reported to the TGID pid.
92
93The solution is to always use -1 and WNOHANG, together with
94sigsuspend.
95
96First, we use non-blocking waitpid to check for events. If nothing is
97found, we use sigsuspend to wait for SIGCHLD. When SIGCHLD arrives,
98it means something happened to a child process. As soon as we know
99there's an event, we get back to calling nonblocking waitpid.
100
101Note that SIGCHLD should be blocked between waitpid and sigsuspend
102calls, so that we don't miss a signal. If SIGCHLD arrives in between,
 103while it's blocked, the signal becomes pending and sigsuspend
104immediately notices it and returns.
105
106Waiting for events in async mode (TARGET_WNOHANG)
107=================================================
8a77dff3 108
7feb7d06
PA
109In async mode, GDB should always be ready to handle both user input
 110and target events, so neither blocking waitpid nor sigsuspend is a
 111viable option. Instead, we should asynchronously notify the GDB main
112event loop whenever there's an unprocessed event from the target. We
113detect asynchronous target events by handling SIGCHLD signals. To
114notify the event loop about target events, the self-pipe trick is used
 115--- a pipe is registered as a waitable event source in the event loop,
 116the event loop does select/poll on the read end of this pipe (as well as on
117other event sources, e.g., stdin), and the SIGCHLD handler writes a
118byte to this pipe. This is more portable than relying on
119pselect/ppoll, since on kernels that lack those syscalls, libc
120emulates them with select/poll+sigprocmask, and that is racy
121(a.k.a. plain broken).
122
 123Obviously, if we fail to notify the event loop when there is a target
124event, it's bad. OTOH, if we notify the event loop when there's no
125event from the target, linux_nat_wait will detect that there's no real
 126event to report, and return an event of type TARGET_WAITKIND_IGNORE.
127This is mostly harmless, but it will waste time and is better avoided.
128
129The main design point is that every time GDB is outside linux-nat.c,
130we have a SIGCHLD handler installed that is called when something
131happens to the target and notifies the GDB event loop. Whenever GDB
132core decides to handle the event, and calls into linux-nat.c, we
 133process things as in sync mode, except that we never block in
134sigsuspend.
135
136While processing an event, we may end up momentarily blocked in
 137waitpid calls. Those waitpid calls, while blocking, are guaranteed to
138return quickly. E.g., in all-stop mode, before reporting to the core
139that an LWP hit a breakpoint, all LWPs are stopped by sending them
140SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
141Note that this is different from blocking indefinitely waiting for the
142next event --- here, we're already handling an event.
8a77dff3
VP
143
144Use of signals
145==============
146
147We stop threads by sending a SIGSTOP. The use of SIGSTOP instead of another
 148signal is not entirely significant; we just need a signal to be delivered,
 149so that we can intercept it. SIGSTOP's advantage is that it cannot be
150blocked. A disadvantage is that it is not a real-time signal, so it can only
151be queued once; we do not keep track of other sources of SIGSTOP.
152
153Two other signals that can't be blocked are SIGCONT and SIGKILL. But we can't
154use them, because they have special behavior when the signal is generated -
155not when it is delivered. SIGCONT resumes the entire thread group and SIGKILL
156kills the entire thread group.
157
158A delivered SIGSTOP would stop the entire thread group, not just the thread we
159tkill'd. But we never let the SIGSTOP be delivered; we always intercept and
160cancel it (by PTRACE_CONT without passing SIGSTOP).
161
162We could use a real-time signal instead. This would solve those problems; we
163could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
164But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
165generates it, and there are races with trying to find a signal that is not
4a6ed09b
PA
166blocked.
167
168Exec events
169===========
170
 171The case of a thread group (process) with 3 or more threads, where a
 172thread other than the leader execs, is worth detailing:
173
174On an exec, the Linux kernel destroys all threads except the execing
175one in the thread group, and resets the execing thread's tid to the
176tgid. No exit notification is sent for the execing thread -- from the
177ptracer's perspective, it appears as though the execing thread just
178vanishes. Until we reap all other threads except the leader and the
179execing thread, the leader will be zombie, and the execing thread will
180be in `D (disc sleep)' state. As soon as all other threads are
181reaped, the execing thread changes its tid to the tgid, and the
182previous (zombie) leader vanishes, giving place to the "new"
183leader. */
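/* The following is an illustrative sketch, not part of GDB: it boils the
   sync-mode scheme described above down to plain POSIX calls.  SIGCHLD is
   kept blocked between the waitpid poll and sigsuspend, so that a signal
   arriving in between is left pending and sigsuspend returns at once.  It
   assumes a SIGCHLD handler is installed (as GDB installs sigchld_action
   below); the function name is hypothetical.  */

#include <signal.h>
#include <sys/wait.h>

static pid_t
example_wait_for_child_event (int *status)
{
  sigset_t chld_mask, suspend_mask;

  /* Block SIGCHLD; SUSPEND_MASK keeps the original mask (with SIGCHLD
     unblocked) for use inside sigsuspend.  */
  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &suspend_mask);

  while (1)
    {
      /* Poll all children without blocking (GDB additionally passes
	 __WALL so that clone/LWP children are reported too).  */
      pid_t pid = waitpid (-1, status, WNOHANG);

      if (pid != 0)
	{
	  /* Either an event or an error; restore the mask and return.  */
	  sigprocmask (SIG_SETMASK, &suspend_mask, NULL);
	  return pid;
	}

      /* No event yet: atomically unblock SIGCHLD and sleep until some
	 signal is delivered, then poll again.  */
      sigsuspend (&suspend_mask);
    }
}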
a0ef4274 184
dba24537
AC
185#ifndef O_LARGEFILE
186#define O_LARGEFILE 0
187#endif
0274a8ce 188
433bbbf8 189/* Does the current host support PTRACE_GETREGSET? */
0bdb2f78 190enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;
433bbbf8 191
10d6c8cd
DJ
192/* The single-threaded native GNU/Linux target_ops. We save a pointer for
193 the use of the multi-threaded target. */
194static struct target_ops *linux_ops;
f973ed9c 195static struct target_ops linux_ops_saved;
10d6c8cd 196
9f0bdab8 197/* The method to call, if any, when a new thread is attached. */
7b50312a
PA
198static void (*linux_nat_new_thread) (struct lwp_info *);
199
26cb8b7c
PA
200/* The method to call, if any, when a new fork is attached. */
201static linux_nat_new_fork_ftype *linux_nat_new_fork;
202
203/* The method to call, if any, when a process is no longer
204 attached. */
205static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;
206
7b50312a
PA
207/* Hook to call prior to resuming a thread. */
208static void (*linux_nat_prepare_to_resume) (struct lwp_info *);
9f0bdab8 209
5b009018
PA
210/* The method to call, if any, when the siginfo object needs to be
211 converted between the layout returned by ptrace, and the layout in
212 the architecture of the inferior. */
a5362b9a 213static int (*linux_nat_siginfo_fixup) (siginfo_t *,
5b009018
PA
214 gdb_byte *,
215 int);
216
ac264b3b
MS
217/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
218 Called by our to_xfer_partial. */
4ac248ca 219static target_xfer_partial_ftype *super_xfer_partial;
10d6c8cd 220
6a3cb8e8
PA
221/* The saved to_close method, inherited from inf-ptrace.c.
222 Called by our to_close. */
223static void (*super_close) (struct target_ops *);
224
ccce17b0 225static unsigned int debug_linux_nat;
920d2a44
AC
226static void
227show_debug_linux_nat (struct ui_file *file, int from_tty,
228 struct cmd_list_element *c, const char *value)
229{
230 fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
231 value);
232}
d6b0e80f 233
ae087d01
DJ
234struct simple_pid_list
235{
236 int pid;
3d799a95 237 int status;
ae087d01
DJ
238 struct simple_pid_list *next;
239};
240struct simple_pid_list *stopped_pids;
241
aa01bd36
PA
242/* Whether target_thread_events is in effect. */
243static int report_thread_events;
244
3dd5b83d
PA
245/* Async mode support. */
246
b84876c2
PA
247/* The read/write ends of the pipe registered as waitable file in the
248 event loop. */
249static int linux_nat_event_pipe[2] = { -1, -1 };
250
198297aa
PA
251/* True if we're currently in async mode. */
252#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)
253
7feb7d06 254/* Flush the event pipe. */
b84876c2 255
7feb7d06
PA
256static void
257async_file_flush (void)
b84876c2 258{
7feb7d06
PA
259 int ret;
260 char buf;
b84876c2 261
7feb7d06 262 do
b84876c2 263 {
7feb7d06 264 ret = read (linux_nat_event_pipe[0], &buf, 1);
b84876c2 265 }
7feb7d06 266 while (ret >= 0 || (ret == -1 && errno == EINTR));
b84876c2
PA
267}
268
7feb7d06
PA
 269/* Put something (anything, doesn't matter what, or how much) in the
 270 event pipe, so that the select/poll in the event-loop realizes we have
271 something to process. */
252fbfc8 272
b84876c2 273static void
7feb7d06 274async_file_mark (void)
b84876c2 275{
7feb7d06 276 int ret;
b84876c2 277
7feb7d06
PA
 278 /* It doesn't really matter what the pipe contains, as long as we end
279 up with something in it. Might as well flush the previous
280 left-overs. */
281 async_file_flush ();
b84876c2 282
7feb7d06 283 do
b84876c2 284 {
7feb7d06 285 ret = write (linux_nat_event_pipe[1], "+", 1);
b84876c2 286 }
7feb7d06 287 while (ret == -1 && errno == EINTR);
b84876c2 288
7feb7d06
PA
289 /* Ignore EAGAIN. If the pipe is full, the event loop will already
290 be awakened anyway. */
b84876c2
PA
291}
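/* Illustrative sketch, not part of GDB: the full self-pipe trick whose
   target-side half async_file_flush/async_file_mark above implement.  A
   SIGCHLD handler pokes the write end of a non-blocking pipe; the event
   loop's select then finds the read end readable even if the signal was
   delivered before select was entered.  All names are hypothetical.  */

#include <fcntl.h>
#include <signal.h>
#include <sys/select.h>
#include <unistd.h>

static int example_event_pipe[2] = { -1, -1 };

static void
example_sigchld_handler (int signo)
{
  /* Async-signal-safe: just poke the pipe.  If the pipe is already
     full the write fails with EAGAIN, which is fine --- the event
     loop is going to wake up anyway.  */
  (void) write (example_event_pipe[1], "+", 1);
}

static void
example_register_and_wait (void)
{
  struct sigaction sa;
  fd_set readfds;
  int i;

  pipe (example_event_pipe);
  for (i = 0; i < 2; i++)
    {
      int flags = fcntl (example_event_pipe[i], F_GETFL);

      fcntl (example_event_pipe[i], F_SETFL, flags | O_NONBLOCK);
    }

  sa.sa_handler = example_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = 0;
  sigaction (SIGCHLD, &sa, NULL);

  /* The event loop watches stdin and the pipe's read end; a readable
     pipe means "some child changed state, go call waitpid".  */
  FD_ZERO (&readfds);
  FD_SET (STDIN_FILENO, &readfds);
  FD_SET (example_event_pipe[0], &readfds);
  select (example_event_pipe[0] + 1, &readfds, NULL, NULL, NULL);
}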
292
7feb7d06
PA
293static int kill_lwp (int lwpid, int signo);
294
295static int stop_callback (struct lwp_info *lp, void *data);
2db9a427 296static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
7feb7d06
PA
297
298static void block_child_signals (sigset_t *prev_mask);
299static void restore_child_signals_mask (sigset_t *prev_mask);
2277426b
PA
300
301struct lwp_info;
302static struct lwp_info *add_lwp (ptid_t ptid);
303static void purge_lwp_list (int pid);
4403d8e9 304static void delete_lwp (ptid_t ptid);
2277426b
PA
305static struct lwp_info *find_lwp_pid (ptid_t ptid);
306
8a99810d
PA
307static int lwp_status_pending_p (struct lwp_info *lp);
308
9c02b525
PA
309static int sigtrap_is_event (int status);
310static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;
311
e7ad2f14
PA
312static void save_stop_reason (struct lwp_info *lp);
313
cff068da
GB
314\f
315/* LWP accessors. */
316
317/* See nat/linux-nat.h. */
318
319ptid_t
320ptid_of_lwp (struct lwp_info *lwp)
321{
322 return lwp->ptid;
323}
324
325/* See nat/linux-nat.h. */
326
4b134ca1
GB
327void
328lwp_set_arch_private_info (struct lwp_info *lwp,
329 struct arch_lwp_info *info)
330{
331 lwp->arch_private = info;
332}
333
334/* See nat/linux-nat.h. */
335
336struct arch_lwp_info *
337lwp_arch_private_info (struct lwp_info *lwp)
338{
339 return lwp->arch_private;
340}
341
342/* See nat/linux-nat.h. */
343
cff068da
GB
344int
345lwp_is_stopped (struct lwp_info *lwp)
346{
347 return lwp->stopped;
348}
349
350/* See nat/linux-nat.h. */
351
352enum target_stop_reason
353lwp_stop_reason (struct lwp_info *lwp)
354{
355 return lwp->stop_reason;
356}
357
ae087d01
DJ
358\f
359/* Trivial list manipulation functions to keep track of a list of
360 new stopped processes. */
361static void
3d799a95 362add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
ae087d01 363{
8d749320 364 struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);
e0881a8e 365
ae087d01 366 new_pid->pid = pid;
3d799a95 367 new_pid->status = status;
ae087d01
DJ
368 new_pid->next = *listp;
369 *listp = new_pid;
370}
371
372static int
46a96992 373pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
ae087d01
DJ
374{
375 struct simple_pid_list **p;
376
377 for (p = listp; *p != NULL; p = &(*p)->next)
378 if ((*p)->pid == pid)
379 {
380 struct simple_pid_list *next = (*p)->next;
e0881a8e 381
46a96992 382 *statusp = (*p)->status;
ae087d01
DJ
383 xfree (*p);
384 *p = next;
385 return 1;
386 }
387 return 0;
388}
389
de0d863e
DB
390/* Return the ptrace options that we want to try to enable. */
391
392static int
393linux_nat_ptrace_options (int attached)
394{
395 int options = 0;
396
397 if (!attached)
398 options |= PTRACE_O_EXITKILL;
399
400 options |= (PTRACE_O_TRACESYSGOOD
401 | PTRACE_O_TRACEVFORKDONE
402 | PTRACE_O_TRACEVFORK
403 | PTRACE_O_TRACEFORK
404 | PTRACE_O_TRACEEXEC);
405
406 return options;
407}
408
96d7229d 409/* Initialize ptrace warnings and check for supported ptrace
beed38b8
JB
410 features given PID.
411
412 ATTACHED should be nonzero iff we attached to the inferior. */
3993f6b1
DJ
413
414static void
beed38b8 415linux_init_ptrace (pid_t pid, int attached)
3993f6b1 416{
de0d863e
DB
417 int options = linux_nat_ptrace_options (attached);
418
419 linux_enable_event_reporting (pid, options);
96d7229d 420 linux_ptrace_init_warnings ();
4de4c07c
DJ
421}
422
6d8fd2b7 423static void
f045800c 424linux_child_post_attach (struct target_ops *self, int pid)
4de4c07c 425{
beed38b8 426 linux_init_ptrace (pid, 1);
4de4c07c
DJ
427}
428
10d6c8cd 429static void
2e97a79e 430linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
4de4c07c 431{
beed38b8 432 linux_init_ptrace (ptid_get_pid (ptid), 0);
4de4c07c
DJ
433}
434
4403d8e9
JK
435/* Return the number of known LWPs in the tgid given by PID. */
436
437static int
438num_lwps (int pid)
439{
440 int count = 0;
441 struct lwp_info *lp;
442
443 for (lp = lwp_list; lp; lp = lp->next)
444 if (ptid_get_pid (lp->ptid) == pid)
445 count++;
446
447 return count;
448}
449
 450/* Call delete_lwp with a prototype compatible with make_cleanup. */
451
452static void
453delete_lwp_cleanup (void *lp_voidp)
454{
9a3c8263 455 struct lwp_info *lp = (struct lwp_info *) lp_voidp;
4403d8e9
JK
456
457 delete_lwp (lp->ptid);
458}
459
d83ad864
DB
460/* Target hook for follow_fork. On entry inferior_ptid must be the
461 ptid of the followed inferior. At return, inferior_ptid will be
462 unchanged. */
463
6d8fd2b7 464static int
07107ca6
LM
465linux_child_follow_fork (struct target_ops *ops, int follow_child,
466 int detach_fork)
3993f6b1 467{
d83ad864 468 if (!follow_child)
4de4c07c 469 {
6c95b8df 470 struct lwp_info *child_lp = NULL;
d83ad864
DB
471 int status = W_STOPCODE (0);
472 struct cleanup *old_chain;
473 int has_vforked;
79639e11 474 ptid_t parent_ptid, child_ptid;
d83ad864
DB
475 int parent_pid, child_pid;
476
477 has_vforked = (inferior_thread ()->pending_follow.kind
478 == TARGET_WAITKIND_VFORKED);
79639e11
PA
479 parent_ptid = inferior_ptid;
480 child_ptid = inferior_thread ()->pending_follow.value.related_pid;
481 parent_pid = ptid_get_lwp (parent_ptid);
482 child_pid = ptid_get_lwp (child_ptid);
4de4c07c 483
1777feb0 484 /* We're already attached to the parent, by default. */
d83ad864 485 old_chain = save_inferior_ptid ();
79639e11 486 inferior_ptid = child_ptid;
d83ad864
DB
487 child_lp = add_lwp (inferior_ptid);
488 child_lp->stopped = 1;
489 child_lp->last_resume_kind = resume_stop;
4de4c07c 490
ac264b3b
MS
491 /* Detach new forked process? */
492 if (detach_fork)
f75c00e4 493 {
4403d8e9
JK
494 make_cleanup (delete_lwp_cleanup, child_lp);
495
4403d8e9
JK
496 if (linux_nat_prepare_to_resume != NULL)
497 linux_nat_prepare_to_resume (child_lp);
c077881a
HZ
498
499 /* When debugging an inferior in an architecture that supports
500 hardware single stepping on a kernel without commit
501 6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
502 process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
503 set if the parent process had them set.
504 To work around this, single step the child process
505 once before detaching to clear the flags. */
506
507 if (!gdbarch_software_single_step_p (target_thread_architecture
508 (child_lp->ptid)))
509 {
c077881a
HZ
510 linux_disable_event_reporting (child_pid);
511 if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
512 perror_with_name (_("Couldn't do single step"));
513 if (my_waitpid (child_pid, &status, 0) < 0)
514 perror_with_name (_("Couldn't wait vfork process"));
515 }
516
517 if (WIFSTOPPED (status))
9caaaa83
PA
518 {
519 int signo;
520
521 signo = WSTOPSIG (status);
522 if (signo != 0
523 && !signal_pass_state (gdb_signal_from_host (signo)))
524 signo = 0;
525 ptrace (PTRACE_DETACH, child_pid, 0, signo);
526 }
4403d8e9 527
d83ad864 528 /* Resets value of inferior_ptid to parent ptid. */
4403d8e9 529 do_cleanups (old_chain);
ac264b3b
MS
530 }
531 else
532 {
6c95b8df 533 /* Let the thread_db layer learn about this new process. */
2277426b 534 check_for_thread_db ();
ac264b3b 535 }
9016a515 536
d83ad864
DB
537 do_cleanups (old_chain);
538
9016a515
DJ
539 if (has_vforked)
540 {
3ced3da4 541 struct lwp_info *parent_lp;
6c95b8df 542
79639e11 543 parent_lp = find_lwp_pid (parent_ptid);
96d7229d 544 gdb_assert (linux_supports_tracefork () >= 0);
3ced3da4 545
96d7229d 546 if (linux_supports_tracevforkdone ())
9016a515 547 {
6c95b8df
PA
548 if (debug_linux_nat)
549 fprintf_unfiltered (gdb_stdlog,
550 "LCFF: waiting for VFORK_DONE on %d\n",
551 parent_pid);
3ced3da4 552 parent_lp->stopped = 1;
9016a515 553
6c95b8df
PA
554 /* We'll handle the VFORK_DONE event like any other
555 event, in target_wait. */
9016a515
DJ
556 }
557 else
558 {
559 /* We can't insert breakpoints until the child has
560 finished with the shared memory region. We need to
561 wait until that happens. Ideal would be to just
562 call:
563 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
564 - waitpid (parent_pid, &status, __WALL);
565 However, most architectures can't handle a syscall
566 being traced on the way out if it wasn't traced on
567 the way in.
568
569 We might also think to loop, continuing the child
570 until it exits or gets a SIGTRAP. One problem is
571 that the child might call ptrace with PTRACE_TRACEME.
572
573 There's no simple and reliable way to figure out when
574 the vforked child will be done with its copy of the
575 shared memory. We could step it out of the syscall,
576 two instructions, let it go, and then single-step the
577 parent once. When we have hardware single-step, this
578 would work; with software single-step it could still
579 be made to work but we'd have to be able to insert
580 single-step breakpoints in the child, and we'd have
581 to insert -just- the single-step breakpoint in the
582 parent. Very awkward.
583
584 In the end, the best we can do is to make sure it
585 runs for a little while. Hopefully it will be out of
586 range of any breakpoints we reinsert. Usually this
587 is only the single-step breakpoint at vfork's return
588 point. */
589
6c95b8df
PA
590 if (debug_linux_nat)
591 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
592 "LCFF: no VFORK_DONE "
593 "support, sleeping a bit\n");
6c95b8df 594
9016a515 595 usleep (10000);
9016a515 596
6c95b8df
PA
597 /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
598 and leave it pending. The next linux_nat_resume call
599 will notice a pending event, and bypasses actually
600 resuming the inferior. */
3ced3da4
PA
601 parent_lp->status = 0;
602 parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
603 parent_lp->stopped = 1;
6c95b8df
PA
604
605 /* If we're in async mode, need to tell the event loop
606 there's something here to process. */
d9d41e78 607 if (target_is_async_p ())
6c95b8df
PA
608 async_file_mark ();
609 }
9016a515 610 }
4de4c07c 611 }
3993f6b1 612 else
4de4c07c 613 {
3ced3da4 614 struct lwp_info *child_lp;
4de4c07c 615
3ced3da4
PA
616 child_lp = add_lwp (inferior_ptid);
617 child_lp->stopped = 1;
25289eb2 618 child_lp->last_resume_kind = resume_stop;
6c95b8df 619
6c95b8df 620 /* Let the thread_db layer learn about this new process. */
ef29ce1a 621 check_for_thread_db ();
4de4c07c
DJ
622 }
623
624 return 0;
625}
626
4de4c07c 627\f
77b06cd7 628static int
a863b201 629linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
4de4c07c 630{
96d7229d 631 return !linux_supports_tracefork ();
3993f6b1
DJ
632}
633
eb73ad13 634static int
973fc227 635linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
636{
637 return 0;
638}
639
77b06cd7 640static int
3ecc7da0 641linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
3993f6b1 642{
96d7229d 643 return !linux_supports_tracefork ();
3993f6b1
DJ
644}
645
eb73ad13 646static int
e98cf0cd 647linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
648{
649 return 0;
650}
651
77b06cd7 652static int
ba025e51 653linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
3993f6b1 654{
96d7229d 655 return !linux_supports_tracefork ();
3993f6b1
DJ
656}
657
eb73ad13 658static int
758e29d2 659linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
eb73ad13
PA
660{
661 return 0;
662}
663
a96d9b2e 664static int
ff214e67
TT
665linux_child_set_syscall_catchpoint (struct target_ops *self,
666 int pid, int needed, int any_count,
a96d9b2e
SDJ
667 int table_size, int *table)
668{
96d7229d 669 if (!linux_supports_tracesysgood ())
77b06cd7
TJB
670 return 1;
671
a96d9b2e
SDJ
672 /* On GNU/Linux, we ignore the arguments. It means that we only
673 enable the syscall catchpoints, but do not disable them.
77b06cd7 674
a96d9b2e
SDJ
675 Also, we do not use the `table' information because we do not
676 filter system calls here. We let GDB do the logic for us. */
677 return 0;
678}
679
774113b0
PA
680/* List of known LWPs, keyed by LWP PID. This speeds up the common
681 case of mapping a PID returned from the kernel to our corresponding
682 lwp_info data structure. */
683static htab_t lwp_lwpid_htab;
684
685/* Calculate a hash from a lwp_info's LWP PID. */
686
687static hashval_t
688lwp_info_hash (const void *ap)
689{
690 const struct lwp_info *lp = (struct lwp_info *) ap;
691 pid_t pid = ptid_get_lwp (lp->ptid);
692
693 return iterative_hash_object (pid, 0);
694}
695
696/* Equality function for the lwp_info hash table. Compares the LWP's
697 PID. */
698
699static int
700lwp_lwpid_htab_eq (const void *a, const void *b)
701{
702 const struct lwp_info *entry = (const struct lwp_info *) a;
703 const struct lwp_info *element = (const struct lwp_info *) b;
704
705 return ptid_get_lwp (entry->ptid) == ptid_get_lwp (element->ptid);
706}
707
708/* Create the lwp_lwpid_htab hash table. */
709
710static void
711lwp_lwpid_htab_create (void)
712{
713 lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
714}
715
716/* Add LP to the hash table. */
717
718static void
719lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
720{
721 void **slot;
722
723 slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
724 gdb_assert (slot != NULL && *slot == NULL);
725 *slot = lp;
726}
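/* Illustrative sketch, not part of GDB: looking up an LWP in
   lwp_lwpid_htab goes through a dummy key whose ptid carries the LWP id,
   so that lwp_info_hash and lwp_lwpid_htab_eq see the same value the
   entries were inserted under.  find_lwp_pid further down does exactly
   this; the function name here is hypothetical.  Compared with walking
   lwp_list, this makes the common PID -> lwp_info mapping roughly
   constant time, which is the point of this change.  */

static struct lwp_info *
example_lookup_lwp (long lwpid)
{
  struct lwp_info dummy;

  dummy.ptid = ptid_build (0, lwpid, 0);
  return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
}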
727
728/* Head of doubly-linked list of known LWPs. Sorted by reverse
729 creation order. This order is assumed in some cases. E.g.,
 730 reaping status after killing all lwps of a process: the leader LWP
731 must be reaped last. */
9f0bdab8 732struct lwp_info *lwp_list;
774113b0
PA
733
734/* Add LP to sorted-by-reverse-creation-order doubly-linked list. */
735
736static void
737lwp_list_add (struct lwp_info *lp)
738{
739 lp->next = lwp_list;
740 if (lwp_list != NULL)
741 lwp_list->prev = lp;
742 lwp_list = lp;
743}
744
745/* Remove LP from sorted-by-reverse-creation-order doubly-linked
746 list. */
747
748static void
749lwp_list_remove (struct lwp_info *lp)
750{
751 /* Remove from sorted-by-creation-order list. */
752 if (lp->next != NULL)
753 lp->next->prev = lp->prev;
754 if (lp->prev != NULL)
755 lp->prev->next = lp->next;
756 if (lp == lwp_list)
757 lwp_list = lp->next;
758}
759
d6b0e80f
AC
760\f
761
d6b0e80f
AC
762/* Original signal mask. */
763static sigset_t normal_mask;
764
765/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
766 _initialize_linux_nat. */
767static sigset_t suspend_mask;
768
7feb7d06
PA
 769/* Signals to block to make sigsuspend work. */
770static sigset_t blocked_mask;
771
772/* SIGCHLD action. */
773struct sigaction sigchld_action;
b84876c2 774
7feb7d06
PA
775/* Block child signals (SIGCHLD and linux threads signals), and store
776 the previous mask in PREV_MASK. */
84e46146 777
7feb7d06
PA
778static void
779block_child_signals (sigset_t *prev_mask)
780{
781 /* Make sure SIGCHLD is blocked. */
782 if (!sigismember (&blocked_mask, SIGCHLD))
783 sigaddset (&blocked_mask, SIGCHLD);
784
785 sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
786}
787
788/* Restore child signals mask, previously returned by
789 block_child_signals. */
790
791static void
792restore_child_signals_mask (sigset_t *prev_mask)
793{
794 sigprocmask (SIG_SETMASK, prev_mask, NULL);
795}
2455069d
UW
796
797/* Mask of signals to pass directly to the inferior. */
798static sigset_t pass_mask;
799
800/* Update signals to pass to the inferior. */
801static void
94bedb42
TT
802linux_nat_pass_signals (struct target_ops *self,
803 int numsigs, unsigned char *pass_signals)
2455069d
UW
804{
805 int signo;
806
807 sigemptyset (&pass_mask);
808
809 for (signo = 1; signo < NSIG; signo++)
810 {
2ea28649 811 int target_signo = gdb_signal_from_host (signo);
2455069d
UW
812 if (target_signo < numsigs && pass_signals[target_signo])
813 sigaddset (&pass_mask, signo);
814 }
815}
816
d6b0e80f
AC
817\f
818
819/* Prototypes for local functions. */
820static int stop_wait_callback (struct lwp_info *lp, void *data);
8dd27370 821static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
20ba1ce6 822static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);
710151dd 823
d6b0e80f 824\f
d6b0e80f 825
7b50312a
PA
826/* Destroy and free LP. */
827
828static void
829lwp_free (struct lwp_info *lp)
830{
831 xfree (lp->arch_private);
832 xfree (lp);
833}
834
774113b0 835/* Traversal function for purge_lwp_list. */
d90e17a7 836
774113b0
PA
837static int
838lwp_lwpid_htab_remove_pid (void **slot, void *info)
d90e17a7 839{
774113b0
PA
840 struct lwp_info *lp = (struct lwp_info *) *slot;
841 int pid = *(int *) info;
d90e17a7 842
774113b0 843 if (ptid_get_pid (lp->ptid) == pid)
d90e17a7 844 {
774113b0
PA
845 htab_clear_slot (lwp_lwpid_htab, slot);
846 lwp_list_remove (lp);
847 lwp_free (lp);
848 }
d90e17a7 849
774113b0
PA
850 return 1;
851}
d90e17a7 852
774113b0
PA
 853/* Remove all LWPs belonging to PID from the lwp list. */
854
855static void
856purge_lwp_list (int pid)
857{
858 htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
d90e17a7
PA
859}
860
26cb8b7c
PA
861/* Add the LWP specified by PTID to the list. PTID is the first LWP
862 in the process. Return a pointer to the structure describing the
863 new LWP.
864
865 This differs from add_lwp in that we don't let the arch specific
866 bits know about this new thread. Current clients of this callback
867 take the opportunity to install watchpoints in the new thread, and
868 we shouldn't do that for the first thread. If we're spawning a
869 child ("run"), the thread executes the shell wrapper first, and we
870 shouldn't touch it until it execs the program we want to debug.
871 For "attach", it'd be okay to call the callback, but it's not
872 necessary, because watchpoints can't yet have been inserted into
873 the inferior. */
d6b0e80f
AC
874
875static struct lwp_info *
26cb8b7c 876add_initial_lwp (ptid_t ptid)
d6b0e80f
AC
877{
878 struct lwp_info *lp;
879
dfd4cc63 880 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 881
8d749320 882 lp = XNEW (struct lwp_info);
d6b0e80f
AC
883
884 memset (lp, 0, sizeof (struct lwp_info));
885
25289eb2 886 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
887 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
888
889 lp->ptid = ptid;
dc146f7c 890 lp->core = -1;
d6b0e80f 891
774113b0
PA
892 /* Add to sorted-by-reverse-creation-order list. */
893 lwp_list_add (lp);
894
895 /* Add to keyed-by-pid htab. */
896 lwp_lwpid_htab_add_lwp (lp);
d6b0e80f 897
26cb8b7c
PA
898 return lp;
899}
900
901/* Add the LWP specified by PID to the list. Return a pointer to the
902 structure describing the new LWP. The LWP should already be
903 stopped. */
904
905static struct lwp_info *
906add_lwp (ptid_t ptid)
907{
908 struct lwp_info *lp;
909
910 lp = add_initial_lwp (ptid);
911
6e012a6c
PA
912 /* Let the arch specific bits know about this new thread. Current
913 clients of this callback take the opportunity to install
26cb8b7c
PA
914 watchpoints in the new thread. We don't do this for the first
915 thread though. See add_initial_lwp. */
916 if (linux_nat_new_thread != NULL)
7b50312a 917 linux_nat_new_thread (lp);
9f0bdab8 918
d6b0e80f
AC
919 return lp;
920}
921
922/* Remove the LWP specified by PID from the list. */
923
924static void
925delete_lwp (ptid_t ptid)
926{
774113b0
PA
927 struct lwp_info *lp;
928 void **slot;
929 struct lwp_info dummy;
d6b0e80f 930
774113b0
PA
931 dummy.ptid = ptid;
932 slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
933 if (slot == NULL)
934 return;
d6b0e80f 935
774113b0
PA
936 lp = *(struct lwp_info **) slot;
937 gdb_assert (lp != NULL);
d6b0e80f 938
774113b0 939 htab_clear_slot (lwp_lwpid_htab, slot);
d6b0e80f 940
774113b0
PA
941 /* Remove from sorted-by-creation-order list. */
942 lwp_list_remove (lp);
d6b0e80f 943
774113b0 944 /* Release. */
7b50312a 945 lwp_free (lp);
d6b0e80f
AC
946}
947
948/* Return a pointer to the structure describing the LWP corresponding
949 to PID. If no corresponding LWP could be found, return NULL. */
950
951static struct lwp_info *
952find_lwp_pid (ptid_t ptid)
953{
954 struct lwp_info *lp;
955 int lwp;
774113b0 956 struct lwp_info dummy;
d6b0e80f 957
dfd4cc63
LM
958 if (ptid_lwp_p (ptid))
959 lwp = ptid_get_lwp (ptid);
d6b0e80f 960 else
dfd4cc63 961 lwp = ptid_get_pid (ptid);
d6b0e80f 962
774113b0
PA
963 dummy.ptid = ptid_build (0, lwp, 0);
964 lp = (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
965 return lp;
d6b0e80f
AC
966}
967
6d4ee8c6 968/* See nat/linux-nat.h. */
d6b0e80f
AC
969
970struct lwp_info *
d90e17a7 971iterate_over_lwps (ptid_t filter,
6d4ee8c6 972 iterate_over_lwps_ftype callback,
d90e17a7 973 void *data)
d6b0e80f
AC
974{
975 struct lwp_info *lp, *lpnext;
976
977 for (lp = lwp_list; lp; lp = lpnext)
978 {
979 lpnext = lp->next;
d90e17a7
PA
980
981 if (ptid_match (lp->ptid, filter))
982 {
6d4ee8c6 983 if ((*callback) (lp, data) != 0)
d90e17a7
PA
984 return lp;
985 }
d6b0e80f
AC
986 }
987
988 return NULL;
989}
990
2277426b
PA
991/* Update our internal state when changing from one checkpoint to
992 another indicated by NEW_PTID. We can only switch single-threaded
993 applications, so we only create one new LWP, and the previous list
994 is discarded. */
f973ed9c
DJ
995
996void
997linux_nat_switch_fork (ptid_t new_ptid)
998{
999 struct lwp_info *lp;
1000
dfd4cc63 1001 purge_lwp_list (ptid_get_pid (inferior_ptid));
2277426b 1002
f973ed9c
DJ
1003 lp = add_lwp (new_ptid);
1004 lp->stopped = 1;
e26af52f 1005
2277426b
PA
1006 /* This changes the thread's ptid while preserving the gdb thread
1007 num. Also changes the inferior pid, while preserving the
1008 inferior num. */
1009 thread_change_ptid (inferior_ptid, new_ptid);
1010
1011 /* We've just told GDB core that the thread changed target id, but,
1012 in fact, it really is a different thread, with different register
1013 contents. */
1014 registers_changed ();
e26af52f
DJ
1015}
1016
e26af52f
DJ
1017/* Handle the exit of a single thread LP. */
1018
1019static void
1020exit_lwp (struct lwp_info *lp)
1021{
e09875d4 1022 struct thread_info *th = find_thread_ptid (lp->ptid);
063bfe2e
VP
1023
1024 if (th)
e26af52f 1025 {
17faa917
DJ
1026 if (print_thread_events)
1027 printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));
1028
4f8d22e3 1029 delete_thread (lp->ptid);
e26af52f
DJ
1030 }
1031
1032 delete_lwp (lp->ptid);
1033}
1034
a0ef4274
DJ
1035/* Wait for the LWP specified by LP, which we have just attached to.
1036 Returns a wait status for that LWP, to cache. */
1037
1038static int
4a6ed09b 1039linux_nat_post_attach_wait (ptid_t ptid, int first, int *signalled)
a0ef4274 1040{
dfd4cc63 1041 pid_t new_pid, pid = ptid_get_lwp (ptid);
a0ef4274
DJ
1042 int status;
1043
644cebc9 1044 if (linux_proc_pid_is_stopped (pid))
a0ef4274
DJ
1045 {
1046 if (debug_linux_nat)
1047 fprintf_unfiltered (gdb_stdlog,
1048 "LNPAW: Attaching to a stopped process\n");
1049
1050 /* The process is definitely stopped. It is in a job control
1051 stop, unless the kernel predates the TASK_STOPPED /
1052 TASK_TRACED distinction, in which case it might be in a
1053 ptrace stop. Make sure it is in a ptrace stop; from there we
1054 can kill it, signal it, et cetera.
1055
1056 First make sure there is a pending SIGSTOP. Since we are
1057 already attached, the process can not transition from stopped
1058 to running without a PTRACE_CONT; so we know this signal will
1059 go into the queue. The SIGSTOP generated by PTRACE_ATTACH is
1060 probably already in the queue (unless this kernel is old
1061 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
1062 is not an RT signal, it can only be queued once. */
1063 kill_lwp (pid, SIGSTOP);
1064
1065 /* Finally, resume the stopped process. This will deliver the SIGSTOP
1066 (or a higher priority signal, just like normal PTRACE_ATTACH). */
1067 ptrace (PTRACE_CONT, pid, 0, 0);
1068 }
1069
1070 /* Make sure the initial process is stopped. The user-level threads
1071 layer might want to poke around in the inferior, and that won't
1072 work if things haven't stabilized yet. */
4a6ed09b 1073 new_pid = my_waitpid (pid, &status, __WALL);
dacc9cb2
PP
1074 gdb_assert (pid == new_pid);
1075
1076 if (!WIFSTOPPED (status))
1077 {
1078 /* The pid we tried to attach has apparently just exited. */
1079 if (debug_linux_nat)
1080 fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
1081 pid, status_to_str (status));
1082 return status;
1083 }
a0ef4274
DJ
1084
1085 if (WSTOPSIG (status) != SIGSTOP)
1086 {
1087 *signalled = 1;
1088 if (debug_linux_nat)
1089 fprintf_unfiltered (gdb_stdlog,
1090 "LNPAW: Received %s after attaching\n",
1091 status_to_str (status));
1092 }
1093
1094 return status;
1095}
1096
b84876c2 1097static void
136d6dae
VP
1098linux_nat_create_inferior (struct target_ops *ops,
1099 char *exec_file, char *allargs, char **env,
b84876c2
PA
1100 int from_tty)
1101{
8cc73a39
SDJ
1102 struct cleanup *restore_personality
1103 = maybe_disable_address_space_randomization (disable_randomization);
b84876c2
PA
1104
1105 /* The fork_child mechanism is synchronous and calls target_wait, so
1106 we have to mask the async mode. */
1107
2455069d 1108 /* Make sure we report all signals during startup. */
94bedb42 1109 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1110
136d6dae 1111 linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);
b84876c2 1112
8cc73a39 1113 do_cleanups (restore_personality);
b84876c2
PA
1114}
1115
8784d563
PA
1116/* Callback for linux_proc_attach_tgid_threads. Attach to PTID if not
1117 already attached. Returns true if a new LWP is found, false
1118 otherwise. */
1119
1120static int
1121attach_proc_task_lwp_callback (ptid_t ptid)
1122{
1123 struct lwp_info *lp;
1124
1125 /* Ignore LWPs we're already attached to. */
1126 lp = find_lwp_pid (ptid);
1127 if (lp == NULL)
1128 {
1129 int lwpid = ptid_get_lwp (ptid);
1130
1131 if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
1132 {
1133 int err = errno;
1134
1135 /* Be quiet if we simply raced with the thread exiting.
1136 EPERM is returned if the thread's task still exists, and
1137 is marked as exited or zombie, as well as other
1138 conditions, so in that case, confirm the status in
1139 /proc/PID/status. */
1140 if (err == ESRCH
1141 || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
1142 {
1143 if (debug_linux_nat)
1144 {
1145 fprintf_unfiltered (gdb_stdlog,
1146 "Cannot attach to lwp %d: "
1147 "thread is gone (%d: %s)\n",
1148 lwpid, err, safe_strerror (err));
1149 }
1150 }
1151 else
1152 {
f71f0b0d 1153 warning (_("Cannot attach to lwp %d: %s"),
8784d563
PA
1154 lwpid,
1155 linux_ptrace_attach_fail_reason_string (ptid,
1156 err));
1157 }
1158 }
1159 else
1160 {
1161 if (debug_linux_nat)
1162 fprintf_unfiltered (gdb_stdlog,
1163 "PTRACE_ATTACH %s, 0, 0 (OK)\n",
1164 target_pid_to_str (ptid));
1165
1166 lp = add_lwp (ptid);
8784d563
PA
1167
1168 /* The next time we wait for this LWP we'll see a SIGSTOP as
1169 PTRACE_ATTACH brings it to a halt. */
1170 lp->signalled = 1;
1171
1172 /* We need to wait for a stop before being able to make the
1173 next ptrace call on this LWP. */
1174 lp->must_set_ptrace_flags = 1;
1175 }
1176
1177 return 1;
1178 }
1179 return 0;
1180}
1181
d6b0e80f 1182static void
c0939df1 1183linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f
AC
1184{
1185 struct lwp_info *lp;
d6b0e80f 1186 int status;
af990527 1187 ptid_t ptid;
d6b0e80f 1188
2455069d 1189 /* Make sure we report all signals during attach. */
94bedb42 1190 linux_nat_pass_signals (ops, 0, NULL);
2455069d 1191
492d29ea 1192 TRY
87b0bb13
JK
1193 {
1194 linux_ops->to_attach (ops, args, from_tty);
1195 }
492d29ea 1196 CATCH (ex, RETURN_MASK_ERROR)
87b0bb13
JK
1197 {
1198 pid_t pid = parse_pid_to_attach (args);
1199 struct buffer buffer;
1200 char *message, *buffer_s;
1201
1202 message = xstrdup (ex.message);
1203 make_cleanup (xfree, message);
1204
1205 buffer_init (&buffer);
7ae1a6a6 1206 linux_ptrace_attach_fail_reason (pid, &buffer);
87b0bb13
JK
1207
1208 buffer_grow_str0 (&buffer, "");
1209 buffer_s = buffer_finish (&buffer);
1210 make_cleanup (xfree, buffer_s);
1211
7ae1a6a6
PA
1212 if (*buffer_s != '\0')
1213 throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
1214 else
1215 throw_error (ex.error, "%s", message);
87b0bb13 1216 }
492d29ea 1217 END_CATCH
d6b0e80f 1218
af990527
PA
1219 /* The ptrace base target adds the main thread with (pid,0,0)
1220 format. Decorate it with lwp info. */
dfd4cc63
LM
1221 ptid = ptid_build (ptid_get_pid (inferior_ptid),
1222 ptid_get_pid (inferior_ptid),
1223 0);
af990527
PA
1224 thread_change_ptid (inferior_ptid, ptid);
1225
9f0bdab8 1226 /* Add the initial process as the first LWP to the list. */
26cb8b7c 1227 lp = add_initial_lwp (ptid);
a0ef4274 1228
4a6ed09b 1229 status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->signalled);
dacc9cb2
PP
1230 if (!WIFSTOPPED (status))
1231 {
1232 if (WIFEXITED (status))
1233 {
1234 int exit_code = WEXITSTATUS (status);
1235
1236 target_terminal_ours ();
1237 target_mourn_inferior ();
1238 if (exit_code == 0)
1239 error (_("Unable to attach: program exited normally."));
1240 else
1241 error (_("Unable to attach: program exited with code %d."),
1242 exit_code);
1243 }
1244 else if (WIFSIGNALED (status))
1245 {
2ea28649 1246 enum gdb_signal signo;
dacc9cb2
PP
1247
1248 target_terminal_ours ();
1249 target_mourn_inferior ();
1250
2ea28649 1251 signo = gdb_signal_from_host (WTERMSIG (status));
dacc9cb2
PP
1252 error (_("Unable to attach: program terminated with signal "
1253 "%s, %s."),
2ea28649
PA
1254 gdb_signal_to_name (signo),
1255 gdb_signal_to_string (signo));
dacc9cb2
PP
1256 }
1257
1258 internal_error (__FILE__, __LINE__,
1259 _("unexpected status %d for PID %ld"),
dfd4cc63 1260 status, (long) ptid_get_lwp (ptid));
dacc9cb2
PP
1261 }
1262
a0ef4274 1263 lp->stopped = 1;
9f0bdab8 1264
a0ef4274 1265 /* Save the wait status to report later. */
d6b0e80f 1266 lp->resumed = 1;
a0ef4274
DJ
1267 if (debug_linux_nat)
1268 fprintf_unfiltered (gdb_stdlog,
1269 "LNA: waitpid %ld, saving status %s\n",
dfd4cc63 1270 (long) ptid_get_pid (lp->ptid), status_to_str (status));
710151dd 1271
7feb7d06
PA
1272 lp->status = status;
1273
8784d563
PA
1274 /* We must attach to every LWP. If /proc is mounted, use that to
1275 find them now. The inferior may be using raw clone instead of
1276 using pthreads. But even if it is using pthreads, thread_db
1277 walks structures in the inferior's address space to find the list
1278 of threads/LWPs, and those structures may well be corrupted.
1279 Note that once thread_db is loaded, we'll still use it to list
1280 threads and associate pthread info with each LWP. */
1281 linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
1282 attach_proc_task_lwp_callback);
1283
7feb7d06 1284 if (target_can_async_p ())
6a3753b3 1285 target_async (1);
d6b0e80f
AC
1286}
1287
a0ef4274
DJ
1288/* Get pending status of LP. */
1289static int
1290get_pending_status (struct lwp_info *lp, int *status)
1291{
a493e3e2 1292 enum gdb_signal signo = GDB_SIGNAL_0;
ca2163eb
PA
1293
1294 /* If we paused threads momentarily, we may have stored pending
1295 events in lp->status or lp->waitstatus (see stop_wait_callback),
1296 and GDB core hasn't seen any signal for those threads.
1297 Otherwise, the last signal reported to the core is found in the
1298 thread object's stop_signal.
1299
1300 There's a corner case that isn't handled here at present. Only
1301 if the thread stopped with a TARGET_WAITKIND_STOPPED does
1302 stop_signal make sense as a real signal to pass to the inferior.
1303 Some catchpoint related events, like
1304 TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
a493e3e2 1305 to GDB_SIGNAL_TRAP when the catchpoint triggers. But,
ca2163eb
PA
1306 those traps are debug API (ptrace in our case) related and
1307 induced; the inferior wouldn't see them if it wasn't being
1308 traced. Hence, we should never pass them to the inferior, even
1309 when set to pass state. Since this corner case isn't handled by
1310 infrun.c when proceeding with a signal, for consistency, neither
1311 do we handle it here (or elsewhere in the file we check for
1312 signal pass state). Normally SIGTRAP isn't set to pass state, so
1313 this is really a corner case. */
1314
1315 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
a493e3e2 1316 signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal. */
ca2163eb 1317 else if (lp->status)
2ea28649 1318 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
fbea99ea 1319 else if (target_is_non_stop_p () && !is_executing (lp->ptid))
ca2163eb
PA
1320 {
1321 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1322
16c381f0 1323 signo = tp->suspend.stop_signal;
ca2163eb 1324 }
fbea99ea 1325 else if (!target_is_non_stop_p ())
a0ef4274 1326 {
ca2163eb
PA
1327 struct target_waitstatus last;
1328 ptid_t last_ptid;
4c28f408 1329
ca2163eb 1330 get_last_target_status (&last_ptid, &last);
4c28f408 1331
dfd4cc63 1332 if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
ca2163eb 1333 {
e09875d4 1334 struct thread_info *tp = find_thread_ptid (lp->ptid);
e0881a8e 1335
16c381f0 1336 signo = tp->suspend.stop_signal;
4c28f408 1337 }
ca2163eb 1338 }
4c28f408 1339
ca2163eb 1340 *status = 0;
4c28f408 1341
a493e3e2 1342 if (signo == GDB_SIGNAL_0)
ca2163eb
PA
1343 {
1344 if (debug_linux_nat)
1345 fprintf_unfiltered (gdb_stdlog,
1346 "GPT: lwp %s has no pending signal\n",
1347 target_pid_to_str (lp->ptid));
1348 }
1349 else if (!signal_pass_state (signo))
1350 {
1351 if (debug_linux_nat)
3e43a32a
MS
1352 fprintf_unfiltered (gdb_stdlog,
1353 "GPT: lwp %s had signal %s, "
1354 "but it is in no pass state\n",
ca2163eb 1355 target_pid_to_str (lp->ptid),
2ea28649 1356 gdb_signal_to_string (signo));
a0ef4274 1357 }
a0ef4274 1358 else
4c28f408 1359 {
2ea28649 1360 *status = W_STOPCODE (gdb_signal_to_host (signo));
ca2163eb
PA
1361
1362 if (debug_linux_nat)
1363 fprintf_unfiltered (gdb_stdlog,
1364 "GPT: lwp %s has pending signal %s\n",
1365 target_pid_to_str (lp->ptid),
2ea28649 1366 gdb_signal_to_string (signo));
4c28f408 1367 }
a0ef4274
DJ
1368
1369 return 0;
1370}
1371
d6b0e80f
AC
1372static int
1373detach_callback (struct lwp_info *lp, void *data)
1374{
1375 gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));
1376
1377 if (debug_linux_nat && lp->status)
1378 fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
1379 strsignal (WSTOPSIG (lp->status)),
1380 target_pid_to_str (lp->ptid));
1381
a0ef4274
DJ
1382 /* If there is a pending SIGSTOP, get rid of it. */
1383 if (lp->signalled)
d6b0e80f 1384 {
d6b0e80f
AC
1385 if (debug_linux_nat)
1386 fprintf_unfiltered (gdb_stdlog,
a0ef4274
DJ
1387 "DC: Sending SIGCONT to %s\n",
1388 target_pid_to_str (lp->ptid));
d6b0e80f 1389
dfd4cc63 1390 kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
d6b0e80f 1391 lp->signalled = 0;
d6b0e80f
AC
1392 }
1393
1394 /* We don't actually detach from the LWP that has an id equal to the
1395 overall process id just yet. */
dfd4cc63 1396 if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
d6b0e80f 1397 {
a0ef4274
DJ
1398 int status = 0;
1399
1400 /* Pass on any pending signal for this LWP. */
1401 get_pending_status (lp, &status);
1402
7b50312a
PA
1403 if (linux_nat_prepare_to_resume != NULL)
1404 linux_nat_prepare_to_resume (lp);
d6b0e80f 1405 errno = 0;
dfd4cc63 1406 if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
a0ef4274 1407 WSTOPSIG (status)) < 0)
8a3fe4f8 1408 error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
d6b0e80f
AC
1409 safe_strerror (errno));
1410
1411 if (debug_linux_nat)
1412 fprintf_unfiltered (gdb_stdlog,
1413 "PTRACE_DETACH (%s, %s, 0) (OK)\n",
1414 target_pid_to_str (lp->ptid),
7feb7d06 1415 strsignal (WSTOPSIG (status)));
d6b0e80f
AC
1416
1417 delete_lwp (lp->ptid);
1418 }
1419
1420 return 0;
1421}
1422
1423static void
52554a0e 1424linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
d6b0e80f 1425{
b84876c2 1426 int pid;
a0ef4274 1427 int status;
d90e17a7
PA
1428 struct lwp_info *main_lwp;
1429
dfd4cc63 1430 pid = ptid_get_pid (inferior_ptid);
a0ef4274 1431
ae5e0686
MK
1432 /* Don't unregister from the event loop, as there may be other
1433 inferiors running. */
b84876c2 1434
4c28f408
PA
1435 /* Stop all threads before detaching. ptrace requires that the
 1436 thread is stopped to successfully detach. */
d90e17a7 1437 iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
4c28f408
PA
1438 /* ... and wait until all of them have reported back that
1439 they're no longer running. */
d90e17a7 1440 iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);
4c28f408 1441
d90e17a7 1442 iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);
d6b0e80f
AC
1443
1444 /* Only the initial process should be left right now. */
dfd4cc63 1445 gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);
d90e17a7
PA
1446
1447 main_lwp = find_lwp_pid (pid_to_ptid (pid));
d6b0e80f 1448
a0ef4274
DJ
1449 /* Pass on any pending signal for the last LWP. */
1450 if ((args == NULL || *args == '\0')
d90e17a7 1451 && get_pending_status (main_lwp, &status) != -1
a0ef4274
DJ
1452 && WIFSTOPPED (status))
1453 {
52554a0e
TT
1454 char *tem;
1455
a0ef4274
DJ
1456 /* Put the signal number in ARGS so that inf_ptrace_detach will
1457 pass it along with PTRACE_DETACH. */
224c3ddb 1458 tem = (char *) alloca (8);
cde33bf1 1459 xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
52554a0e 1460 args = tem;
ddabfc73
TT
1461 if (debug_linux_nat)
1462 fprintf_unfiltered (gdb_stdlog,
1463 "LND: Sending signal %s to %s\n",
1464 args,
1465 target_pid_to_str (main_lwp->ptid));
a0ef4274
DJ
1466 }
1467
7b50312a
PA
1468 if (linux_nat_prepare_to_resume != NULL)
1469 linux_nat_prepare_to_resume (main_lwp);
d90e17a7 1470 delete_lwp (main_lwp->ptid);
b84876c2 1471
7a7d3353
PA
1472 if (forks_exist_p ())
1473 {
1474 /* Multi-fork case. The current inferior_ptid is being detached
1475 from, but there are other viable forks to debug. Detach from
1476 the current fork, and context-switch to the first
1477 available. */
1478 linux_fork_detach (args, from_tty);
7a7d3353
PA
1479 }
1480 else
1481 linux_ops->to_detach (ops, args, from_tty);
d6b0e80f
AC
1482}
1483
8a99810d
PA
1484/* Resume execution of the inferior process. If STEP is nonzero,
1485 single-step it. If SIGNAL is nonzero, give it that signal. */
1486
1487static void
23f238d3
PA
1488linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
1489 enum gdb_signal signo)
8a99810d 1490{
8a99810d 1491 lp->step = step;
9c02b525
PA
1492
1493 /* stop_pc doubles as the PC the LWP had when it was last resumed.
1494 We only presently need that if the LWP is stepped though (to
1495 handle the case of stepping a breakpoint instruction). */
1496 if (step)
1497 {
1498 struct regcache *regcache = get_thread_regcache (lp->ptid);
1499
1500 lp->stop_pc = regcache_read_pc (regcache);
1501 }
1502 else
1503 lp->stop_pc = 0;
1504
8a99810d
PA
1505 if (linux_nat_prepare_to_resume != NULL)
1506 linux_nat_prepare_to_resume (lp);
90ad5e1d 1507 linux_ops->to_resume (linux_ops, lp->ptid, step, signo);
23f238d3
PA
1508
1509 /* Successfully resumed. Clear state that no longer makes sense,
1510 and mark the LWP as running. Must not do this before resuming
1511 otherwise if that fails other code will be confused. E.g., we'd
1512 later try to stop the LWP and hang forever waiting for a stop
1513 status. Note that we must not throw after this is cleared,
1514 otherwise handle_zombie_lwp_error would get confused. */
8a99810d 1515 lp->stopped = 0;
1ad3de98 1516 lp->core = -1;
23f238d3 1517 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
8a99810d
PA
1518 registers_changed_ptid (lp->ptid);
1519}
1520
23f238d3
PA
1521/* Called when we try to resume a stopped LWP and that errors out. If
1522 the LWP is no longer in ptrace-stopped state (meaning it's zombie,
 1523 or about to become one), discard the error, clear any pending status
1524 the LWP may have, and return true (we'll collect the exit status
1525 soon enough). Otherwise, return false. */
1526
1527static int
1528check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
1529{
1530 /* If we get an error after resuming the LWP successfully, we'd
1531 confuse !T state for the LWP being gone. */
1532 gdb_assert (lp->stopped);
1533
1534 /* We can't just check whether the LWP is in 'Z (Zombie)' state,
1535 because even if ptrace failed with ESRCH, the tracee may be "not
1536 yet fully dead", but already refusing ptrace requests. In that
1537 case the tracee has 'R (Running)' state for a little bit
1538 (observed in Linux 3.18). See also the note on ESRCH in the
1539 ptrace(2) man page. Instead, check whether the LWP has any state
1540 other than ptrace-stopped. */
1541
1542 /* Don't assume anything if /proc/PID/status can't be read. */
1543 if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
1544 {
1545 lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
1546 lp->status = 0;
1547 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
1548 return 1;
1549 }
1550 return 0;
1551}
1552
1553/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
1554 disappears while we try to resume it. */
1555
1556static void
1557linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
1558{
1559 TRY
1560 {
1561 linux_resume_one_lwp_throw (lp, step, signo);
1562 }
1563 CATCH (ex, RETURN_MASK_ERROR)
1564 {
1565 if (!check_ptrace_stopped_lwp_gone (lp))
1566 throw_exception (ex);
1567 }
1568 END_CATCH
1569}
1570
d6b0e80f
AC
1571/* Resume LP. */
1572
25289eb2 1573static void
e5ef252a 1574resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
d6b0e80f 1575{
25289eb2 1576 if (lp->stopped)
6c95b8df 1577 {
c9657e70 1578 struct inferior *inf = find_inferior_ptid (lp->ptid);
25289eb2
PA
1579
1580 if (inf->vfork_child != NULL)
1581 {
1582 if (debug_linux_nat)
1583 fprintf_unfiltered (gdb_stdlog,
1584 "RC: Not resuming %s (vfork parent)\n",
1585 target_pid_to_str (lp->ptid));
1586 }
8a99810d 1587 else if (!lwp_status_pending_p (lp))
25289eb2
PA
1588 {
1589 if (debug_linux_nat)
1590 fprintf_unfiltered (gdb_stdlog,
e5ef252a
PA
1591 "RC: Resuming sibling %s, %s, %s\n",
1592 target_pid_to_str (lp->ptid),
1593 (signo != GDB_SIGNAL_0
1594 ? strsignal (gdb_signal_to_host (signo))
1595 : "0"),
1596 step ? "step" : "resume");
25289eb2 1597
8a99810d 1598 linux_resume_one_lwp (lp, step, signo);
25289eb2
PA
1599 }
1600 else
1601 {
1602 if (debug_linux_nat)
1603 fprintf_unfiltered (gdb_stdlog,
1604 "RC: Not resuming sibling %s (has pending)\n",
1605 target_pid_to_str (lp->ptid));
1606 }
6c95b8df 1607 }
25289eb2 1608 else
d6b0e80f 1609 {
d90e17a7
PA
1610 if (debug_linux_nat)
1611 fprintf_unfiltered (gdb_stdlog,
25289eb2 1612 "RC: Not resuming sibling %s (not stopped)\n",
d6b0e80f 1613 target_pid_to_str (lp->ptid));
d6b0e80f 1614 }
25289eb2 1615}
d6b0e80f 1616
8817a6f2
PA
1617/* Callback for iterate_over_lwps. If LWP is EXCEPT, do nothing.
1618 Resume LWP with the last stop signal, if it is in pass state. */
e5ef252a 1619
25289eb2 1620static int
8817a6f2 1621linux_nat_resume_callback (struct lwp_info *lp, void *except)
25289eb2 1622{
e5ef252a
PA
1623 enum gdb_signal signo = GDB_SIGNAL_0;
1624
8817a6f2
PA
1625 if (lp == except)
1626 return 0;
1627
e5ef252a
PA
1628 if (lp->stopped)
1629 {
1630 struct thread_info *thread;
1631
1632 thread = find_thread_ptid (lp->ptid);
1633 if (thread != NULL)
1634 {
70509625 1635 signo = thread->suspend.stop_signal;
e5ef252a
PA
1636 thread->suspend.stop_signal = GDB_SIGNAL_0;
1637 }
1638 }
1639
1640 resume_lwp (lp, 0, signo);
d6b0e80f
AC
1641 return 0;
1642}
1643
1644static int
1645resume_clear_callback (struct lwp_info *lp, void *data)
1646{
1647 lp->resumed = 0;
25289eb2 1648 lp->last_resume_kind = resume_stop;
d6b0e80f
AC
1649 return 0;
1650}
1651
1652static int
1653resume_set_callback (struct lwp_info *lp, void *data)
1654{
1655 lp->resumed = 1;
25289eb2 1656 lp->last_resume_kind = resume_continue;
d6b0e80f
AC
1657 return 0;
1658}
1659
1660static void
28439f5e 1661linux_nat_resume (struct target_ops *ops,
2ea28649 1662 ptid_t ptid, int step, enum gdb_signal signo)
d6b0e80f
AC
1663{
1664 struct lwp_info *lp;
d90e17a7 1665 int resume_many;
d6b0e80f 1666
76f50ad1
DJ
1667 if (debug_linux_nat)
1668 fprintf_unfiltered (gdb_stdlog,
1669 "LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
1670 step ? "step" : "resume",
1671 target_pid_to_str (ptid),
a493e3e2 1672 (signo != GDB_SIGNAL_0
2ea28649 1673 ? strsignal (gdb_signal_to_host (signo)) : "0"),
76f50ad1
DJ
1674 target_pid_to_str (inferior_ptid));
1675
d6b0e80f 1676 /* A specific PTID means `step only this process id'. */
d90e17a7
PA
1677 resume_many = (ptid_equal (minus_one_ptid, ptid)
1678 || ptid_is_pid (ptid));
4c28f408 1679
e3e9f5a2
PA
1680 /* Mark the lwps we're resuming as resumed. */
1681 iterate_over_lwps (ptid, resume_set_callback, NULL);
d6b0e80f 1682
d90e17a7
PA
1683 /* See if it's the current inferior that should be handled
1684 specially. */
1685 if (resume_many)
1686 lp = find_lwp_pid (inferior_ptid);
1687 else
1688 lp = find_lwp_pid (ptid);
9f0bdab8 1689 gdb_assert (lp != NULL);
d6b0e80f 1690
9f0bdab8 1691 /* Remember if we're stepping. */
25289eb2 1692 lp->last_resume_kind = step ? resume_step : resume_continue;
d6b0e80f 1693
9f0bdab8
DJ
1694 /* If we have a pending wait status for this thread, there is no
1695 point in resuming the process. But first make sure that
1696 linux_nat_wait won't preemptively handle the event - we
1697 should never take this short-circuit if we are going to
1698 leave LP running, since we have skipped resuming all the
1699 other threads. This bit of code needs to be synchronized
1700 with linux_nat_wait. */
76f50ad1 1701
9f0bdab8
DJ
1702 if (lp->status && WIFSTOPPED (lp->status))
1703 {
2455069d
UW
1704 if (!lp->step
1705 && WSTOPSIG (lp->status)
1706 && sigismember (&pass_mask, WSTOPSIG (lp->status)))
d6b0e80f 1707 {
9f0bdab8
DJ
1708 if (debug_linux_nat)
1709 fprintf_unfiltered (gdb_stdlog,
1710 "LLR: Not short circuiting for ignored "
1711 "status 0x%x\n", lp->status);
1712
d6b0e80f
AC
1713 /* FIXME: What should we do if we are supposed to continue
1714 this thread with a signal? */
a493e3e2 1715 gdb_assert (signo == GDB_SIGNAL_0);
2ea28649 1716 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1717 lp->status = 0;
1718 }
1719 }
76f50ad1 1720
8a99810d 1721 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1722 {
1723 /* FIXME: What should we do if we are supposed to continue
1724 this thread with a signal? */
a493e3e2 1725 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1726
9f0bdab8
DJ
1727 if (debug_linux_nat)
1728 fprintf_unfiltered (gdb_stdlog,
1729 "LLR: Short circuiting for status 0x%x\n",
1730 lp->status);
d6b0e80f 1731
7feb7d06
PA
1732 if (target_can_async_p ())
1733 {
6a3753b3 1734 target_async (1);
7feb7d06
PA
1735 /* Tell the event loop we have something to process. */
1736 async_file_mark ();
1737 }
9f0bdab8 1738 return;
d6b0e80f
AC
1739 }
1740
d90e17a7 1741 if (resume_many)
8817a6f2 1742 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7 1743
d6b0e80f
AC
1744 if (debug_linux_nat)
1745 fprintf_unfiltered (gdb_stdlog,
1746 "LLR: %s %s, %s (resume event thread)\n",
1747 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
2bf6fb9d 1748 target_pid_to_str (lp->ptid),
a493e3e2 1749 (signo != GDB_SIGNAL_0
2ea28649 1750 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2 1751
2bf6fb9d
PA
1752 linux_resume_one_lwp (lp, step, signo);
1753
b84876c2 1754 if (target_can_async_p ())
6a3753b3 1755 target_async (1);
d6b0e80f
AC
1756}
1757
c5f62d5f 1758/* Send a signal to an LWP. */
d6b0e80f
AC
1759
1760static int
1761kill_lwp (int lwpid, int signo)
1762{
4a6ed09b 1763 int ret;
d6b0e80f 1764
4a6ed09b
PA
1765 errno = 0;
1766 ret = syscall (__NR_tkill, lwpid, signo);
1767 if (errno == ENOSYS)
1768 {
1769 /* If tkill fails, then we are not using nptl threads, a
1770 configuration we no longer support. */
1771 perror_with_name (("tkill"));
1772 }
1773 return ret;
d6b0e80f
AC
1774}
1775
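/* Illustrative standalone sketch (not part of gdb/linux-nat.c):
   kill_lwp above relies on the tkill syscall so the signal is
   delivered to one specific thread (LWP) rather than to the whole
   thread group, as kill() would do.  A minimal version of that idea,
   assuming Linux and a known LWP id (newer code would typically
   prefer tgkill), could look like this.  */

#include <sys/syscall.h>
#include <signal.h>
#include <errno.h>
#include <unistd.h>

static int
example_stop_one_thread (pid_t lwpid)
{
  errno = 0;
  /* glibc provides no tkill wrapper, so go through syscall().  */
  if (syscall (__NR_tkill, lwpid, SIGSTOP) != 0)
    return -errno;	/* e.g. -ESRCH if the LWP is already gone.  */
  return 0;
}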
ca2163eb
PA
1776/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1777 event, check if the core is interested in it: if not, ignore the
1778 event, and keep waiting; otherwise, we need to toggle the LWP's
1779 syscall entry/exit status, since the ptrace event itself doesn't
1780 indicate it, and report the trap to higher layers. */
1781
1782static int
1783linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1784{
1785 struct target_waitstatus *ourstatus = &lp->waitstatus;
1786 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1787 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1788
1789 if (stopping)
1790 {
1791 /* If we're stopping threads, there's a SIGSTOP pending, which
1792 makes it so that the LWP reports an immediate syscall return,
1793 followed by the SIGSTOP. Skip seeing that "return" using
1794 PTRACE_CONT directly, and let stop_wait_callback collect the
 1795 SIGSTOP. Later, when the thread is resumed, a new syscall
 1796 entry event will be reported. If we didn't do this (and returned 0), we'd
1797 leave a syscall entry pending, and our caller, by using
1798 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1799 itself. Later, when the user re-resumes this LWP, we'd see
1800 another syscall entry event and we'd mistake it for a return.
1801
1802 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1803 (leaving immediately with LWP->signalled set, without issuing
1804 a PTRACE_CONT), it would still be problematic to leave this
1805 syscall enter pending, as later when the thread is resumed,
1806 it would then see the same syscall exit mentioned above,
1807 followed by the delayed SIGSTOP, while the syscall didn't
1808 actually get to execute. It seems it would be even more
1809 confusing to the user. */
1810
1811 if (debug_linux_nat)
1812 fprintf_unfiltered (gdb_stdlog,
1813 "LHST: ignoring syscall %d "
1814 "for LWP %ld (stopping threads), "
1815 "resuming with PTRACE_CONT for SIGSTOP\n",
1816 syscall_number,
dfd4cc63 1817 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1818
1819 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1820 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1821 lp->stopped = 0;
ca2163eb
PA
1822 return 1;
1823 }
1824
bfd09d20
JS
1825 /* Always update the entry/return state, even if this particular
1826 syscall isn't interesting to the core now. In async mode,
1827 the user could install a new catchpoint for this syscall
1828 between syscall enter/return, and we'll need to know to
1829 report a syscall return if that happens. */
1830 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1831 ? TARGET_WAITKIND_SYSCALL_RETURN
1832 : TARGET_WAITKIND_SYSCALL_ENTRY);
1833
ca2163eb
PA
1834 if (catch_syscall_enabled ())
1835 {
ca2163eb
PA
1836 if (catching_syscall_number (syscall_number))
1837 {
1838 /* Alright, an event to report. */
1839 ourstatus->kind = lp->syscall_state;
1840 ourstatus->value.syscall_number = syscall_number;
1841
1842 if (debug_linux_nat)
1843 fprintf_unfiltered (gdb_stdlog,
1844 "LHST: stopping for %s of syscall %d"
1845 " for LWP %ld\n",
3e43a32a
MS
1846 lp->syscall_state
1847 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1848 ? "entry" : "return",
1849 syscall_number,
dfd4cc63 1850 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1851 return 0;
1852 }
1853
1854 if (debug_linux_nat)
1855 fprintf_unfiltered (gdb_stdlog,
1856 "LHST: ignoring %s of syscall %d "
1857 "for LWP %ld\n",
1858 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1859 ? "entry" : "return",
1860 syscall_number,
dfd4cc63 1861 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1862 }
1863 else
1864 {
1865 /* If we had been syscall tracing, and hence used PT_SYSCALL
1866 before on this LWP, it could happen that the user removes all
1867 syscall catchpoints before we get to process this event.
1868 There are two noteworthy issues here:
1869
1870 - When stopped at a syscall entry event, resuming with
1871 PT_STEP still resumes executing the syscall and reports a
1872 syscall return.
1873
 1874 - Only PT_SYSCALL catches syscall enters. If we last
 1875 single-stepped this thread, then this event can't be a
 1876 syscall enter; it has to be a syscall exit.
 1877
1878
1879 The points above mean that the next resume, be it PT_STEP or
1880 PT_CONTINUE, can not trigger a syscall trace event. */
1881 if (debug_linux_nat)
1882 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1883 "LHST: caught syscall event "
1884 "with no syscall catchpoints."
ca2163eb
PA
1885 " %d for LWP %ld, ignoring\n",
1886 syscall_number,
dfd4cc63 1887 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1888 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1889 }
1890
1891 /* The core isn't interested in this event. For efficiency, avoid
1892 stopping all threads only to have the core resume them all again.
1893 Since we're not stopping threads, if we're still syscall tracing
1894 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1895 subsequent syscall. Simply resume using the inf-ptrace layer,
1896 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1897
8a99810d 1898 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1899 return 1;
1900}
1901
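/* Illustrative standalone sketch (not part of gdb/linux-nat.c):
   linux_handle_syscall_trap has to toggle lp->syscall_state itself
   because the kernel reports syscall-entry and syscall-exit stops
   identically.  A minimal tracer loop showing this, assuming
   PTRACE_O_TRACESYSGOOD was set so syscall stops report
   SIGTRAP | 0x80, and a single already-attached tracee.  */

#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stddef.h>

static void
example_trace_syscalls (pid_t pid)
{
  int in_syscall = 0;	/* 0 = next stop is an entry, 1 = an exit.  */
  int status;

  for (;;)
    {
      if (ptrace (PTRACE_SYSCALL, pid, NULL, NULL) == -1)
	break;
      if (waitpid (pid, &status, 0) <= 0 || !WIFSTOPPED (status))
	break;
      if (WSTOPSIG (status) == (SIGTRAP | 0x80))
	in_syscall = !in_syscall;	/* Entry and exit alternate.  */
      else
	break;	/* Some other stop; a real tracer would handle it.  */
    }
}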
3d799a95
DJ
1902/* Handle a GNU/Linux extended wait response. If we see a clone
1903 event, we need to add the new LWP to our list (and not report the
1904 trap to higher layers). This function returns non-zero if the
1905 event should be ignored and we should wait again. If STOPPING is
1906 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1907
1908static int
4dd63d48 1909linux_handle_extended_wait (struct lwp_info *lp, int status)
d6b0e80f 1910{
dfd4cc63 1911 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1912 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1913 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1914
bfd09d20
JS
1915 /* All extended events we currently use are mid-syscall. Only
1916 PTRACE_EVENT_STOP is delivered more like a signal-stop, but
1917 you have to be using PTRACE_SEIZE to get that. */
1918 lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;
1919
3d799a95
DJ
1920 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1921 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1922 {
3d799a95
DJ
1923 unsigned long new_pid;
1924 int ret;
1925
1926 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1927
3d799a95
DJ
1928 /* If we haven't already seen the new PID stop, wait for it now. */
1929 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1930 {
1931 /* The new child has a pending SIGSTOP. We can't affect it until it
1932 hits the SIGSTOP, but we're already attached. */
4a6ed09b 1933 ret = my_waitpid (new_pid, &status, __WALL);
3d799a95
DJ
1934 if (ret == -1)
1935 perror_with_name (_("waiting for new child"));
1936 else if (ret != new_pid)
1937 internal_error (__FILE__, __LINE__,
1938 _("wait returned unexpected PID %d"), ret);
1939 else if (!WIFSTOPPED (status))
1940 internal_error (__FILE__, __LINE__,
1941 _("wait returned unexpected status 0x%x"), status);
1942 }
1943
3a3e9ee3 1944 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1945
26cb8b7c
PA
1946 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1947 {
1948 /* The arch-specific native code may need to know about new
1949 forks even if those end up never mapped to an
1950 inferior. */
1951 if (linux_nat_new_fork != NULL)
1952 linux_nat_new_fork (lp, new_pid);
1953 }
1954
2277426b 1955 if (event == PTRACE_EVENT_FORK
dfd4cc63 1956 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 1957 {
2277426b
PA
1958 /* Handle checkpointing by linux-fork.c here as a special
1959 case. We don't want the follow-fork-mode or 'catch fork'
1960 to interfere with this. */
1961
1962 /* This won't actually modify the breakpoint list, but will
1963 physically remove the breakpoints from the child. */
d80ee84f 1964 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
1965
1966 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
1967 if (!find_fork_pid (new_pid))
1968 add_fork (new_pid);
2277426b
PA
1969
1970 /* Report as spurious, so that infrun doesn't want to follow
1971 this fork. We're actually doing an infcall in
1972 linux-fork.c. */
1973 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
1974
1975 /* Report the stop to the core. */
1976 return 0;
1977 }
1978
3d799a95
DJ
1979 if (event == PTRACE_EVENT_FORK)
1980 ourstatus->kind = TARGET_WAITKIND_FORKED;
1981 else if (event == PTRACE_EVENT_VFORK)
1982 ourstatus->kind = TARGET_WAITKIND_VFORKED;
4dd63d48 1983 else if (event == PTRACE_EVENT_CLONE)
3d799a95 1984 {
78768c4a
JK
1985 struct lwp_info *new_lp;
1986
3d799a95 1987 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 1988
3c4d7e12
PA
1989 if (debug_linux_nat)
1990 fprintf_unfiltered (gdb_stdlog,
1991 "LHEW: Got clone event "
1992 "from LWP %d, new child is LWP %ld\n",
1993 pid, new_pid);
1994
dfd4cc63 1995 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
4c28f408 1996 new_lp->stopped = 1;
4dd63d48 1997 new_lp->resumed = 1;
d6b0e80f 1998
2db9a427
PA
1999 /* If the thread_db layer is active, let it record the user
2000 level thread id and status, and add the thread to GDB's
2001 list. */
2002 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 2003 {
2db9a427
PA
2004 /* The process is not using thread_db. Add the LWP to
2005 GDB's list. */
2006 target_post_attach (ptid_get_lwp (new_lp->ptid));
2007 add_thread (new_lp->ptid);
2008 }
4c28f408 2009
2ee52aa4 2010 /* Even if we're stopping the thread for some reason
4dd63d48
PA
2011 internal to this module, from the perspective of infrun
2012 and the user/frontend, this new thread is running until
2013 it next reports a stop. */
2ee52aa4 2014 set_running (new_lp->ptid, 1);
4dd63d48 2015 set_executing (new_lp->ptid, 1);
4c28f408 2016
4dd63d48 2017 if (WSTOPSIG (status) != SIGSTOP)
79395f92 2018 {
4dd63d48
PA
 2019 /* This can happen if someone starts sending signals to
 2020 the new thread before it gets a chance to run, and those
 2021 signals have a lower number than SIGSTOP (e.g. SIGUSR1).
2022 This is an unlikely case, and harder to handle for
2023 fork / vfork than for clone, so we do not try - but
2024 we handle it for clone events here. */
2025
2026 new_lp->signalled = 1;
2027
79395f92
PA
2028 /* We created NEW_LP so it cannot yet contain STATUS. */
2029 gdb_assert (new_lp->status == 0);
2030
2031 /* Save the wait status to report later. */
2032 if (debug_linux_nat)
2033 fprintf_unfiltered (gdb_stdlog,
2034 "LHEW: waitpid of new LWP %ld, "
2035 "saving status %s\n",
dfd4cc63 2036 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
2037 status_to_str (status));
2038 new_lp->status = status;
2039 }
aa01bd36
PA
2040 else if (report_thread_events)
2041 {
2042 new_lp->waitstatus.kind = TARGET_WAITKIND_THREAD_CREATED;
2043 new_lp->status = status;
2044 }
79395f92 2045
3d799a95
DJ
2046 return 1;
2047 }
2048
2049 return 0;
d6b0e80f
AC
2050 }
2051
3d799a95
DJ
2052 if (event == PTRACE_EVENT_EXEC)
2053 {
a75724bc
PA
2054 if (debug_linux_nat)
2055 fprintf_unfiltered (gdb_stdlog,
2056 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 2057 ptid_get_lwp (lp->ptid));
a75724bc 2058
3d799a95
DJ
2059 ourstatus->kind = TARGET_WAITKIND_EXECD;
2060 ourstatus->value.execd_pathname
8dd27370 2061 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 2062
8af756ef
PA
2063 /* The thread that execed must have been resumed, but, when a
2064 thread execs, it changes its tid to the tgid, and the old
2065 tgid thread might have not been resumed. */
2066 lp->resumed = 1;
6c95b8df
PA
2067 return 0;
2068 }
2069
2070 if (event == PTRACE_EVENT_VFORK_DONE)
2071 {
2072 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2073 {
6c95b8df 2074 if (debug_linux_nat)
3e43a32a
MS
2075 fprintf_unfiltered (gdb_stdlog,
2076 "LHEW: Got expected PTRACE_EVENT_"
2077 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 2078 ptid_get_lwp (lp->ptid));
3d799a95 2079
6c95b8df
PA
2080 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2081 return 0;
3d799a95
DJ
2082 }
2083
6c95b8df 2084 if (debug_linux_nat)
3e43a32a
MS
2085 fprintf_unfiltered (gdb_stdlog,
2086 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 2087 "from LWP %ld: ignoring\n",
dfd4cc63 2088 ptid_get_lwp (lp->ptid));
6c95b8df 2089 return 1;
3d799a95
DJ
2090 }
2091
2092 internal_error (__FILE__, __LINE__,
2093 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2094}
2095
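/* Illustrative standalone sketch (not part of gdb/linux-nat.c):
   after an extended stop such as PTRACE_EVENT_CLONE, the id of the
   new LWP is obtained with PTRACE_GETEVENTMSG, and the new LWP has
   its own pending SIGSTOP that must be collected before it can be
   controlled -- which is what the code above does.  Assumes the
   tracer set PTRACE_O_TRACECLONE, and that __WALL (a Linux/glibc
   extension) is available.  */

#include <sys/ptrace.h>
#include <sys/wait.h>

static pid_t
example_handle_clone_stop (pid_t parent_lwp)
{
  unsigned long new_lwp = 0;
  int status;

  if (ptrace (PTRACE_GETEVENTMSG, parent_lwp, NULL, &new_lwp) == -1)
    return -1;

  /* The new thread stops with SIGSTOP before running any user code;
     __WALL makes waitpid see clone children as well.  */
  if (waitpid ((pid_t) new_lwp, &status, __WALL) == -1
      || !WIFSTOPPED (status))
    return -1;

  return (pid_t) new_lwp;
}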
2096/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2097 exited. */
2098
2099static int
2100wait_lwp (struct lwp_info *lp)
2101{
2102 pid_t pid;
432b4d03 2103 int status = 0;
d6b0e80f 2104 int thread_dead = 0;
432b4d03 2105 sigset_t prev_mask;
d6b0e80f
AC
2106
2107 gdb_assert (!lp->stopped);
2108 gdb_assert (lp->status == 0);
2109
432b4d03
JK
2110 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2111 block_child_signals (&prev_mask);
2112
2113 for (;;)
d6b0e80f 2114 {
4a6ed09b 2115 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WALL | WNOHANG);
a9f4bb21
PA
2116 if (pid == -1 && errno == ECHILD)
2117 {
2118 /* The thread has previously exited. We need to delete it
4a6ed09b
PA
2119 now because if this was a non-leader thread execing, we
2120 won't get an exit event. See comments on exec events at
2121 the top of the file. */
a9f4bb21
PA
2122 thread_dead = 1;
2123 if (debug_linux_nat)
2124 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2125 target_pid_to_str (lp->ptid));
2126 }
432b4d03
JK
2127 if (pid != 0)
2128 break;
2129
2130 /* Bugs 10970, 12702.
 2131 The thread group leader may have exited, in which case we'll lock up in
2132 waitpid if there are other threads, even if they are all zombies too.
2133 Basically, we're not supposed to use waitpid this way.
4a6ed09b
PA
2134 tkill(pid,0) cannot be used here as it gets ESRCH for both
2135 for zombie and running processes.
432b4d03
JK
2136
2137 As a workaround, check if we're waiting for the thread group leader and
2138 if it's a zombie, and avoid calling waitpid if it is.
2139
2140 This is racy, what if the tgl becomes a zombie right after we check?
2141 Therefore always use WNOHANG with sigsuspend - it is equivalent to
 5f572dec 2142 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2143
dfd4cc63
LM
2144 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2145 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2146 {
d6b0e80f
AC
2147 thread_dead = 1;
2148 if (debug_linux_nat)
432b4d03
JK
2149 fprintf_unfiltered (gdb_stdlog,
2150 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2151 target_pid_to_str (lp->ptid));
432b4d03 2152 break;
d6b0e80f 2153 }
432b4d03
JK
2154
 2155 /* Wait for the next SIGCHLD and try again. This may let SIGCHLD handlers
 2156 get invoked even though our caller had them intentionally blocked with
 2157 block_child_signals. This is only sensitive to the loop in
 2158 linux_nat_wait_1; if we are called from there, my_waitpid gets called
 2159 again before that loop reaches sigsuspend, so we can safely let the
 2160 handlers get executed here. */
2161
d36bf488
DE
2162 if (debug_linux_nat)
2163 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2164 sigsuspend (&suspend_mask);
2165 }
2166
2167 restore_child_signals_mask (&prev_mask);
2168
d6b0e80f
AC
2169 if (!thread_dead)
2170 {
dfd4cc63 2171 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2172
2173 if (debug_linux_nat)
2174 {
2175 fprintf_unfiltered (gdb_stdlog,
2176 "WL: waitpid %s received %s\n",
2177 target_pid_to_str (lp->ptid),
2178 status_to_str (status));
2179 }
d6b0e80f 2180
a9f4bb21
PA
2181 /* Check if the thread has exited. */
2182 if (WIFEXITED (status) || WIFSIGNALED (status))
2183 {
aa01bd36
PA
2184 if (report_thread_events
2185 || ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
69dde7dc
PA
2186 {
2187 if (debug_linux_nat)
aa01bd36 2188 fprintf_unfiltered (gdb_stdlog, "WL: LWP %d exited.\n",
69dde7dc
PA
2189 ptid_get_pid (lp->ptid));
2190
aa01bd36 2191 /* If this is the leader exiting, it means the whole
69dde7dc
PA
2192 process is gone. Store the status to report to the
2193 core. Store it in lp->waitstatus, because lp->status
2194 would be ambiguous (W_EXITCODE(0,0) == 0). */
2195 store_waitstatus (&lp->waitstatus, status);
2196 return 0;
2197 }
2198
a9f4bb21
PA
2199 thread_dead = 1;
2200 if (debug_linux_nat)
2201 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2202 target_pid_to_str (lp->ptid));
2203 }
d6b0e80f
AC
2204 }
2205
2206 if (thread_dead)
2207 {
e26af52f 2208 exit_lwp (lp);
d6b0e80f
AC
2209 return 0;
2210 }
2211
2212 gdb_assert (WIFSTOPPED (status));
8817a6f2 2213 lp->stopped = 1;
d6b0e80f 2214
8784d563
PA
2215 if (lp->must_set_ptrace_flags)
2216 {
2217 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2218 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2219
de0d863e 2220 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2221 lp->must_set_ptrace_flags = 0;
2222 }
2223
ca2163eb
PA
2224 /* Handle GNU/Linux's syscall SIGTRAPs. */
2225 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2226 {
2227 /* No longer need the sysgood bit. The ptrace event ends up
2228 recorded in lp->waitstatus if we care for it. We can carry
2229 on handling the event like a regular SIGTRAP from here
2230 on. */
2231 status = W_STOPCODE (SIGTRAP);
2232 if (linux_handle_syscall_trap (lp, 1))
2233 return wait_lwp (lp);
2234 }
bfd09d20
JS
2235 else
2236 {
2237 /* Almost all other ptrace-stops are known to be outside of system
2238 calls, with further exceptions in linux_handle_extended_wait. */
2239 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2240 }
ca2163eb 2241
d6b0e80f 2242 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2243 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2244 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2245 {
2246 if (debug_linux_nat)
2247 fprintf_unfiltered (gdb_stdlog,
2248 "WL: Handling extended status 0x%06x\n",
2249 status);
4dd63d48 2250 linux_handle_extended_wait (lp, status);
20ba1ce6 2251 return 0;
d6b0e80f
AC
2252 }
2253
2254 return status;
2255}
2256
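/* Illustrative standalone sketch (not part of gdb/linux-nat.c) of
   the race-free wait pattern used in wait_lwp above (and in
   linux_nat_wait_1 below): block SIGCHLD, poll with
   waitpid (WNOHANG), and if nothing is ready, atomically unblock
   SIGCHLD and sleep in sigsuspend.  A SIGCHLD arriving at any point
   either makes the next WNOHANG poll succeed or wakes the
   sigsuspend, so no event is lost and we never block in waitpid.  */

#include <sys/wait.h>
#include <signal.h>

static void example_sigchld_handler (int sig) { (void) sig; }

static pid_t
example_wait_any_child (int *statusp)
{
  sigset_t block_mask, suspend_mask, prev_mask;
  pid_t pid;

  /* A handler must be installed; an ignored SIGCHLD would not
     interrupt sigsuspend.  */
  signal (SIGCHLD, example_sigchld_handler);

  sigemptyset (&block_mask);
  sigaddset (&block_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &block_mask, &prev_mask);

  /* The mask used while suspended: everything we had before, with
     SIGCHLD explicitly unblocked.  */
  suspend_mask = prev_mask;
  sigdelset (&suspend_mask, SIGCHLD);

  while ((pid = waitpid (-1, statusp, WNOHANG)) == 0)
    sigsuspend (&suspend_mask);	/* Wake on SIGCHLD, then poll again.  */

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return pid;	/* > 0 on success, -1 on error (e.g. no children).  */
}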
2257/* Send a SIGSTOP to LP. */
2258
2259static int
2260stop_callback (struct lwp_info *lp, void *data)
2261{
2262 if (!lp->stopped && !lp->signalled)
2263 {
2264 int ret;
2265
2266 if (debug_linux_nat)
2267 {
2268 fprintf_unfiltered (gdb_stdlog,
2269 "SC: kill %s **<SIGSTOP>**\n",
2270 target_pid_to_str (lp->ptid));
2271 }
2272 errno = 0;
dfd4cc63 2273 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2274 if (debug_linux_nat)
2275 {
2276 fprintf_unfiltered (gdb_stdlog,
2277 "SC: lwp kill %d %s\n",
2278 ret,
2279 errno ? safe_strerror (errno) : "ERRNO-OK");
2280 }
2281
2282 lp->signalled = 1;
2283 gdb_assert (lp->status == 0);
2284 }
2285
2286 return 0;
2287}
2288
7b50312a
PA
2289/* Request a stop on LWP. */
2290
2291void
2292linux_stop_lwp (struct lwp_info *lwp)
2293{
2294 stop_callback (lwp, NULL);
2295}
2296
2db9a427
PA
2297/* See linux-nat.h */
2298
2299void
2300linux_stop_and_wait_all_lwps (void)
2301{
2302 /* Stop all LWP's ... */
2303 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2304
2305 /* ... and wait until all of them have reported back that
2306 they're no longer running. */
2307 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2308}
2309
2310/* See linux-nat.h */
2311
2312void
2313linux_unstop_all_lwps (void)
2314{
2315 iterate_over_lwps (minus_one_ptid,
2316 resume_stopped_resumed_lwps, &minus_one_ptid);
2317}
2318
57380f4e 2319/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2320
2321static int
57380f4e
DJ
2322linux_nat_has_pending_sigint (int pid)
2323{
2324 sigset_t pending, blocked, ignored;
57380f4e
DJ
2325
2326 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2327
2328 if (sigismember (&pending, SIGINT)
2329 && !sigismember (&ignored, SIGINT))
2330 return 1;
2331
2332 return 0;
2333}
2334
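/* Illustrative standalone sketch (not part of gdb/linux-nat.c):
   linux_nat_has_pending_sigint relies on linux_proc_pending_signals;
   one way the per-thread pending set can be read is from the
   "SigPnd:" line of /proc/PID/status, which holds a hexadecimal
   bitmask with bit (signo - 1) set for each pending signal.  GDB's
   real parsing lives in nat/linux-procfs code and may differ; this
   is only a simplified sketch.  */

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

static int
example_signal_is_pending (int pid, int signo)
{
  char path[64], line[256];
  FILE *f;
  unsigned long long pending = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "SigPnd:", 7) == 0)
      {
	pending = strtoull (line + 7, NULL, 16);
	break;
      }

  fclose (f);
  return (pending & (1ULL << (signo - 1))) != 0;
}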
2335/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2336
2337static int
2338set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2339{
57380f4e
DJ
2340 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2341 flag to consume the next one. */
2342 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2343 && WSTOPSIG (lp->status) == SIGINT)
2344 lp->status = 0;
2345 else
2346 lp->ignore_sigint = 1;
2347
2348 return 0;
2349}
2350
2351/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2352 This function is called after we know the LWP has stopped; if the LWP
2353 stopped before the expected SIGINT was delivered, then it will never have
2354 arrived. Also, if the signal was delivered to a shared queue and consumed
2355 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2356
57380f4e
DJ
2357static void
2358maybe_clear_ignore_sigint (struct lwp_info *lp)
2359{
2360 if (!lp->ignore_sigint)
2361 return;
2362
dfd4cc63 2363 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2364 {
2365 if (debug_linux_nat)
2366 fprintf_unfiltered (gdb_stdlog,
2367 "MCIS: Clearing bogus flag for %s\n",
2368 target_pid_to_str (lp->ptid));
2369 lp->ignore_sigint = 0;
2370 }
2371}
2372
ebec9a0f
PA
 2373/* Fetch info about a possibly triggered data watchpoint and store it in
2374 LP.
2375
2376 On some archs, like x86, that use debug registers to set
2377 watchpoints, it's possible that the way to know which watched
2378 address trapped, is to check the register that is used to select
2379 which address to watch. Problem is, between setting the watchpoint
2380 and reading back which data address trapped, the user may change
2381 the set of watchpoints, and, as a consequence, GDB changes the
2382 debug registers in the inferior. To avoid reading back a stale
2383 stopped-data-address when that happens, we cache in LP the fact
2384 that a watchpoint trapped, and the corresponding data address, as
2385 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2386 registers meanwhile, we have the cached data we can rely on. */
2387
9c02b525
PA
2388static int
2389check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f
PA
2390{
2391 struct cleanup *old_chain;
2392
2393 if (linux_ops->to_stopped_by_watchpoint == NULL)
9c02b525 2394 return 0;
ebec9a0f
PA
2395
2396 old_chain = save_inferior_ptid ();
2397 inferior_ptid = lp->ptid;
2398
9c02b525 2399 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
ebec9a0f 2400 {
15c66dd6 2401 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
9c02b525 2402
ebec9a0f
PA
2403 if (linux_ops->to_stopped_data_address != NULL)
2404 lp->stopped_data_address_p =
2405 linux_ops->to_stopped_data_address (&current_target,
2406 &lp->stopped_data_address);
2407 else
2408 lp->stopped_data_address_p = 0;
2409 }
2410
2411 do_cleanups (old_chain);
9c02b525 2412
15c66dd6 2413 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2414}
2415
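/* Illustrative, x86-only sketch (not part of gdb/linux-nat.c): on
   x86, the "which watchpoint fired" information lives in debug
   register DR6, which a tracer can read through PTRACE_PEEKUSER at
   the u_debugreg[6] slot of struct user; the low four bits say which
   of DR0..DR3 triggered.  GDB's actual low-level code lives in the
   x86-specific nat files; this is only a sketch of the mechanism the
   comment above refers to.  */

#include <sys/ptrace.h>
#include <sys/user.h>
#include <sys/types.h>
#include <stddef.h>
#include <errno.h>

static int
example_which_debug_reg_hit (pid_t lwpid)
{
  long dr6;
  int i;

  errno = 0;
  dr6 = ptrace (PTRACE_PEEKUSER, lwpid,
		offsetof (struct user, u_debugreg[6]), NULL);
  if (dr6 == -1 && errno != 0)
    return -1;

  for (i = 0; i < 4; i++)
    if (dr6 & (1 << i))
      return i;		/* Debug register i (DR0..DR3) triggered.  */

  return -1;		/* Not a debug-register hit.  */
}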
9c02b525 2416/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f
PA
2417
2418static int
6a109b6b 2419linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2420{
2421 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2422
2423 gdb_assert (lp != NULL);
2424
15c66dd6 2425 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2426}
2427
2428static int
2429linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2430{
2431 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2432
2433 gdb_assert (lp != NULL);
2434
2435 *addr_p = lp->stopped_data_address;
2436
2437 return lp->stopped_data_address_p;
2438}
2439
26ab7092
JK
 2440/* Commonly, any breakpoint or watchpoint generates only SIGTRAP. */
2441
2442static int
2443sigtrap_is_event (int status)
2444{
2445 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2446}
2447
26ab7092
JK
 2448/* Set an alternative recognizer for SIGTRAP-like events. If
 2449 breakpoint_inserted_here_p reports a breakpoint at the stop address,
 2450 then gdbarch_decr_pc_after_break will be applied. */
2451
2452void
2453linux_nat_set_status_is_event (struct target_ops *t,
2454 int (*status_is_event) (int status))
2455{
2456 linux_nat_status_is_event = status_is_event;
2457}
2458
57380f4e
DJ
2459/* Wait until LP is stopped. */
2460
2461static int
2462stop_wait_callback (struct lwp_info *lp, void *data)
2463{
c9657e70 2464 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2465
2466 /* If this is a vfork parent, bail out, it is not going to report
2467 any SIGSTOP until the vfork is done with. */
2468 if (inf->vfork_child != NULL)
2469 return 0;
2470
d6b0e80f
AC
2471 if (!lp->stopped)
2472 {
2473 int status;
2474
2475 status = wait_lwp (lp);
2476 if (status == 0)
2477 return 0;
2478
57380f4e
DJ
2479 if (lp->ignore_sigint && WIFSTOPPED (status)
2480 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2481 {
57380f4e 2482 lp->ignore_sigint = 0;
d6b0e80f
AC
2483
2484 errno = 0;
dfd4cc63 2485 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2486 lp->stopped = 0;
d6b0e80f
AC
2487 if (debug_linux_nat)
2488 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2489 "PTRACE_CONT %s, 0, 0 (%s) "
2490 "(discarding SIGINT)\n",
d6b0e80f
AC
2491 target_pid_to_str (lp->ptid),
2492 errno ? safe_strerror (errno) : "OK");
2493
57380f4e 2494 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2495 }
2496
57380f4e
DJ
2497 maybe_clear_ignore_sigint (lp);
2498
d6b0e80f
AC
2499 if (WSTOPSIG (status) != SIGSTOP)
2500 {
e5ef252a 2501 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2502
e5ef252a
PA
2503 if (debug_linux_nat)
2504 fprintf_unfiltered (gdb_stdlog,
2505 "SWC: Pending event %s in %s\n",
2506 status_to_str ((int) status),
2507 target_pid_to_str (lp->ptid));
2508
2509 /* Save the sigtrap event. */
2510 lp->status = status;
e5ef252a 2511 gdb_assert (lp->signalled);
e7ad2f14 2512 save_stop_reason (lp);
d6b0e80f
AC
2513 }
2514 else
2515 {
2516 /* We caught the SIGSTOP that we intended to catch, so
2517 there's no SIGSTOP pending. */
e5ef252a
PA
2518
2519 if (debug_linux_nat)
2520 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2521 "SWC: Expected SIGSTOP caught for %s.\n",
e5ef252a
PA
2522 target_pid_to_str (lp->ptid));
2523
e5ef252a
PA
2524 /* Reset SIGNALLED only after the stop_wait_callback call
2525 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2526 lp->signalled = 0;
2527 }
2528 }
2529
2530 return 0;
2531}
2532
9c02b525
PA
2533/* Return non-zero if LP has a wait status pending. Discard the
2534 pending event and resume the LWP if the event that originally
2535 caused the stop became uninteresting. */
d6b0e80f
AC
2536
2537static int
2538status_callback (struct lwp_info *lp, void *data)
2539{
2540 /* Only report a pending wait status if we pretend that this has
2541 indeed been resumed. */
ca2163eb
PA
2542 if (!lp->resumed)
2543 return 0;
2544
eb54c8bf
PA
2545 if (!lwp_status_pending_p (lp))
2546 return 0;
2547
15c66dd6
PA
2548 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2549 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2550 {
2551 struct regcache *regcache = get_thread_regcache (lp->ptid);
9c02b525
PA
2552 CORE_ADDR pc;
2553 int discard = 0;
2554
9c02b525
PA
2555 pc = regcache_read_pc (regcache);
2556
2557 if (pc != lp->stop_pc)
2558 {
2559 if (debug_linux_nat)
2560 fprintf_unfiltered (gdb_stdlog,
2561 "SC: PC of %s changed. was=%s, now=%s\n",
2562 target_pid_to_str (lp->ptid),
2563 paddress (target_gdbarch (), lp->stop_pc),
2564 paddress (target_gdbarch (), pc));
2565 discard = 1;
2566 }
faf09f01
PA
2567
2568#if !USE_SIGTRAP_SIGINFO
9c02b525
PA
2569 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2570 {
2571 if (debug_linux_nat)
2572 fprintf_unfiltered (gdb_stdlog,
2573 "SC: previous breakpoint of %s, at %s gone\n",
2574 target_pid_to_str (lp->ptid),
2575 paddress (target_gdbarch (), lp->stop_pc));
2576
2577 discard = 1;
2578 }
faf09f01 2579#endif
9c02b525
PA
2580
2581 if (discard)
2582 {
2583 if (debug_linux_nat)
2584 fprintf_unfiltered (gdb_stdlog,
2585 "SC: pending event of %s cancelled.\n",
2586 target_pid_to_str (lp->ptid));
2587
2588 lp->status = 0;
2589 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2590 return 0;
2591 }
9c02b525
PA
2592 }
2593
eb54c8bf 2594 return 1;
d6b0e80f
AC
2595}
2596
d6b0e80f
AC
2597/* Count the LWP's that have had events. */
2598
2599static int
2600count_events_callback (struct lwp_info *lp, void *data)
2601{
9a3c8263 2602 int *count = (int *) data;
d6b0e80f
AC
2603
2604 gdb_assert (count != NULL);
2605
9c02b525
PA
2606 /* Select only resumed LWPs that have an event pending. */
2607 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2608 (*count)++;
2609
2610 return 0;
2611}
2612
2613/* Select the LWP (if any) that is currently being single-stepped. */
2614
2615static int
2616select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2617{
25289eb2
PA
2618 if (lp->last_resume_kind == resume_step
2619 && lp->status != 0)
d6b0e80f
AC
2620 return 1;
2621 else
2622 return 0;
2623}
2624
8a99810d
PA
2625/* Returns true if LP has a status pending. */
2626
2627static int
2628lwp_status_pending_p (struct lwp_info *lp)
2629{
2630 /* We check for lp->waitstatus in addition to lp->status, because we
2631 can have pending process exits recorded in lp->status and
2632 W_EXITCODE(0,0) happens to be 0. */
2633 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2634}
2635
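/* Illustrative standalone sketch (not part of gdb/linux-nat.c): the
   check above needs lp->waitstatus in addition to lp->status because
   a raw wait status of 0 is a perfectly valid "exited with code 0"
   status, so a plain int cannot also use 0 to mean "no event
   pending".  W_EXITCODE (0, 0) is a glibc extension equal to
   (0 << 8) | 0, i.e. 0.  */

#include <sys/wait.h>
#include <assert.h>

static void
example_status_zero_is_ambiguous (void)
{
  int status = 0;		/* Same bits as W_EXITCODE (0, 0).  */

  assert (WIFEXITED (status));
  assert (WEXITSTATUS (status) == 0);
  /* So "status == 0" alone cannot mean "nothing happened".  */
}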
b90fc188 2636/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2637
2638static int
2639select_event_lwp_callback (struct lwp_info *lp, void *data)
2640{
9a3c8263 2641 int *selector = (int *) data;
d6b0e80f
AC
2642
2643 gdb_assert (selector != NULL);
2644
9c02b525
PA
2645 /* Select only resumed LWPs that have an event pending. */
2646 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2647 if ((*selector)-- == 0)
2648 return 1;
2649
2650 return 0;
2651}
2652
e7ad2f14
PA
2653/* Called when the LWP stopped for a signal/trap. If it stopped for a
2654 trap check what caused it (breakpoint, watchpoint, trace, etc.),
2655 and save the result in the LWP's stop_reason field. If it stopped
2656 for a breakpoint, decrement the PC if necessary on the lwp's
2657 architecture. */
9c02b525 2658
e7ad2f14
PA
2659static void
2660save_stop_reason (struct lwp_info *lp)
710151dd 2661{
e7ad2f14
PA
2662 struct regcache *regcache;
2663 struct gdbarch *gdbarch;
515630c5 2664 CORE_ADDR pc;
9c02b525 2665 CORE_ADDR sw_bp_pc;
faf09f01
PA
2666#if USE_SIGTRAP_SIGINFO
2667 siginfo_t siginfo;
2668#endif
9c02b525 2669
e7ad2f14
PA
2670 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
2671 gdb_assert (lp->status != 0);
2672
2673 if (!linux_nat_status_is_event (lp->status))
2674 return;
2675
2676 regcache = get_thread_regcache (lp->ptid);
2677 gdbarch = get_regcache_arch (regcache);
2678
9c02b525 2679 pc = regcache_read_pc (regcache);
527a273a 2680 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2681
faf09f01
PA
2682#if USE_SIGTRAP_SIGINFO
2683 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2684 {
2685 if (siginfo.si_signo == SIGTRAP)
2686 {
e7ad2f14
PA
2687 if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
2688 && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2689 {
e7ad2f14
PA
2690 /* The si_code is ambiguous on this arch -- check debug
2691 registers. */
2692 if (!check_stopped_by_watchpoint (lp))
2693 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2694 }
2695 else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
2696 {
2697 /* If we determine the LWP stopped for a SW breakpoint,
2698 trust it. Particularly don't check watchpoint
2699 registers, because at least on s390, we'd find
2700 stopped-by-watchpoint as long as there's a watchpoint
2701 set. */
faf09f01 2702 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
faf09f01 2703 }
e7ad2f14 2704 else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
faf09f01 2705 {
e7ad2f14
PA
2706 /* This can indicate either a hardware breakpoint or
2707 hardware watchpoint. Check debug registers. */
2708 if (!check_stopped_by_watchpoint (lp))
2709 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
faf09f01 2710 }
2bf6fb9d
PA
2711 else if (siginfo.si_code == TRAP_TRACE)
2712 {
2713 if (debug_linux_nat)
2714 fprintf_unfiltered (gdb_stdlog,
2715 "CSBB: %s stopped by trace\n",
2716 target_pid_to_str (lp->ptid));
e7ad2f14
PA
2717
2718 /* We may have single stepped an instruction that
2719 triggered a watchpoint. In that case, on some
2720 architectures (such as x86), instead of TRAP_HWBKPT,
2721 si_code indicates TRAP_TRACE, and we need to check
2722 the debug registers separately. */
2723 check_stopped_by_watchpoint (lp);
2bf6fb9d 2724 }
faf09f01
PA
2725 }
2726 }
2727#else
9c02b525
PA
2728 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2729 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2730 sw_bp_pc))
710151dd 2731 {
9c02b525
PA
2732 /* The LWP was either continued, or stepped a software
2733 breakpoint instruction. */
e7ad2f14
PA
2734 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2735 }
2736
2737 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2738 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2739
2740 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON)
2741 check_stopped_by_watchpoint (lp);
2742#endif
2743
2744 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
2745 {
710151dd
PA
2746 if (debug_linux_nat)
2747 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 2748 "CSBB: %s stopped by software breakpoint\n",
710151dd
PA
2749 target_pid_to_str (lp->ptid));
2750
2751 /* Back up the PC if necessary. */
9c02b525
PA
2752 if (pc != sw_bp_pc)
2753 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2754
e7ad2f14
PA
2755 /* Update this so we record the correct stop PC below. */
2756 pc = sw_bp_pc;
710151dd 2757 }
e7ad2f14 2758 else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2759 {
2760 if (debug_linux_nat)
2761 fprintf_unfiltered (gdb_stdlog,
e7ad2f14
PA
2762 "CSBB: %s stopped by hardware breakpoint\n",
2763 target_pid_to_str (lp->ptid));
2764 }
2765 else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
2766 {
2767 if (debug_linux_nat)
2768 fprintf_unfiltered (gdb_stdlog,
2769 "CSBB: %s stopped by hardware watchpoint\n",
9c02b525 2770 target_pid_to_str (lp->ptid));
9c02b525 2771 }
d6b0e80f 2772
e7ad2f14 2773 lp->stop_pc = pc;
d6b0e80f
AC
2774}
2775
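/* Illustrative standalone sketch (not part of gdb/linux-nat.c):
   save_stop_reason classifies a SIGTRAP by the si_code of the stop's
   siginfo.  The raw mechanism is PTRACE_GETSIGINFO plus the TRAP_*
   codes shown here.  TRAP_BRKPT and TRAP_TRACE are POSIX;
   TRAP_HWBKPT is Linux-specific and may need recent headers, hence
   the fallback definition below (an assumption for this sketch).  */

#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

#ifndef TRAP_HWBKPT
#define TRAP_HWBKPT 4
#endif

static const char *
example_classify_sigtrap (pid_t lwpid)
{
  siginfo_t si;

  if (ptrace (PTRACE_GETSIGINFO, lwpid, NULL, &si) == -1
      || si.si_signo != SIGTRAP)
    return "not a SIGTRAP stop";

  if (si.si_code == TRAP_BRKPT)
    return "software breakpoint";
  if (si.si_code == TRAP_HWBKPT)
    return "hardware breakpoint or watchpoint";
  if (si.si_code == TRAP_TRACE)
    return "single-step";
  return "other SIGTRAP";
}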
faf09f01
PA
2776
2777/* Returns true if the LWP had stopped for a software breakpoint. */
2778
2779static int
2780linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
2781{
2782 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2783
2784 gdb_assert (lp != NULL);
2785
2786 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2787}
2788
2789/* Implement the supports_stopped_by_sw_breakpoint method. */
2790
2791static int
2792linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2793{
2794 return USE_SIGTRAP_SIGINFO;
2795}
2796
2797/* Returns true if the LWP had stopped for a hardware
2798 breakpoint/watchpoint. */
2799
2800static int
2801linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
2802{
2803 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2804
2805 gdb_assert (lp != NULL);
2806
2807 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2808}
2809
2810/* Implement the supports_stopped_by_hw_breakpoint method. */
2811
2812static int
2813linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2814{
2815 return USE_SIGTRAP_SIGINFO;
2816}
2817
d6b0e80f
AC
2818/* Select one LWP out of those that have events pending. */
2819
2820static void
d90e17a7 2821select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2822{
2823 int num_events = 0;
2824 int random_selector;
9c02b525 2825 struct lwp_info *event_lp = NULL;
d6b0e80f 2826
ac264b3b 2827 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2828 (*orig_lp)->status = *status;
2829
9c02b525
PA
2830 /* In all-stop, give preference to the LWP that is being
2831 single-stepped. There will be at most one, and it will be the
2832 LWP that the core is most interested in. If we didn't do this,
2833 then we'd have to handle pending step SIGTRAPs somehow in case
2834 the core later continues the previously-stepped thread, as
2835 otherwise we'd report the pending SIGTRAP then, and the core, not
2836 having stepped the thread, wouldn't understand what the trap was
2837 for, and therefore would report it to the user as a random
2838 signal. */
fbea99ea 2839 if (!target_is_non_stop_p ())
d6b0e80f 2840 {
9c02b525
PA
2841 event_lp = iterate_over_lwps (filter,
2842 select_singlestep_lwp_callback, NULL);
2843 if (event_lp != NULL)
2844 {
2845 if (debug_linux_nat)
2846 fprintf_unfiltered (gdb_stdlog,
2847 "SEL: Select single-step %s\n",
2848 target_pid_to_str (event_lp->ptid));
2849 }
d6b0e80f 2850 }
9c02b525
PA
2851
2852 if (event_lp == NULL)
d6b0e80f 2853 {
9c02b525 2854 /* Pick one at random, out of those which have had events. */
d6b0e80f 2855
9c02b525 2856 /* First see how many events we have. */
d90e17a7 2857 iterate_over_lwps (filter, count_events_callback, &num_events);
8bf3b159 2858 gdb_assert (num_events > 0);
d6b0e80f 2859
9c02b525
PA
2860 /* Now randomly pick a LWP out of those that have had
2861 events. */
d6b0e80f
AC
2862 random_selector = (int)
2863 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2864
2865 if (debug_linux_nat && num_events > 1)
2866 fprintf_unfiltered (gdb_stdlog,
9c02b525 2867 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2868 num_events, random_selector);
2869
d90e17a7
PA
2870 event_lp = iterate_over_lwps (filter,
2871 select_event_lwp_callback,
d6b0e80f
AC
2872 &random_selector);
2873 }
2874
2875 if (event_lp != NULL)
2876 {
2877 /* Switch the event LWP. */
2878 *orig_lp = event_lp;
2879 *status = event_lp->status;
2880 }
2881
2882 /* Flush the wait status for the event LWP. */
2883 (*orig_lp)->status = 0;
2884}
2885
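/* Illustrative standalone sketch (not part of gdb/linux-nat.c): the
   selection in select_event_lwp maps rand () into [0, n) by scaling,
   rather than using "rand () % n", which would slightly favor small
   indexes whenever RAND_MAX + 1 is not a multiple of n.  */

#include <stdlib.h>

static int
example_pick_index (int n)
{
  return (int) ((n * (double) rand ()) / (RAND_MAX + 1.0));
}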
2886/* Return non-zero if LP has been resumed. */
2887
2888static int
2889resumed_callback (struct lwp_info *lp, void *data)
2890{
2891 return lp->resumed;
2892}
2893
02f3fc28 2894/* Check if we should go on and pass this event to common code.
9c02b525 2895 Return the affected lwp if we are, or NULL otherwise. */
12d9289a 2896
02f3fc28 2897static struct lwp_info *
9c02b525 2898linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
2899{
2900 struct lwp_info *lp;
89a5711c 2901 int event = linux_ptrace_get_extended_event (status);
02f3fc28
PA
2902
2903 lp = find_lwp_pid (pid_to_ptid (lwpid));
2904
2905 /* Check for stop events reported by a process we didn't already
2906 know about - anything not already in our LWP list.
2907
2908 If we're expecting to receive stopped processes after
2909 fork, vfork, and clone events, then we'll just add the
2910 new one to our list and go back to waiting for the event
2911 to be reported - the stopped process might be returned
0e5bf2a8
PA
2912 from waitpid before or after the event is.
2913
2914 But note the case of a non-leader thread exec'ing after the
2915 leader having exited, and gone from our lists. The non-leader
2916 thread changes its tid to the tgid. */
2917
2918 if (WIFSTOPPED (status) && lp == NULL
89a5711c 2919 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
2920 {
2921 /* A multi-thread exec after we had seen the leader exiting. */
2922 if (debug_linux_nat)
2923 fprintf_unfiltered (gdb_stdlog,
2924 "LLW: Re-adding thread group leader LWP %d.\n",
2925 lwpid);
2926
dfd4cc63 2927 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
2928 lp->stopped = 1;
2929 lp->resumed = 1;
2930 add_thread (lp->ptid);
2931 }
2932
02f3fc28
PA
2933 if (WIFSTOPPED (status) && !lp)
2934 {
3b27ef47
PA
2935 if (debug_linux_nat)
2936 fprintf_unfiltered (gdb_stdlog,
2937 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
2938 (long) lwpid, status_to_str (status));
84636d28 2939 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
2940 return NULL;
2941 }
2942
2943 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 2944 our list, i.e. not part of the current process. This can happen
fd62cb89 2945 if we detach from a program we originally forked and then it
02f3fc28
PA
2946 exits. */
2947 if (!WIFSTOPPED (status) && !lp)
2948 return NULL;
2949
8817a6f2
PA
2950 /* This LWP is stopped now. (And if dead, this prevents it from
2951 ever being continued.) */
2952 lp->stopped = 1;
2953
8784d563
PA
2954 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
2955 {
2956 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
de0d863e 2957 int options = linux_nat_ptrace_options (inf->attach_flag);
8784d563 2958
de0d863e 2959 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), options);
8784d563
PA
2960 lp->must_set_ptrace_flags = 0;
2961 }
2962
ca2163eb
PA
2963 /* Handle GNU/Linux's syscall SIGTRAPs. */
2964 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2965 {
2966 /* No longer need the sysgood bit. The ptrace event ends up
2967 recorded in lp->waitstatus if we care for it. We can carry
2968 on handling the event like a regular SIGTRAP from here
2969 on. */
2970 status = W_STOPCODE (SIGTRAP);
2971 if (linux_handle_syscall_trap (lp, 0))
2972 return NULL;
2973 }
bfd09d20
JS
2974 else
2975 {
2976 /* Almost all other ptrace-stops are known to be outside of system
2977 calls, with further exceptions in linux_handle_extended_wait. */
2978 lp->syscall_state = TARGET_WAITKIND_IGNORE;
2979 }
02f3fc28 2980
ca2163eb 2981 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2982 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2983 && linux_is_extended_waitstatus (status))
02f3fc28
PA
2984 {
2985 if (debug_linux_nat)
2986 fprintf_unfiltered (gdb_stdlog,
2987 "LLW: Handling extended status 0x%06x\n",
2988 status);
4dd63d48 2989 if (linux_handle_extended_wait (lp, status))
02f3fc28
PA
2990 return NULL;
2991 }
2992
2993 /* Check if the thread has exited. */
9c02b525
PA
2994 if (WIFEXITED (status) || WIFSIGNALED (status))
2995 {
aa01bd36
PA
2996 if (!report_thread_events
2997 && num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 2998 {
9c02b525
PA
2999 if (debug_linux_nat)
3000 fprintf_unfiltered (gdb_stdlog,
3001 "LLW: %s exited.\n",
3002 target_pid_to_str (lp->ptid));
3003
4a6ed09b
PA
3004 /* If there is at least one more LWP, then the exit signal
3005 was not the end of the debugged application and should be
3006 ignored. */
3007 exit_lwp (lp);
3008 return NULL;
02f3fc28
PA
3009 }
3010
77598427
PA
3011 /* Note that even if the leader was ptrace-stopped, it can still
3012 exit, if e.g., some other thread brings down the whole
3013 process (calls `exit'). So don't assert that the lwp is
3014 resumed. */
02f3fc28
PA
3015 if (debug_linux_nat)
3016 fprintf_unfiltered (gdb_stdlog,
aa01bd36 3017 "LWP %ld exited (resumed=%d)\n",
77598427 3018 ptid_get_lwp (lp->ptid), lp->resumed);
02f3fc28 3019
9c02b525
PA
 3020 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
3021 lp->signalled = 0;
3022
3023 /* Store the pending event in the waitstatus, because
3024 W_EXITCODE(0,0) == 0. */
3025 store_waitstatus (&lp->waitstatus, status);
3026 return lp;
02f3fc28
PA
3027 }
3028
02f3fc28
PA
3029 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3030 an attempt to stop an LWP. */
3031 if (lp->signalled
3032 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3033 {
02f3fc28
PA
3034 lp->signalled = 0;
3035
2bf6fb9d 3036 if (lp->last_resume_kind == resume_stop)
25289eb2 3037 {
2bf6fb9d
PA
3038 if (debug_linux_nat)
3039 fprintf_unfiltered (gdb_stdlog,
3040 "LLW: resume_stop SIGSTOP caught for %s.\n",
3041 target_pid_to_str (lp->ptid));
3042 }
3043 else
3044 {
3045 /* This is a delayed SIGSTOP. Filter out the event. */
02f3fc28 3046
25289eb2
PA
3047 if (debug_linux_nat)
3048 fprintf_unfiltered (gdb_stdlog,
2bf6fb9d 3049 "LLW: %s %s, 0, 0 (discard delayed SIGSTOP)\n",
25289eb2
PA
3050 lp->step ?
3051 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3052 target_pid_to_str (lp->ptid));
02f3fc28 3053
2bf6fb9d 3054 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2 3055 gdb_assert (lp->resumed);
25289eb2
PA
3056 return NULL;
3057 }
02f3fc28
PA
3058 }
3059
57380f4e
DJ
3060 /* Make sure we don't report a SIGINT that we have already displayed
3061 for another thread. */
3062 if (lp->ignore_sigint
3063 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3064 {
3065 if (debug_linux_nat)
3066 fprintf_unfiltered (gdb_stdlog,
3067 "LLW: Delayed SIGINT caught for %s.\n",
3068 target_pid_to_str (lp->ptid));
3069
3070 /* This is a delayed SIGINT. */
3071 lp->ignore_sigint = 0;
3072
8a99810d 3073 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3074 if (debug_linux_nat)
3075 fprintf_unfiltered (gdb_stdlog,
3076 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3077 lp->step ?
3078 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3079 target_pid_to_str (lp->ptid));
57380f4e
DJ
3080 gdb_assert (lp->resumed);
3081
3082 /* Discard the event. */
3083 return NULL;
3084 }
3085
9c02b525
PA
3086 /* Don't report signals that GDB isn't interested in, such as
3087 signals that are neither printed nor stopped upon. Stopping all
3088 threads can be a bit time-consuming so if we want decent
3089 performance with heavily multi-threaded programs, especially when
3090 they're using a high frequency timer, we'd better avoid it if we
3091 can. */
3092 if (WIFSTOPPED (status))
3093 {
3094 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3095
fbea99ea 3096 if (!target_is_non_stop_p ())
9c02b525
PA
3097 {
3098 /* Only do the below in all-stop, as we currently use SIGSTOP
3099 to implement target_stop (see linux_nat_stop) in
3100 non-stop. */
3101 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3102 {
3103 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3104 forwarded to the entire process group, that is, all LWPs
3105 will receive it - unless they're using CLONE_THREAD to
3106 share signals. Since we only want to report it once, we
3107 mark it as ignored for all LWPs except this one. */
3108 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3109 set_ignore_sigint, NULL);
3110 lp->ignore_sigint = 0;
3111 }
3112 else
3113 maybe_clear_ignore_sigint (lp);
3114 }
3115
3116 /* When using hardware single-step, we need to report every signal.
c9587f88
AT
3117 Otherwise, signals in pass_mask may be short-circuited
3118 except signals that might be caused by a breakpoint. */
9c02b525 3119 if (!lp->step
c9587f88
AT
3120 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3121 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3122 {
3123 linux_resume_one_lwp (lp, lp->step, signo);
3124 if (debug_linux_nat)
3125 fprintf_unfiltered (gdb_stdlog,
3126 "LLW: %s %s, %s (preempt 'handle')\n",
3127 lp->step ?
3128 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3129 target_pid_to_str (lp->ptid),
3130 (signo != GDB_SIGNAL_0
3131 ? strsignal (gdb_signal_to_host (signo))
3132 : "0"));
3133 return NULL;
3134 }
3135 }
3136
02f3fc28
PA
3137 /* An interesting event. */
3138 gdb_assert (lp);
ca2163eb 3139 lp->status = status;
e7ad2f14 3140 save_stop_reason (lp);
02f3fc28
PA
3141 return lp;
3142}
3143
0e5bf2a8
PA
3144/* Detect zombie thread group leaders, and "exit" them. We can't reap
3145 their exits until all other threads in the group have exited. */
3146
3147static void
3148check_zombie_leaders (void)
3149{
3150 struct inferior *inf;
3151
3152 ALL_INFERIORS (inf)
3153 {
3154 struct lwp_info *leader_lp;
3155
3156 if (inf->pid == 0)
3157 continue;
3158
3159 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3160 if (leader_lp != NULL
3161 /* Check if there are other threads in the group, as we may
3162 have raced with the inferior simply exiting. */
3163 && num_lwps (inf->pid) > 1
5f572dec 3164 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3165 {
3166 if (debug_linux_nat)
3167 fprintf_unfiltered (gdb_stdlog,
3168 "CZL: Thread group leader %d zombie "
3169 "(it exited, or another thread execd).\n",
3170 inf->pid);
3171
3172 /* A leader zombie can mean one of two things:
3173
3174 - It exited, and there's an exit status pending
3175 available, or only the leader exited (not the whole
3176 program). In the latter case, we can't waitpid the
3177 leader's exit status until all other threads are gone.
3178
3179 - There are 3 or more threads in the group, and a thread
4a6ed09b
PA
3180 other than the leader exec'd. See comments on exec
3181 events at the top of the file. We could try
0e5bf2a8
PA
3182 distinguishing the exit and exec cases, by waiting once
3183 more, and seeing if something comes out, but it doesn't
3184 sound useful. The previous leader _does_ go away, and
3185 we'll re-add the new one once we see the exec event
3186 (which is just the same as what would happen if the
3187 previous leader did exit voluntarily before some other
3188 thread execs). */
3189
3190 if (debug_linux_nat)
3191 fprintf_unfiltered (gdb_stdlog,
3192 "CZL: Thread group leader %d vanished.\n",
3193 inf->pid);
3194 exit_lwp (leader_lp);
3195 }
3196 }
3197}
3198
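/* Illustrative standalone sketch (not part of gdb/linux-nat.c):
   check_zombie_leaders relies on linux_proc_pid_is_zombie (in
   nat/linux-procfs.c).  The usual /proc-based check reads the
   "State:" line of /proc/PID/status and looks for 'Z'; this is only
   a simplified sketch of that idea.  */

#include <stdio.h>
#include <string.h>

static int
example_pid_is_zombie (int pid)
{
  char path[64], line[256];
  FILE *f;
  int zombie = 0;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return 0;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The line looks like "State:\tZ (zombie)".  */
	char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	zombie = (*p == 'Z');
	break;
      }

  fclose (f);
  return zombie;
}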
aa01bd36
PA
3199/* Convenience function that is called when the kernel reports an exit
3200 event. This decides whether to report the event to GDB as a
3201 process exit event, a thread exit event, or to suppress the
3202 event. */
3203
3204static ptid_t
3205filter_exit_event (struct lwp_info *event_child,
3206 struct target_waitstatus *ourstatus)
3207{
3208 ptid_t ptid = event_child->ptid;
3209
3210 if (num_lwps (ptid_get_pid (ptid)) > 1)
3211 {
3212 if (report_thread_events)
3213 ourstatus->kind = TARGET_WAITKIND_THREAD_EXITED;
3214 else
3215 ourstatus->kind = TARGET_WAITKIND_IGNORE;
3216
3217 exit_lwp (event_child);
3218 }
3219
3220 return ptid;
3221}
3222
d6b0e80f 3223static ptid_t
7feb7d06 3224linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3225 ptid_t ptid, struct target_waitstatus *ourstatus,
3226 int target_options)
d6b0e80f 3227{
fc9b8e47 3228 sigset_t prev_mask;
4b60df3d 3229 enum resume_kind last_resume_kind;
12d9289a 3230 struct lwp_info *lp;
12d9289a 3231 int status;
d6b0e80f 3232
01124a23 3233 if (debug_linux_nat)
b84876c2
PA
3234 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3235
f973ed9c
DJ
3236 /* The first time we get here after starting a new inferior, we may
3237 not have added it to the LWP list yet - this is the earliest
3238 moment at which we know its PID. */
d90e17a7 3239 if (ptid_is_pid (inferior_ptid))
f973ed9c 3240 {
27c9d204
PA
3241 /* Upgrade the main thread's ptid. */
3242 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3243 ptid_build (ptid_get_pid (inferior_ptid),
3244 ptid_get_pid (inferior_ptid), 0));
27c9d204 3245
26cb8b7c 3246 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3247 lp->resumed = 1;
3248 }
3249
12696c10 3250 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3251 block_child_signals (&prev_mask);
d6b0e80f 3252
d6b0e80f 3253 /* First check if there is a LWP with a wait status pending. */
8a99810d
PA
3254 lp = iterate_over_lwps (ptid, status_callback, NULL);
3255 if (lp != NULL)
d6b0e80f
AC
3256 {
3257 if (debug_linux_nat)
d6b0e80f
AC
3258 fprintf_unfiltered (gdb_stdlog,
3259 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3260 status_to_str (lp->status),
d6b0e80f 3261 target_pid_to_str (lp->ptid));
d6b0e80f
AC
3262 }
3263
9c02b525
PA
3264 /* But if we don't find a pending event, we'll have to wait. Always
3265 pull all events out of the kernel. We'll randomly select an
3266 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3267
d90e17a7 3268 while (lp == NULL)
d6b0e80f
AC
3269 {
3270 pid_t lwpid;
3271
0e5bf2a8
PA
 3272 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3273 quirks:
3274
3275 - If the thread group leader exits while other threads in the
3276 thread group still exist, waitpid(TGID, ...) hangs. That
3277 waitpid won't return an exit status until the other threads
 3278 in the group are reaped.
3279
3280 - When a non-leader thread execs, that thread just vanishes
3281 without reporting an exit (so we'd hang if we waited for it
3282 explicitly in that case). The exec event is reported to
3283 the TGID pid. */
3284
3285 errno = 0;
4a6ed09b 3286 lwpid = my_waitpid (-1, &status, __WALL | WNOHANG);
0e5bf2a8
PA
3287
3288 if (debug_linux_nat)
3289 fprintf_unfiltered (gdb_stdlog,
3290 "LNW: waitpid(-1, ...) returned %d, %s\n",
3291 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3292
d6b0e80f
AC
3293 if (lwpid > 0)
3294 {
d6b0e80f
AC
3295 if (debug_linux_nat)
3296 {
3297 fprintf_unfiltered (gdb_stdlog,
3298 "LLW: waitpid %ld received %s\n",
3299 (long) lwpid, status_to_str (status));
3300 }
3301
9c02b525 3302 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3303 /* Retry until nothing comes out of waitpid. A single
3304 SIGCHLD can indicate more than one child stopped. */
3305 continue;
d6b0e80f
AC
3306 }
3307
20ba1ce6
PA
3308 /* Now that we've pulled all events out of the kernel, resume
3309 LWPs that don't have an interesting event to report. */
3310 iterate_over_lwps (minus_one_ptid,
3311 resume_stopped_resumed_lwps, &minus_one_ptid);
3312
3313 /* ... and find an LWP with a status to report to the core, if
3314 any. */
9c02b525
PA
3315 lp = iterate_over_lwps (ptid, status_callback, NULL);
3316 if (lp != NULL)
3317 break;
3318
0e5bf2a8
PA
3319 /* Check for zombie thread group leaders. Those can't be reaped
3320 until all other threads in the thread group are. */
3321 check_zombie_leaders ();
d6b0e80f 3322
0e5bf2a8
PA
3323 /* If there are no resumed children left, bail. We'd be stuck
3324 forever in the sigsuspend call below otherwise. */
3325 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3326 {
3327 if (debug_linux_nat)
3328 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3329
0e5bf2a8 3330 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3331
0e5bf2a8
PA
3332 restore_child_signals_mask (&prev_mask);
3333 return minus_one_ptid;
d6b0e80f 3334 }
28736962 3335
0e5bf2a8
PA
3336 /* No interesting event to report to the core. */
3337
3338 if (target_options & TARGET_WNOHANG)
3339 {
01124a23 3340 if (debug_linux_nat)
28736962
PA
3341 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3342
0e5bf2a8 3343 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3344 restore_child_signals_mask (&prev_mask);
3345 return minus_one_ptid;
3346 }
d6b0e80f
AC
3347
3348 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3349 gdb_assert (lp == NULL);
0e5bf2a8
PA
3350
3351 /* Block until we get an event reported with SIGCHLD. */
d36bf488
DE
3352 if (debug_linux_nat)
3353 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
0e5bf2a8 3354 sigsuspend (&suspend_mask);
d6b0e80f
AC
3355 }
3356
d6b0e80f
AC
3357 gdb_assert (lp);
3358
ca2163eb
PA
3359 status = lp->status;
3360 lp->status = 0;
3361
fbea99ea 3362 if (!target_is_non_stop_p ())
4c28f408
PA
3363 {
3364 /* Now stop all other LWP's ... */
d90e17a7 3365 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3366
3367 /* ... and wait until all of them have reported back that
3368 they're no longer running. */
d90e17a7 3369 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
9c02b525
PA
3370 }
3371
3372 /* If we're not waiting for a specific LWP, choose an event LWP from
3373 among those that have had events. Giving equal priority to all
3374 LWPs that have had events helps prevent starvation. */
3375 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3376 select_event_lwp (ptid, &lp, &status);
3377
3378 gdb_assert (lp != NULL);
3379
3380 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3381 it was a software breakpoint, and we can't reliably support the
3382 "stopped by software breakpoint" stop reason. */
3383 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3384 && !USE_SIGTRAP_SIGINFO)
9c02b525
PA
3385 {
3386 struct regcache *regcache = get_thread_regcache (lp->ptid);
3387 struct gdbarch *gdbarch = get_regcache_arch (regcache);
527a273a 3388 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3389
9c02b525
PA
3390 if (decr_pc != 0)
3391 {
3392 CORE_ADDR pc;
d6b0e80f 3393
9c02b525
PA
3394 pc = regcache_read_pc (regcache);
3395 regcache_write_pc (regcache, pc + decr_pc);
3396 }
3397 }
e3e9f5a2 3398
9c02b525
PA
3399 /* We'll need this to determine whether to report a SIGSTOP as
3400 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3401 clears it. */
3402 last_resume_kind = lp->last_resume_kind;
4b60df3d 3403
fbea99ea 3404 if (!target_is_non_stop_p ())
9c02b525 3405 {
e3e9f5a2
PA
3406 /* In all-stop, from the core's perspective, all LWPs are now
3407 stopped until a new resume action is sent over. */
3408 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3409 }
3410 else
25289eb2 3411 {
4b60df3d 3412 resume_clear_callback (lp, NULL);
25289eb2 3413 }
d6b0e80f 3414
26ab7092 3415 if (linux_nat_status_is_event (status))
d6b0e80f 3416 {
d6b0e80f
AC
3417 if (debug_linux_nat)
3418 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3419 "LLW: trap ptid is %s.\n",
3420 target_pid_to_str (lp->ptid));
d6b0e80f 3421 }
d6b0e80f
AC
3422
3423 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3424 {
3425 *ourstatus = lp->waitstatus;
3426 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3427 }
3428 else
3429 store_waitstatus (ourstatus, status);
3430
01124a23 3431 if (debug_linux_nat)
b84876c2
PA
3432 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3433
7feb7d06 3434 restore_child_signals_mask (&prev_mask);
1e225492 3435
4b60df3d 3436 if (last_resume_kind == resume_stop
25289eb2
PA
3437 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3438 && WSTOPSIG (status) == SIGSTOP)
3439 {
3440 /* A thread that has been requested to stop by GDB with
3441 target_stop, and it stopped cleanly, so report as SIG0. The
3442 use of SIGSTOP is an implementation detail. */
a493e3e2 3443 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3444 }
3445
1e225492
JK
3446 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3447 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3448 lp->core = -1;
3449 else
2e794194 3450 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3451
aa01bd36
PA
3452 if (ourstatus->kind == TARGET_WAITKIND_EXITED)
3453 return filter_exit_event (lp, ourstatus);
3454
f973ed9c 3455 return lp->ptid;
d6b0e80f
AC
3456}
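
/* [Editor's sketch -- not part of linux-nat.c] A minimal, standalone
   illustration of the drain-then-suspend pattern linux_nat_wait_1 uses
   above: pull every pending event out of the kernel with
   waitpid (-1, ..., __WALL | WNOHANG), then block in sigsuspend until
   SIGCHLD.  handle_one_event is a placeholder standing in for
   linux_nat_filter_event; SIGCHLD is assumed blocked except inside
   sigsuspend.  */

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

static int
handle_one_event (pid_t lwpid, int status)
{
  /* Placeholder: return nonzero when the event should be reported.  */
  printf ("event from LWP %ld, status 0x%x\n", (long) lwpid, status);
  return 1;
}

static void
wait_for_child_event (const sigset_t *suspend_mask)
{
  for (;;)
    {
      int status, have_event = 0;
      pid_t lwpid;

      /* Drain: a single SIGCHLD can cover several stopped children,
         so keep calling waitpid until nothing more comes out.  */
      while ((lwpid = waitpid (-1, &status, __WALL | WNOHANG)) > 0)
        have_event |= handle_one_event (lwpid, status);

      if (have_event)
        break;

      /* Nothing to report; block until the SIGCHLD handler wakes us.  */
      sigsuspend (suspend_mask);
    }
}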
3457
e3e9f5a2
PA
3458/* Resume LWPs that are currently stopped without any pending status
3459 to report, but are resumed from the core's perspective. */
3460
3461static int
3462resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3463{
9a3c8263 3464 ptid_t *wait_ptid_p = (ptid_t *) data;
e3e9f5a2 3465
4dd63d48
PA
3466 if (!lp->stopped)
3467 {
3468 if (debug_linux_nat)
3469 fprintf_unfiltered (gdb_stdlog,
3470 "RSRL: NOT resuming LWP %s, not stopped\n",
3471 target_pid_to_str (lp->ptid));
3472 }
3473 else if (!lp->resumed)
3474 {
3475 if (debug_linux_nat)
3476 fprintf_unfiltered (gdb_stdlog,
3477 "RSRL: NOT resuming LWP %s, not resumed\n",
3478 target_pid_to_str (lp->ptid));
3479 }
3480 else if (lwp_status_pending_p (lp))
3481 {
3482 if (debug_linux_nat)
3483 fprintf_unfiltered (gdb_stdlog,
3484 "RSRL: NOT resuming LWP %s, has pending status\n",
3485 target_pid_to_str (lp->ptid));
3486 }
3487 else
e3e9f5a2 3488 {
336060f3
PA
3489 struct regcache *regcache = get_thread_regcache (lp->ptid);
3490 struct gdbarch *gdbarch = get_regcache_arch (regcache);
336060f3 3491
23f238d3 3492 TRY
e3e9f5a2 3493 {
23f238d3
PA
3494 CORE_ADDR pc = regcache_read_pc (regcache);
3495 int leave_stopped = 0;
e3e9f5a2 3496
23f238d3
PA
3497 /* Don't bother if there's a breakpoint at PC that we'd hit
3498 immediately, and we're not waiting for this LWP. */
3499 if (!ptid_match (lp->ptid, *wait_ptid_p))
3500 {
3501 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3502 leave_stopped = 1;
3503 }
e3e9f5a2 3504
23f238d3
PA
3505 if (!leave_stopped)
3506 {
3507 if (debug_linux_nat)
3508 fprintf_unfiltered (gdb_stdlog,
3509 "RSRL: resuming stopped-resumed LWP %s at "
3510 "%s: step=%d\n",
3511 target_pid_to_str (lp->ptid),
3512 paddress (gdbarch, pc),
3513 lp->step);
3514
3515 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3516 }
3517 }
3518 CATCH (ex, RETURN_MASK_ERROR)
3519 {
3520 if (!check_ptrace_stopped_lwp_gone (lp))
3521 throw_exception (ex);
3522 }
3523 END_CATCH
e3e9f5a2
PA
3524 }
3525
3526 return 0;
3527}
3528
7feb7d06
PA
3529static ptid_t
3530linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3531 ptid_t ptid, struct target_waitstatus *ourstatus,
3532 int target_options)
7feb7d06
PA
3533{
3534 ptid_t event_ptid;
3535
3536 if (debug_linux_nat)
09826ec5
PA
3537 {
3538 char *options_string;
3539
3540 options_string = target_options_to_string (target_options);
3541 fprintf_unfiltered (gdb_stdlog,
3542 "linux_nat_wait: [%s], [%s]\n",
3543 target_pid_to_str (ptid),
3544 options_string);
3545 xfree (options_string);
3546 }
7feb7d06
PA
3547
3548 /* Flush the async file first. */
d9d41e78 3549 if (target_is_async_p ())
7feb7d06
PA
3550 async_file_flush ();
3551
e3e9f5a2
PA
3552 /* Resume LWPs that are currently stopped without any pending status
3553 to report, but are resumed from the core's perspective. LWPs get
3554 in this state if we find them stopping at a time we're not
3555 interested in reporting the event (target_wait on a
3556 specific_process, for example, see linux_nat_wait_1), and
3557 meanwhile the event became uninteresting. Don't bother resuming
3558 LWPs we're not going to wait for if they'd stop immediately. */
fbea99ea 3559 if (target_is_non_stop_p ())
e3e9f5a2
PA
3560 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3561
47608cb1 3562 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3563
3564 /* If we requested any event, and something came out, assume there
3565 may be more. If we requested a specific lwp or process, also
3566 assume there may be more. */
d9d41e78 3567 if (target_is_async_p ()
6953d224
PA
3568 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3569 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3570 || !ptid_equal (ptid, minus_one_ptid)))
3571 async_file_mark ();
3572
7feb7d06
PA
3573 return event_ptid;
3574}
3575
1d2736d4
PA
3576/* Kill one LWP. */
3577
3578static void
3579kill_one_lwp (pid_t pid)
d6b0e80f 3580{
ed731959
JK
3581 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3582
3583 errno = 0;
1d2736d4 3584 kill_lwp (pid, SIGKILL);
ed731959 3585 if (debug_linux_nat)
57745c90
PA
3586 {
3587 int save_errno = errno;
3588
3589 fprintf_unfiltered (gdb_stdlog,
1d2736d4 3590 "KC: kill (SIGKILL) %ld, 0, 0 (%s)\n", (long) pid,
57745c90
PA
3591 save_errno ? safe_strerror (save_errno) : "OK");
3592 }
ed731959
JK
3593
3594 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3595
d6b0e80f 3596 errno = 0;
1d2736d4 3597 ptrace (PTRACE_KILL, pid, 0, 0);
d6b0e80f 3598 if (debug_linux_nat)
57745c90
PA
3599 {
3600 int save_errno = errno;
3601
3602 fprintf_unfiltered (gdb_stdlog,
1d2736d4 3603 "KC: PTRACE_KILL %ld, 0, 0 (%s)\n", (long) pid,
57745c90
PA
3604 save_errno ? safe_strerror (save_errno) : "OK");
3605 }
d6b0e80f
AC
3606}
3607
1d2736d4
PA
3608/* Wait for an LWP to die. */
3609
3610static void
3611kill_wait_one_lwp (pid_t pid)
d6b0e80f 3612{
1d2736d4 3613 pid_t res;
d6b0e80f
AC
3614
3615 /* We must make sure that there are no pending events (delayed
3616 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3617 program doesn't interfere with any following debugging session. */
3618
d6b0e80f
AC
3619 do
3620 {
1d2736d4
PA
3621 res = my_waitpid (pid, NULL, __WALL);
3622 if (res != (pid_t) -1)
d6b0e80f 3623 {
e85a822c
DJ
3624 if (debug_linux_nat)
3625 fprintf_unfiltered (gdb_stdlog,
1d2736d4
PA
3626 "KWC: wait %ld received unknown.\n",
3627 (long) pid);
4a6ed09b
PA
3628 /* The Linux kernel sometimes fails to kill a thread
3629 completely after PTRACE_KILL; that goes from the stop
3630 point in do_fork out to the one in get_signal_to_deliver
3631 and waits again. So kill it again. */
1d2736d4 3632 kill_one_lwp (pid);
d6b0e80f
AC
3633 }
3634 }
1d2736d4
PA
3635 while (res == pid);
3636
3637 gdb_assert (res == -1 && errno == ECHILD);
3638}
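
/* [Editor's sketch -- not part of linux-nat.c] The kill-and-reap
   sequence implemented by kill_one_lwp and kill_wait_one_lwp above,
   written as one standalone function.  Plain kill() stands in for the
   tkill-based kill_lwp helper used by the real code.  */

#define _GNU_SOURCE
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void
kill_and_reap_lwp (pid_t lwp)
{
  /* SIGKILL first: PTRACE_KILL on its own may merely resume the
     tracee.  */
  kill (lwp, SIGKILL);

  /* Some kernels ignore even SIGKILL for ptrace-stopped tasks, so
     follow up with PTRACE_KILL as well.  */
  ptrace (PTRACE_KILL, lwp, 0, 0);

  /* Reap until the kernel says the LWP is really gone (waitpid
     returns -1/ECHILD); kill again if it keeps reappearing.  */
  while (waitpid (lwp, NULL, __WALL) == lwp)
    kill (lwp, SIGKILL);
}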
3639
3640/* Callback for iterate_over_lwps. */
d6b0e80f 3641
1d2736d4
PA
3642static int
3643kill_callback (struct lwp_info *lp, void *data)
3644{
3645 kill_one_lwp (ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3646 return 0;
3647}
3648
1d2736d4
PA
3649/* Callback for iterate_over_lwps. */
3650
3651static int
3652kill_wait_callback (struct lwp_info *lp, void *data)
3653{
3654 kill_wait_one_lwp (ptid_get_lwp (lp->ptid));
3655 return 0;
3656}
3657
3658/* Kill the fork children of any threads of inferior INF that are
3659 stopped at a fork event. */
3660
3661static void
3662kill_unfollowed_fork_children (struct inferior *inf)
3663{
3664 struct thread_info *thread;
3665
3666 ALL_NON_EXITED_THREADS (thread)
3667 if (thread->inf == inf)
3668 {
3669 struct target_waitstatus *ws = &thread->pending_follow;
3670
3671 if (ws->kind == TARGET_WAITKIND_FORKED
3672 || ws->kind == TARGET_WAITKIND_VFORKED)
3673 {
3674 ptid_t child_ptid = ws->value.related_pid;
3675 int child_pid = ptid_get_pid (child_ptid);
3676 int child_lwp = ptid_get_lwp (child_ptid);
1d2736d4
PA
3677
3678 kill_one_lwp (child_lwp);
3679 kill_wait_one_lwp (child_lwp);
3680
3681 /* Let the arch-specific native code know this process is
3682 gone. */
3683 linux_nat_forget_process (child_pid);
3684 }
3685 }
3686}
3687
d6b0e80f 3688static void
7d85a9c0 3689linux_nat_kill (struct target_ops *ops)
d6b0e80f 3690{
f973ed9c
DJ
3691 /* If we're stopped while forking and we haven't followed yet,
3692 kill the other task. We need to do this first because the
3693 parent will be sleeping if this is a vfork. */
1d2736d4 3694 kill_unfollowed_fork_children (current_inferior ());
f973ed9c
DJ
3695
3696 if (forks_exist_p ())
7feb7d06 3697 linux_fork_killall ();
f973ed9c
DJ
3698 else
3699 {
d90e17a7 3700 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3701
4c28f408
PA
3702 /* Stop all threads before killing them, since ptrace requires
3703 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3704 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3705 /* ... and wait until all of them have reported back that
3706 they're no longer running. */
d90e17a7 3707 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3708
f973ed9c 3709 /* Kill all LWP's ... */
d90e17a7 3710 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3711
3712 /* ... and wait until we've flushed all events. */
d90e17a7 3713 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3714 }
3715
3716 target_mourn_inferior ();
d6b0e80f
AC
3717}
3718
3719static void
136d6dae 3720linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3721{
26cb8b7c
PA
3722 int pid = ptid_get_pid (inferior_ptid);
3723
3724 purge_lwp_list (pid);
d6b0e80f 3725
f973ed9c 3726 if (! forks_exist_p ())
d90e17a7
PA
3727 /* Normal case, no other forks available. */
3728 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3729 else
3730 /* Multi-fork case. The current inferior_ptid has exited, but
3731 there are other viable forks to debug. Delete the exiting
3732 one and context-switch to the first available. */
3733 linux_fork_mourn_inferior ();
26cb8b7c
PA
3734
3735 /* Let the arch-specific native code know this process is gone. */
3736 linux_nat_forget_process (pid);
d6b0e80f
AC
3737}
3738
5b009018
PA
3739/* Convert a native/host siginfo object, into/from the siginfo in the
3740 layout of the inferiors' architecture. */
3741
3742static void
a5362b9a 3743siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3744{
3745 int done = 0;
3746
3747 if (linux_nat_siginfo_fixup != NULL)
3748 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3749
3750 /* If there was no callback, or the callback didn't do anything,
3751 then just do a straight memcpy. */
3752 if (!done)
3753 {
3754 if (direction == 1)
a5362b9a 3755 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3756 else
a5362b9a 3757 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3758 }
3759}
3760
9b409511 3761static enum target_xfer_status
4aa995e1
PA
3762linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3763 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3764 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3765 ULONGEST *xfered_len)
4aa995e1 3766{
4aa995e1 3767 int pid;
a5362b9a
TS
3768 siginfo_t siginfo;
3769 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3770
3771 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3772 gdb_assert (readbuf || writebuf);
3773
dfd4cc63 3774 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3775 if (pid == 0)
dfd4cc63 3776 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3777
3778 if (offset > sizeof (siginfo))
2ed4b548 3779 return TARGET_XFER_E_IO;
4aa995e1
PA
3780
3781 errno = 0;
3782 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3783 if (errno != 0)
2ed4b548 3784 return TARGET_XFER_E_IO;
4aa995e1 3785
5b009018
PA
3786 /* When GDB is built as a 64-bit application, ptrace writes into
3787 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3788 inferior with a 64-bit GDB should look the same as debugging it
3789 with a 32-bit GDB, we need to convert it. GDB core always sees
3790 the converted layout, so any read/write will have to be done
3791 post-conversion. */
3792 siginfo_fixup (&siginfo, inf_siginfo, 0);
3793
4aa995e1
PA
3794 if (offset + len > sizeof (siginfo))
3795 len = sizeof (siginfo) - offset;
3796
3797 if (readbuf != NULL)
5b009018 3798 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3799 else
3800 {
5b009018
PA
3801 memcpy (inf_siginfo + offset, writebuf, len);
3802
3803 /* Convert back to ptrace layout before flushing it out. */
3804 siginfo_fixup (&siginfo, inf_siginfo, 1);
3805
4aa995e1
PA
3806 errno = 0;
3807 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3808 if (errno != 0)
2ed4b548 3809 return TARGET_XFER_E_IO;
4aa995e1
PA
3810 }
3811
9b409511
YQ
3812 *xfered_len = len;
3813 return TARGET_XFER_OK;
4aa995e1
PA
3814}
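
/* [Editor's sketch -- not part of linux-nat.c] The bare
   PTRACE_GETSIGINFO / PTRACE_SETSIGINFO round trip that
   linux_xfer_siginfo wraps above, without the 32/64-bit layout
   conversion done by siginfo_fixup.  */

#include <errno.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static int
fetch_lwp_siginfo (pid_t lwp, siginfo_t *info)
{
  errno = 0;
  ptrace (PTRACE_GETSIGINFO, lwp, (void *) 0, info);
  return errno == 0 ? 0 : -1;
}

static int
store_lwp_siginfo (pid_t lwp, const siginfo_t *info)
{
  errno = 0;
  ptrace (PTRACE_SETSIGINFO, lwp, (void *) 0, (void *) info);
  return errno == 0 ? 0 : -1;
}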
3815
9b409511 3816static enum target_xfer_status
10d6c8cd
DJ
3817linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3818 const char *annex, gdb_byte *readbuf,
3819 const gdb_byte *writebuf,
9b409511 3820 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3821{
4aa995e1 3822 struct cleanup *old_chain;
9b409511 3823 enum target_xfer_status xfer;
d6b0e80f 3824
4aa995e1
PA
3825 if (object == TARGET_OBJECT_SIGNAL_INFO)
3826 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 3827 offset, len, xfered_len);
4aa995e1 3828
c35b1492
PA
3829 /* The target is connected but no live inferior is selected. Pass
3830 this request down to a lower stratum (e.g., the executable
3831 file). */
3832 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3833 return TARGET_XFER_EOF;
c35b1492 3834
4aa995e1
PA
3835 old_chain = save_inferior_ptid ();
3836
dfd4cc63
LM
3837 if (ptid_lwp_p (inferior_ptid))
3838 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 3839
10d6c8cd 3840 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 3841 offset, len, xfered_len);
d6b0e80f
AC
3842
3843 do_cleanups (old_chain);
3844 return xfer;
3845}
3846
28439f5e
PA
3847static int
3848linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
3849{
4a6ed09b
PA
3850 /* As long as a PTID is in lwp list, consider it alive. */
3851 return find_lwp_pid (ptid) != NULL;
28439f5e
PA
3852}
3853
8a06aea7
PA
3854/* Implement the to_update_thread_list target method for this
3855 target. */
3856
3857static void
3858linux_nat_update_thread_list (struct target_ops *ops)
3859{
a6904d5a
PA
3860 struct lwp_info *lwp;
3861
4a6ed09b
PA
3862 /* We add/delete threads from the list as clone/exit events are
3863 processed, so just try deleting exited threads still in the
3864 thread list. */
3865 delete_exited_threads ();
a6904d5a
PA
3866
3867 /* Update the processor core that each lwp/thread was last seen
3868 running on. */
3869 ALL_LWPS (lwp)
1ad3de98
PA
3870 {
3871 /* Avoid accessing /proc if the thread hasn't run since the last
3872 time we fetched the thread's core. Accessing /proc becomes
3873 noticeably expensive when we have thousands of LWPs. */
3874 if (lwp->core == -1)
3875 lwp->core = linux_common_core_of_thread (lwp->ptid);
3876 }
8a06aea7
PA
3877}
3878
d6b0e80f 3879static char *
117de6a9 3880linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
3881{
3882 static char buf[64];
3883
dfd4cc63
LM
3884 if (ptid_lwp_p (ptid)
3885 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
3886 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 3887 {
dfd4cc63 3888 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
3889 return buf;
3890 }
3891
3892 return normal_pid_to_str (ptid);
3893}
3894
73ede765 3895static const char *
503a628d 3896linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01 3897{
79efa585 3898 return linux_proc_tid_get_name (thr->ptid);
4694da01
TT
3899}
3900
dba24537
AC
3901/* Accepts an integer PID; Returns a string representing a file that
3902 can be opened to get the symbols for the child process. */
3903
6d8fd2b7 3904static char *
8dd27370 3905linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 3906{
e0d86d2c 3907 return linux_proc_pid_to_exec_file (pid);
dba24537
AC
3908}
3909
10d6c8cd
DJ
3910/* Implement the to_xfer_partial interface for memory reads using the /proc
3911 filesystem. Because we can use a single read() call for /proc, this
3912 can be much more efficient than banging away at PTRACE_PEEKTEXT,
3913 but it doesn't support writes. */
3914
9b409511 3915static enum target_xfer_status
10d6c8cd
DJ
3916linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
3917 const char *annex, gdb_byte *readbuf,
3918 const gdb_byte *writebuf,
9b409511 3919 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 3920{
10d6c8cd
DJ
3921 LONGEST ret;
3922 int fd;
dba24537
AC
3923 char filename[64];
3924
10d6c8cd 3925 if (object != TARGET_OBJECT_MEMORY || !readbuf)
f486487f 3926 return TARGET_XFER_EOF;
dba24537
AC
3927
3928 /* Don't bother for one word. */
3929 if (len < 3 * sizeof (long))
9b409511 3930 return TARGET_XFER_EOF;
dba24537
AC
3931
3932 /* We could keep this file open and cache it - possibly one per
3933 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
3934 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
3935 ptid_get_pid (inferior_ptid));
614c279d 3936 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 3937 if (fd == -1)
9b409511 3938 return TARGET_XFER_EOF;
dba24537
AC
3939
3940 /* If pread64 is available, use it. It's faster if the kernel
3941 supports it (only one syscall), and it's 64-bit safe even on
3942 32-bit platforms (for instance, SPARC debugging a SPARC64
3943 application). */
3944#ifdef HAVE_PREAD64
10d6c8cd 3945 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 3946#else
10d6c8cd 3947 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
3948#endif
3949 ret = 0;
3950 else
3951 ret = len;
3952
3953 close (fd);
9b409511
YQ
3954
3955 if (ret == 0)
3956 return TARGET_XFER_EOF;
3957 else
3958 {
3959 *xfered_len = ret;
3960 return TARGET_XFER_OK;
3961 }
dba24537
AC
3962}
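
/* [Editor's sketch -- not part of linux-nat.c] The core of the /proc
   read path above, reduced to a single pread64 of /proc/PID/mem.
   Error handling collapses to returning -1.  */

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t
read_inferior_memory (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd == -1)
    return -1;

  /* pread64 avoids a separate lseek and is 64-bit clean even on
     32-bit hosts, which is why the code above prefers it.  */
  n = pread64 (fd, buf, len, (off64_t) addr);
  close (fd);
  return n;
}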
3963
efcbbd14
UW
3964
3965/* Enumerate spufs IDs for process PID. */
3966static LONGEST
b55e14c7 3967spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 3968{
f5656ead 3969 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
3970 LONGEST pos = 0;
3971 LONGEST written = 0;
3972 char path[128];
3973 DIR *dir;
3974 struct dirent *entry;
3975
3976 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
3977 dir = opendir (path);
3978 if (!dir)
3979 return -1;
3980
3981 rewinddir (dir);
3982 while ((entry = readdir (dir)) != NULL)
3983 {
3984 struct stat st;
3985 struct statfs stfs;
3986 int fd;
3987
3988 fd = atoi (entry->d_name);
3989 if (!fd)
3990 continue;
3991
3992 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
3993 if (stat (path, &st) != 0)
3994 continue;
3995 if (!S_ISDIR (st.st_mode))
3996 continue;
3997
3998 if (statfs (path, &stfs) != 0)
3999 continue;
4000 if (stfs.f_type != SPUFS_MAGIC)
4001 continue;
4002
4003 if (pos >= offset && pos + 4 <= offset + len)
4004 {
4005 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4006 written += 4;
4007 }
4008 pos += 4;
4009 }
4010
4011 closedir (dir);
4012 return written;
4013}
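
/* [Editor's sketch -- not part of linux-nat.c] The statfs check the
   enumerator above applies to each /proc/PID/fd entry to decide
   whether it is an spufs context directory.  SPUFS_MAGIC is the
   constant defined near the top of this file.  */

#include <sys/stat.h>
#include <sys/vfs.h>

static int
fd_path_is_spufs (const char *path)
{
  struct stat st;
  struct statfs stfs;

  if (stat (path, &st) != 0 || !S_ISDIR (st.st_mode))
    return 0;
  if (statfs (path, &stfs) != 0)
    return 0;
  return stfs.f_type == SPUFS_MAGIC;
}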
4014
4015/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4016 object type, using the /proc file system. */
9b409511
YQ
4017
4018static enum target_xfer_status
efcbbd14
UW
4019linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4020 const char *annex, gdb_byte *readbuf,
4021 const gdb_byte *writebuf,
9b409511 4022 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
4023{
4024 char buf[128];
4025 int fd = 0;
4026 int ret = -1;
dfd4cc63 4027 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
4028
4029 if (!annex)
4030 {
4031 if (!readbuf)
2ed4b548 4032 return TARGET_XFER_E_IO;
efcbbd14 4033 else
9b409511
YQ
4034 {
4035 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4036
4037 if (l < 0)
4038 return TARGET_XFER_E_IO;
4039 else if (l == 0)
4040 return TARGET_XFER_EOF;
4041 else
4042 {
4043 *xfered_len = (ULONGEST) l;
4044 return TARGET_XFER_OK;
4045 }
4046 }
efcbbd14
UW
4047 }
4048
4049 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 4050 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 4051 if (fd <= 0)
2ed4b548 4052 return TARGET_XFER_E_IO;
efcbbd14
UW
4053
4054 if (offset != 0
4055 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4056 {
4057 close (fd);
9b409511 4058 return TARGET_XFER_EOF;
efcbbd14
UW
4059 }
4060
4061 if (writebuf)
4062 ret = write (fd, writebuf, (size_t) len);
4063 else if (readbuf)
4064 ret = read (fd, readbuf, (size_t) len);
4065
4066 close (fd);
9b409511
YQ
4067
4068 if (ret < 0)
4069 return TARGET_XFER_E_IO;
4070 else if (ret == 0)
4071 return TARGET_XFER_EOF;
4072 else
4073 {
4074 *xfered_len = (ULONGEST) ret;
4075 return TARGET_XFER_OK;
4076 }
efcbbd14
UW
4077}
4078
4079
dba24537
AC
4080/* Parse LINE as a signal set and add its set bits to SIGS. */
4081
4082static void
4083add_line_to_sigset (const char *line, sigset_t *sigs)
4084{
4085 int len = strlen (line) - 1;
4086 const char *p;
4087 int signum;
4088
4089 if (line[len] != '\n')
8a3fe4f8 4090 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4091
4092 p = line;
4093 signum = len * 4;
4094 while (len-- > 0)
4095 {
4096 int digit;
4097
4098 if (*p >= '0' && *p <= '9')
4099 digit = *p - '0';
4100 else if (*p >= 'a' && *p <= 'f')
4101 digit = *p - 'a' + 10;
4102 else
8a3fe4f8 4103 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4104
4105 signum -= 4;
4106
4107 if (digit & 1)
4108 sigaddset (sigs, signum + 1);
4109 if (digit & 2)
4110 sigaddset (sigs, signum + 2);
4111 if (digit & 4)
4112 sigaddset (sigs, signum + 3);
4113 if (digit & 8)
4114 sigaddset (sigs, signum + 4);
4115
4116 p++;
4117 }
4118}
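
/* [Editor's sketch -- not part of linux-nat.c] A worked example of the
   nibble decoding done by add_line_to_sigset above.  Each hex digit of
   the kernel's mask covers four signal numbers, with the lowest
   signals in the right-most digit.  */

#include <signal.h>
#include <stdio.h>

static void
example_decode_sigpnd (void)
{
  sigset_t sigs;

  sigemptyset (&sigs);

  /* The value of a "SigPnd:\t..." status line with its prefix
     stripped, as the caller below passes it in.  Bit 1 of the last
     digit is signal 2 (SIGINT); bit 0 of the third digit from the
     right is signal 9 (SIGKILL).  */
  add_line_to_sigset ("0000000000000102\n", &sigs);

  printf ("SIGINT pending: %d\n", sigismember (&sigs, SIGINT));
  printf ("SIGKILL pending: %d\n", sigismember (&sigs, SIGKILL));
}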
4119
4120/* Find process PID's pending signals from /proc/pid/status and set
4121 SIGS to match. */
4122
4123void
3e43a32a
MS
4124linux_proc_pending_signals (int pid, sigset_t *pending,
4125 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4126{
4127 FILE *procfile;
d8d2a3ee 4128 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 4129 struct cleanup *cleanup;
dba24537
AC
4130
4131 sigemptyset (pending);
4132 sigemptyset (blocked);
4133 sigemptyset (ignored);
cde33bf1 4134 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 4135 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4136 if (procfile == NULL)
8a3fe4f8 4137 error (_("Could not open %s"), fname);
7c8a8b04 4138 cleanup = make_cleanup_fclose (procfile);
dba24537 4139
d8d2a3ee 4140 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
4141 {
4142 /* Normal queued signals are on the SigPnd line in the status
4143 file. However, 2.6 kernels also have a "shared" pending
4144 queue for delivering signals to a thread group, so check for
4145 a ShdPnd line also.
4146
4147 Unfortunately some Red Hat kernels include the shared pending
4148 queue but not the ShdPnd status field. */
4149
61012eef 4150 if (startswith (buffer, "SigPnd:\t"))
dba24537 4151 add_line_to_sigset (buffer + 8, pending);
61012eef 4152 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4153 add_line_to_sigset (buffer + 8, pending);
61012eef 4154 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4155 add_line_to_sigset (buffer + 8, blocked);
61012eef 4156 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4157 add_line_to_sigset (buffer + 8, ignored);
4158 }
4159
7c8a8b04 4160 do_cleanups (cleanup);
dba24537
AC
4161}
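
/* [Editor's sketch -- not part of linux-nat.c] An example call showing
   how the three output sets of linux_proc_pending_signals are
   consumed.  The PID and the signals tested are only illustrative.  */

#include <signal.h>
#include <stdio.h>

static void
example_report_pending (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  if (sigismember (&pending, SIGINT))
    printf ("process %d has a SIGINT queued\n", pid);
  if (sigismember (&blocked, SIGCHLD))
    printf ("process %d currently blocks SIGCHLD\n", pid);
}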
4162
9b409511 4163static enum target_xfer_status
07e059b5 4164linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4165 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4166 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4167 ULONGEST *xfered_len)
07e059b5 4168{
07e059b5
VP
4169 gdb_assert (object == TARGET_OBJECT_OSDATA);
4170
9b409511
YQ
4171 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4172 if (*xfered_len == 0)
4173 return TARGET_XFER_EOF;
4174 else
4175 return TARGET_XFER_OK;
07e059b5
VP
4176}
4177
9b409511 4178static enum target_xfer_status
10d6c8cd
DJ
4179linux_xfer_partial (struct target_ops *ops, enum target_object object,
4180 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4181 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4182 ULONGEST *xfered_len)
10d6c8cd 4183{
9b409511 4184 enum target_xfer_status xfer;
10d6c8cd
DJ
4185
4186 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4187 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4188 offset, len, xfered_len);
10d6c8cd 4189
07e059b5
VP
4190 if (object == TARGET_OBJECT_OSDATA)
4191 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4192 offset, len, xfered_len);
07e059b5 4193
efcbbd14
UW
4194 if (object == TARGET_OBJECT_SPU)
4195 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4196 offset, len, xfered_len);
efcbbd14 4197
8f313923
JK
4198/* GDB calculates all the addresses in the possibly larger width of the address.
4199 Address width needs to be masked before its final use - either by
4200 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4201
4202 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4203
4204 if (object == TARGET_OBJECT_MEMORY)
4205 {
f5656ead 4206 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4207
4208 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4209 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4210 }
4211
10d6c8cd 4212 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4213 offset, len, xfered_len);
4214 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4215 return xfer;
4216
4217 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4218 offset, len, xfered_len);
10d6c8cd
DJ
4219}
4220
5808517f
YQ
4221static void
4222cleanup_target_stop (void *arg)
4223{
4224 ptid_t *ptid = (ptid_t *) arg;
4225
4226 gdb_assert (arg != NULL);
4227
4228 /* Unpause all */
a493e3e2 4229 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4230}
4231
4232static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4233linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4234 const char *strid)
5808517f
YQ
4235{
4236 char s[IPA_CMD_BUF_SIZE];
4237 struct cleanup *old_chain;
4238 int pid = ptid_get_pid (inferior_ptid);
4239 VEC(static_tracepoint_marker_p) *markers = NULL;
4240 struct static_tracepoint_marker *marker = NULL;
4241 char *p = s;
4242 ptid_t ptid = ptid_build (pid, 0, 0);
4243
4244 /* Pause all */
4245 target_stop (ptid);
4246
4247 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4248 s[sizeof ("qTfSTM")] = 0;
4249
42476b70 4250 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4251
4252 old_chain = make_cleanup (free_current_marker, &marker);
4253 make_cleanup (cleanup_target_stop, &ptid);
4254
4255 while (*p++ == 'm')
4256 {
4257 if (marker == NULL)
4258 marker = XCNEW (struct static_tracepoint_marker);
4259
4260 do
4261 {
4262 parse_static_tracepoint_marker_definition (p, &p, marker);
4263
4264 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4265 {
4266 VEC_safe_push (static_tracepoint_marker_p,
4267 markers, marker);
4268 marker = NULL;
4269 }
4270 else
4271 {
4272 release_static_tracepoint_marker (marker);
4273 memset (marker, 0, sizeof (*marker));
4274 }
4275 }
4276 while (*p++ == ','); /* comma-separated list */
4277
4278 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4279 s[sizeof ("qTsSTM")] = 0;
42476b70 4280 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4281 p = s;
4282 }
4283
4284 do_cleanups (old_chain);
4285
4286 return markers;
4287}
4288
e9efe249 4289/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4290 it with local methods. */
4291
910122bf
UW
4292static void
4293linux_target_install_ops (struct target_ops *t)
10d6c8cd 4294{
6d8fd2b7 4295 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4296 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4297 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4298 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4299 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4300 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4301 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4302 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4303 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4304 t->to_post_attach = linux_child_post_attach;
4305 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4306
4307 super_xfer_partial = t->to_xfer_partial;
4308 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4309
4310 t->to_static_tracepoint_markers_by_strid
4311 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4312}
4313
4314struct target_ops *
4315linux_target (void)
4316{
4317 struct target_ops *t;
4318
4319 t = inf_ptrace_target ();
4320 linux_target_install_ops (t);
4321
4322 return t;
4323}
4324
4325struct target_ops *
7714d83a 4326linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4327{
4328 struct target_ops *t;
4329
4330 t = inf_ptrace_trad_target (register_u_offset);
4331 linux_target_install_ops (t);
10d6c8cd 4332
10d6c8cd
DJ
4333 return t;
4334}
4335
b84876c2
PA
4336/* target_is_async_p implementation. */
4337
4338static int
6a109b6b 4339linux_nat_is_async_p (struct target_ops *ops)
b84876c2 4340{
198297aa 4341 return linux_is_async_p ();
b84876c2
PA
4342}
4343
4344/* target_can_async_p implementation. */
4345
4346static int
6a109b6b 4347linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4348{
4349 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4350 it explicitly with the "set target-async" command.
b84876c2 4351 Someday, linux will always be async. */
3dd5b83d 4352 return target_async_permitted;
b84876c2
PA
4353}
4354
9908b566 4355static int
2a9a2795 4356linux_nat_supports_non_stop (struct target_ops *self)
9908b566
VP
4357{
4358 return 1;
4359}
4360
fbea99ea
PA
4361/* to_always_non_stop_p implementation. */
4362
4363static int
4364linux_nat_always_non_stop_p (struct target_ops *self)
4365{
f12899e9 4366 return 1;
fbea99ea
PA
4367}
4368
d90e17a7
PA
4369/* True if we want to support multi-process. To be removed when GDB
4370 supports multi-exec. */
4371
2277426b 4372int linux_multi_process = 1;
d90e17a7
PA
4373
4374static int
86ce2668 4375linux_nat_supports_multi_process (struct target_ops *self)
d90e17a7
PA
4376{
4377 return linux_multi_process;
4378}
4379
03583c20 4380static int
2bfc0540 4381linux_nat_supports_disable_randomization (struct target_ops *self)
03583c20
UW
4382{
4383#ifdef HAVE_PERSONALITY
4384 return 1;
4385#else
4386 return 0;
4387#endif
4388}
4389
b84876c2
PA
4390static int async_terminal_is_ours = 1;
4391
4d4ca2a1
DE
4392/* target_terminal_inferior implementation.
4393
4394 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4395
4396static void
d2f640d4 4397linux_nat_terminal_inferior (struct target_ops *self)
b84876c2 4398{
d6b64346 4399 child_terminal_inferior (self);
b84876c2 4400
d9d2d8b6 4401 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4402 if (!async_terminal_is_ours)
4403 return;
4404
4405 delete_file_handler (input_fd);
4406 async_terminal_is_ours = 0;
4407 set_sigint_trap ();
4408}
4409
4d4ca2a1
DE
4410/* target_terminal_ours implementation.
4411
4412 This is a wrapper around child_terminal_ours to add async support (and
4413 implement the target_terminal_ours vs target_terminal_ours_for_output
4414 distinction). child_terminal_ours is currently no different than
4415 child_terminal_ours_for_output.
4416 We leave target_terminal_ours_for_output alone, leaving it to
4417 child_terminal_ours_for_output. */
b84876c2 4418
2c0b251b 4419static void
e3594fd1 4420linux_nat_terminal_ours (struct target_ops *self)
b84876c2 4421{
b84876c2
PA
4422 /* GDB should never give the terminal to the inferior if the
4423 inferior is running in the background (run&, continue&, etc.),
4424 but claiming it sure should. */
d6b64346 4425 child_terminal_ours (self);
b84876c2 4426
b84876c2
PA
4427 if (async_terminal_is_ours)
4428 return;
4429
4430 clear_sigint_trap ();
4431 add_file_handler (input_fd, stdin_event_handler, 0);
4432 async_terminal_is_ours = 1;
4433}
4434
7feb7d06
PA
4435/* SIGCHLD handler that serves two purposes: in non-stop/async mode,
4436 it lets us notice when any child changes state and notify the
4437 event loop; and it allows us to use sigsuspend in linux_nat_wait_1
4438 above to wait for the arrival of a SIGCHLD. */
4439
b84876c2 4440static void
7feb7d06 4441sigchld_handler (int signo)
b84876c2 4442{
7feb7d06
PA
4443 int old_errno = errno;
4444
01124a23
DE
4445 if (debug_linux_nat)
4446 ui_file_write_async_safe (gdb_stdlog,
4447 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4448
4449 if (signo == SIGCHLD
4450 && linux_nat_event_pipe[0] != -1)
4451 async_file_mark (); /* Let the event loop know that there are
4452 events to handle. */
4453
4454 errno = old_errno;
4455}
4456
4457/* Callback registered with the target events file descriptor. */
4458
4459static void
4460handle_target_event (int error, gdb_client_data client_data)
4461{
6a3753b3 4462 inferior_event_handler (INF_REG_EVENT, NULL);
7feb7d06
PA
4463}
4464
4465/* Create/destroy the target events pipe. Returns previous state. */
4466
4467static int
4468linux_async_pipe (int enable)
4469{
198297aa 4470 int previous = linux_is_async_p ();
7feb7d06
PA
4471
4472 if (previous != enable)
4473 {
4474 sigset_t prev_mask;
4475
12696c10
PA
4476 /* Block child signals while we create/destroy the pipe, as
4477 their handler writes to it. */
7feb7d06
PA
4478 block_child_signals (&prev_mask);
4479
4480 if (enable)
4481 {
614c279d 4482 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4483 internal_error (__FILE__, __LINE__,
4484 "creating event pipe failed.");
4485
4486 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4487 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4488 }
4489 else
4490 {
4491 close (linux_nat_event_pipe[0]);
4492 close (linux_nat_event_pipe[1]);
4493 linux_nat_event_pipe[0] = -1;
4494 linux_nat_event_pipe[1] = -1;
4495 }
4496
4497 restore_child_signals_mask (&prev_mask);
4498 }
4499
4500 return previous;
b84876c2
PA
4501}
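
/* [Editor's sketch -- not part of linux-nat.c] The self-pipe pattern
   that sigchld_handler, async_file_mark and linux_async_pipe implement
   above, reduced to its essentials.  The handler only performs an
   async-signal-safe write; the event loop watches the read end.  The
   sketch_* names are placeholders.  */

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <unistd.h>

static int event_pipe[2] = { -1, -1 };

static void
sketch_sigchld_handler (int signo)
{
  int saved_errno = errno;

  if (signo == SIGCHLD && event_pipe[1] != -1)
    {
      char c = '+';

      /* A full pipe just means a wake-up is already queued, so EAGAIN
         can be ignored.  */
      (void) write (event_pipe[1], &c, 1);
    }

  errno = saved_errno;
}

static int
sketch_enable_async (void)
{
  if (pipe2 (event_pipe, O_CLOEXEC | O_NONBLOCK) == -1)
    return -1;

  signal (SIGCHLD, sketch_sigchld_handler);
  return 0;
}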
4502
4503/* target_async implementation. */
4504
4505static void
6a3753b3 4506linux_nat_async (struct target_ops *ops, int enable)
b84876c2 4507{
6a3753b3 4508 if (enable)
b84876c2 4509 {
7feb7d06
PA
4510 if (!linux_async_pipe (1))
4511 {
4512 add_file_handler (linux_nat_event_pipe[0],
4513 handle_target_event, NULL);
4514 /* There may be pending events to handle. Tell the event loop
4515 to poll them. */
4516 async_file_mark ();
4517 }
b84876c2
PA
4518 }
4519 else
4520 {
b84876c2 4521 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4522 linux_async_pipe (0);
b84876c2
PA
4523 }
4524 return;
4525}
4526
a493e3e2 4527/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4528 event came out. */
4529
4c28f408 4530static int
252fbfc8 4531linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4532{
d90e17a7 4533 if (!lwp->stopped)
252fbfc8 4534 {
d90e17a7
PA
4535 if (debug_linux_nat)
4536 fprintf_unfiltered (gdb_stdlog,
4537 "LNSL: running -> suspending %s\n",
4538 target_pid_to_str (lwp->ptid));
252fbfc8 4539
252fbfc8 4540
25289eb2
PA
4541 if (lwp->last_resume_kind == resume_stop)
4542 {
4543 if (debug_linux_nat)
4544 fprintf_unfiltered (gdb_stdlog,
4545 "linux-nat: already stopping LWP %ld at "
4546 "GDB's request\n",
4547 ptid_get_lwp (lwp->ptid));
4548 return 0;
4549 }
252fbfc8 4550
25289eb2
PA
4551 stop_callback (lwp, NULL);
4552 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4553 }
4554 else
4555 {
4556 /* Already known to be stopped; do nothing. */
252fbfc8 4557
d90e17a7
PA
4558 if (debug_linux_nat)
4559 {
e09875d4 4560 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4561 fprintf_unfiltered (gdb_stdlog,
4562 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4563 target_pid_to_str (lwp->ptid));
4564 else
3e43a32a
MS
4565 fprintf_unfiltered (gdb_stdlog,
4566 "LNSL: already stopped/no "
4567 "stop_requested yet %s\n",
d90e17a7 4568 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4569 }
4570 }
4c28f408
PA
4571 return 0;
4572}
4573
4574static void
1eab8a48 4575linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408 4576{
bfedc46a
PA
4577 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4578}
4579
d90e17a7 4580static void
de90e03d 4581linux_nat_close (struct target_ops *self)
d90e17a7
PA
4582{
4583 /* Unregister from the event loop. */
9debeba0 4584 if (linux_nat_is_async_p (self))
6a3753b3 4585 linux_nat_async (self, 0);
d90e17a7 4586
d90e17a7 4587 if (linux_ops->to_close)
de90e03d 4588 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4589
4590 super_close (self);
d90e17a7
PA
4591}
4592
c0694254
PA
4593/* When requests are passed down from the linux-nat layer to the
4594 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4595 used. The address space pointer is stored in the inferior object,
4596 but the common code that is passed such ptid can't tell whether
4597 lwpid is a "main" process id or not (it assumes so). We reverse
4598 look up the "main" process id from the lwp here. */
4599
70221824 4600static struct address_space *
c0694254
PA
4601linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4602{
4603 struct lwp_info *lwp;
4604 struct inferior *inf;
4605 int pid;
4606
dfd4cc63 4607 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4608 {
4609 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4610 tgid. */
4611 lwp = find_lwp_pid (ptid);
dfd4cc63 4612 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4613 }
4614 else
4615 {
4616 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4617 pid = ptid_get_pid (ptid);
c0694254
PA
4618 }
4619
4620 inf = find_inferior_pid (pid);
4621 gdb_assert (inf != NULL);
4622 return inf->aspace;
4623}
4624
dc146f7c
VP
4625/* Return the cached value of the processor core for thread PTID. */
4626
70221824 4627static int
dc146f7c
VP
4628linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4629{
4630 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4631
dc146f7c
VP
4632 if (info)
4633 return info->core;
4634 return -1;
4635}
4636
7a6a1731
GB
4637/* Implementation of to_filesystem_is_local. */
4638
4639static int
4640linux_nat_filesystem_is_local (struct target_ops *ops)
4641{
4642 struct inferior *inf = current_inferior ();
4643
4644 if (inf->fake_pid_p || inf->pid == 0)
4645 return 1;
4646
4647 return linux_ns_same (inf->pid, LINUX_NS_MNT);
4648}
4649
4650/* Convert the INF argument passed to a to_fileio_* method
4651 to a process ID suitable for passing to its corresponding
4652 linux_mntns_* function. If INF is non-NULL then the
4653 caller is requesting the filesystem seen by INF. If INF
4654 is NULL then the caller is requesting the filesystem seen
4655 by GDB itself. We fall back to GDB's filesystem in the case
4656 that INF is non-NULL but its PID is unknown. */
4657
4658static pid_t
4659linux_nat_fileio_pid_of (struct inferior *inf)
4660{
4661 if (inf == NULL || inf->fake_pid_p || inf->pid == 0)
4662 return getpid ();
4663 else
4664 return inf->pid;
4665}
4666
4667/* Implementation of to_fileio_open. */
4668
4669static int
4670linux_nat_fileio_open (struct target_ops *self,
4671 struct inferior *inf, const char *filename,
4313b8c0
GB
4672 int flags, int mode, int warn_if_slow,
4673 int *target_errno)
7a6a1731
GB
4674{
4675 int nat_flags;
4676 mode_t nat_mode;
4677 int fd;
4678
4679 if (fileio_to_host_openflags (flags, &nat_flags) == -1
4680 || fileio_to_host_mode (mode, &nat_mode) == -1)
4681 {
4682 *target_errno = FILEIO_EINVAL;
4683 return -1;
4684 }
4685
4686 fd = linux_mntns_open_cloexec (linux_nat_fileio_pid_of (inf),
4687 filename, nat_flags, nat_mode);
4688 if (fd == -1)
4689 *target_errno = host_to_fileio_error (errno);
4690
4691 return fd;
4692}
4693
4694/* Implementation of to_fileio_readlink. */
4695
4696static char *
4697linux_nat_fileio_readlink (struct target_ops *self,
4698 struct inferior *inf, const char *filename,
4699 int *target_errno)
4700{
4701 char buf[PATH_MAX];
4702 int len;
4703 char *ret;
4704
4705 len = linux_mntns_readlink (linux_nat_fileio_pid_of (inf),
4706 filename, buf, sizeof (buf));
4707 if (len < 0)
4708 {
4709 *target_errno = host_to_fileio_error (errno);
4710 return NULL;
4711 }
4712
224c3ddb 4713 ret = (char *) xmalloc (len + 1);
7a6a1731
GB
4714 memcpy (ret, buf, len);
4715 ret[len] = '\0';
4716 return ret;
4717}
4718
4719/* Implementation of to_fileio_unlink. */
4720
4721static int
4722linux_nat_fileio_unlink (struct target_ops *self,
4723 struct inferior *inf, const char *filename,
4724 int *target_errno)
4725{
4726 int ret;
4727
4728 ret = linux_mntns_unlink (linux_nat_fileio_pid_of (inf),
4729 filename);
4730 if (ret == -1)
4731 *target_errno = host_to_fileio_error (errno);
4732
4733 return ret;
4734}
4735
aa01bd36
PA
4736/* Implementation of the to_thread_events method. */
4737
4738static void
4739linux_nat_thread_events (struct target_ops *ops, int enable)
4740{
4741 report_thread_events = enable;
4742}
4743
f973ed9c
DJ
4744void
4745linux_nat_add_target (struct target_ops *t)
4746{
f973ed9c
DJ
4747 /* Save the provided single-threaded target. We save this in a separate
4748 variable because another target we've inherited from (e.g. inf-ptrace)
4749 may have saved a pointer to T; we want to use it for the final
4750 process stratum target. */
4751 linux_ops_saved = *t;
4752 linux_ops = &linux_ops_saved;
4753
4754 /* Override some methods for multithreading. */
b84876c2 4755 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4756 t->to_attach = linux_nat_attach;
4757 t->to_detach = linux_nat_detach;
4758 t->to_resume = linux_nat_resume;
4759 t->to_wait = linux_nat_wait;
2455069d 4760 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4761 t->to_xfer_partial = linux_nat_xfer_partial;
4762 t->to_kill = linux_nat_kill;
4763 t->to_mourn_inferior = linux_nat_mourn_inferior;
4764 t->to_thread_alive = linux_nat_thread_alive;
8a06aea7 4765 t->to_update_thread_list = linux_nat_update_thread_list;
f973ed9c 4766 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4767 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4768 t->to_has_thread_control = tc_schedlock;
c0694254 4769 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4770 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4771 t->to_stopped_data_address = linux_nat_stopped_data_address;
faf09f01
PA
4772 t->to_stopped_by_sw_breakpoint = linux_nat_stopped_by_sw_breakpoint;
4773 t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
4774 t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
4775 t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
aa01bd36 4776 t->to_thread_events = linux_nat_thread_events;
f973ed9c 4777
b84876c2
PA
4778 t->to_can_async_p = linux_nat_can_async_p;
4779 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4780 t->to_supports_non_stop = linux_nat_supports_non_stop;
fbea99ea 4781 t->to_always_non_stop_p = linux_nat_always_non_stop_p;
b84876c2 4782 t->to_async = linux_nat_async;
b84876c2
PA
4783 t->to_terminal_inferior = linux_nat_terminal_inferior;
4784 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4785
4786 super_close = t->to_close;
d90e17a7 4787 t->to_close = linux_nat_close;
b84876c2 4788
4c28f408
PA
4789 t->to_stop = linux_nat_stop;
4790
d90e17a7
PA
4791 t->to_supports_multi_process = linux_nat_supports_multi_process;
4792
03583c20
UW
4793 t->to_supports_disable_randomization
4794 = linux_nat_supports_disable_randomization;
4795
dc146f7c
VP
4796 t->to_core_of_thread = linux_nat_core_of_thread;
4797
7a6a1731
GB
4798 t->to_filesystem_is_local = linux_nat_filesystem_is_local;
4799 t->to_fileio_open = linux_nat_fileio_open;
4800 t->to_fileio_readlink = linux_nat_fileio_readlink;
4801 t->to_fileio_unlink = linux_nat_fileio_unlink;
4802
f973ed9c
DJ
4803 /* We don't change the stratum; this target will sit at
4804 process_stratum and thread_db will set at thread_stratum. This
4805 is a little strange, since this is a multi-threaded-capable
4806 target, but we want to be on the stack below thread_db, and we
4807 also want to be used for single-threaded processes. */
4808
4809 add_target (t);
f973ed9c
DJ
4810}
4811
9f0bdab8
DJ
4812/* Register a method to call whenever a new thread is attached. */
4813void
7b50312a
PA
4814linux_nat_set_new_thread (struct target_ops *t,
4815 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4816{
4817 /* Save the pointer. We only support a single registered instance
4818 of the GNU/Linux native target, so we do not need to map this to
4819 T. */
4820 linux_nat_new_thread = new_thread;
4821}
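
/* [Editor's sketch -- not part of linux-nat.c] How an architecture
   backend typically uses linux_nat_add_target and
   linux_nat_set_new_thread from its _initialize routine.  The
   example_* names are placeholders; real backends install their own
   register-access and per-thread hooks here.  */

static void
example_new_thread_hook (struct lwp_info *lp)
{
  /* Per-thread, arch-specific setup (debug registers, etc.).  */
}

static void
example_initialize_arch_linux_nat (void)
{
  struct target_ops *t = linux_target ();

  /* Arch-specific methods (to_fetch_registers and friends) would be
     installed on T before registering it.  */
  linux_nat_add_target (t);
  linux_nat_set_new_thread (t, example_new_thread_hook);
}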
4822
26cb8b7c
PA
4823/* See declaration in linux-nat.h. */
4824
4825void
4826linux_nat_set_new_fork (struct target_ops *t,
4827 linux_nat_new_fork_ftype *new_fork)
4828{
4829 /* Save the pointer. */
4830 linux_nat_new_fork = new_fork;
4831}
4832
4833/* See declaration in linux-nat.h. */
4834
4835void
4836linux_nat_set_forget_process (struct target_ops *t,
4837 linux_nat_forget_process_ftype *fn)
4838{
4839 /* Save the pointer. */
4840 linux_nat_forget_process_hook = fn;
4841}
4842
4843/* See declaration in linux-nat.h. */
4844
4845void
4846linux_nat_forget_process (pid_t pid)
4847{
4848 if (linux_nat_forget_process_hook != NULL)
4849 linux_nat_forget_process_hook (pid);
4850}
4851
5b009018
PA
4852/* Register a method that converts a siginfo object between the layout
4853 that ptrace returns, and the layout in the architecture of the
4854 inferior. */
4855void
4856linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4857 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4858 gdb_byte *,
4859 int))
4860{
4861 /* Save the pointer. */
4862 linux_nat_siginfo_fixup = siginfo_fixup;
4863}
4864
7b50312a
PA
4865/* Register a method to call prior to resuming a thread. */
4866
4867void
4868linux_nat_set_prepare_to_resume (struct target_ops *t,
4869 void (*prepare_to_resume) (struct lwp_info *))
4870{
4871 /* Save the pointer. */
4872 linux_nat_prepare_to_resume = prepare_to_resume;
4873}
4874
f865ee35
JK
4875/* See linux-nat.h. */
4876
4877int
4878linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4879{
da559b09 4880 int pid;
9f0bdab8 4881
dfd4cc63 4882 pid = ptid_get_lwp (ptid);
da559b09 4883 if (pid == 0)
dfd4cc63 4884 pid = ptid_get_pid (ptid);
f865ee35 4885
da559b09
JK
4886 errno = 0;
4887 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4888 if (errno != 0)
4889 {
4890 memset (siginfo, 0, sizeof (*siginfo));
4891 return 0;
4892 }
f865ee35 4893 return 1;
9f0bdab8
DJ
4894}
4895
7b669087
GB
4896/* See nat/linux-nat.h. */
4897
4898ptid_t
4899current_lwp_ptid (void)
4900{
4901 gdb_assert (ptid_lwp_p (inferior_ptid));
4902 return inferior_ptid;
4903}
4904
2c0b251b
PA
4905/* Provide a prototype to silence -Wmissing-prototypes. */
4906extern initialize_file_ftype _initialize_linux_nat;
4907
d6b0e80f
AC
4908void
4909_initialize_linux_nat (void)
4910{
ccce17b0
YQ
4911 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4912 &debug_linux_nat, _("\
b84876c2
PA
4913Set debugging of GNU/Linux lwp module."), _("\
4914Show debugging of GNU/Linux lwp module."), _("\
4915Enables printf debugging output."),
ccce17b0
YQ
4916 NULL,
4917 show_debug_linux_nat,
4918 &setdebuglist, &showdebuglist);
b84876c2 4919
7a6a1731
GB
4920 add_setshow_boolean_cmd ("linux-namespaces", class_maintenance,
4921 &debug_linux_namespaces, _("\
4922Set debugging of GNU/Linux namespaces module."), _("\
4923Show debugging of GNU/Linux namespaces module."), _("\
4924Enables printf debugging output."),
4925 NULL,
4926 NULL,
4927 &setdebuglist, &showdebuglist);
4928
b84876c2 4929 /* Save this mask as the default. */
d6b0e80f
AC
4930 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4931
7feb7d06
PA
4932 /* Install a SIGCHLD handler. */
4933 sigchld_action.sa_handler = sigchld_handler;
4934 sigemptyset (&sigchld_action.sa_mask);
4935 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4936
4937 /* Make it the default. */
7feb7d06 4938 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4939
4940 /* Make sure we don't block SIGCHLD during a sigsuspend. */
4941 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
4942 sigdelset (&suspend_mask, SIGCHLD);
4943
7feb7d06 4944 sigemptyset (&blocked_mask);
774113b0
PA
4945
4946 lwp_lwpid_htab_create ();
d6b0e80f
AC
4947}
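
/* [Editor's sketch -- not part of linux-nat.c] The signal-mask
   arithmetic performed in _initialize_linux_nat above, shown in
   isolation: SIGCHLD stays blocked normally and is only allowed to
   arrive inside sigsuspend.  */

#include <signal.h>

static void
example_build_masks (sigset_t *normal, sigset_t *suspend)
{
  /* Start both masks from the current signal mask.  */
  sigprocmask (SIG_SETMASK, NULL, normal);
  sigprocmask (SIG_SETMASK, NULL, suspend);

  /* The suspend mask differs only in letting SIGCHLD through.  */
  sigdelset (suspend, SIGCHLD);
}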
4948\f
4949
4950/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
4951 the GNU/Linux Threads library and therefore doesn't really belong
4952 here. */
4953
d6b0e80f
AC
4954/* Return the set of signals used by the threads library in *SET. */
4955
4956void
4957lin_thread_get_thread_signals (sigset_t *set)
4958{
d6b0e80f
AC
4959 sigemptyset (set);
4960
4a6ed09b
PA
4961 /* NPTL reserves the first two RT signals, but does not provide any
4962 way for the debugger to query the signal numbers - fortunately
4963 they don't change. */
4964 sigaddset (set, __SIGRTMIN);
4965 sigaddset (set, __SIGRTMIN + 1);
d6b0e80f 4966}