Change signature of linux_target_ops.new_thread
[deliverable/binutils-gdb.git] / gdb / linux-nat.c
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2015 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdb_wait.h"
#ifdef HAVE_TKILL_SYSCALL
#include <unistd.h>
#include <sys/syscall.h>
#endif
#include <sys/ptrace.h>
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "gdbcmd.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>		/* for elf_gregset etc.  */
#include "elf-bfd.h"		/* for elfcore_write_* */
#include "gregset.h"		/* for gregset */
#include "gdbcore.h"		/* for get_exec_file */
#include <ctype.h>		/* for isdigit */
#include <sys/stat.h>		/* for struct stat */
#include <fcntl.h>		/* for O_RDONLY */
#include "inf-loop.h"
#include "event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include "xml-support.h"
#include <sys/vfs.h>
#include "solib.h"
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "symfile.h"
#include "agent.h"
#include "tracepoint.h"
#include "buffer.h"
#include "target-descriptions.h"
#include "filestuff.h"
#include "objfiles.h"

#ifndef SPUFS_MAGIC
#define SPUFS_MAGIC 0x23c9b64e
#endif

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid, passing
the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid is not quite good.  Prior to
version 2.4, Linux can either wait for events in the main thread, or in
secondary threads, but not both.  (2.4 added the __WALL flag.)  So, if we use
blocking waitpid, we might miss an event.  The solution is to use non-blocking
waitpid, together with sigsuspend.  First, we use non-blocking waitpid to get
an event in the main process, if any.  Second, we use non-blocking waitpid
with the __WCLONE flag to check for events in cloned processes.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives, it means
something happened to a child process -- and SIGCHLD will be delivered both
for events in the main debugged process and in cloned processes.  As soon as
we know there's an event, we get back to calling non-blocking waitpid with and
without __WCLONE.

Note that SIGCHLD should be blocked between waitpid and sigsuspend calls,
so that we don't miss a signal.  If SIGCHLD arrives in between, when it's
blocked, the signal becomes pending and sigsuspend immediately
notices it and returns.

Waiting for events in async mode
================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, the self-pipe trick is used
--- a pipe is registered as a waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well as
on other event sources, e.g., stdin), and the SIGCHLD handler writes a
byte to this pipe.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it cannot be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.  */

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif

/* The single-threaded native GNU/Linux target_ops.  We save a pointer for
   the use of the multi-threaded target.  */
static struct target_ops *linux_ops;
static struct target_ops linux_ops_saved;

/* The method to call, if any, when a new thread is attached.  */
static void (*linux_nat_new_thread) (struct lwp_info *);

/* The method to call, if any, when a new fork is attached.  */
static linux_nat_new_fork_ftype *linux_nat_new_fork;

/* The method to call, if any, when a process is no longer
   attached.  */
static linux_nat_forget_process_ftype *linux_nat_forget_process_hook;

/* Hook to call prior to resuming a thread.  */
static void (*linux_nat_prepare_to_resume) (struct lwp_info *);

/* The method to call, if any, when the siginfo object needs to be
   converted between the layout returned by ptrace, and the layout in
   the architecture of the inferior.  */
static int (*linux_nat_siginfo_fixup) (siginfo_t *,
				       gdb_byte *,
				       int);

/* The saved to_xfer_partial method, inherited from inf-ptrace.c.
   Called by our to_xfer_partial.  */
static target_xfer_partial_ftype *super_xfer_partial;

/* The saved to_close method, inherited from inf-ptrace.c.
   Called by our to_close.  */
static void (*super_close) (struct target_ops *);

static unsigned int debug_linux_nat;
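/* Show hook for the "debug linux-nat" setting; prints whether debugging
   of the GNU/Linux LWP module is enabled.  */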
static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (file, _("Debugging of GNU/Linux lwp module is %s.\n"),
		    value);
}

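/* Element of a list of (PID, wait status) pairs.  Used to remember
   children whose stop has already been reported by waitpid before GDB
   is ready to process it (see add_to_pid_list and pull_pid_from_list
   below).  */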
struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
struct simple_pid_list *stopped_pids;

/* Async mode support.  */

/* The read/write ends of the pipe registered as waitable file in the
   event loop.  */
static int linux_nat_event_pipe[2] = { -1, -1 };

/* True if we're currently in async mode.  */
#define linux_is_async_p() (linux_nat_event_pipe[0] != -1)

/* Flush the event pipe.  */

static void
async_file_flush (void)
{
  int ret;
  char buf;

  do
    {
      ret = read (linux_nat_event_pipe[0], &buf, 1);
    }
  while (ret >= 0 || (ret == -1 && errno == EINTR));
}

/* Put something (anything, doesn't matter what, or how much) in the event
   pipe, so that the select/poll in the event-loop realizes we have
   something to process.  */

static void
async_file_mark (void)
{
  int ret;

  /* It doesn't really matter what the pipe contains, as long as we end
     up with something in it.  Might as well flush the previous
     left-overs.  */
  async_file_flush ();

  do
    {
      ret = write (linux_nat_event_pipe[1], "+", 1);
    }
  while (ret == -1 && errno == EINTR);

  /* Ignore EAGAIN.  If the pipe is full, the event loop will already
     be awakened anyway.  */
}

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp, void *data);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static int check_stopped_by_breakpoint (struct lwp_info *lp);
static int sigtrap_is_event (int status);
static int (*linux_nat_status_is_event) (int status) = sigtrap_is_event;

\f
/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

\f
/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = xmalloc (sizeof (struct simple_pid_list));

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

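/* Return 1 if PID is in LIST, else 0.  */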
static int
in_pid_list_p (struct simple_pid_list *list, int pid)
{
  struct simple_pid_list *p;

  for (p = list; p != NULL; p = p->next)
    if (p->pid == pid)
      return 1;
  return 0;
}

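/* If PID is in *LISTP, remove it, store its cached wait status in
   *STATUSP, and return 1.  Otherwise return 0.  */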
static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}

/* Initialize ptrace warnings and check for supported ptrace
   features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace (pid_t pid, int attached)
{
  linux_enable_event_reporting (pid, attached);
  linux_ptrace_init_warnings ();
}

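/* Target hook run after attaching to process PID: enable ptrace event
   reporting and warnings for it.  */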
static void
linux_child_post_attach (struct target_ops *self, int pid)
{
  linux_init_ptrace (pid, 1);
}

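/* Target hook run after starting up a new inferior: enable ptrace event
   reporting for the process identified by PTID.  */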
static void
linux_child_post_startup_inferior (struct target_ops *self, ptid_t ptid)
{
  linux_init_ptrace (ptid_get_pid (ptid), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;
  struct lwp_info *lp;

  for (lp = lwp_list; lp; lp = lp->next)
    if (ptid_get_pid (lp->ptid) == pid)
      count++;

  return count;
}

/* Call delete_lwp with prototype compatible for make_cleanup.  */

static void
delete_lwp_cleanup (void *lp_voidp)
{
  struct lwp_info *lp = lp_voidp;

  delete_lwp (lp->ptid);
}

/* Target hook for follow_fork.  On entry inferior_ptid must be the
   ptid of the followed inferior.  At return, inferior_ptid will be
   unchanged.  */

static int
linux_child_follow_fork (struct target_ops *ops, int follow_child,
			 int detach_fork)
{
  if (!follow_child)
    {
      struct lwp_info *child_lp = NULL;
      int status = W_STOPCODE (0);
      struct cleanup *old_chain;
      int has_vforked;
      ptid_t parent_ptid, child_ptid;
      int parent_pid, child_pid;

      has_vforked = (inferior_thread ()->pending_follow.kind
		     == TARGET_WAITKIND_VFORKED);
      parent_ptid = inferior_ptid;
      child_ptid = inferior_thread ()->pending_follow.value.related_pid;
      parent_pid = ptid_get_lwp (parent_ptid);
      child_pid = ptid_get_lwp (child_ptid);

      /* We're already attached to the parent, by default.  */
      old_chain = save_inferior_ptid ();
      inferior_ptid = child_ptid;
      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  make_cleanup (delete_lwp_cleanup, child_lp);

	  if (linux_nat_prepare_to_resume != NULL)
	    linux_nat_prepare_to_resume (child_lp);

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  if (!gdbarch_software_single_step_p (target_thread_architecture
					       (child_lp->ptid)))
	    {
	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	    }

	  if (WIFSTOPPED (status))
	    {
	      int signo;

	      signo = WSTOPSIG (status);
	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);
	    }

	  /* Resets value of inferior_ptid to parent ptid.  */
	  do_cleanups (old_chain);
	}
      else
	{
	  /* Let the thread_db layer learn about this new process.  */
	  check_for_thread_db ();
	}

      do_cleanups (old_chain);

      if (has_vforked)
	{
	  struct lwp_info *parent_lp;

	  parent_lp = find_lwp_pid (parent_ptid);
	  gdb_assert (linux_supports_tracefork () >= 0);

	  if (linux_supports_tracevforkdone ())
	    {
	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: waiting for VFORK_DONE on %d\n",
				    parent_pid);
	      parent_lp->stopped = 1;

	      /* We'll handle the VFORK_DONE event like any other
		 event, in target_wait.  */
	    }
	  else
	    {
	      /* We can't insert breakpoints until the child has
		 finished with the shared memory region.  We need to
		 wait until that happens.  Ideal would be to just
		 call:
		 - ptrace (PTRACE_SYSCALL, parent_pid, 0, 0);
		 - waitpid (parent_pid, &status, __WALL);
		 However, most architectures can't handle a syscall
		 being traced on the way out if it wasn't traced on
		 the way in.

		 We might also think to loop, continuing the child
		 until it exits or gets a SIGTRAP.  One problem is
		 that the child might call ptrace with PTRACE_TRACEME.

		 There's no simple and reliable way to figure out when
		 the vforked child will be done with its copy of the
		 shared memory.  We could step it out of the syscall,
		 two instructions, let it go, and then single-step the
		 parent once.  When we have hardware single-step, this
		 would work; with software single-step it could still
		 be made to work but we'd have to be able to insert
		 single-step breakpoints in the child, and we'd have
		 to insert -just- the single-step breakpoint in the
		 parent.  Very awkward.

		 In the end, the best we can do is to make sure it
		 runs for a little while.  Hopefully it will be out of
		 range of any breakpoints we reinsert.  Usually this
		 is only the single-step breakpoint at vfork's return
		 point.  */

	      if (debug_linux_nat)
		fprintf_unfiltered (gdb_stdlog,
				    "LCFF: no VFORK_DONE "
				    "support, sleeping a bit\n");

	      usleep (10000);

	      /* Pretend we've seen a PTRACE_EVENT_VFORK_DONE event,
		 and leave it pending.  The next linux_nat_resume call
		 will notice a pending event, and bypasses actually
		 resuming the inferior.  */
	      parent_lp->status = 0;
	      parent_lp->waitstatus.kind = TARGET_WAITKIND_VFORK_DONE;
	      parent_lp->stopped = 1;

	      /* If we're in async mode, need to tell the event loop
		 there's something here to process.  */
	      if (target_is_async_p ())
		async_file_mark ();
	    }
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (inferior_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Let the thread_db layer learn about this new process.  */
      check_for_thread_db ();
    }

  return 0;
}

\f
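/* Fork/vfork/exec catchpoint target hooks.  These events are reported
   through ptrace, so the insert hooks only need to report whether the
   kernel's fork/exec tracing support is available (returning nonzero
   when it is missing), and the remove hooks have nothing to do.  */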
static int
linux_child_insert_fork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_fork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_vfork_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_vfork_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_insert_exec_catchpoint (struct target_ops *self, int pid)
{
  return !linux_supports_tracefork ();
}

static int
linux_child_remove_exec_catchpoint (struct target_ops *self, int pid)
{
  return 0;
}

static int
linux_child_set_syscall_catchpoint (struct target_ops *self,
				    int pid, int needed, int any_count,
				    int table_size, int *table)
{
  if (!linux_supports_tracesysgood ())
    return 1;

  /* On GNU/Linux, we ignore the arguments.  It means that we only
     enable the syscall catchpoints, but do not disable them.

     Also, we do not use the `table' information because we do not
     filter system calls here.  We let GDB do the logic for us.  */
  return 0;
}

/* On GNU/Linux there are no real LWP's.  The closest thing to LWP's
   are processes sharing the same VM space.  A multi-threaded process
   is basically a group of such processes.  However, such a grouping
   is almost entirely a user-space issue; the kernel doesn't enforce
   such a grouping at all (this might change in the future).  In
   general, we'll rely on the threads library (i.e. the GNU/Linux
   Threads library) to provide such a grouping.

   It is perfectly possible to write a multi-threaded application
   without the assistance of a threads library, by using the clone
   system call directly.  This module should be able to give some
   rudimentary support for debugging such applications if developers
   specify the CLONE_PTRACE flag in the clone system call, and are
   using the Linux kernel 2.4 or above.

   Note that there are some peculiarities in GNU/Linux that affect
   this code:

   - In general one should specify the __WCLONE flag to waitpid in
     order to make it report events for any of the cloned processes
     (and leave it out for the initial process).  However, if a cloned
     process has exited the exit status is only reported if the
     __WCLONE flag is absent.  Linux kernel 2.4 has a __WALL flag, but
     we cannot use it since GDB must work on older systems too.

   - When a traced, cloned process exits and is waited for by the
     debugger, the kernel reassigns it to the original parent and
     keeps it around as a "zombie".  Somehow, the GNU/Linux Threads
     library doesn't notice this, which leads to the "zombie problem":
     when debugged, a multi-threaded process that spawns a lot of
     threads will run out of processes, even if the threads exit,
     because the "zombies" stay around.  */

/* List of known LWPs.  */
struct lwp_info *lwp_list;
\f

/* Original signal mask.  */
static sigset_t normal_mask;

/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  sigprocmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  sigprocmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
static void
linux_nat_pass_signals (struct target_ops *self,
			int numsigs, unsigned char *pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < numsigs && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}

\f

/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp, void *data);
static int linux_thread_alive (ptid_t ptid);
static char *linux_child_pid_to_exec_file (struct target_ops *self, int pid);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, void *data);

\f

/* Destroy and free LP.  */

static void
lwp_free (struct lwp_info *lp)
{
  xfree (lp->arch_private);
  xfree (lp);
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  struct lwp_info *lp, *lpprev, *lpnext;

  lpprev = NULL;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_get_pid (lp->ptid) == pid)
	{
	  if (lp == lwp_list)
	    lwp_list = lp->next;
	  else
	    lpprev->next = lp->next;

	  lwp_free (lp);
	}
      else
	lpprev = lp;
    }
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  gdb_assert (ptid_lwp_p (ptid));

  lp = (struct lwp_info *) xmalloc (sizeof (struct lwp_info));

  memset (lp, 0, sizeof (struct lwp_info));

  lp->last_resume_kind = resume_continue;
  lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;

  lp->ptid = ptid;
  lp->core = -1;

  lp->next = lwp_list;
  lwp_list = lp;

  return lp;
}

/* Add the LWP specified by PID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  if (linux_nat_new_thread != NULL)
    linux_nat_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  struct lwp_info *lp, *lpprev;

  lpprev = NULL;

  for (lp = lwp_list; lp; lpprev = lp, lp = lp->next)
    if (ptid_equal (lp->ptid, ptid))
      break;

  if (!lp)
    return;

  if (lpprev)
    lpprev->next = lp->next;
  else
    lwp_list = lp->next;

  lwp_free (lp);
}

/* Return a pointer to the structure describing the LWP corresponding
   to PID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwp;

  if (ptid_lwp_p (ptid))
    lwp = ptid_get_lwp (ptid);
  else
    lwp = ptid_get_pid (ptid);

  for (lp = lwp_list; lp; lp = lp->next)
    if (lwp == ptid_get_lwp (lp->ptid))
      return lp;

  return NULL;
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   iterate_over_lwps_ftype callback,
		   void *data)
{
  struct lwp_info *lp, *lpnext;

  for (lp = lwp_list; lp; lp = lpnext)
    {
      lpnext = lp->next;

      if (ptid_match (lp->ptid, filter))
	{
	  if ((*callback) (lp, data) != 0)
	    return lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (ptid_get_pid (inferior_ptid));

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  */

static void
exit_lwp (struct lwp_info *lp)
{
  struct thread_info *th = find_thread_ptid (lp->ptid);

  if (th)
    {
      if (print_thread_events)
	printf_unfiltered (_("[%s exited]\n"), target_pid_to_str (lp->ptid));

      delete_thread (lp->ptid);
    }

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by LP, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int first, int *cloned,
			    int *signalled)
{
  pid_t new_pid, pid = ptid_get_lwp (ptid);
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Attaching to a stopped process\n");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, 0);
  if (new_pid == -1 && errno == ECHILD)
    {
      if (first)
	warning (_("%s is a cloned process"), target_pid_to_str (ptid));

      /* Try again with __WCLONE to check cloned processes.  */
      new_pid = my_waitpid (pid, &status, __WCLONE);
      *cloned = 1;
    }

  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog, "LNPAW: Failed to stop %d: %s",
			    pid, status_to_str (status));
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LNPAW: Received %s after attaching\n",
			    status_to_str (status));
    }

  return status;
}

/* Attach to the LWP specified by PID.  Return 0 if successful, -1 if
   the new LWP could not be attached, or 1 if we're already auto
   attached to this thread, but haven't processed the
   PTRACE_EVENT_CLONE event of its parent thread, so we just ignore
   its existence, without considering it an error.  */

int
lin_lwp_attach_lwp (ptid_t ptid)
{
  struct lwp_info *lp;
  int lwpid;

  gdb_assert (ptid_lwp_p (ptid));

  lp = find_lwp_pid (ptid);
  lwpid = ptid_get_lwp (ptid);

  /* We assume that we're already attached to any LWP that is already
     in our list of LWPs.  If we're not seeing exit events from threads
     and we've had PID wraparound since we last tried to stop all threads,
     this assumption might be wrong; fortunately, this is very unlikely
     to happen.  */
  if (lp == NULL)
    {
      int status, cloned = 0, signalled = 0;

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  if (linux_supports_tracefork ())
	    {
	      /* If we haven't stopped all threads when we get here,
		 we may have seen a thread listed in thread_db's list,
		 but not processed the PTRACE_EVENT_CLONE yet.  If
		 that's the case, ignore this new thread, and let
		 normal event handling discover it later.  */
	      if (in_pid_list_p (stopped_pids, lwpid))
		{
		  /* We've already seen this thread stop, but we
		     haven't seen the PTRACE_EVENT_CLONE extended
		     event yet.  */
		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LLAL: attach failed, but already seen "
					"this thread %s stop\n",
					target_pid_to_str (ptid));
		  return 1;
		}
	      else
		{
		  int new_pid;
		  int status;

		  if (debug_linux_nat)
		    fprintf_unfiltered (gdb_stdlog,
					"LLAL: attach failed, and haven't seen "
					"this thread %s stop yet\n",
					target_pid_to_str (ptid));

		  /* We may or may not be attached to the LWP already.
		     Try waitpid on it.  If that errors, we're not
		     attached to the LWP yet.  Otherwise, we're
		     already attached.  */
		  gdb_assert (lwpid > 0);
		  new_pid = my_waitpid (lwpid, &status, WNOHANG);
		  if (new_pid == -1 && errno == ECHILD)
		    new_pid = my_waitpid (lwpid, &status, __WCLONE | WNOHANG);
		  if (new_pid != -1)
		    {
		      if (new_pid == 0)
			{
			  /* The child hasn't stopped for its initial
			     SIGSTOP stop yet.  */
			  if (debug_linux_nat)
			    fprintf_unfiltered (gdb_stdlog,
						"LLAL: child hasn't "
						"stopped yet\n");
			}
		      else if (WIFSTOPPED (status))
			{
			  if (debug_linux_nat)
			    fprintf_unfiltered (gdb_stdlog,
						"LLAL: adding to stopped_pids\n");
			  add_to_pid_list (&stopped_pids, lwpid, status);
			}
		      return 1;
		    }
		}
	    }

	  /* If we fail to attach to the thread, issue a warning,
	     but continue.  One way this can happen is if thread
	     creation is interrupted; as of Linux kernel 2.6.19, a
	     bug may place threads in the thread list and then fail
	     to create them.  */
	  warning (_("Can't attach %s: %s"), target_pid_to_str (ptid),
		   safe_strerror (errno));
	  return -1;
	}

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LLAL: PTRACE_ATTACH %s, 0, 0 (OK)\n",
			    target_pid_to_str (ptid));

      status = linux_nat_post_attach_wait (ptid, 0, &cloned, &signalled);
      if (!WIFSTOPPED (status))
	return 1;

      lp = add_lwp (ptid);
      lp->stopped = 1;
      lp->last_resume_kind = resume_stop;
      lp->cloned = cloned;
      lp->signalled = signalled;
      if (WSTOPSIG (status) != SIGSTOP)
	{
	  lp->resumed = 1;
	  lp->status = status;
	}

      target_post_attach (ptid_get_lwp (lp->ptid));

      if (debug_linux_nat)
	{
	  fprintf_unfiltered (gdb_stdlog,
			      "LLAL: waitpid %s received %s\n",
			      target_pid_to_str (ptid),
			      status_to_str (status));
	}
    }

  return 0;
}

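/* Start a new inferior.  Disable address space randomization if the
   "disable-randomization" setting asks for it, make sure all signals
   are reported during startup, and let the underlying ptrace target do
   the actual fork and exec.  */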
static void
linux_nat_create_inferior (struct target_ops *ops,
			   char *exec_file, char *allargs, char **env,
			   int from_tty)
{
  struct cleanup *restore_personality
    = maybe_disable_address_space_randomization (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  linux_nat_pass_signals (ops, 0, NULL);

  linux_ops->to_create_inferior (ops, exec_file, allargs, env, from_tty);

  do_cleanups (restore_personality);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid_get_lwp (ptid);

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      if (debug_linux_nat)
		{
		  fprintf_unfiltered (gdb_stdlog,
				      "Cannot attach to lwp %d: "
				      "thread is gone (%d: %s)\n",
				      lwpid, err, safe_strerror (err));
		}
	    }
	  else
	    {
	      warning (_("Cannot attach to lwp %d: %s"),
		       lwpid,
		       linux_ptrace_attach_fail_reason_string (ptid,
							       err));
	    }
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"PTRACE_ATTACH %s, 0, 0 (OK)\n",
				target_pid_to_str (ptid));

	  lp = add_lwp (ptid);
	  lp->cloned = 1;

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;
	}

      return 1;
    }
  return 0;
}

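/* Attach to the process specified by ARGS.  After the underlying ptrace
   target has attached, record the main thread as an LWP, wait for its
   initial stop, and then attach to every other LWP of the process listed
   in /proc.  */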
static void
linux_nat_attach (struct target_ops *ops, const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  linux_nat_pass_signals (ops, 0, NULL);

  TRY
    {
      linux_ops->to_attach (ops, args, from_tty);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      pid_t pid = parse_pid_to_attach (args);
      struct buffer buffer;
      char *message, *buffer_s;

      message = xstrdup (ex.message);
      make_cleanup (xfree, message);

      buffer_init (&buffer);
      linux_ptrace_attach_fail_reason (pid, &buffer);

      buffer_grow_str0 (&buffer, "");
      buffer_s = buffer_finish (&buffer);
      make_cleanup (xfree, buffer_s);

      if (*buffer_s != '\0')
	throw_error (ex.error, "warning: %s\n%s", buffer_s, message);
      else
	throw_error (ex.error, "%s", message);
    }
  END_CATCH

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_build (ptid_get_pid (inferior_ptid),
		     ptid_get_pid (inferior_ptid),
		     0);
  thread_change_ptid (inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, 1, &lp->cloned,
				       &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal_ours ();
	  target_mourn_inferior ();
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal_ours ();
	  target_mourn_inferior ();

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (__FILE__, __LINE__,
		      _("unexpected status %d for PID %ld"),
		      status, (long) ptid_get_lwp (ptid));
    }

  lp->stopped = 1;

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LNA: waitpid %ld, saving status %s\n",
			(long) ptid_get_pid (lp->ptid), status_to_str (status));

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  linux_proc_attach_tgid_threads (ptid_get_pid (lp->ptid),
				  attach_proc_task_lwp_callback);

  if (target_can_async_p ())
    target_async (inferior_event_handler, 0);
}

/* Get pending status of LP.  */
static int
get_pending_status (struct lwp_info *lp, int *status)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0;	/* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else if (non_stop && !is_executing (lp->ptid))
    {
      struct thread_info *tp = find_thread_ptid (lp->ptid);

      signo = tp->suspend.stop_signal;
    }
  else if (!non_stop)
    {
      struct target_waitstatus last;
      ptid_t last_ptid;

      get_last_target_status (&last_ptid, &last);

      if (ptid_get_lwp (lp->ptid) == ptid_get_lwp (last_ptid))
	{
	  struct thread_info *tp = find_thread_ptid (lp->ptid);

	  signo = tp->suspend.stop_signal;
	}
    }

  *status = 0;

  if (signo == GDB_SIGNAL_0)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has no pending signal\n",
			    target_pid_to_str (lp->ptid));
    }
  else if (!signal_pass_state (signo))
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s had signal %s, "
			    "but it is in no pass state\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }
  else
    {
      *status = W_STOPCODE (gdb_signal_to_host (signo));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "GPT: lwp %s has pending signal %s\n",
			    target_pid_to_str (lp->ptid),
			    gdb_signal_to_string (signo));
    }

  return 0;
}

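/* Callback for iterate_over_lwps used when detaching: cancel any pending
   SIGSTOP, then detach from every LWP except the main one, forwarding any
   other pending signal with PTRACE_DETACH.  */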
static int
detach_callback (struct lwp_info *lp, void *data)
{
  gdb_assert (lp->status == 0 || WIFSTOPPED (lp->status));

  if (debug_linux_nat && lp->status)
    fprintf_unfiltered (gdb_stdlog, "DC: Pending %s for %s on detach.\n",
			strsignal (WSTOPSIG (lp->status)),
			target_pid_to_str (lp->ptid));

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "DC: Sending SIGCONT to %s\n",
			    target_pid_to_str (lp->ptid));

      kill_lwp (ptid_get_lwp (lp->ptid), SIGCONT);
      lp->signalled = 0;
    }

  /* We don't actually detach from the LWP that has an id equal to the
     overall process id just yet.  */
  if (ptid_get_lwp (lp->ptid) != ptid_get_pid (lp->ptid))
    {
      int status = 0;

      /* Pass on any pending signal for this LWP.  */
      get_pending_status (lp, &status);

      if (linux_nat_prepare_to_resume != NULL)
	linux_nat_prepare_to_resume (lp);
      errno = 0;
      if (ptrace (PTRACE_DETACH, ptid_get_lwp (lp->ptid), 0,
		  WSTOPSIG (status)) < 0)
	error (_("Can't detach %s: %s"), target_pid_to_str (lp->ptid),
	       safe_strerror (errno));

      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "PTRACE_DETACH (%s, %s, 0) (OK)\n",
			    target_pid_to_str (lp->ptid),
			    strsignal (WSTOPSIG (status)));

      delete_lwp (lp->ptid);
    }

  return 0;
}

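/* Detach from the inferior: stop all LWPs, detach from each of them, and
   finally detach from (or switch away from) the main process, passing on
   any pending signal.  */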
static void
linux_nat_detach (struct target_ops *ops, const char *args, int from_tty)
{
  int pid;
  int status;
  struct lwp_info *main_lwp;

  pid = ptid_get_pid (inferior_ptid);

  /* Don't unregister from the event loop, as there may be other
     inferiors running.  */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_callback, NULL);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (pid_to_ptid (pid), stop_wait_callback, NULL);

  iterate_over_lwps (pid_to_ptid (pid), detach_callback, NULL);

  /* Only the initial process should be left right now.  */
  gdb_assert (num_lwps (ptid_get_pid (inferior_ptid)) == 1);

  main_lwp = find_lwp_pid (pid_to_ptid (pid));

  /* Pass on any pending signal for the last LWP.  */
  if ((args == NULL || *args == '\0')
      && get_pending_status (main_lwp, &status) != -1
      && WIFSTOPPED (status))
    {
      char *tem;

      /* Put the signal number in ARGS so that inf_ptrace_detach will
	 pass it along with PTRACE_DETACH.  */
      tem = alloca (8);
      xsnprintf (tem, 8, "%d", (int) WSTOPSIG (status));
      args = tem;
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "LND: Sending signal %s to %s\n",
			    args,
			    target_pid_to_str (main_lwp->ptid));
    }

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (main_lwp);
  delete_lwp (main_lwp->ptid);

  if (forks_exist_p ())
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (args, from_tty);
    }
  else
    linux_ops->to_detach (ops, args, from_tty);
}

/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNAL is nonzero, give it that signal.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We only presently need that if the LWP is stepped though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  if (linux_nat_prepare_to_resume != NULL)
    linux_nat_prepare_to_resume (lp);
  linux_ops->to_resume (linux_ops, lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (lp->ptid);
}

/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's zombie,
   or about to become), discard the error, clear any pending status
   the LWP may have, and return true (we'll collect the exit status
   soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (ptid_get_lwp (lp->ptid)) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status = 0;
      lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
      return 1;
    }
  return 0;
}

/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  TRY
    {
      linux_resume_one_lwp_throw (lp, step, signo);
    }
  CATCH (ex, RETURN_MASK_ERROR)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw_exception (ex);
    }
  END_CATCH
}

/* Resume LP.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_ptid (lp->ptid);

      if (inf->vfork_child != NULL)
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming %s (vfork parent)\n",
				target_pid_to_str (lp->ptid));
	}
      else if (!lwp_status_pending_p (lp))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Resuming sibling %s, %s, %s\n",
				target_pid_to_str (lp->ptid),
				(signo != GDB_SIGNAL_0
				 ? strsignal (gdb_signal_to_host (signo))
				 : "0"),
				step ? "step" : "resume");

	  linux_resume_one_lwp (lp, step, signo);
	}
      else
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"RC: Not resuming sibling %s (has pending)\n",
				target_pid_to_str (lp->ptid));
	}
    }
  else
    {
      if (debug_linux_nat)
	fprintf_unfiltered (gdb_stdlog,
			    "RC: Not resuming sibling %s (not stopped)\n",
			    target_pid_to_str (lp->ptid));
    }
}

/* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
   Resume LWP with the last stop signal, if it is in pass state.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, void *except)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp == except)
    return 0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = find_thread_ptid (lp->ptid);
      if (thread != NULL)
	{
	  signo = thread->suspend.stop_signal;
	  thread->suspend.stop_signal = GDB_SIGNAL_0;
	}
    }

  resume_lwp (lp, 0, signo);
  return 0;
}

static int
resume_clear_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}

static int
resume_set_callback (struct lwp_info *lp, void *data)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}

static void
linux_nat_resume (struct target_ops *ops,
		  ptid_t ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;
  int resume_many;

  if (debug_linux_nat)
    fprintf_unfiltered (gdb_stdlog,
			"LLR: Preparing to %s %s, %s, inferior_ptid %s\n",
			step ? "step" : "resume",
			target_pid_to_str (ptid),
			(signo != GDB_SIGNAL_0
			 ? strsignal (gdb_signal_to_host (signo)) : "0"),
			target_pid_to_str (inferior_ptid));

  /* A specific PTID means `step only this process id'.  */
  resume_many = (ptid_equal (minus_one_ptid, ptid)
		 || ptid_is_pid (ptid));

  /* Mark the lwps we're resuming as resumed.  */
  iterate_over_lwps (ptid, resume_set_callback, NULL);

  /* See if it's the current inferior that should be handled
     specially.  */
  if (resume_many)
    lp = find_lwp_pid (inferior_ptid);
  else
    lp = find_lwp_pid (ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  if (debug_linux_nat)
	    fprintf_unfiltered (gdb_stdlog,
				"LLR: Not short circuiting for ignored "
				"status 0x%x\n", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
2ea28649 1757 signo = gdb_signal_from_host (WSTOPSIG (lp->status));
9f0bdab8
DJ
1758 lp->status = 0;
1759 }
1760 }
76f50ad1 1761
8a99810d 1762 if (lwp_status_pending_p (lp))
9f0bdab8
DJ
1763 {
1764 /* FIXME: What should we do if we are supposed to continue
1765 this thread with a signal? */
a493e3e2 1766 gdb_assert (signo == GDB_SIGNAL_0);
76f50ad1 1767
9f0bdab8
DJ
1768 if (debug_linux_nat)
1769 fprintf_unfiltered (gdb_stdlog,
1770 "LLR: Short circuiting for status 0x%x\n",
1771 lp->status);
d6b0e80f 1772
7feb7d06
PA
1773 if (target_can_async_p ())
1774 {
1775 target_async (inferior_event_handler, 0);
1776 /* Tell the event loop we have something to process. */
1777 async_file_mark ();
1778 }
9f0bdab8 1779 return;
d6b0e80f
AC
1780 }
1781
d90e17a7 1782 if (resume_many)
8817a6f2 1783 iterate_over_lwps (ptid, linux_nat_resume_callback, lp);
d90e17a7 1784
8a99810d 1785 linux_resume_one_lwp (lp, step, signo);
9f0bdab8 1786
d6b0e80f
AC
1787 if (debug_linux_nat)
1788 fprintf_unfiltered (gdb_stdlog,
1789 "LLR: %s %s, %s (resume event thread)\n",
1790 step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
1791 target_pid_to_str (ptid),
a493e3e2 1792 (signo != GDB_SIGNAL_0
2ea28649 1793 ? strsignal (gdb_signal_to_host (signo)) : "0"));
b84876c2
PA
1794
1795 if (target_can_async_p ())
8ea051c5 1796 target_async (inferior_event_handler, 0);
d6b0e80f
AC
1797}
1798
c5f62d5f 1799/* Send a signal to an LWP. */
d6b0e80f
AC
1800
1801static int
1802kill_lwp (int lwpid, int signo)
1803{
c5f62d5f
DE
1804 /* Use tkill, if possible, in case we are using nptl threads. If tkill
1805 fails, then we are not using nptl threads and we should be using kill. */
d6b0e80f
AC
1806
1807#ifdef HAVE_TKILL_SYSCALL
c5f62d5f
DE
1808 {
1809 static int tkill_failed;
1810
1811 if (!tkill_failed)
1812 {
1813 int ret;
1814
1815 errno = 0;
1816 ret = syscall (__NR_tkill, lwpid, signo);
1817 if (errno != ENOSYS)
1818 return ret;
1819 tkill_failed = 1;
1820 }
1821 }
d6b0e80f
AC
1822#endif
1823
1824 return kill (lwpid, signo);
1825}
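/* Illustrative note (4711 is a made-up LWP id): tkill(2) queues the
   signal for exactly one thread, while kill(2) addresses the whole
   thread group, so a process-directed signal may end up handled by a
   different thread sharing the signal queue:

       kill_lwp (4711, SIGUSR1);   - delivered to LWP 4711 only
       kill (4711, SIGUSR1);       - any thread in 4711's group may take it

   Hence tkill is preferred above whenever the syscall is available.  */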
1826
ca2163eb
PA
1827/* Handle a GNU/Linux syscall trap wait response. If we see a syscall
1828 event, check if the core is interested in it: if not, ignore the
1829 event, and keep waiting; otherwise, we need to toggle the LWP's
1830 syscall entry/exit status, since the ptrace event itself doesn't
1831 indicate it, and report the trap to higher layers. */
1832
1833static int
1834linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
1835{
1836 struct target_waitstatus *ourstatus = &lp->waitstatus;
1837 struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
1838 int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, lp->ptid);
1839
1840 if (stopping)
1841 {
1842 /* If we're stopping threads, there's a SIGSTOP pending, which
1843 makes it so that the LWP reports an immediate syscall return,
1844 followed by the SIGSTOP. Skip seeing that "return" using
1845 PTRACE_CONT directly, and let stop_wait_callback collect the
 1846 SIGSTOP. Later, when the thread is resumed, a new syscall
 1847 entry event will be reported. If we didn't do this (and returned 0), we'd
1848 leave a syscall entry pending, and our caller, by using
1849 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
1850 itself. Later, when the user re-resumes this LWP, we'd see
1851 another syscall entry event and we'd mistake it for a return.
1852
1853 If stop_wait_callback didn't force the SIGSTOP out of the LWP
1854 (leaving immediately with LWP->signalled set, without issuing
1855 a PTRACE_CONT), it would still be problematic to leave this
1856 syscall enter pending, as later when the thread is resumed,
1857 it would then see the same syscall exit mentioned above,
1858 followed by the delayed SIGSTOP, while the syscall didn't
1859 actually get to execute. It seems it would be even more
1860 confusing to the user. */
1861
1862 if (debug_linux_nat)
1863 fprintf_unfiltered (gdb_stdlog,
1864 "LHST: ignoring syscall %d "
1865 "for LWP %ld (stopping threads), "
1866 "resuming with PTRACE_CONT for SIGSTOP\n",
1867 syscall_number,
dfd4cc63 1868 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1869
1870 lp->syscall_state = TARGET_WAITKIND_IGNORE;
dfd4cc63 1871 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 1872 lp->stopped = 0;
ca2163eb
PA
1873 return 1;
1874 }
1875
1876 if (catch_syscall_enabled ())
1877 {
1878 /* Always update the entry/return state, even if this particular
1879 syscall isn't interesting to the core now. In async mode,
1880 the user could install a new catchpoint for this syscall
1881 between syscall enter/return, and we'll need to know to
1882 report a syscall return if that happens. */
1883 lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1884 ? TARGET_WAITKIND_SYSCALL_RETURN
1885 : TARGET_WAITKIND_SYSCALL_ENTRY);
1886
1887 if (catching_syscall_number (syscall_number))
1888 {
1889 /* Alright, an event to report. */
1890 ourstatus->kind = lp->syscall_state;
1891 ourstatus->value.syscall_number = syscall_number;
1892
1893 if (debug_linux_nat)
1894 fprintf_unfiltered (gdb_stdlog,
1895 "LHST: stopping for %s of syscall %d"
1896 " for LWP %ld\n",
3e43a32a
MS
1897 lp->syscall_state
1898 == TARGET_WAITKIND_SYSCALL_ENTRY
ca2163eb
PA
1899 ? "entry" : "return",
1900 syscall_number,
dfd4cc63 1901 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1902 return 0;
1903 }
1904
1905 if (debug_linux_nat)
1906 fprintf_unfiltered (gdb_stdlog,
1907 "LHST: ignoring %s of syscall %d "
1908 "for LWP %ld\n",
1909 lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
1910 ? "entry" : "return",
1911 syscall_number,
dfd4cc63 1912 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1913 }
1914 else
1915 {
1916 /* If we had been syscall tracing, and hence used PT_SYSCALL
1917 before on this LWP, it could happen that the user removes all
1918 syscall catchpoints before we get to process this event.
1919 There are two noteworthy issues here:
1920
1921 - When stopped at a syscall entry event, resuming with
1922 PT_STEP still resumes executing the syscall and reports a
1923 syscall return.
1924
 1925 - Only PT_SYSCALL catches syscall enters. If we last
 1926 single-stepped this thread, then this event can't be a
 1927 syscall enter. Since we last single-stepped it, this
 1928 has to be a syscall exit.
1929
1930 The points above mean that the next resume, be it PT_STEP or
1931 PT_CONTINUE, can not trigger a syscall trace event. */
1932 if (debug_linux_nat)
1933 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
1934 "LHST: caught syscall event "
1935 "with no syscall catchpoints."
ca2163eb
PA
1936 " %d for LWP %ld, ignoring\n",
1937 syscall_number,
dfd4cc63 1938 ptid_get_lwp (lp->ptid));
ca2163eb
PA
1939 lp->syscall_state = TARGET_WAITKIND_IGNORE;
1940 }
1941
1942 /* The core isn't interested in this event. For efficiency, avoid
1943 stopping all threads only to have the core resume them all again.
1944 Since we're not stopping threads, if we're still syscall tracing
1945 and not stepping, we can't use PTRACE_CONT here, as we'd miss any
1946 subsequent syscall. Simply resume using the inf-ptrace layer,
1947 which knows when to use PT_SYSCALL or PT_CONTINUE. */
1948
8a99810d 1949 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
ca2163eb
PA
1950 return 1;
1951}
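/* Background sketch for the SYSCALL_SIGTRAP checks in the callers of
   this function (assuming the conventional PTRACE_O_TRACESYSGOOD
   encoding): with "sysgood" enabled, a syscall stop reports

       WSTOPSIG (status) == (SIGTRAP | 0x80)

   rather than a plain SIGTRAP, which is how syscall traps are told
   apart from breakpoint traps before STATUS is rewritten to
   W_STOPCODE (SIGTRAP).  */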
1952
3d799a95
DJ
1953/* Handle a GNU/Linux extended wait response. If we see a clone
1954 event, we need to add the new LWP to our list (and not report the
1955 trap to higher layers). This function returns non-zero if the
1956 event should be ignored and we should wait again. If STOPPING is
1957 true, the new LWP remains stopped, otherwise it is continued. */
d6b0e80f
AC
1958
1959static int
3d799a95
DJ
1960linux_handle_extended_wait (struct lwp_info *lp, int status,
1961 int stopping)
d6b0e80f 1962{
dfd4cc63 1963 int pid = ptid_get_lwp (lp->ptid);
3d799a95 1964 struct target_waitstatus *ourstatus = &lp->waitstatus;
89a5711c 1965 int event = linux_ptrace_get_extended_event (status);
d6b0e80f 1966
3d799a95
DJ
1967 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
1968 || event == PTRACE_EVENT_CLONE)
d6b0e80f 1969 {
3d799a95
DJ
1970 unsigned long new_pid;
1971 int ret;
1972
1973 ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);
6fc19103 1974
3d799a95
DJ
1975 /* If we haven't already seen the new PID stop, wait for it now. */
1976 if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
1977 {
1978 /* The new child has a pending SIGSTOP. We can't affect it until it
1979 hits the SIGSTOP, but we're already attached. */
1980 ret = my_waitpid (new_pid, &status,
1981 (event == PTRACE_EVENT_CLONE) ? __WCLONE : 0);
1982 if (ret == -1)
1983 perror_with_name (_("waiting for new child"));
1984 else if (ret != new_pid)
1985 internal_error (__FILE__, __LINE__,
1986 _("wait returned unexpected PID %d"), ret);
1987 else if (!WIFSTOPPED (status))
1988 internal_error (__FILE__, __LINE__,
1989 _("wait returned unexpected status 0x%x"), status);
1990 }
1991
3a3e9ee3 1992 ourstatus->value.related_pid = ptid_build (new_pid, new_pid, 0);
3d799a95 1993
26cb8b7c
PA
1994 if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
1995 {
1996 /* The arch-specific native code may need to know about new
1997 forks even if those end up never mapped to an
1998 inferior. */
1999 if (linux_nat_new_fork != NULL)
2000 linux_nat_new_fork (lp, new_pid);
2001 }
2002
2277426b 2003 if (event == PTRACE_EVENT_FORK
dfd4cc63 2004 && linux_fork_checkpointing_p (ptid_get_pid (lp->ptid)))
2277426b 2005 {
2277426b
PA
2006 /* Handle checkpointing by linux-fork.c here as a special
2007 case. We don't want the follow-fork-mode or 'catch fork'
2008 to interfere with this. */
2009
2010 /* This won't actually modify the breakpoint list, but will
2011 physically remove the breakpoints from the child. */
d80ee84f 2012 detach_breakpoints (ptid_build (new_pid, new_pid, 0));
2277426b
PA
2013
2014 /* Retain child fork in ptrace (stopped) state. */
14571dad
MS
2015 if (!find_fork_pid (new_pid))
2016 add_fork (new_pid);
2277426b
PA
2017
2018 /* Report as spurious, so that infrun doesn't want to follow
2019 this fork. We're actually doing an infcall in
2020 linux-fork.c. */
2021 ourstatus->kind = TARGET_WAITKIND_SPURIOUS;
2277426b
PA
2022
2023 /* Report the stop to the core. */
2024 return 0;
2025 }
2026
3d799a95
DJ
2027 if (event == PTRACE_EVENT_FORK)
2028 ourstatus->kind = TARGET_WAITKIND_FORKED;
2029 else if (event == PTRACE_EVENT_VFORK)
2030 ourstatus->kind = TARGET_WAITKIND_VFORKED;
6fc19103 2031 else
3d799a95 2032 {
78768c4a
JK
2033 struct lwp_info *new_lp;
2034
3d799a95 2035 ourstatus->kind = TARGET_WAITKIND_IGNORE;
78768c4a 2036
3c4d7e12
PA
2037 if (debug_linux_nat)
2038 fprintf_unfiltered (gdb_stdlog,
2039 "LHEW: Got clone event "
2040 "from LWP %d, new child is LWP %ld\n",
2041 pid, new_pid);
2042
dfd4cc63 2043 new_lp = add_lwp (ptid_build (ptid_get_pid (lp->ptid), new_pid, 0));
3d799a95 2044 new_lp->cloned = 1;
4c28f408 2045 new_lp->stopped = 1;
d6b0e80f 2046
3d799a95
DJ
2047 if (WSTOPSIG (status) != SIGSTOP)
2048 {
 2049 /* This can happen if someone starts sending signals with
 2050 a lower number than SIGSTOP (e.g. SIGUSR1) to the new
 2051 thread before it gets a chance to run.
2052 This is an unlikely case, and harder to handle for
2053 fork / vfork than for clone, so we do not try - but
2054 we handle it for clone events here. We'll send
2055 the other signal on to the thread below. */
2056
2057 new_lp->signalled = 1;
2058 }
2059 else
79395f92
PA
2060 {
2061 struct thread_info *tp;
2062
2063 /* When we stop for an event in some other thread, and
2064 pull the thread list just as this thread has cloned,
2065 we'll have seen the new thread in the thread_db list
2066 before handling the CLONE event (glibc's
2067 pthread_create adds the new thread to the thread list
2068 before clone'ing, and has the kernel fill in the
2069 thread's tid on the clone call with
2070 CLONE_PARENT_SETTID). If that happened, and the core
2071 had requested the new thread to stop, we'll have
2072 killed it with SIGSTOP. But since SIGSTOP is not an
2073 RT signal, it can only be queued once. We need to be
2074 careful to not resume the LWP if we wanted it to
2075 stop. In that case, we'll leave the SIGSTOP pending.
a493e3e2 2076 It will later be reported as GDB_SIGNAL_0. */
79395f92
PA
2077 tp = find_thread_ptid (new_lp->ptid);
2078 if (tp != NULL && tp->stop_requested)
2079 new_lp->last_resume_kind = resume_stop;
2080 else
2081 status = 0;
2082 }
d6b0e80f 2083
2db9a427
PA
2084 /* If the thread_db layer is active, let it record the user
2085 level thread id and status, and add the thread to GDB's
2086 list. */
2087 if (!thread_db_notice_clone (lp->ptid, new_lp->ptid))
3d799a95 2088 {
2db9a427
PA
2089 /* The process is not using thread_db. Add the LWP to
2090 GDB's list. */
2091 target_post_attach (ptid_get_lwp (new_lp->ptid));
2092 add_thread (new_lp->ptid);
2093 }
4c28f408 2094
2db9a427
PA
2095 if (!stopping)
2096 {
2097 set_running (new_lp->ptid, 1);
2098 set_executing (new_lp->ptid, 1);
2099 /* thread_db_attach_lwp -> lin_lwp_attach_lwp forced
2100 resume_stop. */
2101 new_lp->last_resume_kind = resume_continue;
4c28f408
PA
2102 }
2103
79395f92
PA
2104 if (status != 0)
2105 {
2106 /* We created NEW_LP so it cannot yet contain STATUS. */
2107 gdb_assert (new_lp->status == 0);
2108
2109 /* Save the wait status to report later. */
2110 if (debug_linux_nat)
2111 fprintf_unfiltered (gdb_stdlog,
2112 "LHEW: waitpid of new LWP %ld, "
2113 "saving status %s\n",
dfd4cc63 2114 (long) ptid_get_lwp (new_lp->ptid),
79395f92
PA
2115 status_to_str (status));
2116 new_lp->status = status;
2117 }
2118
20ba1ce6 2119 new_lp->resumed = !stopping;
3d799a95
DJ
2120 return 1;
2121 }
2122
2123 return 0;
d6b0e80f
AC
2124 }
2125
3d799a95
DJ
2126 if (event == PTRACE_EVENT_EXEC)
2127 {
a75724bc
PA
2128 if (debug_linux_nat)
2129 fprintf_unfiltered (gdb_stdlog,
2130 "LHEW: Got exec event from LWP %ld\n",
dfd4cc63 2131 ptid_get_lwp (lp->ptid));
a75724bc 2132
3d799a95
DJ
2133 ourstatus->kind = TARGET_WAITKIND_EXECD;
2134 ourstatus->value.execd_pathname
8dd27370 2135 = xstrdup (linux_child_pid_to_exec_file (NULL, pid));
3d799a95 2136
8af756ef
PA
2137 /* The thread that execed must have been resumed, but, when a
2138 thread execs, it changes its tid to the tgid, and the old
2139 tgid thread might have not been resumed. */
2140 lp->resumed = 1;
6c95b8df
PA
2141 return 0;
2142 }
2143
2144 if (event == PTRACE_EVENT_VFORK_DONE)
2145 {
2146 if (current_inferior ()->waiting_for_vfork_done)
3d799a95 2147 {
6c95b8df 2148 if (debug_linux_nat)
3e43a32a
MS
2149 fprintf_unfiltered (gdb_stdlog,
2150 "LHEW: Got expected PTRACE_EVENT_"
2151 "VFORK_DONE from LWP %ld: stopping\n",
dfd4cc63 2152 ptid_get_lwp (lp->ptid));
3d799a95 2153
6c95b8df
PA
2154 ourstatus->kind = TARGET_WAITKIND_VFORK_DONE;
2155 return 0;
3d799a95
DJ
2156 }
2157
6c95b8df 2158 if (debug_linux_nat)
3e43a32a
MS
2159 fprintf_unfiltered (gdb_stdlog,
2160 "LHEW: Got PTRACE_EVENT_VFORK_DONE "
20ba1ce6 2161 "from LWP %ld: ignoring\n",
dfd4cc63 2162 ptid_get_lwp (lp->ptid));
6c95b8df 2163 return 1;
3d799a95
DJ
2164 }
2165
2166 internal_error (__FILE__, __LINE__,
2167 _("unknown ptrace event %d"), event);
d6b0e80f
AC
2168}
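/* Background sketch of how extended events are encoded (per the
   ptrace(2) documentation of PTRACE_O_TRACEFORK and friends): the
   event number rides in the high bits of the wait status, so

       linux_ptrace_get_extended_event (status)    amounts to (status >> 16)

   and a clone stop, for instance, looks roughly like

       W_STOPCODE (SIGTRAP) | (PTRACE_EVENT_CLONE << 16)

   while WSTOPSIG (status) still reads as SIGTRAP.  */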
2169
2170/* Wait for LP to stop. Returns the wait status, or 0 if the LWP has
2171 exited. */
2172
2173static int
2174wait_lwp (struct lwp_info *lp)
2175{
2176 pid_t pid;
432b4d03 2177 int status = 0;
d6b0e80f 2178 int thread_dead = 0;
432b4d03 2179 sigset_t prev_mask;
d6b0e80f
AC
2180
2181 gdb_assert (!lp->stopped);
2182 gdb_assert (lp->status == 0);
2183
432b4d03
JK
2184 /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below. */
2185 block_child_signals (&prev_mask);
2186
2187 for (;;)
d6b0e80f 2188 {
432b4d03
JK
2189 /* If my_waitpid returns 0 it means the __WCLONE vs. non-__WCLONE kind
2190 was right and we should just call sigsuspend. */
2191
dfd4cc63 2192 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, WNOHANG);
d6b0e80f 2193 if (pid == -1 && errno == ECHILD)
dfd4cc63 2194 pid = my_waitpid (ptid_get_lwp (lp->ptid), &status, __WCLONE | WNOHANG);
a9f4bb21
PA
2195 if (pid == -1 && errno == ECHILD)
2196 {
2197 /* The thread has previously exited. We need to delete it
2198 now because, for some vendor 2.4 kernels with NPTL
2199 support backported, there won't be an exit event unless
2200 it is the main thread. 2.6 kernels will report an exit
2201 event for each thread that exits, as expected. */
2202 thread_dead = 1;
2203 if (debug_linux_nat)
2204 fprintf_unfiltered (gdb_stdlog, "WL: %s vanished.\n",
2205 target_pid_to_str (lp->ptid));
2206 }
432b4d03
JK
2207 if (pid != 0)
2208 break;
2209
2210 /* Bugs 10970, 12702.
2211 Thread group leader may have exited in which case we'll lock up in
2212 waitpid if there are other threads, even if they are all zombies too.
2213 Basically, we're not supposed to use waitpid this way.
2214 __WCLONE is not applicable for the leader so we can't use that.
2215 LINUX_NAT_THREAD_ALIVE cannot be used here as it requires a STOPPED
2216 process; it gets ESRCH both for the zombie and for running processes.
2217
2218 As a workaround, check if we're waiting for the thread group leader and
2219 if it's a zombie, and avoid calling waitpid if it is.
2220
 2221 This is racy: what if the tgl becomes a zombie right after we check?
2222 Therefore always use WNOHANG with sigsuspend - it is equivalent to
5f572dec 2223 a blocking waitpid, but linux_proc_pid_is_zombie is safe this way. */
432b4d03 2224
dfd4cc63
LM
2225 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid)
2226 && linux_proc_pid_is_zombie (ptid_get_lwp (lp->ptid)))
d6b0e80f 2227 {
d6b0e80f
AC
2228 thread_dead = 1;
2229 if (debug_linux_nat)
432b4d03
JK
2230 fprintf_unfiltered (gdb_stdlog,
2231 "WL: Thread group leader %s vanished.\n",
d6b0e80f 2232 target_pid_to_str (lp->ptid));
432b4d03 2233 break;
d6b0e80f 2234 }
432b4d03
JK
2235
2236 /* Wait for next SIGCHLD and try again. This may let SIGCHLD handlers
2237 get invoked despite our caller had them intentionally blocked by
2238 block_child_signals. This is sensitive only to the loop of
2239 linux_nat_wait_1 and there if we get called my_waitpid gets called
2240 again before it gets to sigsuspend so we can safely let the handlers
2241 get executed here. */
2242
d36bf488
DE
2243 if (debug_linux_nat)
2244 fprintf_unfiltered (gdb_stdlog, "WL: about to sigsuspend\n");
432b4d03
JK
2245 sigsuspend (&suspend_mask);
2246 }
2247
2248 restore_child_signals_mask (&prev_mask);
2249
d6b0e80f
AC
2250 if (!thread_dead)
2251 {
dfd4cc63 2252 gdb_assert (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
2253
2254 if (debug_linux_nat)
2255 {
2256 fprintf_unfiltered (gdb_stdlog,
2257 "WL: waitpid %s received %s\n",
2258 target_pid_to_str (lp->ptid),
2259 status_to_str (status));
2260 }
d6b0e80f 2261
a9f4bb21
PA
2262 /* Check if the thread has exited. */
2263 if (WIFEXITED (status) || WIFSIGNALED (status))
2264 {
2265 thread_dead = 1;
2266 if (debug_linux_nat)
2267 fprintf_unfiltered (gdb_stdlog, "WL: %s exited.\n",
2268 target_pid_to_str (lp->ptid));
2269 }
d6b0e80f
AC
2270 }
2271
2272 if (thread_dead)
2273 {
e26af52f 2274 exit_lwp (lp);
d6b0e80f
AC
2275 return 0;
2276 }
2277
2278 gdb_assert (WIFSTOPPED (status));
8817a6f2 2279 lp->stopped = 1;
d6b0e80f 2280
8784d563
PA
2281 if (lp->must_set_ptrace_flags)
2282 {
2283 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
2284
2285 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
2286 lp->must_set_ptrace_flags = 0;
2287 }
2288
ca2163eb
PA
2289 /* Handle GNU/Linux's syscall SIGTRAPs. */
2290 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
2291 {
2292 /* No longer need the sysgood bit. The ptrace event ends up
2293 recorded in lp->waitstatus if we care for it. We can carry
2294 on handling the event like a regular SIGTRAP from here
2295 on. */
2296 status = W_STOPCODE (SIGTRAP);
2297 if (linux_handle_syscall_trap (lp, 1))
2298 return wait_lwp (lp);
2299 }
2300
d6b0e80f 2301 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
2302 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
2303 && linux_is_extended_waitstatus (status))
d6b0e80f
AC
2304 {
2305 if (debug_linux_nat)
2306 fprintf_unfiltered (gdb_stdlog,
2307 "WL: Handling extended status 0x%06x\n",
2308 status);
20ba1ce6
PA
2309 linux_handle_extended_wait (lp, status, 1);
2310 return 0;
d6b0e80f
AC
2311 }
2312
2313 return status;
2314}
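/* Reference sketch of the wait-status encoding manipulated throughout
   this file (glibc's usual <sys/wait.h> layout, stated here as an
   assumption for orientation):

       WIFSTOPPED (s)    ->  (s & 0xff) == 0x7f, stop signal in bits 8-15
       WIFEXITED (s)     ->  low 7 bits zero, exit code in bits 8-15
       WIFSIGNALED (s)   ->  terminating signal in the low 7 bits
       W_STOPCODE (sig)  ->  ((sig) << 8) | 0x7f

   so W_STOPCODE (SIGTRAP), used to rewrite syscall traps, produces a
   status indistinguishable from a genuine SIGTRAP stop.  */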
2315
2316/* Send a SIGSTOP to LP. */
2317
2318static int
2319stop_callback (struct lwp_info *lp, void *data)
2320{
2321 if (!lp->stopped && !lp->signalled)
2322 {
2323 int ret;
2324
2325 if (debug_linux_nat)
2326 {
2327 fprintf_unfiltered (gdb_stdlog,
2328 "SC: kill %s **<SIGSTOP>**\n",
2329 target_pid_to_str (lp->ptid));
2330 }
2331 errno = 0;
dfd4cc63 2332 ret = kill_lwp (ptid_get_lwp (lp->ptid), SIGSTOP);
d6b0e80f
AC
2333 if (debug_linux_nat)
2334 {
2335 fprintf_unfiltered (gdb_stdlog,
2336 "SC: lwp kill %d %s\n",
2337 ret,
2338 errno ? safe_strerror (errno) : "ERRNO-OK");
2339 }
2340
2341 lp->signalled = 1;
2342 gdb_assert (lp->status == 0);
2343 }
2344
2345 return 0;
2346}
2347
7b50312a
PA
2348/* Request a stop on LWP. */
2349
2350void
2351linux_stop_lwp (struct lwp_info *lwp)
2352{
2353 stop_callback (lwp, NULL);
2354}
2355
2db9a427
PA
2356/* See linux-nat.h */
2357
2358void
2359linux_stop_and_wait_all_lwps (void)
2360{
2361 /* Stop all LWP's ... */
2362 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
2363
2364 /* ... and wait until all of them have reported back that
2365 they're no longer running. */
2366 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
2367}
2368
2369/* See linux-nat.h */
2370
2371void
2372linux_unstop_all_lwps (void)
2373{
2374 iterate_over_lwps (minus_one_ptid,
2375 resume_stopped_resumed_lwps, &minus_one_ptid);
2376}
2377
57380f4e 2378/* Return non-zero if LWP PID has a pending SIGINT. */
d6b0e80f
AC
2379
2380static int
57380f4e
DJ
2381linux_nat_has_pending_sigint (int pid)
2382{
2383 sigset_t pending, blocked, ignored;
57380f4e
DJ
2384
2385 linux_proc_pending_signals (pid, &pending, &blocked, &ignored);
2386
2387 if (sigismember (&pending, SIGINT)
2388 && !sigismember (&ignored, SIGINT))
2389 return 1;
2390
2391 return 0;
2392}
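/* Illustrative sketch (an assumption about the /proc fields that
   linux_proc_pending_signals parses): the pending, blocked and ignored
   sets come from the hexadecimal signal masks in /proc/PID/status,
   e.g. with SIGINT (signal 2) pending:

       SigPnd:  0000000000000002
       SigBlk:  0000000000000000
       SigIgn:  0000000000000000

   Bit (signo - 1) set in the pending mask and clear in the ignored
   mask is what makes the check above return non-zero.  */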
2393
2394/* Set a flag in LP indicating that we should ignore its next SIGINT. */
2395
2396static int
2397set_ignore_sigint (struct lwp_info *lp, void *data)
d6b0e80f 2398{
57380f4e
DJ
2399 /* If a thread has a pending SIGINT, consume it; otherwise, set a
2400 flag to consume the next one. */
2401 if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
2402 && WSTOPSIG (lp->status) == SIGINT)
2403 lp->status = 0;
2404 else
2405 lp->ignore_sigint = 1;
2406
2407 return 0;
2408}
2409
2410/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
2411 This function is called after we know the LWP has stopped; if the LWP
2412 stopped before the expected SIGINT was delivered, then it will never have
2413 arrived. Also, if the signal was delivered to a shared queue and consumed
2414 by a different thread, it will never be delivered to this LWP. */
d6b0e80f 2415
57380f4e
DJ
2416static void
2417maybe_clear_ignore_sigint (struct lwp_info *lp)
2418{
2419 if (!lp->ignore_sigint)
2420 return;
2421
dfd4cc63 2422 if (!linux_nat_has_pending_sigint (ptid_get_lwp (lp->ptid)))
57380f4e
DJ
2423 {
2424 if (debug_linux_nat)
2425 fprintf_unfiltered (gdb_stdlog,
2426 "MCIS: Clearing bogus flag for %s\n",
2427 target_pid_to_str (lp->ptid));
2428 lp->ignore_sigint = 0;
2429 }
2430}
2431
ebec9a0f
PA
2432/* Fetch the possible triggered data watchpoint info and store it in
2433 LP.
2434
2435 On some archs, like x86, that use debug registers to set
2436 watchpoints, it's possible that the way to know which watched
 2437 address trapped is to check the register that is used to select
2438 which address to watch. Problem is, between setting the watchpoint
2439 and reading back which data address trapped, the user may change
2440 the set of watchpoints, and, as a consequence, GDB changes the
2441 debug registers in the inferior. To avoid reading back a stale
2442 stopped-data-address when that happens, we cache in LP the fact
2443 that a watchpoint trapped, and the corresponding data address, as
2444 soon as we see LP stop with a SIGTRAP. If GDB changes the debug
2445 registers meanwhile, we have the cached data we can rely on. */
2446
9c02b525
PA
2447static int
2448check_stopped_by_watchpoint (struct lwp_info *lp)
ebec9a0f
PA
2449{
2450 struct cleanup *old_chain;
2451
2452 if (linux_ops->to_stopped_by_watchpoint == NULL)
9c02b525 2453 return 0;
ebec9a0f
PA
2454
2455 old_chain = save_inferior_ptid ();
2456 inferior_ptid = lp->ptid;
2457
9c02b525 2458 if (linux_ops->to_stopped_by_watchpoint (linux_ops))
ebec9a0f 2459 {
15c66dd6 2460 lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
9c02b525 2461
ebec9a0f
PA
2462 if (linux_ops->to_stopped_data_address != NULL)
2463 lp->stopped_data_address_p =
2464 linux_ops->to_stopped_data_address (&current_target,
2465 &lp->stopped_data_address);
2466 else
2467 lp->stopped_data_address_p = 0;
2468 }
2469
2470 do_cleanups (old_chain);
9c02b525 2471
15c66dd6 2472 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
9c02b525
PA
2473}
2474
2475/* Called when the LWP stopped for a trap that could be explained by a
2476 watchpoint or a breakpoint. */
2477
2478static void
2479save_sigtrap (struct lwp_info *lp)
2480{
15c66dd6 2481 gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
9c02b525
PA
2482 gdb_assert (lp->status != 0);
2483
faf09f01
PA
2484 /* Check first if this was a SW/HW breakpoint before checking
2485 watchpoints, because at least s390 can't tell the data address of
2486 hardware watchpoint hits, and the kernel returns
2487 stopped-by-watchpoint as long as there's a watchpoint set. */
9c02b525
PA
2488 if (linux_nat_status_is_event (lp->status))
2489 check_stopped_by_breakpoint (lp);
faf09f01
PA
2490
2491 /* Note that TRAP_HWBKPT can indicate either a hardware breakpoint
2492 or hardware watchpoint. Check which is which if we got
2493 TARGET_STOPPED_BY_HW_BREAKPOINT. */
2494 if (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON
2495 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
2496 check_stopped_by_watchpoint (lp);
ebec9a0f
PA
2497}
2498
9c02b525 2499/* Returns true if the LWP had stopped for a watchpoint. */
ebec9a0f
PA
2500
2501static int
6a109b6b 2502linux_nat_stopped_by_watchpoint (struct target_ops *ops)
ebec9a0f
PA
2503{
2504 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2505
2506 gdb_assert (lp != NULL);
2507
15c66dd6 2508 return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
ebec9a0f
PA
2509}
2510
2511static int
2512linux_nat_stopped_data_address (struct target_ops *ops, CORE_ADDR *addr_p)
2513{
2514 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2515
2516 gdb_assert (lp != NULL);
2517
2518 *addr_p = lp->stopped_data_address;
2519
2520 return lp->stopped_data_address_p;
2521}
2522
26ab7092
JK
 2523/* Commonly, any breakpoint / watchpoint generates only SIGTRAP. */
2524
2525static int
2526sigtrap_is_event (int status)
2527{
2528 return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
2529}
2530
26ab7092
JK
 2531/* Set an alternative recognizer for SIGTRAP-like events. If
 2532 breakpoint_inserted_here_p reports a breakpoint there, then
 2533 gdbarch_decr_pc_after_break will be applied. */
2534
2535void
2536linux_nat_set_status_is_event (struct target_ops *t,
2537 int (*status_is_event) (int status))
2538{
2539 linux_nat_status_is_event = status_is_event;
2540}
2541
57380f4e
DJ
2542/* Wait until LP is stopped. */
2543
2544static int
2545stop_wait_callback (struct lwp_info *lp, void *data)
2546{
c9657e70 2547 struct inferior *inf = find_inferior_ptid (lp->ptid);
6c95b8df
PA
2548
2549 /* If this is a vfork parent, bail out, it is not going to report
2550 any SIGSTOP until the vfork is done with. */
2551 if (inf->vfork_child != NULL)
2552 return 0;
2553
d6b0e80f
AC
2554 if (!lp->stopped)
2555 {
2556 int status;
2557
2558 status = wait_lwp (lp);
2559 if (status == 0)
2560 return 0;
2561
57380f4e
DJ
2562 if (lp->ignore_sigint && WIFSTOPPED (status)
2563 && WSTOPSIG (status) == SIGINT)
d6b0e80f 2564 {
57380f4e 2565 lp->ignore_sigint = 0;
d6b0e80f
AC
2566
2567 errno = 0;
dfd4cc63 2568 ptrace (PTRACE_CONT, ptid_get_lwp (lp->ptid), 0, 0);
8817a6f2 2569 lp->stopped = 0;
d6b0e80f
AC
2570 if (debug_linux_nat)
2571 fprintf_unfiltered (gdb_stdlog,
3e43a32a
MS
2572 "PTRACE_CONT %s, 0, 0 (%s) "
2573 "(discarding SIGINT)\n",
d6b0e80f
AC
2574 target_pid_to_str (lp->ptid),
2575 errno ? safe_strerror (errno) : "OK");
2576
57380f4e 2577 return stop_wait_callback (lp, NULL);
d6b0e80f
AC
2578 }
2579
57380f4e
DJ
2580 maybe_clear_ignore_sigint (lp);
2581
d6b0e80f
AC
2582 if (WSTOPSIG (status) != SIGSTOP)
2583 {
e5ef252a 2584 /* The thread was stopped with a signal other than SIGSTOP. */
7feb7d06 2585
e5ef252a
PA
2586 if (debug_linux_nat)
2587 fprintf_unfiltered (gdb_stdlog,
2588 "SWC: Pending event %s in %s\n",
2589 status_to_str ((int) status),
2590 target_pid_to_str (lp->ptid));
2591
2592 /* Save the sigtrap event. */
2593 lp->status = status;
e5ef252a 2594 gdb_assert (lp->signalled);
9c02b525 2595 save_sigtrap (lp);
d6b0e80f
AC
2596 }
2597 else
2598 {
2599 /* We caught the SIGSTOP that we intended to catch, so
2600 there's no SIGSTOP pending. */
e5ef252a
PA
2601
2602 if (debug_linux_nat)
2603 fprintf_unfiltered (gdb_stdlog,
2604 "SWC: Delayed SIGSTOP caught for %s.\n",
2605 target_pid_to_str (lp->ptid));
2606
e5ef252a
PA
2607 /* Reset SIGNALLED only after the stop_wait_callback call
2608 above as it does gdb_assert on SIGNALLED. */
d6b0e80f
AC
2609 lp->signalled = 0;
2610 }
2611 }
2612
2613 return 0;
2614}
2615
9c02b525
PA
2616/* Return non-zero if LP has a wait status pending. Discard the
2617 pending event and resume the LWP if the event that originally
2618 caused the stop became uninteresting. */
d6b0e80f
AC
2619
2620static int
2621status_callback (struct lwp_info *lp, void *data)
2622{
2623 /* Only report a pending wait status if we pretend that this has
2624 indeed been resumed. */
ca2163eb
PA
2625 if (!lp->resumed)
2626 return 0;
2627
eb54c8bf
PA
2628 if (!lwp_status_pending_p (lp))
2629 return 0;
2630
15c66dd6
PA
2631 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
2632 || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
9c02b525
PA
2633 {
2634 struct regcache *regcache = get_thread_regcache (lp->ptid);
2635 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2636 CORE_ADDR pc;
2637 int discard = 0;
2638
9c02b525
PA
2639 pc = regcache_read_pc (regcache);
2640
2641 if (pc != lp->stop_pc)
2642 {
2643 if (debug_linux_nat)
2644 fprintf_unfiltered (gdb_stdlog,
2645 "SC: PC of %s changed. was=%s, now=%s\n",
2646 target_pid_to_str (lp->ptid),
2647 paddress (target_gdbarch (), lp->stop_pc),
2648 paddress (target_gdbarch (), pc));
2649 discard = 1;
2650 }
faf09f01
PA
2651
2652#if !USE_SIGTRAP_SIGINFO
9c02b525
PA
2653 else if (!breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2654 {
2655 if (debug_linux_nat)
2656 fprintf_unfiltered (gdb_stdlog,
2657 "SC: previous breakpoint of %s, at %s gone\n",
2658 target_pid_to_str (lp->ptid),
2659 paddress (target_gdbarch (), lp->stop_pc));
2660
2661 discard = 1;
2662 }
faf09f01 2663#endif
9c02b525
PA
2664
2665 if (discard)
2666 {
2667 if (debug_linux_nat)
2668 fprintf_unfiltered (gdb_stdlog,
2669 "SC: pending event of %s cancelled.\n",
2670 target_pid_to_str (lp->ptid));
2671
2672 lp->status = 0;
2673 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
2674 return 0;
2675 }
9c02b525
PA
2676 }
2677
eb54c8bf 2678 return 1;
d6b0e80f
AC
2679}
2680
2681/* Return non-zero if LP isn't stopped. */
2682
2683static int
2684running_callback (struct lwp_info *lp, void *data)
2685{
25289eb2 2686 return (!lp->stopped
8a99810d 2687 || (lwp_status_pending_p (lp) && lp->resumed));
d6b0e80f
AC
2688}
2689
2690/* Count the LWP's that have had events. */
2691
2692static int
2693count_events_callback (struct lwp_info *lp, void *data)
2694{
2695 int *count = data;
2696
2697 gdb_assert (count != NULL);
2698
9c02b525
PA
2699 /* Select only resumed LWPs that have an event pending. */
2700 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2701 (*count)++;
2702
2703 return 0;
2704}
2705
2706/* Select the LWP (if any) that is currently being single-stepped. */
2707
2708static int
2709select_singlestep_lwp_callback (struct lwp_info *lp, void *data)
2710{
25289eb2
PA
2711 if (lp->last_resume_kind == resume_step
2712 && lp->status != 0)
d6b0e80f
AC
2713 return 1;
2714 else
2715 return 0;
2716}
2717
8a99810d
PA
2718/* Returns true if LP has a status pending. */
2719
2720static int
2721lwp_status_pending_p (struct lwp_info *lp)
2722{
2723 /* We check for lp->waitstatus in addition to lp->status, because we
 2724 can have pending process exits recorded in lp->waitstatus, and
2725 W_EXITCODE(0,0) happens to be 0. */
2726 return lp->status != 0 || lp->waitstatus.kind != TARGET_WAITKIND_IGNORE;
2727}
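/* Worked example for the comment above (assuming glibc's W_EXITCODE):
   an LWP that exits normally with code 0 yields

       W_EXITCODE (0, 0) == (0 << 8) | 0 == 0

   which in lp->status would be indistinguishable from "nothing
   pending"; such exits are instead parked in lp->waitstatus (as
   TARGET_WAITKIND_EXITED), hence the two-part test above.  */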
2728
b90fc188 2729/* Select the Nth LWP that has had an event. */
d6b0e80f
AC
2730
2731static int
2732select_event_lwp_callback (struct lwp_info *lp, void *data)
2733{
2734 int *selector = data;
2735
2736 gdb_assert (selector != NULL);
2737
9c02b525
PA
2738 /* Select only resumed LWPs that have an event pending. */
2739 if (lp->resumed && lwp_status_pending_p (lp))
d6b0e80f
AC
2740 if ((*selector)-- == 0)
2741 return 1;
2742
2743 return 0;
2744}
2745
9c02b525
PA
2746/* Called when the LWP got a signal/trap that could be explained by a
2747 software or hardware breakpoint. */
2748
710151dd 2749static int
9c02b525 2750check_stopped_by_breakpoint (struct lwp_info *lp)
710151dd
PA
2751{
2752 /* Arrange for a breakpoint to be hit again later. We don't keep
2753 the SIGTRAP status and don't forward the SIGTRAP signal to the
2754 LWP. We will handle the current event, eventually we will resume
2755 this LWP, and this breakpoint will trap again.
2756
2757 If we do not do this, then we run the risk that the user will
2758 delete or disable the breakpoint, but the LWP will have already
2759 tripped on it. */
2760
515630c5
UW
2761 struct regcache *regcache = get_thread_regcache (lp->ptid);
2762 struct gdbarch *gdbarch = get_regcache_arch (regcache);
2763 CORE_ADDR pc;
9c02b525 2764 CORE_ADDR sw_bp_pc;
faf09f01
PA
2765#if USE_SIGTRAP_SIGINFO
2766 siginfo_t siginfo;
2767#endif
9c02b525
PA
2768
2769 pc = regcache_read_pc (regcache);
527a273a 2770 sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
515630c5 2771
faf09f01
PA
2772#if USE_SIGTRAP_SIGINFO
2773 if (linux_nat_get_siginfo (lp->ptid, &siginfo))
2774 {
2775 if (siginfo.si_signo == SIGTRAP)
2776 {
2777 if (siginfo.si_code == GDB_ARCH_TRAP_BRKPT)
2778 {
2779 if (debug_linux_nat)
2780 fprintf_unfiltered (gdb_stdlog,
2781 "CSBB: Push back software "
2782 "breakpoint for %s\n",
2783 target_pid_to_str (lp->ptid));
2784
2785 /* Back up the PC if necessary. */
2786 if (pc != sw_bp_pc)
2787 regcache_write_pc (regcache, sw_bp_pc);
2788
2789 lp->stop_pc = sw_bp_pc;
2790 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
2791 return 1;
2792 }
2793 else if (siginfo.si_code == TRAP_HWBKPT)
2794 {
2795 if (debug_linux_nat)
2796 fprintf_unfiltered (gdb_stdlog,
2797 "CSBB: Push back hardware "
2798 "breakpoint/watchpoint for %s\n",
2799 target_pid_to_str (lp->ptid));
2800
2801 lp->stop_pc = pc;
2802 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
2803 return 1;
2804 }
2805 }
2806 }
2807#else
9c02b525
PA
2808 if ((!lp->step || lp->stop_pc == sw_bp_pc)
2809 && software_breakpoint_inserted_here_p (get_regcache_aspace (regcache),
2810 sw_bp_pc))
710151dd 2811 {
9c02b525
PA
2812 /* The LWP was either continued, or stepped a software
2813 breakpoint instruction. */
710151dd
PA
2814 if (debug_linux_nat)
2815 fprintf_unfiltered (gdb_stdlog,
9c02b525 2816 "CB: Push back software breakpoint for %s\n",
710151dd
PA
2817 target_pid_to_str (lp->ptid));
2818
2819 /* Back up the PC if necessary. */
9c02b525
PA
2820 if (pc != sw_bp_pc)
2821 regcache_write_pc (regcache, sw_bp_pc);
515630c5 2822
9c02b525 2823 lp->stop_pc = sw_bp_pc;
15c66dd6 2824 lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
710151dd
PA
2825 return 1;
2826 }
710151dd 2827
9c02b525
PA
2828 if (hardware_breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
2829 {
2830 if (debug_linux_nat)
2831 fprintf_unfiltered (gdb_stdlog,
2832 "CB: Push back hardware breakpoint for %s\n",
2833 target_pid_to_str (lp->ptid));
d6b0e80f 2834
9c02b525 2835 lp->stop_pc = pc;
15c66dd6 2836 lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
9c02b525
PA
2837 return 1;
2838 }
faf09f01 2839#endif
d6b0e80f
AC
2840
2841 return 0;
2842}
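/* Illustrative sketch, using x86 as the assumed example: the int3
   breakpoint instruction traps with the reported PC one byte past the
   breakpoint address, so the code above rewinds it:

       sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);   - 1 on x86
       regcache_write_pc (regcache, sw_bp_pc);

   On targets whose trap leaves the PC at the breakpoint itself,
   decr_pc_after_break is 0, pc == sw_bp_pc, and the write is skipped.  */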
2843
faf09f01
PA
2844
2845/* Returns true if the LWP had stopped for a software breakpoint. */
2846
2847static int
2848linux_nat_stopped_by_sw_breakpoint (struct target_ops *ops)
2849{
2850 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2851
2852 gdb_assert (lp != NULL);
2853
2854 return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2855}
2856
2857/* Implement the supports_stopped_by_sw_breakpoint method. */
2858
2859static int
2860linux_nat_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2861{
2862 return USE_SIGTRAP_SIGINFO;
2863}
2864
2865/* Returns true if the LWP had stopped for a hardware
2866 breakpoint/watchpoint. */
2867
2868static int
2869linux_nat_stopped_by_hw_breakpoint (struct target_ops *ops)
2870{
2871 struct lwp_info *lp = find_lwp_pid (inferior_ptid);
2872
2873 gdb_assert (lp != NULL);
2874
2875 return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2876}
2877
2878/* Implement the supports_stopped_by_hw_breakpoint method. */
2879
2880static int
2881linux_nat_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2882{
2883 return USE_SIGTRAP_SIGINFO;
2884}
2885
d6b0e80f
AC
2886/* Select one LWP out of those that have events pending. */
2887
2888static void
d90e17a7 2889select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
d6b0e80f
AC
2890{
2891 int num_events = 0;
2892 int random_selector;
9c02b525 2893 struct lwp_info *event_lp = NULL;
d6b0e80f 2894
ac264b3b 2895 /* Record the wait status for the original LWP. */
d6b0e80f
AC
2896 (*orig_lp)->status = *status;
2897
9c02b525
PA
2898 /* In all-stop, give preference to the LWP that is being
2899 single-stepped. There will be at most one, and it will be the
2900 LWP that the core is most interested in. If we didn't do this,
2901 then we'd have to handle pending step SIGTRAPs somehow in case
2902 the core later continues the previously-stepped thread, as
2903 otherwise we'd report the pending SIGTRAP then, and the core, not
2904 having stepped the thread, wouldn't understand what the trap was
2905 for, and therefore would report it to the user as a random
2906 signal. */
2907 if (!non_stop)
d6b0e80f 2908 {
9c02b525
PA
2909 event_lp = iterate_over_lwps (filter,
2910 select_singlestep_lwp_callback, NULL);
2911 if (event_lp != NULL)
2912 {
2913 if (debug_linux_nat)
2914 fprintf_unfiltered (gdb_stdlog,
2915 "SEL: Select single-step %s\n",
2916 target_pid_to_str (event_lp->ptid));
2917 }
d6b0e80f 2918 }
9c02b525
PA
2919
2920 if (event_lp == NULL)
d6b0e80f 2921 {
9c02b525 2922 /* Pick one at random, out of those which have had events. */
d6b0e80f 2923
9c02b525 2924 /* First see how many events we have. */
d90e17a7 2925 iterate_over_lwps (filter, count_events_callback, &num_events);
8bf3b159 2926 gdb_assert (num_events > 0);
d6b0e80f 2927
9c02b525
PA
2928 /* Now randomly pick a LWP out of those that have had
2929 events. */
d6b0e80f
AC
2930 random_selector = (int)
2931 ((num_events * (double) rand ()) / (RAND_MAX + 1.0));
2932
2933 if (debug_linux_nat && num_events > 1)
2934 fprintf_unfiltered (gdb_stdlog,
9c02b525 2935 "SEL: Found %d events, selecting #%d\n",
d6b0e80f
AC
2936 num_events, random_selector);
2937
d90e17a7
PA
2938 event_lp = iterate_over_lwps (filter,
2939 select_event_lwp_callback,
d6b0e80f
AC
2940 &random_selector);
2941 }
2942
2943 if (event_lp != NULL)
2944 {
2945 /* Switch the event LWP. */
2946 *orig_lp = event_lp;
2947 *status = event_lp->status;
2948 }
2949
2950 /* Flush the wait status for the event LWP. */
2951 (*orig_lp)->status = 0;
2952}
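/* Worked example of the random selection above: with num_events == 3,

       (int) ((3 * (double) rand ()) / (RAND_MAX + 1.0))

   maps rand ()'s range [0, RAND_MAX] onto {0, 1, 2} with roughly equal
   probability, and select_event_lwp_callback then walks the LWP list
   until that many pending-event LWPs have been skipped.  */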
2953
2954/* Return non-zero if LP has been resumed. */
2955
2956static int
2957resumed_callback (struct lwp_info *lp, void *data)
2958{
2959 return lp->resumed;
2960}
2961
12d9289a
PA
2962/* Stop an active thread, verify it still exists, then resume it. If
2963 the thread ends up with a pending status, then it is not resumed,
2964 and *DATA (really a pointer to int), is set. */
d6b0e80f
AC
2965
2966static int
2967stop_and_resume_callback (struct lwp_info *lp, void *data)
2968{
25289eb2 2969 if (!lp->stopped)
d6b0e80f 2970 {
25289eb2
PA
2971 ptid_t ptid = lp->ptid;
2972
d6b0e80f
AC
2973 stop_callback (lp, NULL);
2974 stop_wait_callback (lp, NULL);
25289eb2
PA
2975
2976 /* Resume if the lwp still exists, and the core wanted it
2977 running. */
12d9289a
PA
2978 lp = find_lwp_pid (ptid);
2979 if (lp != NULL)
25289eb2 2980 {
12d9289a 2981 if (lp->last_resume_kind == resume_stop
8a99810d 2982 && !lwp_status_pending_p (lp))
12d9289a
PA
2983 {
2984 /* The core wanted the LWP to stop. Even if it stopped
2985 cleanly (with SIGSTOP), leave the event pending. */
2986 if (debug_linux_nat)
2987 fprintf_unfiltered (gdb_stdlog,
2988 "SARC: core wanted LWP %ld stopped "
2989 "(leaving SIGSTOP pending)\n",
dfd4cc63 2990 ptid_get_lwp (lp->ptid));
12d9289a
PA
2991 lp->status = W_STOPCODE (SIGSTOP);
2992 }
2993
8a99810d 2994 if (!lwp_status_pending_p (lp))
12d9289a
PA
2995 {
2996 if (debug_linux_nat)
2997 fprintf_unfiltered (gdb_stdlog,
2998 "SARC: re-resuming LWP %ld\n",
dfd4cc63 2999 ptid_get_lwp (lp->ptid));
e5ef252a 3000 resume_lwp (lp, lp->step, GDB_SIGNAL_0);
12d9289a
PA
3001 }
3002 else
3003 {
3004 if (debug_linux_nat)
3005 fprintf_unfiltered (gdb_stdlog,
3006 "SARC: not re-resuming LWP %ld "
3007 "(has pending)\n",
dfd4cc63 3008 ptid_get_lwp (lp->ptid));
12d9289a 3009 }
25289eb2 3010 }
d6b0e80f
AC
3011 }
3012 return 0;
3013}
3014
02f3fc28 3015/* Check if we should go on and pass this event to common code.
9c02b525 3016 Return the affected lwp if we are, or NULL otherwise. */
12d9289a 3017
02f3fc28 3018static struct lwp_info *
9c02b525 3019linux_nat_filter_event (int lwpid, int status)
02f3fc28
PA
3020{
3021 struct lwp_info *lp;
89a5711c 3022 int event = linux_ptrace_get_extended_event (status);
02f3fc28
PA
3023
3024 lp = find_lwp_pid (pid_to_ptid (lwpid));
3025
3026 /* Check for stop events reported by a process we didn't already
3027 know about - anything not already in our LWP list.
3028
3029 If we're expecting to receive stopped processes after
3030 fork, vfork, and clone events, then we'll just add the
3031 new one to our list and go back to waiting for the event
3032 to be reported - the stopped process might be returned
0e5bf2a8
PA
3033 from waitpid before or after the event is.
3034
3035 But note the case of a non-leader thread exec'ing after the
3036 leader having exited, and gone from our lists. The non-leader
3037 thread changes its tid to the tgid. */
3038
3039 if (WIFSTOPPED (status) && lp == NULL
89a5711c 3040 && (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC))
0e5bf2a8
PA
3041 {
3042 /* A multi-thread exec after we had seen the leader exiting. */
3043 if (debug_linux_nat)
3044 fprintf_unfiltered (gdb_stdlog,
3045 "LLW: Re-adding thread group leader LWP %d.\n",
3046 lwpid);
3047
dfd4cc63 3048 lp = add_lwp (ptid_build (lwpid, lwpid, 0));
0e5bf2a8
PA
3049 lp->stopped = 1;
3050 lp->resumed = 1;
3051 add_thread (lp->ptid);
3052 }
3053
02f3fc28
PA
3054 if (WIFSTOPPED (status) && !lp)
3055 {
3b27ef47
PA
3056 if (debug_linux_nat)
3057 fprintf_unfiltered (gdb_stdlog,
3058 "LHEW: saving LWP %ld status %s in stopped_pids list\n",
3059 (long) lwpid, status_to_str (status));
84636d28 3060 add_to_pid_list (&stopped_pids, lwpid, status);
02f3fc28
PA
3061 return NULL;
3062 }
3063
3064 /* Make sure we don't report an event for the exit of an LWP not in
1777feb0 3065 our list, i.e. not part of the current process. This can happen
fd62cb89 3066 if we detach from a program we originally forked and then it
02f3fc28
PA
3067 exits. */
3068 if (!WIFSTOPPED (status) && !lp)
3069 return NULL;
3070
8817a6f2
PA
3071 /* This LWP is stopped now. (And if dead, this prevents it from
3072 ever being continued.) */
3073 lp->stopped = 1;
3074
8784d563
PA
3075 if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
3076 {
3077 struct inferior *inf = find_inferior_pid (ptid_get_pid (lp->ptid));
3078
3079 linux_enable_event_reporting (ptid_get_lwp (lp->ptid), inf->attach_flag);
3080 lp->must_set_ptrace_flags = 0;
3081 }
3082
ca2163eb
PA
3083 /* Handle GNU/Linux's syscall SIGTRAPs. */
3084 if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
3085 {
3086 /* No longer need the sysgood bit. The ptrace event ends up
3087 recorded in lp->waitstatus if we care for it. We can carry
3088 on handling the event like a regular SIGTRAP from here
3089 on. */
3090 status = W_STOPCODE (SIGTRAP);
3091 if (linux_handle_syscall_trap (lp, 0))
3092 return NULL;
3093 }
02f3fc28 3094
ca2163eb 3095 /* Handle GNU/Linux's extended waitstatus for trace events. */
89a5711c
DB
3096 if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
3097 && linux_is_extended_waitstatus (status))
02f3fc28
PA
3098 {
3099 if (debug_linux_nat)
3100 fprintf_unfiltered (gdb_stdlog,
3101 "LLW: Handling extended status 0x%06x\n",
3102 status);
3103 if (linux_handle_extended_wait (lp, status, 0))
3104 return NULL;
3105 }
3106
3107 /* Check if the thread has exited. */
9c02b525
PA
3108 if (WIFEXITED (status) || WIFSIGNALED (status))
3109 {
3110 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
02f3fc28 3111 {
9c02b525
PA
3112 /* If this is the main thread, we must stop all threads and
3113 verify if they are still alive. This is because in the
3114 nptl thread model on Linux 2.4, there is no signal issued
3115 for exiting LWPs other than the main thread. We only get
3116 the main thread exit signal once all child threads have
3117 already exited. If we stop all the threads and use the
3118 stop_wait_callback to check if they have exited we can
3119 determine whether this signal should be ignored or
3120 whether it means the end of the debugged application,
3121 regardless of which threading model is being used. */
3122 if (ptid_get_pid (lp->ptid) == ptid_get_lwp (lp->ptid))
3123 {
3124 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3125 stop_and_resume_callback, NULL);
3126 }
3127
3128 if (debug_linux_nat)
3129 fprintf_unfiltered (gdb_stdlog,
3130 "LLW: %s exited.\n",
3131 target_pid_to_str (lp->ptid));
3132
3133 if (num_lwps (ptid_get_pid (lp->ptid)) > 1)
3134 {
3135 /* If there is at least one more LWP, then the exit signal
3136 was not the end of the debugged application and should be
3137 ignored. */
3138 exit_lwp (lp);
3139 return NULL;
3140 }
02f3fc28
PA
3141 }
3142
9c02b525
PA
3143 gdb_assert (lp->resumed);
3144
02f3fc28
PA
3145 if (debug_linux_nat)
3146 fprintf_unfiltered (gdb_stdlog,
9c02b525
PA
3147 "Process %ld exited\n",
3148 ptid_get_lwp (lp->ptid));
02f3fc28 3149
9c02b525
PA
3150 /* This was the last lwp in the process. Since events are
 3151 serialized to GDB core, we may not be able to report this one
 3152 right now, but GDB core and the other target layers will want
 3153 to be notified about the exit code/signal, so leave the status
3154 pending for the next time we're able to report it. */
3155
 3156 /* Dead LWPs aren't expected to report a pending SIGSTOP. */
3157 lp->signalled = 0;
3158
3159 /* Store the pending event in the waitstatus, because
3160 W_EXITCODE(0,0) == 0. */
3161 store_waitstatus (&lp->waitstatus, status);
3162 return lp;
02f3fc28
PA
3163 }
3164
3165 /* Check if the current LWP has previously exited. In the nptl
3166 thread model, LWPs other than the main thread do not issue
3167 signals when they exit so we must check whenever the thread has
3168 stopped. A similar check is made in stop_wait_callback(). */
dfd4cc63 3169 if (num_lwps (ptid_get_pid (lp->ptid)) > 1 && !linux_thread_alive (lp->ptid))
02f3fc28 3170 {
dfd4cc63 3171 ptid_t ptid = pid_to_ptid (ptid_get_pid (lp->ptid));
d90e17a7 3172
02f3fc28
PA
3173 if (debug_linux_nat)
3174 fprintf_unfiltered (gdb_stdlog,
3175 "LLW: %s exited.\n",
3176 target_pid_to_str (lp->ptid));
3177
3178 exit_lwp (lp);
3179
3180 /* Make sure there is at least one thread running. */
d90e17a7 3181 gdb_assert (iterate_over_lwps (ptid, running_callback, NULL));
02f3fc28
PA
3182
3183 /* Discard the event. */
3184 return NULL;
3185 }
3186
3187 /* Make sure we don't report a SIGSTOP that we sent ourselves in
3188 an attempt to stop an LWP. */
3189 if (lp->signalled
3190 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
3191 {
3192 if (debug_linux_nat)
3193 fprintf_unfiltered (gdb_stdlog,
3194 "LLW: Delayed SIGSTOP caught for %s.\n",
3195 target_pid_to_str (lp->ptid));
3196
02f3fc28
PA
3197 lp->signalled = 0;
3198
25289eb2
PA
3199 if (lp->last_resume_kind != resume_stop)
3200 {
3201 /* This is a delayed SIGSTOP. */
02f3fc28 3202
8a99810d 3203 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
25289eb2
PA
3204 if (debug_linux_nat)
3205 fprintf_unfiltered (gdb_stdlog,
3206 "LLW: %s %s, 0, 0 (discard SIGSTOP)\n",
3207 lp->step ?
3208 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3209 target_pid_to_str (lp->ptid));
02f3fc28 3210
25289eb2 3211 gdb_assert (lp->resumed);
02f3fc28 3212
25289eb2
PA
3213 /* Discard the event. */
3214 return NULL;
3215 }
02f3fc28
PA
3216 }
3217
57380f4e
DJ
3218 /* Make sure we don't report a SIGINT that we have already displayed
3219 for another thread. */
3220 if (lp->ignore_sigint
3221 && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
3222 {
3223 if (debug_linux_nat)
3224 fprintf_unfiltered (gdb_stdlog,
3225 "LLW: Delayed SIGINT caught for %s.\n",
3226 target_pid_to_str (lp->ptid));
3227
3228 /* This is a delayed SIGINT. */
3229 lp->ignore_sigint = 0;
3230
8a99810d 3231 linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
57380f4e
DJ
3232 if (debug_linux_nat)
3233 fprintf_unfiltered (gdb_stdlog,
3234 "LLW: %s %s, 0, 0 (discard SIGINT)\n",
3235 lp->step ?
3236 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3237 target_pid_to_str (lp->ptid));
57380f4e
DJ
3238 gdb_assert (lp->resumed);
3239
3240 /* Discard the event. */
3241 return NULL;
3242 }
3243
9c02b525
PA
3244 /* Don't report signals that GDB isn't interested in, such as
3245 signals that are neither printed nor stopped upon. Stopping all
3246 threads can be a bit time-consuming so if we want decent
3247 performance with heavily multi-threaded programs, especially when
3248 they're using a high frequency timer, we'd better avoid it if we
3249 can. */
3250 if (WIFSTOPPED (status))
3251 {
3252 enum gdb_signal signo = gdb_signal_from_host (WSTOPSIG (status));
3253
3254 if (!non_stop)
3255 {
3256 /* Only do the below in all-stop, as we currently use SIGSTOP
3257 to implement target_stop (see linux_nat_stop) in
3258 non-stop. */
3259 if (signo == GDB_SIGNAL_INT && signal_pass_state (signo) == 0)
3260 {
3261 /* If ^C/BREAK is typed at the tty/console, SIGINT gets
3262 forwarded to the entire process group, that is, all LWPs
3263 will receive it - unless they're using CLONE_THREAD to
3264 share signals. Since we only want to report it once, we
3265 mark it as ignored for all LWPs except this one. */
3266 iterate_over_lwps (pid_to_ptid (ptid_get_pid (lp->ptid)),
3267 set_ignore_sigint, NULL);
3268 lp->ignore_sigint = 0;
3269 }
3270 else
3271 maybe_clear_ignore_sigint (lp);
3272 }
3273
3274 /* When using hardware single-step, we need to report every signal.
c9587f88
AT
3275 Otherwise, signals in pass_mask may be short-circuited
3276 except signals that might be caused by a breakpoint. */
9c02b525 3277 if (!lp->step
c9587f88
AT
3278 && WSTOPSIG (status) && sigismember (&pass_mask, WSTOPSIG (status))
3279 && !linux_wstatus_maybe_breakpoint (status))
9c02b525
PA
3280 {
3281 linux_resume_one_lwp (lp, lp->step, signo);
3282 if (debug_linux_nat)
3283 fprintf_unfiltered (gdb_stdlog,
3284 "LLW: %s %s, %s (preempt 'handle')\n",
3285 lp->step ?
3286 "PTRACE_SINGLESTEP" : "PTRACE_CONT",
3287 target_pid_to_str (lp->ptid),
3288 (signo != GDB_SIGNAL_0
3289 ? strsignal (gdb_signal_to_host (signo))
3290 : "0"));
3291 return NULL;
3292 }
3293 }
3294
02f3fc28
PA
3295 /* An interesting event. */
3296 gdb_assert (lp);
ca2163eb 3297 lp->status = status;
9c02b525 3298 save_sigtrap (lp);
02f3fc28
PA
3299 return lp;
3300}
3301
0e5bf2a8
PA
3302/* Detect zombie thread group leaders, and "exit" them. We can't reap
3303 their exits until all other threads in the group have exited. */
3304
3305static void
3306check_zombie_leaders (void)
3307{
3308 struct inferior *inf;
3309
3310 ALL_INFERIORS (inf)
3311 {
3312 struct lwp_info *leader_lp;
3313
3314 if (inf->pid == 0)
3315 continue;
3316
3317 leader_lp = find_lwp_pid (pid_to_ptid (inf->pid));
3318 if (leader_lp != NULL
3319 /* Check if there are other threads in the group, as we may
3320 have raced with the inferior simply exiting. */
3321 && num_lwps (inf->pid) > 1
5f572dec 3322 && linux_proc_pid_is_zombie (inf->pid))
0e5bf2a8
PA
3323 {
3324 if (debug_linux_nat)
3325 fprintf_unfiltered (gdb_stdlog,
3326 "CZL: Thread group leader %d zombie "
3327 "(it exited, or another thread execd).\n",
3328 inf->pid);
3329
3330 /* A leader zombie can mean one of two things:
3331
3332 - It exited, and there's an exit status pending
3333 available, or only the leader exited (not the whole
3334 program). In the latter case, we can't waitpid the
3335 leader's exit status until all other threads are gone.
3336
3337 - There are 3 or more threads in the group, and a thread
3338 other than the leader exec'd. On an exec, the Linux
3339 kernel destroys all other threads (except the execing
3340 one) in the thread group, and resets the execing thread's
3341 tid to the tgid. No exit notification is sent for the
3342 execing thread -- from the ptracer's perspective, it
3343 appears as though the execing thread just vanishes.
3344 Until we reap all other threads except the leader and the
3345 execing thread, the leader will be zombie, and the
3346 execing thread will be in `D (disc sleep)'. As soon as
3347 all other threads are reaped, the execing thread changes
3348 its tid to the tgid, and the previous (zombie) leader
3349 vanishes, giving place to the "new" leader. We could try
3350 distinguishing the exit and exec cases, by waiting once
3351 more, and seeing if something comes out, but it doesn't
3352 sound useful. The previous leader _does_ go away, and
3353 we'll re-add the new one once we see the exec event
3354 (which is just the same as what would happen if the
3355 previous leader did exit voluntarily before some other
3356 thread execs). */
3357
3358 if (debug_linux_nat)
3359 fprintf_unfiltered (gdb_stdlog,
3360 "CZL: Thread group leader %d vanished.\n",
3361 inf->pid);
3362 exit_lwp (leader_lp);
3363 }
3364 }
3365}
3366
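/* Illustrative sketch appended to this annotated listing (not part of
   linux-nat.c): roughly how a "is this PID a zombie?" test can be done
   from /proc, which is what the linux_proc_pid_is_zombie call used by
   check_zombie_leaders above is assumed to rely on.  The helper name
   and exact parsing are for illustration only; the real check lives in
   nat/linux-procfs.c.  */

#include <stdio.h>
#include <string.h>

/* Return 1 if /proc/PID/status reports state 'Z' (zombie), 0 if it
   reports some other state, and -1 if the file can't be read.  */

static int
sketch_pid_is_zombie (int pid)
{
  char path[64], line[128];
  FILE *f;
  int result = -1;

  snprintf (path, sizeof (path), "/proc/%d/status", pid);
  f = fopen (path, "r");
  if (f == NULL)
    return -1;

  while (fgets (line, sizeof (line), f) != NULL)
    if (strncmp (line, "State:", 6) == 0)
      {
	/* The field looks like "State:\tZ (zombie)".  */
	const char *p = line + 6;

	while (*p == ' ' || *p == '\t')
	  p++;
	result = (*p == 'Z');
	break;
      }

  fclose (f);
  return result;
}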
d6b0e80f 3367static ptid_t
7feb7d06 3368linux_nat_wait_1 (struct target_ops *ops,
47608cb1
PA
3369 ptid_t ptid, struct target_waitstatus *ourstatus,
3370 int target_options)
d6b0e80f 3371{
fc9b8e47 3372 sigset_t prev_mask;
4b60df3d 3373 enum resume_kind last_resume_kind;
12d9289a 3374 struct lwp_info *lp;
12d9289a 3375 int status;
d6b0e80f 3376
01124a23 3377 if (debug_linux_nat)
b84876c2
PA
3378 fprintf_unfiltered (gdb_stdlog, "LLW: enter\n");
3379
f973ed9c
DJ
3380 /* The first time we get here after starting a new inferior, we may
3381 not have added it to the LWP list yet - this is the earliest
3382 moment at which we know its PID. */
d90e17a7 3383 if (ptid_is_pid (inferior_ptid))
f973ed9c 3384 {
27c9d204
PA
3385 /* Upgrade the main thread's ptid. */
3386 thread_change_ptid (inferior_ptid,
dfd4cc63
LM
3387 ptid_build (ptid_get_pid (inferior_ptid),
3388 ptid_get_pid (inferior_ptid), 0));
27c9d204 3389
26cb8b7c 3390 lp = add_initial_lwp (inferior_ptid);
f973ed9c
DJ
3391 lp->resumed = 1;
3392 }
3393
12696c10 3394 /* Make sure SIGCHLD is blocked until the sigsuspend below. */
7feb7d06 3395 block_child_signals (&prev_mask);
d6b0e80f 3396
d6b0e80f 3397 /* First check if there is a LWP with a wait status pending. */
8a99810d
PA
3398 lp = iterate_over_lwps (ptid, status_callback, NULL);
3399 if (lp != NULL)
d6b0e80f
AC
3400 {
3401 if (debug_linux_nat)
d6b0e80f
AC
3402 fprintf_unfiltered (gdb_stdlog,
3403 "LLW: Using pending wait status %s for %s.\n",
ca2163eb 3404 status_to_str (lp->status),
d6b0e80f 3405 target_pid_to_str (lp->ptid));
d6b0e80f
AC
3406 }
3407
d9d41e78 3408 if (!target_is_async_p ())
b84876c2
PA
3409 {
3410 /* Causes SIGINT to be passed on to the attached process. */
3411 set_sigint_trap ();
b84876c2 3412 }
d6b0e80f 3413
9c02b525
PA
3414 /* But if we don't find a pending event, we'll have to wait. Always
3415 pull all events out of the kernel. We'll randomly select an
3416 event LWP out of all that have events, to prevent starvation. */
7feb7d06 3417
d90e17a7 3418 while (lp == NULL)
d6b0e80f
AC
3419 {
3420 pid_t lwpid;
3421
0e5bf2a8
PA
3422 /* Always use -1 and WNOHANG, due to a couple of kernel/ptrace
3423 quirks:
3424
3425 - If the thread group leader exits while other threads in the
3426 thread group still exist, waitpid(TGID, ...) hangs. That
3427 waitpid won't return an exit status until the other threads
3428 in the group are reaped.
3429
3430 - When a non-leader thread execs, that thread just vanishes
3431 without reporting an exit (so we'd hang if we waited for it
3432 explicitly in that case). The exec event is reported to
3433 the TGID pid. */
3434
3435 errno = 0;
3436 lwpid = my_waitpid (-1, &status, __WCLONE | WNOHANG);
3437 if (lwpid == 0 || (lwpid == -1 && errno == ECHILD))
3438 lwpid = my_waitpid (-1, &status, WNOHANG);
3439
3440 if (debug_linux_nat)
3441 fprintf_unfiltered (gdb_stdlog,
3442 "LNW: waitpid(-1, ...) returned %d, %s\n",
3443 lwpid, errno ? safe_strerror (errno) : "ERRNO-OK");
b84876c2 3444
d6b0e80f
AC
3445 if (lwpid > 0)
3446 {
d6b0e80f
AC
3447 if (debug_linux_nat)
3448 {
3449 fprintf_unfiltered (gdb_stdlog,
3450 "LLW: waitpid %ld received %s\n",
3451 (long) lwpid, status_to_str (status));
3452 }
3453
9c02b525 3454 linux_nat_filter_event (lwpid, status);
0e5bf2a8
PA
3455 /* Retry until nothing comes out of waitpid. A single
3456 SIGCHLD can indicate more than one child stopped. */
3457 continue;
d6b0e80f
AC
3458 }
3459
20ba1ce6
PA
3460 /* Now that we've pulled all events out of the kernel, resume
3461 LWPs that don't have an interesting event to report. */
3462 iterate_over_lwps (minus_one_ptid,
3463 resume_stopped_resumed_lwps, &minus_one_ptid);
3464
3465 /* ... and find an LWP with a status to report to the core, if
3466 any. */
9c02b525
PA
3467 lp = iterate_over_lwps (ptid, status_callback, NULL);
3468 if (lp != NULL)
3469 break;
3470
0e5bf2a8
PA
3471 /* Check for zombie thread group leaders. Those can't be reaped
3472 until all other threads in the thread group are. */
3473 check_zombie_leaders ();
d6b0e80f 3474
0e5bf2a8
PA
3475 /* If there are no resumed children left, bail. We'd be stuck
3476 forever in the sigsuspend call below otherwise. */
3477 if (iterate_over_lwps (ptid, resumed_callback, NULL) == NULL)
3478 {
3479 if (debug_linux_nat)
3480 fprintf_unfiltered (gdb_stdlog, "LLW: exit (no resumed LWP)\n");
b84876c2 3481
0e5bf2a8 3482 ourstatus->kind = TARGET_WAITKIND_NO_RESUMED;
b84876c2 3483
d9d41e78 3484 if (!target_is_async_p ())
0e5bf2a8 3485 clear_sigint_trap ();
b84876c2 3486
0e5bf2a8
PA
3487 restore_child_signals_mask (&prev_mask);
3488 return minus_one_ptid;
d6b0e80f 3489 }
28736962 3490
0e5bf2a8
PA
3491 /* No interesting event to report to the core. */
3492
3493 if (target_options & TARGET_WNOHANG)
3494 {
01124a23 3495 if (debug_linux_nat)
28736962
PA
3496 fprintf_unfiltered (gdb_stdlog, "LLW: exit (ignore)\n");
3497
0e5bf2a8 3498 ourstatus->kind = TARGET_WAITKIND_IGNORE;
28736962
PA
3499 restore_child_signals_mask (&prev_mask);
3500 return minus_one_ptid;
3501 }
d6b0e80f
AC
3502
3503 /* We shouldn't end up here unless we want to try again. */
d90e17a7 3504 gdb_assert (lp == NULL);
0e5bf2a8
PA
3505
3506 /* Block until we get an event reported with SIGCHLD. */
d36bf488
DE
3507 if (debug_linux_nat)
3508 fprintf_unfiltered (gdb_stdlog, "LNW: about to sigsuspend\n");
0e5bf2a8 3509 sigsuspend (&suspend_mask);
d6b0e80f
AC
3510 }
3511
d9d41e78 3512 if (!target_is_async_p ())
d26b5354 3513 clear_sigint_trap ();
d6b0e80f
AC
3514
3515 gdb_assert (lp);
3516
ca2163eb
PA
3517 status = lp->status;
3518 lp->status = 0;
3519
4c28f408
PA
3520 if (!non_stop)
3521 {
3522 /* Now stop all other LWP's ... */
d90e17a7 3523 iterate_over_lwps (minus_one_ptid, stop_callback, NULL);
4c28f408
PA
3524
3525 /* ... and wait until all of them have reported back that
3526 they're no longer running. */
d90e17a7 3527 iterate_over_lwps (minus_one_ptid, stop_wait_callback, NULL);
9c02b525
PA
3528 }
3529
3530 /* If we're not waiting for a specific LWP, choose an event LWP from
3531 among those that have had events. Giving equal priority to all
3532 LWPs that have had events helps prevent starvation. */
3533 if (ptid_equal (ptid, minus_one_ptid) || ptid_is_pid (ptid))
3534 select_event_lwp (ptid, &lp, &status);
3535
3536 gdb_assert (lp != NULL);
3537
3538 /* Now that we've selected our final event LWP, un-adjust its PC if
faf09f01
PA
3539 it was a software breakpoint, and we can't reliably support the
3540 "stopped by software breakpoint" stop reason. */
3541 if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
3542 && !USE_SIGTRAP_SIGINFO)
9c02b525
PA
3543 {
3544 struct regcache *regcache = get_thread_regcache (lp->ptid);
3545 struct gdbarch *gdbarch = get_regcache_arch (regcache);
527a273a 3546 int decr_pc = gdbarch_decr_pc_after_break (gdbarch);
4c28f408 3547
9c02b525
PA
3548 if (decr_pc != 0)
3549 {
3550 CORE_ADDR pc;
d6b0e80f 3551
9c02b525
PA
3552 pc = regcache_read_pc (regcache);
3553 regcache_write_pc (regcache, pc + decr_pc);
3554 }
3555 }
e3e9f5a2 3556
9c02b525
PA
3557 /* We'll need this to determine whether to report a SIGSTOP as
3558 GDB_SIGNAL_0. Need to take a copy because resume_clear_callback
3559 clears it. */
3560 last_resume_kind = lp->last_resume_kind;
4b60df3d 3561
9c02b525
PA
3562 if (!non_stop)
3563 {
e3e9f5a2
PA
3564 /* In all-stop, from the core's perspective, all LWPs are now
3565 stopped until a new resume action is sent over. */
3566 iterate_over_lwps (minus_one_ptid, resume_clear_callback, NULL);
3567 }
3568 else
25289eb2 3569 {
4b60df3d 3570 resume_clear_callback (lp, NULL);
25289eb2 3571 }
d6b0e80f 3572
26ab7092 3573 if (linux_nat_status_is_event (status))
d6b0e80f 3574 {
d6b0e80f
AC
3575 if (debug_linux_nat)
3576 fprintf_unfiltered (gdb_stdlog,
4fdebdd0
PA
3577 "LLW: trap ptid is %s.\n",
3578 target_pid_to_str (lp->ptid));
d6b0e80f 3579 }
d6b0e80f
AC
3580
3581 if (lp->waitstatus.kind != TARGET_WAITKIND_IGNORE)
3582 {
3583 *ourstatus = lp->waitstatus;
3584 lp->waitstatus.kind = TARGET_WAITKIND_IGNORE;
3585 }
3586 else
3587 store_waitstatus (ourstatus, status);
3588
01124a23 3589 if (debug_linux_nat)
b84876c2
PA
3590 fprintf_unfiltered (gdb_stdlog, "LLW: exit\n");
3591
7feb7d06 3592 restore_child_signals_mask (&prev_mask);
1e225492 3593
4b60df3d 3594 if (last_resume_kind == resume_stop
25289eb2
PA
3595 && ourstatus->kind == TARGET_WAITKIND_STOPPED
3596 && WSTOPSIG (status) == SIGSTOP)
3597 {
3598 /* A thread that has been requested to stop by GDB with
3599 target_stop, and it stopped cleanly, so report as SIG0. The
3600 use of SIGSTOP is an implementation detail. */
a493e3e2 3601 ourstatus->value.sig = GDB_SIGNAL_0;
25289eb2
PA
3602 }
3603
1e225492
JK
3604 if (ourstatus->kind == TARGET_WAITKIND_EXITED
3605 || ourstatus->kind == TARGET_WAITKIND_SIGNALLED)
3606 lp->core = -1;
3607 else
2e794194 3608 lp->core = linux_common_core_of_thread (lp->ptid);
1e225492 3609
f973ed9c 3610 return lp->ptid;
d6b0e80f
AC
3611}
3612
e3e9f5a2
PA
3613/* Resume LWPs that are currently stopped without any pending status
3614 to report, but are resumed from the core's perspective. */
3615
3616static int
3617resume_stopped_resumed_lwps (struct lwp_info *lp, void *data)
3618{
3619 ptid_t *wait_ptid_p = data;
3620
3621 if (lp->stopped
3622 && lp->resumed
8a99810d 3623 && !lwp_status_pending_p (lp))
e3e9f5a2 3624 {
336060f3
PA
3625 struct regcache *regcache = get_thread_regcache (lp->ptid);
3626 struct gdbarch *gdbarch = get_regcache_arch (regcache);
336060f3 3627
23f238d3 3628 TRY
e3e9f5a2 3629 {
23f238d3
PA
3630 CORE_ADDR pc = regcache_read_pc (regcache);
3631 int leave_stopped = 0;
e3e9f5a2 3632
23f238d3
PA
3633 /* Don't bother if there's a breakpoint at PC that we'd hit
3634 immediately, and we're not waiting for this LWP. */
3635 if (!ptid_match (lp->ptid, *wait_ptid_p))
3636 {
3637 if (breakpoint_inserted_here_p (get_regcache_aspace (regcache), pc))
3638 leave_stopped = 1;
3639 }
e3e9f5a2 3640
23f238d3
PA
3641 if (!leave_stopped)
3642 {
3643 if (debug_linux_nat)
3644 fprintf_unfiltered (gdb_stdlog,
3645 "RSRL: resuming stopped-resumed LWP %s at "
3646 "%s: step=%d\n",
3647 target_pid_to_str (lp->ptid),
3648 paddress (gdbarch, pc),
3649 lp->step);
3650
3651 linux_resume_one_lwp_throw (lp, lp->step, GDB_SIGNAL_0);
3652 }
3653 }
3654 CATCH (ex, RETURN_MASK_ERROR)
3655 {
3656 if (!check_ptrace_stopped_lwp_gone (lp))
3657 throw_exception (ex);
3658 }
3659 END_CATCH
e3e9f5a2
PA
3660 }
3661
3662 return 0;
3663}
3664
7feb7d06
PA
3665static ptid_t
3666linux_nat_wait (struct target_ops *ops,
47608cb1
PA
3667 ptid_t ptid, struct target_waitstatus *ourstatus,
3668 int target_options)
7feb7d06
PA
3669{
3670 ptid_t event_ptid;
3671
3672 if (debug_linux_nat)
09826ec5
PA
3673 {
3674 char *options_string;
3675
3676 options_string = target_options_to_string (target_options);
3677 fprintf_unfiltered (gdb_stdlog,
3678 "linux_nat_wait: [%s], [%s]\n",
3679 target_pid_to_str (ptid),
3680 options_string);
3681 xfree (options_string);
3682 }
7feb7d06
PA
3683
3684 /* Flush the async file first. */
d9d41e78 3685 if (target_is_async_p ())
7feb7d06
PA
3686 async_file_flush ();
3687
e3e9f5a2
PA
3688 /* Resume LWPs that are currently stopped without any pending status
3689 to report, but are resumed from the core's perspective. LWPs get
3690 in this state if we find them stopping at a time we're not
3691 interested in reporting the event (target_wait on a
3692 specific_process, for example, see linux_nat_wait_1), and
3693 meanwhile the event became uninteresting. Don't bother resuming
3694 LWPs we're not going to wait for if they'd stop immediately. */
3695 if (non_stop)
3696 iterate_over_lwps (minus_one_ptid, resume_stopped_resumed_lwps, &ptid);
3697
47608cb1 3698 event_ptid = linux_nat_wait_1 (ops, ptid, ourstatus, target_options);
7feb7d06
PA
3699
3700 /* If we requested any event, and something came out, assume there
3701 may be more. If we requested a specific lwp or process, also
3702 assume there may be more. */
d9d41e78 3703 if (target_is_async_p ()
6953d224
PA
3704 && ((ourstatus->kind != TARGET_WAITKIND_IGNORE
3705 && ourstatus->kind != TARGET_WAITKIND_NO_RESUMED)
7feb7d06
PA
3706 || !ptid_equal (ptid, minus_one_ptid)))
3707 async_file_mark ();
3708
7feb7d06
PA
3709 return event_ptid;
3710}
3711
d6b0e80f
AC
3712static int
3713kill_callback (struct lwp_info *lp, void *data)
3714{
ed731959
JK
3715 /* PTRACE_KILL may resume the inferior. Send SIGKILL first. */
3716
3717 errno = 0;
69ff6be5 3718 kill_lwp (ptid_get_lwp (lp->ptid), SIGKILL);
ed731959 3719 if (debug_linux_nat)
57745c90
PA
3720 {
3721 int save_errno = errno;
3722
3723 fprintf_unfiltered (gdb_stdlog,
3724 "KC: kill (SIGKILL) %s, 0, 0 (%s)\n",
3725 target_pid_to_str (lp->ptid),
3726 save_errno ? safe_strerror (save_errno) : "OK");
3727 }
ed731959
JK
3728
3729 /* Some kernels ignore even SIGKILL for processes under ptrace. */
3730
d6b0e80f 3731 errno = 0;
dfd4cc63 3732 ptrace (PTRACE_KILL, ptid_get_lwp (lp->ptid), 0, 0);
d6b0e80f 3733 if (debug_linux_nat)
57745c90
PA
3734 {
3735 int save_errno = errno;
3736
3737 fprintf_unfiltered (gdb_stdlog,
3738 "KC: PTRACE_KILL %s, 0, 0 (%s)\n",
3739 target_pid_to_str (lp->ptid),
3740 save_errno ? safe_strerror (save_errno) : "OK");
3741 }
d6b0e80f
AC
3742
3743 return 0;
3744}
3745
3746static int
3747kill_wait_callback (struct lwp_info *lp, void *data)
3748{
3749 pid_t pid;
3750
3751 /* We must make sure that there are no pending events (delayed
3752 SIGSTOPs, pending SIGTRAPs, etc.) to make sure the current
3753 program doesn't interfere with any following debugging session. */
3754
3755 /* For cloned processes we must check both with __WCLONE and
3756 without, since the exit status of a cloned process isn't reported
3757 with __WCLONE. */
3758 if (lp->cloned)
3759 {
3760 do
3761 {
dfd4cc63 3762 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, __WCLONE);
e85a822c 3763 if (pid != (pid_t) -1)
d6b0e80f 3764 {
e85a822c
DJ
3765 if (debug_linux_nat)
3766 fprintf_unfiltered (gdb_stdlog,
3767 "KWC: wait %s received unknown.\n",
3768 target_pid_to_str (lp->ptid));
3769 /* The Linux kernel sometimes fails to kill a thread
3770 completely after PTRACE_KILL; that goes from the stop
3771 point in do_fork out to the one in
3772 get_signal_to_deliver and waits again. So kill it
3773 again. */
3774 kill_callback (lp, NULL);
d6b0e80f
AC
3775 }
3776 }
dfd4cc63 3777 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3778
3779 gdb_assert (pid == -1 && errno == ECHILD);
3780 }
3781
3782 do
3783 {
dfd4cc63 3784 pid = my_waitpid (ptid_get_lwp (lp->ptid), NULL, 0);
e85a822c 3785 if (pid != (pid_t) -1)
d6b0e80f 3786 {
e85a822c
DJ
3787 if (debug_linux_nat)
3788 fprintf_unfiltered (gdb_stdlog,
3789 "KWC: wait %s received unk.\n",
3790 target_pid_to_str (lp->ptid));
3791 /* See the call to kill_callback above. */
3792 kill_callback (lp, NULL);
d6b0e80f
AC
3793 }
3794 }
dfd4cc63 3795 while (pid == ptid_get_lwp (lp->ptid));
d6b0e80f
AC
3796
3797 gdb_assert (pid == -1 && errno == ECHILD);
3798 return 0;
3799}
3800
3801static void
7d85a9c0 3802linux_nat_kill (struct target_ops *ops)
d6b0e80f 3803{
f973ed9c
DJ
3804 struct target_waitstatus last;
3805 ptid_t last_ptid;
3806 int status;
d6b0e80f 3807
f973ed9c
DJ
3808 /* If we're stopped while forking and we haven't followed yet,
3809 kill the other task. We need to do this first because the
3810 parent will be sleeping if this is a vfork. */
d6b0e80f 3811
f973ed9c 3812 get_last_target_status (&last_ptid, &last);
d6b0e80f 3813
f973ed9c
DJ
3814 if (last.kind == TARGET_WAITKIND_FORKED
3815 || last.kind == TARGET_WAITKIND_VFORKED)
3816 {
dfd4cc63 3817 ptrace (PT_KILL, ptid_get_pid (last.value.related_pid), 0, 0);
f973ed9c 3818 wait (&status);
26cb8b7c
PA
3819
3820 /* Let the arch-specific native code know this process is
3821 gone. */
dfd4cc63 3822 linux_nat_forget_process (ptid_get_pid (last.value.related_pid));
f973ed9c
DJ
3823 }
3824
3825 if (forks_exist_p ())
7feb7d06 3826 linux_fork_killall ();
f973ed9c
DJ
3827 else
3828 {
d90e17a7 3829 ptid_t ptid = pid_to_ptid (ptid_get_pid (inferior_ptid));
e0881a8e 3830
4c28f408
PA
3831 /* Stop all threads before killing them, since ptrace requires
3832 that the thread is stopped to successfully PTRACE_KILL. */
d90e17a7 3833 iterate_over_lwps (ptid, stop_callback, NULL);
4c28f408
PA
3834 /* ... and wait until all of them have reported back that
3835 they're no longer running. */
d90e17a7 3836 iterate_over_lwps (ptid, stop_wait_callback, NULL);
4c28f408 3837
f973ed9c 3838 /* Kill all LWP's ... */
d90e17a7 3839 iterate_over_lwps (ptid, kill_callback, NULL);
f973ed9c
DJ
3840
3841 /* ... and wait until we've flushed all events. */
d90e17a7 3842 iterate_over_lwps (ptid, kill_wait_callback, NULL);
f973ed9c
DJ
3843 }
3844
3845 target_mourn_inferior ();
d6b0e80f
AC
3846}
3847
3848static void
136d6dae 3849linux_nat_mourn_inferior (struct target_ops *ops)
d6b0e80f 3850{
26cb8b7c
PA
3851 int pid = ptid_get_pid (inferior_ptid);
3852
3853 purge_lwp_list (pid);
d6b0e80f 3854
f973ed9c 3855 if (! forks_exist_p ())
d90e17a7
PA
3856 /* Normal case, no other forks available. */
3857 linux_ops->to_mourn_inferior (ops);
f973ed9c
DJ
3858 else
3859 /* Multi-fork case. The current inferior_ptid has exited, but
3860 there are other viable forks to debug. Delete the exiting
3861 one and context-switch to the first available. */
3862 linux_fork_mourn_inferior ();
26cb8b7c
PA
3863
3864 /* Let the arch-specific native code know this process is gone. */
3865 linux_nat_forget_process (pid);
d6b0e80f
AC
3866}
3867
5b009018
PA
3868/* Convert a native/host siginfo object, into/from the siginfo in the
3869 layout of the inferiors' architecture. */
3870
3871static void
a5362b9a 3872siginfo_fixup (siginfo_t *siginfo, gdb_byte *inf_siginfo, int direction)
5b009018
PA
3873{
3874 int done = 0;
3875
3876 if (linux_nat_siginfo_fixup != NULL)
3877 done = linux_nat_siginfo_fixup (siginfo, inf_siginfo, direction);
3878
3879 /* If there was no callback, or the callback didn't do anything,
3880 then just do a straight memcpy. */
3881 if (!done)
3882 {
3883 if (direction == 1)
a5362b9a 3884 memcpy (siginfo, inf_siginfo, sizeof (siginfo_t));
5b009018 3885 else
a5362b9a 3886 memcpy (inf_siginfo, siginfo, sizeof (siginfo_t));
5b009018
PA
3887 }
3888}
3889
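/* Illustrative sketch appended to this annotated listing (not part of
   linux-nat.c): the rough shape of a siginfo fixup callback of the kind
   registered via linux_nat_set_siginfo_fixup further below.  The
   compat_siginfo_32 layout here is hypothetical and covers only the
   three leading fields; real fixups are expected to live in the
   arch-specific native files and to convert the full union.  */

#include <signal.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical 32-bit inferior layout of the leading siginfo fields.  */
struct compat_siginfo_32
{
  int32_t si_signo;
  int32_t si_errno;
  int32_t si_code;
  /* ... the union of signal-specific fields would follow ...  */
};

/* DIRECTION 0 converts the native (ptrace) object into the inferior's
   layout; DIRECTION 1 converts the inferior layout back into the
   native object.  Return 1 to tell siginfo_fixup the conversion was
   handled.  INF stands in for the gdb_byte buffer.  */

static int
sketch_siginfo_fixup (siginfo_t *native, unsigned char *inf, int direction)
{
  struct compat_siginfo_32 compat;

  if (direction == 0)
    {
      memset (&compat, 0, sizeof (compat));
      compat.si_signo = native->si_signo;
      compat.si_errno = native->si_errno;
      compat.si_code = native->si_code;
      memcpy (inf, &compat, sizeof (compat));
    }
  else
    {
      memcpy (&compat, inf, sizeof (compat));
      native->si_signo = compat.si_signo;
      native->si_errno = compat.si_errno;
      native->si_code = compat.si_code;
    }

  return 1;
}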
9b409511 3890static enum target_xfer_status
4aa995e1
PA
3891linux_xfer_siginfo (struct target_ops *ops, enum target_object object,
3892 const char *annex, gdb_byte *readbuf,
9b409511
YQ
3893 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
3894 ULONGEST *xfered_len)
4aa995e1 3895{
4aa995e1 3896 int pid;
a5362b9a
TS
3897 siginfo_t siginfo;
3898 gdb_byte inf_siginfo[sizeof (siginfo_t)];
4aa995e1
PA
3899
3900 gdb_assert (object == TARGET_OBJECT_SIGNAL_INFO);
3901 gdb_assert (readbuf || writebuf);
3902
dfd4cc63 3903 pid = ptid_get_lwp (inferior_ptid);
4aa995e1 3904 if (pid == 0)
dfd4cc63 3905 pid = ptid_get_pid (inferior_ptid);
4aa995e1
PA
3906
3907 if (offset > sizeof (siginfo))
2ed4b548 3908 return TARGET_XFER_E_IO;
4aa995e1
PA
3909
3910 errno = 0;
3911 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3912 if (errno != 0)
2ed4b548 3913 return TARGET_XFER_E_IO;
4aa995e1 3914
5b009018
PA
3915 /* When GDB is built as a 64-bit application, ptrace writes into
3916 SIGINFO an object with 64-bit layout. Since debugging a 32-bit
3917 inferior with a 64-bit GDB should look the same as debugging it
3918 with a 32-bit GDB, we need to convert it. GDB core always sees
3919 the converted layout, so any read/write will have to be done
3920 post-conversion. */
3921 siginfo_fixup (&siginfo, inf_siginfo, 0);
3922
4aa995e1
PA
3923 if (offset + len > sizeof (siginfo))
3924 len = sizeof (siginfo) - offset;
3925
3926 if (readbuf != NULL)
5b009018 3927 memcpy (readbuf, inf_siginfo + offset, len);
4aa995e1
PA
3928 else
3929 {
5b009018
PA
3930 memcpy (inf_siginfo + offset, writebuf, len);
3931
3932 /* Convert back to ptrace layout before flushing it out. */
3933 siginfo_fixup (&siginfo, inf_siginfo, 1);
3934
4aa995e1
PA
3935 errno = 0;
3936 ptrace (PTRACE_SETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, &siginfo);
3937 if (errno != 0)
2ed4b548 3938 return TARGET_XFER_E_IO;
4aa995e1
PA
3939 }
3940
9b409511
YQ
3941 *xfered_len = len;
3942 return TARGET_XFER_OK;
4aa995e1
PA
3943}
3944
9b409511 3945static enum target_xfer_status
10d6c8cd
DJ
3946linux_nat_xfer_partial (struct target_ops *ops, enum target_object object,
3947 const char *annex, gdb_byte *readbuf,
3948 const gdb_byte *writebuf,
9b409511 3949 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
d6b0e80f 3950{
4aa995e1 3951 struct cleanup *old_chain;
9b409511 3952 enum target_xfer_status xfer;
d6b0e80f 3953
4aa995e1
PA
3954 if (object == TARGET_OBJECT_SIGNAL_INFO)
3955 return linux_xfer_siginfo (ops, object, annex, readbuf, writebuf,
9b409511 3956 offset, len, xfered_len);
4aa995e1 3957
c35b1492
PA
3958 /* The target is connected but no live inferior is selected. Pass
3959 this request down to a lower stratum (e.g., the executable
3960 file). */
3961 if (object == TARGET_OBJECT_MEMORY && ptid_equal (inferior_ptid, null_ptid))
9b409511 3962 return TARGET_XFER_EOF;
c35b1492 3963
4aa995e1
PA
3964 old_chain = save_inferior_ptid ();
3965
dfd4cc63
LM
3966 if (ptid_lwp_p (inferior_ptid))
3967 inferior_ptid = pid_to_ptid (ptid_get_lwp (inferior_ptid));
d6b0e80f 3968
10d6c8cd 3969 xfer = linux_ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 3970 offset, len, xfered_len);
d6b0e80f
AC
3971
3972 do_cleanups (old_chain);
3973 return xfer;
3974}
3975
3976static int
28439f5e 3977linux_thread_alive (ptid_t ptid)
d6b0e80f 3978{
8c6a60d1 3979 int err, tmp_errno;
4c28f408 3980
dfd4cc63 3981 gdb_assert (ptid_lwp_p (ptid));
d6b0e80f 3982
4c28f408
PA
3983 /* Send signal 0 instead of anything ptrace, because ptracing a
3984 running thread errors out claiming that the thread doesn't
3985 exist. */
dfd4cc63 3986 err = kill_lwp (ptid_get_lwp (ptid), 0);
8c6a60d1 3987 tmp_errno = errno;
d6b0e80f
AC
3988 if (debug_linux_nat)
3989 fprintf_unfiltered (gdb_stdlog,
4c28f408 3990 "LLTA: KILL(SIG0) %s (%s)\n",
d6b0e80f 3991 target_pid_to_str (ptid),
8c6a60d1 3992 err ? safe_strerror (tmp_errno) : "OK");
9c0dd46b 3993
4c28f408 3994 if (err != 0)
d6b0e80f
AC
3995 return 0;
3996
3997 return 1;
3998}
3999
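/* Illustrative sketch appended to this annotated listing (not part of
   linux-nat.c): probing whether a specific LWP still exists by sending
   it signal 0, as linux_thread_alive above does through kill_lwp.
   kill_lwp is assumed to prefer the tkill syscall when available so
   the probe targets one thread rather than the whole process; plain
   kill() is the portable fallback.  */

#include <errno.h>
#include <signal.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

/* Return 1 if LWP appears to exist, 0 if it is gone (ESRCH).  */

static int
sketch_lwp_alive (pid_t lwp)
{
  int ret;

  errno = 0;
#ifdef __NR_tkill
  /* Signal 0 performs only the existence/permission checks.  */
  ret = syscall (__NR_tkill, lwp, 0);
#else
  ret = kill (lwp, 0);
#endif

  /* EPERM still means the thread exists; only ESRCH means gone.  */
  return ret == 0 || errno != ESRCH;
}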
28439f5e
PA
4000static int
4001linux_nat_thread_alive (struct target_ops *ops, ptid_t ptid)
4002{
4003 return linux_thread_alive (ptid);
4004}
4005
d6b0e80f 4006static char *
117de6a9 4007linux_nat_pid_to_str (struct target_ops *ops, ptid_t ptid)
d6b0e80f
AC
4008{
4009 static char buf[64];
4010
dfd4cc63
LM
4011 if (ptid_lwp_p (ptid)
4012 && (ptid_get_pid (ptid) != ptid_get_lwp (ptid)
4013 || num_lwps (ptid_get_pid (ptid)) > 1))
d6b0e80f 4014 {
dfd4cc63 4015 snprintf (buf, sizeof (buf), "LWP %ld", ptid_get_lwp (ptid));
d6b0e80f
AC
4016 return buf;
4017 }
4018
4019 return normal_pid_to_str (ptid);
4020}
4021
4694da01 4022static char *
503a628d 4023linux_nat_thread_name (struct target_ops *self, struct thread_info *thr)
4694da01
TT
4024{
4025 int pid = ptid_get_pid (thr->ptid);
4026 long lwp = ptid_get_lwp (thr->ptid);
4027#define FORMAT "/proc/%d/task/%ld/comm"
4028 char buf[sizeof (FORMAT) + 30];
4029 FILE *comm_file;
4030 char *result = NULL;
4031
4032 snprintf (buf, sizeof (buf), FORMAT, pid, lwp);
614c279d 4033 comm_file = gdb_fopen_cloexec (buf, "r");
4694da01
TT
4034 if (comm_file)
4035 {
4036 /* Not exported by the kernel, so we define it here. */
4037#define COMM_LEN 16
4038 static char line[COMM_LEN + 1];
4039
4040 if (fgets (line, sizeof (line), comm_file))
4041 {
4042 char *nl = strchr (line, '\n');
4043
4044 if (nl)
4045 *nl = '\0';
4046 if (*line != '\0')
4047 result = line;
4048 }
4049
4050 fclose (comm_file);
4051 }
4052
4053#undef COMM_LEN
4054#undef FORMAT
4055
4056 return result;
4057}
4058
dba24537
AC
4059/* Accepts an integer PID; Returns a string representing a file that
4060 can be opened to get the symbols for the child process. */
4061
6d8fd2b7 4062static char *
8dd27370 4063linux_child_pid_to_exec_file (struct target_ops *self, int pid)
dba24537 4064{
b4ab256d
HZ
4065 static char buf[PATH_MAX];
4066 char name[PATH_MAX];
dba24537 4067
b4ab256d
HZ
4068 xsnprintf (name, PATH_MAX, "/proc/%d/exe", pid);
4069 memset (buf, 0, PATH_MAX);
4070 if (readlink (name, buf, PATH_MAX - 1) <= 0)
4071 strcpy (buf, name);
dba24537 4072
b4ab256d 4073 return buf;
dba24537
AC
4074}
4075
10d6c8cd
DJ
4076/* Implement the to_xfer_partial interface for memory reads using the /proc
4077 filesystem. Because we can use a single read() call for /proc, this
4078 can be much more efficient than banging away at PTRACE_PEEKTEXT,
4079 but it doesn't support writes. */
4080
9b409511 4081static enum target_xfer_status
10d6c8cd
DJ
4082linux_proc_xfer_partial (struct target_ops *ops, enum target_object object,
4083 const char *annex, gdb_byte *readbuf,
4084 const gdb_byte *writebuf,
9b409511 4085 ULONGEST offset, LONGEST len, ULONGEST *xfered_len)
dba24537 4086{
10d6c8cd
DJ
4087 LONGEST ret;
4088 int fd;
dba24537
AC
4089 char filename[64];
4090
10d6c8cd 4091 if (object != TARGET_OBJECT_MEMORY || !readbuf)
dba24537
AC
4092 return 0;
4093
4094 /* Don't bother for one word. */
4095 if (len < 3 * sizeof (long))
9b409511 4096 return TARGET_XFER_EOF;
dba24537
AC
4097
4098 /* We could keep this file open and cache it - possibly one per
4099 thread. That requires some juggling, but is even faster. */
cde33bf1
YQ
4100 xsnprintf (filename, sizeof filename, "/proc/%d/mem",
4101 ptid_get_pid (inferior_ptid));
614c279d 4102 fd = gdb_open_cloexec (filename, O_RDONLY | O_LARGEFILE, 0);
dba24537 4103 if (fd == -1)
9b409511 4104 return TARGET_XFER_EOF;
dba24537
AC
4105
4106 /* If pread64 is available, use it. It's faster if the kernel
4107 supports it (only one syscall), and it's 64-bit safe even on
4108 32-bit platforms (for instance, SPARC debugging a SPARC64
4109 application). */
4110#ifdef HAVE_PREAD64
10d6c8cd 4111 if (pread64 (fd, readbuf, len, offset) != len)
dba24537 4112#else
10d6c8cd 4113 if (lseek (fd, offset, SEEK_SET) == -1 || read (fd, readbuf, len) != len)
dba24537
AC
4114#endif
4115 ret = 0;
4116 else
4117 ret = len;
4118
4119 close (fd);
9b409511
YQ
4120
4121 if (ret == 0)
4122 return TARGET_XFER_EOF;
4123 else
4124 {
4125 *xfered_len = ret;
4126 return TARGET_XFER_OK;
4127 }
dba24537
AC
4128}
4129
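/* Illustrative sketch appended to this annotated listing (not part of
   linux-nat.c): the /proc/PID/mem read that linux_proc_xfer_partial
   above performs, reduced to a standalone helper.  The target process
   must already be ptrace-stopped (or otherwise readable under the
   kernel's ptrace access rules) for the read to succeed.  */

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Read LEN bytes at address ADDR of process PID into BUF.
   Return the number of bytes read, or -1 on error.  */

static ssize_t
sketch_read_inferior_memory (pid_t pid, unsigned long addr,
			     void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t ret;

  snprintf (filename, sizeof (filename), "/proc/%d/mem", (int) pid);
  fd = open (filename, O_RDONLY);
  if (fd == -1)
    return -1;

  /* A single pread covers the whole request; on a 64-bit build the
     offset is wide enough even for a 64-bit inferior.  */
  ret = pread (fd, buf, len, (off_t) addr);

  close (fd);
  return ret;
}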
efcbbd14
UW
4130
4131/* Enumerate spufs IDs for process PID. */
4132static LONGEST
b55e14c7 4133spu_enumerate_spu_ids (int pid, gdb_byte *buf, ULONGEST offset, ULONGEST len)
efcbbd14 4134{
f5656ead 4135 enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());
efcbbd14
UW
4136 LONGEST pos = 0;
4137 LONGEST written = 0;
4138 char path[128];
4139 DIR *dir;
4140 struct dirent *entry;
4141
4142 xsnprintf (path, sizeof path, "/proc/%d/fd", pid);
4143 dir = opendir (path);
4144 if (!dir)
4145 return -1;
4146
4147 rewinddir (dir);
4148 while ((entry = readdir (dir)) != NULL)
4149 {
4150 struct stat st;
4151 struct statfs stfs;
4152 int fd;
4153
4154 fd = atoi (entry->d_name);
4155 if (!fd)
4156 continue;
4157
4158 xsnprintf (path, sizeof path, "/proc/%d/fd/%d", pid, fd);
4159 if (stat (path, &st) != 0)
4160 continue;
4161 if (!S_ISDIR (st.st_mode))
4162 continue;
4163
4164 if (statfs (path, &stfs) != 0)
4165 continue;
4166 if (stfs.f_type != SPUFS_MAGIC)
4167 continue;
4168
4169 if (pos >= offset && pos + 4 <= offset + len)
4170 {
4171 store_unsigned_integer (buf + pos - offset, 4, byte_order, fd);
4172 written += 4;
4173 }
4174 pos += 4;
4175 }
4176
4177 closedir (dir);
4178 return written;
4179}
4180
4181/* Implement the to_xfer_partial interface for the TARGET_OBJECT_SPU
4182 object type, using the /proc file system. */
9b409511
YQ
4183
4184static enum target_xfer_status
efcbbd14
UW
4185linux_proc_xfer_spu (struct target_ops *ops, enum target_object object,
4186 const char *annex, gdb_byte *readbuf,
4187 const gdb_byte *writebuf,
9b409511 4188 ULONGEST offset, ULONGEST len, ULONGEST *xfered_len)
efcbbd14
UW
4189{
4190 char buf[128];
4191 int fd = 0;
4192 int ret = -1;
dfd4cc63 4193 int pid = ptid_get_pid (inferior_ptid);
efcbbd14
UW
4194
4195 if (!annex)
4196 {
4197 if (!readbuf)
2ed4b548 4198 return TARGET_XFER_E_IO;
efcbbd14 4199 else
9b409511
YQ
4200 {
4201 LONGEST l = spu_enumerate_spu_ids (pid, readbuf, offset, len);
4202
4203 if (l < 0)
4204 return TARGET_XFER_E_IO;
4205 else if (l == 0)
4206 return TARGET_XFER_EOF;
4207 else
4208 {
4209 *xfered_len = (ULONGEST) l;
4210 return TARGET_XFER_OK;
4211 }
4212 }
efcbbd14
UW
4213 }
4214
4215 xsnprintf (buf, sizeof buf, "/proc/%d/fd/%s", pid, annex);
614c279d 4216 fd = gdb_open_cloexec (buf, writebuf? O_WRONLY : O_RDONLY, 0);
efcbbd14 4217 if (fd <= 0)
2ed4b548 4218 return TARGET_XFER_E_IO;
efcbbd14
UW
4219
4220 if (offset != 0
4221 && lseek (fd, (off_t) offset, SEEK_SET) != (off_t) offset)
4222 {
4223 close (fd);
9b409511 4224 return TARGET_XFER_EOF;
efcbbd14
UW
4225 }
4226
4227 if (writebuf)
4228 ret = write (fd, writebuf, (size_t) len);
4229 else if (readbuf)
4230 ret = read (fd, readbuf, (size_t) len);
4231
4232 close (fd);
9b409511
YQ
4233
4234 if (ret < 0)
4235 return TARGET_XFER_E_IO;
4236 else if (ret == 0)
4237 return TARGET_XFER_EOF;
4238 else
4239 {
4240 *xfered_len = (ULONGEST) ret;
4241 return TARGET_XFER_OK;
4242 }
efcbbd14
UW
4243}
4244
4245
dba24537
AC
4246/* Parse LINE as a signal set and add its set bits to SIGS. */
4247
4248static void
4249add_line_to_sigset (const char *line, sigset_t *sigs)
4250{
4251 int len = strlen (line) - 1;
4252 const char *p;
4253 int signum;
4254
4255 if (line[len] != '\n')
8a3fe4f8 4256 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4257
4258 p = line;
4259 signum = len * 4;
4260 while (len-- > 0)
4261 {
4262 int digit;
4263
4264 if (*p >= '0' && *p <= '9')
4265 digit = *p - '0';
4266 else if (*p >= 'a' && *p <= 'f')
4267 digit = *p - 'a' + 10;
4268 else
8a3fe4f8 4269 error (_("Could not parse signal set: %s"), line);
dba24537
AC
4270
4271 signum -= 4;
4272
4273 if (digit & 1)
4274 sigaddset (sigs, signum + 1);
4275 if (digit & 2)
4276 sigaddset (sigs, signum + 2);
4277 if (digit & 4)
4278 sigaddset (sigs, signum + 3);
4279 if (digit & 8)
4280 sigaddset (sigs, signum + 4);
4281
4282 p++;
4283 }
4284}
4285
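/* Illustrative usage sketch appended to this annotated listing (not
   part of linux-nat.c): decoding one status mask with
   add_line_to_sigset above.  The kernel sets bit (SIG - 1) in these
   hex masks, so the value 0x100 below corresponds to signal 9
   (SIGKILL).  Assumes <signal.h> is available, as it is in this
   file.  */

static void
sketch_decode_sigpnd (void)
{
  sigset_t set;

  sigemptyset (&set);

  /* The caller passes the text after "SigPnd:\t"; the line must end
     with a newline or add_line_to_sigset errors out.  */
  add_line_to_sigset ("0000000000000100\n", &set);

  /* sigismember (&set, 9) now returns 1.  */
}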
4286/* Find process PID's pending signals from /proc/pid/status and set
4287 SIGS to match. */
4288
4289void
3e43a32a
MS
4290linux_proc_pending_signals (int pid, sigset_t *pending,
4291 sigset_t *blocked, sigset_t *ignored)
dba24537
AC
4292{
4293 FILE *procfile;
d8d2a3ee 4294 char buffer[PATH_MAX], fname[PATH_MAX];
7c8a8b04 4295 struct cleanup *cleanup;
dba24537
AC
4296
4297 sigemptyset (pending);
4298 sigemptyset (blocked);
4299 sigemptyset (ignored);
cde33bf1 4300 xsnprintf (fname, sizeof fname, "/proc/%d/status", pid);
614c279d 4301 procfile = gdb_fopen_cloexec (fname, "r");
dba24537 4302 if (procfile == NULL)
8a3fe4f8 4303 error (_("Could not open %s"), fname);
7c8a8b04 4304 cleanup = make_cleanup_fclose (procfile);
dba24537 4305
d8d2a3ee 4306 while (fgets (buffer, PATH_MAX, procfile) != NULL)
dba24537
AC
4307 {
4308 /* Normal queued signals are on the SigPnd line in the status
4309 file. However, 2.6 kernels also have a "shared" pending
4310 queue for delivering signals to a thread group, so check for
4311 a ShdPnd line also.
4312
4313 Unfortunately some Red Hat kernels include the shared pending
4314 queue but not the ShdPnd status field. */
4315
61012eef 4316 if (startswith (buffer, "SigPnd:\t"))
dba24537 4317 add_line_to_sigset (buffer + 8, pending);
61012eef 4318 else if (startswith (buffer, "ShdPnd:\t"))
dba24537 4319 add_line_to_sigset (buffer + 8, pending);
61012eef 4320 else if (startswith (buffer, "SigBlk:\t"))
dba24537 4321 add_line_to_sigset (buffer + 8, blocked);
61012eef 4322 else if (startswith (buffer, "SigIgn:\t"))
dba24537
AC
4323 add_line_to_sigset (buffer + 8, ignored);
4324 }
4325
7c8a8b04 4326 do_cleanups (cleanup);
dba24537
AC
4327}
4328
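/* Illustrative usage sketch appended to this annotated listing (not
   part of linux-nat.c): how a caller might consult the sets filled in
   by linux_proc_pending_signals above, e.g. to see whether a SIGINT is
   already queued for PID before sending another one.  Assumes the
   declaration from linux-nat.h plus <signal.h> and <stdio.h>.  */

static void
sketch_report_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  if (sigismember (&pending, SIGINT))
    printf ("SIGINT already pending for %d\n", pid);
  else if (sigismember (&blocked, SIGINT))
    printf ("SIGINT currently blocked by %d\n", pid);
}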
9b409511 4329static enum target_xfer_status
07e059b5 4330linux_nat_xfer_osdata (struct target_ops *ops, enum target_object object,
e0881a8e 4331 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4332 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4333 ULONGEST *xfered_len)
07e059b5 4334{
07e059b5
VP
4335 gdb_assert (object == TARGET_OBJECT_OSDATA);
4336
9b409511
YQ
4337 *xfered_len = linux_common_xfer_osdata (annex, readbuf, offset, len);
4338 if (*xfered_len == 0)
4339 return TARGET_XFER_EOF;
4340 else
4341 return TARGET_XFER_OK;
07e059b5
VP
4342}
4343
9b409511 4344static enum target_xfer_status
10d6c8cd
DJ
4345linux_xfer_partial (struct target_ops *ops, enum target_object object,
4346 const char *annex, gdb_byte *readbuf,
9b409511
YQ
4347 const gdb_byte *writebuf, ULONGEST offset, ULONGEST len,
4348 ULONGEST *xfered_len)
10d6c8cd 4349{
9b409511 4350 enum target_xfer_status xfer;
10d6c8cd
DJ
4351
4352 if (object == TARGET_OBJECT_AUXV)
9f2982ff 4353 return memory_xfer_auxv (ops, object, annex, readbuf, writebuf,
9b409511 4354 offset, len, xfered_len);
10d6c8cd 4355
07e059b5
VP
4356 if (object == TARGET_OBJECT_OSDATA)
4357 return linux_nat_xfer_osdata (ops, object, annex, readbuf, writebuf,
9b409511 4358 offset, len, xfered_len);
07e059b5 4359
efcbbd14
UW
4360 if (object == TARGET_OBJECT_SPU)
4361 return linux_proc_xfer_spu (ops, object, annex, readbuf, writebuf,
9b409511 4362 offset, len, xfered_len);
efcbbd14 4363
8f313923
JK
4364/* GDB calculates all the addresses in possibly larger width of the address.
4365 Address width needs to be masked before its final use - either by
4366 linux_proc_xfer_partial or inf_ptrace_xfer_partial.
4367
4368 Compare ADDR_BIT first to avoid a compiler warning on shift overflow. */
4369
4370 if (object == TARGET_OBJECT_MEMORY)
4371 {
f5656ead 4372 int addr_bit = gdbarch_addr_bit (target_gdbarch ());
8f313923
JK
4373
4374 if (addr_bit < (sizeof (ULONGEST) * HOST_CHAR_BIT))
4375 offset &= ((ULONGEST) 1 << addr_bit) - 1;
4376 }
4377
10d6c8cd 4378 xfer = linux_proc_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511
YQ
4379 offset, len, xfered_len);
4380 if (xfer != TARGET_XFER_EOF)
10d6c8cd
DJ
4381 return xfer;
4382
4383 return super_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 4384 offset, len, xfered_len);
10d6c8cd
DJ
4385}
4386
5808517f
YQ
4387static void
4388cleanup_target_stop (void *arg)
4389{
4390 ptid_t *ptid = (ptid_t *) arg;
4391
4392 gdb_assert (arg != NULL);
4393
4394 /* Unpause all */
a493e3e2 4395 target_resume (*ptid, 0, GDB_SIGNAL_0);
5808517f
YQ
4396}
4397
4398static VEC(static_tracepoint_marker_p) *
c686c57f
TT
4399linux_child_static_tracepoint_markers_by_strid (struct target_ops *self,
4400 const char *strid)
5808517f
YQ
4401{
4402 char s[IPA_CMD_BUF_SIZE];
4403 struct cleanup *old_chain;
4404 int pid = ptid_get_pid (inferior_ptid);
4405 VEC(static_tracepoint_marker_p) *markers = NULL;
4406 struct static_tracepoint_marker *marker = NULL;
4407 char *p = s;
4408 ptid_t ptid = ptid_build (pid, 0, 0);
4409
4410 /* Pause all */
4411 target_stop (ptid);
4412
4413 memcpy (s, "qTfSTM", sizeof ("qTfSTM"));
4414 s[sizeof ("qTfSTM")] = 0;
4415
42476b70 4416 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4417
4418 old_chain = make_cleanup (free_current_marker, &marker);
4419 make_cleanup (cleanup_target_stop, &ptid);
4420
4421 while (*p++ == 'm')
4422 {
4423 if (marker == NULL)
4424 marker = XCNEW (struct static_tracepoint_marker);
4425
4426 do
4427 {
4428 parse_static_tracepoint_marker_definition (p, &p, marker);
4429
4430 if (strid == NULL || strcmp (strid, marker->str_id) == 0)
4431 {
4432 VEC_safe_push (static_tracepoint_marker_p,
4433 markers, marker);
4434 marker = NULL;
4435 }
4436 else
4437 {
4438 release_static_tracepoint_marker (marker);
4439 memset (marker, 0, sizeof (*marker));
4440 }
4441 }
4442 while (*p++ == ','); /* comma-separated list */
4443
4444 memcpy (s, "qTsSTM", sizeof ("qTsSTM"));
4445 s[sizeof ("qTsSTM")] = 0;
42476b70 4446 agent_run_command (pid, s, strlen (s) + 1);
5808517f
YQ
4447 p = s;
4448 }
4449
4450 do_cleanups (old_chain);
4451
4452 return markers;
4453}
4454
e9efe249 4455/* Create a prototype generic GNU/Linux target. The client can override
10d6c8cd
DJ
4456 it with local methods. */
4457
910122bf
UW
4458static void
4459linux_target_install_ops (struct target_ops *t)
10d6c8cd 4460{
6d8fd2b7 4461 t->to_insert_fork_catchpoint = linux_child_insert_fork_catchpoint;
eb73ad13 4462 t->to_remove_fork_catchpoint = linux_child_remove_fork_catchpoint;
6d8fd2b7 4463 t->to_insert_vfork_catchpoint = linux_child_insert_vfork_catchpoint;
eb73ad13 4464 t->to_remove_vfork_catchpoint = linux_child_remove_vfork_catchpoint;
6d8fd2b7 4465 t->to_insert_exec_catchpoint = linux_child_insert_exec_catchpoint;
eb73ad13 4466 t->to_remove_exec_catchpoint = linux_child_remove_exec_catchpoint;
a96d9b2e 4467 t->to_set_syscall_catchpoint = linux_child_set_syscall_catchpoint;
6d8fd2b7 4468 t->to_pid_to_exec_file = linux_child_pid_to_exec_file;
10d6c8cd 4469 t->to_post_startup_inferior = linux_child_post_startup_inferior;
6d8fd2b7
UW
4470 t->to_post_attach = linux_child_post_attach;
4471 t->to_follow_fork = linux_child_follow_fork;
10d6c8cd
DJ
4472
4473 super_xfer_partial = t->to_xfer_partial;
4474 t->to_xfer_partial = linux_xfer_partial;
5808517f
YQ
4475
4476 t->to_static_tracepoint_markers_by_strid
4477 = linux_child_static_tracepoint_markers_by_strid;
910122bf
UW
4478}
4479
4480struct target_ops *
4481linux_target (void)
4482{
4483 struct target_ops *t;
4484
4485 t = inf_ptrace_target ();
4486 linux_target_install_ops (t);
4487
4488 return t;
4489}
4490
4491struct target_ops *
7714d83a 4492linux_trad_target (CORE_ADDR (*register_u_offset)(struct gdbarch *, int, int))
910122bf
UW
4493{
4494 struct target_ops *t;
4495
4496 t = inf_ptrace_trad_target (register_u_offset);
4497 linux_target_install_ops (t);
10d6c8cd 4498
10d6c8cd
DJ
4499 return t;
4500}
4501
b84876c2
PA
4502/* target_is_async_p implementation. */
4503
4504static int
6a109b6b 4505linux_nat_is_async_p (struct target_ops *ops)
b84876c2 4506{
198297aa 4507 return linux_is_async_p ();
b84876c2
PA
4508}
4509
4510/* target_can_async_p implementation. */
4511
4512static int
6a109b6b 4513linux_nat_can_async_p (struct target_ops *ops)
b84876c2
PA
4514{
4515 /* NOTE: palves 2008-03-21: We're only async when the user requests
7feb7d06 4516 it explicitly with the "set target-async" command.
b84876c2 4517 Someday, linux will always be async. */
3dd5b83d 4518 return target_async_permitted;
b84876c2
PA
4519}
4520
9908b566 4521static int
2a9a2795 4522linux_nat_supports_non_stop (struct target_ops *self)
9908b566
VP
4523{
4524 return 1;
4525}
4526
d90e17a7
PA
4527/* True if we want to support multi-process. To be removed when GDB
4528 supports multi-exec. */
4529
2277426b 4530int linux_multi_process = 1;
d90e17a7
PA
4531
4532static int
86ce2668 4533linux_nat_supports_multi_process (struct target_ops *self)
d90e17a7
PA
4534{
4535 return linux_multi_process;
4536}
4537
03583c20 4538static int
2bfc0540 4539linux_nat_supports_disable_randomization (struct target_ops *self)
03583c20
UW
4540{
4541#ifdef HAVE_PERSONALITY
4542 return 1;
4543#else
4544 return 0;
4545#endif
4546}
4547
b84876c2
PA
4548static int async_terminal_is_ours = 1;
4549
4d4ca2a1
DE
4550/* target_terminal_inferior implementation.
4551
4552 This is a wrapper around child_terminal_inferior to add async support. */
b84876c2
PA
4553
4554static void
d2f640d4 4555linux_nat_terminal_inferior (struct target_ops *self)
b84876c2 4556{
198297aa
PA
4557 /* Like target_terminal_inferior, use target_can_async_p, not
4558 target_is_async_p, since at this point the target is not async
4559 yet. If it can async, then we know it will become async prior to
4560 resume. */
4561 if (!target_can_async_p ())
b84876c2
PA
4562 {
4563 /* Async mode is disabled. */
d6b64346 4564 child_terminal_inferior (self);
b84876c2
PA
4565 return;
4566 }
4567
d6b64346 4568 child_terminal_inferior (self);
b84876c2 4569
d9d2d8b6 4570 /* Calls to target_terminal_*() are meant to be idempotent. */
b84876c2
PA
4571 if (!async_terminal_is_ours)
4572 return;
4573
4574 delete_file_handler (input_fd);
4575 async_terminal_is_ours = 0;
4576 set_sigint_trap ();
4577}
4578
4d4ca2a1
DE
4579/* target_terminal_ours implementation.
4580
4581 This is a wrapper around child_terminal_ours to add async support (and
4582 implement the target_terminal_ours vs target_terminal_ours_for_output
4583 distinction). child_terminal_ours is currently no different than
4584 child_terminal_ours_for_output.
4585 We leave target_terminal_ours_for_output alone, leaving it to
4586 child_terminal_ours_for_output. */
b84876c2 4587
2c0b251b 4588static void
e3594fd1 4589linux_nat_terminal_ours (struct target_ops *self)
b84876c2 4590{
b84876c2
PA
4591 /* GDB should never give the terminal to the inferior if the
4592 inferior is running in the background (run&, continue&, etc.),
4593 but claiming it sure should. */
d6b64346 4594 child_terminal_ours (self);
b84876c2 4595
b84876c2
PA
4596 if (async_terminal_is_ours)
4597 return;
4598
4599 clear_sigint_trap ();
4600 add_file_handler (input_fd, stdin_event_handler, 0);
4601 async_terminal_is_ours = 1;
4602}
4603
4604static void (*async_client_callback) (enum inferior_event_type event_type,
4605 void *context);
4606static void *async_client_context;
4607
7feb7d06
PA
4608/* SIGCHLD handler that serves two purposes: In non-stop/async mode,
4609 so we notice when any child changes state, and notify the
4610 event-loop; it allows us to use sigsuspend in linux_nat_wait_1
4611 above to wait for the arrival of a SIGCHLD. */
4612
b84876c2 4613static void
7feb7d06 4614sigchld_handler (int signo)
b84876c2 4615{
7feb7d06
PA
4616 int old_errno = errno;
4617
01124a23
DE
4618 if (debug_linux_nat)
4619 ui_file_write_async_safe (gdb_stdlog,
4620 "sigchld\n", sizeof ("sigchld\n") - 1);
7feb7d06
PA
4621
4622 if (signo == SIGCHLD
4623 && linux_nat_event_pipe[0] != -1)
4624 async_file_mark (); /* Let the event loop know that there are
4625 events to handle. */
4626
4627 errno = old_errno;
4628}
4629
4630/* Callback registered with the target events file descriptor. */
4631
4632static void
4633handle_target_event (int error, gdb_client_data client_data)
4634{
4635 (*async_client_callback) (INF_REG_EVENT, async_client_context);
4636}
4637
4638/* Create/destroy the target events pipe. Returns previous state. */
4639
4640static int
4641linux_async_pipe (int enable)
4642{
198297aa 4643 int previous = linux_is_async_p ();
7feb7d06
PA
4644
4645 if (previous != enable)
4646 {
4647 sigset_t prev_mask;
4648
12696c10
PA
4649 /* Block child signals while we create/destroy the pipe, as
4650 their handler writes to it. */
7feb7d06
PA
4651 block_child_signals (&prev_mask);
4652
4653 if (enable)
4654 {
614c279d 4655 if (gdb_pipe_cloexec (linux_nat_event_pipe) == -1)
7feb7d06
PA
4656 internal_error (__FILE__, __LINE__,
4657 "creating event pipe failed.");
4658
4659 fcntl (linux_nat_event_pipe[0], F_SETFL, O_NONBLOCK);
4660 fcntl (linux_nat_event_pipe[1], F_SETFL, O_NONBLOCK);
4661 }
4662 else
4663 {
4664 close (linux_nat_event_pipe[0]);
4665 close (linux_nat_event_pipe[1]);
4666 linux_nat_event_pipe[0] = -1;
4667 linux_nat_event_pipe[1] = -1;
4668 }
4669
4670 restore_child_signals_mask (&prev_mask);
4671 }
4672
4673 return previous;
b84876c2
PA
4674}
4675
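/* Illustrative sketch appended to this annotated listing (not part of
   linux-nat.c): the "self pipe" pattern that linux_async_pipe and
   sigchld_handler implement.  The handler only writes a byte to a
   non-blocking pipe; the event loop learns about SIGCHLD by watching
   the read end, so no unsafe work happens in signal context.  Names
   here are hypothetical.  */

#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <string.h>
#include <sys/select.h>
#include <unistd.h>

static int event_pipe[2] = { -1, -1 };

static void
sketch_sigchld_handler (int signo)
{
  int old_errno = errno;
  char c = '+';

  /* write() is async-signal-safe; short writes and EAGAIN are fine,
     one queued byte is enough to wake the loop.  */
  (void) write (event_pipe[1], &c, 1);
  errno = old_errno;
}

static void
sketch_setup (void)
{
  struct sigaction sa;

  if (pipe (event_pipe) != 0)
    return;
  fcntl (event_pipe[0], F_SETFL, O_NONBLOCK);
  fcntl (event_pipe[1], F_SETFL, O_NONBLOCK);

  memset (&sa, 0, sizeof (sa));
  sa.sa_handler = sketch_sigchld_handler;
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = SA_RESTART;
  sigaction (SIGCHLD, &sa, NULL);
}

static void
sketch_wait_for_event (void)
{
  fd_set rfds;
  char buf[64];

  FD_ZERO (&rfds);
  FD_SET (event_pipe[0], &rfds);
  select (event_pipe[0] + 1, &rfds, NULL, NULL, NULL);

  /* Drain the pipe, then reap children with waitpid (..., WNOHANG).  */
  while (read (event_pipe[0], buf, sizeof (buf)) > 0)
    ;
}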
4676/* target_async implementation. */
4677
4678static void
6a109b6b
TT
4679linux_nat_async (struct target_ops *ops,
4680 void (*callback) (enum inferior_event_type event_type,
4681 void *context),
4682 void *context)
b84876c2 4683{
b84876c2
PA
4684 if (callback != NULL)
4685 {
4686 async_client_callback = callback;
4687 async_client_context = context;
7feb7d06
PA
4688 if (!linux_async_pipe (1))
4689 {
4690 add_file_handler (linux_nat_event_pipe[0],
4691 handle_target_event, NULL);
4692 /* There may be pending events to handle. Tell the event loop
4693 to poll them. */
4694 async_file_mark ();
4695 }
b84876c2
PA
4696 }
4697 else
4698 {
4699 async_client_callback = callback;
4700 async_client_context = context;
b84876c2 4701 delete_file_handler (linux_nat_event_pipe[0]);
7feb7d06 4702 linux_async_pipe (0);
b84876c2
PA
4703 }
4704 return;
4705}
4706
a493e3e2 4707/* Stop an LWP, and push a GDB_SIGNAL_0 stop status if no other
252fbfc8
PA
4708 event came out. */
4709
4c28f408 4710static int
252fbfc8 4711linux_nat_stop_lwp (struct lwp_info *lwp, void *data)
4c28f408 4712{
d90e17a7 4713 if (!lwp->stopped)
252fbfc8 4714 {
d90e17a7
PA
4715 if (debug_linux_nat)
4716 fprintf_unfiltered (gdb_stdlog,
4717 "LNSL: running -> suspending %s\n",
4718 target_pid_to_str (lwp->ptid));
252fbfc8 4719
252fbfc8 4720
25289eb2
PA
4721 if (lwp->last_resume_kind == resume_stop)
4722 {
4723 if (debug_linux_nat)
4724 fprintf_unfiltered (gdb_stdlog,
4725 "linux-nat: already stopping LWP %ld at "
4726 "GDB's request\n",
4727 ptid_get_lwp (lwp->ptid));
4728 return 0;
4729 }
252fbfc8 4730
25289eb2
PA
4731 stop_callback (lwp, NULL);
4732 lwp->last_resume_kind = resume_stop;
d90e17a7
PA
4733 }
4734 else
4735 {
4736 /* Already known to be stopped; do nothing. */
252fbfc8 4737
d90e17a7
PA
4738 if (debug_linux_nat)
4739 {
e09875d4 4740 if (find_thread_ptid (lwp->ptid)->stop_requested)
3e43a32a
MS
4741 fprintf_unfiltered (gdb_stdlog,
4742 "LNSL: already stopped/stop_requested %s\n",
d90e17a7
PA
4743 target_pid_to_str (lwp->ptid));
4744 else
3e43a32a
MS
4745 fprintf_unfiltered (gdb_stdlog,
4746 "LNSL: already stopped/no "
4747 "stop_requested yet %s\n",
d90e17a7 4748 target_pid_to_str (lwp->ptid));
252fbfc8
PA
4749 }
4750 }
4c28f408
PA
4751 return 0;
4752}
4753
4754static void
1eab8a48 4755linux_nat_stop (struct target_ops *self, ptid_t ptid)
4c28f408
PA
4756{
4757 if (non_stop)
d90e17a7 4758 iterate_over_lwps (ptid, linux_nat_stop_lwp, NULL);
4c28f408 4759 else
1eab8a48 4760 linux_ops->to_stop (linux_ops, ptid);
4c28f408
PA
4761}
4762
d90e17a7 4763static void
de90e03d 4764linux_nat_close (struct target_ops *self)
d90e17a7
PA
4765{
4766 /* Unregister from the event loop. */
9debeba0
DE
4767 if (linux_nat_is_async_p (self))
4768 linux_nat_async (self, NULL, NULL);
d90e17a7 4769
d90e17a7 4770 if (linux_ops->to_close)
de90e03d 4771 linux_ops->to_close (linux_ops);
6a3cb8e8
PA
4772
4773 super_close (self);
d90e17a7
PA
4774}
4775
c0694254
PA
4776/* When requests are passed down from the linux-nat layer to the
4777 single threaded inf-ptrace layer, ptids of (lwpid,0,0) form are
4778 used. The address space pointer is stored in the inferior object,
4779 but the common code that is passed such ptid can't tell whether
4780 lwpid is a "main" process id or not (it assumes so). We reverse
4781 look up the "main" process id from the lwp here. */
4782
70221824 4783static struct address_space *
c0694254
PA
4784linux_nat_thread_address_space (struct target_ops *t, ptid_t ptid)
4785{
4786 struct lwp_info *lwp;
4787 struct inferior *inf;
4788 int pid;
4789
dfd4cc63 4790 if (ptid_get_lwp (ptid) == 0)
c0694254
PA
4791 {
4792 /* An (lwpid,0,0) ptid. Look up the lwp object to get at the
4793 tgid. */
4794 lwp = find_lwp_pid (ptid);
dfd4cc63 4795 pid = ptid_get_pid (lwp->ptid);
c0694254
PA
4796 }
4797 else
4798 {
4799 /* A (pid,lwpid,0) ptid. */
dfd4cc63 4800 pid = ptid_get_pid (ptid);
c0694254
PA
4801 }
4802
4803 inf = find_inferior_pid (pid);
4804 gdb_assert (inf != NULL);
4805 return inf->aspace;
4806}
4807
dc146f7c
VP
4808/* Return the cached value of the processor core for thread PTID. */
4809
70221824 4810static int
dc146f7c
VP
4811linux_nat_core_of_thread (struct target_ops *ops, ptid_t ptid)
4812{
4813 struct lwp_info *info = find_lwp_pid (ptid);
e0881a8e 4814
dc146f7c
VP
4815 if (info)
4816 return info->core;
4817 return -1;
4818}
4819
f973ed9c
DJ
4820void
4821linux_nat_add_target (struct target_ops *t)
4822{
f973ed9c
DJ
4823 /* Save the provided single-threaded target. We save this in a separate
4824 variable because another target we've inherited from (e.g. inf-ptrace)
4825 may have saved a pointer to T; we want to use it for the final
4826 process stratum target. */
4827 linux_ops_saved = *t;
4828 linux_ops = &linux_ops_saved;
4829
4830 /* Override some methods for multithreading. */
b84876c2 4831 t->to_create_inferior = linux_nat_create_inferior;
f973ed9c
DJ
4832 t->to_attach = linux_nat_attach;
4833 t->to_detach = linux_nat_detach;
4834 t->to_resume = linux_nat_resume;
4835 t->to_wait = linux_nat_wait;
2455069d 4836 t->to_pass_signals = linux_nat_pass_signals;
f973ed9c
DJ
4837 t->to_xfer_partial = linux_nat_xfer_partial;
4838 t->to_kill = linux_nat_kill;
4839 t->to_mourn_inferior = linux_nat_mourn_inferior;
4840 t->to_thread_alive = linux_nat_thread_alive;
4841 t->to_pid_to_str = linux_nat_pid_to_str;
4694da01 4842 t->to_thread_name = linux_nat_thread_name;
f973ed9c 4843 t->to_has_thread_control = tc_schedlock;
c0694254 4844 t->to_thread_address_space = linux_nat_thread_address_space;
ebec9a0f
PA
4845 t->to_stopped_by_watchpoint = linux_nat_stopped_by_watchpoint;
4846 t->to_stopped_data_address = linux_nat_stopped_data_address;
faf09f01
PA
4847 t->to_stopped_by_sw_breakpoint = linux_nat_stopped_by_sw_breakpoint;
4848 t->to_supports_stopped_by_sw_breakpoint = linux_nat_supports_stopped_by_sw_breakpoint;
4849 t->to_stopped_by_hw_breakpoint = linux_nat_stopped_by_hw_breakpoint;
4850 t->to_supports_stopped_by_hw_breakpoint = linux_nat_supports_stopped_by_hw_breakpoint;
f973ed9c 4851
b84876c2
PA
4852 t->to_can_async_p = linux_nat_can_async_p;
4853 t->to_is_async_p = linux_nat_is_async_p;
9908b566 4854 t->to_supports_non_stop = linux_nat_supports_non_stop;
b84876c2 4855 t->to_async = linux_nat_async;
b84876c2
PA
4856 t->to_terminal_inferior = linux_nat_terminal_inferior;
4857 t->to_terminal_ours = linux_nat_terminal_ours;
6a3cb8e8
PA
4858
4859 super_close = t->to_close;
d90e17a7 4860 t->to_close = linux_nat_close;
b84876c2 4861
4c28f408
PA
4862 /* Methods for non-stop support. */
4863 t->to_stop = linux_nat_stop;
4864
d90e17a7
PA
4865 t->to_supports_multi_process = linux_nat_supports_multi_process;
4866
03583c20
UW
4867 t->to_supports_disable_randomization
4868 = linux_nat_supports_disable_randomization;
4869
dc146f7c
VP
4870 t->to_core_of_thread = linux_nat_core_of_thread;
4871
f973ed9c
DJ
4872 /* We don't change the stratum; this target will sit at
4873 process_stratum and thread_db will set at thread_stratum. This
4874 is a little strange, since this is a multi-threaded-capable
4875 target, but we want to be on the stack below thread_db, and we
4876 also want to be used for single-threaded processes. */
4877
4878 add_target (t);
f973ed9c
DJ
4879}
4880
9f0bdab8
DJ
4881/* Register a method to call whenever a new thread is attached. */
4882void
7b50312a
PA
4883linux_nat_set_new_thread (struct target_ops *t,
4884 void (*new_thread) (struct lwp_info *))
9f0bdab8
DJ
4885{
4886 /* Save the pointer. We only support a single registered instance
4887 of the GNU/Linux native target, so we do not need to map this to
4888 T. */
4889 linux_nat_new_thread = new_thread;
4890}
4891
26cb8b7c
PA
4892/* See declaration in linux-nat.h. */
4893
4894void
4895linux_nat_set_new_fork (struct target_ops *t,
4896 linux_nat_new_fork_ftype *new_fork)
4897{
4898 /* Save the pointer. */
4899 linux_nat_new_fork = new_fork;
4900}
4901
4902/* See declaration in linux-nat.h. */
4903
4904void
4905linux_nat_set_forget_process (struct target_ops *t,
4906 linux_nat_forget_process_ftype *fn)
4907{
4908 /* Save the pointer. */
4909 linux_nat_forget_process_hook = fn;
4910}
4911
4912/* See declaration in linux-nat.h. */
4913
4914void
4915linux_nat_forget_process (pid_t pid)
4916{
4917 if (linux_nat_forget_process_hook != NULL)
4918 linux_nat_forget_process_hook (pid);
4919}
4920
5b009018
PA
4921/* Register a method that converts a siginfo object between the layout
4922 that ptrace returns, and the layout in the architecture of the
4923 inferior. */
4924void
4925linux_nat_set_siginfo_fixup (struct target_ops *t,
a5362b9a 4926 int (*siginfo_fixup) (siginfo_t *,
5b009018
PA
4927 gdb_byte *,
4928 int))
4929{
4930 /* Save the pointer. */
4931 linux_nat_siginfo_fixup = siginfo_fixup;
4932}
4933
7b50312a
PA
4934/* Register a method to call prior to resuming a thread. */
4935
4936void
4937linux_nat_set_prepare_to_resume (struct target_ops *t,
4938 void (*prepare_to_resume) (struct lwp_info *))
4939{
4940 /* Save the pointer. */
4941 linux_nat_prepare_to_resume = prepare_to_resume;
4942}
4943
f865ee35
JK
4944/* See linux-nat.h. */
4945
4946int
4947linux_nat_get_siginfo (ptid_t ptid, siginfo_t *siginfo)
9f0bdab8 4948{
da559b09 4949 int pid;
9f0bdab8 4950
dfd4cc63 4951 pid = ptid_get_lwp (ptid);
da559b09 4952 if (pid == 0)
dfd4cc63 4953 pid = ptid_get_pid (ptid);
f865ee35 4954
da559b09
JK
4955 errno = 0;
4956 ptrace (PTRACE_GETSIGINFO, pid, (PTRACE_TYPE_ARG3) 0, siginfo);
4957 if (errno != 0)
4958 {
4959 memset (siginfo, 0, sizeof (*siginfo));
4960 return 0;
4961 }
f865ee35 4962 return 1;
9f0bdab8
DJ
4963}
4964
7b669087
GB
4965/* See nat/linux-nat.h. */
4966
4967ptid_t
4968current_lwp_ptid (void)
4969{
4970 gdb_assert (ptid_lwp_p (inferior_ptid));
4971 return inferior_ptid;
4972}
4973
2c0b251b
PA
4974/* Provide a prototype to silence -Wmissing-prototypes. */
4975extern initialize_file_ftype _initialize_linux_nat;
4976
d6b0e80f
AC
4977void
4978_initialize_linux_nat (void)
4979{
ccce17b0
YQ
4980 add_setshow_zuinteger_cmd ("lin-lwp", class_maintenance,
4981 &debug_linux_nat, _("\
b84876c2
PA
4982Set debugging of GNU/Linux lwp module."), _("\
4983Show debugging of GNU/Linux lwp module."), _("\
4984Enables printf debugging output."),
ccce17b0
YQ
4985 NULL,
4986 show_debug_linux_nat,
4987 &setdebuglist, &showdebuglist);
b84876c2 4988
b84876c2 4989 /* Save this mask as the default. */
d6b0e80f
AC
4990 sigprocmask (SIG_SETMASK, NULL, &normal_mask);
4991
7feb7d06
PA
4992 /* Install a SIGCHLD handler. */
4993 sigchld_action.sa_handler = sigchld_handler;
4994 sigemptyset (&sigchld_action.sa_mask);
4995 sigchld_action.sa_flags = SA_RESTART;
b84876c2
PA
4996
4997 /* Make it the default. */
7feb7d06 4998 sigaction (SIGCHLD, &sigchld_action, NULL);
d6b0e80f
AC
4999
5000 /* Make sure we don't block SIGCHLD during a sigsuspend. */
5001 sigprocmask (SIG_SETMASK, NULL, &suspend_mask);
5002 sigdelset (&suspend_mask, SIGCHLD);
5003
7feb7d06 5004 sigemptyset (&blocked_mask);
8009206a
TT
5005
5006 /* Do not enable PTRACE_O_TRACEEXIT until GDB is more prepared to
5007 support read-only process state. */
5008 linux_ptrace_set_additional_flags (PTRACE_O_TRACESYSGOOD
5009 | PTRACE_O_TRACEVFORKDONE
5010 | PTRACE_O_TRACEVFORK
5011 | PTRACE_O_TRACEFORK
5012 | PTRACE_O_TRACEEXEC);
d6b0e80f
AC
5013}
5014\f
5015
5016/* FIXME: kettenis/2000-08-26: The stuff on this page is specific to
5017 the GNU/Linux Threads library and therefore doesn't really belong
5018 here. */
5019
5020/* Read variable NAME in the target and return its value if found.
5021 Otherwise return zero. It is assumed that the type of the variable
5022 is `int'. */
5023
5024static int
5025get_signo (const char *name)
5026{
3b7344d5 5027 struct bound_minimal_symbol ms;
d6b0e80f
AC
5028 int signo;
5029
5030 ms = lookup_minimal_symbol (name, NULL, NULL);
3b7344d5 5031 if (ms.minsym == NULL)
d6b0e80f
AC
5032 return 0;
5033
77e371c0 5034 if (target_read_memory (BMSYMBOL_VALUE_ADDRESS (ms), (gdb_byte *) &signo,
d6b0e80f
AC
5035 sizeof (signo)) != 0)
5036 return 0;
5037
5038 return signo;
5039}
5040
5041/* Return the set of signals used by the threads library in *SET. */
5042
5043void
5044lin_thread_get_thread_signals (sigset_t *set)
5045{
5046 struct sigaction action;
5047 int restart, cancel;
5048
b84876c2 5049 sigemptyset (&blocked_mask);
d6b0e80f
AC
5050 sigemptyset (set);
5051
5052 restart = get_signo ("__pthread_sig_restart");
17fbb0bd
DJ
5053 cancel = get_signo ("__pthread_sig_cancel");
5054
5055 /* LinuxThreads normally uses the first two RT signals, but in some legacy
5056 cases may use SIGUSR1/SIGUSR2. NPTL always uses RT signals, but does
5057 not provide any way for the debugger to query the signal numbers -
5058 fortunately they don't change! */
5059
d6b0e80f 5060 if (restart == 0)
17fbb0bd 5061 restart = __SIGRTMIN;
d6b0e80f 5062
d6b0e80f 5063 if (cancel == 0)
17fbb0bd 5064 cancel = __SIGRTMIN + 1;
d6b0e80f
AC
5065
5066 sigaddset (set, restart);
5067 sigaddset (set, cancel);
5068
5069 /* The GNU/Linux Threads library makes terminating threads send a
5070 special "cancel" signal instead of SIGCHLD. Make sure we catch
5071 those (to prevent them from terminating GDB itself, which is
5072 likely to be their default action) and treat them the same way as
5073 SIGCHLD. */
5074
5075 action.sa_handler = sigchld_handler;
5076 sigemptyset (&action.sa_mask);
58aecb61 5077 action.sa_flags = SA_RESTART;
d6b0e80f
AC
5078 sigaction (cancel, &action, NULL);
5079
5080 /* We block the "cancel" signal throughout this code ... */
5081 sigaddset (&blocked_mask, cancel);
5082 sigprocmask (SIG_BLOCK, &blocked_mask, NULL);
5083
5084 /* ... except during a sigsuspend. */
5085 sigdelset (&suspend_mask, cancel);
5086}